hip_filename (stringlengths 5-84) | hip_content (stringlengths 79-9.69M) | cuda_filename (stringlengths 4-83) | cuda_content (stringlengths 19-9.69M) |
---|---|---|---|
ed9b9fd53199cae6594c1038f6bcf1846e4eabd8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// incrementArray.cu
#include <stdio.h>
#include <time.h>
#include <sys/types.h>
#include <unistd.h>
void incrementArrayOnHost(float *a, int N)
{
int i;
for (i=0; i < N; i++) a[i] = a[i]+1.f;
}
__global__ void incrementArrayOnDevice(float *a, int N)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx<N) a[idx] = a[idx]+1.f;
}
void printarray(float *a, int n)
{
int i = 0;
for (i = 0; i < n; i++) printf("%f ", a[i]);
printf("\n");
}
// http://www.concentric.net/~Ttwang/tech/inthash.htm
unsigned long mix(unsigned long a, unsigned long b, unsigned long c)
{
a=a-b; a=a-c; a=a^(c >> 13);
b=b-c; b=b-a; b=b^(a << 8);
c=c-a; c=c-b; c=c^(b >> 13);
a=a-b; a=a-c; a=a^(c >> 12);
b=b-c; b=b-a; b=b^(a << 16);
c=c-a; c=c-b; c=c^(b >> 5);
a=a-b; a=a-c; a=a^(c >> 3);
b=b-c; b=b-a; b=b^(a << 10);
c=c-a; c=c-b; c=c^(b >> 15);
return c;
}
int main(int argc, char** argv)
{
// program args
if (argc < 3) {
printf("usage: incrementArrayRandom <max_size> <repetitions>\n");
return EXIT_SUCCESS;
}
int max_size = atoi(argv[1]);
int repetitions = atoi(argv[2]);
// randomize within same run
srand(mix(clock(), time(NULL), getpid()));
float *a_h, *b_h; // pointers to host memory
float *a_d; // pointer to device memory
int i, epoch = 0;
int N = 0;
int total_success = 0;
for (epoch = 0; epoch < repetitions; epoch++) {
N = rand() % max_size;
size_t size = N*sizeof(float);
// allocate arrays on host
a_h = (float *)malloc(size);
b_h = (float *)malloc(size);
// allocate array on device
hipMalloc((void **) &a_d, size);
// initialization of host data
for (i=0; i<N; i++) a_h[i] = (float)i;
// copy data from host to device
hipMemcpy(a_d, a_h, sizeof(float)*N, hipMemcpyHostToDevice);
// do calculation on host
incrementArrayOnHost(a_h, N);
// printarray(a_h, N);
// do calculation on device:
// Part 1 of 2. Compute execution configuration
int blockSize = 4;
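// round the grid up so every element gets a thread: nBlocks = ceil(N / blockSize)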
int nBlocks = N/blockSize + (N%blockSize == 0?0:1);
// Part 2 of 2. Call incrementArrayOnDevice kernel
hipLaunchKernelGGL(( incrementArrayOnDevice) , dim3(nBlocks), dim3(blockSize) , 0, 0, a_d, N);
// Retrieve result from device and store in b_h
hipMemcpy(b_h, a_d, sizeof(float)*N, hipMemcpyDeviceToHost);
// check results
// printarray(b_h, N);
int success = 1;
for (i=0; i<N; i++) {
if (a_h[i] != b_h[i]) {
success = 0;
break;
}
}
printf("epoch %d a[%d] = %s\n", epoch, N, (success == 1) ? "true" : "false");
if (success == 1) total_success += 1;
// release this epoch's buffers before the next allocation
free(a_h); free(b_h); hipFree(a_d);
}
printf("\nsuccess rate: %f%%\n", total_success / ((float)repetitions) * 100.0);
return EXIT_SUCCESS;
}
| ed9b9fd53199cae6594c1038f6bcf1846e4eabd8.cu | // incrementArray.cu
#include <stdio.h>
#include <time.h>
#include <sys/types.h>
#include <unistd.h>
void incrementArrayOnHost(float *a, int N)
{
int i;
for (i=0; i < N; i++) a[i] = a[i]+1.f;
}
__global__ void incrementArrayOnDevice(float *a, int N)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx<N) a[idx] = a[idx]+1.f;
}
void printarray(float *a, int n)
{
int i = 0;
for (i = 0; i < n; i++) printf("%f ", a[i]);
printf("\n");
}
// http://www.concentric.net/~Ttwang/tech/inthash.htm
unsigned long mix(unsigned long a, unsigned long b, unsigned long c)
{
a=a-b; a=a-c; a=a^(c >> 13);
b=b-c; b=b-a; b=b^(a << 8);
c=c-a; c=c-b; c=c^(b >> 13);
a=a-b; a=a-c; a=a^(c >> 12);
b=b-c; b=b-a; b=b^(a << 16);
c=c-a; c=c-b; c=c^(b >> 5);
a=a-b; a=a-c; a=a^(c >> 3);
b=b-c; b=b-a; b=b^(a << 10);
c=c-a; c=c-b; c=c^(b >> 15);
return c;
}
int main(int argc, char** argv)
{
// program args
if (argc < 3) {
printf("usage: incrementArrayRandom <max_size> <repetitions>\n");
return EXIT_SUCCESS;
}
int max_size = atoi(argv[1]);
int repetitions = atoi(argv[2]);
// randomize within same run
srand(mix(clock(), time(NULL), getpid()));
float *a_h, *b_h; // pointers to host memory
float *a_d; // pointer to device memory
int i, epoch = 0;
int N = 0;
int total_success = 0;
for (epoch = 0; epoch < repetitions; epoch++) {
N = rand() % max_size;
size_t size = N*sizeof(float);
// allocate arrays on host
a_h = (float *)malloc(size);
b_h = (float *)malloc(size);
// allocate array on device
cudaMalloc((void **) &a_d, size);
// initialization of host data
for (i=0; i<N; i++) a_h[i] = (float)i;
// copy data from host to device
cudaMemcpy(a_d, a_h, sizeof(float)*N, cudaMemcpyHostToDevice);
// do calculation on host
incrementArrayOnHost(a_h, N);
// printarray(a_h, N);
// do calculation on device:
// Part 1 of 2. Compute execution configuration
int blockSize = 4;
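// round the grid up so every element gets a thread: nBlocks = ceil(N / blockSize)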
int nBlocks = N/blockSize + (N%blockSize == 0?0:1);
// Part 2 of 2. Call incrementArrayOnDevice kernel
incrementArrayOnDevice <<< nBlocks, blockSize >>> (a_d, N);
// Retrieve result from device and store in b_h
cudaMemcpy(b_h, a_d, sizeof(float)*N, cudaMemcpyDeviceToHost);
// check results
// printarray(b_h, N);
int success = 1;
for (i=0; i<N; i++) {
if (a_h[i] != b_h[i]) {
success = 0;
break;
}
}
printf("epoch %d a[%d] = %s\n", epoch, N, (success == 1) ? "true" : "false");
if (success == 1) total_success += 1;
// release this epoch's buffers before the next allocation
free(a_h); free(b_h); cudaFree(a_d);
}
printf("\nsuccess rate: %f%%\n", total_success / ((float)repetitions) * 100.0);
return EXIT_SUCCESS;
}
|
09a4aed5774ae9c6f8ee47e277ea35c5e080ca05.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by liang on 2/16/18.
//
#include <vector>
#include <algorithm>
#include <thread>
#include <memory>
#include <random>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <groute/event_pool.h>
#include <groute/graphs/csr_graph.h>
#include <groute/dwl/work_source.cuh>
#include <groute/device/cta_scheduler.cuh>
#include <utils/parser.h>
#include <utils/utils.h>
#include <utils/graphs/traversal.h>
#include <utils/stopwatch.h>
#include <moderngpu/context.hxx>
#include <moderngpu/kernel_scan.hxx>
#include <gflags/gflags.h>
#include <glog/logging.h>
#include <boost/format.hpp>
#include <utils/cuda_utils.h>
#include "pr_common.h"
DECLARE_double(wl_alloc_factor);
DECLARE_uint64(wl_alloc_abs);
DECLARE_int32(max_pr_iterations);
DECLARE_double(threshold);
DECLARE_int32(grid_size);
DECLARE_int32(block_size);
DECLARE_int32(mode);
DECLARE_int32(switch_threshold);
DECLARE_int32(async_to_sync);
DECLARE_int32(sync_to_async);
DECLARE_bool(force_sync);
DECLARE_bool(force_async);
namespace hybrid {
rank_t IDENTITY_ELEMENT = 0;
template<typename WorkSource,
typename TGraph,
template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankInit__Single__(
WorkSource work_source,
TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual, ResidualDatum<rank_t> last_residual) {
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
for (index_t ii = 0 + tid; ii < work_source.get_size(); ii += nthreads) {
index_t node = work_source.get_work(ii);
current_ranks[node] = 1.0 - ALPHA;
last_residual[node] = 0.0;
index_t
begin_edge = graph.begin_edge(node),
end_edge = graph.end_edge(node),
out_degree = end_edge - begin_edge;
if (out_degree == 0) continue;
rank_t update = ((1.0 - ALPHA) * ALPHA) / out_degree;
for (index_t edge = begin_edge; edge < end_edge; ++edge) {
index_t dest = graph.edge_dest(edge);
atomicAdd(residual.get_item_ptr(dest), update);
}
}
}
template<typename TValue>
__inline__ __device__ TValue warpReduce(TValue localSum) {
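// XOR-butterfly reduction over a 32-lane warp: each exchange halves the stride,
// and after the final step every lane holds the warp-wide sum.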
localSum += __shfl_xor_sync(0xffffffff, localSum, 16);
localSum += __shfl_xor_sync(0xffffffff, localSum, 8);
localSum += __shfl_xor_sync(0xffffffff, localSum, 4);
localSum += __shfl_xor_sync(0xffffffff, localSum, 2);
localSum += __shfl_xor_sync(0xffffffff, localSum, 1);
return localSum;
}
template<template<typename> class TRankDatum>
__device__ void PageRankCheck__Single__(TRankDatum<rank_t > current_ranks,
rank_t *block_sum_buffer, rank_t *rtn_sum) {
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
int laneIdx = threadIdx.x % warpSize;
int warpIdx = threadIdx.x / warpSize;
const int SMEMDIM = blockDim.x / warpSize;
__shared__ rank_t smem[32];
uint32_t work_size = current_ranks.size;
rank_t local_sum = 0;
for (uint32_t node = 0 + tid; node < work_size; node += nthreads) {
rank_t dist = current_ranks[node];
if (dist != IDENTITY_ELEMENT)
local_sum += dist;
}
local_sum = warpReduce(local_sum);
if (laneIdx == 0)
smem[warpIdx] = local_sum;
__syncthreads();
local_sum = (threadIdx.x < SMEMDIM) ? smem[threadIdx.x] : 0;
if (warpIdx == 0)
local_sum = warpReduce(local_sum);
if (threadIdx.x == 0) {
block_sum_buffer[blockIdx.x] = local_sum;
}
if (tid == 0) {
rank_t sum = 0;
for (int bid = 0; bid < gridDim.x; bid++) {
sum += block_sum_buffer[bid];
}
*rtn_sum = sum;
}
}
template<
typename TGraph,
template<typename> class RankDatum,
template<typename> class ResidualDatum,
typename WorkSource>
__global__ void PageRankSyncKernelCTA__Single__(
WorkSource work_source,
TGraph graph,
index_t iteration,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual,
ResidualDatum<rank_t> last_residual) {
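// Synchronous (double-buffered) variant: the parity of `iteration` selects which
// residual buffer is drained this round; the pushed updates accumulate in the other
// buffer and are consumed in the next round. Neighbor pushes go through the CTA
// work scheduler.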
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x;
for (index_t i = 0 + tid; i < work_size_rup; i += nthreads) {
groute::dev::np_local<rank_t> local_work = {0, 0, 0.0};
if (i < work_size) {
index_t node = work_source.get_work(i);
rank_t res;
if (iteration % 2 == 0) {
res = residual[node];//atomicExch(residual.get_item_ptr(node), 0);
residual[node] = 0;
} else {
res = last_residual[node];
last_residual[node] = 0;
//res = atomicExch(last_residual.get_item_ptr(node), 0);
}
if (res > 0) {
current_ranks[node] += res;
local_work.start = graph.begin_edge(node);
local_work.size = graph.end_edge(node) - local_work.start;
local_work.meta_data = ALPHA * res / local_work.size;
}
}
groute::dev::CTAWorkScheduler<rank_t>::template schedule(
local_work,
[&graph, &residual, &last_residual, &iteration](index_t edge, index_t size, rank_t update) {
index_t dest = graph.edge_dest(edge);
if (iteration % 2 == 0) {
atomicAdd(last_residual.get_item_ptr(dest), update);
} else {
atomicAdd(residual.get_item_ptr(dest), update);
}
}
);
}
}
template<
typename WorkSource,
typename TGraph, template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankAsyncKernelCTA__Single__(
WorkSource work_source, TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) {
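// Asynchronous variant: a single residual buffer is drained in place with atomicExch,
// so updates pushed during this launch may already be consumed by nodes processed
// later in the same launch.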
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x;
for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) {
groute::dev::np_local<rank_t> local_work = {0, 0, 0.0};
if (i < work_size) {
index_t node = work_source.get_work(i);
rank_t res = atomicExch(residual.get_item_ptr(node), 0);
if (res > 0) {
current_ranks[node] += res;
local_work.start = graph.begin_edge(node);
local_work.size = graph.end_edge(node) - local_work.start;
local_work.meta_data = ALPHA * res / local_work.size;
}
}
groute::dev::CTAWorkScheduler<rank_t>::template schedule(
local_work,
[&graph, &residual](index_t edge, index_t size, rank_t update) {
index_t dest = graph.edge_dest(edge);
atomicAdd(residual.get_item_ptr(dest), update);
}
);
}
}
template<
typename WorkSource,
typename TGraph, template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankAsyncKernelCTAPersist__Single__(
WorkSource work_source, TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) {
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x;
while(true) {
for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) {
groute::dev::np_local<rank_t> local_work = {0, 0, 0.0};
if (i < work_size) {
index_t node = work_source.get_work(i);
rank_t res = atomicExch(residual.get_item_ptr(node), 0);
if (res > 0) {
current_ranks[node] += res;
local_work.start = graph.begin_edge(node);
local_work.size = graph.end_edge(node) - local_work.start;
local_work.meta_data = ALPHA * res / local_work.size;
}
}
groute::dev::CTAWorkScheduler<rank_t>::template schedule(
local_work,
[&graph, &residual](index_t edge, index_t size, rank_t update) {
index_t dest = graph.edge_dest(edge);
atomicAdd(residual.get_item_ptr(dest), update);
}
);
// PageRankCheck__Single__
//check here
}
}
}
struct Algo {
static const char *NameLower() { return "pr"; }
static const char *Name() { return "PR"; }
template<
typename TGraphAllocator, typename ResidualDatum, typename RankDatum, typename...UnusedData>
static const std::vector<rank_t> &Gather(
TGraphAllocator &graph_allocator, ResidualDatum &residual, RankDatum ¤t_ranks,
UnusedData &... data) {
graph_allocator.GatherDatum(current_ranks);
return current_ranks.GetHostData();
}
template<
typename ResidualDatum, typename RankDatum, typename...UnusedData>
static std::vector<rank_t> Host(
groute::graphs::host::CSRGraph &graph, ResidualDatum &residual, RankDatum ¤t_ranks,
UnusedData &... data) {
return PageRankHost(graph);
}
static int Output(const char *file, const std::vector<rank_t> &ranks) {
return PageRankOutput(file, ranks);
}
static int CheckErrors(std::vector<rank_t> &ranks, std::vector<rank_t> ®ression) {
return PageRankCheckErrors(ranks, regression);
}
};
}
bool HybridTopologyDriven1() {
VLOG(0) << "HybridTopologyDriven";
typedef groute::Queue<index_t> Worklist;
groute::graphs::single::NodeOutputDatum<rank_t> current_ranks;
groute::graphs::single::NodeOutputDatum<rank_t> residual;
groute::graphs::single::NodeOutputDatum<rank_t> last_residual;
utils::traversal::Context<hybrid::Algo> context(1);
groute::graphs::single::CSRGraphAllocator
dev_graph_allocator(context.host_graph);
context.SetDevice(0);
dev_graph_allocator.AllocateDatumObjects(current_ranks, residual, last_residual);
context.SyncDevice(0); // graph allocations are on default streams, must sync device
groute::Stream stream = context.CreateStream(0);
mgpu::standard_context_t mgpu_context(true, stream.cuda_stream);
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, context.host_graph.nnodes);
hybrid::PageRankInit__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >
(groute::dev::WorkSourceRange<index_t>(
dev_graph_allocator.DeviceObject().owned_start_node(),
dev_graph_allocator.DeviceObject().owned_nnodes()),
dev_graph_allocator.DeviceObject(),
current_ranks.DeviceObject(),
residual.DeviceObject(),
last_residual.DeviceObject());
int iteration = 0;
bool running = true;
mgpu::mem_t<double> checkSum(1, mgpu_context);
mgpu::mem_t<int> deviceOffsets = mgpu::mem_t<int>(context.host_graph.nnodes, mgpu_context);
int *scanned_offsets = deviceOffsets.data();
rank_t last_sum = 0;
int mode;
double compute_consume = 0;
while (running) {
Stopwatch sw_update(true);
groute::graphs::single::NodeOutputDatum<rank_t> *available_residual = &residual;
if (FLAGS_force_async)
goto async;
else if (FLAGS_force_sync)
goto sync;
if (iteration < FLAGS_sync_to_async || iteration >= FLAGS_async_to_sync) {
sync:
hybrid::PageRankSyncKernelCTA__Single__
<< < grid_dims, block_dims, 0, stream.cuda_stream >> >
(groute::dev::WorkSourceRange<index_t>(
dev_graph_allocator.DeviceObject().owned_start_node(),
dev_graph_allocator.DeviceObject().owned_nnodes()),
dev_graph_allocator.DeviceObject(),
iteration,
current_ranks.DeviceObject(),
residual.DeviceObject(),
last_residual.DeviceObject());
if (iteration % 2 == 0) {
available_residual = &last_residual;
}
mode = 0;
} else {
async:
hybrid::PageRankAsyncKernelCTA__Single__
<< < grid_dims, block_dims, 0, stream.cuda_stream >> >
(groute::dev::WorkSourceRange<index_t>(
dev_graph_allocator.DeviceObject().owned_start_node(),
dev_graph_allocator.DeviceObject().owned_nnodes()),
dev_graph_allocator.DeviceObject(),
current_ranks.DeviceObject(),
available_residual->DeviceObject());
mode = 1;
}
stream.Sync();
sw_update.stop();
compute_consume += sw_update.ms();
VLOG(0) << boost::format("->%s Iter:%d THROUGHPUT:%f nodes/ms\n")
% (mode == 0 ? "Sync" : "Async") % iteration %
(dev_graph_allocator.DeviceObject().owned_nnodes() / sw_update.ms());
rank_t *tmp = current_ranks.DeviceObject().data_ptr;
auto func_data_sum_func = [=]__device__(int idx) {
return tmp[idx];
};
mgpu::transform_scan<double>(func_data_sum_func, context.host_graph.nnodes,
scanned_offsets, mgpu::plus_t<double>(), checkSum.data(), mgpu_context);
double pr_sum = mgpu::from_mem(checkSum)[0];
VLOG(1) << "Checking... SUM: " << pr_sum << " Relative SUM: " << pr_sum / context.host_graph.nnodes;
rank_t *p_residual = available_residual->DeviceObject().data_ptr;
auto func_residual_sum = [=]__device__(index_t idx) {
return p_residual[idx];
};
mgpu::transform_scan<double>(func_residual_sum, context.host_graph.nnodes, scanned_offsets,
mgpu::plus_t<double>(), checkSum.data(), mgpu_context);
double residual_sum = mgpu::from_mem(checkSum)[0];
double avg_residual_sum = residual_sum / context.host_graph.nnodes;
auto func_variance_compute = [=]__device__(index_t idx) {
return (p_residual[idx] - avg_residual_sum) * (p_residual[idx] - avg_residual_sum);
};
mgpu::transform_scan<double>(func_variance_compute, context.host_graph.nnodes, scanned_offsets,
mgpu::plus_t<double>(), checkSum.data(), mgpu_context);
double residual_variance = mgpu::from_mem(checkSum)[0] / context.host_graph.nnodes;
VLOG(0) << "Residual Variance: " << residual_variance;
if (last_sum > 0) {
rank_t sum_delta = pr_sum - last_sum;
VLOG(1) << "X FACTOR: " << sum_delta / sw_update.ms();
}
last_sum = pr_sum;
if (pr_sum / context.host_graph.nnodes > FLAGS_threshold) {
VLOG(0) << "Threshold reached";
break;
}
iteration++;
if (iteration >= FLAGS_max_pr_iterations) {
LOG(WARNING) << "maximum iterations reached";
break;
}
}
VLOG(1) << boost::format("%s terminated after %d iterations ") % hybrid::Algo::Name() % iteration;
VLOG(0) << hybrid::Algo::Name() << ": " << compute_consume << " ms. <filter>";
// Gather
auto gathered_output = hybrid::Algo::Gather(dev_graph_allocator, residual, current_ranks);
if (FLAGS_output.length() != 0)
hybrid::Algo::Output(FLAGS_output.c_str(), gathered_output);
if (FLAGS_check) {
auto regression = hybrid::Algo::Host(context.host_graph, residual, current_ranks);
return hybrid::Algo::CheckErrors(gathered_output, regression) == 0;
} else {
LOG(WARNING) << "Result not checked";
return true;
}
}
//bool HybridTopologyDriven() {
// VLOG(0) << "HybridTopologyDriven";
//
// typedef groute::Queue<index_t> Worklist;
// groute::graphs::single::NodeOutputDatum<rank_t> current_ranks;
// groute::graphs::single::NodeOutputDatum<rank_t> residual;
// groute::graphs::single::NodeOutputDatum<rank_t> last_residual;
//
// utils::traversal::Context<hybrid::Algo> context(1);
//
// groute::graphs::single::CSRGraphAllocator
// dev_graph_allocator(context.host_graph);
//
// context.SetDevice(0);
//
// dev_graph_allocator.AllocateDatumObjects(current_ranks, residual, last_residual);
//
// context.SyncDevice(0); // graph allocations are on default streams, must sync device
//
// groute::Stream stream = context.CreateStream(0);
//
// mgpu::standard_context_t mgpu_context(true, stream.cuda_stream);
//
//
// dim3 grid_dims, block_dims;
// KernelSizing(grid_dims, block_dims, context.host_graph.nnodes);
//
// Stopwatch sw(true);
//
// hybrid::PageRankInit__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> > (groute::dev::WorkSourceRange<index_t>(
// dev_graph_allocator.DeviceObject().owned_start_node(),
// dev_graph_allocator.DeviceObject().owned_nnodes()),
// dev_graph_allocator.DeviceObject(),
// current_ranks.DeviceObject(),
// residual.DeviceObject(),
// last_residual.DeviceObject());
//
// int iteration = 0;
// bool running = true;
//
// //mode = 0 means sync, mode = 1 means async
// int mode = FLAGS_mode;
// int last_mode;
// srand(time(NULL));
// groute::graphs::single::NodeOutputDatum<rank_t> *available_residual = &residual;
//
// int totalIteration = 0;
//
// double totalSync = 0;
// double totalAsync = 0;
// mgpu::mem_t<double> checkSum(1, mgpu_context);
// mgpu::mem_t<int> deviceOffsets = mgpu::mem_t<int>(context.host_graph.nnodes, mgpu_context);
//
// int *scanned_offsets = deviceOffsets.data();
// rank_t last_sum = 0;
// if (mode == 2 && FLAGS_switch_threshold > 0)mode = 1;
// while (running) {
//// if (FLAGS_mode == 2)
//// mode = rand() % 2;
//// else
//
// if (totalIteration > FLAGS_switch_threshold && mode == 1) {
// last_sum = 0;
// mode = 0;
// }
// Stopwatch sw_update(false);
// VLOG(1) << "Iteration: " << ++totalIteration;
//
//
// if (mode == 0) {
// last_mode = 0;
//
// sw_update.start();
// hybrid::PageRankSyncKernelCTA__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> > (groute::dev::WorkSourceRange<index_t>(
// dev_graph_allocator.DeviceObject().owned_start_node(),
// dev_graph_allocator.DeviceObject().owned_nnodes()),
// dev_graph_allocator.DeviceObject(),
// iteration++,
// current_ranks.DeviceObject(),
// residual.DeviceObject(),
// last_residual.DeviceObject());
// stream.Sync();
// sw_update.stop();
// totalSync += sw_update.ms();
// VLOG(1) << "SYNC THROUGHPUT: " << dev_graph_allocator.DeviceObject().owned_nnodes() / sw_update.ms() << " nodes/ms";
// } else {
// last_mode = 1;
// sw_update.start();
// hybrid::PageRankAsyncKernelCTA__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> > (groute::dev::WorkSourceRange<index_t>(
// dev_graph_allocator.DeviceObject().owned_start_node(),
// dev_graph_allocator.DeviceObject().owned_nnodes()),
// dev_graph_allocator.DeviceObject(),
// current_ranks.DeviceObject(),
// available_residual->DeviceObject());
// stream.Sync();
// sw_update.stop();
// totalAsync += sw_update.ms();
// VLOG(1) << "ASYNC THROUGHPUT: " << dev_graph_allocator.DeviceObject().owned_nnodes() / sw_update.ms() << " nodes/ms";
// }
//
// if (last_mode == 0) {
// if (iteration % 2 == 0)//last round is last_residual-->residual
// available_residual = &residual;
// else
// available_residual = &last_residual;
// }
//
//
// rank_t *tmp = current_ranks.DeviceObject().data_ptr;
//
// auto func_data_sum_func = [=]__device__(int idx) {
// return tmp[idx];
// };
//
//
// mgpu::transform_scan<double>(func_data_sum_func, context.host_graph.nnodes,
// scanned_offsets, mgpu::plus_t<double>(), checkSum.data(), mgpu_context);
//
// double pr_sum = mgpu::from_mem(checkSum)[0];
//
// VLOG(1) << "Checking... SUM: " << pr_sum << " Relative SUM: " << pr_sum / context.host_graph.nnodes;
//
// rank_t *p_residual;
// if (mode == 0) {
// if (iteration % 2)
// p_residual = last_residual.DeviceObject().data_ptr;
// else
// p_residual = residual.DeviceObject().data_ptr;
// } else {
// p_residual = residual.DeviceObject().data_ptr;
// }
//
//
// auto func_residual_sum = [=]__device__(index_t idx) {
// return p_residual[idx];
// };
//
// mgpu::transform_scan<double>(func_residual_sum, context.host_graph.nnodes, scanned_offsets, mgpu::plus_t<double>(), checkSum.data(), mgpu_context);
//
// double residual_sum = mgpu::from_mem(checkSum)[0];
//
// double avg_residual_sum = residual_sum / context.host_graph.nnodes;
//
// auto func_variance_compute = [=]__device__(index_t idx) {
// return (p_residual[idx] - avg_residual_sum) * (p_residual[idx] - avg_residual_sum);
// };
//
// mgpu::transform_scan<double>(func_variance_compute, context.host_graph.nnodes, scanned_offsets, mgpu::plus_t<double>(), checkSum.data(), mgpu_context);
//
// double residual_variance = mgpu::from_mem(checkSum)[0] / context.host_graph.nnodes;
//
// VLOG(0) << "Residual Variance: " << residual_variance;
//
// if (last_sum > 0) {
// rank_t sum_delta = pr_sum - last_sum;
//
// VLOG(1) << "X FACTOR: " << sum_delta / sw_update.ms();
// }
// last_sum = pr_sum;
//
//
// if (pr_sum / context.host_graph.nnodes > FLAGS_threshold) {
// VLOG(0) << "Threshold reached";
// break;
// }
//
//
// if (totalIteration > FLAGS_max_pr_iterations) {
// LOG(WARNING) << "maximum iterations reached";
// break;
// }
//
// }
//
// sw.stop();
//
// VLOG(1)
// << boost::format("%s terminated after %d iterations (max: %d, sync: %d, async: %d)") % hybrid::Algo::Name() % totalIteration %
// FLAGS_max_pr_iterations % iteration % (totalIteration - iteration);
// VLOG(0) << hybrid::Algo::Name() << ": " << sw.ms() << " ms. <filter>";
// VLOG(0) << "AVG SYNC: " << totalSync / iteration << "ms / ASYNC: " << totalAsync / (totalIteration - iteration) << " ms.";
//
// // Gather
// auto gathered_output = hybrid::Algo::Gather(dev_graph_allocator, residual, current_ranks);
//
// if (FLAGS_output.length() != 0)
// hybrid::Algo::Output(FLAGS_output.c_str(), gathered_output);
//
// if (FLAGS_check) {
// auto regression = hybrid::Algo::Host(context.host_graph, residual, current_ranks);
// return hybrid::Algo::CheckErrors(gathered_output, regression) == 0;
// } else {
// LOG(WARNING) << "Result not checked";
// return true;
// }
//} | 09a4aed5774ae9c6f8ee47e277ea35c5e080ca05.cu | //
// Created by liang on 2/16/18.
//
#include <vector>
#include <algorithm>
#include <thread>
#include <memory>
#include <random>
#include <cuda.h>
#include <device_launch_parameters.h>
#include <groute/event_pool.h>
#include <groute/graphs/csr_graph.h>
#include <groute/dwl/work_source.cuh>
#include <groute/device/cta_scheduler.cuh>
#include <utils/parser.h>
#include <utils/utils.h>
#include <utils/graphs/traversal.h>
#include <utils/stopwatch.h>
#include <moderngpu/context.hxx>
#include <moderngpu/kernel_scan.hxx>
#include <gflags/gflags.h>
#include <glog/logging.h>
#include <boost/format.hpp>
#include <utils/cuda_utils.h>
#include "pr_common.h"
DECLARE_double(wl_alloc_factor);
DECLARE_uint64(wl_alloc_abs);
DECLARE_int32(max_pr_iterations);
DECLARE_double(threshold);
DECLARE_int32(grid_size);
DECLARE_int32(block_size);
DECLARE_int32(mode);
DECLARE_int32(switch_threshold);
DECLARE_int32(async_to_sync);
DECLARE_int32(sync_to_async);
DECLARE_bool(force_sync);
DECLARE_bool(force_async);
namespace hybrid {
rank_t IDENTITY_ELEMENT = 0;
template<typename WorkSource,
typename TGraph,
template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankInit__Single__(
WorkSource work_source,
TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual, ResidualDatum<rank_t> last_residual) {
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
for (index_t ii = 0 + tid; ii < work_source.get_size(); ii += nthreads) {
index_t node = work_source.get_work(ii);
current_ranks[node] = 1.0 - ALPHA;
last_residual[node] = 0.0;
index_t
begin_edge = graph.begin_edge(node),
end_edge = graph.end_edge(node),
out_degree = end_edge - begin_edge;
if (out_degree == 0) continue;
rank_t update = ((1.0 - ALPHA) * ALPHA) / out_degree;
for (index_t edge = begin_edge; edge < end_edge; ++edge) {
index_t dest = graph.edge_dest(edge);
atomicAdd(residual.get_item_ptr(dest), update);
}
}
}
template<typename TValue>
__inline__ __device__ TValue warpReduce(TValue localSum) {
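// XOR-butterfly reduction over a 32-lane warp: each exchange halves the stride,
// and after the final step every lane holds the warp-wide sum.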
localSum += __shfl_xor_sync(0xffffffff, localSum, 16);
localSum += __shfl_xor_sync(0xffffffff, localSum, 8);
localSum += __shfl_xor_sync(0xffffffff, localSum, 4);
localSum += __shfl_xor_sync(0xffffffff, localSum, 2);
localSum += __shfl_xor_sync(0xffffffff, localSum, 1);
return localSum;
}
template<template<typename> class TRankDatum>
__device__ void PageRankCheck__Single__(TRankDatum<rank_t > current_ranks,
rank_t *block_sum_buffer, rank_t *rtn_sum) {
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
int laneIdx = threadIdx.x % warpSize;
int warpIdx = threadIdx.x / warpSize;
const int SMEMDIM = blockDim.x / warpSize;
__shared__ rank_t smem[32];
uint32_t work_size = current_ranks.size;
rank_t local_sum = 0;
for (uint32_t node = 0 + tid; node < work_size; node += nthreads) {
rank_t dist = current_ranks[node];
if (dist != IDENTITY_ELEMENT)
local_sum += dist;
}
local_sum = warpReduce(local_sum);
if (laneIdx == 0)
smem[warpIdx] = local_sum;
__syncthreads();
local_sum = (threadIdx.x < SMEMDIM) ? smem[threadIdx.x] : 0;
if (warpIdx == 0)
local_sum = warpReduce(local_sum);
if (threadIdx.x == 0) {
block_sum_buffer[blockIdx.x] = local_sum;
}
if (tid == 0) {
rank_t sum = 0;
for (int bid = 0; bid < gridDim.x; bid++) {
sum += block_sum_buffer[bid];
}
*rtn_sum = sum;
}
}
template<
typename TGraph,
template<typename> class RankDatum,
template<typename> class ResidualDatum,
typename WorkSource>
__global__ void PageRankSyncKernelCTA__Single__(
WorkSource work_source,
TGraph graph,
index_t iteration,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual,
ResidualDatum<rank_t> last_residual) {
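// Synchronous (double-buffered) variant: the parity of `iteration` selects which
// residual buffer is drained this round; the pushed updates accumulate in the other
// buffer and are consumed in the next round. Neighbor pushes go through the CTA
// work scheduler.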
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x;
for (index_t i = 0 + tid; i < work_size_rup; i += nthreads) {
groute::dev::np_local<rank_t> local_work = {0, 0, 0.0};
if (i < work_size) {
index_t node = work_source.get_work(i);
rank_t res;
if (iteration % 2 == 0) {
res = residual[node];//atomicExch(residual.get_item_ptr(node), 0);
residual[node] = 0;
} else {
res = last_residual[node];
last_residual[node] = 0;
//res = atomicExch(last_residual.get_item_ptr(node), 0);
}
if (res > 0) {
current_ranks[node] += res;
local_work.start = graph.begin_edge(node);
local_work.size = graph.end_edge(node) - local_work.start;
local_work.meta_data = ALPHA * res / local_work.size;
}
}
groute::dev::CTAWorkScheduler<rank_t>::template schedule(
local_work,
[&graph, &residual, &last_residual, &iteration](index_t edge, index_t size, rank_t update) {
index_t dest = graph.edge_dest(edge);
if (iteration % 2 == 0) {
atomicAdd(last_residual.get_item_ptr(dest), update);
} else {
atomicAdd(residual.get_item_ptr(dest), update);
}
}
);
}
}
template<
typename WorkSource,
typename TGraph, template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankAsyncKernelCTA__Single__(
WorkSource work_source, TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) {
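// Asynchronous variant: a single residual buffer is drained in place with atomicExch,
// so updates pushed during this launch may already be consumed by nodes processed
// later in the same launch.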
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x;
for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) {
groute::dev::np_local<rank_t> local_work = {0, 0, 0.0};
if (i < work_size) {
index_t node = work_source.get_work(i);
rank_t res = atomicExch(residual.get_item_ptr(node), 0);
if (res > 0) {
current_ranks[node] += res;
local_work.start = graph.begin_edge(node);
local_work.size = graph.end_edge(node) - local_work.start;
local_work.meta_data = ALPHA * res / local_work.size;
}
}
groute::dev::CTAWorkScheduler<rank_t>::template schedule(
local_work,
[&graph, &residual](index_t edge, index_t size, rank_t update) {
index_t dest = graph.edge_dest(edge);
atomicAdd(residual.get_item_ptr(dest), update);
}
);
}
}
template<
typename WorkSource,
typename TGraph, template<typename> class RankDatum,
template<typename> class ResidualDatum>
__global__ void PageRankAsyncKernelCTAPersist__Single__(
WorkSource work_source, TGraph graph,
RankDatum<rank_t> current_ranks, ResidualDatum<rank_t> residual) {
uint32_t tid = TID_1D;
uint32_t nthreads = TOTAL_THREADS_1D;
uint32_t work_size = work_source.get_size();
uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x;
while(true) {
for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) {
groute::dev::np_local<rank_t> local_work = {0, 0, 0.0};
if (i < work_size) {
index_t node = work_source.get_work(i);
rank_t res = atomicExch(residual.get_item_ptr(node), 0);
if (res > 0) {
current_ranks[node] += res;
local_work.start = graph.begin_edge(node);
local_work.size = graph.end_edge(node) - local_work.start;
local_work.meta_data = ALPHA * res / local_work.size;
}
}
groute::dev::CTAWorkScheduler<rank_t>::template schedule(
local_work,
[&graph, &residual](index_t edge, index_t size, rank_t update) {
index_t dest = graph.edge_dest(edge);
atomicAdd(residual.get_item_ptr(dest), update);
}
);
// PageRankCheck__Single__
//check here
}
}
}
struct Algo {
static const char *NameLower() { return "pr"; }
static const char *Name() { return "PR"; }
template<
typename TGraphAllocator, typename ResidualDatum, typename RankDatum, typename...UnusedData>
static const std::vector<rank_t> &Gather(
TGraphAllocator &graph_allocator, ResidualDatum &residual, RankDatum ¤t_ranks,
UnusedData &... data) {
graph_allocator.GatherDatum(current_ranks);
return current_ranks.GetHostData();
}
template<
typename ResidualDatum, typename RankDatum, typename...UnusedData>
static std::vector<rank_t> Host(
groute::graphs::host::CSRGraph &graph, ResidualDatum &residual, RankDatum ¤t_ranks,
UnusedData &... data) {
return PageRankHost(graph);
}
static int Output(const char *file, const std::vector<rank_t> &ranks) {
return PageRankOutput(file, ranks);
}
static int CheckErrors(std::vector<rank_t> &ranks, std::vector<rank_t> ®ression) {
return PageRankCheckErrors(ranks, regression);
}
};
}
bool HybridTopologyDriven1() {
VLOG(0) << "HybridTopologyDriven";
typedef groute::Queue<index_t> Worklist;
groute::graphs::single::NodeOutputDatum<rank_t> current_ranks;
groute::graphs::single::NodeOutputDatum<rank_t> residual;
groute::graphs::single::NodeOutputDatum<rank_t> last_residual;
utils::traversal::Context<hybrid::Algo> context(1);
groute::graphs::single::CSRGraphAllocator
dev_graph_allocator(context.host_graph);
context.SetDevice(0);
dev_graph_allocator.AllocateDatumObjects(current_ranks, residual, last_residual);
context.SyncDevice(0); // graph allocations are on default streams, must sync device
groute::Stream stream = context.CreateStream(0);
mgpu::standard_context_t mgpu_context(true, stream.cuda_stream);
dim3 grid_dims, block_dims;
KernelSizing(grid_dims, block_dims, context.host_graph.nnodes);
hybrid::PageRankInit__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> >
(groute::dev::WorkSourceRange<index_t>(
dev_graph_allocator.DeviceObject().owned_start_node(),
dev_graph_allocator.DeviceObject().owned_nnodes()),
dev_graph_allocator.DeviceObject(),
current_ranks.DeviceObject(),
residual.DeviceObject(),
last_residual.DeviceObject());
int iteration = 0;
bool running = true;
mgpu::mem_t<double> checkSum(1, mgpu_context);
mgpu::mem_t<int> deviceOffsets = mgpu::mem_t<int>(context.host_graph.nnodes, mgpu_context);
int *scanned_offsets = deviceOffsets.data();
rank_t last_sum = 0;
int mode;
double compute_consume = 0;
while (running) {
Stopwatch sw_update(true);
groute::graphs::single::NodeOutputDatum<rank_t> *available_residual = &residual;
if (FLAGS_force_async)
goto async;
else if (FLAGS_force_sync)
goto sync;
if (iteration < FLAGS_sync_to_async || iteration >= FLAGS_async_to_sync) {
sync:
hybrid::PageRankSyncKernelCTA__Single__
<< < grid_dims, block_dims, 0, stream.cuda_stream >> >
(groute::dev::WorkSourceRange<index_t>(
dev_graph_allocator.DeviceObject().owned_start_node(),
dev_graph_allocator.DeviceObject().owned_nnodes()),
dev_graph_allocator.DeviceObject(),
iteration,
current_ranks.DeviceObject(),
residual.DeviceObject(),
last_residual.DeviceObject());
if (iteration % 2 == 0) {
available_residual = &last_residual;
}
mode = 0;
} else {
async:
hybrid::PageRankAsyncKernelCTA__Single__
<< < grid_dims, block_dims, 0, stream.cuda_stream >> >
(groute::dev::WorkSourceRange<index_t>(
dev_graph_allocator.DeviceObject().owned_start_node(),
dev_graph_allocator.DeviceObject().owned_nnodes()),
dev_graph_allocator.DeviceObject(),
current_ranks.DeviceObject(),
available_residual->DeviceObject());
mode = 1;
}
stream.Sync();
sw_update.stop();
compute_consume += sw_update.ms();
VLOG(0) << boost::format("->%s Iter:%d THROUGHPUT:%f nodes/ms\n")
% (mode == 0 ? "Sync" : "Async") % iteration %
(dev_graph_allocator.DeviceObject().owned_nnodes() / sw_update.ms());
rank_t *tmp = current_ranks.DeviceObject().data_ptr;
auto func_data_sum_func = [=]__device__(int idx) {
return tmp[idx];
};
mgpu::transform_scan<double>(func_data_sum_func, context.host_graph.nnodes,
scanned_offsets, mgpu::plus_t<double>(), checkSum.data(), mgpu_context);
double pr_sum = mgpu::from_mem(checkSum)[0];
VLOG(1) << "Checking... SUM: " << pr_sum << " Relative SUM: " << pr_sum / context.host_graph.nnodes;
rank_t *p_residual = available_residual->DeviceObject().data_ptr;
auto func_residual_sum = [=]__device__(index_t idx) {
return p_residual[idx];
};
mgpu::transform_scan<double>(func_residual_sum, context.host_graph.nnodes, scanned_offsets,
mgpu::plus_t<double>(), checkSum.data(), mgpu_context);
double residual_sum = mgpu::from_mem(checkSum)[0];
double avg_residual_sum = residual_sum / context.host_graph.nnodes;
auto func_variance_compute = [=]__device__(index_t idx) {
return (p_residual[idx] - avg_residual_sum) * (p_residual[idx] - avg_residual_sum);
};
mgpu::transform_scan<double>(func_variance_compute, context.host_graph.nnodes, scanned_offsets,
mgpu::plus_t<double>(), checkSum.data(), mgpu_context);
double residual_variance = mgpu::from_mem(checkSum)[0] / context.host_graph.nnodes;
VLOG(0) << "Residual Variance: " << residual_variance;
if (last_sum > 0) {
rank_t sum_delta = pr_sum - last_sum;
VLOG(1) << "X FACTOR: " << sum_delta / sw_update.ms();
}
last_sum = pr_sum;
if (pr_sum / context.host_graph.nnodes > FLAGS_threshold) {
VLOG(0) << "Threshold reached";
break;
}
iteration++;
if (iteration >= FLAGS_max_pr_iterations) {
LOG(WARNING) << "maximum iterations reached";
break;
}
}
VLOG(1) << boost::format("%s terminated after %d iterations ") % hybrid::Algo::Name() % iteration;
VLOG(0) << hybrid::Algo::Name() << ": " << compute_consume << " ms. <filter>";
// Gather
auto gathered_output = hybrid::Algo::Gather(dev_graph_allocator, residual, current_ranks);
if (FLAGS_output.length() != 0)
hybrid::Algo::Output(FLAGS_output.c_str(), gathered_output);
if (FLAGS_check) {
auto regression = hybrid::Algo::Host(context.host_graph, residual, current_ranks);
return hybrid::Algo::CheckErrors(gathered_output, regression) == 0;
} else {
LOG(WARNING) << "Result not checked";
return true;
}
}
//bool HybridTopologyDriven() {
// VLOG(0) << "HybridTopologyDriven";
//
// typedef groute::Queue<index_t> Worklist;
// groute::graphs::single::NodeOutputDatum<rank_t> current_ranks;
// groute::graphs::single::NodeOutputDatum<rank_t> residual;
// groute::graphs::single::NodeOutputDatum<rank_t> last_residual;
//
// utils::traversal::Context<hybrid::Algo> context(1);
//
// groute::graphs::single::CSRGraphAllocator
// dev_graph_allocator(context.host_graph);
//
// context.SetDevice(0);
//
// dev_graph_allocator.AllocateDatumObjects(current_ranks, residual, last_residual);
//
// context.SyncDevice(0); // graph allocations are on default streams, must sync device
//
// groute::Stream stream = context.CreateStream(0);
//
// mgpu::standard_context_t mgpu_context(true, stream.cuda_stream);
//
//
// dim3 grid_dims, block_dims;
// KernelSizing(grid_dims, block_dims, context.host_graph.nnodes);
//
// Stopwatch sw(true);
//
// hybrid::PageRankInit__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> > (groute::dev::WorkSourceRange<index_t>(
// dev_graph_allocator.DeviceObject().owned_start_node(),
// dev_graph_allocator.DeviceObject().owned_nnodes()),
// dev_graph_allocator.DeviceObject(),
// current_ranks.DeviceObject(),
// residual.DeviceObject(),
// last_residual.DeviceObject());
//
// int iteration = 0;
// bool running = true;
//
// //mode = 0 means sync, mode = 1 means async
// int mode = FLAGS_mode;
// int last_mode;
// srand(time(NULL));
// groute::graphs::single::NodeOutputDatum<rank_t> *available_residual = &residual;
//
// int totalIteration = 0;
//
// double totalSync = 0;
// double totalAsync = 0;
// mgpu::mem_t<double> checkSum(1, mgpu_context);
// mgpu::mem_t<int> deviceOffsets = mgpu::mem_t<int>(context.host_graph.nnodes, mgpu_context);
//
// int *scanned_offsets = deviceOffsets.data();
// rank_t last_sum = 0;
// if (mode == 2 && FLAGS_switch_threshold > 0)mode = 1;
// while (running) {
//// if (FLAGS_mode == 2)
//// mode = rand() % 2;
//// else
//
// if (totalIteration > FLAGS_switch_threshold && mode == 1) {
// last_sum = 0;
// mode = 0;
// }
// Stopwatch sw_update(false);
// VLOG(1) << "Iteration: " << ++totalIteration;
//
//
// if (mode == 0) {
// last_mode = 0;
//
// sw_update.start();
// hybrid::PageRankSyncKernelCTA__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> > (groute::dev::WorkSourceRange<index_t>(
// dev_graph_allocator.DeviceObject().owned_start_node(),
// dev_graph_allocator.DeviceObject().owned_nnodes()),
// dev_graph_allocator.DeviceObject(),
// iteration++,
// current_ranks.DeviceObject(),
// residual.DeviceObject(),
// last_residual.DeviceObject());
// stream.Sync();
// sw_update.stop();
// totalSync += sw_update.ms();
// VLOG(1) << "SYNC THROUGHPUT: " << dev_graph_allocator.DeviceObject().owned_nnodes() / sw_update.ms() << " nodes/ms";
// } else {
// last_mode = 1;
// sw_update.start();
// hybrid::PageRankAsyncKernelCTA__Single__ << < grid_dims, block_dims, 0, stream.cuda_stream >> > (groute::dev::WorkSourceRange<index_t>(
// dev_graph_allocator.DeviceObject().owned_start_node(),
// dev_graph_allocator.DeviceObject().owned_nnodes()),
// dev_graph_allocator.DeviceObject(),
// current_ranks.DeviceObject(),
// available_residual->DeviceObject());
// stream.Sync();
// sw_update.stop();
// totalAsync += sw_update.ms();
// VLOG(1) << "ASYNC THROUGHPUT: " << dev_graph_allocator.DeviceObject().owned_nnodes() / sw_update.ms() << " nodes/ms";
// }
//
// if (last_mode == 0) {
// if (iteration % 2 == 0)//last round is last_residual-->residual
// available_residual = &residual;
// else
// available_residual = &last_residual;
// }
//
//
// rank_t *tmp = current_ranks.DeviceObject().data_ptr;
//
// auto func_data_sum_func = [=]__device__(int idx) {
// return tmp[idx];
// };
//
//
// mgpu::transform_scan<double>(func_data_sum_func, context.host_graph.nnodes,
// scanned_offsets, mgpu::plus_t<double>(), checkSum.data(), mgpu_context);
//
// double pr_sum = mgpu::from_mem(checkSum)[0];
//
// VLOG(1) << "Checking... SUM: " << pr_sum << " Relative SUM: " << pr_sum / context.host_graph.nnodes;
//
// rank_t *p_residual;
// if (mode == 0) {
// if (iteration % 2)
// p_residual = last_residual.DeviceObject().data_ptr;
// else
// p_residual = residual.DeviceObject().data_ptr;
// } else {
// p_residual = residual.DeviceObject().data_ptr;
// }
//
//
// auto func_residual_sum = [=]__device__(index_t idx) {
// return p_residual[idx];
// };
//
// mgpu::transform_scan<double>(func_residual_sum, context.host_graph.nnodes, scanned_offsets, mgpu::plus_t<double>(), checkSum.data(), mgpu_context);
//
// double residual_sum = mgpu::from_mem(checkSum)[0];
//
// double avg_residual_sum = residual_sum / context.host_graph.nnodes;
//
// auto func_variance_compute = [=]__device__(index_t idx) {
// return (p_residual[idx] - avg_residual_sum) * (p_residual[idx] - avg_residual_sum);
// };
//
// mgpu::transform_scan<double>(func_variance_compute, context.host_graph.nnodes, scanned_offsets, mgpu::plus_t<double>(), checkSum.data(), mgpu_context);
//
// double residual_variance = mgpu::from_mem(checkSum)[0] / context.host_graph.nnodes;
//
// VLOG(0) << "Residual Variance: " << residual_variance;
//
// if (last_sum > 0) {
// rank_t sum_delta = pr_sum - last_sum;
//
// VLOG(1) << "X FACTOR: " << sum_delta / sw_update.ms();
// }
// last_sum = pr_sum;
//
//
// if (pr_sum / context.host_graph.nnodes > FLAGS_threshold) {
// VLOG(0) << "Threshold reached";
// break;
// }
//
//
// if (totalIteration > FLAGS_max_pr_iterations) {
// LOG(WARNING) << "maximum iterations reached";
// break;
// }
//
// }
//
// sw.stop();
//
// VLOG(1)
// << boost::format("%s terminated after %d iterations (max: %d, sync: %d, async: %d)") % hybrid::Algo::Name() % totalIteration %
// FLAGS_max_pr_iterations % iteration % (totalIteration - iteration);
// VLOG(0) << hybrid::Algo::Name() << ": " << sw.ms() << " ms. <filter>";
// VLOG(0) << "AVG SYNC: " << totalSync / iteration << "ms / ASYNC: " << totalAsync / (totalIteration - iteration) << " ms.";
//
// // Gather
// auto gathered_output = hybrid::Algo::Gather(dev_graph_allocator, residual, current_ranks);
//
// if (FLAGS_output.length() != 0)
// hybrid::Algo::Output(FLAGS_output.c_str(), gathered_output);
//
// if (FLAGS_check) {
// auto regression = hybrid::Algo::Host(context.host_graph, residual, current_ranks);
// return hybrid::Algo::CheckErrors(gathered_output, regression) == 0;
// } else {
// LOG(WARNING) << "Result not checked";
// return true;
// }
//} |
374c4855849b103b40fb319bed4b3b3559af0d46.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Dongzhuo Li 05/06/2018
#include <chrono>
#include <string>
#include "Boundary.h"
#include "Cpml.h"
#include "Model.h"
#include "Parameter.h"
#include "Src_Rec.h"
#include "utilities.h"
using std::string;
#define VERBOSE
#define DEBUG
// extern "C" void cufd(double *res, double *grad_Cp, double *grad_Cs,
// double *grad_Den, double *grad_stf, const double *Cp,
// const double *Cs, const double *Den, const double *stf,
// int calc_id, const int gpu_id, int group_size,
// const int *shot_ids, const string para_fname);
/*
double res : residual
double *grad_Cp : gradients of Cp (p-wave velocity)
double *grad_Cs : gradients of Cs (s-wave velocity)
double *grad_Den : gradients of density
double *grad_stf : gradients of source time function
double *Cp : p-wave velocity
double *Cs : s-wave velocity
double *Den : density
double *stf : source time function of all shots
int calc_id :
calc_id = 0 -- compute residual
calc_id = 1 -- compute gradient
calc_id = 2 -- compute observation only
int gpu_id : CUDA_VISIBLE_DEVICES
int group_size: number of shots in the group
int *shot_ids : IDs of the shots to process
string para_fname : parameter path
// string survey_fname : survey file (src/rec) path
// string data_dir : data directory
// string scratch_dir : temporary files
*/
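// Illustrative call (a sketch, not part of the original source): compute the residual
// for a group of one shot (id 0) on GPU 0, assuming the model and source arrays are
// already sized to match the parameter file, and that the parameter file path is
// "fwi_param.json" (the name suggested by the commented-out path below).
//   int shot_ids[1] = {0};
//   double res = 0.0;
//   cufd(&res, grad_Cp, grad_Cs, grad_Den, grad_stf, Cp, Cs, Den, stf,
//        /*calc_id=*/0, /*gpu_id=*/0, /*group_size=*/1, shot_ids, "fwi_param.json");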
void cufd(double *res, double *grad_Cp, double *grad_Cs, double *grad_Den,
double *grad_stf, const double *Cp, const double *Cs,
const double *Den, const double *stf, int calc_id, const int gpu_id,
int group_size, const int *shot_ids, const string para_fname) {
// int deviceCount = 0;
// CHECK(hipGetDeviceCount (&deviceCount));
// printf("number of devices = %d\n", deviceCount);
CHECK(hipSetDevice(gpu_id));
auto start0 = std::chrono::high_resolution_clock::now();
// std::string para_fname = para_dir + "/fwi_param.json";
// std::string survey_fname = "/survey_file.json";
if (calc_id < 0 || calc_id > 2) {
printf("Invalid calc_id %d\n", calc_id);
exit(0);
}
// NOTE Read parameter file
Parameter para(para_fname, calc_id);
int nz = para.nz();
int nx = para.nx();
int nPml = para.nPoints_pml();
int nPad = para.nPad();
float dz = para.dz();
float dx = para.dx();
float dt = para.dt();
float f0 = para.f0();
int iSnap = 0; // 400
int nrec = 1;
float win_ratio = 0.005;
int nSteps = para.nSteps();
float amp_ratio = 1.0;
// transpose models and convert to float
float *fCp, *fCs, *fDen;
fCp = (float *)malloc(nz * nx * sizeof(float));
fCs = (float *)malloc(nz * nx * sizeof(float));
fDen = (float *)malloc(nz * nx * sizeof(float));
for (int i = 0; i < nz; i++) {
for (int j = 0; j < nx; j++) {
fCp[j * nz + i] = Cp[i * nx + j];
fCs[j * nz + i] = Cs[i * nx + j];
fDen[j * nz + i] = Den[i * nx + j];
}
}
Model model(para, fCp, fCs, fDen);
// Model model;
Cpml cpml(para, model);
Bnd boundaries(para);
auto startSrc = std::chrono::high_resolution_clock::now();
Src_Rec src_rec(para, para.survey_fname(), stf, group_size, shot_ids);
// TODO: group_size -> shot group size
auto finishSrc = std::chrono::high_resolution_clock::now();
#ifdef VERBOSE
std::chrono::duration<double> elapsedSrc = finishSrc - startSrc;
std::cout << "Src_Rec time: " << elapsedSrc.count() << " second(s)"
<< std::endl;
std::cout << "number of shots " << src_rec.d_vec_z_rec.size() << std::endl;
std::cout << "number of d_data " << src_rec.d_vec_data.size() << std::endl;
#endif
// compute Courant number
compCourantNumber(model.h_Cp, nz * nx, dt, dz, dx);
dim3 threads(TX, TY);
dim3 blocks((nz + TX - 1) / TX, (nx + TY - 1) / TY);
dim3 threads2(TX + 4, TY + 4);
dim3 blocks2((nz + TX + 3) / (TX + 4), (nx + TY + 3) / (TY + 4));
float *d_vz, *d_vx, *d_szz, *d_sxx, *d_sxz, *d_vz_adj, *d_vx_adj, *d_szz_adj,
*d_szz_p1;
float *d_mem_dvz_dz, *d_mem_dvz_dx, *d_mem_dvx_dz, *d_mem_dvx_dx;
float *d_mem_dszz_dz, *d_mem_dsxx_dx, *d_mem_dsxz_dz, *d_mem_dsxz_dx;
float *d_mat_dvz_dz, *d_mat_dvx_dx;
float *d_l2Obj_temp;
float *h_l2Obj_temp = NULL;
h_l2Obj_temp = (float *)malloc(sizeof(float));
float h_l2Obj = 0.0;
CHECK(hipMalloc((void **)&d_vz, nz * nx * sizeof(float)));
CHECK(hipMalloc((void **)&d_vx, nz * nx * sizeof(float)));
CHECK(hipMalloc((void **)&d_szz, nz * nx * sizeof(float)));
CHECK(hipMalloc((void **)&d_sxx, nz * nx * sizeof(float)));
CHECK(hipMalloc((void **)&d_sxz, nz * nx * sizeof(float)));
CHECK(hipMalloc((void **)&d_vz_adj, nz * nx * sizeof(float)));
CHECK(hipMalloc((void **)&d_vx_adj, nz * nx * sizeof(float)));
CHECK(hipMalloc((void **)&d_szz_adj, nz * nx * sizeof(float)));
CHECK(hipMalloc((void **)&d_szz_p1, nz * nx * sizeof(float)));
CHECK(hipMalloc((void **)&d_mem_dvz_dz, nz * nx * sizeof(float)));
CHECK(hipMalloc((void **)&d_mem_dvz_dx, nz * nx * sizeof(float)));
CHECK(hipMalloc((void **)&d_mem_dvx_dz, nz * nx * sizeof(float)));
CHECK(hipMalloc((void **)&d_mem_dvx_dx, nz * nx * sizeof(float)));
CHECK(hipMalloc((void **)&d_mem_dszz_dz, nz * nx * sizeof(float)));
CHECK(hipMalloc((void **)&d_mem_dsxx_dx, nz * nx * sizeof(float)));
CHECK(hipMalloc((void **)&d_mem_dsxz_dz, nz * nx * sizeof(float)));
CHECK(hipMalloc((void **)&d_mem_dsxz_dx, nz * nx * sizeof(float)));
// spatial derivatives: for kernel computations
CHECK(hipMalloc((void **)&d_mat_dvz_dz, nz * nx * sizeof(float)));
CHECK(hipMalloc((void **)&d_mat_dvx_dx, nz * nx * sizeof(float)));
CHECK(hipMalloc((void **)&d_l2Obj_temp, 1 * sizeof(float)));
float *h_snap, *h_snap_back, *h_snap_adj;
h_snap = (float *)malloc(nz * nx * sizeof(float));
h_snap_back = (float *)malloc(nz * nx * sizeof(float));
h_snap_adj = (float *)malloc(nz * nx * sizeof(float));
hipStream_t streams[group_size];
auto finish0 = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed0 = finish0 - start0;
#ifdef VERBOSE
std::cout << "Initialization time: " << elapsed0.count() << " second(s)"
<< std::endl;
#endif
auto start = std::chrono::high_resolution_clock::now();
// NOTE Processing Shot
for (int iShot = 0; iShot < group_size; iShot++) {
#ifdef VERBOSE
printf(" Processing shot %d\n", shot_ids[iShot]);
#endif
CHECK(hipStreamCreate(&streams[iShot]));
// load precomputed pressure DL
// fileBinLoad(h_snap, nz*nx, "Pressure.bin");
// CHECK(hipMemcpy(d_szz, h_snap, nz*nx*sizeof(float),
// hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_vx, h_snap,
// nz*nx*sizeof(float), hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_vz,
// h_snap, nz*nx*sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_vz, nz, nx, 0.0);
hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_vx, nz, nx, 0.0);
hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_szz, nz, nx, 0.0);
hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_sxx, nz, nx, 0.0);
hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_sxz, nz, nx, 0.0);
hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_mem_dvz_dz, nz, nx, 0.0);
hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_mem_dvz_dx, nz, nx, 0.0);
hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_mem_dvx_dz, nz, nx, 0.0);
hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_mem_dvx_dx, nz, nx, 0.0);
hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_mem_dszz_dz, nz, nx, 0.0);
hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_mem_dsxx_dx, nz, nx, 0.0);
hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_mem_dsxz_dz, nz, nx, 0.0);
hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_mem_dsxz_dx, nz, nx, 0.0);
hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_mat_dvz_dz, nz, nx, 0.0);
hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_mat_dvx_dx, nz, nx, 0.0);
nrec = src_rec.vec_nrec.at(iShot);
if (para.if_res()) {
fileBinLoad(src_rec.vec_data_obs.at(iShot), nSteps * nrec,
para.data_dir_name() + "/Shot" +
std::to_string(shot_ids[iShot]) + ".bin");
CHECK(hipMemcpyAsync(src_rec.d_vec_data_obs.at(iShot),
src_rec.vec_data_obs.at(iShot),
nrec * nSteps * sizeof(float),
hipMemcpyHostToDevice, streams[iShot]));
}
// ------------------------------------ time loop
// ------------------------------------
for (int it = 0; it <= nSteps - 2; it++) {
// =========================== elastic or acoustic
// ===========================
if (para.withAdj()) {
// save and record from the beginning
boundaries.field_from_bnd(d_szz, d_sxz, d_sxx, d_vz, d_vx, it);
}
// get snapshot at time it
if (it == iSnap && iShot == 0) {
CHECK(hipMemcpy(h_snap, d_szz, nz * nx * sizeof(float),
hipMemcpyDeviceToHost));
}
if (para.isAc()) {
hipLaunchKernelGGL(( ac_pressure), dim3(blocks), dim3(threads), 0, 0,
d_vz, d_vx, d_szz, d_mem_dvz_dz, d_mem_dvx_dx, model.d_Lambda,
model.d_Den, cpml.d_K_z_half, cpml.d_a_z_half, cpml.d_b_z_half,
cpml.d_K_x, cpml.d_a_x, cpml.d_b_x, nz, nx, dt, dz, dx, nPml, nPad,
true, d_mat_dvz_dz, d_mat_dvx_dx);
hipLaunchKernelGGL(( add_source), dim3(1), dim3(1), 0, 0, d_szz, d_sxx, src_rec.vec_source.at(iShot)[it], nz,
true, src_rec.vec_z_src.at(iShot),
src_rec.vec_x_src.at(iShot), dt, model.d_Cp);
hipLaunchKernelGGL(( ac_velocity), dim3(blocks), dim3(threads), 0, 0,
d_vz, d_vx, d_szz, d_mem_dszz_dz, d_mem_dsxx_dx, model.d_Lambda,
model.d_Den, model.d_ave_Byc_a, model.d_ave_Byc_b, cpml.d_K_z,
cpml.d_a_z, cpml.d_b_z, cpml.d_K_x_half, cpml.d_a_x_half,
cpml.d_b_x_half, nz, nx, dt, dz, dx, nPml, nPad, true);
} else {
hipLaunchKernelGGL(( el_stress), dim3(blocks), dim3(threads), 0, 0,
d_vz, d_vx, d_szz, d_sxx, d_sxz, d_mem_dvz_dz, d_mem_dvz_dx,
d_mem_dvx_dz, d_mem_dvx_dx, model.d_Lambda, model.d_Mu,
model.d_ave_Mu, model.d_Den, cpml.d_K_z, cpml.d_a_z, cpml.d_b_z,
cpml.d_K_z_half, cpml.d_a_z_half, cpml.d_b_z_half, cpml.d_K_x,
cpml.d_a_x, cpml.d_b_x, cpml.d_K_x_half, cpml.d_a_x_half,
cpml.d_b_x_half, nz, nx, dt, dz, dx, nPml, nPad, true);
hipLaunchKernelGGL(( add_source), dim3(1), dim3(1), 0, 0, d_szz, d_sxx, src_rec.vec_source.at(iShot)[it], nz,
true, src_rec.vec_z_src.at(iShot),
src_rec.vec_x_src.at(iShot), dt, model.d_Cp);
hipLaunchKernelGGL(( el_velocity), dim3(blocks), dim3(threads), 0, 0,
d_vz, d_vx, d_szz, d_sxx, d_sxz, d_mem_dszz_dz, d_mem_dsxz_dx,
d_mem_dsxz_dz, d_mem_dsxx_dx, model.d_Lambda, model.d_Mu,
model.d_ave_Byc_a, model.d_ave_Byc_b, cpml.d_K_z, cpml.d_a_z,
cpml.d_b_z, cpml.d_K_z_half, cpml.d_a_z_half, cpml.d_b_z_half,
cpml.d_K_x, cpml.d_a_x, cpml.d_b_x, cpml.d_K_x_half,
cpml.d_a_x_half, cpml.d_b_x_half, nz, nx, dt, dz, dx, nPml, nPad,
true);
}
hipLaunchKernelGGL(( recording), dim3((nrec + 31) / 32), dim3(32), 0, 0,
d_szz, nz, src_rec.d_vec_data.at(iShot), iShot, it + 1, nSteps, nrec,
src_rec.d_vec_z_rec.at(iShot), src_rec.d_vec_x_rec.at(iShot));
}
if (!para.if_res()) {
CHECK(hipMemcpyAsync(
src_rec.vec_data.at(iShot), src_rec.d_vec_data.at(iShot),
nSteps * nrec * sizeof(float), hipMemcpyDeviceToHost,
streams[iShot])); // test
}
// fileBinWrite(h_snap, nz*nx, "SnapGPU.bin");
// compute residuals
if (para.if_res()) {
dim3 blocksT((nSteps + TX - 1) / TX, (nrec + TY - 1) / TY);
// for fun modify observed data
// float filter2[4] = {8.0, 9.0, 12.0, 13.0};
// cuda_window<<<blocksT,threads>>>(nSteps, nrec, dt, win_ratio,
// src_rec.d_vec_data_obs.at(iShot)); bp_filter1d(nSteps, dt, nrec,
// src_rec.d_vec_data_obs.at(iShot), filter2);
// windowing
if (para.if_win()) {
hipLaunchKernelGGL(( cuda_window), dim3(blocksT), dim3(threads), 0, 0,
nSteps, nrec, dt, src_rec.d_vec_win_start.at(iShot),
src_rec.d_vec_win_end.at(iShot), src_rec.d_vec_weights.at(iShot),
win_ratio, src_rec.d_vec_data_obs.at(iShot));
hipLaunchKernelGGL(( cuda_window), dim3(blocksT), dim3(threads), 0, 0,
nSteps, nrec, dt, src_rec.d_vec_win_start.at(iShot),
src_rec.d_vec_win_end.at(iShot), src_rec.d_vec_weights.at(iShot),
win_ratio, src_rec.d_vec_data.at(iShot));
} else {
hipLaunchKernelGGL(( cuda_window), dim3(blocksT), dim3(threads), 0, 0, nSteps, nrec, dt, win_ratio,
src_rec.d_vec_data_obs.at(iShot));
hipLaunchKernelGGL(( cuda_window), dim3(blocksT), dim3(threads), 0, 0, nSteps, nrec, dt, win_ratio,
src_rec.d_vec_data.at(iShot));
}
// filtering
if (para.if_filter()) {
bp_filter1d(nSteps, dt, nrec, src_rec.d_vec_data_obs.at(iShot),
para.filter());
bp_filter1d(nSteps, dt, nrec, src_rec.d_vec_data.at(iShot),
para.filter());
}
// Calculate source update and filter calculated data
if (para.if_src_update()) {
amp_ratio =
source_update(nSteps, dt, nrec, src_rec.d_vec_data_obs.at(iShot),
src_rec.d_vec_data.at(iShot),
src_rec.d_vec_source.at(iShot), src_rec.d_coef);
printf(" Source update => Processing shot %d, amp_ratio = %f\n",
iShot, amp_ratio);
}
amp_ratio = 1.0; // amplitude not used, so set to 1.0
// objective function
hipLaunchKernelGGL(( gpuMinus), dim3(blocksT), dim3(threads), 0, 0,
src_rec.d_vec_res.at(iShot), src_rec.d_vec_data_obs.at(iShot),
src_rec.d_vec_data.at(iShot), nSteps, nrec);
hipLaunchKernelGGL(( cuda_cal_objective), dim3(1), dim3(512), 0, 0, d_l2Obj_temp, src_rec.d_vec_res.at(iShot),
nSteps * nrec);
CHECK(hipMemcpy(h_l2Obj_temp, d_l2Obj_temp, sizeof(float),
hipMemcpyDeviceToHost));
h_l2Obj += h_l2Obj_temp[0];
// update source again (adjoint)
if (para.if_src_update()) {
source_update_adj(nSteps, dt, nrec, src_rec.d_vec_res.at(iShot),
amp_ratio, src_rec.d_coef);
}
// filtering again (adjoint)
if (para.if_filter()) {
bp_filter1d(nSteps, dt, nrec, src_rec.d_vec_res.at(iShot),
para.filter());
}
// windowing again (adjoint)
if (para.if_win()) {
hipLaunchKernelGGL(( cuda_window), dim3(blocksT), dim3(threads), 0, 0,
nSteps, nrec, dt, src_rec.d_vec_win_start.at(iShot),
src_rec.d_vec_win_end.at(iShot), src_rec.d_vec_weights.at(iShot),
0.1, src_rec.d_vec_res.at(iShot));
} else {
hipLaunchKernelGGL(( cuda_window), dim3(blocksT), dim3(threads), 0, 0, nSteps, nrec, dt, win_ratio,
src_rec.d_vec_res.at(iShot));
}
CHECK(hipMemcpyAsync(
src_rec.vec_res.at(iShot), src_rec.d_vec_res.at(iShot),
nSteps * nrec * sizeof(float), hipMemcpyDeviceToHost,
streams[iShot])); // test
// CHECK(hipMemcpy(src_rec.vec_res.at(iShot), src_rec.d_vec_res.at(iShot), \
// nSteps*nrec*sizeof(float), hipMemcpyDeviceToHost)); // test
CHECK(hipMemcpyAsync(
src_rec.vec_data.at(iShot), src_rec.d_vec_data.at(iShot),
nSteps * nrec * sizeof(float), hipMemcpyDeviceToHost,
streams[iShot])); // test
CHECK(hipMemcpyAsync(
src_rec.vec_data_obs.at(iShot), src_rec.d_vec_data_obs.at(iShot),
nSteps * nrec * sizeof(float), hipMemcpyDeviceToHost,
streams[iShot])); // save preconditioned observed
CHECK(hipMemcpy(src_rec.vec_source.at(iShot),
src_rec.d_vec_source.at(iShot), nSteps * sizeof(float),
hipMemcpyDeviceToHost));
}
// =================
hipDeviceSynchronize();
if (para.withAdj()) {
// ------------------------------------- Backward
// ---------------------------------- initialization
hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_vz_adj, nz, nx, 0.0);
hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_vx_adj, nz, nx, 0.0);
hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_szz_adj, nz, nx, 0.0);
hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_szz_p1, nz, nx, 0.0);
hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_mem_dvz_dz, nz, nx, 0.0);
hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_mem_dvx_dx, nz, nx, 0.0);
hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_mem_dszz_dz, nz, nx, 0.0);
hipLaunchKernelGGL(( intialArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_mem_dsxx_dx, nz, nx, 0.0);
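      // Backward time loop (acoustic path): re-propagate the forward wavefield
      // from the saved boundary values, step the adjoint field, inject the data
      // residuals at the receivers, and accumulate the Vp gradient in d_CpGrad.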
for (int it = nSteps - 2; it >= 0; it--) {
if (para.isAc()) {
// if (it <= nSteps - 2) {
// save p to szz_plus_one
hipLaunchKernelGGL(( assignArrayGPU), dim3(blocks), dim3(threads), 0, 0, d_szz, d_szz_p1, nz, nx);
// value at T-1
hipLaunchKernelGGL(( ac_velocity), dim3(blocks), dim3(threads), 0, 0,
d_vz, d_vx, d_szz, d_mem_dszz_dz, d_mem_dsxx_dx, model.d_Lambda,
model.d_Den, model.d_ave_Byc_a, model.d_ave_Byc_b, cpml.d_K_z,
cpml.d_a_z, cpml.d_b_z, cpml.d_K_x_half, cpml.d_a_x_half,
cpml.d_b_x_half, nz, nx, dt, dz, dx, nPml, nPad, false);
boundaries.field_to_bnd(d_szz, d_sxz, d_sxx, d_vz, d_vx, it, false);
hipLaunchKernelGGL(( add_source), dim3(1), dim3(1), 0, 0, d_szz, d_sxx, src_rec.vec_source.at(iShot)[it],
nz, false, src_rec.vec_z_src.at(iShot),
src_rec.vec_x_src.at(iShot), dt, model.d_Cp);
hipLaunchKernelGGL(( add_source), dim3(1), dim3(1), 0, 0, d_szz_p1, d_sxx,
src_rec.vec_source.at(iShot)[it], nz, false,
src_rec.vec_z_src.at(iShot),
src_rec.vec_x_src.at(iShot), dt, model.d_Cp);
hipLaunchKernelGGL(( ac_pressure), dim3(blocks), dim3(threads), 0, 0,
d_vz, d_vx, d_szz, d_mem_dvz_dz, d_mem_dvx_dx, model.d_Lambda,
model.d_Den, cpml.d_K_z_half, cpml.d_a_z_half, cpml.d_b_z_half,
cpml.d_K_x, cpml.d_a_x, cpml.d_b_x, nz, nx, dt, dz, dx, nPml,
nPad, false, d_mat_dvz_dz, d_mat_dvx_dx);
boundaries.field_to_bnd(d_szz, d_sxz, d_sxx, d_vz, d_vx, it, true);
// value at T-2
// ================
// adjoint computation
hipLaunchKernelGGL(( ac_velocity_adj), dim3(blocks), dim3(threads), 0, 0,
d_vz_adj, d_vx_adj, d_szz_adj, d_mem_dvz_dz, d_mem_dvx_dx,
d_mem_dszz_dz, d_mem_dsxx_dx, model.d_Lambda, model.d_Den,
model.d_ave_Byc_a, model.d_ave_Byc_b, cpml.d_K_z_half,
cpml.d_a_z_half, cpml.d_b_z_half, cpml.d_K_x_half,
cpml.d_a_x_half, cpml.d_b_x_half, cpml.d_K_z, cpml.d_a_z,
cpml.d_b_z, cpml.d_K_x, cpml.d_a_x, cpml.d_b_x, nz, nx, dt, dz,
dx, nPml, nPad);
// inject residuals
hipLaunchKernelGGL(( res_injection), dim3((nrec + 31) / 32), dim3(32), 0, 0,
d_szz_adj, nz, src_rec.d_vec_res.at(iShot), model.d_Lambda,
it + 1, dt, nSteps, nrec, src_rec.d_vec_z_rec.at(iShot),
src_rec.d_vec_x_rec.at(iShot));
hipLaunchKernelGGL(( ac_pressure_adj), dim3(blocks), dim3(threads), 0, 0,
d_vz_adj, d_vx_adj, d_szz_adj, d_mem_dvz_dz, d_mem_dvx_dx,
d_mem_dszz_dz, d_mem_dsxx_dx, model.d_Lambda, model.d_Den,
model.d_ave_Byc_a, model.d_ave_Byc_b, cpml.d_K_z_half,
cpml.d_a_z_half, cpml.d_b_z_half, cpml.d_K_x_half,
cpml.d_a_x_half, cpml.d_b_x_half, cpml.d_K_z, cpml.d_a_z,
cpml.d_b_z, cpml.d_K_x, cpml.d_a_x, cpml.d_b_x, nz, nx, dt, dz,
dx, nPml, nPad, model.d_Cp, d_mat_dvz_dz, d_mat_dvx_dx,
model.d_CpGrad);
// value at T-1
// ac_adj_push<<<blocks,threads2>>>(d_vz_adj, d_vx_adj, d_szz_adj, d_adj_temp, \
// d_mem_dvz_dz, d_mem_dvx_dx, d_mem_dszz_dz, d_mem_dsxx_dx, \
// model.d_Lambda, model.d_Den, model.d_ave_Byc_a, model.d_ave_Byc_b, \
// cpml.d_K_z_half, cpml.d_a_z_half, cpml.d_b_z_half, \
// cpml.d_K_x_half, cpml.d_a_x_half, cpml.d_b_x_half, \
// cpml.d_K_z, cpml.d_a_z, cpml.d_b_z, \
// cpml.d_K_x, cpml.d_a_x, cpml.d_b_x, \
// nz, nx, dt, dz, dx, nPml, nPad);
// image_vel<<<blocks,threads>>>(d_szz_adj, nz, nx, dt, dz, dx, nPml, nPad, \
// model.d_Cp, model.d_Den, d_mat_dvz_dz, d_mat_dvx_dx, model.d_CpGrad);
hipLaunchKernelGGL(( image_vel_time), dim3(blocks), dim3(threads), 0, 0,
d_szz, d_szz_p1, d_szz_adj, nz, nx, dt, dz, dx, nPml, nPad,
model.d_Cp, model.d_Lambda, model.d_CpGrad);
} else {
hipLaunchKernelGGL(( el_velocity), dim3(blocks), dim3(threads), 0, 0,
d_vz, d_vx, d_szz, d_sxx, d_sxz, d_mem_dszz_dz, d_mem_dsxz_dx,
d_mem_dsxz_dz, d_mem_dsxx_dx, model.d_Lambda, model.d_Mu,
model.d_ave_Byc_a, model.d_ave_Byc_b, cpml.d_K_z, cpml.d_a_z,
cpml.d_b_z, cpml.d_K_z_half, cpml.d_a_z_half, cpml.d_b_z_half,
cpml.d_K_x, cpml.d_a_x, cpml.d_b_x, cpml.d_K_x_half,
cpml.d_a_x_half, cpml.d_b_x_half, nz, nx, dt, dz, dx, nPml, nPad,
false);
hipLaunchKernelGGL(( el_stress), dim3(blocks), dim3(threads), 0, 0,
d_vz, d_vx, d_szz, d_sxx, d_sxz, d_mem_dvz_dz, d_mem_dvz_dx,
d_mem_dvx_dz, d_mem_dvx_dx, model.d_Lambda, model.d_Mu,
model.d_ave_Mu, model.d_Den, cpml.d_K_z, cpml.d_a_z, cpml.d_b_z,
cpml.d_K_z_half, cpml.d_a_z_half, cpml.d_b_z_half, cpml.d_K_x,
cpml.d_a_x, cpml.d_b_x, cpml.d_K_x_half, cpml.d_a_x_half,
cpml.d_b_x_half, nz, nx, dt, dz, dx, nPml, nPad, false);
}
// boundaries.field_to_bnd(d_szz, d_sxz, d_sxx, d_vz, d_vx, it);
if (it == iSnap && iShot == 0) {
CHECK(hipMemcpy(h_snap_back, d_szz, nz * nx * sizeof(float),
hipMemcpyDeviceToHost));
CHECK(hipMemcpy(h_snap_adj, d_szz_adj, nz * nx * sizeof(float),
hipMemcpyDeviceToHost));
}
if (iShot == 0) {
// CHECK(hipMemcpy(h_snap_adj, d_szz_adj, nz*nx*sizeof(float),
// hipMemcpyDeviceToHost)); fileBinWrite(h_snap_adj, nz*nx,
// "SnapGPU_adj_" + std::to_string(it) + ".bin");
// CHECK(hipMemcpy(h_snap, d_szz, nz*nx*sizeof(float),
// hipMemcpyDeviceToHost)); fileBinWrite(h_snap, nz*nx, "SnapGPU_"
// + std::to_string(it) + ".bin");
}
}
// fileBinWrite(h_snap_back, nz*nx, "SnapGPU_back.bin");
// fileBinWrite(h_snap_adj, nz*nx, "SnapGPU_adj.bin");
CHECK(hipMemcpy(model.h_CpGrad, model.d_CpGrad, nz * nx * sizeof(float),
hipMemcpyDeviceToHost));
// fileBinWrite(model.h_CpGrad, nz*nx, "CpGradient.bin");
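      // h_CpGrad is laid out with z as the fastest dimension; transpose it into
      // the row-major (x fastest) grad_Cp array returned to the caller.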
for (int i = 0; i < nz; i++) {
for (int j = 0; j < nx; j++) {
grad_Cp[i * nx + j] = model.h_CpGrad[j * nz + i];
}
}
initialArray(grad_Cs, nz * nx, 0.0);
initialArray(grad_Den, nz * nx, 0.0);
initialArray(grad_stf, nSteps * src_rec.nShots, 0.0);
}
}
auto finish = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed = finish - start;
#ifdef VERBOSE
std::cout << "Elapsed time: " << elapsed.count() << " second(s)."
<< std::endl;
#endif
if (!para.if_res()) {
for (int iShot = 0; iShot < group_size; iShot++) {
fileBinWrite(src_rec.vec_data.at(iShot),
nSteps * src_rec.vec_nrec.at(iShot),
para.data_dir_name() + "/Shot" +
std::to_string(shot_ids[iShot]) + ".bin");
}
}
if (para.if_save_scratch()) {
for (int iShot = 0; iShot < group_size; iShot++) {
fileBinWrite(src_rec.vec_res.at(iShot),
nSteps * src_rec.vec_nrec.at(iShot),
para.scratch_dir_name() + "/Residual_Shot" +
std::to_string(shot_ids[iShot]) + ".bin");
fileBinWrite(src_rec.vec_data.at(iShot),
nSteps * src_rec.vec_nrec.at(iShot),
para.scratch_dir_name() + "/Syn_Shot" +
std::to_string(shot_ids[iShot]) + ".bin");
fileBinWrite(src_rec.vec_data_obs.at(iShot),
nSteps * src_rec.vec_nrec.at(iShot),
para.scratch_dir_name() + "/CondObs_Shot" +
std::to_string(shot_ids[iShot]) + ".bin");
// fileBinWrite(src_rec.vec_source.at(iShot), nSteps,
// para.scratch_dir_name() + "src_updated" +
// std::to_string(iShot) + ".bin");
}
}
// #ifdef DEBUG
// std::cout << "cufd--" << __LINE__ << std::endl;
// #endif
// output residual
if (para.if_res() && !para.withAdj()) {
h_l2Obj = 0.5 * h_l2Obj; // DL 02/21/2019 (need to make misfit accurate
// here rather than in the script)
// fileBinWrite(&h_l2Obj, 1, "l2Obj.bin");
std::cout << "Total l2 residual = " << std::to_string(h_l2Obj) << std::endl;
std::cout << "calc_id = " << calc_id << std::endl;
*res = h_l2Obj;
}
free(h_l2Obj_temp);
free(h_snap);
free(h_snap_back);
free(h_snap_adj);
free(fCp);
free(fCs);
free(fDen);
// destroy the streams
for (int iShot = 0; iShot < group_size; iShot++)
CHECK(hipStreamDestroy(streams[iShot]));
hipFree(d_vz);
hipFree(d_vx);
hipFree(d_szz);
hipFree(d_sxx);
hipFree(d_sxz);
hipFree(d_vz_adj);
hipFree(d_vx_adj);
hipFree(d_szz_adj);
hipFree(d_szz_p1);
hipFree(d_mem_dvz_dz);
hipFree(d_mem_dvz_dx);
hipFree(d_mem_dvx_dz);
hipFree(d_mem_dvx_dx);
hipFree(d_mem_dszz_dz);
hipFree(d_mem_dsxx_dx);
hipFree(d_mem_dsxz_dz);
hipFree(d_mem_dsxz_dx);
hipFree(d_mat_dvz_dz);
hipFree(d_mat_dvx_dx);
hipFree(d_l2Obj_temp);
#ifdef VERBOSE
std::cout << "Done!" << std::endl;
#endif
}
| 374c4855849b103b40fb319bed4b3b3559af0d46.cu | // Dongzhuo Li 05/06/2018
#include <chrono>
#include <string>
#include "Boundary.h"
#include "Cpml.h"
#include "Model.h"
#include "Parameter.h"
#include "Src_Rec.h"
#include "utilities.h"
using std::string;
#define VERBOSE
#define DEBUG
// extern "C" void cufd(double *res, double *grad_Cp, double *grad_Cs,
// double *grad_Den, double *grad_stf, const double *Cp,
// const double *Cs, const double *Den, const double *stf,
// int calc_id, const int gpu_id, int group_size,
// const int *shot_ids, const string para_fname);
/*
double res : residual
double *grad_Cp : gradients of Cp (p-wave velocity)
double *grad_Cs : gradients of Cs (s-wave velocity)
double *grad_Den : gradients of density
double *grad_stf : gradients of source time function
double *Cp : p-wave velocity
double *Cs : s-wave velocity
double *Den : density
double *stf : source time function of all shots
int calc_id :
calc_id = 0 -- compute residual
calc_id = 1 -- compute gradient
calc_id = 2 -- compute observation only
int gpu_id : CUDA_VISIBLE_DEVICES
int group_size: number of shots in the group
int *shot_ids : processing shot shot_ids
string para_fname : parameter path
// string survey_fname : survey file (src/rec) path
// string data_dir : data directory
// string scratch_dir : temporary files
*/
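// Hypothetical usage sketch (not part of the original source; the buffer names,
// sizes and parameter-file name below are assumptions for illustration only):
//   double res = 0.0;
//   std::vector<double> gCp(nz * nx), gCs(nz * nx), gDen(nz * nx), gStf(nSteps * nShots);
//   int shot_ids[2] = {0, 1};
//   cufd(&res, gCp.data(), gCs.data(), gDen.data(), gStf.data(), Cp, Cs, Den, stf,
//        /*calc_id=*/1, /*gpu_id=*/0, /*group_size=*/2, shot_ids, "fwi_param.json");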
void cufd(double *res, double *grad_Cp, double *grad_Cs, double *grad_Den,
double *grad_stf, const double *Cp, const double *Cs,
const double *Den, const double *stf, int calc_id, const int gpu_id,
int group_size, const int *shot_ids, const string para_fname) {
// int deviceCount = 0;
// CHECK(cudaGetDeviceCount (&deviceCount));
// printf("number of devices = %d\n", deviceCount);
CHECK(cudaSetDevice(gpu_id));
auto start0 = std::chrono::high_resolution_clock::now();
// std::string para_fname = para_dir + "/fwi_param.json";
// std::string survey_fname = "/survey_file.json";
if (calc_id < 0 || calc_id > 2) {
printf("Invalid calc_id %d\n", calc_id);
exit(0);
}
// NOTE Read parameter file
Parameter para(para_fname, calc_id);
int nz = para.nz();
int nx = para.nx();
int nPml = para.nPoints_pml();
int nPad = para.nPad();
float dz = para.dz();
float dx = para.dx();
float dt = para.dt();
float f0 = para.f0();
int iSnap = 0; // 400
int nrec = 1;
float win_ratio = 0.005;
int nSteps = para.nSteps();
float amp_ratio = 1.0;
// transpose models and convert to float
float *fCp, *fCs, *fDen;
fCp = (float *)malloc(nz * nx * sizeof(float));
fCs = (float *)malloc(nz * nx * sizeof(float));
fDen = (float *)malloc(nz * nx * sizeof(float));
for (int i = 0; i < nz; i++) {
for (int j = 0; j < nx; j++) {
fCp[j * nz + i] = Cp[i * nx + j];
fCs[j * nz + i] = Cs[i * nx + j];
fDen[j * nz + i] = Den[i * nx + j];
}
}
Model model(para, fCp, fCs, fDen);
// Model model;
Cpml cpml(para, model);
Bnd boundaries(para);
auto startSrc = std::chrono::high_resolution_clock::now();
Src_Rec src_rec(para, para.survey_fname(), stf, group_size, shot_ids);
// TODO: group_size -> shot group size
auto finishSrc = std::chrono::high_resolution_clock::now();
#ifdef VERBOSE
std::chrono::duration<double> elapsedSrc = finishSrc - startSrc;
std::cout << "Src_Rec time: " << elapsedSrc.count() << " second(s)"
<< std::endl;
std::cout << "number of shots " << src_rec.d_vec_z_rec.size() << std::endl;
std::cout << "number of d_data " << src_rec.d_vec_data.size() << std::endl;
#endif
// compute Courant number
compCourantNumber(model.h_Cp, nz * nx, dt, dz, dx);
dim3 threads(TX, TY);
dim3 blocks((nz + TX - 1) / TX, (nx + TY - 1) / TY);
dim3 threads2(TX + 4, TY + 4);
dim3 blocks2((nz + TX + 3) / (TX + 4), (nx + TY + 3) / (TY + 4));
float *d_vz, *d_vx, *d_szz, *d_sxx, *d_sxz, *d_vz_adj, *d_vx_adj, *d_szz_adj,
*d_szz_p1;
float *d_mem_dvz_dz, *d_mem_dvz_dx, *d_mem_dvx_dz, *d_mem_dvx_dx;
float *d_mem_dszz_dz, *d_mem_dsxx_dx, *d_mem_dsxz_dz, *d_mem_dsxz_dx;
float *d_mat_dvz_dz, *d_mat_dvx_dx;
float *d_l2Obj_temp;
float *h_l2Obj_temp = NULL;
h_l2Obj_temp = (float *)malloc(sizeof(float));
float h_l2Obj = 0.0;
CHECK(cudaMalloc((void **)&d_vz, nz * nx * sizeof(float)));
CHECK(cudaMalloc((void **)&d_vx, nz * nx * sizeof(float)));
CHECK(cudaMalloc((void **)&d_szz, nz * nx * sizeof(float)));
CHECK(cudaMalloc((void **)&d_sxx, nz * nx * sizeof(float)));
CHECK(cudaMalloc((void **)&d_sxz, nz * nx * sizeof(float)));
CHECK(cudaMalloc((void **)&d_vz_adj, nz * nx * sizeof(float)));
CHECK(cudaMalloc((void **)&d_vx_adj, nz * nx * sizeof(float)));
CHECK(cudaMalloc((void **)&d_szz_adj, nz * nx * sizeof(float)));
CHECK(cudaMalloc((void **)&d_szz_p1, nz * nx * sizeof(float)));
CHECK(cudaMalloc((void **)&d_mem_dvz_dz, nz * nx * sizeof(float)));
CHECK(cudaMalloc((void **)&d_mem_dvz_dx, nz * nx * sizeof(float)));
CHECK(cudaMalloc((void **)&d_mem_dvx_dz, nz * nx * sizeof(float)));
CHECK(cudaMalloc((void **)&d_mem_dvx_dx, nz * nx * sizeof(float)));
CHECK(cudaMalloc((void **)&d_mem_dszz_dz, nz * nx * sizeof(float)));
CHECK(cudaMalloc((void **)&d_mem_dsxx_dx, nz * nx * sizeof(float)));
CHECK(cudaMalloc((void **)&d_mem_dsxz_dz, nz * nx * sizeof(float)));
CHECK(cudaMalloc((void **)&d_mem_dsxz_dx, nz * nx * sizeof(float)));
// spatial derivatives: for kernel computations
CHECK(cudaMalloc((void **)&d_mat_dvz_dz, nz * nx * sizeof(float)));
CHECK(cudaMalloc((void **)&d_mat_dvx_dx, nz * nx * sizeof(float)));
CHECK(cudaMalloc((void **)&d_l2Obj_temp, 1 * sizeof(float)));
float *h_snap, *h_snap_back, *h_snap_adj;
h_snap = (float *)malloc(nz * nx * sizeof(float));
h_snap_back = (float *)malloc(nz * nx * sizeof(float));
h_snap_adj = (float *)malloc(nz * nx * sizeof(float));
cudaStream_t streams[group_size];
auto finish0 = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed0 = finish0 - start0;
#ifdef VERBOSE
std::cout << "Initialization time: " << elapsed0.count() << " second(s)"
<< std::endl;
#endif
auto start = std::chrono::high_resolution_clock::now();
// NOTE Processing Shot
for (int iShot = 0; iShot < group_size; iShot++) {
#ifdef VERBOSE
printf(" Processing shot %d\n", shot_ids[iShot]);
#endif
CHECK(cudaStreamCreate(&streams[iShot]));
    // load precomputed pressure DL
// fileBinLoad(h_snap, nz*nx, "Pressure.bin");
// CHECK(cudaMemcpy(d_szz, h_snap, nz*nx*sizeof(float),
// cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_vx, h_snap,
// nz*nx*sizeof(float), cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_vz,
// h_snap, nz*nx*sizeof(float), cudaMemcpyHostToDevice));
intialArrayGPU<<<blocks, threads>>>(d_vz, nz, nx, 0.0);
intialArrayGPU<<<blocks, threads>>>(d_vx, nz, nx, 0.0);
intialArrayGPU<<<blocks, threads>>>(d_szz, nz, nx, 0.0);
intialArrayGPU<<<blocks, threads>>>(d_sxx, nz, nx, 0.0);
intialArrayGPU<<<blocks, threads>>>(d_sxz, nz, nx, 0.0);
intialArrayGPU<<<blocks, threads>>>(d_mem_dvz_dz, nz, nx, 0.0);
intialArrayGPU<<<blocks, threads>>>(d_mem_dvz_dx, nz, nx, 0.0);
intialArrayGPU<<<blocks, threads>>>(d_mem_dvx_dz, nz, nx, 0.0);
intialArrayGPU<<<blocks, threads>>>(d_mem_dvx_dx, nz, nx, 0.0);
intialArrayGPU<<<blocks, threads>>>(d_mem_dszz_dz, nz, nx, 0.0);
intialArrayGPU<<<blocks, threads>>>(d_mem_dsxx_dx, nz, nx, 0.0);
intialArrayGPU<<<blocks, threads>>>(d_mem_dsxz_dz, nz, nx, 0.0);
intialArrayGPU<<<blocks, threads>>>(d_mem_dsxz_dx, nz, nx, 0.0);
intialArrayGPU<<<blocks, threads>>>(d_mat_dvz_dz, nz, nx, 0.0);
intialArrayGPU<<<blocks, threads>>>(d_mat_dvx_dx, nz, nx, 0.0);
nrec = src_rec.vec_nrec.at(iShot);
if (para.if_res()) {
fileBinLoad(src_rec.vec_data_obs.at(iShot), nSteps * nrec,
para.data_dir_name() + "/Shot" +
std::to_string(shot_ids[iShot]) + ".bin");
CHECK(cudaMemcpyAsync(src_rec.d_vec_data_obs.at(iShot),
src_rec.vec_data_obs.at(iShot),
nrec * nSteps * sizeof(float),
cudaMemcpyHostToDevice, streams[iShot]));
}
// ------------------------------------ time loop
// ------------------------------------
for (int it = 0; it <= nSteps - 2; it++) {
// =========================== elastic or acoustic
// ===========================
if (para.withAdj()) {
// save and record from the beginning
boundaries.field_from_bnd(d_szz, d_sxz, d_sxx, d_vz, d_vx, it);
}
// get snapshot at time it
if (it == iSnap && iShot == 0) {
CHECK(cudaMemcpy(h_snap, d_szz, nz * nx * sizeof(float),
cudaMemcpyDeviceToHost));
}
if (para.isAc()) {
ac_pressure<<<blocks, threads>>>(
d_vz, d_vx, d_szz, d_mem_dvz_dz, d_mem_dvx_dx, model.d_Lambda,
model.d_Den, cpml.d_K_z_half, cpml.d_a_z_half, cpml.d_b_z_half,
cpml.d_K_x, cpml.d_a_x, cpml.d_b_x, nz, nx, dt, dz, dx, nPml, nPad,
true, d_mat_dvz_dz, d_mat_dvx_dx);
add_source<<<1, 1>>>(d_szz, d_sxx, src_rec.vec_source.at(iShot)[it], nz,
true, src_rec.vec_z_src.at(iShot),
src_rec.vec_x_src.at(iShot), dt, model.d_Cp);
ac_velocity<<<blocks, threads>>>(
d_vz, d_vx, d_szz, d_mem_dszz_dz, d_mem_dsxx_dx, model.d_Lambda,
model.d_Den, model.d_ave_Byc_a, model.d_ave_Byc_b, cpml.d_K_z,
cpml.d_a_z, cpml.d_b_z, cpml.d_K_x_half, cpml.d_a_x_half,
cpml.d_b_x_half, nz, nx, dt, dz, dx, nPml, nPad, true);
} else {
el_stress<<<blocks, threads>>>(
d_vz, d_vx, d_szz, d_sxx, d_sxz, d_mem_dvz_dz, d_mem_dvz_dx,
d_mem_dvx_dz, d_mem_dvx_dx, model.d_Lambda, model.d_Mu,
model.d_ave_Mu, model.d_Den, cpml.d_K_z, cpml.d_a_z, cpml.d_b_z,
cpml.d_K_z_half, cpml.d_a_z_half, cpml.d_b_z_half, cpml.d_K_x,
cpml.d_a_x, cpml.d_b_x, cpml.d_K_x_half, cpml.d_a_x_half,
cpml.d_b_x_half, nz, nx, dt, dz, dx, nPml, nPad, true);
add_source<<<1, 1>>>(d_szz, d_sxx, src_rec.vec_source.at(iShot)[it], nz,
true, src_rec.vec_z_src.at(iShot),
src_rec.vec_x_src.at(iShot), dt, model.d_Cp);
el_velocity<<<blocks, threads>>>(
d_vz, d_vx, d_szz, d_sxx, d_sxz, d_mem_dszz_dz, d_mem_dsxz_dx,
d_mem_dsxz_dz, d_mem_dsxx_dx, model.d_Lambda, model.d_Mu,
model.d_ave_Byc_a, model.d_ave_Byc_b, cpml.d_K_z, cpml.d_a_z,
cpml.d_b_z, cpml.d_K_z_half, cpml.d_a_z_half, cpml.d_b_z_half,
cpml.d_K_x, cpml.d_a_x, cpml.d_b_x, cpml.d_K_x_half,
cpml.d_a_x_half, cpml.d_b_x_half, nz, nx, dt, dz, dx, nPml, nPad,
true);
}
recording<<<(nrec + 31) / 32, 32>>>(
d_szz, nz, src_rec.d_vec_data.at(iShot), iShot, it + 1, nSteps, nrec,
src_rec.d_vec_z_rec.at(iShot), src_rec.d_vec_x_rec.at(iShot));
}
if (!para.if_res()) {
CHECK(cudaMemcpyAsync(
src_rec.vec_data.at(iShot), src_rec.d_vec_data.at(iShot),
nSteps * nrec * sizeof(float), cudaMemcpyDeviceToHost,
streams[iShot])); // test
}
// fileBinWrite(h_snap, nz*nx, "SnapGPU.bin");
// compute residuals
if (para.if_res()) {
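      // Residual path: window and band-pass the observed and synthetic traces
      // identically, optionally update the source wavelet, form res = obs - syn,
      // and reduce it into the scalar misfit accumulated in h_l2Obj; the adjoint
      // of the same preprocessing is then applied to the residual.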
dim3 blocksT((nSteps + TX - 1) / TX, (nrec + TY - 1) / TY);
// for fun modify observed data
// float filter2[4] = {8.0, 9.0, 12.0, 13.0};
// cuda_window<<<blocksT,threads>>>(nSteps, nrec, dt, win_ratio,
// src_rec.d_vec_data_obs.at(iShot)); bp_filter1d(nSteps, dt, nrec,
// src_rec.d_vec_data_obs.at(iShot), filter2);
// windowing
if (para.if_win()) {
cuda_window<<<blocksT, threads>>>(
nSteps, nrec, dt, src_rec.d_vec_win_start.at(iShot),
src_rec.d_vec_win_end.at(iShot), src_rec.d_vec_weights.at(iShot),
win_ratio, src_rec.d_vec_data_obs.at(iShot));
cuda_window<<<blocksT, threads>>>(
nSteps, nrec, dt, src_rec.d_vec_win_start.at(iShot),
src_rec.d_vec_win_end.at(iShot), src_rec.d_vec_weights.at(iShot),
win_ratio, src_rec.d_vec_data.at(iShot));
} else {
cuda_window<<<blocksT, threads>>>(nSteps, nrec, dt, win_ratio,
src_rec.d_vec_data_obs.at(iShot));
cuda_window<<<blocksT, threads>>>(nSteps, nrec, dt, win_ratio,
src_rec.d_vec_data.at(iShot));
}
// filtering
if (para.if_filter()) {
bp_filter1d(nSteps, dt, nrec, src_rec.d_vec_data_obs.at(iShot),
para.filter());
bp_filter1d(nSteps, dt, nrec, src_rec.d_vec_data.at(iShot),
para.filter());
}
// Calculate source update and filter calculated data
if (para.if_src_update()) {
amp_ratio =
source_update(nSteps, dt, nrec, src_rec.d_vec_data_obs.at(iShot),
src_rec.d_vec_data.at(iShot),
src_rec.d_vec_source.at(iShot), src_rec.d_coef);
printf(" Source update => Processing shot %d, amp_ratio = %f\n",
iShot, amp_ratio);
}
amp_ratio = 1.0; // amplitude not used, so set to 1.0
// objective function
gpuMinus<<<blocksT, threads>>>(
src_rec.d_vec_res.at(iShot), src_rec.d_vec_data_obs.at(iShot),
src_rec.d_vec_data.at(iShot), nSteps, nrec);
cuda_cal_objective<<<1, 512>>>(d_l2Obj_temp, src_rec.d_vec_res.at(iShot),
nSteps * nrec);
CHECK(cudaMemcpy(h_l2Obj_temp, d_l2Obj_temp, sizeof(float),
cudaMemcpyDeviceToHost));
h_l2Obj += h_l2Obj_temp[0];
// update source again (adjoint)
if (para.if_src_update()) {
source_update_adj(nSteps, dt, nrec, src_rec.d_vec_res.at(iShot),
amp_ratio, src_rec.d_coef);
}
// filtering again (adjoint)
if (para.if_filter()) {
bp_filter1d(nSteps, dt, nrec, src_rec.d_vec_res.at(iShot),
para.filter());
}
// windowing again (adjoint)
if (para.if_win()) {
cuda_window<<<blocksT, threads>>>(
nSteps, nrec, dt, src_rec.d_vec_win_start.at(iShot),
src_rec.d_vec_win_end.at(iShot), src_rec.d_vec_weights.at(iShot),
0.1, src_rec.d_vec_res.at(iShot));
} else {
cuda_window<<<blocksT, threads>>>(nSteps, nrec, dt, win_ratio,
src_rec.d_vec_res.at(iShot));
}
CHECK(cudaMemcpyAsync(
src_rec.vec_res.at(iShot), src_rec.d_vec_res.at(iShot),
nSteps * nrec * sizeof(float), cudaMemcpyDeviceToHost,
streams[iShot])); // test
// CHECK(cudaMemcpy(src_rec.vec_res.at(iShot), src_rec.d_vec_res.at(iShot), \
// nSteps*nrec*sizeof(float), cudaMemcpyDeviceToHost)); // test
CHECK(cudaMemcpyAsync(
src_rec.vec_data.at(iShot), src_rec.d_vec_data.at(iShot),
nSteps * nrec * sizeof(float), cudaMemcpyDeviceToHost,
streams[iShot])); // test
CHECK(cudaMemcpyAsync(
src_rec.vec_data_obs.at(iShot), src_rec.d_vec_data_obs.at(iShot),
nSteps * nrec * sizeof(float), cudaMemcpyDeviceToHost,
streams[iShot])); // save preconditioned observed
CHECK(cudaMemcpy(src_rec.vec_source.at(iShot),
src_rec.d_vec_source.at(iShot), nSteps * sizeof(float),
cudaMemcpyDeviceToHost));
}
// =================
cudaDeviceSynchronize();
if (para.withAdj()) {
// ------------------------------------- Backward
// ---------------------------------- initialization
intialArrayGPU<<<blocks, threads>>>(d_vz_adj, nz, nx, 0.0);
intialArrayGPU<<<blocks, threads>>>(d_vx_adj, nz, nx, 0.0);
intialArrayGPU<<<blocks, threads>>>(d_szz_adj, nz, nx, 0.0);
intialArrayGPU<<<blocks, threads>>>(d_szz_p1, nz, nx, 0.0);
intialArrayGPU<<<blocks, threads>>>(d_mem_dvz_dz, nz, nx, 0.0);
intialArrayGPU<<<blocks, threads>>>(d_mem_dvx_dx, nz, nx, 0.0);
intialArrayGPU<<<blocks, threads>>>(d_mem_dszz_dz, nz, nx, 0.0);
intialArrayGPU<<<blocks, threads>>>(d_mem_dsxx_dx, nz, nx, 0.0);
for (int it = nSteps - 2; it >= 0; it--) {
if (para.isAc()) {
// if (it <= nSteps - 2) {
// save p to szz_plus_one
assignArrayGPU<<<blocks, threads>>>(d_szz, d_szz_p1, nz, nx);
// value at T-1
ac_velocity<<<blocks, threads>>>(
d_vz, d_vx, d_szz, d_mem_dszz_dz, d_mem_dsxx_dx, model.d_Lambda,
model.d_Den, model.d_ave_Byc_a, model.d_ave_Byc_b, cpml.d_K_z,
cpml.d_a_z, cpml.d_b_z, cpml.d_K_x_half, cpml.d_a_x_half,
cpml.d_b_x_half, nz, nx, dt, dz, dx, nPml, nPad, false);
boundaries.field_to_bnd(d_szz, d_sxz, d_sxx, d_vz, d_vx, it, false);
add_source<<<1, 1>>>(d_szz, d_sxx, src_rec.vec_source.at(iShot)[it],
nz, false, src_rec.vec_z_src.at(iShot),
src_rec.vec_x_src.at(iShot), dt, model.d_Cp);
add_source<<<1, 1>>>(d_szz_p1, d_sxx,
src_rec.vec_source.at(iShot)[it], nz, false,
src_rec.vec_z_src.at(iShot),
src_rec.vec_x_src.at(iShot), dt, model.d_Cp);
ac_pressure<<<blocks, threads>>>(
d_vz, d_vx, d_szz, d_mem_dvz_dz, d_mem_dvx_dx, model.d_Lambda,
model.d_Den, cpml.d_K_z_half, cpml.d_a_z_half, cpml.d_b_z_half,
cpml.d_K_x, cpml.d_a_x, cpml.d_b_x, nz, nx, dt, dz, dx, nPml,
nPad, false, d_mat_dvz_dz, d_mat_dvx_dx);
boundaries.field_to_bnd(d_szz, d_sxz, d_sxx, d_vz, d_vx, it, true);
// value at T-2
// ================
// adjoint computation
ac_velocity_adj<<<blocks, threads>>>(
d_vz_adj, d_vx_adj, d_szz_adj, d_mem_dvz_dz, d_mem_dvx_dx,
d_mem_dszz_dz, d_mem_dsxx_dx, model.d_Lambda, model.d_Den,
model.d_ave_Byc_a, model.d_ave_Byc_b, cpml.d_K_z_half,
cpml.d_a_z_half, cpml.d_b_z_half, cpml.d_K_x_half,
cpml.d_a_x_half, cpml.d_b_x_half, cpml.d_K_z, cpml.d_a_z,
cpml.d_b_z, cpml.d_K_x, cpml.d_a_x, cpml.d_b_x, nz, nx, dt, dz,
dx, nPml, nPad);
// inject residuals
res_injection<<<(nrec + 31) / 32, 32>>>(
d_szz_adj, nz, src_rec.d_vec_res.at(iShot), model.d_Lambda,
it + 1, dt, nSteps, nrec, src_rec.d_vec_z_rec.at(iShot),
src_rec.d_vec_x_rec.at(iShot));
ac_pressure_adj<<<blocks, threads>>>(
d_vz_adj, d_vx_adj, d_szz_adj, d_mem_dvz_dz, d_mem_dvx_dx,
d_mem_dszz_dz, d_mem_dsxx_dx, model.d_Lambda, model.d_Den,
model.d_ave_Byc_a, model.d_ave_Byc_b, cpml.d_K_z_half,
cpml.d_a_z_half, cpml.d_b_z_half, cpml.d_K_x_half,
cpml.d_a_x_half, cpml.d_b_x_half, cpml.d_K_z, cpml.d_a_z,
cpml.d_b_z, cpml.d_K_x, cpml.d_a_x, cpml.d_b_x, nz, nx, dt, dz,
dx, nPml, nPad, model.d_Cp, d_mat_dvz_dz, d_mat_dvx_dx,
model.d_CpGrad);
// value at T-1
// ac_adj_push<<<blocks,threads2>>>(d_vz_adj, d_vx_adj, d_szz_adj, d_adj_temp, \
// d_mem_dvz_dz, d_mem_dvx_dx, d_mem_dszz_dz, d_mem_dsxx_dx, \
// model.d_Lambda, model.d_Den, model.d_ave_Byc_a, model.d_ave_Byc_b, \
// cpml.d_K_z_half, cpml.d_a_z_half, cpml.d_b_z_half, \
// cpml.d_K_x_half, cpml.d_a_x_half, cpml.d_b_x_half, \
// cpml.d_K_z, cpml.d_a_z, cpml.d_b_z, \
// cpml.d_K_x, cpml.d_a_x, cpml.d_b_x, \
// nz, nx, dt, dz, dx, nPml, nPad);
// image_vel<<<blocks,threads>>>(d_szz_adj, nz, nx, dt, dz, dx, nPml, nPad, \
// model.d_Cp, model.d_Den, d_mat_dvz_dz, d_mat_dvx_dx, model.d_CpGrad);
image_vel_time<<<blocks, threads>>>(
d_szz, d_szz_p1, d_szz_adj, nz, nx, dt, dz, dx, nPml, nPad,
model.d_Cp, model.d_Lambda, model.d_CpGrad);
} else {
el_velocity<<<blocks, threads>>>(
d_vz, d_vx, d_szz, d_sxx, d_sxz, d_mem_dszz_dz, d_mem_dsxz_dx,
d_mem_dsxz_dz, d_mem_dsxx_dx, model.d_Lambda, model.d_Mu,
model.d_ave_Byc_a, model.d_ave_Byc_b, cpml.d_K_z, cpml.d_a_z,
cpml.d_b_z, cpml.d_K_z_half, cpml.d_a_z_half, cpml.d_b_z_half,
cpml.d_K_x, cpml.d_a_x, cpml.d_b_x, cpml.d_K_x_half,
cpml.d_a_x_half, cpml.d_b_x_half, nz, nx, dt, dz, dx, nPml, nPad,
false);
el_stress<<<blocks, threads>>>(
d_vz, d_vx, d_szz, d_sxx, d_sxz, d_mem_dvz_dz, d_mem_dvz_dx,
d_mem_dvx_dz, d_mem_dvx_dx, model.d_Lambda, model.d_Mu,
model.d_ave_Mu, model.d_Den, cpml.d_K_z, cpml.d_a_z, cpml.d_b_z,
cpml.d_K_z_half, cpml.d_a_z_half, cpml.d_b_z_half, cpml.d_K_x,
cpml.d_a_x, cpml.d_b_x, cpml.d_K_x_half, cpml.d_a_x_half,
cpml.d_b_x_half, nz, nx, dt, dz, dx, nPml, nPad, false);
}
// boundaries.field_to_bnd(d_szz, d_sxz, d_sxx, d_vz, d_vx, it);
if (it == iSnap && iShot == 0) {
CHECK(cudaMemcpy(h_snap_back, d_szz, nz * nx * sizeof(float),
cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(h_snap_adj, d_szz_adj, nz * nx * sizeof(float),
cudaMemcpyDeviceToHost));
}
if (iShot == 0) {
// CHECK(cudaMemcpy(h_snap_adj, d_szz_adj, nz*nx*sizeof(float),
// cudaMemcpyDeviceToHost)); fileBinWrite(h_snap_adj, nz*nx,
// "SnapGPU_adj_" + std::to_string(it) + ".bin");
// CHECK(cudaMemcpy(h_snap, d_szz, nz*nx*sizeof(float),
// cudaMemcpyDeviceToHost)); fileBinWrite(h_snap, nz*nx, "SnapGPU_"
// + std::to_string(it) + ".bin");
}
}
// fileBinWrite(h_snap_back, nz*nx, "SnapGPU_back.bin");
// fileBinWrite(h_snap_adj, nz*nx, "SnapGPU_adj.bin");
CHECK(cudaMemcpy(model.h_CpGrad, model.d_CpGrad, nz * nx * sizeof(float),
cudaMemcpyDeviceToHost));
// fileBinWrite(model.h_CpGrad, nz*nx, "CpGradient.bin");
for (int i = 0; i < nz; i++) {
for (int j = 0; j < nx; j++) {
grad_Cp[i * nx + j] = model.h_CpGrad[j * nz + i];
}
}
initialArray(grad_Cs, nz * nx, 0.0);
initialArray(grad_Den, nz * nx, 0.0);
initialArray(grad_stf, nSteps * src_rec.nShots, 0.0);
}
}
auto finish = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed = finish - start;
#ifdef VERBOSE
std::cout << "Elapsed time: " << elapsed.count() << " second(s)."
<< std::endl;
#endif
if (!para.if_res()) {
for (int iShot = 0; iShot < group_size; iShot++) {
fileBinWrite(src_rec.vec_data.at(iShot),
nSteps * src_rec.vec_nrec.at(iShot),
para.data_dir_name() + "/Shot" +
std::to_string(shot_ids[iShot]) + ".bin");
}
}
if (para.if_save_scratch()) {
for (int iShot = 0; iShot < group_size; iShot++) {
fileBinWrite(src_rec.vec_res.at(iShot),
nSteps * src_rec.vec_nrec.at(iShot),
para.scratch_dir_name() + "/Residual_Shot" +
std::to_string(shot_ids[iShot]) + ".bin");
fileBinWrite(src_rec.vec_data.at(iShot),
nSteps * src_rec.vec_nrec.at(iShot),
para.scratch_dir_name() + "/Syn_Shot" +
std::to_string(shot_ids[iShot]) + ".bin");
fileBinWrite(src_rec.vec_data_obs.at(iShot),
nSteps * src_rec.vec_nrec.at(iShot),
para.scratch_dir_name() + "/CondObs_Shot" +
std::to_string(shot_ids[iShot]) + ".bin");
// fileBinWrite(src_rec.vec_source.at(iShot), nSteps,
// para.scratch_dir_name() + "src_updated" +
// std::to_string(iShot) + ".bin");
}
}
// #ifdef DEBUG
// std::cout << "cufd--" << __LINE__ << std::endl;
// #endif
// output residual
if (para.if_res() && !para.withAdj()) {
h_l2Obj = 0.5 * h_l2Obj; // DL 02/21/2019 (need to make misfit accurate
// here rather than in the script)
// fileBinWrite(&h_l2Obj, 1, "l2Obj.bin");
std::cout << "Total l2 residual = " << std::to_string(h_l2Obj) << std::endl;
std::cout << "calc_id = " << calc_id << std::endl;
*res = h_l2Obj;
}
free(h_l2Obj_temp);
free(h_snap);
free(h_snap_back);
free(h_snap_adj);
free(fCp);
free(fCs);
free(fDen);
// destroy the streams
for (int iShot = 0; iShot < group_size; iShot++)
CHECK(cudaStreamDestroy(streams[iShot]));
cudaFree(d_vz);
cudaFree(d_vx);
cudaFree(d_szz);
cudaFree(d_sxx);
cudaFree(d_sxz);
cudaFree(d_vz_adj);
cudaFree(d_vx_adj);
cudaFree(d_szz_adj);
cudaFree(d_szz_p1);
cudaFree(d_mem_dvz_dz);
cudaFree(d_mem_dvz_dx);
cudaFree(d_mem_dvx_dz);
cudaFree(d_mem_dvx_dx);
cudaFree(d_mem_dszz_dz);
cudaFree(d_mem_dsxx_dx);
cudaFree(d_mem_dsxz_dz);
cudaFree(d_mem_dsxz_dx);
cudaFree(d_mat_dvz_dz);
cudaFree(d_mat_dvx_dx);
cudaFree(d_l2Obj_temp);
#ifdef VERBOSE
std::cout << "Done!" << std::endl;
#endif
}
|
0c7515bca9f253dbc2f59e4d2d8a6d03e9b03da3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
__device__ void getTotalVol_gpu( const float* cellVolume, const float* value, float* totalVol) {
(*totalVol) += (*cellVolume) * value[0];
}
// CUDA kernel function
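// Each thread keeps a private partial sum over its grid-stride elements; the
// per-thread values are then combined by op_reduction into one slot per block
// of arg2, and the host finishes the sum after copying the block results back.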
__global__ void op_cuda_getTotalVol(
const float *__restrict arg0,
const float *__restrict arg1,
float *arg2,
int set_size ) {
float arg2_l[1];
for ( int d=0; d<1; d++ ){
arg2_l[d]=ZERO_float;
}
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
getTotalVol_gpu(arg0+n*1,
arg1+n*4,
arg2_l);
}
//global reductions
for ( int d=0; d<1; d++ ){
op_reduction<OP_INC>(&arg2[d+blockIdx.x*1],arg2_l[d]);
}
}
//host stub function
void op_par_loop_getTotalVol(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2){
float*arg2h = (float *)arg2.data;
int nargs = 3;
op_arg args[3];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(22);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[22].name = name;
OP_kernels[22].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: getTotalVol");
}
op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_22
int nthread = OP_BLOCK_SIZE_22;
#else
int nthread = OP_block_size;
// int nthread = 128;
#endif
int nblocks = 200;
//transfer global reduction data to GPU
int maxblocks = nblocks;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float));
reduct_size = MAX(reduct_size,sizeof(float));
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg2.data = OP_reduct_h + reduct_bytes;
arg2.data_d = OP_reduct_d + reduct_bytes;
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
((float *)arg2.data)[d+b*1] = ZERO_float;
}
}
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float));
mvReductArraysToDevice(reduct_bytes);
int nshared = reduct_size*nthread;
hipLaunchKernelGGL(( op_cuda_getTotalVol), dim3(nblocks),dim3(nthread),nshared, 0,
(float *) arg0.data_d,
(float *) arg1.data_d,
(float *) arg2.data_d,
set->size );
//transfer global reduction data back to CPU
mvReductArraysToHost(reduct_bytes);
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg2h[d] = arg2h[d] + ((float *)arg2.data)[d+b*1];
}
}
arg2.data = (char *)arg2h;
op_mpi_reduce(&arg2,arg2h);
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(hipDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[22].time += wall_t2 - wall_t1;
OP_kernels[22].transfer += (float)set->size * arg0.size;
OP_kernels[22].transfer += (float)set->size * arg1.size;
}
| 0c7515bca9f253dbc2f59e4d2d8a6d03e9b03da3.cu | //
// auto-generated by op2.py
//
//user function
__device__ void getTotalVol_gpu( const float* cellVolume, const float* value, float* totalVol) {
(*totalVol) += (*cellVolume) * value[0];
}
// CUDA kernel function
__global__ void op_cuda_getTotalVol(
const float *__restrict arg0,
const float *__restrict arg1,
float *arg2,
int set_size ) {
float arg2_l[1];
for ( int d=0; d<1; d++ ){
arg2_l[d]=ZERO_float;
}
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
getTotalVol_gpu(arg0+n*1,
arg1+n*4,
arg2_l);
}
//global reductions
for ( int d=0; d<1; d++ ){
op_reduction<OP_INC>(&arg2[d+blockIdx.x*1],arg2_l[d]);
}
}
//host stub function
void op_par_loop_getTotalVol(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2){
float*arg2h = (float *)arg2.data;
int nargs = 3;
op_arg args[3];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(22);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[22].name = name;
OP_kernels[22].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: getTotalVol");
}
op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_22
int nthread = OP_BLOCK_SIZE_22;
#else
int nthread = OP_block_size;
// int nthread = 128;
#endif
int nblocks = 200;
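    // A fixed 200-block launch is used together with the grid-stride loop in
    // the kernel, so the launch size does not need to match the set size.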
//transfer global reduction data to GPU
int maxblocks = nblocks;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float));
reduct_size = MAX(reduct_size,sizeof(float));
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg2.data = OP_reduct_h + reduct_bytes;
arg2.data_d = OP_reduct_d + reduct_bytes;
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
((float *)arg2.data)[d+b*1] = ZERO_float;
}
}
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float));
mvReductArraysToDevice(reduct_bytes);
int nshared = reduct_size*nthread;
op_cuda_getTotalVol<<<nblocks,nthread,nshared>>>(
(float *) arg0.data_d,
(float *) arg1.data_d,
(float *) arg2.data_d,
set->size );
//transfer global reduction data back to CPU
mvReductArraysToHost(reduct_bytes);
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg2h[d] = arg2h[d] + ((float *)arg2.data)[d+b*1];
}
}
arg2.data = (char *)arg2h;
op_mpi_reduce(&arg2,arg2h);
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(cudaDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[22].time += wall_t2 - wall_t1;
OP_kernels[22].transfer += (float)set->size * arg0.size;
OP_kernels[22].transfer += (float)set->size * arg1.size;
}
|
0ac223c64e3ab0632b67a8d9f4df3ec54795f993.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <rmm/device_buffer.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <cudf/utilities/error.hpp>
#include <hip/hip_runtime.h>
#include "MemoryConsumer.cuh"
MemoryConsumer::MemoryConsumer()
: futureObj(exitSignal.get_future()),
memory_sizes{1024 * 1024 * 1024}, // 1GB
delay{250ms}
{
}
void MemoryConsumer::setOptionsMegaBytes(const std::vector<size_t> & memory_sizes, std::chrono::milliseconds delay){
this->memory_sizes.resize(memory_sizes.size());
std::transform(memory_sizes.cbegin(), memory_sizes.cend(), this->memory_sizes.begin(), [](auto size){ return size * 1024 * 1024; });
this->delay = delay;
}
void MemoryConsumer::setOptionsPercentage(const std::vector<float> & memory_percentages, std::chrono::milliseconds delay){
size_t free_size;
size_t total_size;
CUDA_TRY(hipMemGetInfo(&free_size, &total_size));
this->memory_sizes.resize(memory_percentages.size());
std::transform(memory_percentages.cbegin(), memory_percentages.cend(), this->memory_sizes.begin(), [total_size](auto percentage){ return static_cast<size_t>(percentage * total_size); });
this->delay = delay;
}
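// Repeatedly allocates a device buffer of the next configured size, holds it for
// `delay`, and releases it when the rmm::device_buffer goes out of scope, until
// stop() fulfils the promise watched by stopRequested().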
void MemoryConsumer::run() {
size_t idx = 0;
while (stopRequested() == false) {
size_t size = memory_sizes[idx];
rmm::device_buffer buffer(size, rmm::cuda_stream_view{});
idx = (idx + 1) % memory_sizes.size();
std::this_thread::sleep_for(delay);
}
}
void MemoryConsumer::stop() {
exitSignal.set_value();
}
bool MemoryConsumer::stopRequested() {
// checks if value in future object is available
if (futureObj.wait_for(0ms) == std::future_status::timeout)
return false;
return true;
}
| 0ac223c64e3ab0632b67a8d9f4df3ec54795f993.cu | #include <algorithm>
#include <rmm/device_buffer.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <cudf/utilities/error.hpp>
#include <cuda_runtime.h>
#include "MemoryConsumer.cuh"
MemoryConsumer::MemoryConsumer()
: futureObj(exitSignal.get_future()),
memory_sizes{1024 * 1024 * 1024}, // 1GB
delay{250ms}
{
}
void MemoryConsumer::setOptionsMegaBytes(const std::vector<size_t> & memory_sizes, std::chrono::milliseconds delay){
this->memory_sizes.resize(memory_sizes.size());
std::transform(memory_sizes.cbegin(), memory_sizes.cend(), this->memory_sizes.begin(), [](auto size){ return size * 1024 * 1024; });
this->delay = delay;
}
void MemoryConsumer::setOptionsPercentage(const std::vector<float> & memory_percentages, std::chrono::milliseconds delay){
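	// Sizes are given as fractions of total device memory; cudaMemGetInfo supplies
	// the total used to convert each percentage into a byte count.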
size_t free_size;
size_t total_size;
CUDA_TRY(cudaMemGetInfo(&free_size, &total_size));
this->memory_sizes.resize(memory_percentages.size());
std::transform(memory_percentages.cbegin(), memory_percentages.cend(), this->memory_sizes.begin(), [total_size](auto percentage){ return static_cast<size_t>(percentage * total_size); });
this->delay = delay;
}
void MemoryConsumer::run() {
size_t idx = 0;
while (stopRequested() == false) {
size_t size = memory_sizes[idx];
rmm::device_buffer buffer(size, rmm::cuda_stream_view{});
idx = (idx + 1) % memory_sizes.size();
std::this_thread::sleep_for(delay);
}
}
void MemoryConsumer::stop() {
exitSignal.set_value();
}
bool MemoryConsumer::stopRequested() {
// checks if value in future object is available
if (futureObj.wait_for(0ms) == std::future_status::timeout)
return false;
return true;
}
|
00e900ced6e67472ad342eb7ae266419aa4e95c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vulcan/sampler.h>
#include <vulcan/device.h>
#include <vulcan/image.h>
namespace vulcan
{
template <Sampler::Filter filter, typename T>
VULCAN_GLOBAL
void GetSubimageKernel(const T* image, T* subimage, int width, int height)
{
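  // Downsamples the image by 2x: FILTER_LINEAR averages each 2x2 input block,
  // while FILTER_NEAREST keeps the top-left sample of the block.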
const int sub_width = width / 2;
const int sub_height = height / 2;
const int sub_x = blockIdx.x * blockDim.x + threadIdx.x;
const int sub_y = blockIdx.y * blockDim.y + threadIdx.y;
if (sub_x < sub_width && sub_y < sub_height)
{
T value;
const int x = 2 * sub_x;
const int y = 2 * sub_y;
if (filter == Sampler::FILTER_LINEAR)
{
value = image[width * (y + 0) + (x + 0)];
value += image[width * (y + 0) + (x + 1)];
value += image[width * (y + 1) + (x + 0)];
value += image[width * (y + 1) + (x + 1)];
value *= 0.25f;
}
else
{
value = image[y * width + x];
}
subimage[sub_y * sub_width + sub_x] = value;
}
}
template <typename T>
VULCAN_GLOBAL
void GetGradientKernel(const T* image, T* gx, T* gy, int width, int height)
{
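  // Cooperatively stages an 18x18 tile (16x16 block plus a one-pixel, edge-clamped
  // halo) into shared memory, then applies 3x3 Sobel stencils; the 0.125f factor
  // normalizes the 1-2-1 weights.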
const int shared_resolution = 18;
const int shared_size = shared_resolution * shared_resolution;
VULCAN_SHARED T shared[shared_size];
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int block_size = blockDim.x * blockDim.y;
const int index = y * width + x;
int shared_index = threadIdx.y * blockDim.x + threadIdx.x;
const int global_offset_x = blockIdx.x * blockDim.x - 1;
const int global_offset_y = blockIdx.y * blockDim.y - 1;
do
{
int xx = global_offset_x + (shared_index % shared_resolution);
int yy = global_offset_y + (shared_index / shared_resolution);
xx = clamp(xx, 0, width - 1);
yy = clamp(yy, 0, height - 1);
const int global_index = yy * width + xx;
shared[shared_index] = image[global_index];
shared_index += block_size;
}
while (shared_index < shared_size);
__syncthreads();
if (x < width && y < height)
{
T xx;
T yy;
const int sx = threadIdx.x + 1;
const int sy = threadIdx.y + 1;
xx = 1 * shared[shared_resolution * (sy - 1) + (sx + 1)];
xx += 2 * shared[shared_resolution * (sy + 0) + (sx + 1)];
xx += 1 * shared[shared_resolution * (sy + 1) + (sx + 1)];
xx -= 1 * shared[shared_resolution * (sy - 1) + (sx - 1)];
xx -= 2 * shared[shared_resolution * (sy + 0) + (sx - 1)];
xx -= 1 * shared[shared_resolution * (sy + 1) + (sx - 1)];
yy = 1 * shared[shared_resolution * (sy + 1) + (sx - 1)];
yy += 2 * shared[shared_resolution * (sy + 1) + (sx + 0)];
yy += 1 * shared[shared_resolution * (sy + 1) + (sx + 1)];
yy -= 1 * shared[shared_resolution * (sy - 1) + (sx - 1)];
yy -= 2 * shared[shared_resolution * (sy - 1) + (sx + 0)];
yy -= 1 * shared[shared_resolution * (sy - 1) + (sx + 1)];
gx[index] = 0.125f * xx;
gy[index] = 0.125f * yy;
}
}
template <typename T>
inline void GetSubimage(const T& image, T& subimage, Sampler::Filter filter)
{
const int w = image.GetWidth();
const int h = image.GetHeight();
subimage.Resize(w / 2, h / 2);
const dim3 total(w, h);
const dim3 threads(16, 16);
const dim3 blocks = GetKernelBlocks(total, threads);
switch (filter)
{
case Sampler::FILTER_NEAREST:
{
CUDA_LAUNCH(GetSubimageKernel<Sampler::FILTER_NEAREST>,
blocks, threads, 0, 0, image.GetData(), subimage.GetData(), w, h);
break;
}
case Sampler::FILTER_LINEAR:
{
CUDA_LAUNCH(GetSubimageKernel<Sampler::FILTER_LINEAR>,
blocks, threads, 0, 0, image.GetData(), subimage.GetData(), w, h);
break;
}
}
}
template <typename T>
void GetGradient(const T& image, T& gradient_x, T& gradient_y)
{
const int w = image.GetWidth();
const int h = image.GetHeight();
gradient_x.Resize(w, h);
gradient_y.Resize(w, h);
const dim3 total(w, h);
const dim3 threads(16, 16);
const dim3 blocks = GetKernelBlocks(total, threads);
CUDA_LAUNCH(GetGradientKernel, blocks, threads, 0, 0, image.GetData(),
gradient_x.GetData(), gradient_y.GetData(), w, h);
}
Sampler::Sampler()
{
}
void Sampler::GetSubimage(const Image& image, Image& subimage,
Filter filter) const
{
vulcan::GetSubimage(image, subimage, filter);
}
void Sampler::GetSubimage(const ColorImage& image, ColorImage& subimage,
Filter filter) const
{
vulcan::GetSubimage(image, subimage, filter);
}
void Sampler::GetGradient(const Image& image, Image& gradient_x,
Image& gradient_y) const
{
vulcan::GetGradient(image, gradient_x, gradient_y);
}
void Sampler::GetGradient(const ColorImage& image, ColorImage& gradient_x,
ColorImage& gradient_y) const
{
vulcan::GetGradient(image, gradient_x, gradient_y);
}
} // namespace vulcan | 00e900ced6e67472ad342eb7ae266419aa4e95c2.cu | #include <vulcan/sampler.h>
#include <vulcan/device.h>
#include <vulcan/image.h>
namespace vulcan
{
template <Sampler::Filter filter, typename T>
VULCAN_GLOBAL
void GetSubimageKernel(const T* image, T* subimage, int width, int height)
{
const int sub_width = width / 2;
const int sub_height = height / 2;
const int sub_x = blockIdx.x * blockDim.x + threadIdx.x;
const int sub_y = blockIdx.y * blockDim.y + threadIdx.y;
if (sub_x < sub_width && sub_y < sub_height)
{
T value;
const int x = 2 * sub_x;
const int y = 2 * sub_y;
if (filter == Sampler::FILTER_LINEAR)
{
value = image[width * (y + 0) + (x + 0)];
value += image[width * (y + 0) + (x + 1)];
value += image[width * (y + 1) + (x + 0)];
value += image[width * (y + 1) + (x + 1)];
value *= 0.25f;
}
else
{
value = image[y * width + x];
}
subimage[sub_y * sub_width + sub_x] = value;
}
}
template <typename T>
VULCAN_GLOBAL
void GetGradientKernel(const T* image, T* gx, T* gy, int width, int height)
{
const int shared_resolution = 18;
const int shared_size = shared_resolution * shared_resolution;
VULCAN_SHARED T shared[shared_size];
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int block_size = blockDim.x * blockDim.y;
const int index = y * width + x;
int shared_index = threadIdx.y * blockDim.x + threadIdx.x;
const int global_offset_x = blockIdx.x * blockDim.x - 1;
const int global_offset_y = blockIdx.y * blockDim.y - 1;
do
{
int xx = global_offset_x + (shared_index % shared_resolution);
int yy = global_offset_y + (shared_index / shared_resolution);
xx = clamp(xx, 0, width - 1);
yy = clamp(yy, 0, height - 1);
const int global_index = yy * width + xx;
shared[shared_index] = image[global_index];
shared_index += block_size;
}
while (shared_index < shared_size);
__syncthreads();
if (x < width && y < height)
{
T xx;
T yy;
const int sx = threadIdx.x + 1;
const int sy = threadIdx.y + 1;
xx = 1 * shared[shared_resolution * (sy - 1) + (sx + 1)];
xx += 2 * shared[shared_resolution * (sy + 0) + (sx + 1)];
xx += 1 * shared[shared_resolution * (sy + 1) + (sx + 1)];
xx -= 1 * shared[shared_resolution * (sy - 1) + (sx - 1)];
xx -= 2 * shared[shared_resolution * (sy + 0) + (sx - 1)];
xx -= 1 * shared[shared_resolution * (sy + 1) + (sx - 1)];
yy = 1 * shared[shared_resolution * (sy + 1) + (sx - 1)];
yy += 2 * shared[shared_resolution * (sy + 1) + (sx + 0)];
yy += 1 * shared[shared_resolution * (sy + 1) + (sx + 1)];
yy -= 1 * shared[shared_resolution * (sy - 1) + (sx - 1)];
yy -= 2 * shared[shared_resolution * (sy - 1) + (sx + 0)];
yy -= 1 * shared[shared_resolution * (sy - 1) + (sx + 1)];
gx[index] = 0.125f * xx;
gy[index] = 0.125f * yy;
}
}
template <typename T>
inline void GetSubimage(const T& image, T& subimage, Sampler::Filter filter)
{
const int w = image.GetWidth();
const int h = image.GetHeight();
subimage.Resize(w / 2, h / 2);
const dim3 total(w, h);
const dim3 threads(16, 16);
const dim3 blocks = GetKernelBlocks(total, threads);
switch (filter)
{
case Sampler::FILTER_NEAREST:
{
CUDA_LAUNCH(GetSubimageKernel<Sampler::FILTER_NEAREST>,
blocks, threads, 0, 0, image.GetData(), subimage.GetData(), w, h);
break;
}
case Sampler::FILTER_LINEAR:
{
CUDA_LAUNCH(GetSubimageKernel<Sampler::FILTER_LINEAR>,
blocks, threads, 0, 0, image.GetData(), subimage.GetData(), w, h);
break;
}
}
}
template <typename T>
void GetGradient(const T& image, T& gradient_x, T& gradient_y)
{
const int w = image.GetWidth();
const int h = image.GetHeight();
gradient_x.Resize(w, h);
gradient_y.Resize(w, h);
const dim3 total(w, h);
const dim3 threads(16, 16);
const dim3 blocks = GetKernelBlocks(total, threads);
CUDA_LAUNCH(GetGradientKernel, blocks, threads, 0, 0, image.GetData(),
gradient_x.GetData(), gradient_y.GetData(), w, h);
}
Sampler::Sampler()
{
}
void Sampler::GetSubimage(const Image& image, Image& subimage,
Filter filter) const
{
vulcan::GetSubimage(image, subimage, filter);
}
void Sampler::GetSubimage(const ColorImage& image, ColorImage& subimage,
Filter filter) const
{
vulcan::GetSubimage(image, subimage, filter);
}
void Sampler::GetGradient(const Image& image, Image& gradient_x,
Image& gradient_y) const
{
vulcan::GetGradient(image, gradient_x, gradient_y);
}
void Sampler::GetGradient(const ColorImage& image, ColorImage& gradient_x,
ColorImage& gradient_y) const
{
vulcan::GetGradient(image, gradient_x, gradient_y);
}
} // namespace vulcan |
28f2d914d1f1e9a70619fa92630328020b07de52.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* References:
*
* Hong, Sungpack, et al.
* "Accelerating CUDA graph algorithms at maximum warp."
* Acm Sigplan Notices 46.8 (2011): 267-276.
*
* Lifeng Nai, Yinglong Xia, Ilie G. Tanase, Hyesoon Kim, and Ching-Yung Lin.
* GraphBIG: Understanding Graph Computing in the Context of Industrial Solutions,
 * In the proceedings of the International Conference for High Performance Computing, Networking, Storage and Analysis (SC),
* Nov. 2015
*
*/
#include "helper_emogi.h"
#define MEM_ALIGN MEM_ALIGN_32
typedef uint32_t EdgeT;
typedef uint32_t WeightT;
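// kernel_coalesce assigns one warp per vertex: lanes stride over the vertex's
// adjacency list and relax edges with atomicMin on the neighbours' tentative
// distances. The start offset is rounded down to a 32-element boundary so
// warp-wide loads stay aligned; entries before `start` are skipped by the
// `i >= start` guard.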
__global__ void kernel_coalesce(bool *label, const WeightT *costList, WeightT *newCostList, const uint64_t vertex_count, const uint64_t *vertexList, const EdgeT *edgeList, const WeightT *weightList) {
const uint64_t tid = blockDim.x * BLOCK_SIZE * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
const uint64_t warpIdx = tid >> WARP_SHIFT;
const uint64_t laneIdx = tid & ((1 << WARP_SHIFT) - 1);
if (warpIdx < vertex_count && label[warpIdx]) {
uint64_t start = vertexList[warpIdx];
const uint64_t shift_start = start & MEM_ALIGN;
uint64_t end = vertexList[warpIdx+1];
WeightT cost = newCostList[warpIdx];
for(uint64_t i = shift_start + laneIdx; i < end; i += WARP_SIZE) {
if (newCostList[warpIdx] != cost)
break;
if (newCostList[edgeList[i]] > cost + weightList[i] && i >= start)
atomicMin(&(newCostList[edgeList[i]]), cost + weightList[i]);
}
label[warpIdx] = false;
}
}
__global__ void kernel_coalesce_chunk(bool *label, const WeightT *costList, WeightT *newCostList, const uint64_t vertex_count, const uint64_t *vertexList, const EdgeT *edgeList, const WeightT *weightList) {
const uint64_t tid = blockDim.x * BLOCK_SIZE * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
const uint64_t warpIdx = tid >> WARP_SHIFT;
const uint64_t laneIdx = tid & ((1 << WARP_SHIFT) - 1);
const uint64_t chunkIdx = warpIdx * CHUNK_SIZE;
uint64_t chunk_size = CHUNK_SIZE;
if((chunkIdx + CHUNK_SIZE) > vertex_count) {
if ( vertex_count > chunkIdx )
chunk_size = vertex_count - chunkIdx;
else
return;
}
for(uint32_t i = chunkIdx; i < chunk_size + chunkIdx; i++) {
if (label[i]) {
uint64_t start = vertexList[i];
const uint64_t shift_start = start & MEM_ALIGN;
uint64_t end = vertexList[i+1];
WeightT cost = newCostList[i];
for(uint64_t j = shift_start + laneIdx; j < end; j += WARP_SIZE) {
if (newCostList[i] != cost)
break;
if (newCostList[edgeList[j]] > cost + weightList[j] && j >= start)
atomicMin(&(newCostList[edgeList[j]]), cost + weightList[j]);
}
label[i] = false;
}
}
}
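// update applies the relaxed distances: any vertex whose tentative cost
// decreased is re-labelled active and *changed is set so the host launches
// another relaxation sweep.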
__global__ void update(bool *label, WeightT *costList, WeightT *newCostList, const uint32_t vertex_count, bool *changed) {
uint64_t tid = blockDim.x * BLOCK_SIZE * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
if (tid < vertex_count) {
if (newCostList[tid] < costList[tid]) {
costList[tid] = newCostList[tid];
label[tid] = true;
*changed = true;
}
}
}
int main(int argc, char *argv[]) {
std::ifstream file, file2;
std::string vertex_file, edge_file, weight_file;
std::string filename;
bool changed_h, *changed_d, no_src = false, *label_d;
int c, num_run = 1, arg_num = 0, device = 0;
impl_type type;
mem_type mem;
uint32_t one, iter;
WeightT offset = 0;
WeightT zero;
WeightT *costList_d, *newCostList_d, *weightList_h, *weightList_d;
uint64_t *vertexList_h, *vertexList_d;
EdgeT *edgeList_h, *edgeList_d;
uint64_t *edgeList64_h;
uint64_t vertex_count, edge_count, weight_count, vertex_size, edge_size, weight_size;
uint64_t typeT, src;
uint64_t numblocks_kernel, numblocks_update, numthreads;
float milliseconds;
double avg_milliseconds;
hipEvent_t start, end;
while ((c = getopt(argc, argv, "f:r:t:i:m:d:o:h")) != -1) {
switch (c) {
case 'f':
filename = optarg;
arg_num++;
break;
case 'r':
if (!no_src)
src = atoll(optarg);
arg_num++;
break;
case 't':
type = (impl_type)atoi(optarg);
arg_num++;
break;
case 'i':
no_src = true;
src = 0;
num_run = atoi(optarg);
arg_num++;
break;
case 'm':
mem = (mem_type)atoi(optarg);
arg_num++;
break;
case 'd':
device = atoi(optarg);
break;
case 'o':
offset = atoi(optarg);
arg_num++;
break;
case 'h':
printf("4-byte edge SSSP with uint32 edge weight\n");
printf("\t-f | input file name (must end with .bel)\n");
printf("\t-r | SSSP root (unused when i > 1)\n");
printf("\t-t | type of SSSP to run\n");
printf("\t | COALESCE = 1, COALESCE_CHUNK = 2\n");
printf("\t-m | memory allocation\n");
printf("\t | GPUMEM = 0, UVM_READONLY = 1, UVM_DIRECT = 2\n");
printf("\t-i | number of iterations to run\n");
printf("\t-d | GPU device id (default=0)\n");
printf("\t-o | edge weight offset (default=0)\n");
printf("\t-h | help message\n");
return 0;
case '?':
break;
default:
break;
}
}
if (arg_num < 4) {
printf("4-byte edge SSSP with uint32 edge weight\n");
printf("\t-f | input file name (must end with .bel)\n");
printf("\t-r | SSSP root (unused when i > 1)\n");
printf("\t-t | type of SSSP to run\n");
printf("\t | COALESCE = 1, COALESCE_CHUNK = 2\n");
printf("\t-m | memory allocation\n");
printf("\t | GPUMEM = 0, UVM_READONLY = 1, UVM_DIRECT = 2\n");
printf("\t-i | number of iterations to run\n");
printf("\t-d | GPU device id (default=0)\n");
printf("\t-o | edge weight offset (default=0)\n");
printf("\t-h | help message\n");
return 0;
}
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&end));
vertex_file = filename + ".col";
edge_file = filename + ".dst";
weight_file = filename + ".val";
std::cout << filename << std::endl;
// Read files
// Start reading vertex list
file.open(vertex_file.c_str(), std::ios::in | std::ios::binary);
if (!file.is_open()) {
fprintf(stderr, "Vertex file open failed\n");
exit(1);
}
file.read((char*)(&vertex_count), 8);
file.read((char*)(&typeT), 8);
vertex_count--;
printf("Vertex: %lu, ", vertex_count);
vertex_size = (vertex_count+1) * sizeof(uint64_t);
vertexList_h = (uint64_t*)malloc(vertex_size);
file.read((char*)vertexList_h, vertex_size);
file.close();
// Start reading edge list
file.open(edge_file.c_str(), std::ios::in | std::ios::binary);
if (!file.is_open()) {
fprintf(stderr, "Edge file open failed\n");
exit(1);
}
file.read((char*)(&edge_count), 8);
file.read((char*)(&typeT), 8);
printf("Edge: %lu, ", edge_count);
fflush(stdout);
edge_size = edge_count * sizeof(EdgeT);
edgeList_h = NULL;
// Start reading edge weight list
file2.open(weight_file.c_str(), std::ios::in | std::ios::binary);
if (!file2.is_open()) {
fprintf(stderr, "Edge file open failed\n");
exit(1);
}
file2.read((char*)(&weight_count), 8);
file2.read((char*)(&typeT), 8);
printf("Weight: %lu\n", weight_count);
fflush(stdout);
weight_size = weight_count * sizeof(WeightT);
weightList_h = NULL;
edgeList64_h = (uint64_t*)malloc(edge_count * sizeof(uint64_t));
file.read((char*)edgeList64_h, edge_count * sizeof(uint64_t));
switch (mem) {
case GPUMEM:
edgeList_h = (EdgeT*)malloc(edge_size);
weightList_h = (WeightT*)malloc(weight_size);
file2.read((char*)weightList_h, weight_size);
checkCudaErrors(hipMalloc((void**)&edgeList_d, edge_size));
checkCudaErrors(hipMalloc((void**)&weightList_d, weight_size));
for (uint64_t i = 0; i < weight_count; i++) {
weightList_h[i] += offset;
edgeList_h[i] = (uint32_t)edgeList64_h[i];
}
break;
case UVM_READONLY:
checkCudaErrors(hipMallocManaged((void**)&edgeList_d, edge_size));
checkCudaErrors(hipMallocManaged((void**)&weightList_d, weight_size));
file2.read((char*)weightList_d, weight_size);
for (uint64_t i = 0; i < weight_count; i++) {
weightList_d[i] += offset;
edgeList_d[i] = (uint32_t)edgeList64_h[i];
}
checkCudaErrors(hipMemAdvise(edgeList_d, edge_size, hipMemAdviseSetReadMostly, device));
checkCudaErrors(hipMemAdvise(weightList_d, weight_size, hipMemAdviseSetReadMostly, device));
break;
case UVM_DIRECT:
checkCudaErrors(hipMallocManaged((void**)&edgeList_d, edge_size));
checkCudaErrors(hipMallocManaged((void**)&weightList_d, weight_size));
file2.read((char*)weightList_d, weight_size);
for (uint64_t i = 0; i < weight_count; i++) {
weightList_d[i] += offset;
edgeList_d[i] = (uint32_t)edgeList64_h[i];
}
checkCudaErrors(hipMemAdvise(edgeList_d, edge_size, hipMemAdviseSetAccessedBy, device));
checkCudaErrors(hipMemAdvise(weightList_d, weight_size, hipMemAdviseSetAccessedBy, device));
break;
}
free(edgeList64_h);
file.close();
file2.close();
// Allocate memory for GPU
checkCudaErrors(hipMalloc((void**)&vertexList_d, vertex_size));
checkCudaErrors(hipMalloc((void**)&label_d, vertex_count * sizeof(bool)));
checkCudaErrors(hipMalloc((void**)&changed_d, sizeof(bool)));
checkCudaErrors(hipMalloc((void**)&costList_d, vertex_count * sizeof(WeightT)));
checkCudaErrors(hipMalloc((void**)&newCostList_d, vertex_count * sizeof(WeightT)));
printf("Allocation finished\n");
fflush(stdout);
// Initialize values
checkCudaErrors(hipMemcpy(vertexList_d, vertexList_h, vertex_size, hipMemcpyHostToDevice));
if (mem == GPUMEM) {
checkCudaErrors(hipMemcpy(edgeList_d, edgeList_h, edge_size, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(weightList_d, weightList_h, weight_size, hipMemcpyHostToDevice));
}
numthreads = BLOCK_SIZE;
switch (type) {
case COALESCE:
numblocks_kernel = ((vertex_count * WARP_SIZE + numthreads) / numthreads);
break;
case COALESCE_CHUNK:
numblocks_kernel = ((vertex_count * (WARP_SIZE / CHUNK_SIZE) + numthreads) / numthreads);
break;
default:
fprintf(stderr, "Invalid type\n");
exit(1);
break;
}
numblocks_update = ((vertex_count + numthreads) / numthreads);
dim3 blockDim_kernel(BLOCK_SIZE, (numblocks_kernel+BLOCK_SIZE)/BLOCK_SIZE);
dim3 blockDim_update(BLOCK_SIZE, (numblocks_update+BLOCK_SIZE)/BLOCK_SIZE);
avg_milliseconds = 0.0f;
printf("Initialization done\n");
fflush(stdout);
// Set root
for (int i = 0; i < num_run; i++) {
zero = 0;
one = 1;
checkCudaErrors(hipMemset(costList_d, 0xFF, vertex_count * sizeof(WeightT)));
checkCudaErrors(hipMemset(newCostList_d, 0xFF, vertex_count * sizeof(WeightT)));
checkCudaErrors(hipMemset(label_d, 0x0, vertex_count * sizeof(bool)));
checkCudaErrors(hipMemcpy(&label_d[src], &one, sizeof(bool), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(&costList_d[src], &zero, sizeof(WeightT), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(&newCostList_d[src], &zero, sizeof(WeightT), hipMemcpyHostToDevice));
iter = 0;
checkCudaErrors(hipEventRecord(start, 0));
// Run SSSP
do {
changed_h = false;
checkCudaErrors(hipMemcpy(changed_d, &changed_h, sizeof(bool), hipMemcpyHostToDevice));
switch (type) {
case COALESCE:
hipLaunchKernelGGL(( kernel_coalesce), dim3(blockDim_kernel), dim3(numthreads), 0, 0, label_d, costList_d, newCostList_d, vertex_count, vertexList_d, edgeList_d, weightList_d);
break;
case COALESCE_CHUNK:
hipLaunchKernelGGL(( kernel_coalesce_chunk), dim3(blockDim_kernel), dim3(numthreads), 0, 0, label_d, costList_d, newCostList_d, vertex_count, vertexList_d, edgeList_d, weightList_d);
break;
default:
fprintf(stderr, "Invalid type\n");
exit(1);
break;
}
hipLaunchKernelGGL(( update), dim3(blockDim_update), dim3(numthreads), 0, 0, label_d, costList_d, newCostList_d, vertex_count, changed_d);
iter++;
checkCudaErrors(hipMemcpy(&changed_h, changed_d, sizeof(bool), hipMemcpyDeviceToHost));
} while(changed_h);
checkCudaErrors(hipEventRecord(end, 0));
checkCudaErrors(hipEventSynchronize(end));
checkCudaErrors(hipEventElapsedTime(&milliseconds, start, end));
printf("run %*d: ", 3, i);
printf("src %*lu, ", 12, src);
printf("iteration %*u, ", 3, iter);
printf("time %*f ms\n", 12, milliseconds);
fflush(stdout);
avg_milliseconds += (double)milliseconds;
src += vertex_count / num_run;
if (i < num_run - 1) {
EdgeT *edgeList_temp;
WeightT *weightList_temp;
// Flush GPU page cache for each iteration by re-allocating UVM
switch (mem) {
case UVM_READONLY:
checkCudaErrors(hipMallocManaged((void**)&edgeList_temp, edge_size));
checkCudaErrors(hipMallocManaged((void**)&weightList_temp, weight_size));
memcpy(edgeList_temp, edgeList_d, edge_size);
memcpy(weightList_temp, weightList_d, weight_size);
checkCudaErrors(hipFree(edgeList_d));
checkCudaErrors(hipFree(weightList_d));
edgeList_d = edgeList_temp;
weightList_d = weightList_temp;
checkCudaErrors(hipMemAdvise(edgeList_d, edge_size, hipMemAdviseSetReadMostly, device));
checkCudaErrors(hipMemAdvise(weightList_d, weight_size, hipMemAdviseSetReadMostly, device));
break;
default:
break;
}
}
}
printf("Average run time %f ms\n", avg_milliseconds / num_run);
free(vertexList_h);
if (edgeList_h)
free(edgeList_h);
if (weightList_h)
free(weightList_h);
checkCudaErrors(hipFree(vertexList_d));
checkCudaErrors(hipFree(weightList_d));
checkCudaErrors(hipFree(edgeList_d));
checkCudaErrors(hipFree(costList_d));
checkCudaErrors(hipFree(newCostList_d));
checkCudaErrors(hipFree(label_d));
checkCudaErrors(hipFree(changed_d));
return 0;
}
| 28f2d914d1f1e9a70619fa92630328020b07de52.cu | /* References:
*
* Hong, Sungpack, et al.
* "Accelerating CUDA graph algorithms at maximum warp."
* Acm Sigplan Notices 46.8 (2011): 267-276.
*
* Lifeng Nai, Yinglong Xia, Ilie G. Tanase, Hyesoon Kim, and Ching-Yung Lin.
* GraphBIG: Understanding Graph Computing in the Context of Industrial Solutions,
* In the proceedings of the International Conference for High Performance Computing, Networking, Storage and Analysis (SC),
* Nov. 2015
*
*/
#include "helper_emogi.h"
#define MEM_ALIGN MEM_ALIGN_32
typedef uint32_t EdgeT;
typedef uint32_t WeightT;
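// Warp-per-vertex relaxation: each warp owns one labeled (active) vertex, its lanes
// stride through that vertex's adjacency list in steps of WARP_SIZE, and every edge
// is relaxed with an atomicMin on the destination's tentative cost. The loop begins
// at a memory-aligned index (start & MEM_ALIGN) for coalesced loads; the `i >= start`
// guard skips the padding entries, and the early break stops work if another warp
// has already lowered this vertex's own cost mid-sweep.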
__global__ void kernel_coalesce(bool *label, const WeightT *costList, WeightT *newCostList, const uint64_t vertex_count, const uint64_t *vertexList, const EdgeT *edgeList, const WeightT *weightList) {
const uint64_t tid = blockDim.x * BLOCK_SIZE * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
const uint64_t warpIdx = tid >> WARP_SHIFT;
const uint64_t laneIdx = tid & ((1 << WARP_SHIFT) - 1);
if (warpIdx < vertex_count && label[warpIdx]) {
uint64_t start = vertexList[warpIdx];
const uint64_t shift_start = start & MEM_ALIGN;
uint64_t end = vertexList[warpIdx+1];
WeightT cost = newCostList[warpIdx];
for(uint64_t i = shift_start + laneIdx; i < end; i += WARP_SIZE) {
if (newCostList[warpIdx] != cost)
break;
if (newCostList[edgeList[i]] > cost + weightList[i] && i >= start)
atomicMin(&(newCostList[edgeList[i]]), cost + weightList[i]);
}
label[warpIdx] = false;
}
}
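// Chunked variant: each warp walks a chunk of CHUNK_SIZE consecutive vertices
// (clamped at the end of the vertex list) and applies the same warp-cooperative,
// aligned, atomicMin-based relaxation to every labeled vertex in its chunk.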
__global__ void kernel_coalesce_chunk(bool *label, const WeightT *costList, WeightT *newCostList, const uint64_t vertex_count, const uint64_t *vertexList, const EdgeT *edgeList, const WeightT *weightList) {
const uint64_t tid = blockDim.x * BLOCK_SIZE * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
const uint64_t warpIdx = tid >> WARP_SHIFT;
const uint64_t laneIdx = tid & ((1 << WARP_SHIFT) - 1);
const uint64_t chunkIdx = warpIdx * CHUNK_SIZE;
uint64_t chunk_size = CHUNK_SIZE;
if((chunkIdx + CHUNK_SIZE) > vertex_count) {
if ( vertex_count > chunkIdx )
chunk_size = vertex_count - chunkIdx;
else
return;
}
for(uint32_t i = chunkIdx; i < chunk_size + chunkIdx; i++) {
if (label[i]) {
uint64_t start = vertexList[i];
const uint64_t shift_start = start & MEM_ALIGN;
uint64_t end = vertexList[i+1];
WeightT cost = newCostList[i];
for(uint64_t j = shift_start + laneIdx; j < end; j += WARP_SIZE) {
if (newCostList[i] != cost)
break;
if (newCostList[edgeList[j]] > cost + weightList[j] && j >= start)
atomicMin(&(newCostList[edgeList[j]]), cost + weightList[j]);
}
label[i] = false;
}
}
}
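// Commit step: one thread per vertex copies an improved tentative cost from
// newCostList into costList, re-labels that vertex as active for the next sweep,
// and raises the global `changed` flag so the host loop below keeps iterating
// until no cost can be lowered any further.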
__global__ void update(bool *label, WeightT *costList, WeightT *newCostList, const uint32_t vertex_count, bool *changed) {
uint64_t tid = blockDim.x * BLOCK_SIZE * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
if (tid < vertex_count) {
if (newCostList[tid] < costList[tid]) {
costList[tid] = newCostList[tid];
label[tid] = true;
*changed = true;
}
}
}
int main(int argc, char *argv[]) {
std::ifstream file, file2;
std::string vertex_file, edge_file, weight_file;
std::string filename;
bool changed_h, *changed_d, no_src = false, *label_d;
int c, num_run = 1, arg_num = 0, device = 0;
impl_type type;
mem_type mem;
uint32_t one, iter;
WeightT offset = 0;
WeightT zero;
WeightT *costList_d, *newCostList_d, *weightList_h, *weightList_d;
uint64_t *vertexList_h, *vertexList_d;
EdgeT *edgeList_h, *edgeList_d;
uint64_t *edgeList64_h;
uint64_t vertex_count, edge_count, weight_count, vertex_size, edge_size, weight_size;
uint64_t typeT, src;
uint64_t numblocks_kernel, numblocks_update, numthreads;
float milliseconds;
double avg_milliseconds;
cudaEvent_t start, end;
while ((c = getopt(argc, argv, "f:r:t:i:m:d:o:h")) != -1) {
switch (c) {
case 'f':
filename = optarg;
arg_num++;
break;
case 'r':
if (!no_src)
src = atoll(optarg);
arg_num++;
break;
case 't':
type = (impl_type)atoi(optarg);
arg_num++;
break;
case 'i':
no_src = true;
src = 0;
num_run = atoi(optarg);
arg_num++;
break;
case 'm':
mem = (mem_type)atoi(optarg);
arg_num++;
break;
case 'd':
device = atoi(optarg);
break;
case 'o':
offset = atoi(optarg);
arg_num++;
break;
case 'h':
printf("4-byte edge SSSP with uint32 edge weight\n");
printf("\t-f | input file name (must end with .bel)\n");
printf("\t-r | SSSP root (unused when i > 1)\n");
printf("\t-t | type of SSSP to run\n");
printf("\t | COALESCE = 1, COALESCE_CHUNK = 2\n");
printf("\t-m | memory allocation\n");
printf("\t | GPUMEM = 0, UVM_READONLY = 1, UVM_DIRECT = 2\n");
printf("\t-i | number of iterations to run\n");
printf("\t-d | GPU device id (default=0)\n");
printf("\t-o | edge weight offset (default=0)\n");
printf("\t-h | help message\n");
return 0;
case '?':
break;
default:
break;
}
}
if (arg_num < 4) {
printf("4-byte edge SSSP with uint32 edge weight\n");
printf("\t-f | input file name (must end with .bel)\n");
printf("\t-r | SSSP root (unused when i > 1)\n");
printf("\t-t | type of SSSP to run\n");
printf("\t | COALESCE = 1, COALESCE_CHUNK = 2\n");
printf("\t-m | memory allocation\n");
printf("\t | GPUMEM = 0, UVM_READONLY = 1, UVM_DIRECT = 2\n");
printf("\t-i | number of iterations to run\n");
printf("\t-d | GPU device id (default=0)\n");
printf("\t-o | edge weight offset (default=0)\n");
printf("\t-h | help message\n");
return 0;
}
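// Example invocation (binary name is assumed here; flags as documented above):
//   ./sssp_32 -f graph.bel -t 1 -m 2 -i 4
// i.e. run the COALESCE kernel on graph.bel with UVM_DIRECT memory over 4 source vertices.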
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&end));
vertex_file = filename + ".col";
edge_file = filename + ".dst";
weight_file = filename + ".val";
std::cout << filename << std::endl;
// Read files
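// The graph is stored as a binary CSR split across three files (as read below):
// <name>.col holds two uint64 header words followed by the (vertex_count+1) uint64
// row offsets, <name>.dst holds a header plus one uint64 destination per edge
// (narrowed to the 32-bit EdgeT in memory), and <name>.val holds a header plus one
// 4-byte weight per edge as read here.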
// Start reading vertex list
file.open(vertex_file.c_str(), std::ios::in | std::ios::binary);
if (!file.is_open()) {
fprintf(stderr, "Vertex file open failed\n");
exit(1);
}
file.read((char*)(&vertex_count), 8);
file.read((char*)(&typeT), 8);
vertex_count--;
printf("Vertex: %lu, ", vertex_count);
vertex_size = (vertex_count+1) * sizeof(uint64_t);
vertexList_h = (uint64_t*)malloc(vertex_size);
file.read((char*)vertexList_h, vertex_size);
file.close();
// Start reading edge list
file.open(edge_file.c_str(), std::ios::in | std::ios::binary);
if (!file.is_open()) {
fprintf(stderr, "Edge file open failed\n");
exit(1);
}
file.read((char*)(&edge_count), 8);
file.read((char*)(&typeT), 8);
printf("Edge: %lu, ", edge_count);
fflush(stdout);
edge_size = edge_count * sizeof(EdgeT);
edgeList_h = NULL;
// Start reading edge weight list
file2.open(weight_file.c_str(), std::ios::in | std::ios::binary);
if (!file2.is_open()) {
fprintf(stderr, "Edge file open failed\n");
exit(1);
}
file2.read((char*)(&weight_count), 8);
file2.read((char*)(&typeT), 8);
printf("Weight: %lu\n", weight_count);
fflush(stdout);
weight_size = weight_count * sizeof(WeightT);
weightList_h = NULL;
edgeList64_h = (uint64_t*)malloc(edge_count * sizeof(uint64_t));
file.read((char*)edgeList64_h, edge_count * sizeof(uint64_t));
switch (mem) {
case GPUMEM:
edgeList_h = (EdgeT*)malloc(edge_size);
weightList_h = (WeightT*)malloc(weight_size);
file2.read((char*)weightList_h, weight_size);
checkCudaErrors(cudaMalloc((void**)&edgeList_d, edge_size));
checkCudaErrors(cudaMalloc((void**)&weightList_d, weight_size));
for (uint64_t i = 0; i < weight_count; i++) {
weightList_h[i] += offset;
edgeList_h[i] = (uint32_t)edgeList64_h[i];
}
break;
case UVM_READONLY:
checkCudaErrors(cudaMallocManaged((void**)&edgeList_d, edge_size));
checkCudaErrors(cudaMallocManaged((void**)&weightList_d, weight_size));
file2.read((char*)weightList_d, weight_size);
for (uint64_t i = 0; i < weight_count; i++) {
weightList_d[i] += offset;
edgeList_d[i] = (uint32_t)edgeList64_h[i];
}
checkCudaErrors(cudaMemAdvise(edgeList_d, edge_size, cudaMemAdviseSetReadMostly, device));
checkCudaErrors(cudaMemAdvise(weightList_d, weight_size, cudaMemAdviseSetReadMostly, device));
break;
case UVM_DIRECT:
checkCudaErrors(cudaMallocManaged((void**)&edgeList_d, edge_size));
checkCudaErrors(cudaMallocManaged((void**)&weightList_d, weight_size));
file2.read((char*)weightList_d, weight_size);
for (uint64_t i = 0; i < weight_count; i++) {
weightList_d[i] += offset;
edgeList_d[i] = (uint32_t)edgeList64_h[i];
}
checkCudaErrors(cudaMemAdvise(edgeList_d, edge_size, cudaMemAdviseSetAccessedBy, device));
checkCudaErrors(cudaMemAdvise(weightList_d, weight_size, cudaMemAdviseSetAccessedBy, device));
break;
}
free(edgeList64_h);
file.close();
file2.close();
// Allocate memory for GPU
checkCudaErrors(cudaMalloc((void**)&vertexList_d, vertex_size));
checkCudaErrors(cudaMalloc((void**)&label_d, vertex_count * sizeof(bool)));
checkCudaErrors(cudaMalloc((void**)&changed_d, sizeof(bool)));
checkCudaErrors(cudaMalloc((void**)&costList_d, vertex_count * sizeof(WeightT)));
checkCudaErrors(cudaMalloc((void**)&newCostList_d, vertex_count * sizeof(WeightT)));
printf("Allocation finished\n");
fflush(stdout);
// Initialize values
checkCudaErrors(cudaMemcpy(vertexList_d, vertexList_h, vertex_size, cudaMemcpyHostToDevice));
if (mem == GPUMEM) {
checkCudaErrors(cudaMemcpy(edgeList_d, edgeList_h, edge_size, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(weightList_d, weightList_h, weight_size, cudaMemcpyHostToDevice));
}
numthreads = BLOCK_SIZE;
switch (type) {
case COALESCE:
numblocks_kernel = ((vertex_count * WARP_SIZE + numthreads) / numthreads);
break;
case COALESCE_CHUNK:
numblocks_kernel = ((vertex_count * (WARP_SIZE / CHUNK_SIZE) + numthreads) / numthreads);
break;
default:
fprintf(stderr, "Invalid type\n");
exit(1);
break;
}
numblocks_update = ((vertex_count + numthreads) / numthreads);
dim3 blockDim_kernel(BLOCK_SIZE, (numblocks_kernel+BLOCK_SIZE)/BLOCK_SIZE);
dim3 blockDim_update(BLOCK_SIZE, (numblocks_update+BLOCK_SIZE)/BLOCK_SIZE);
avg_milliseconds = 0.0f;
printf("Initialization done\n");
fflush(stdout);
// Set root
for (int i = 0; i < num_run; i++) {
zero = 0;
one = 1;
checkCudaErrors(cudaMemset(costList_d, 0xFF, vertex_count * sizeof(WeightT)));
checkCudaErrors(cudaMemset(newCostList_d, 0xFF, vertex_count * sizeof(WeightT)));
checkCudaErrors(cudaMemset(label_d, 0x0, vertex_count * sizeof(bool)));
checkCudaErrors(cudaMemcpy(&label_d[src], &one, sizeof(bool), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(&costList_d[src], &zero, sizeof(WeightT), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(&newCostList_d[src], &zero, sizeof(WeightT), cudaMemcpyHostToDevice));
iter = 0;
checkCudaErrors(cudaEventRecord(start, 0));
// Run SSSP
do {
changed_h = false;
checkCudaErrors(cudaMemcpy(changed_d, &changed_h, sizeof(bool), cudaMemcpyHostToDevice));
switch (type) {
case COALESCE:
kernel_coalesce<<<blockDim_kernel, numthreads>>>(label_d, costList_d, newCostList_d, vertex_count, vertexList_d, edgeList_d, weightList_d);
break;
case COALESCE_CHUNK:
kernel_coalesce_chunk<<<blockDim_kernel, numthreads>>>(label_d, costList_d, newCostList_d, vertex_count, vertexList_d, edgeList_d, weightList_d);
break;
default:
fprintf(stderr, "Invalid type\n");
exit(1);
break;
}
update<<<blockDim_update, numthreads>>>(label_d, costList_d, newCostList_d, vertex_count, changed_d);
iter++;
checkCudaErrors(cudaMemcpy(&changed_h, changed_d, sizeof(bool), cudaMemcpyDeviceToHost));
} while(changed_h);
checkCudaErrors(cudaEventRecord(end, 0));
checkCudaErrors(cudaEventSynchronize(end));
checkCudaErrors(cudaEventElapsedTime(&milliseconds, start, end));
printf("run %*d: ", 3, i);
printf("src %*lu, ", 12, src);
printf("iteration %*u, ", 3, iter);
printf("time %*f ms\n", 12, milliseconds);
fflush(stdout);
avg_milliseconds += (double)milliseconds;
src += vertex_count / num_run;
if (i < num_run - 1) {
EdgeT *edgeList_temp;
WeightT *weightList_temp;
// Flush GPU page cache for each iteration by re-allocating UVM
switch (mem) {
case UVM_READONLY:
checkCudaErrors(cudaMallocManaged((void**)&edgeList_temp, edge_size));
checkCudaErrors(cudaMallocManaged((void**)&weightList_temp, weight_size));
memcpy(edgeList_temp, edgeList_d, edge_size);
memcpy(weightList_temp, weightList_d, weight_size);
checkCudaErrors(cudaFree(edgeList_d));
checkCudaErrors(cudaFree(weightList_d));
edgeList_d = edgeList_temp;
weightList_d = weightList_temp;
checkCudaErrors(cudaMemAdvise(edgeList_d, edge_size, cudaMemAdviseSetReadMostly, device));
checkCudaErrors(cudaMemAdvise(weightList_d, weight_size, cudaMemAdviseSetReadMostly, device));
break;
default:
break;
}
}
}
printf("Average run time %f ms\n", avg_milliseconds / num_run);
free(vertexList_h);
if (edgeList_h)
free(edgeList_h);
if (weightList_h)
free(weightList_h);
checkCudaErrors(cudaFree(vertexList_d));
checkCudaErrors(cudaFree(weightList_d));
checkCudaErrors(cudaFree(edgeList_d));
checkCudaErrors(cudaFree(costList_d));
checkCudaErrors(cudaFree(newCostList_d));
checkCudaErrors(cudaFree(label_d));
checkCudaErrors(cudaFree(changed_d));
return 0;
}
|
kernel_BinaryErosion.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void BinaryErosion (unsigned int *dst, int imageW, int imageH, int mask_w, int mask_h)
{
const int ix = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int iy = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if(ix < imageW && iy < imageH){
int match = 0;
for (int m = ix - mask_w ; m < ix + mask_w && !match; m++){
for (int n = iy - mask_h ; n < iy + mask_h && !match; n++){
float4 fresult = tex2D(texUCHAR, m, n);
if (fresult.x == 1.f && fresult.y == 1.f && fresult.z == 1.f )
match = 1;
}
}
if(!match)
dst[imageW * iy + ix] = make_color(0.f, 0.f, 0.f , 1.f);
else
dst[imageW * iy + ix] = make_color(1.f, 1.f, 1.f , 1.f);
}
}
extern "C" float binaryErosionWrapper (unsigned int *dst, int imageW, int imageH, int threshold, int iteration, float brightness, float contrast, int mask_w, int mask_h, int adjust)
{
// launch configuration (2D thread blocks tiling the image) for more effective kernel execution
dim3 threads(BLOCKDIM_X, BLOCKDIM_Y);
dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y));
unsigned int timer;
float runtime;
cutCreateTimer(&timer);
cutStartTimer(timer);
if(adjust)
hipLaunchKernelGGL(( Grayscale), dim3(grid), dim3(threads), 0, 0, dst, imageW, imageH, brightness, contrast);
else
hipLaunchKernelGGL(( Grayscale2), dim3(grid), dim3(threads), 0, 0, dst, imageW, imageH);
hipLaunchKernelGGL(( Binarize), dim3(grid), dim3(threads), 0, 0, dst, imageW, imageH, threshold);
for(int i=0; i<iteration; i++)
{
hipMemcpyToArray( d_tempArray, 0, 0, dst, imageW * imageH * sizeof(unsigned int), hipMemcpyDeviceToDevice);
hipBindTextureToArray(texUCHAR, d_tempArray);
hipLaunchKernelGGL(( BinaryErosion), dim3(grid), dim3(threads), 0, 0, dst, imageW, imageH, mask_w, mask_h);
}
hipUnbindTexture(texUCHAR);
hipDeviceSynchronize();
cutStopTimer(timer);
runtime = cutGetTimerValue(timer)/1000;
cutDeleteTimer(timer);
return runtime;
} | kernel_BinaryErosion.cu | __global__ void BinaryErosion (unsigned int *dst, int imageW, int imageH, int mask_w, int mask_h)
{
const int ix = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const int iy = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if(ix < imageW && iy < imageH){
int match = 0;
for (int m = ix - mask_w ; m < ix + mask_w && !match; m++){
for (int n = iy - mask_h ; n < iy + mask_h && !match; n++){
float4 fresult = tex2D(texUCHAR, m, n);
if (fresult.x == 1.f && fresult.y == 1.f && fresult.z == 1.f )
match = 1;
}
}
if(!match)
dst[imageW * iy + ix] = make_color(0.f, 0.f, 0.f , 1.f);
else
dst[imageW * iy + ix] = make_color(1.f, 1.f, 1.f , 1.f);
}
}
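// Host-side pipeline: grayscale (with optional brightness/contrast adjustment) ->
// binarize -> `iteration` erosion passes. Each pass copies the current result into
// d_tempArray, binds it to texUCHAR, and erodes by sampling the previous pass
// through the texture. Returns the elapsed time in seconds (the CUTIL timer value
// is reported in milliseconds, hence the division by 1000).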
extern "C" float binaryErosionWrapper (unsigned int *dst, int imageW, int imageH, int threshold, int iteration, float brightness, float contrast, int mask_w, int mask_h, int adjust)
{
// launch configuration (2D thread blocks tiling the image) for more effective kernel execution
dim3 threads(BLOCKDIM_X, BLOCKDIM_Y);
dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y));
unsigned int timer;
float runtime;
cutCreateTimer(&timer);
cutStartTimer(timer);
if(adjust)
Grayscale<<<grid, threads>>>(dst, imageW, imageH, brightness, contrast);
else
Grayscale2<<<grid, threads>>>(dst, imageW, imageH);
Binarize<<<grid, threads>>>(dst, imageW, imageH, threshold);
for(int i=0; i<iteration; i++)
{
cudaMemcpyToArray( d_tempArray, 0, 0, dst, imageW * imageH * sizeof(unsigned int), cudaMemcpyDeviceToDevice);
cudaBindTextureToArray(texUCHAR, d_tempArray);
BinaryErosion<<<grid, threads>>>(dst, imageW, imageH, mask_w, mask_h);
}
cudaUnbindTexture(texUCHAR);
cudaThreadSynchronize();
cutStopTimer(timer);
runtime = cutGetTimerValue(timer)/1000;
cutDeleteTimer(timer);
return runtime;
} |
63445ecbf28bc1db4a429882cf2f5e0c048a7036.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*! \file VL_3D_cuda.cu
 * \brief Definitions of the CUDA 3D VL algorithm functions. MHD algorithm
* from Stone & Gardiner 2009 "A simple unsplit Godunov method for
* multidimensional MHD"
*/
#if defined(CUDA) && defined(VL)
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "../global/global.h"
#include "../global/global_cuda.h"
#include "../hydro/hydro_cuda.h"
#include "../integrators/VL_3D_cuda.h"
#include "../io/io.h"
#include "../mhd/ct_electric_fields.h"
#include "../mhd/magnetic_update.h"
#include "../reconstruction/pcm_cuda.h"
#include "../reconstruction/plmc_cuda.h"
#include "../reconstruction/plmp_cuda.h"
#include "../reconstruction/ppmc_cuda.h"
#include "../reconstruction/ppmp_cuda.h"
#include "../riemann_solvers/exact_cuda.h"
#include "../riemann_solvers/hll_cuda.h"
#include "../riemann_solvers/hllc_cuda.h"
#include "../riemann_solvers/hlld_cuda.h"
#include "../riemann_solvers/roe_cuda.h"
#include "../utils/gpu.hpp"
#include "../utils/hydro_utilities.h"
__global__ void Update_Conserved_Variables_3D_half(Real *dev_conserved, Real *dev_conserved_half, Real *dev_F_x,
Real *dev_F_y, Real *dev_F_z, int nx, int ny, int nz, int n_ghost,
Real dx, Real dy, Real dz, Real dt, Real gamma, int n_fields,
Real density_floor);
void VL_Algorithm_3D_CUDA(Real *d_conserved, Real *d_grav_potential, int nx, int ny, int nz, int x_off, int y_off,
int z_off, int n_ghost, Real dx, Real dy, Real dz, Real xbound, Real ybound, Real zbound,
Real dt, int n_fields, Real density_floor, Real U_floor, Real *host_grav_potential)
{
// Here, *dev_conserved contains the entire
// set of conserved variables on the grid
// concatenated into a 1-d array
int n_cells = nx * ny * nz;
int ngrid = (n_cells + TPB - 1) / TPB;
// set values for GPU kernels
// number of blocks per 1D grid
dim3 dim1dGrid(ngrid, 1, 1);
// number of threads per 1D block
dim3 dim1dBlock(TPB, 1, 1);
// host_grav_potential is NULL if not using GRAVITY
temp_potential = host_grav_potential;
if (!memory_allocated) {
// allocate memory on the GPU
dev_conserved = d_conserved;
// Set the size of the interface and flux arrays
#ifdef MHD
// In MHD/Constrained Transport the interface arrays have one fewer field
// since the magnetic field that is stored on the face does not require
// reconstruction. Similarly, the fluxes have one fewer field since the
// magnetic field on that face doesn't have an associated flux. Each
// interface array stores the magnetic fields on that interface that are
// not perpendicular to the interface, arranged cyclically. I.e. the
// `Q_Lx` interface stores the reconstructed Y and Z magnetic fields in
// that order, the `Q_Ly` interface stores the Z and X magnetic fields in
// that order, and the `Q_Lz` interface stores the X and Y magnetic fields
// in that order. These fields can be indexed with the Q_?_dir grid_enums.
// The interface state arrays store the state at the interface on the
// "right" side of the cell, so the flux arrays store the fluxes through
// the right interface.
//
// According to Stone et al. 2008 section 5.3 and the source code of
// Athena, the following equations relate the magnetic fluxes to the
// face-centered electric fields/EMFs. -cross(V,B)x is the negative of the
// x-component of V cross B. Note that "X" is the direction the solver is
// running in this case, not necessarily the true "X".
// F_x[(grid_enum::fluxX_magnetic_z)*n_cells] = VxBy - BxVy = -(-cross(V,B))z = -EMF_Z
// F_x[(grid_enum::fluxX_magnetic_y)*n_cells] = VxBz - BxVz = (-cross(V,B))y = EMF_Y
// F_y[(grid_enum::fluxY_magnetic_x)*n_cells] = VxBy - BxVy = -(-cross(V,B))z = -EMF_X
// F_y[(grid_enum::fluxY_magnetic_z)*n_cells] = VxBz - BxVz = (-cross(V,B))y = EMF_Z
// F_z[(grid_enum::fluxZ_magnetic_y)*n_cells] = VxBy - BxVy = -(-cross(V,B))z = -EMF_Y
// F_z[(grid_enum::fluxZ_magnetic_x)*n_cells] = VxBz - BxVz = (-cross(V,B))y = EMF_X
size_t const arraySize = (n_fields - 1) * n_cells * sizeof(Real);
size_t const ctArraySize = 3 * n_cells * sizeof(Real);
#else // not MHD
size_t const arraySize = n_fields * n_cells * sizeof(Real);
#endif // MHD
CudaSafeCall(hipMalloc((void **)&dev_conserved_half, n_fields * n_cells * sizeof(Real)));
CudaSafeCall(hipMalloc((void **)&Q_Lx, arraySize));
CudaSafeCall(hipMalloc((void **)&Q_Rx, arraySize));
CudaSafeCall(hipMalloc((void **)&Q_Ly, arraySize));
CudaSafeCall(hipMalloc((void **)&Q_Ry, arraySize));
CudaSafeCall(hipMalloc((void **)&Q_Lz, arraySize));
CudaSafeCall(hipMalloc((void **)&Q_Rz, arraySize));
CudaSafeCall(hipMalloc((void **)&F_x, arraySize));
CudaSafeCall(hipMalloc((void **)&F_y, arraySize));
CudaSafeCall(hipMalloc((void **)&F_z, arraySize));
cuda_utilities::initGpuMemory(dev_conserved_half, n_fields * n_cells * sizeof(Real));
cuda_utilities::initGpuMemory(Q_Lx, arraySize);
cuda_utilities::initGpuMemory(Q_Rx, arraySize);
cuda_utilities::initGpuMemory(Q_Ly, arraySize);
cuda_utilities::initGpuMemory(Q_Ry, arraySize);
cuda_utilities::initGpuMemory(Q_Lz, arraySize);
cuda_utilities::initGpuMemory(Q_Rz, arraySize);
cuda_utilities::initGpuMemory(F_x, arraySize);
cuda_utilities::initGpuMemory(F_y, arraySize);
cuda_utilities::initGpuMemory(F_z, arraySize);
#ifdef MHD
CudaSafeCall(hipMalloc((void **)&ctElectricFields, ctArraySize));
#endif // MHD
#if defined(GRAVITY)
dev_grav_potential = d_grav_potential;
#else // not GRAVITY
dev_grav_potential = NULL;
#endif // GRAVITY
// If memory is single-allocated: memory_allocated becomes true and
// successive timesteps skip the allocation. If the memory is not
// single-allocated: memory_allocated stays false and memory is allocated
// every timestep.
memory_allocated = true;
}
#if defined(GRAVITY) && !defined(GRAVITY_GPU)
CudaSafeCall(hipMemcpy(dev_grav_potential, temp_potential, n_cells * sizeof(Real), hipMemcpyHostToDevice));
#endif // GRAVITY and GRAVITY_GPU
// Step 1: Use PCM reconstruction to put primitive variables into interface
// arrays
hipLaunchKernelGGL(PCM_Reconstruction_3D, dim1dGrid, dim1dBlock, 0, 0, dev_conserved, Q_Lx, Q_Rx, Q_Ly, Q_Ry, Q_Lz,
Q_Rz, nx, ny, nz, n_ghost, gama, n_fields);
CudaCheckError();
// Step 2: Calculate first-order upwind fluxes
#ifdef EXACT
hipLaunchKernelGGL(Calculate_Exact_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lx, Q_Rx, F_x, nx, ny, nz, n_ghost,
gama, 0, n_fields);
hipLaunchKernelGGL(Calculate_Exact_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Ly, Q_Ry, F_y, nx, ny, nz, n_ghost,
gama, 1, n_fields);
hipLaunchKernelGGL(Calculate_Exact_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lz, Q_Rz, F_z, nx, ny, nz, n_ghost,
gama, 2, n_fields);
#endif // EXACT
#ifdef ROE
hipLaunchKernelGGL(Calculate_Roe_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lx, Q_Rx, F_x, nx, ny, nz, n_ghost, gama,
0, n_fields);
hipLaunchKernelGGL(Calculate_Roe_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Ly, Q_Ry, F_y, nx, ny, nz, n_ghost, gama,
1, n_fields);
hipLaunchKernelGGL(Calculate_Roe_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lz, Q_Rz, F_z, nx, ny, nz, n_ghost, gama,
2, n_fields);
#endif // ROE
#ifdef HLLC
hipLaunchKernelGGL(Calculate_HLLC_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lx, Q_Rx, F_x, nx, ny, nz, n_ghost,
gama, 0, n_fields);
hipLaunchKernelGGL(Calculate_HLLC_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Ly, Q_Ry, F_y, nx, ny, nz, n_ghost,
gama, 1, n_fields);
hipLaunchKernelGGL(Calculate_HLLC_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lz, Q_Rz, F_z, nx, ny, nz, n_ghost,
gama, 2, n_fields);
#endif // HLLC
#ifdef HLL
hipLaunchKernelGGL(Calculate_HLL_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lx, Q_Rx, F_x, nx, ny, nz, n_ghost, gama,
0, n_fields);
hipLaunchKernelGGL(Calculate_HLL_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Ly, Q_Ry, F_y, nx, ny, nz, n_ghost, gama,
1, n_fields);
hipLaunchKernelGGL(Calculate_HLL_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lz, Q_Rz, F_z, nx, ny, nz, n_ghost, gama,
2, n_fields);
#endif // HLL
#ifdef HLLD
hipLaunchKernelGGL(mhd::Calculate_HLLD_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lx, Q_Rx,
&(dev_conserved[(grid_enum::magnetic_x)*n_cells]), F_x, n_cells, gama, 0, n_fields);
hipLaunchKernelGGL(mhd::Calculate_HLLD_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Ly, Q_Ry,
&(dev_conserved[(grid_enum::magnetic_y)*n_cells]), F_y, n_cells, gama, 1, n_fields);
hipLaunchKernelGGL(mhd::Calculate_HLLD_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lz, Q_Rz,
&(dev_conserved[(grid_enum::magnetic_z)*n_cells]), F_z, n_cells, gama, 2, n_fields);
#endif // HLLD
CudaCheckError();
#ifdef MHD
// Step 2.5: Compute the Constrained transport electric fields
hipLaunchKernelGGL(mhd::Calculate_CT_Electric_Fields, dim1dGrid, dim1dBlock, 0, 0, F_x, F_y, F_z, dev_conserved,
ctElectricFields, nx, ny, nz, n_cells);
CudaCheckError();
#endif // MHD
// Step 3: Update the conserved variables half a timestep
hipLaunchKernelGGL(Update_Conserved_Variables_3D_half, dim1dGrid, dim1dBlock, 0, 0, dev_conserved, dev_conserved_half,
F_x, F_y, F_z, nx, ny, nz, n_ghost, dx, dy, dz, 0.5 * dt, gama, n_fields, density_floor);
CudaCheckError();
#ifdef MHD
// Update the magnetic fields
hipLaunchKernelGGL(mhd::Update_Magnetic_Field_3D, dim1dGrid, dim1dBlock, 0, 0, dev_conserved, dev_conserved_half,
ctElectricFields, nx, ny, nz, n_cells, 0.5 * dt, dx, dy, dz);
CudaCheckError();
#endif // MHD
// Step 4: Construct left and right interface values using updated conserved
// variables
#ifdef PCM
hipLaunchKernelGGL(PCM_Reconstruction_3D, dim1dGrid, dim1dBlock, 0, 0, dev_conserved_half, Q_Lx, Q_Rx, Q_Ly, Q_Ry,
Q_Lz, Q_Rz, nx, ny, nz, n_ghost, gama, n_fields);
#endif // PCM
#ifdef PLMP
hipLaunchKernelGGL(PLMP_cuda, dim1dGrid, dim1dBlock, 0, 0, dev_conserved_half, Q_Lx, Q_Rx, nx, ny, nz, n_ghost, dx,
dt, gama, 0, n_fields);
hipLaunchKernelGGL(PLMP_cuda, dim1dGrid, dim1dBlock, 0, 0, dev_conserved_half, Q_Ly, Q_Ry, nx, ny, nz, n_ghost, dy,
dt, gama, 1, n_fields);
hipLaunchKernelGGL(PLMP_cuda, dim1dGrid, dim1dBlock, 0, 0, dev_conserved_half, Q_Lz, Q_Rz, nx, ny, nz, n_ghost, dz,
dt, gama, 2, n_fields);
#endif // PLMP
#ifdef PLMC
hipLaunchKernelGGL(PLMC_cuda, dim1dGrid, dim1dBlock, 0, 0, dev_conserved_half, Q_Lx, Q_Rx, nx, ny, nz, dx, dt, gama,
0, n_fields);
hipLaunchKernelGGL(PLMC_cuda, dim1dGrid, dim1dBlock, 0, 0, dev_conserved_half, Q_Ly, Q_Ry, nx, ny, nz, dy, dt, gama,
1, n_fields);
hipLaunchKernelGGL(PLMC_cuda, dim1dGrid, dim1dBlock, 0, 0, dev_conserved_half, Q_Lz, Q_Rz, nx, ny, nz, dz, dt, gama,
2, n_fields);
#endif // PLMC
#ifdef PPMP
hipLaunchKernelGGL(PPMP_cuda, dim1dGrid, dim1dBlock, 0, 0, dev_conserved_half, Q_Lx, Q_Rx, nx, ny, nz, n_ghost, dx,
dt, gama, 0, n_fields);
hipLaunchKernelGGL(PPMP_cuda, dim1dGrid, dim1dBlock, 0, 0, dev_conserved_half, Q_Ly, Q_Ry, nx, ny, nz, n_ghost, dy,
dt, gama, 1, n_fields);
hipLaunchKernelGGL(PPMP_cuda, dim1dGrid, dim1dBlock, 0, 0, dev_conserved_half, Q_Lz, Q_Rz, nx, ny, nz, n_ghost, dz,
dt, gama, 2, n_fields);
#endif // PPMP
#ifdef PPMC
hipLaunchKernelGGL(PPMC_VL, dim1dGrid, dim1dBlock, 0, 0, dev_conserved_half, Q_Lx, Q_Rx, nx, ny, nz, gama, 0);
hipLaunchKernelGGL(PPMC_VL, dim1dGrid, dim1dBlock, 0, 0, dev_conserved_half, Q_Ly, Q_Ry, nx, ny, nz, gama, 1);
hipLaunchKernelGGL(PPMC_VL, dim1dGrid, dim1dBlock, 0, 0, dev_conserved_half, Q_Lz, Q_Rz, nx, ny, nz, gama, 2);
#endif // PPMC
CudaCheckError();
// Step 5: Calculate the fluxes again
#ifdef EXACT
hipLaunchKernelGGL(Calculate_Exact_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lx, Q_Rx, F_x, nx, ny, nz, n_ghost,
gama, 0, n_fields);
hipLaunchKernelGGL(Calculate_Exact_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Ly, Q_Ry, F_y, nx, ny, nz, n_ghost,
gama, 1, n_fields);
hipLaunchKernelGGL(Calculate_Exact_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lz, Q_Rz, F_z, nx, ny, nz, n_ghost,
gama, 2, n_fields);
#endif // EXACT
#ifdef ROE
hipLaunchKernelGGL(Calculate_Roe_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lx, Q_Rx, F_x, nx, ny, nz, n_ghost, gama,
0, n_fields);
hipLaunchKernelGGL(Calculate_Roe_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Ly, Q_Ry, F_y, nx, ny, nz, n_ghost, gama,
1, n_fields);
hipLaunchKernelGGL(Calculate_Roe_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lz, Q_Rz, F_z, nx, ny, nz, n_ghost, gama,
2, n_fields);
#endif // ROE
#ifdef HLLC
hipLaunchKernelGGL(Calculate_HLLC_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lx, Q_Rx, F_x, nx, ny, nz, n_ghost,
gama, 0, n_fields);
hipLaunchKernelGGL(Calculate_HLLC_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Ly, Q_Ry, F_y, nx, ny, nz, n_ghost,
gama, 1, n_fields);
hipLaunchKernelGGL(Calculate_HLLC_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lz, Q_Rz, F_z, nx, ny, nz, n_ghost,
gama, 2, n_fields);
#endif // HLLC
#ifdef HLL
hipLaunchKernelGGL(Calculate_HLL_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lx, Q_Rx, F_x, nx, ny, nz, n_ghost, gama,
0, n_fields);
hipLaunchKernelGGL(Calculate_HLL_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Ly, Q_Ry, F_y, nx, ny, nz, n_ghost, gama,
1, n_fields);
hipLaunchKernelGGL(Calculate_HLL_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lz, Q_Rz, F_z, nx, ny, nz, n_ghost, gama,
2, n_fields);
#endif  // HLL
#ifdef HLLD
hipLaunchKernelGGL(mhd::Calculate_HLLD_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lx, Q_Rx,
&(dev_conserved_half[(grid_enum::magnetic_x)*n_cells]), F_x, n_cells, gama, 0, n_fields);
hipLaunchKernelGGL(mhd::Calculate_HLLD_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Ly, Q_Ry,
&(dev_conserved_half[(grid_enum::magnetic_y)*n_cells]), F_y, n_cells, gama, 1, n_fields);
hipLaunchKernelGGL(mhd::Calculate_HLLD_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lz, Q_Rz,
&(dev_conserved_half[(grid_enum::magnetic_z)*n_cells]), F_z, n_cells, gama, 2, n_fields);
#endif // HLLD
CudaCheckError();
#ifdef DE
// Compute the divergence of Vel before updating the conserved array, this
// solves synchronization issues when adding this term on
// Update_Conserved_Variables_3D
hipLaunchKernelGGL(Partial_Update_Advected_Internal_Energy_3D, dim1dGrid, dim1dBlock, 0, 0, dev_conserved, Q_Lx, Q_Rx,
Q_Ly, Q_Ry, Q_Lz, Q_Rz, nx, ny, nz, n_ghost, dx, dy, dz, dt, gama, n_fields);
CudaCheckError();
#endif // DE
#ifdef MHD
// Step 5.5: Compute the Constrained transport electric fields
hipLaunchKernelGGL(mhd::Calculate_CT_Electric_Fields, dim1dGrid, dim1dBlock, 0, 0, F_x, F_y, F_z, dev_conserved_half,
ctElectricFields, nx, ny, nz, n_cells);
CudaCheckError();
#endif // MHD
// Step 6: Update the conserved variable array
hipLaunchKernelGGL(Update_Conserved_Variables_3D, dim1dGrid, dim1dBlock, 0, 0, dev_conserved, Q_Lx, Q_Rx, Q_Ly, Q_Ry,
Q_Lz, Q_Rz, F_x, F_y, F_z, nx, ny, nz, x_off, y_off, z_off, n_ghost, dx, dy, dz, xbound, ybound,
zbound, dt, gama, n_fields, density_floor, dev_grav_potential);
CudaCheckError();
#ifdef MHD
// Update the magnetic fields
hipLaunchKernelGGL(mhd::Update_Magnetic_Field_3D, dim1dGrid, dim1dBlock, 0, 0, dev_conserved, dev_conserved,
ctElectricFields, nx, ny, nz, n_cells, dt, dx, dy, dz);
CudaCheckError();
#endif // MHD
#ifdef DE
hipLaunchKernelGGL(Select_Internal_Energy_3D, dim1dGrid, dim1dBlock, 0, 0, dev_conserved, nx, ny, nz, n_ghost,
n_fields);
hipLaunchKernelGGL(Sync_Energies_3D, dim1dGrid, dim1dBlock, 0, 0, dev_conserved, nx, ny, nz, n_ghost, gama, n_fields);
CudaCheckError();
#endif // DE
#ifdef TEMPERATURE_FLOOR
hipLaunchKernelGGL(Apply_Temperature_Floor, dim1dGrid, dim1dBlock, 0, 0, dev_conserved, nx, ny, nz, n_ghost, n_fields,
U_floor);
CudaCheckError();
#endif // TEMPERATURE_FLOOR
return;
}
void Free_Memory_VL_3D()
{
// free the GPU memory
hipFree(dev_conserved);
hipFree(dev_conserved_half);
hipFree(Q_Lx);
hipFree(Q_Rx);
hipFree(Q_Ly);
hipFree(Q_Ry);
hipFree(Q_Lz);
hipFree(Q_Rz);
hipFree(F_x);
hipFree(F_y);
hipFree(F_z);
hipFree(ctElectricFields);
}
__global__ void Update_Conserved_Variables_3D_half(Real *dev_conserved, Real *dev_conserved_half, Real *dev_F_x,
Real *dev_F_y, Real *dev_F_z, int nx, int ny, int nz, int n_ghost,
Real dx, Real dy, Real dz, Real dt, Real gamma, int n_fields,
Real density_floor)
{
Real dtodx = dt / dx;
Real dtody = dt / dy;
Real dtodz = dt / dz;
int n_cells = nx * ny * nz;
// get a global thread ID
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int zid = tid / (nx * ny);
int yid = (tid - zid * nx * ny) / nx;
int xid = tid - zid * nx * ny - yid * nx;
int id = xid + yid * nx + zid * nx * ny;
int imo = xid - 1 + yid * nx + zid * nx * ny;
int jmo = xid + (yid - 1) * nx + zid * nx * ny;
int kmo = xid + yid * nx + (zid - 1) * nx * ny;
#ifdef DE
Real d, d_inv, vx, vy, vz;
Real vx_imo, vx_ipo, vy_jmo, vy_jpo, vz_kmo, vz_kpo, P, E, E_kin, GE;
int ipo, jpo, kpo;
#endif // DE
#ifdef DENSITY_FLOOR
Real dens_0;
#endif // DENSITY_FLOOR
// threads corresponding to all cells except outer ring of ghost cells do the
// calculation
if (xid > 0 && xid < nx - 1 && yid > 0 && yid < ny - 1 && zid > 0 && zid < nz - 1) {
#ifdef DE
d = dev_conserved[id];
d_inv = 1.0 / d;
vx = dev_conserved[1 * n_cells + id] * d_inv;
vy = dev_conserved[2 * n_cells + id] * d_inv;
vz = dev_conserved[3 * n_cells + id] * d_inv;
// PRESSURE_DE
E = dev_conserved[4 * n_cells + id];
GE = dev_conserved[(n_fields - 1) * n_cells + id];
E_kin = hydro_utilities::Calc_Kinetic_Energy_From_Velocity(d, vx, vy, vz);
#ifdef MHD
// Add the magnetic energy
auto const [centeredBx, centeredBy, centeredBz] =
mhd::utils::cellCenteredMagneticFields(dev_conserved, id, xid, yid, zid, n_cells, nx, ny);
E_kin += mhd::utils::computeMagneticEnergy(centeredBx, centeredBy, centeredBz);
#endif // MHD
P = hydro_utilities::Get_Pressure_From_DE(E, E - E_kin, GE, gamma);
P = fmax(P, (Real)TINY_NUMBER);
// P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0);
// if (d < 0.0 || d != d) printf("Negative density before half step update.\n");
// if (P < 0.0) printf("%d Negative pressure before half step update.\n", id);
ipo = xid + 1 + yid * nx + zid * nx * ny;
jpo = xid + (yid + 1) * nx + zid * nx * ny;
kpo = xid + yid * nx + (zid + 1) * nx * ny;
vx_imo = dev_conserved[1 * n_cells + imo] / dev_conserved[imo];
vx_ipo = dev_conserved[1 * n_cells + ipo] / dev_conserved[ipo];
vy_jmo = dev_conserved[2 * n_cells + jmo] / dev_conserved[jmo];
vy_jpo = dev_conserved[2 * n_cells + jpo] / dev_conserved[jpo];
vz_kmo = dev_conserved[3 * n_cells + kmo] / dev_conserved[kmo];
vz_kpo = dev_conserved[3 * n_cells + kpo] / dev_conserved[kpo];
#endif // DE
// update the conserved variable array
dev_conserved_half[id] = dev_conserved[id] + dtodx * (dev_F_x[imo] - dev_F_x[id]) +
dtody * (dev_F_y[jmo] - dev_F_y[id]) + dtodz * (dev_F_z[kmo] - dev_F_z[id]);
dev_conserved_half[n_cells + id] = dev_conserved[n_cells + id] +
dtodx * (dev_F_x[n_cells + imo] - dev_F_x[n_cells + id]) +
dtody * (dev_F_y[n_cells + jmo] - dev_F_y[n_cells + id]) +
dtodz * (dev_F_z[n_cells + kmo] - dev_F_z[n_cells + id]);
dev_conserved_half[2 * n_cells + id] = dev_conserved[2 * n_cells + id] +
dtodx * (dev_F_x[2 * n_cells + imo] - dev_F_x[2 * n_cells + id]) +
dtody * (dev_F_y[2 * n_cells + jmo] - dev_F_y[2 * n_cells + id]) +
dtodz * (dev_F_z[2 * n_cells + kmo] - dev_F_z[2 * n_cells + id]);
dev_conserved_half[3 * n_cells + id] = dev_conserved[3 * n_cells + id] +
dtodx * (dev_F_x[3 * n_cells + imo] - dev_F_x[3 * n_cells + id]) +
dtody * (dev_F_y[3 * n_cells + jmo] - dev_F_y[3 * n_cells + id]) +
dtodz * (dev_F_z[3 * n_cells + kmo] - dev_F_z[3 * n_cells + id]);
dev_conserved_half[4 * n_cells + id] = dev_conserved[4 * n_cells + id] +
dtodx * (dev_F_x[4 * n_cells + imo] - dev_F_x[4 * n_cells + id]) +
dtody * (dev_F_y[4 * n_cells + jmo] - dev_F_y[4 * n_cells + id]) +
dtodz * (dev_F_z[4 * n_cells + kmo] - dev_F_z[4 * n_cells + id]);
#ifdef SCALAR
for (int i = 0; i < NSCALARS; i++) {
dev_conserved_half[(5 + i) * n_cells + id] =
dev_conserved[(5 + i) * n_cells + id] +
dtodx * (dev_F_x[(5 + i) * n_cells + imo] - dev_F_x[(5 + i) * n_cells + id]) +
dtody * (dev_F_y[(5 + i) * n_cells + jmo] - dev_F_y[(5 + i) * n_cells + id]) +
dtodz * (dev_F_z[(5 + i) * n_cells + kmo] - dev_F_z[(5 + i) * n_cells + id]);
}
#endif // SCALAR
#ifdef DE
dev_conserved_half[(n_fields - 1) * n_cells + id] =
dev_conserved[(n_fields - 1) * n_cells + id] +
dtodx * (dev_F_x[(n_fields - 1) * n_cells + imo] - dev_F_x[(n_fields - 1) * n_cells + id]) +
dtody * (dev_F_y[(n_fields - 1) * n_cells + jmo] - dev_F_y[(n_fields - 1) * n_cells + id]) +
dtodz * (dev_F_z[(n_fields - 1) * n_cells + kmo] - dev_F_z[(n_fields - 1) * n_cells + id]) +
0.5 * P * (dtodx * (vx_imo - vx_ipo) + dtody * (vy_jmo - vy_jpo) + dtodz * (vz_kmo - vz_kpo));
#endif // DE
#ifdef DENSITY_FLOOR
if (dev_conserved_half[id] < density_floor) {
dens_0 = dev_conserved_half[id];
printf("###Thread density change %f -> %f \n", dens_0, density_floor);
dev_conserved_half[id] = density_floor;
// Scale the conserved values to the new density
dev_conserved_half[1 * n_cells + id] *= (density_floor / dens_0);
dev_conserved_half[2 * n_cells + id] *= (density_floor / dens_0);
dev_conserved_half[3 * n_cells + id] *= (density_floor / dens_0);
dev_conserved_half[4 * n_cells + id] *= (density_floor / dens_0);
#ifdef DE
dev_conserved_half[(n_fields - 1) * n_cells + id] *= (density_floor / dens_0);
#endif // DE
}
#endif // DENSITY_FLOOR
}
}
#endif // CUDA and VL
| 63445ecbf28bc1db4a429882cf2f5e0c048a7036.cu | /*! \file VL_3D_cuda.cu
 * \brief Definitions of the CUDA 3D VL algorithm functions. MHD algorithm
* from Stone & Gardiner 2009 "A simple unsplit Godunov method for
* multidimensional MHD"
*/
#if defined(CUDA) && defined(VL)
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "../global/global.h"
#include "../global/global_cuda.h"
#include "../hydro/hydro_cuda.h"
#include "../integrators/VL_3D_cuda.h"
#include "../io/io.h"
#include "../mhd/ct_electric_fields.h"
#include "../mhd/magnetic_update.h"
#include "../reconstruction/pcm_cuda.h"
#include "../reconstruction/plmc_cuda.h"
#include "../reconstruction/plmp_cuda.h"
#include "../reconstruction/ppmc_cuda.h"
#include "../reconstruction/ppmp_cuda.h"
#include "../riemann_solvers/exact_cuda.h"
#include "../riemann_solvers/hll_cuda.h"
#include "../riemann_solvers/hllc_cuda.h"
#include "../riemann_solvers/hlld_cuda.h"
#include "../riemann_solvers/roe_cuda.h"
#include "../utils/gpu.hpp"
#include "../utils/hydro_utilities.h"
__global__ void Update_Conserved_Variables_3D_half(Real *dev_conserved, Real *dev_conserved_half, Real *dev_F_x,
Real *dev_F_y, Real *dev_F_z, int nx, int ny, int nz, int n_ghost,
Real dx, Real dy, Real dz, Real dt, Real gamma, int n_fields,
Real density_floor);
void VL_Algorithm_3D_CUDA(Real *d_conserved, Real *d_grav_potential, int nx, int ny, int nz, int x_off, int y_off,
int z_off, int n_ghost, Real dx, Real dy, Real dz, Real xbound, Real ybound, Real zbound,
Real dt, int n_fields, Real density_floor, Real U_floor, Real *host_grav_potential)
{
// Here, *dev_conserved contains the entire
// set of conserved variables on the grid
// concatenated into a 1-d array
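// Field-major layout, as indexed throughout this file: the value of field f for
// cell `id` lives at dev_conserved[f * n_cells + id], with
// id = xid + yid * nx + zid * nx * ny.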
int n_cells = nx * ny * nz;
int ngrid = (n_cells + TPB - 1) / TPB;
// set values for GPU kernels
// number of blocks per 1D grid
dim3 dim1dGrid(ngrid, 1, 1);
// number of threads per 1D block
dim3 dim1dBlock(TPB, 1, 1);
// host_grav_potential is NULL if not using GRAVITY
temp_potential = host_grav_potential;
if (!memory_allocated) {
// allocate memory on the GPU
dev_conserved = d_conserved;
// Set the size of the interface and flux arrays
#ifdef MHD
// In MHD/Constrained Transport the interface arrays have one fewer field
// since the magnetic field that is stored on the face does not require
// reconstruction. Similarly, the fluxes have one fewer field since the
// magnetic field on that face doesn't have an associated flux. Each
// interface array stores the magnetic fields on that interface that are
// not perpendicular to the interface, arranged cyclically. I.e. the
// `Q_Lx` interface stores the reconstructed Y and Z magnetic fields in
// that order, the `Q_Ly` interface stores the Z and X magnetic fields in
// that order, and the `Q_Lz` interface stores the X and Y magnetic fields
// in that order. These fields can be indexed with the Q_?_dir grid_enums.
// The interface state arrays store the state at the interface on the
// "right" side of the cell, so the flux arrays store the fluxes through
// the right interface.
//
// According to Stone et al. 2008 section 5.3 and the source code of
// Athena, the following equations relate the magnetic fluxes to the
// face-centered electric fields/EMFs. -cross(V,B)x is the negative of the
// x-component of V cross B. Note that "X" is the direction the solver is
// running in this case, not necessarily the true "X".
// F_x[(grid_enum::fluxX_magnetic_z)*n_cells] = VxBy - BxVy = -(-cross(V,B))z = -EMF_Z
// F_x[(grid_enum::fluxX_magnetic_y)*n_cells] = VxBz - BxVz = (-cross(V,B))y = EMF_Y
// F_y[(grid_enum::fluxY_magnetic_x)*n_cells] = VxBy - BxVy = -(-cross(V,B))z = -EMF_X
// F_y[(grid_enum::fluxY_magnetic_z)*n_cells] = VxBz - BxVz = (-cross(V,B))y = EMF_Z
// F_z[(grid_enum::fluxZ_magnetic_y)*n_cells] = VxBy - BxVy = -(-cross(V,B))z = -EMF_Y
// F_z[(grid_enum::fluxZ_magnetic_x)*n_cells] = VxBz - BxVz = (-cross(V,B))y = EMF_X
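// Reading sketch (illustrative only, using just the names listed above): for cell
// index `id`, the quantity stored as -EMF_Z in the x-flux array would be read as
//   F_x[(grid_enum::fluxX_magnetic_z) * n_cells + id]
// and the +EMF_Y entry as F_x[(grid_enum::fluxX_magnetic_y) * n_cells + id].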
size_t const arraySize = (n_fields - 1) * n_cells * sizeof(Real);
size_t const ctArraySize = 3 * n_cells * sizeof(Real);
#else // not MHD
size_t const arraySize = n_fields * n_cells * sizeof(Real);
#endif // MHD
CudaSafeCall(cudaMalloc((void **)&dev_conserved_half, n_fields * n_cells * sizeof(Real)));
CudaSafeCall(cudaMalloc((void **)&Q_Lx, arraySize));
CudaSafeCall(cudaMalloc((void **)&Q_Rx, arraySize));
CudaSafeCall(cudaMalloc((void **)&Q_Ly, arraySize));
CudaSafeCall(cudaMalloc((void **)&Q_Ry, arraySize));
CudaSafeCall(cudaMalloc((void **)&Q_Lz, arraySize));
CudaSafeCall(cudaMalloc((void **)&Q_Rz, arraySize));
CudaSafeCall(cudaMalloc((void **)&F_x, arraySize));
CudaSafeCall(cudaMalloc((void **)&F_y, arraySize));
CudaSafeCall(cudaMalloc((void **)&F_z, arraySize));
cuda_utilities::initGpuMemory(dev_conserved_half, n_fields * n_cells * sizeof(Real));
cuda_utilities::initGpuMemory(Q_Lx, arraySize);
cuda_utilities::initGpuMemory(Q_Rx, arraySize);
cuda_utilities::initGpuMemory(Q_Ly, arraySize);
cuda_utilities::initGpuMemory(Q_Ry, arraySize);
cuda_utilities::initGpuMemory(Q_Lz, arraySize);
cuda_utilities::initGpuMemory(Q_Rz, arraySize);
cuda_utilities::initGpuMemory(F_x, arraySize);
cuda_utilities::initGpuMemory(F_y, arraySize);
cuda_utilities::initGpuMemory(F_z, arraySize);
#ifdef MHD
CudaSafeCall(cudaMalloc((void **)&ctElectricFields, ctArraySize));
#endif // MHD
#if defined(GRAVITY)
dev_grav_potential = d_grav_potential;
#else // not GRAVITY
dev_grav_potential = NULL;
#endif // GRAVITY
// If memory is single-allocated: memory_allocated becomes true and
// successive timesteps skip the allocation. If the memory is not
// single-allocated: memory_allocated stays false and memory is allocated
// every timestep.
memory_allocated = true;
}
#if defined(GRAVITY) && !defined(GRAVITY_GPU)
CudaSafeCall(cudaMemcpy(dev_grav_potential, temp_potential, n_cells * sizeof(Real), cudaMemcpyHostToDevice));
#endif // GRAVITY and GRAVITY_GPU
// Step 1: Use PCM reconstruction to put primitive variables into interface
// arrays
hipLaunchKernelGGL(PCM_Reconstruction_3D, dim1dGrid, dim1dBlock, 0, 0, dev_conserved, Q_Lx, Q_Rx, Q_Ly, Q_Ry, Q_Lz,
Q_Rz, nx, ny, nz, n_ghost, gama, n_fields);
CudaCheckError();
// Step 2: Calculate first-order upwind fluxes
#ifdef EXACT
hipLaunchKernelGGL(Calculate_Exact_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lx, Q_Rx, F_x, nx, ny, nz, n_ghost,
gama, 0, n_fields);
hipLaunchKernelGGL(Calculate_Exact_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Ly, Q_Ry, F_y, nx, ny, nz, n_ghost,
gama, 1, n_fields);
hipLaunchKernelGGL(Calculate_Exact_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lz, Q_Rz, F_z, nx, ny, nz, n_ghost,
gama, 2, n_fields);
#endif // EXACT
#ifdef ROE
hipLaunchKernelGGL(Calculate_Roe_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lx, Q_Rx, F_x, nx, ny, nz, n_ghost, gama,
0, n_fields);
hipLaunchKernelGGL(Calculate_Roe_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Ly, Q_Ry, F_y, nx, ny, nz, n_ghost, gama,
1, n_fields);
hipLaunchKernelGGL(Calculate_Roe_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lz, Q_Rz, F_z, nx, ny, nz, n_ghost, gama,
2, n_fields);
#endif // ROE
#ifdef HLLC
hipLaunchKernelGGL(Calculate_HLLC_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lx, Q_Rx, F_x, nx, ny, nz, n_ghost,
gama, 0, n_fields);
hipLaunchKernelGGL(Calculate_HLLC_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Ly, Q_Ry, F_y, nx, ny, nz, n_ghost,
gama, 1, n_fields);
hipLaunchKernelGGL(Calculate_HLLC_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lz, Q_Rz, F_z, nx, ny, nz, n_ghost,
gama, 2, n_fields);
#endif // HLLC
#ifdef HLL
hipLaunchKernelGGL(Calculate_HLL_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lx, Q_Rx, F_x, nx, ny, nz, n_ghost, gama,
0, n_fields);
hipLaunchKernelGGL(Calculate_HLL_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Ly, Q_Ry, F_y, nx, ny, nz, n_ghost, gama,
1, n_fields);
hipLaunchKernelGGL(Calculate_HLL_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lz, Q_Rz, F_z, nx, ny, nz, n_ghost, gama,
2, n_fields);
#endif // HLL
#ifdef HLLD
hipLaunchKernelGGL(mhd::Calculate_HLLD_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lx, Q_Rx,
&(dev_conserved[(grid_enum::magnetic_x)*n_cells]), F_x, n_cells, gama, 0, n_fields);
hipLaunchKernelGGL(mhd::Calculate_HLLD_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Ly, Q_Ry,
&(dev_conserved[(grid_enum::magnetic_y)*n_cells]), F_y, n_cells, gama, 1, n_fields);
hipLaunchKernelGGL(mhd::Calculate_HLLD_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lz, Q_Rz,
&(dev_conserved[(grid_enum::magnetic_z)*n_cells]), F_z, n_cells, gama, 2, n_fields);
#endif // HLLD
CudaCheckError();
#ifdef MHD
// Step 2.5: Compute the Constrained transport electric fields
hipLaunchKernelGGL(mhd::Calculate_CT_Electric_Fields, dim1dGrid, dim1dBlock, 0, 0, F_x, F_y, F_z, dev_conserved,
ctElectricFields, nx, ny, nz, n_cells);
CudaCheckError();
#endif // MHD
// Step 3: Update the conserved variables half a timestep
hipLaunchKernelGGL(Update_Conserved_Variables_3D_half, dim1dGrid, dim1dBlock, 0, 0, dev_conserved, dev_conserved_half,
F_x, F_y, F_z, nx, ny, nz, n_ghost, dx, dy, dz, 0.5 * dt, gama, n_fields, density_floor);
CudaCheckError();
#ifdef MHD
// Update the magnetic fields
hipLaunchKernelGGL(mhd::Update_Magnetic_Field_3D, dim1dGrid, dim1dBlock, 0, 0, dev_conserved, dev_conserved_half,
ctElectricFields, nx, ny, nz, n_cells, 0.5 * dt, dx, dy, dz);
CudaCheckError();
#endif // MHD
// Step 4: Construct left and right interface values using updated conserved
// variables
#ifdef PCM
hipLaunchKernelGGL(PCM_Reconstruction_3D, dim1dGrid, dim1dBlock, 0, 0, dev_conserved_half, Q_Lx, Q_Rx, Q_Ly, Q_Ry,
Q_Lz, Q_Rz, nx, ny, nz, n_ghost, gama, n_fields);
#endif // PCM
#ifdef PLMP
hipLaunchKernelGGL(PLMP_cuda, dim1dGrid, dim1dBlock, 0, 0, dev_conserved_half, Q_Lx, Q_Rx, nx, ny, nz, n_ghost, dx,
dt, gama, 0, n_fields);
hipLaunchKernelGGL(PLMP_cuda, dim1dGrid, dim1dBlock, 0, 0, dev_conserved_half, Q_Ly, Q_Ry, nx, ny, nz, n_ghost, dy,
dt, gama, 1, n_fields);
hipLaunchKernelGGL(PLMP_cuda, dim1dGrid, dim1dBlock, 0, 0, dev_conserved_half, Q_Lz, Q_Rz, nx, ny, nz, n_ghost, dz,
dt, gama, 2, n_fields);
#endif // PLMP
#ifdef PLMC
hipLaunchKernelGGL(PLMC_cuda, dim1dGrid, dim1dBlock, 0, 0, dev_conserved_half, Q_Lx, Q_Rx, nx, ny, nz, dx, dt, gama,
0, n_fields);
hipLaunchKernelGGL(PLMC_cuda, dim1dGrid, dim1dBlock, 0, 0, dev_conserved_half, Q_Ly, Q_Ry, nx, ny, nz, dy, dt, gama,
1, n_fields);
hipLaunchKernelGGL(PLMC_cuda, dim1dGrid, dim1dBlock, 0, 0, dev_conserved_half, Q_Lz, Q_Rz, nx, ny, nz, dz, dt, gama,
2, n_fields);
#endif // PLMC
#ifdef PPMP
hipLaunchKernelGGL(PPMP_cuda, dim1dGrid, dim1dBlock, 0, 0, dev_conserved_half, Q_Lx, Q_Rx, nx, ny, nz, n_ghost, dx,
dt, gama, 0, n_fields);
hipLaunchKernelGGL(PPMP_cuda, dim1dGrid, dim1dBlock, 0, 0, dev_conserved_half, Q_Ly, Q_Ry, nx, ny, nz, n_ghost, dy,
dt, gama, 1, n_fields);
hipLaunchKernelGGL(PPMP_cuda, dim1dGrid, dim1dBlock, 0, 0, dev_conserved_half, Q_Lz, Q_Rz, nx, ny, nz, n_ghost, dz,
dt, gama, 2, n_fields);
#endif // PPMP
#ifdef PPMC
hipLaunchKernelGGL(PPMC_VL, dim1dGrid, dim1dBlock, 0, 0, dev_conserved_half, Q_Lx, Q_Rx, nx, ny, nz, gama, 0);
hipLaunchKernelGGL(PPMC_VL, dim1dGrid, dim1dBlock, 0, 0, dev_conserved_half, Q_Ly, Q_Ry, nx, ny, nz, gama, 1);
hipLaunchKernelGGL(PPMC_VL, dim1dGrid, dim1dBlock, 0, 0, dev_conserved_half, Q_Lz, Q_Rz, nx, ny, nz, gama, 2);
#endif // PPMC
CudaCheckError();
// Step 5: Calculate the fluxes again
#ifdef EXACT
hipLaunchKernelGGL(Calculate_Exact_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lx, Q_Rx, F_x, nx, ny, nz, n_ghost,
gama, 0, n_fields);
hipLaunchKernelGGL(Calculate_Exact_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Ly, Q_Ry, F_y, nx, ny, nz, n_ghost,
gama, 1, n_fields);
hipLaunchKernelGGL(Calculate_Exact_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lz, Q_Rz, F_z, nx, ny, nz, n_ghost,
gama, 2, n_fields);
#endif // EXACT
#ifdef ROE
hipLaunchKernelGGL(Calculate_Roe_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lx, Q_Rx, F_x, nx, ny, nz, n_ghost, gama,
0, n_fields);
hipLaunchKernelGGL(Calculate_Roe_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Ly, Q_Ry, F_y, nx, ny, nz, n_ghost, gama,
1, n_fields);
hipLaunchKernelGGL(Calculate_Roe_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lz, Q_Rz, F_z, nx, ny, nz, n_ghost, gama,
2, n_fields);
#endif // ROE
#ifdef HLLC
hipLaunchKernelGGL(Calculate_HLLC_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lx, Q_Rx, F_x, nx, ny, nz, n_ghost,
gama, 0, n_fields);
hipLaunchKernelGGL(Calculate_HLLC_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Ly, Q_Ry, F_y, nx, ny, nz, n_ghost,
gama, 1, n_fields);
hipLaunchKernelGGL(Calculate_HLLC_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lz, Q_Rz, F_z, nx, ny, nz, n_ghost,
gama, 2, n_fields);
#endif // HLLC
#ifdef HLL
hipLaunchKernelGGL(Calculate_HLL_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lx, Q_Rx, F_x, nx, ny, nz, n_ghost, gama,
0, n_fields);
hipLaunchKernelGGL(Calculate_HLL_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Ly, Q_Ry, F_y, nx, ny, nz, n_ghost, gama,
1, n_fields);
hipLaunchKernelGGL(Calculate_HLL_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lz, Q_Rz, F_z, nx, ny, nz, n_ghost, gama,
2, n_fields);
#endif // HLL
#ifdef HLLD
hipLaunchKernelGGL(mhd::Calculate_HLLD_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lx, Q_Rx,
&(dev_conserved_half[(grid_enum::magnetic_x)*n_cells]), F_x, n_cells, gama, 0, n_fields);
hipLaunchKernelGGL(mhd::Calculate_HLLD_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Ly, Q_Ry,
&(dev_conserved_half[(grid_enum::magnetic_y)*n_cells]), F_y, n_cells, gama, 1, n_fields);
hipLaunchKernelGGL(mhd::Calculate_HLLD_Fluxes_CUDA, dim1dGrid, dim1dBlock, 0, 0, Q_Lz, Q_Rz,
&(dev_conserved_half[(grid_enum::magnetic_z)*n_cells]), F_z, n_cells, gama, 2, n_fields);
#endif // HLLD
CudaCheckError();
#ifdef DE
// Compute the divergence of Vel before updating the conserved array, this
// solves synchronization issues when adding this term on
// Update_Conserved_Variables_3D
hipLaunchKernelGGL(Partial_Update_Advected_Internal_Energy_3D, dim1dGrid, dim1dBlock, 0, 0, dev_conserved, Q_Lx, Q_Rx,
Q_Ly, Q_Ry, Q_Lz, Q_Rz, nx, ny, nz, n_ghost, dx, dy, dz, dt, gama, n_fields);
CudaCheckError();
#endif // DE
#ifdef MHD
// Step 5.5: Compute the Constrained transport electric fields
hipLaunchKernelGGL(mhd::Calculate_CT_Electric_Fields, dim1dGrid, dim1dBlock, 0, 0, F_x, F_y, F_z, dev_conserved_half,
ctElectricFields, nx, ny, nz, n_cells);
CudaCheckError();
#endif // MHD
// Step 6: Update the conserved variable array
hipLaunchKernelGGL(Update_Conserved_Variables_3D, dim1dGrid, dim1dBlock, 0, 0, dev_conserved, Q_Lx, Q_Rx, Q_Ly, Q_Ry,
Q_Lz, Q_Rz, F_x, F_y, F_z, nx, ny, nz, x_off, y_off, z_off, n_ghost, dx, dy, dz, xbound, ybound,
zbound, dt, gama, n_fields, density_floor, dev_grav_potential);
CudaCheckError();
#ifdef MHD
// Update the magnetic fields
hipLaunchKernelGGL(mhd::Update_Magnetic_Field_3D, dim1dGrid, dim1dBlock, 0, 0, dev_conserved, dev_conserved,
ctElectricFields, nx, ny, nz, n_cells, dt, dx, dy, dz);
CudaCheckError();
#endif // MHD
#ifdef DE
hipLaunchKernelGGL(Select_Internal_Energy_3D, dim1dGrid, dim1dBlock, 0, 0, dev_conserved, nx, ny, nz, n_ghost,
n_fields);
hipLaunchKernelGGL(Sync_Energies_3D, dim1dGrid, dim1dBlock, 0, 0, dev_conserved, nx, ny, nz, n_ghost, gama, n_fields);
CudaCheckError();
#endif // DE
#ifdef TEMPERATURE_FLOOR
hipLaunchKernelGGL(Apply_Temperature_Floor, dim1dGrid, dim1dBlock, 0, 0, dev_conserved, nx, ny, nz, n_ghost, n_fields,
U_floor);
CudaCheckError();
#endif // TEMPERATURE_FLOOR
return;
}
void Free_Memory_VL_3D()
{
// free the GPU memory
cudaFree(dev_conserved);
cudaFree(dev_conserved_half);
cudaFree(Q_Lx);
cudaFree(Q_Rx);
cudaFree(Q_Ly);
cudaFree(Q_Ry);
cudaFree(Q_Lz);
cudaFree(Q_Rz);
cudaFree(F_x);
cudaFree(F_y);
cudaFree(F_z);
cudaFree(ctElectricFields);
}
__global__ void Update_Conserved_Variables_3D_half(Real *dev_conserved, Real *dev_conserved_half, Real *dev_F_x,
Real *dev_F_y, Real *dev_F_z, int nx, int ny, int nz, int n_ghost,
Real dx, Real dy, Real dz, Real dt, Real gamma, int n_fields,
Real density_floor)
{
Real dtodx = dt / dx;
Real dtody = dt / dy;
Real dtodz = dt / dz;
int n_cells = nx * ny * nz;
// get a global thread ID
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int zid = tid / (nx * ny);
int yid = (tid - zid * nx * ny) / nx;
int xid = tid - zid * nx * ny - yid * nx;
int id = xid + yid * nx + zid * nx * ny;
int imo = xid - 1 + yid * nx + zid * nx * ny;
int jmo = xid + (yid - 1) * nx + zid * nx * ny;
int kmo = xid + yid * nx + (zid - 1) * nx * ny;
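// Decompose the 1D thread index into 3D cell indices (xid, yid, zid); imo, jmo,
// and kmo are the linear indices of the i-1, j-1, and k-1 neighbors used for the
// flux differences below.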
#ifdef DE
Real d, d_inv, vx, vy, vz;
Real vx_imo, vx_ipo, vy_jmo, vy_jpo, vz_kmo, vz_kpo, P, E, E_kin, GE;
int ipo, jpo, kpo;
#endif // DE
#ifdef DENSITY_FLOOR
Real dens_0;
#endif // DENSITY_FLOOR
// threads corresponding to all cells except outer ring of ghost cells do the
// calculation
if (xid > 0 && xid < nx - 1 && yid > 0 && yid < ny - 1 && zid > 0 && zid < nz - 1) {
#ifdef DE
d = dev_conserved[id];
d_inv = 1.0 / d;
vx = dev_conserved[1 * n_cells + id] * d_inv;
vy = dev_conserved[2 * n_cells + id] * d_inv;
vz = dev_conserved[3 * n_cells + id] * d_inv;
// PRESSURE_DE
E = dev_conserved[4 * n_cells + id];
GE = dev_conserved[(n_fields - 1) * n_cells + id];
E_kin = hydro_utilities::Calc_Kinetic_Energy_From_Velocity(d, vx, vy, vz);
#ifdef MHD
// Add the magnetic energy
auto const [centeredBx, centeredBy, centeredBz] =
mhd::utils::cellCenteredMagneticFields(dev_conserved, id, xid, yid, zid, n_cells, nx, ny);
E_kin += mhd::utils::computeMagneticEnergy(centeredBx, centeredBy, centeredBz);
#endif // MHD
P = hydro_utilities::Get_Pressure_From_DE(E, E - E_kin, GE, gamma);
P = fmax(P, (Real)TINY_NUMBER);
// P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) *
// (gamma - 1.0);
// if (d < 0.0 || d != d) printf("Negative density before half step
// update.\n"); if (P < 0.0) printf("%d Negative pressure before half step
// update.\n", id);
ipo = xid + 1 + yid * nx + zid * nx * ny;
jpo = xid + (yid + 1) * nx + zid * nx * ny;
kpo = xid + yid * nx + (zid + 1) * nx * ny;
vx_imo = dev_conserved[1 * n_cells + imo] / dev_conserved[imo];
vx_ipo = dev_conserved[1 * n_cells + ipo] / dev_conserved[ipo];
vy_jmo = dev_conserved[2 * n_cells + jmo] / dev_conserved[jmo];
vy_jpo = dev_conserved[2 * n_cells + jpo] / dev_conserved[jpo];
vz_kmo = dev_conserved[3 * n_cells + kmo] / dev_conserved[kmo];
vz_kpo = dev_conserved[3 * n_cells + kpo] / dev_conserved[kpo];
#endif // DE
// update the conserved variable array
dev_conserved_half[id] = dev_conserved[id] + dtodx * (dev_F_x[imo] - dev_F_x[id]) +
dtody * (dev_F_y[jmo] - dev_F_y[id]) + dtodz * (dev_F_z[kmo] - dev_F_z[id]);
dev_conserved_half[n_cells + id] = dev_conserved[n_cells + id] +
dtodx * (dev_F_x[n_cells + imo] - dev_F_x[n_cells + id]) +
dtody * (dev_F_y[n_cells + jmo] - dev_F_y[n_cells + id]) +
dtodz * (dev_F_z[n_cells + kmo] - dev_F_z[n_cells + id]);
dev_conserved_half[2 * n_cells + id] = dev_conserved[2 * n_cells + id] +
dtodx * (dev_F_x[2 * n_cells + imo] - dev_F_x[2 * n_cells + id]) +
dtody * (dev_F_y[2 * n_cells + jmo] - dev_F_y[2 * n_cells + id]) +
dtodz * (dev_F_z[2 * n_cells + kmo] - dev_F_z[2 * n_cells + id]);
dev_conserved_half[3 * n_cells + id] = dev_conserved[3 * n_cells + id] +
dtodx * (dev_F_x[3 * n_cells + imo] - dev_F_x[3 * n_cells + id]) +
dtody * (dev_F_y[3 * n_cells + jmo] - dev_F_y[3 * n_cells + id]) +
dtodz * (dev_F_z[3 * n_cells + kmo] - dev_F_z[3 * n_cells + id]);
dev_conserved_half[4 * n_cells + id] = dev_conserved[4 * n_cells + id] +
dtodx * (dev_F_x[4 * n_cells + imo] - dev_F_x[4 * n_cells + id]) +
dtody * (dev_F_y[4 * n_cells + jmo] - dev_F_y[4 * n_cells + id]) +
dtodz * (dev_F_z[4 * n_cells + kmo] - dev_F_z[4 * n_cells + id]);
#ifdef SCALAR
for (int i = 0; i < NSCALARS; i++) {
dev_conserved_half[(5 + i) * n_cells + id] =
dev_conserved[(5 + i) * n_cells + id] +
dtodx * (dev_F_x[(5 + i) * n_cells + imo] - dev_F_x[(5 + i) * n_cells + id]) +
dtody * (dev_F_y[(5 + i) * n_cells + jmo] - dev_F_y[(5 + i) * n_cells + id]) +
dtodz * (dev_F_z[(5 + i) * n_cells + kmo] - dev_F_z[(5 + i) * n_cells + id]);
}
#endif // SCALAR
#ifdef DE
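// Advect the separately tracked internal energy and add a pressure source term
// built from the centered velocity differences gathered above (a discrete
// P*div(v) work term).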
dev_conserved_half[(n_fields - 1) * n_cells + id] =
dev_conserved[(n_fields - 1) * n_cells + id] +
dtodx * (dev_F_x[(n_fields - 1) * n_cells + imo] - dev_F_x[(n_fields - 1) * n_cells + id]) +
dtody * (dev_F_y[(n_fields - 1) * n_cells + jmo] - dev_F_y[(n_fields - 1) * n_cells + id]) +
dtodz * (dev_F_z[(n_fields - 1) * n_cells + kmo] - dev_F_z[(n_fields - 1) * n_cells + id]) +
0.5 * P * (dtodx * (vx_imo - vx_ipo) + dtody * (vy_jmo - vy_jpo) + dtodz * (vz_kmo - vz_kpo));
#endif // DE
#ifdef DENSITY_FLOOR
if (dev_conserved_half[id] < density_floor) {
dens_0 = dev_conserved_half[id];
printf("###Thread density change %f -> %f \n", dens_0, density_floor);
dev_conserved_half[id] = density_floor;
// Scale the conserved values to the new density
dev_conserved_half[1 * n_cells + id] *= (density_floor / dens_0);
dev_conserved_half[2 * n_cells + id] *= (density_floor / dens_0);
dev_conserved_half[3 * n_cells + id] *= (density_floor / dens_0);
dev_conserved_half[4 * n_cells + id] *= (density_floor / dens_0);
#ifdef DE
dev_conserved_half[(n_fields - 1) * n_cells + id] *= (density_floor / dens_0);
#endif // DE
}
#endif // DENSITY_FLOOR
}
}
#endif // CUDA and VL
|
7bf0e3050e83e826e31257a9654aaaffd65ff09e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudaType.h"
#include "Sphere.h"
#include "Indice2D.h"
#include "IndiceTools.h"
#include "ColorTools.h"
#define MAX_DISTANCE 1e80
__global__ void raytracing(uchar4* ptrDevPixels, int w, int h, Sphere** ptrDevSpheres, int nbSpheres, float t);
__device__ void computeNearestSphere(Sphere** ptrDevSpheres, int nbSpheres, float2 floorPoint, int* nearestSphereIndex, float* brightness);
__global__ void raytracing(uchar4* ptrDevPixels, int w, int h, Sphere** ptrDevSpheres, int nbSpheres, float t) {
const int NB_THREADS = Indice2D::nbThread();
const int TID = Indice2D::tid();
const int WH = w * h;
int i, j;
float2 floorPoint;
float3 hsb;
int nearestSphereIndex;
float dz;
int s = TID;
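// Grid-stride loop over the w*h pixels: each thread handles pixels s, s + NB_THREADS,
// s + 2*NB_THREADS, ... so the whole image is covered by the launched grid.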
while (s < WH) {
IndiceTools::toIJ(s, w, &i, &j);
floorPoint.x = (float) j;
floorPoint.y = (float) i;
computeNearestSphere(ptrDevSpheres, nbSpheres, floorPoint, &nearestSphereIndex, &dz);
if (nearestSphereIndex < 0) {
hsb.x = 0;
hsb.y = 0;
hsb.z = 0;
} else {
hsb.x = ptrDevSpheres[nearestSphereIndex]->hue(t);
hsb.y = 1;
hsb.z = ptrDevSpheres[nearestSphereIndex]->brightness(dz);
}
ColorTools::HSB_TO_RVB(hsb, &ptrDevPixels[s]);
ptrDevPixels[s].w = 255;
s += NB_THREADS;
}
}
__device__ void computeNearestSphere(Sphere** ptrDevSpheres, int nbSpheres, float2 floorPoint, int* nearestSphereIndex, float* dz) {
float hCarre, currentDz, distance;
float distanceMin = MAX_DISTANCE;
*nearestSphereIndex = -1;
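// Scan every sphere and keep the closest accepted hit (smallest distance),
// returning its index and dz; nearestSphereIndex stays -1 when no sphere passes
// the isEnDessous() test for this floor point.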
for( int i = 0 ; i < nbSpheres ; i++ ) {
hCarre = ptrDevSpheres[i]->hCarre(floorPoint);
if (ptrDevSpheres[i]->isEnDessous(hCarre)) {
currentDz = ptrDevSpheres[i]->dz(hCarre);
distance = ptrDevSpheres[i]->distance(currentDz);
if (distance < distanceMin) {
distanceMin = distance;
*nearestSphereIndex = i;
*dz = currentDz;
}
}
}
}
| 7bf0e3050e83e826e31257a9654aaaffd65ff09e.cu | #include "cudaType.h"
#include "Sphere.h"
#include "Indice2D.h"
#include "IndiceTools.h"
#include "ColorTools.h"
#define MAX_DISTANCE 1e80
__global__ void raytracing(uchar4* ptrDevPixels, int w, int h, Sphere** ptrDevSpheres, int nbSpheres, float t);
__device__ void computeNearestSphere(Sphere** ptrDevSpheres, int nbSpheres, float2 floorPoint, int* nearestSphereIndex, float* brightness);
__global__ void raytracing(uchar4* ptrDevPixels, int w, int h, Sphere** ptrDevSpheres, int nbSpheres, float t) {
const int NB_THREADS = Indice2D::nbThread();
const int TID = Indice2D::tid();
const int WH = w * h;
int i, j;
float2 floorPoint;
float3 hsb;
int nearestSphereIndex;
float dz;
int s = TID;
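// Grid-stride loop over the w*h pixels: each thread handles pixels s, s + NB_THREADS,
// s + 2*NB_THREADS, ... so the whole image is covered by the launched grid.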
while (s < WH) {
IndiceTools::toIJ(s, w, &i, &j);
floorPoint.x = (float) j;
floorPoint.y = (float) i;
computeNearestSphere(ptrDevSpheres, nbSpheres, floorPoint, &nearestSphereIndex, &dz);
if (nearestSphereIndex < 0) {
hsb.x = 0;
hsb.y = 0;
hsb.z = 0;
} else {
hsb.x = ptrDevSpheres[nearestSphereIndex]->hue(t);
hsb.y = 1;
hsb.z = ptrDevSpheres[nearestSphereIndex]->brightness(dz);
}
ColorTools::HSB_TO_RVB(hsb, &ptrDevPixels[s]);
ptrDevPixels[s].w = 255;
s += NB_THREADS;
}
}
__device__ void computeNearestSphere(Sphere** ptrDevSpheres, int nbSpheres, float2 floorPoint, int* nearestSphereIndex, float* dz) {
float hCarre, currentDz, distance;
float distanceMin = MAX_DISTANCE;
*nearestSphereIndex = -1;
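// Scan every sphere and keep the closest accepted hit (smallest distance),
// returning its index and dz; nearestSphereIndex stays -1 when no sphere passes
// the isEnDessous() test for this floor point.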
for( int i = 0 ; i < nbSpheres ; i++ ) {
hCarre = ptrDevSpheres[i]->hCarre(floorPoint);
if (ptrDevSpheres[i]->isEnDessous(hCarre)) {
currentDz = ptrDevSpheres[i]->dz(hCarre);
distance = ptrDevSpheres[i]->distance(currentDz);
if (distance < distanceMin) {
distanceMin = distance;
*nearestSphereIndex = i;
*dz = currentDz;
}
}
}
}
|
e9003bed8e6a1d71f06ca8932f18d29b44e6b50f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "reduceCompleteUnrollWarps8Sync.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
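// Benchmark harness: for each matrix size (argv[1] limits how many sizes are used)
// and each block shape above, the kernel is warmed up 10 times and then the loop of
// 1000 launches is timed, printing [microseconds, (block), (matrix)] per configuration.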
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *g_idata = NULL;
hipMalloc(&g_idata, XSIZE*YSIZE);
int *g_odata = NULL;
hipMalloc(&g_odata, XSIZE*YSIZE);
unsigned int n = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((reduceCompleteUnrollWarps8Sync), dim3(gridBlock), dim3(threadBlock), 0, 0, g_idata, g_odata, n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((reduceCompleteUnrollWarps8Sync), dim3(gridBlock), dim3(threadBlock), 0, 0, g_idata, g_odata, n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((reduceCompleteUnrollWarps8Sync), dim3(gridBlock), dim3(threadBlock), 0, 0, g_idata, g_odata, n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | e9003bed8e6a1d71f06ca8932f18d29b44e6b50f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "reduceCompleteUnrollWarps8Sync.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
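// Benchmark harness: for each matrix size (argv[1] limits how many sizes are used)
// and each block shape above, the kernel is warmed up 10 times and then the loop of
// 1000 launches is timed, printing [microseconds, (block), (matrix)] per configuration.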
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *g_idata = NULL;
cudaMalloc(&g_idata, XSIZE*YSIZE);
int *g_odata = NULL;
cudaMalloc(&g_odata, XSIZE*YSIZE);
unsigned int n = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
reduceCompleteUnrollWarps8Sync<<<gridBlock,threadBlock>>>(g_idata,g_odata,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
reduceCompleteUnrollWarps8Sync<<<gridBlock,threadBlock>>>(g_idata,g_odata,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
reduceCompleteUnrollWarps8Sync<<<gridBlock,threadBlock>>>(g_idata,g_odata,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
1ccd7b2d4049d971b23a37a527fc701209d65457.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/CUDAGeneratorImpl.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <c10/macros/Macros.h>
#include <hiprand/hiprand_kernel.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/MemoryAccess.cuh>
#include <THH/THHGeneral.h>
namespace at{
namespace native{
namespace {
// philox generates 128 bits of randomness at a time. Kernel uses this explicitly by putting suitably transformed result into float4
// for all members of float4 to be consumed UNROLL has to be 4. Don't change!
// Note: VEC <= 4 (and in most real-world cases will be 4), so same logic applies.
const int UNROLL = 4;
template <
typename scalar_t,
typename accscalar_t,
typename IndexType,
int ADims,
int VEC>
#if __CUDA_ARCH__ >= 350
C10_LAUNCH_BOUNDS_2(256, 8)
#elif defined (__HIP_PLATFORM_HCC__)
C10_LAUNCH_BOUNDS_2(256, 4)
#endif
__global__ void
fused_dropout_kernel_vec(at::cuda::detail::TensorInfo<scalar_t, IndexType> a,
at::cuda::detail::TensorInfo<scalar_t, IndexType> b,
at::cuda::detail::TensorInfo<uint8_t, IndexType> c,
IndexType totalElements, accscalar_t p, std::pair<uint64_t, uint64_t> seeds
) {
// make sure we don't break assumption that we can't have > 4 elements / thread
static_assert(VEC <= 4, "Value of VEC must be in [2, 4]");
using LoadT = memory::aligned_vector<scalar_t, VEC>;
using MaskLoadT = memory::aligned_vector<uint8_t, VEC>;
accscalar_t pinv = accscalar_t(1)/p;
IndexType idx = blockIdx.x * blockDim.x + threadIdx.x;
hiprandStatePhilox4_32_10_t state;
hiprand_init(
seeds.first,
idx,
seeds.second,
&state);
// Note: Vectorized loads means we'll stride each thread by an additional VEC factor, as we'll load VEC elements at a time
for (IndexType linearIndex = idx * VEC;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x * VEC) {
// local storage
scalar_t src[VEC];
// We'll use this to actually cause vectorized loads later
LoadT *value = reinterpret_cast<LoadT*>(&src);
//hiprand_uniform_double was pure evil anyway, not doing what it promises, and there's nothing for halfs, so generate float for everything
// Note: need a new set of random values per 4 elements -- we'll handle VEC elements in this thread, so need ceil(VEC / 4)
// sets of rand.
float4 rand = hiprand_uniform4(&state);
rand.x = rand.x < p;
rand.y = rand.y < p;
rand.z = rand.z < p;
rand.w = rand.w < p;
// Note: We explicitly check for is_contiguous() before launching the vectorized kernel
// and replace IndexToOffset call with linearIndex to allow vectorization of NHWC (or other)
// ordering.
// Single vectorized load
*value = *reinterpret_cast<LoadT*>(&a.data[linearIndex]);
scalar_t r[VEC];
uint8_t mask[VEC];
// Perform the actual computation
#pragma unroll
for (int ii = 0; ii < VEC; ii++) {
r[ii] = src[ii]*(&rand.x)[ii]*pinv;
mask[ii] = (uint8_t)(&rand.x)[ii];
}
// Vectorized writes for both mask & result
*(reinterpret_cast<LoadT*>(&b.data[linearIndex])) = *reinterpret_cast<LoadT*>(&r[0]);
*(reinterpret_cast<MaskLoadT*>(&c.data[linearIndex])) = *reinterpret_cast<MaskLoadT*>(&mask[0]);
__syncthreads();
}
}
template <
typename scalar_t,
typename accscalar_t,
typename IndexType,
int ADims>
#if __CUDA_ARCH__ >= 350
C10_LAUNCH_BOUNDS_2(256, 8)
#elif defined (__HIP_PLATFORM_HCC__)
C10_LAUNCH_BOUNDS_2(256, 4)
#endif
__global__ void
fused_dropout_kernel(cuda::detail::TensorInfo<scalar_t, IndexType> a,
cuda::detail::TensorInfo<scalar_t, IndexType> b,
cuda::detail::TensorInfo<uint8_t, IndexType> c,
IndexType totalElements, accscalar_t p, std::pair<uint64_t, uint64_t> seeds
) {
accscalar_t pinv = accscalar_t(1)/p;
IndexType idx = blockIdx.x * blockDim.x + threadIdx.x;
hiprandStatePhilox4_32_10_t state;
hiprand_init(
seeds.first,
idx,
seeds.second,
&state);
IndexType rounded_size = ((totalElements - 1)/(blockDim.x * gridDim.x * UNROLL)+1) *
blockDim.x * gridDim.x * UNROLL;
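// Round the loop bound up to a whole gridDim*blockDim*UNROLL tile so that every
// thread runs the same number of iterations (and therefore reaches the same
// __syncthreads() calls) even when its indices fall past totalElements.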
for (IndexType linearIndex = idx;
linearIndex < rounded_size;
linearIndex += gridDim.x * blockDim.x*UNROLL) {
//hiprand_uniform_double was pure evil anyway, not doing what it promises, and there's nothing for halfs, so generate float for everything
float4 rand = hiprand_uniform4(&state);
scalar_t src[UNROLL];
rand.x = rand.x < p;
rand.y = rand.y < p;
rand.z = rand.z < p;
rand.w = rand.w < p;
for (int ii = 0; ii < UNROLL; ii++) {
IndexType li = linearIndex + blockDim.x * gridDim.x * ii;
if (li < totalElements) {
// Convert `linearIndex` into an offset of `a`
const IndexType aOffset =
cuda::detail::IndexToOffset<scalar_t, IndexType, ADims>::get(li, a);
src[ii] = a.data[aOffset];
}
}
for (int ii = 0; ii < UNROLL; ii++) {
IndexType li = linearIndex + blockDim.x * gridDim.x * ii;
if (li < totalElements) {
// Convert `linearIndex` into an offset of `b`
const IndexType bOffset =
cuda::detail::IndexToOffset<scalar_t, IndexType, 1>::get(li, b);
b.data[bOffset] = src[ii]*(&rand.x)[ii]*pinv;
c.data[bOffset] = (uint8_t)(&rand.x)[ii];
}
}
__syncthreads();
}
}
template<typename scalar_t, typename accscalar_t>
void masked_scale_kernel(at::Tensor& ret, const at::Tensor src, const at::Tensor mask, accscalar_t scale){
auto iter = at::TensorIterator();
iter.check_all_same_dtype(false);
iter.add_output(ret);
iter.add_input(src);
iter.add_input(mask);
iter.build();
at::native::gpu_kernel(
iter,
[=]GPU_LAMBDA(const scalar_t src_val, const uint8_t mask_val) -> scalar_t {
return (float)mask_val * src_val * scale;
});
}
template <typename scalar_t>
int get_vector_size(at::Tensor self, at::Tensor ret, at::Tensor mask) {
int vec_size = 4;
// get the vector size
auto memory_format = self.suggest_memory_format();
if (!self.is_contiguous(memory_format) || !ret.is_contiguous(memory_format) || !mask.is_contiguous(memory_format)) {
vec_size = 1;
} else {
vec_size = memory::can_vectorize_up_to<scalar_t>((char*)self.data_ptr());
}
// check that we'd have no remainders - prefer a smaller vector size with no remainders over a larger vector and remainder.
bool can_vectorize = true;
do {
can_vectorize = self.numel() % vec_size == 0 && ret.numel() % vec_size == 0 && mask.numel() % vec_size == 0;
if (!can_vectorize) vec_size /= 2;
} while (vec_size > 1 && !can_vectorize);
return can_vectorize ? vec_size : 1;
}
} //anonymous namespace
std::tuple<Tensor,Tensor>
fused_dropout_cuda(const Tensor& self, double p, c10::optional<Generator> gen_){
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
Tensor ret = at::empty_like(self, self.suggest_memory_format());
Tensor mask = at::empty(self.sizes(), self.options().dtype(kByte), self.suggest_memory_format());
const int64_t nelem = self.numel();
//empty tensors should not get here, but just in case, avoid FPE
if (nelem==0) return std::tuple<Tensor,Tensor>(self, mask);
const int64_t block_size = 256;
unsigned int blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor/block_size;
dim3 dim_block(block_size);
dim3 grid((nelem + block_size -1)/block_size);
grid.x = ::min((unsigned int)at::cuda::getCurrentDeviceProperties()->multiProcessorCount * blocks_per_sm, grid.x);
//number of times random will be generated per thread, to offset philox counter in thc random state
int64_t counter_offset = ((nelem - 1)/(block_size*grid.x*UNROLL)+1)*UNROLL;
std::pair<uint64_t, uint64_t> rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_engine_inputs(counter_offset);
}
if (cuda::detail::canUse32BitIndexMath(self)){
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "fused_dropout", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "fused_dropout", [&] {
using accscalar_t = acc_type<scalar_t, true>;
accscalar_t pa = (accscalar_t)(p);
auto self_info = cuda::detail::getTensorInfo<scalar_t, unsigned int>(self);
auto ret_info = cuda::detail::getTensorInfo<scalar_t, unsigned int>(ret);
auto mask_info = cuda::detail::getTensorInfo<uint8_t, unsigned int>(mask);
self_info.collapseDims();
ret_info.collapseDims();
mask_info.collapseDims(); //ret and mask are collapsed to 1d contiguous tensor
int vec_size = get_vector_size<scalar_t>(self, ret, mask);
if (vec_size > 1) {
switch (vec_size) {
case 4:
hipLaunchKernelGGL(( fused_dropout_kernel_vec<scalar_t, accscalar_t, unsigned int, 1, 4>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
break;
case 2:
hipLaunchKernelGGL(( fused_dropout_kernel_vec<scalar_t, accscalar_t, unsigned int, 1, 2>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
break;
}
} else {
switch (self_info.dims) {
case 1:
hipLaunchKernelGGL(( fused_dropout_kernel<scalar_t, accscalar_t, unsigned int, 1>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
break;
default:
hipLaunchKernelGGL(( fused_dropout_kernel<scalar_t, accscalar_t, unsigned int, -1>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
}
}
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "fused_dropout", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "fused_dropout", [&] {
using accscalar_t = acc_type<scalar_t, true>;
accscalar_t pa = (accscalar_t)(p);
auto self_info = cuda::detail::getTensorInfo<scalar_t, uint64_t>(self);
auto ret_info = cuda::detail::getTensorInfo<scalar_t, uint64_t>(ret);
auto mask_info = cuda::detail::getTensorInfo<uint8_t, uint64_t>(mask);
self_info.collapseDims();
ret_info.collapseDims();
mask_info.collapseDims(); //ret and mask are collapsed to 1d contiguous tensor
int vec_size = get_vector_size<scalar_t>(self, ret, mask);
if (vec_size > 1) {
switch (vec_size) {
case 4:
hipLaunchKernelGGL(( fused_dropout_kernel_vec<scalar_t, accscalar_t, uint64_t, 1, 4>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
break;
case 2:
hipLaunchKernelGGL(( fused_dropout_kernel_vec<scalar_t, accscalar_t, uint64_t, 1, 2>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
break;
}
} else {
switch (self_info.dims) {
case 1:
hipLaunchKernelGGL(( fused_dropout_kernel<scalar_t, accscalar_t, uint64_t, 1>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
break;
default:
hipLaunchKernelGGL(( fused_dropout_kernel<scalar_t, accscalar_t, uint64_t, -1>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
}
}
});
});
}
AT_CUDA_CHECK(hipGetLastError());
return std::tuple<Tensor,Tensor>(ret, mask);
}
Tensor masked_scale_cuda(const Tensor& self, const Tensor& mask, double scale){
Tensor ret = at::empty_like(self, self.suggest_memory_format());
TORCH_CHECK(mask.scalar_type() == at::ScalarType::Byte, "mask should be torch.uint8 dtype");
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "masked_scale", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "masked_scale", [&] {
using accscalar_t = acc_type<scalar_t, true>;
accscalar_t pa = (accscalar_t)(scale);
masked_scale_kernel<scalar_t>(ret, self, mask, pa);
});
});
return ret;
}
}
}
| 1ccd7b2d4049d971b23a37a527fc701209d65457.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/CUDAGeneratorImpl.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <c10/macros/Macros.h>
#include <curand_kernel.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/MemoryAccess.cuh>
#include <THC/THCGeneral.h>
namespace at{
namespace native{
namespace {
// philox generates 128 bits of randomness at a time. Kernel uses this explicitly by putting suitably transformed result into float4
// for all members of float4 to be consumed UNROLL has to be 4. Don't change!
// Note: VEC <= 4 (and in most real-world cases will be 4), so same logic applies.
const int UNROLL = 4;
template <
typename scalar_t,
typename accscalar_t,
typename IndexType,
int ADims,
int VEC>
#if __CUDA_ARCH__ >= 350
C10_LAUNCH_BOUNDS_2(256, 8)
#elif defined (__HIP_PLATFORM_HCC__)
C10_LAUNCH_BOUNDS_2(256, 4)
#endif
__global__ void
fused_dropout_kernel_vec(at::cuda::detail::TensorInfo<scalar_t, IndexType> a,
at::cuda::detail::TensorInfo<scalar_t, IndexType> b,
at::cuda::detail::TensorInfo<uint8_t, IndexType> c,
IndexType totalElements, accscalar_t p, std::pair<uint64_t, uint64_t> seeds
) {
// make sure we don't break assumption that we can't have > 4 elements / thread
static_assert(VEC <= 4, "Value of VEC must be in [2, 4]");
using LoadT = memory::aligned_vector<scalar_t, VEC>;
using MaskLoadT = memory::aligned_vector<uint8_t, VEC>;
accscalar_t pinv = accscalar_t(1)/p;
IndexType idx = blockIdx.x * blockDim.x + threadIdx.x;
curandStatePhilox4_32_10_t state;
curand_init(
seeds.first,
idx,
seeds.second,
&state);
// Note: Vectorized loads means we'll stride each thread by an additional VEC factor, as we'll load VEC elements at a time
for (IndexType linearIndex = idx * VEC;
linearIndex < totalElements;
linearIndex += gridDim.x * blockDim.x * VEC) {
// local storage
scalar_t src[VEC];
// We'll use this to actually cause vectorized loads later
LoadT *value = reinterpret_cast<LoadT*>(&src);
//curand_uniform_double was pure evil anyway, not doing what it promises, and there's nothing for halfs, so generate float for everything
// Note: need a new set of random values per 4 elements -- we'll handle VEC elements in this thread, so need ceil(VEC / 4)
// sets of rand.
float4 rand = curand_uniform4(&state);
rand.x = rand.x < p;
rand.y = rand.y < p;
rand.z = rand.z < p;
rand.w = rand.w < p;
// Note: We explicitly check for is_contiguous() before launching the vectorized kernel
// and replace IndexToOffset call with linearIndex to allow vectorization of NHWC (or other)
// ordering.
// Single vectorized load
*value = *reinterpret_cast<LoadT*>(&a.data[linearIndex]);
scalar_t r[VEC];
uint8_t mask[VEC];
// Perform the actual computation
#pragma unroll
for (int ii = 0; ii < VEC; ii++) {
r[ii] = src[ii]*(&rand.x)[ii]*pinv;
mask[ii] = (uint8_t)(&rand.x)[ii];
}
// Vectorized writes for both mask & result
*(reinterpret_cast<LoadT*>(&b.data[linearIndex])) = *reinterpret_cast<LoadT*>(&r[0]);
*(reinterpret_cast<MaskLoadT*>(&c.data[linearIndex])) = *reinterpret_cast<MaskLoadT*>(&mask[0]);
__syncthreads();
}
}
template <
typename scalar_t,
typename accscalar_t,
typename IndexType,
int ADims>
#if __CUDA_ARCH__ >= 350
C10_LAUNCH_BOUNDS_2(256, 8)
#elif defined (__HIP_PLATFORM_HCC__)
C10_LAUNCH_BOUNDS_2(256, 4)
#endif
__global__ void
fused_dropout_kernel(cuda::detail::TensorInfo<scalar_t, IndexType> a,
cuda::detail::TensorInfo<scalar_t, IndexType> b,
cuda::detail::TensorInfo<uint8_t, IndexType> c,
IndexType totalElements, accscalar_t p, std::pair<uint64_t, uint64_t> seeds
) {
accscalar_t pinv = accscalar_t(1)/p;
IndexType idx = blockIdx.x * blockDim.x + threadIdx.x;
curandStatePhilox4_32_10_t state;
curand_init(
seeds.first,
idx,
seeds.second,
&state);
IndexType rounded_size = ((totalElements - 1)/(blockDim.x * gridDim.x * UNROLL)+1) *
blockDim.x * gridDim.x * UNROLL;
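// Round the loop bound up to a whole gridDim*blockDim*UNROLL tile so that every
// thread runs the same number of iterations (and therefore reaches the same
// __syncthreads() calls) even when its indices fall past totalElements.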
for (IndexType linearIndex = idx;
linearIndex < rounded_size;
linearIndex += gridDim.x * blockDim.x*UNROLL) {
//curand_uniform_double was pure evil anyway, not doing what it promises, and there's nothing for halfs, so generate float for everything
float4 rand = curand_uniform4(&state);
scalar_t src[UNROLL];
rand.x = rand.x < p;
rand.y = rand.y < p;
rand.z = rand.z < p;
rand.w = rand.w < p;
for (int ii = 0; ii < UNROLL; ii++) {
IndexType li = linearIndex + blockDim.x * gridDim.x * ii;
if (li < totalElements) {
// Convert `linearIndex` into an offset of `a`
const IndexType aOffset =
cuda::detail::IndexToOffset<scalar_t, IndexType, ADims>::get(li, a);
src[ii] = a.data[aOffset];
}
}
for (int ii = 0; ii < UNROLL; ii++) {
IndexType li = linearIndex + blockDim.x * gridDim.x * ii;
if (li < totalElements) {
// Convert `linearIndex` into an offset of `b`
const IndexType bOffset =
cuda::detail::IndexToOffset<scalar_t, IndexType, 1>::get(li, b);
b.data[bOffset] = src[ii]*(&rand.x)[ii]*pinv;
c.data[bOffset] = (uint8_t)(&rand.x)[ii];
}
}
__syncthreads();
}
}
template<typename scalar_t, typename accscalar_t>
void masked_scale_kernel(at::Tensor& ret, const at::Tensor src, const at::Tensor mask, accscalar_t scale){
auto iter = at::TensorIterator();
iter.check_all_same_dtype(false);
iter.add_output(ret);
iter.add_input(src);
iter.add_input(mask);
iter.build();
at::native::gpu_kernel(
iter,
[=]GPU_LAMBDA(const scalar_t src_val, const uint8_t mask_val) -> scalar_t {
return (float)mask_val * src_val * scale;
});
}
template <typename scalar_t>
int get_vector_size(at::Tensor self, at::Tensor ret, at::Tensor mask) {
int vec_size = 4;
// get the vector size
auto memory_format = self.suggest_memory_format();
if (!self.is_contiguous(memory_format) || !ret.is_contiguous(memory_format) || !mask.is_contiguous(memory_format)) {
vec_size = 1;
} else {
vec_size = memory::can_vectorize_up_to<scalar_t>((char*)self.data_ptr());
}
// check that we'd have no remainders - prefer a smaller vector size with no remainders over a larger vector and remainder.
bool can_vectorize = true;
do {
can_vectorize = self.numel() % vec_size == 0 && ret.numel() % vec_size == 0 && mask.numel() % vec_size == 0;
if (!can_vectorize) vec_size /= 2;
} while (vec_size > 1 && !can_vectorize);
return can_vectorize ? vec_size : 1;
}
} //anonymous namespace
std::tuple<Tensor,Tensor>
fused_dropout_cuda(const Tensor& self, double p, c10::optional<Generator> gen_){
auto gen = get_generator_or_default<CUDAGeneratorImpl>(gen_, cuda::detail::getDefaultCUDAGenerator());
Tensor ret = at::empty_like(self, self.suggest_memory_format());
Tensor mask = at::empty(self.sizes(), self.options().dtype(kByte), self.suggest_memory_format());
const int64_t nelem = self.numel();
//empty tensors should not get here, but just in case, avoid FPE
if (nelem==0) return std::tuple<Tensor,Tensor>(self, mask);
const int64_t block_size = 256;
unsigned int blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor/block_size;
dim3 dim_block(block_size);
dim3 grid((nelem + block_size -1)/block_size);
grid.x = std::min((unsigned int)at::cuda::getCurrentDeviceProperties()->multiProcessorCount * blocks_per_sm, grid.x);
//number of times random will be generated per thread, to offset philox counter in thc random state
int64_t counter_offset = ((nelem - 1)/(block_size*grid.x*UNROLL)+1)*UNROLL;
std::pair<uint64_t, uint64_t> rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_engine_inputs(counter_offset);
}
if (cuda::detail::canUse32BitIndexMath(self)){
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "fused_dropout", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "fused_dropout", [&] {
using accscalar_t = acc_type<scalar_t, true>;
accscalar_t pa = (accscalar_t)(p);
auto self_info = cuda::detail::getTensorInfo<scalar_t, unsigned int>(self);
auto ret_info = cuda::detail::getTensorInfo<scalar_t, unsigned int>(ret);
auto mask_info = cuda::detail::getTensorInfo<uint8_t, unsigned int>(mask);
self_info.collapseDims();
ret_info.collapseDims();
mask_info.collapseDims(); //ret and mask are collapsed to 1d contiguous tensor
int vec_size = get_vector_size<scalar_t>(self, ret, mask);
if (vec_size > 1) {
switch (vec_size) {
case 4:
fused_dropout_kernel_vec<scalar_t, accscalar_t, unsigned int, 1, 4><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
break;
case 2:
fused_dropout_kernel_vec<scalar_t, accscalar_t, unsigned int, 1, 2><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
break;
}
} else {
switch (self_info.dims) {
case 1:
fused_dropout_kernel<scalar_t, accscalar_t, unsigned int, 1><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
break;
default:
fused_dropout_kernel<scalar_t, accscalar_t, unsigned int, -1><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
}
}
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "fused_dropout", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "fused_dropout", [&] {
using accscalar_t = acc_type<scalar_t, true>;
accscalar_t pa = (accscalar_t)(p);
auto self_info = cuda::detail::getTensorInfo<scalar_t, uint64_t>(self);
auto ret_info = cuda::detail::getTensorInfo<scalar_t, uint64_t>(ret);
auto mask_info = cuda::detail::getTensorInfo<uint8_t, uint64_t>(mask);
self_info.collapseDims();
ret_info.collapseDims();
mask_info.collapseDims(); //ret and mask are collapsed to 1d contiguous tensor
int vec_size = get_vector_size<scalar_t>(self, ret, mask);
if (vec_size > 1) {
switch (vec_size) {
case 4:
fused_dropout_kernel_vec<scalar_t, accscalar_t, uint64_t, 1, 4><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
break;
case 2:
fused_dropout_kernel_vec<scalar_t, accscalar_t, uint64_t, 1, 2><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
break;
}
} else {
switch (self_info.dims) {
case 1:
fused_dropout_kernel<scalar_t, accscalar_t, uint64_t, 1><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
break;
default:
fused_dropout_kernel<scalar_t, accscalar_t, uint64_t, -1><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, rng_engine_inputs);
}
}
});
});
}
AT_CUDA_CHECK(cudaGetLastError());
return std::tuple<Tensor,Tensor>(ret, mask);
}
Tensor masked_scale_cuda(const Tensor& self, const Tensor& mask, double scale){
Tensor ret = at::empty_like(self, self.suggest_memory_format());
TORCH_CHECK(mask.scalar_type() == at::ScalarType::Byte, "mask should be torch.uint8 dtype");
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, ret.scalar_type(), "masked_scale", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "masked_scale", [&] {
using accscalar_t = acc_type<scalar_t, true>;
accscalar_t pa = (accscalar_t)(scale);
masked_scale_kernel<scalar_t>(ret, self, mask, pa);
});
});
return ret;
}
}
}
|
41ab2a60e4607797e3241b77e5fa50ca84488a8b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/null_mask.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/findall.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <strings/utilities.hpp>
#include <strings/regex/regex.cuh>
#include <thrust/extrema.h>
namespace cudf
{
namespace strings
{
namespace detail
{
using string_index_pair = thrust::pair<const char*,size_type>;
using findall_result = thrust::pair<size_type,string_index_pair>;
namespace
{
/**
* @brief This functor handles extracting matched strings by applying the compiled regex pattern
* and creating string_index_pairs for all the substrings.
*/
template<size_t stack_size>
struct findall_fn
{
column_device_view const d_strings;
reprog_device prog;
size_type column_index;
size_type const* d_counts;
findall_fn( column_device_view const& d_strings,
reprog_device& prog,
size_type column_index = -1,
size_type const* d_counts = nullptr )
: d_strings(d_strings), prog(prog), column_index(column_index), d_counts(d_counts) {}
// this will count columns as well as locate a specific string for a column
__device__ findall_result findall(size_type idx)
{
string_index_pair result{nullptr,0};
if( d_strings.is_null(idx) ||
(d_counts && (column_index >= d_counts[idx])) )
return findall_result{0,result};
u_char data1[stack_size];
u_char data2[stack_size];
prog.set_stack_mem(data1,data2);
string_view d_str = d_strings.element<string_view>(idx);
auto nchars = d_str.length();
size_type spos = 0;
size_type epos = nchars;
size_type column_count = 0;
while( spos <= nchars )
{
if( prog.find(idx,d_str,spos,epos) <=0 )
break; // no more matches found
if( column_count == column_index )
break; // found our column
spos = epos > spos ? epos : spos + 1;
epos = nchars;
++column_count;
}
if( spos <= epos )
{
spos = d_str.byte_offset(spos); // convert
epos = d_str.byte_offset(epos); // to bytes
result = string_index_pair{d_str.data() + spos, (epos-spos)};
}
// return the strings location and the column count
return findall_result{column_count,result};
}
__device__ string_index_pair operator()(size_type idx)
{
// this one only cares about the string
return findall(idx).second;
}
};
template<size_t stack_size>
struct findall_count_fn : public findall_fn<stack_size>
{
findall_count_fn( column_device_view const& strings,
reprog_device& prog)
: findall_fn<stack_size>{strings,prog} {}
__device__ size_type operator()(size_type idx)
{
// this one only cares about the column count
return findall_fn<stack_size>::findall(idx).first;
}
};
} // namespace
//
std::unique_ptr<experimental::table> findall_re( strings_column_view const& strings,
std::string const& pattern,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
auto strings_count = strings.size();
auto strings_column = column_device_view::create(strings.parent(),stream);
auto d_strings = *strings_column;
auto d_flags = detail::get_character_flags_table();
// compile regex into device object
auto prog = reprog_device::create(pattern,d_flags,strings_count,stream);
auto d_prog = *prog;
auto execpol = rmm::exec_policy(stream);
int regex_insts = prog->insts_counts();
rmm::device_vector<size_type> find_counts(strings_count);
auto d_find_counts = find_counts.data().get();
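// First pass: count, for each input string, how many matches the compiled pattern
// produces; the regex working-stack size template parameter is chosen from the
// number of compiled instructions.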
if( (regex_insts > MAX_STACK_INSTS) || (regex_insts <= RX_SMALL_INSTS) )
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_find_counts, findall_count_fn<RX_STACK_SMALL>{d_strings,d_prog});
else if( regex_insts <= RX_MEDIUM_INSTS )
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_find_counts, findall_count_fn<RX_STACK_MEDIUM>{d_strings,d_prog});
else
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_find_counts, findall_count_fn<RX_STACK_LARGE>{d_strings,d_prog});
std::vector<std::unique_ptr<column>> results;
size_type columns = *thrust::max_element(execpol->on(stream), find_counts.begin(), find_counts.end() );
// boundary case: if no columns, return all nulls column (issue #119)
if( columns==0 )
results.push_back(std::make_unique<column>( data_type{STRING}, strings_count,
rmm::device_buffer{0,stream,mr}, // no data
create_null_mask(strings_count,mask_state::ALL_NULL,stream,mr), strings_count ));
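// Second pass: for each output column, extract the column_index-th match of every
// string (null when a string has fewer matches) and build a strings column from
// the resulting index pairs.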
for( int32_t column_index=0; column_index < columns; ++column_index )
{
rmm::device_vector<string_index_pair> indices(strings_count);
string_index_pair* d_indices = indices.data().get();
if( (regex_insts > MAX_STACK_INSTS) || (regex_insts <= RX_SMALL_INSTS) )
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_indices, findall_fn<RX_STACK_SMALL>{d_strings, d_prog, column_index, d_find_counts});
else if( regex_insts <= RX_MEDIUM_INSTS )
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_indices, findall_fn<RX_STACK_MEDIUM>{d_strings, d_prog, column_index, d_find_counts});
else
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_indices, findall_fn<RX_STACK_LARGE>{d_strings, d_prog, column_index, d_find_counts});
//
auto column = make_strings_column(indices,stream,mr);
results.emplace_back(std::move(column));
}
return std::make_unique<experimental::table>(std::move(results));
}
} // namespace detail
// external API
std::unique_ptr<experimental::table> findall_re( strings_column_view const& strings,
std::string const& pattern,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::findall_re(strings, pattern, mr);
}
} // namespace strings
} // namespace cudf
| 41ab2a60e4607797e3241b77e5fa50ca84488a8b.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/null_mask.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/findall.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <strings/utilities.hpp>
#include <strings/regex/regex.cuh>
#include <thrust/extrema.h>
namespace cudf
{
namespace strings
{
namespace detail
{
using string_index_pair = thrust::pair<const char*,size_type>;
using findall_result = thrust::pair<size_type,string_index_pair>;
namespace
{
/**
* @brief This functor handles extracting matched strings by applying the compiled regex pattern
* and creating string_index_pairs for all the substrings.
*/
template<size_t stack_size>
struct findall_fn
{
column_device_view const d_strings;
reprog_device prog;
size_type column_index;
size_type const* d_counts;
findall_fn( column_device_view const& d_strings,
reprog_device& prog,
size_type column_index = -1,
size_type const* d_counts = nullptr )
: d_strings(d_strings), prog(prog), column_index(column_index), d_counts(d_counts) {}
// this will count columns as well as locate a specific string for a column
__device__ findall_result findall(size_type idx)
{
string_index_pair result{nullptr,0};
if( d_strings.is_null(idx) ||
(d_counts && (column_index >= d_counts[idx])) )
return findall_result{0,result};
u_char data1[stack_size];
u_char data2[stack_size];
prog.set_stack_mem(data1,data2);
string_view d_str = d_strings.element<string_view>(idx);
auto nchars = d_str.length();
size_type spos = 0;
size_type epos = nchars;
size_type column_count = 0;
while( spos <= nchars )
{
if( prog.find(idx,d_str,spos,epos) <=0 )
break; // no more matches found
if( column_count == column_index )
break; // found our column
spos = epos > spos ? epos : spos + 1;
epos = nchars;
++column_count;
}
if( spos <= epos )
{
spos = d_str.byte_offset(spos); // convert
epos = d_str.byte_offset(epos); // to bytes
result = string_index_pair{d_str.data() + spos, (epos-spos)};
}
// return the strings location and the column count
return findall_result{column_count,result};
}
__device__ string_index_pair operator()(size_type idx)
{
// this one only cares about the string
return findall(idx).second;
}
};
template<size_t stack_size>
struct findall_count_fn : public findall_fn<stack_size>
{
findall_count_fn( column_device_view const& strings,
reprog_device& prog)
: findall_fn<stack_size>{strings,prog} {}
__device__ size_type operator()(size_type idx)
{
// this one only cares about the column count
return findall_fn<stack_size>::findall(idx).first;
}
};
} // namespace
//
std::unique_ptr<experimental::table> findall_re( strings_column_view const& strings,
std::string const& pattern,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
auto strings_count = strings.size();
auto strings_column = column_device_view::create(strings.parent(),stream);
auto d_strings = *strings_column;
auto d_flags = detail::get_character_flags_table();
// compile regex into device object
auto prog = reprog_device::create(pattern,d_flags,strings_count,stream);
auto d_prog = *prog;
auto execpol = rmm::exec_policy(stream);
int regex_insts = prog->insts_counts();
rmm::device_vector<size_type> find_counts(strings_count);
auto d_find_counts = find_counts.data().get();
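// First pass: count, for each input string, how many matches the compiled pattern
// produces; the regex working-stack size template parameter is chosen from the
// number of compiled instructions.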
if( (regex_insts > MAX_STACK_INSTS) || (regex_insts <= RX_SMALL_INSTS) )
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_find_counts, findall_count_fn<RX_STACK_SMALL>{d_strings,d_prog});
else if( regex_insts <= RX_MEDIUM_INSTS )
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_find_counts, findall_count_fn<RX_STACK_MEDIUM>{d_strings,d_prog});
else
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_find_counts, findall_count_fn<RX_STACK_LARGE>{d_strings,d_prog});
std::vector<std::unique_ptr<column>> results;
size_type columns = *thrust::max_element(execpol->on(stream), find_counts.begin(), find_counts.end() );
// boundary case: if no columns, return all nulls column (issue #119)
if( columns==0 )
results.push_back(std::make_unique<column>( data_type{STRING}, strings_count,
rmm::device_buffer{0,stream,mr}, // no data
create_null_mask(strings_count,mask_state::ALL_NULL,stream,mr), strings_count ));
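// Second pass: for each output column, extract the column_index-th match of every
// string (null when a string has fewer matches) and build a strings column from
// the resulting index pairs.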
for( int32_t column_index=0; column_index < columns; ++column_index )
{
rmm::device_vector<string_index_pair> indices(strings_count);
string_index_pair* d_indices = indices.data().get();
if( (regex_insts > MAX_STACK_INSTS) || (regex_insts <= RX_SMALL_INSTS) )
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_indices, findall_fn<RX_STACK_SMALL>{d_strings, d_prog, column_index, d_find_counts});
else if( regex_insts <= RX_MEDIUM_INSTS )
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_indices, findall_fn<RX_STACK_MEDIUM>{d_strings, d_prog, column_index, d_find_counts});
else
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_indices, findall_fn<RX_STACK_LARGE>{d_strings, d_prog, column_index, d_find_counts});
//
auto column = make_strings_column(indices,stream,mr);
results.emplace_back(std::move(column));
}
return std::make_unique<experimental::table>(std::move(results));
}
} // namespace detail
// external API
std::unique_ptr<experimental::table> findall_re( strings_column_view const& strings,
std::string const& pattern,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::findall_re(strings, pattern, mr);
}
} // namespace strings
} // namespace cudf
|
d7df177007d6203ddd426281f4f2bf2c116c7e88.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <hip/hip_runtime.h>
const int INF = 1000000000;
const int V = 20010;
const int num_thread = 256;
clock_t begin, end;
double IO_time = 0;
int n, m, Dist[V][V];
int *device_ptr;
size_t pitch;
inline int ceil(int a, int b) {
return (a + b - 1) / b;
}
void input(char *inFileName) {
begin = clock();
FILE *infile = fopen(inFileName, "r");
fscanf(infile, "%d %d", &n, &m);
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
if (i == j) Dist[i][j] = 0;
else Dist[i][j] = INF;
}
}
while (--m >= 0) {
int a, b, v;
fscanf(infile, "%d %d %d", &a, &b, &v);
Dist[a][b] = v;
}
fclose(infile);
end = clock();
IO_time += (double) (end - begin) / CLOCKS_PER_SEC;
}
void output(char *outFileName) {
begin = clock();
FILE *outfile = fopen(outFileName, "w");
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
if (Dist[i][j] >= INF)
Dist[i][j] = INF;
}
fwrite(Dist[i], sizeof(int), n, outfile);
}
fclose(outfile);
end = clock();
IO_time += (double) (end - begin) / CLOCKS_PER_SEC;
}
//done &= cal(B, r, r, r, 1, 1);
/*
done &= cal(B, r, r, 0, r, 1);
done &= cal(B, r, r, r +1, round - r -1, 1);
done &= cal(B, r, 0, r, 1, r);
done &= cal(B, r, r +1, r, 1, round - r -1);
*/
__global__ void cal_kernel(int stat, int *device_ptr, int n, size_t pitch, int B, int Round, int block_start_x, int block_start_y, int block_width) {
int x = threadIdx.x / B;
int y = threadIdx.x % B;
int i = (block_start_x + blockIdx.x / block_width) * B + x;
int j = (block_start_y + blockIdx.x % block_width) * B + y;
/*for (int k = Round * B; k < (Round + 1) * B && k < n; k++) {
int *i_row = (int*)((char*)device_ptr + i * pitch);
int *k_row = (int*)((char*)device_ptr + k * pitch);
if (i_row[k] + k_row[j] < i_row[j]) {
i_row[j] = i_row[k] + k_row[j];
}
__syncthreads();
}*/
__shared__ int target[16][33];
__shared__ int a[16][33];
__shared__ int b[16][33];
target[x][y] = *((int*)((char*)device_ptr + i * pitch) + j);
a[x][y] = *((int*)((char*)device_ptr + i * pitch) + Round * B + y);
b[x][y] = *((int*)((char*)device_ptr + (Round * B + x) * pitch) + j);
__syncthreads();
if (i >= n || j >= n) return;
int rb = Round * B;
if (stat == 1) {
for (int k = 0; k < 16 && rb + k < n; k++) {
if (target[x][k] + target[k][y] < target[x][y])
target[x][y] = target[x][k] + target[k][y];
__syncthreads();
}
} else if (stat == 2) {
for (int k = 0; k < 16 && rb + k < n; k++) {
if (a[x][k] + b[k][y] < target[x][y])
target[x][y] = a[x][k] + b[k][y];
__syncthreads();
}
} else if (stat == 3) {
for (int k = 0; k < 16 && rb + k < n; k++) {
if (a[x][k] + target[k][y] < target[x][y])
target[x][y] = a[x][k] + target[k][y];
__syncthreads();
}
} else {
for (int k = 0; k < 16 && rb + k < n; k++) {
if (target[x][k] + b[k][y] < target[x][y])
target[x][y] = target[x][k] + b[k][y];
__syncthreads();
}
}
*((int*)((char*)device_ptr + i * pitch) + j) = target[x][y];
}
bool cal(int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height) {
bool done = true;
int block_end_x = block_start_x + block_height;
int block_end_y = block_start_y + block_width;
for (int b_i = block_start_x; b_i < block_end_x; ++b_i) {
for (int b_j = block_start_y; b_j < block_end_y; ++b_j) {
// To calculate B*B elements in the block (b_i, b_j)
// For each block, it need to compute B times
for (int k = Round * B; k < (Round +1) * B && k < n; ++k) {
// To calculate original index of elements in the block (b_i, b_j)
// For instance, original index of (0,0) in block (1,2) is (2,5) for V=6,B=2
int block_internal_start_x = b_i * B;
int block_internal_end_x = (b_i +1) * B;
int block_internal_start_y = b_j * B;
int block_internal_end_y = (b_j +1) * B;
if (block_internal_end_x > n) block_internal_end_x = n;
if (block_internal_end_y > n) block_internal_end_y = n;
for (int i = block_internal_start_x; i < block_internal_end_x; ++i) {
for (int j = block_internal_start_y; j < block_internal_end_y; ++j) {
if (Dist[i][k] + Dist[k][j] < Dist[i][j]) {
Dist[i][j] = Dist[i][k] + Dist[k][j];
done = false;
}
}
}
}
}
}
return done;
}
void show(int32_t input[V][V]) {
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
printf("%d ", input[i][j]);
}
puts("");
}
}
void block_FW(int B) {
hipMallocPitch(&device_ptr, &pitch, n * sizeof(int), n);
hipMemcpy2D(device_ptr, pitch, Dist, V * sizeof(int), n * sizeof(int), n, hipMemcpyHostToDevice);
//show(Dist);
//memset(Dist, 0, sizeof(Dist));
//hipMemcpy2D(Dist, V * sizeof(int), device_ptr, pitch, n * sizeof(int), n, hipMemcpyDeviceToHost);
//show(Dist);
int round = ceil(n, B);
for (int r = 0; r < round; ++r) {
//printf("%d %d\n", r, round);
/*
cal(B, r, r, r, 1, 1);
cal(B, r, r, 0, r, 1);
cal(B, r, r, r +1, round - r -1, 1);
cal(B, r, 0, r, 1, r);
cal(B, r, r +1, r, 1, round - r -1);
cal(B, r, 0, 0, r, r);
cal(B, r, 0, r +1, round -r -1, r);
cal(B, r, r +1, 0, r, round - r -1);
cal(B, r, r +1, r +1, round -r -1, round - r -1);*/
int temp = (round - r - 1);
hipLaunchKernelGGL(( cal_kernel), dim3(1), dim3(num_thread), 0, 0, 1, device_ptr, n, pitch, B, r, r, r, 1);
//if (r < 1) {
hipLaunchKernelGGL(( cal_kernel), dim3(r), dim3(num_thread), 0, 0, 3, device_ptr, n, pitch, B, r, r, 0, r);
hipLaunchKernelGGL(( cal_kernel), dim3(temp), dim3(num_thread), 0, 0, 3, device_ptr, n, pitch, B, r, r, r + 1, temp);
hipLaunchKernelGGL(( cal_kernel), dim3(r), dim3(num_thread), 0, 0, 4, device_ptr, n, pitch, B, r, 0, r, 1);
hipLaunchKernelGGL(( cal_kernel), dim3(temp), dim3(num_thread), 0, 0, 4, device_ptr, n, pitch, B, r, r + 1, r, 1);
hipLaunchKernelGGL(( cal_kernel), dim3(r * r), dim3(num_thread), 0, 0, 2, device_ptr, n, pitch, B, r, 0, 0, r);
hipLaunchKernelGGL(( cal_kernel), dim3(r * temp), dim3(num_thread), 0, 0, 2, device_ptr, n, pitch, B, r, 0, r + 1, temp);
hipLaunchKernelGGL(( cal_kernel), dim3(r * temp), dim3(num_thread), 0, 0, 2, device_ptr, n, pitch, B, r, r + 1, 0, r);
hipLaunchKernelGGL(( cal_kernel), dim3(temp * temp), dim3(num_thread), 0, 0, 2, device_ptr, n, pitch, B, r, r + 1, r + 1, temp);
//}
}
hipMemcpy2D(Dist, V * sizeof(int), device_ptr, pitch, n * sizeof(int), n, hipMemcpyDeviceToHost);
}
int main(int argc, char* argv[]) {
input(argv[1]);
int B = atoi(argv[3]);
block_FW(B);
output(argv[2]);
printf("IO time: %f\n", IO_time);
return 0;
}
| d7df177007d6203ddd426281f4f2bf2c116c7e88.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <cuda.h>
const int INF = 1000000000;
const int V = 20010;
const int num_thread = 256;
clock_t begin, end;
double IO_time = 0;
int n, m, Dist[V][V];
int *device_ptr;
size_t pitch;
inline int ceil(int a, int b) {
return (a + b - 1) / b;
}
void input(char *inFileName) {
begin = clock();
FILE *infile = fopen(inFileName, "r");
fscanf(infile, "%d %d", &n, &m);
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
if (i == j) Dist[i][j] = 0;
else Dist[i][j] = INF;
}
}
while (--m >= 0) {
int a, b, v;
fscanf(infile, "%d %d %d", &a, &b, &v);
Dist[a][b] = v;
}
fclose(infile);
end = clock();
IO_time += (double) (end - begin) / CLOCKS_PER_SEC;
}
void output(char *outFileName) {
begin = clock();
FILE *outfile = fopen(outFileName, "w");
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
if (Dist[i][j] >= INF)
Dist[i][j] = INF;
}
fwrite(Dist[i], sizeof(int), n, outfile);
}
fclose(outfile);
end = clock();
IO_time += (double) (end - begin) / CLOCKS_PER_SEC;
}
//done &= cal(B, r, r, r, 1, 1);
/*
done &= cal(B, r, r, 0, r, 1);
done &= cal(B, r, r, r +1, round - r -1, 1);
done &= cal(B, r, 0, r, 1, r);
done &= cal(B, r, r +1, r, 1, round - r -1);
*/
__global__ void cal_kernel(int stat, int *device_ptr, int n, size_t pitch, int B, int Round, int block_start_x, int block_start_y, int block_width) {
int x = threadIdx.x / B;
int y = threadIdx.x % B;
int i = (block_start_x + blockIdx.x / block_width) * B + x;
int j = (block_start_y + blockIdx.x % block_width) * B + y;
/*for (int k = Round * B; k < (Round + 1) * B && k < n; k++) {
int *i_row = (int*)((char*)device_ptr + i * pitch);
int *k_row = (int*)((char*)device_ptr + k * pitch);
if (i_row[k] + k_row[j] < i_row[j]) {
i_row[j] = i_row[k] + k_row[j];
}
__syncthreads();
}*/
__shared__ int target[16][33];
__shared__ int a[16][33];
__shared__ int b[16][33];
target[x][y] = *((int*)((char*)device_ptr + i * pitch) + j);
a[x][y] = *((int*)((char*)device_ptr + i * pitch) + Round * B + y);
b[x][y] = *((int*)((char*)device_ptr + (Round * B + x) * pitch) + j);
__syncthreads();
if (i >= n || j >= n) return;
int rb = Round * B;
if (stat == 1) {
for (int k = 0; k < 16 && rb + k < n; k++) {
if (target[x][k] + target[k][y] < target[x][y])
target[x][y] = target[x][k] + target[k][y];
__syncthreads();
}
} else if (stat == 2) {
for (int k = 0; k < 16 && rb + k < n; k++) {
if (a[x][k] + b[k][y] < target[x][y])
target[x][y] = a[x][k] + b[k][y];
__syncthreads();
}
} else if (stat == 3) {
for (int k = 0; k < 16 && rb + k < n; k++) {
if (a[x][k] + target[k][y] < target[x][y])
target[x][y] = a[x][k] + target[k][y];
__syncthreads();
}
} else {
for (int k = 0; k < 16 && rb + k < n; k++) {
if (target[x][k] + b[k][y] < target[x][y])
target[x][y] = target[x][k] + b[k][y];
__syncthreads();
}
}
*((int*)((char*)device_ptr + i * pitch) + j) = target[x][y];
}
bool cal(int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height) {
bool done = true;
int block_end_x = block_start_x + block_height;
int block_end_y = block_start_y + block_width;
for (int b_i = block_start_x; b_i < block_end_x; ++b_i) {
for (int b_j = block_start_y; b_j < block_end_y; ++b_j) {
// To calculate B*B elements in the block (b_i, b_j)
// For each block, it needs to compute B times
for (int k = Round * B; k < (Round +1) * B && k < n; ++k) {
// To calculate original index of elements in the block (b_i, b_j)
// For instance, the original index of (0,0) in block (1,2) is (b_i*B, b_j*B) = (2,4) for V=6, B=2
int block_internal_start_x = b_i * B;
int block_internal_end_x = (b_i +1) * B;
int block_internal_start_y = b_j * B;
int block_internal_end_y = (b_j +1) * B;
if (block_internal_end_x > n) block_internal_end_x = n;
if (block_internal_end_y > n) block_internal_end_y = n;
for (int i = block_internal_start_x; i < block_internal_end_x; ++i) {
for (int j = block_internal_start_y; j < block_internal_end_y; ++j) {
if (Dist[i][k] + Dist[k][j] < Dist[i][j]) {
Dist[i][j] = Dist[i][k] + Dist[k][j];
done = false;
}
}
}
}
}
}
return done;
}
void show(int32_t input[V][V]) {
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
printf("%d ", input[i][j]);
}
puts("");
}
}
void block_FW(int B) {
cudaMallocPitch(&device_ptr, &pitch, n * sizeof(int), n);
cudaMemcpy2D(device_ptr, pitch, Dist, V * sizeof(int), n * sizeof(int), n, cudaMemcpyHostToDevice);
//show(Dist);
//memset(Dist, 0, sizeof(Dist));
//cudaMemcpy2D(Dist, V * sizeof(int), device_ptr, pitch, n * sizeof(int), n, cudaMemcpyDeviceToHost);
//show(Dist);
int round = ceil(n, B);
for (int r = 0; r < round; ++r) {
//printf("%d %d\n", r, round);
/*
cal(B, r, r, r, 1, 1);
cal(B, r, r, 0, r, 1);
cal(B, r, r, r +1, round - r -1, 1);
cal(B, r, 0, r, 1, r);
cal(B, r, r +1, r, 1, round - r -1);
cal(B, r, 0, 0, r, r);
cal(B, r, 0, r +1, round -r -1, r);
cal(B, r, r +1, 0, r, round - r -1);
cal(B, r, r +1, r +1, round -r -1, round - r -1);*/
int temp = (round - r - 1);
cal_kernel<<< 1, num_thread>>>(1, device_ptr, n, pitch, B, r, r, r, 1);
//if (r < 1) {
cal_kernel<<< r, num_thread>>>(3, device_ptr, n, pitch, B, r, r, 0, r);
cal_kernel<<< temp, num_thread>>>(3, device_ptr, n, pitch, B, r, r, r + 1, temp);
cal_kernel<<< r, num_thread>>>(4, device_ptr, n, pitch, B, r, 0, r, 1);
cal_kernel<<< temp, num_thread>>>(4, device_ptr, n, pitch, B, r, r + 1, r, 1);
cal_kernel<<< r * r, num_thread>>>(2, device_ptr, n, pitch, B, r, 0, 0, r);
cal_kernel<<< r * temp, num_thread>>>(2, device_ptr, n, pitch, B, r, 0, r + 1, temp);
cal_kernel<<< r * temp, num_thread>>>(2, device_ptr, n, pitch, B, r, r + 1, 0, r);
cal_kernel<<< temp * temp, num_thread>>>(2, device_ptr, n, pitch, B, r, r + 1, r + 1, temp);
//}
}
cudaMemcpy2D(Dist, V * sizeof(int), device_ptr, pitch, n * sizeof(int), n, cudaMemcpyDeviceToHost);
}
int main(int argc, char* argv[]) {
input(argv[1]);
int B = atoi(argv[3]);
block_FW(B);
output(argv[2]);
printf("IO time: %f\n", IO_time);
return 0;
}
|
a7229c5b011002d043e361500234ed3f2315ac28.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdio.h>
#include <cassert>
#include <vector>
#include "glog/logging.h"
#include "paddle/fluid/inference/tensorrt/plugin/swish_op_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
int SwishPlugin::initialize() TRT_NOEXCEPT { return 0; }
void SwishPlugin::terminate() TRT_NOEXCEPT {}
bool SwishPlugin::supportsFormat(
nvinfer1::DataType type, nvinfer1::PluginFormat format) const TRT_NOEXCEPT {
if (with_fp16_) {
return type == nvinfer1::DataType::kFLOAT ||
type == nvinfer1::DataType::kHALF;
}
return type == nvinfer1::DataType::kFLOAT;
}
nvinfer1::Dims SwishPlugin::getOutputDimensions(int index,
const nvinfer1::Dims *inputDims,
int nbInputs) TRT_NOEXCEPT {
assert(nbInputs == 1);
assert(index < this->getNbOutputs());
nvinfer1::Dims const &input_dims = inputDims[0];
nvinfer1::Dims output_dims = input_dims;
return output_dims;
}
template <typename T>
__device__ T math_exp(T a);
template <>
__device__ half math_exp<half>(half a) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
return hexp(a);
#endif
}
template <>
__device__ float math_exp<float>(float a) {
return expf(a);
}
template <typename T>
__global__ void swish_kernel(int num, const T *input, T *output, T beta) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < num) {
#if __CUDA_ARCH__ >= 350
output[index] =
__ldg(input + index) /
(static_cast<T>(1.0) + math_exp<T>(-beta * __ldg(input + index)));
#else
output[index] = input[index] /
(static_cast<T>(1.0) + math_exp<T>(-beta * input[index]));
#endif
}
}
template <>
__global__ void swish_kernel<half>(int num,
const half *input,
half *output,
half beta) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < num) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
output[index] =
__ldg(input + index) /
(static_cast<half>(1.0) + math_exp<half>(-beta * __ldg(input + index)));
#endif
}
}
int SwishPlugin::enqueue(int batch_size,
const void *const *inputs,
#if IS_TRT_VERSION_LT(8000)
void **outputs,
void *workspace,
hipStream_t stream) {
#else
void *const *outputs,
void *workspace,
hipStream_t stream) TRT_NOEXCEPT {
#endif
const auto &input_dims = this->getInputDims(0);
int num = batch_size;
for (int i = 0; i < input_dims.nbDims; i++) {
num *= input_dims.d[i];
}
int threads = 1024;
int blocks = (num + threads - 1) / threads;
auto type = getDataType();
if (type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. Swish-->fp32";
const float *input = reinterpret_cast<const float *>(inputs[0]);
float *output = reinterpret_cast<float *const *>(outputs)[0];
hipLaunchKernelGGL(( swish_kernel), dim3(blocks), dim3(threads), 0, stream, num, input, output, beta_);
} else if (type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. Swish-->fp16";
const half *input = reinterpret_cast<const half *>(inputs[0]);
half *output = reinterpret_cast<half *const *>(outputs)[0];
hipLaunchKernelGGL(( swish_kernel), dim3(blocks), dim3(threads), 0, stream,
num, input, output, (half)beta_);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"The Swish TRT Plugin's input type should be float or half."));
}
return hipGetLastError() != hipSuccess;
}
// Dynamic Plugin below.
#if IS_TRT_VERSION_GE(6000)
int SwishPluginDynamic::initialize() TRT_NOEXCEPT {
getPluginNamespace();
return 0;
}
size_t SwishPluginDynamic::getSerializationSize() const TRT_NOEXCEPT {
return SerializedSize(beta_) + SerializedSize(with_fp16_);
}
void SwishPluginDynamic::serialize(void *buffer) const TRT_NOEXCEPT {
SerializeValue(&buffer, beta_);
SerializeValue(&buffer, with_fp16_);
}
nvinfer1::DimsExprs SwishPluginDynamic::getOutputDimensions(
int output_index,
const nvinfer1::DimsExprs *inputs,
int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT {
return inputs[0];
}
bool SwishPluginDynamic::supportsFormatCombination(
int pos,
const nvinfer1::PluginTensorDesc *in_out,
int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_NOT_NULL(
in_out,
platform::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos,
nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos,
nb_inputs + nb_outputs));
(in_out && pos < (nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc &in = in_out[pos];
if (pos == 0) {
if (with_fp16_) {
bool res = (in.type == nvinfer1::DataType::kFLOAT ||
in.type == nvinfer1::DataType::kHALF);
// for TRT < 8.0, additionally require the kLINEAR format to work around a TensorRT crash bug
#if IS_TRT_VERSION_LT(8000)
res = res && (in.format == nvinfer1::TensorFormat::kLINEAR);
#endif
return res;
} else {
return in.type == nvinfer1::DataType::kFLOAT;
}
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType SwishPluginDynamic::getOutputDataType(
int index,
const nvinfer1::DataType *input_types,
int nb_inputs) const TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(index,
0,
platform::errors::InvalidArgument(
"The Swish Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
return input_types[0];
}
int SwishPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc,
const void *const *inputs,
void *const *outputs,
void *workspace,
hipStream_t stream) TRT_NOEXCEPT {
auto input_dims = input_desc[0].dims;
size_t num = ProductDim(input_dims);
int threads = 1024;
int blocks = (num + threads - 1) / threads;
auto input_type = input_desc[0].type;
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. Swish-->fp32";
const float *input = static_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
hipLaunchKernelGGL(( swish_kernel<float>)
, dim3(blocks), dim3(threads), 0, stream, num, input, output, beta_);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. Swish-->fp16";
const half *input = static_cast<const half *>(inputs[0]);
half *output = static_cast<half *>(outputs[0]);
hipLaunchKernelGGL(( swish_kernel<half>), dim3(blocks), dim3(threads), 0, stream,
num, input, output, static_cast<half>(beta_));
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"The Swish TRT Plugin's input type should be float or half."));
}
return hipGetLastError() != hipSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| a7229c5b011002d043e361500234ed3f2315ac28.cu | // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdio.h>
#include <cassert>
#include <vector>
#include "glog/logging.h"
#include "paddle/fluid/inference/tensorrt/plugin/swish_op_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
int SwishPlugin::initialize() TRT_NOEXCEPT { return 0; }
void SwishPlugin::terminate() TRT_NOEXCEPT {}
bool SwishPlugin::supportsFormat(
nvinfer1::DataType type, nvinfer1::PluginFormat format) const TRT_NOEXCEPT {
if (with_fp16_) {
return type == nvinfer1::DataType::kFLOAT ||
type == nvinfer1::DataType::kHALF;
}
return type == nvinfer1::DataType::kFLOAT;
}
nvinfer1::Dims SwishPlugin::getOutputDimensions(int index,
const nvinfer1::Dims *inputDims,
int nbInputs) TRT_NOEXCEPT {
assert(nbInputs == 1);
assert(index < this->getNbOutputs());
nvinfer1::Dims const &input_dims = inputDims[0];
nvinfer1::Dims output_dims = input_dims;
return output_dims;
}
template <typename T>
__device__ T math_exp(T a);
template <>
__device__ half math_exp<half>(half a) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
return hexp(a);
#endif
}
template <>
__device__ float math_exp<float>(float a) {
return expf(a);
}
template <typename T>
__global__ void swish_kernel(int num, const T *input, T *output, T beta) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < num) {
#if __CUDA_ARCH__ >= 350
output[index] =
__ldg(input + index) /
(static_cast<T>(1.0) + math_exp<T>(-beta * __ldg(input + index)));
#else
output[index] = input[index] /
(static_cast<T>(1.0) + math_exp<T>(-beta * input[index]));
#endif
}
}
template <>
__global__ void swish_kernel<half>(int num,
const half *input,
half *output,
half beta) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < num) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
output[index] =
__ldg(input + index) /
(static_cast<half>(1.0) + math_exp<half>(-beta * __ldg(input + index)));
#endif
}
}
int SwishPlugin::enqueue(int batch_size,
const void *const *inputs,
#if IS_TRT_VERSION_LT(8000)
void **outputs,
void *workspace,
cudaStream_t stream) {
#else
void *const *outputs,
void *workspace,
cudaStream_t stream) TRT_NOEXCEPT {
#endif
const auto &input_dims = this->getInputDims(0);
int num = batch_size;
for (int i = 0; i < input_dims.nbDims; i++) {
num *= input_dims.d[i];
}
int threads = 1024;
int blocks = (num + threads - 1) / threads;
auto type = getDataType();
if (type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. Swish-->fp32";
const float *input = reinterpret_cast<const float *>(inputs[0]);
float *output = reinterpret_cast<float *const *>(outputs)[0];
swish_kernel<<<blocks, threads, 0, stream>>>(num, input, output, beta_);
} else if (type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. Swish-->fp16";
const half *input = reinterpret_cast<const half *>(inputs[0]);
half *output = reinterpret_cast<half *const *>(outputs)[0];
swish_kernel<<<blocks, threads, 0, stream>>>(
num, input, output, (half)beta_);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"The Swish TRT Plugin's input type should be float or half."));
}
return cudaGetLastError() != cudaSuccess;
}
// Dynamic Plugin below.
#if IS_TRT_VERSION_GE(6000)
int SwishPluginDynamic::initialize() TRT_NOEXCEPT {
getPluginNamespace();
return 0;
}
size_t SwishPluginDynamic::getSerializationSize() const TRT_NOEXCEPT {
return SerializedSize(beta_) + SerializedSize(with_fp16_);
}
void SwishPluginDynamic::serialize(void *buffer) const TRT_NOEXCEPT {
SerializeValue(&buffer, beta_);
SerializeValue(&buffer, with_fp16_);
}
nvinfer1::DimsExprs SwishPluginDynamic::getOutputDimensions(
int output_index,
const nvinfer1::DimsExprs *inputs,
int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT {
return inputs[0];
}
bool SwishPluginDynamic::supportsFormatCombination(
int pos,
const nvinfer1::PluginTensorDesc *in_out,
int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_NOT_NULL(
in_out,
platform::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos,
nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos,
nb_inputs + nb_outputs));
(in_out && pos < (nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc &in = in_out[pos];
if (pos == 0) {
if (with_fp16_) {
bool res = (in.type == nvinfer1::DataType::kFLOAT ||
in.type == nvinfer1::DataType::kHALF);
// for TRT < 8.0, additionally require the kLINEAR format to work around a TensorRT crash bug
#if IS_TRT_VERSION_LT(8000)
res = res && (in.format == nvinfer1::TensorFormat::kLINEAR);
#endif
return res;
} else {
return in.type == nvinfer1::DataType::kFLOAT;
}
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType SwishPluginDynamic::getOutputDataType(
int index,
const nvinfer1::DataType *input_types,
int nb_inputs) const TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(index,
0,
platform::errors::InvalidArgument(
"The Swish Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
return input_types[0];
}
int SwishPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc,
const void *const *inputs,
void *const *outputs,
void *workspace,
cudaStream_t stream) TRT_NOEXCEPT {
auto input_dims = input_desc[0].dims;
size_t num = ProductDim(input_dims);
int threads = 1024;
int blocks = (num + threads - 1) / threads;
auto input_type = input_desc[0].type;
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. Swish-->fp32";
const float *input = static_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
swish_kernel<float>
<<<blocks, threads, 0, stream>>>(num, input, output, beta_);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. Swish-->fp16";
const half *input = static_cast<const half *>(inputs[0]);
half *output = static_cast<half *>(outputs[0]);
swish_kernel<half><<<blocks, threads, 0, stream>>>(
num, input, output, static_cast<half>(beta_));
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"The Swish TRT Plugin's input type should be float or half."));
}
return cudaGetLastError() != cudaSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
b5adb39008a0d2b2ffdcf1aa3016e85749c8fa6d.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file knn_app.cu
*
* @brief Simple Gunrock Application
*/
// Gunrock api
#include <gunrock/gunrock.h>
// Test utils
#include <gunrock/util/test_utils.cuh>
#include <gunrock/util/array_utils.cuh>
// Graphio include
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/graphio/labels.cuh>
// App and test base includes
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
// KNN includes
#include <gunrock/app/knn/knn_helpers.cuh>
#include <gunrock/app/knn/knn_enactor.cuh>
#include <gunrock/app/knn/knn_test.cuh>
#include <iostream>
#include <algorithm>
#include <iterator>
//#define KNN_APP_DEBUG
#ifdef KNN_APP_DEBUG
#define debug(a...) printf(a)
#else
#define debug(a...)
#endif
namespace gunrock {
namespace app {
namespace knn {
hipError_t UseParameters(util::Parameters ¶meters) {
hipError_t retval = hipSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
GUARD_CU(UseParameters_test(parameters));
GUARD_CU(parameters.Use<int>(
"n",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
0, "Number of points in dim-dimensional space", __FILE__, __LINE__));
GUARD_CU(parameters.Use<std::string>(
"labels-file",
util::REQUIRED_ARGUMENT | util::REQUIRED_PARAMETER,
"", "List of points of dim-dimensional space", __FILE__, __LINE__));
GUARD_CU(parameters.Use<bool>(
"transpose",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
false, "If false then labels will not transpose", __FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"dim",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
2, "Dimensions of space", __FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"k",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
10, "Number of k neighbors.", __FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"NUM-THREADS",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
128, "Number of threads running per block.", __FILE__, __LINE__));
GUARD_CU(parameters.Use<bool>(
"use-shared-mem",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
false, "True if kernel must use shared memory.", __FILE__, __LINE__));
GUARD_CU(parameters.Use<bool>(
"save-knn-results",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
false, "If true then knn array will save to file.", __FILE__, __LINE__));
GUARD_CU(parameters.Use<std::string>(
"knn-output-file",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
"knn_output", "File name.", __FILE__, __LINE__));
GUARD_CU(parameters.Use<float>(
"cpu-elapsed",
util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, 0.0f,
"CPU implementation, elapsed time (ms) for JSON.", __FILE__, __LINE__));
return retval;
}
/**
* @brief Run knn tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Execution parameters
* @param[in] graph Input graph
...
* @param[in] target where to perform the app
* \return hipError_t error message(s), if any
*/
template <typename GraphT, typename ArrayT>
hipError_t RunTests(util::Parameters ¶meters,
ArrayT& points,
GraphT& graph,
typename GraphT::SizeT n,
typename GraphT::SizeT dim,
typename GraphT::SizeT k,
typename GraphT::SizeT *h_knns,
typename GraphT::SizeT *ref_knns,
util::Location target) {
hipError_t retval = hipSuccess;
typedef typename GraphT::ValueT ValueT;
typedef typename GraphT::SizeT SizeT;
typedef Problem<GraphT> ProblemT;
typedef Enactor<ProblemT> EnactorT;
// CLI parameters
bool quiet_mode = parameters.Get<bool>("quiet");
int num_runs = parameters.Get<int>("num-runs");
std::string validation = parameters.Get<std::string>("validation");
bool save_knn_results = parameters.Get<bool>("save-knn-results");
std::string knn_output_file = parameters.Get<std::string>("knn-output-file");
util::Info info("knn", parameters, graph);
util::CpuTimer cpu_timer, total_timer;
cpu_timer.Start();
total_timer.Start();
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
GUARD_CU(problem.Init(graph, target));
GUARD_CU(enactor.Init(problem, target));
cpu_timer.Stop();
parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
for (int run_num = 0; run_num < num_runs; ++run_num) {
GUARD_CU(problem.Reset(points.GetPointer(util::HOST), target));
GUARD_CU(enactor.Reset(n, k, target));
util::PrintMsg("__________________________", !quiet_mode);
cpu_timer.Start();
GUARD_CU(enactor.Enact());
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
util::PrintMsg(
"--------------------------\nRun " + std::to_string(run_num) +
" elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
", #iterations = " +
std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
!quiet_mode);
if (validation == "each") {
GUARD_CU(problem.Extract(h_knns));
#ifdef KNN_APP_DEBUG
debug("extracted knns:\n");
for (int i=0; i<n; ++i){
debug("point %d\n", i);
for (int j=0; j<k; ++j){
debug("%d ", h_knns[i*k + j]);
}
debug("\n");
}
#endif
util::PrintMsg("-------------Validation-----------");
SizeT num_errors =
Validate_Results(parameters, graph, h_knns, ref_knns, points, false);
}
}
cpu_timer.Start();
GUARD_CU(problem.Extract(h_knns));
#ifdef KNN_APP_DEBUG
debug("extracted knns:\n");
for (int i=0; i<n; ++i){
debug("point %d\n", i);
for (int j=0; j<k; ++j){
debug("%d ", h_knns[i*k + j]);
}
debug("\n");
}
#endif
if (validation == "last") {
util::PrintMsg("-------------Validation-----------");
SizeT num_errors =
Validate_Results(parameters, graph, h_knns, ref_knns, points, false);
}
// compute running statistics
info.ComputeTraversalStats(enactor, (SizeT *)NULL);
// Display_Memory_Usage(problem);
#ifdef ENABLE_PERFORMANCE_PROFILING
// Display_Performance_Profiling(&enactor);
#endif
if (save_knn_results){
std::ofstream output(knn_output_file);
for (int i=0; i<n-1; ++i){
copy(h_knns + (k * i), h_knns + (k * (i+1)), std::ostream_iterator<ValueT>(output, " "));
output << "\n";
}
output.close();
}
// Clean up
GUARD_CU(enactor.Release(target));
GUARD_CU(problem.Release(target));
cpu_timer.Stop();
total_timer.Stop();
info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
return retval;
}
} // namespace knn
} // namespace app
} // namespace gunrock
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| b5adb39008a0d2b2ffdcf1aa3016e85749c8fa6d.cu | // ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file knn_app.cu
*
* @brief Simple Gunrock Application
*/
// Gunrock api
#include <gunrock/gunrock.h>
// Test utils
#include <gunrock/util/test_utils.cuh>
#include <gunrock/util/array_utils.cuh>
// Graphio include
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/graphio/labels.cuh>
// App and test base includes
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
// KNN includes
#include <gunrock/app/knn/knn_helpers.cuh>
#include <gunrock/app/knn/knn_enactor.cuh>
#include <gunrock/app/knn/knn_test.cuh>
#include <iostream>
#include <algorithm>
#include <iterator>
//#define KNN_APP_DEBUG
#ifdef KNN_APP_DEBUG
#define debug(a...) printf(a)
#else
#define debug(a...)
#endif
namespace gunrock {
namespace app {
namespace knn {
cudaError_t UseParameters(util::Parameters ¶meters) {
cudaError_t retval = cudaSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
GUARD_CU(UseParameters_test(parameters));
GUARD_CU(parameters.Use<int>(
"n",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
0, "Number of points in dim-dimensional space", __FILE__, __LINE__));
GUARD_CU(parameters.Use<std::string>(
"labels-file",
util::REQUIRED_ARGUMENT | util::REQUIRED_PARAMETER,
"", "List of points of dim-dimensional space", __FILE__, __LINE__));
GUARD_CU(parameters.Use<bool>(
"transpose",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
false, "If false then labels will not transpose", __FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"dim",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
2, "Dimensions of space", __FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"k",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
10, "Number of k neighbors.", __FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"NUM-THREADS",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
128, "Number of threads running per block.", __FILE__, __LINE__));
GUARD_CU(parameters.Use<bool>(
"use-shared-mem",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
false, "True if kernel must use shared memory.", __FILE__, __LINE__));
GUARD_CU(parameters.Use<bool>(
"save-knn-results",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
false, "If true then knn array will save to file.", __FILE__, __LINE__));
GUARD_CU(parameters.Use<std::string>(
"knn-output-file",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
"knn_output", "File name.", __FILE__, __LINE__));
GUARD_CU(parameters.Use<float>(
"cpu-elapsed",
util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, 0.0f,
"CPU implementation, elapsed time (ms) for JSON.", __FILE__, __LINE__));
return retval;
}
/**
* @brief Run knn tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
* @param[in] parameters Execution parameters
* @param[in] graph Input graph
...
* @param[in] target where to perform the app
* \return cudaError_t error message(s), if any
*/
template <typename GraphT, typename ArrayT>
cudaError_t RunTests(util::Parameters ¶meters,
ArrayT& points,
GraphT& graph,
typename GraphT::SizeT n,
typename GraphT::SizeT dim,
typename GraphT::SizeT k,
typename GraphT::SizeT *h_knns,
typename GraphT::SizeT *ref_knns,
util::Location target) {
cudaError_t retval = cudaSuccess;
typedef typename GraphT::ValueT ValueT;
typedef typename GraphT::SizeT SizeT;
typedef Problem<GraphT> ProblemT;
typedef Enactor<ProblemT> EnactorT;
// CLI parameters
bool quiet_mode = parameters.Get<bool>("quiet");
int num_runs = parameters.Get<int>("num-runs");
std::string validation = parameters.Get<std::string>("validation");
bool save_knn_results = parameters.Get<bool>("save-knn-results");
std::string knn_output_file = parameters.Get<std::string>("knn-output-file");
util::Info info("knn", parameters, graph);
util::CpuTimer cpu_timer, total_timer;
cpu_timer.Start();
total_timer.Start();
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
GUARD_CU(problem.Init(graph, target));
GUARD_CU(enactor.Init(problem, target));
cpu_timer.Stop();
parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
for (int run_num = 0; run_num < num_runs; ++run_num) {
GUARD_CU(problem.Reset(points.GetPointer(util::HOST), target));
GUARD_CU(enactor.Reset(n, k, target));
util::PrintMsg("__________________________", !quiet_mode);
cpu_timer.Start();
GUARD_CU(enactor.Enact());
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
util::PrintMsg(
"--------------------------\nRun " + std::to_string(run_num) +
" elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
", #iterations = " +
std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
!quiet_mode);
if (validation == "each") {
GUARD_CU(problem.Extract(h_knns));
#ifdef KNN_APP_DEBUG
debug("extracted knns:\n");
for (int i=0; i<n; ++i){
debug("point %d\n", i);
for (int j=0; j<k; ++j){
debug("%d ", h_knns[i*k + j]);
}
debug("\n");
}
#endif
util::PrintMsg("-------------Validation-----------");
SizeT num_errors =
Validate_Results(parameters, graph, h_knns, ref_knns, points, false);
}
}
cpu_timer.Start();
GUARD_CU(problem.Extract(h_knns));
#ifdef KNN_APP_DEBUG
debug("extracted knns:\n");
for (int i=0; i<n; ++i){
debug("point %d\n", i);
for (int j=0; j<k; ++j){
debug("%d ", h_knns[i*k + j]);
}
debug("\n");
}
#endif
if (validation == "last") {
util::PrintMsg("-------------Validation-----------");
SizeT num_errors =
Validate_Results(parameters, graph, h_knns, ref_knns, points, false);
}
// compute running statistics
info.ComputeTraversalStats(enactor, (SizeT *)NULL);
// Display_Memory_Usage(problem);
#ifdef ENABLE_PERFORMANCE_PROFILING
// Display_Performance_Profiling(&enactor);
#endif
if (save_knn_results){
std::ofstream output(knn_output_file);
for (int i=0; i<n-1; ++i){
copy(h_knns + (k * i), h_knns + (k * (i+1)), std::ostream_iterator<ValueT>(output, " "));
output << "\n";
}
output.close();
}
// Clean up
GUARD_CU(enactor.Release(target));
GUARD_CU(problem.Release(target));
cpu_timer.Stop();
total_timer.Stop();
info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
return retval;
}
} // namespace knn
} // namespace app
} // namespace gunrock
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
f380814c5741a5d42724029d7c0c859d68f9a0c5.hip | // !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=32 --gridDim=2
#include <hip/hip_runtime.h>
__global__ void race (int* A)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int idx = blockDim.x * bid + tid;
int temp = 10;
A[idx] = temp;
temp = A[idx + 1];
} | f380814c5741a5d42724029d7c0c859d68f9a0c5.cu | //pass
//--blockDim=32 --gridDim=2
#include <cuda.h>
__global__ void race (int* A)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int idx = blockDim.x * bid + tid;
int temp = 10;
A[idx] = temp;
temp = A[idx + 1];
} |
956f8f9d5b177b3fbc96aa73a655eb882834a758.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
//
// This sample demonstrates the use of streams for concurrent execution. It also
// illustrates how to introduce dependencies between CUDA streams with the
// hipStreamWaitEvent function.
//
// Devices of compute capability 2.0 or higher can overlap the kernels
//
#include <hip/hip_cooperative_groups.h>
#include <stdio.h>
namespace cg = cooperative_groups;
#include <helper_cuda.h>
#include <helper_functions.h>
// This is a kernel that does no real work but runs at least for a specified
// number of clocks
__global__ void clock_block(clock_t *d_o, clock_t clock_count) {
unsigned int start_clock = (unsigned int)clock();
clock_t clock_offset = 0;
while (clock_offset < clock_count) {
unsigned int end_clock = (unsigned int)clock();
// The code below should work like
// this (thanks to modular arithmetic):
//
// clock_offset = (clock_t) (end_clock > start_clock ?
// end_clock - start_clock :
// end_clock + (0xffffffffu - start_clock));
//
// Indeed, let m = 2^32 then
// end - start = end + m - start (mod m).
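//
// Worked example (illustrative values): if start_clock = 0xFFFFFFF0u and
// end_clock = 0x00000010u the counter has wrapped, yet the unsigned 32-bit
// subtraction end - start gives 0x20 = 32, the true number of elapsed clocks.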
clock_offset = (clock_t)(end_clock - start_clock);
}
d_o[0] = clock_offset;
}
// Single warp reduction kernel
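// Launched below with a single warp (32 threads): each thread accumulates a
// strided partial sum of d_clocks into shared memory, then a tree reduction
// halves the number of active threads each step until the block total sits in
// s_clocks[0], which is written back to d_clocks[0].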
__global__ void sum(clock_t *d_clocks, int N) {
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
__shared__ clock_t s_clocks[32];
clock_t my_sum = 0;
for (int i = threadIdx.x; i < N; i += blockDim.x) {
my_sum += d_clocks[i];
}
s_clocks[threadIdx.x] = my_sum;
cg::sync(cta);
for (int i = 16; i > 0; i /= 2) {
if (threadIdx.x < i) {
s_clocks[threadIdx.x] += s_clocks[threadIdx.x + i];
}
cg::sync(cta);
}
d_clocks[0] = s_clocks[0];
}
int main(int argc, char **argv) {
int nkernels = 8; // number of concurrent kernels
int nstreams = nkernels + 1; // use one more stream than concurrent kernel
int nbytes = nkernels * sizeof(clock_t); // number of data bytes
float kernel_time = 10; // time the kernel should run in ms
float elapsed_time; // timing variables
int cuda_device = 0;
printf("[%s] - Starting...\n", argv[0]);
// get number of kernels if overridden on the command line
if (checkCmdLineFlag(argc, (const char **)argv, "nkernels")) {
nkernels = getCmdLineArgumentInt(argc, (const char **)argv, "nkernels");
nstreams = nkernels + 1;
}
// use command-line specified CUDA device, otherwise use device with highest
// Gflops/s
cuda_device = findCudaDevice(argc, (const char **)argv);
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDevice(&cuda_device));
checkCudaErrors(hipGetDeviceProperties(&deviceProp, cuda_device));
if ((deviceProp.concurrentKernels == 0)) {
printf("> GPU does not support concurrent kernel execution\n");
printf(" CUDA kernel runs will be serialized\n");
}
printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n",
deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
// allocate host memory
clock_t *a = 0; // pointer to the array data in host memory
checkCudaErrors(hipHostMalloc((void **)&a, nbytes));
// allocate device memory
clock_t *d_a = 0; // pointers to data and init value in the device memory
checkCudaErrors(hipMalloc((void **)&d_a, nbytes));
// allocate and initialize an array of stream handles
hipStream_t *streams =
(hipStream_t *)malloc(nstreams * sizeof(hipStream_t));
for (int i = 0; i < nstreams; i++) {
checkCudaErrors(hipStreamCreate(&(streams[i])));
}
// create CUDA event handles
hipEvent_t start_event, stop_event;
checkCudaErrors(hipEventCreate(&start_event));
checkCudaErrors(hipEventCreate(&stop_event));
// the events are used for synchronization only and hence do not need to
// record timings. This also means the events do not introduce global sync
// points when recorded, which is critical to get overlap
hipEvent_t *kernelEvent;
kernelEvent = (hipEvent_t *)malloc(nkernels * sizeof(hipEvent_t));
for (int i = 0; i < nkernels; i++) {
checkCudaErrors(
hipEventCreateWithFlags(&(kernelEvent[i]), hipEventDisableTiming));
}
//////////////////////////////////////////////////////////////////////
// time execution with nkernels streams
clock_t total_clocks = 0;
#if defined(__arm__) || defined(__aarch64__)
// the kernel takes more time than the channel reset time on arm archs, so to
// prevent hangs reduce time_clocks.
clock_t time_clocks = (clock_t)(kernel_time * (deviceProp.clockRate / 100));
#else
clock_t time_clocks = (clock_t)(kernel_time * deviceProp.clockRate);
#endif
hipEventRecord(start_event, 0);
// queue nkernels in separate streams and record when they are done
for (int i = 0; i < nkernels; ++i) {
hipLaunchKernelGGL(( clock_block), dim3(1), dim3(1), 0, streams[i], &d_a[i], time_clocks);
total_clocks += time_clocks;
checkCudaErrors(hipEventRecord(kernelEvent[i], streams[i]));
// make the last stream wait for the kernel event to be recorded
checkCudaErrors(
hipStreamWaitEvent(streams[nstreams - 1], kernelEvent[i], 0));
}
// queue a sum kernel and a copy back to host in the last stream.
// the commands in this stream get dispatched as soon as all the kernel events
// have been recorded
hipLaunchKernelGGL(( sum), dim3(1), dim3(32), 0, streams[nstreams - 1], d_a, nkernels);
checkCudaErrors(hipMemcpyAsync(
a, d_a, sizeof(clock_t), hipMemcpyDeviceToHost, streams[nstreams - 1]));
// at this point the CPU has dispatched all work for the GPU and can continue
// processing other tasks in parallel
// in this sample we just wait until the GPU is done
checkCudaErrors(hipEventRecord(stop_event, 0));
checkCudaErrors(hipEventSynchronize(stop_event));
checkCudaErrors(hipEventElapsedTime(&elapsed_time, start_event, stop_event));
printf("Expected time for serial execution of %d kernels = %.3fs\n", nkernels,
nkernels * kernel_time / 1000.0f);
printf("Expected time for concurrent execution of %d kernels = %.3fs\n",
nkernels, kernel_time / 1000.0f);
printf("Measured time for sample = %.3fs\n", elapsed_time / 1000.0f);
bool bTestResult = (a[0] > total_clocks);
// release resources
for (int i = 0; i < nkernels; i++) {
hipStreamDestroy(streams[i]);
hipEventDestroy(kernelEvent[i]);
}
free(streams);
free(kernelEvent);
hipEventDestroy(start_event);
hipEventDestroy(stop_event);
hipHostFree(a);
hipFree(d_a);
if (!bTestResult) {
printf("Test failed!\n");
exit(EXIT_FAILURE);
}
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
| 956f8f9d5b177b3fbc96aa73a655eb882834a758.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
//
// This sample demonstrates the use of streams for concurrent execution. It also
// illustrates how to introduce dependencies between CUDA streams with the
// cudaStreamWaitEvent function.
//
// Devices of compute capability 2.0 or higher can overlap the kernels
//
#include <cooperative_groups.h>
#include <stdio.h>
namespace cg = cooperative_groups;
#include <helper_cuda.h>
#include <helper_functions.h>
// This is a kernel that does no real work but runs at least for a specified
// number of clocks
__global__ void clock_block(clock_t *d_o, clock_t clock_count) {
unsigned int start_clock = (unsigned int)clock();
clock_t clock_offset = 0;
while (clock_offset < clock_count) {
unsigned int end_clock = (unsigned int)clock();
// The code below should work like
// this (thanks to modular arithmetic):
//
// clock_offset = (clock_t) (end_clock > start_clock ?
// end_clock - start_clock :
// end_clock + (0xffffffffu - start_clock));
//
// Indeed, let m = 2^32 then
// end - start = end + m - start (mod m).
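//
// Worked example (illustrative values): if start_clock = 0xFFFFFFF0u and
// end_clock = 0x00000010u the counter has wrapped, yet the unsigned 32-bit
// subtraction end - start gives 0x20 = 32, the true number of elapsed clocks.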
clock_offset = (clock_t)(end_clock - start_clock);
}
d_o[0] = clock_offset;
}
// Single warp reduction kernel
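// Launched below with a single warp (32 threads): each thread accumulates a
// strided partial sum of d_clocks into shared memory, then a tree reduction
// halves the number of active threads each step until the block total sits in
// s_clocks[0], which is written back to d_clocks[0].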
__global__ void sum(clock_t *d_clocks, int N) {
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
__shared__ clock_t s_clocks[32];
clock_t my_sum = 0;
for (int i = threadIdx.x; i < N; i += blockDim.x) {
my_sum += d_clocks[i];
}
s_clocks[threadIdx.x] = my_sum;
cg::sync(cta);
for (int i = 16; i > 0; i /= 2) {
if (threadIdx.x < i) {
s_clocks[threadIdx.x] += s_clocks[threadIdx.x + i];
}
cg::sync(cta);
}
d_clocks[0] = s_clocks[0];
}
int main(int argc, char **argv) {
int nkernels = 8; // number of concurrent kernels
int nstreams = nkernels + 1; // use one more stream than concurrent kernel
int nbytes = nkernels * sizeof(clock_t); // number of data bytes
float kernel_time = 10; // time the kernel should run in ms
float elapsed_time; // timing variables
int cuda_device = 0;
printf("[%s] - Starting...\n", argv[0]);
// get number of kernels if overridden on the command line
if (checkCmdLineFlag(argc, (const char **)argv, "nkernels")) {
nkernels = getCmdLineArgumentInt(argc, (const char **)argv, "nkernels");
nstreams = nkernels + 1;
}
// use command-line specified CUDA device, otherwise use device with highest
// Gflops/s
cuda_device = findCudaDevice(argc, (const char **)argv);
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDevice(&cuda_device));
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, cuda_device));
if ((deviceProp.concurrentKernels == 0)) {
printf("> GPU does not support concurrent kernel execution\n");
printf(" CUDA kernel runs will be serialized\n");
}
printf("> Detected Compute SM %d.%d hardware with %d multi-processors\n",
deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
// allocate host memory
clock_t *a = 0; // pointer to the array data in host memory
checkCudaErrors(cudaMallocHost((void **)&a, nbytes));
// allocate device memory
clock_t *d_a = 0; // pointers to data and init value in the device memory
checkCudaErrors(cudaMalloc((void **)&d_a, nbytes));
// allocate and initialize an array of stream handles
cudaStream_t *streams =
(cudaStream_t *)malloc(nstreams * sizeof(cudaStream_t));
for (int i = 0; i < nstreams; i++) {
checkCudaErrors(cudaStreamCreate(&(streams[i])));
}
// create CUDA event handles
cudaEvent_t start_event, stop_event;
checkCudaErrors(cudaEventCreate(&start_event));
checkCudaErrors(cudaEventCreate(&stop_event));
// the events are used for synchronization only and hence do not need to
// record timings. This also means the events do not introduce global sync
// points when recorded, which is critical to get overlap
cudaEvent_t *kernelEvent;
kernelEvent = (cudaEvent_t *)malloc(nkernels * sizeof(cudaEvent_t));
for (int i = 0; i < nkernels; i++) {
checkCudaErrors(
cudaEventCreateWithFlags(&(kernelEvent[i]), cudaEventDisableTiming));
}
//////////////////////////////////////////////////////////////////////
// time execution with nkernels streams
clock_t total_clocks = 0;
#if defined(__arm__) || defined(__aarch64__)
// the kernel takes more time than the channel reset time on arm archs, so to
// prevent hangs reduce time_clocks.
clock_t time_clocks = (clock_t)(kernel_time * (deviceProp.clockRate / 100));
#else
clock_t time_clocks = (clock_t)(kernel_time * deviceProp.clockRate);
#endif
cudaEventRecord(start_event, 0);
// queue nkernels in separate streams and record when they are done
for (int i = 0; i < nkernels; ++i) {
clock_block<<<1, 1, 0, streams[i]>>>(&d_a[i], time_clocks);
total_clocks += time_clocks;
checkCudaErrors(cudaEventRecord(kernelEvent[i], streams[i]));
// make the last stream wait for the kernel event to be recorded
checkCudaErrors(
cudaStreamWaitEvent(streams[nstreams - 1], kernelEvent[i], 0));
}
// queue a sum kernel and a copy back to host in the last stream.
// the commands in this stream get dispatched as soon as all the kernel events
// have been recorded
sum<<<1, 32, 0, streams[nstreams - 1]>>>(d_a, nkernels);
checkCudaErrors(cudaMemcpyAsync(
a, d_a, sizeof(clock_t), cudaMemcpyDeviceToHost, streams[nstreams - 1]));
// at this point the CPU has dispatched all work for the GPU and can continue
// processing other tasks in parallel
// in this sample we just wait until the GPU is done
checkCudaErrors(cudaEventRecord(stop_event, 0));
checkCudaErrors(cudaEventSynchronize(stop_event));
checkCudaErrors(cudaEventElapsedTime(&elapsed_time, start_event, stop_event));
printf("Expected time for serial execution of %d kernels = %.3fs\n", nkernels,
nkernels * kernel_time / 1000.0f);
printf("Expected time for concurrent execution of %d kernels = %.3fs\n",
nkernels, kernel_time / 1000.0f);
printf("Measured time for sample = %.3fs\n", elapsed_time / 1000.0f);
bool bTestResult = (a[0] > total_clocks);
// release resources
for (int i = 0; i < nkernels; i++) {
cudaStreamDestroy(streams[i]);
cudaEventDestroy(kernelEvent[i]);
}
free(streams);
free(kernelEvent);
cudaEventDestroy(start_event);
cudaEventDestroy(stop_event);
cudaFreeHost(a);
cudaFree(d_a);
if (!bTestResult) {
printf("Test failed!\n");
exit(EXIT_FAILURE);
}
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
|
2a3e8d45c77e38eb5c8bb3d451e6dea47c676212.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <boost/scoped_ptr.hpp>
#include <cuv/matrix_ops/matrix_ops.hpp>
#include "opt.hpp"
#define sgn(a) ((a==(typeof(a))0) ? 0.f : copysign(1.f,a))
#define DIVUP(X, Y) (((X)%(Y)!=0) ? X/Y+1 : X/Y)
namespace cuv { namespace libs { namespace opt {
#define LOGREG_THREADS 128
#define LOGREG_GRAD_THREADS_X 128
#define LOGREG_GRAD_THREADS_Y 4
namespace impl{
/**
This version expects probs with patterns in the second dimension (shape n_labels x n_patterns),
so consecutive threads read consecutive patterns, the global loads are coalesced, and this is
the more efficient of the two kernels.
*/
template<class V, class V2>
__global__
void multinomial_logistic_loss_kernel(
V* true_label_log_probs, V* correct_probs,
const unsigned int n_patterns, const unsigned int n_labels,
const V* probs, const V2* labels, const V* maxprobs){
const int tidx = blockIdx.x * LOGREG_THREADS + threadIdx.x;
if(tidx < n_patterns){
const unsigned int label = labels[tidx];
const float maxp = maxprobs[tidx];
const float labelp = probs[label * n_patterns + tidx];
true_label_log_probs[tidx] = __logf(labelp);
if(labelp != maxp){
correct_probs[tidx] = 0;
}else{
unsigned int n_max = 0;
for(unsigned int i=0; i<n_labels; i++){
n_max += probs[i * n_patterns + tidx] == maxp;
}
correct_probs[tidx] = 1.f / (float) n_max;
}
}
}
/**
this version expects probs with patterns in the first dimension (shape n_patterns x n_labels);
each thread's loads are then strided by n_labels and not coalesced, so it is less efficient.
*/
template<class V, class V2>
__global__
void multinomial_logistic_loss_kernel_t(
V* true_label_log_probs, V* correct_probs,
const unsigned int n_patterns, const unsigned int n_labels,
const V* probs, const V2* labels, const V* maxprobs){
const int tidx = blockIdx.x * LOGREG_THREADS + threadIdx.x;
if(tidx < n_patterns){
const unsigned int label = labels[tidx];
const float maxp = maxprobs[tidx];
// TODO this is not coalesced!
const float labelp = probs[tidx * n_labels + label];
true_label_log_probs[tidx] = __logf(labelp);
if(labelp != maxp){
correct_probs[tidx] = 0;
}else{
unsigned int n_max = 0;
for(unsigned int i=0; i<n_labels; i++){
n_max += probs[tidx * n_labels + i] == maxp;
}
correct_probs[tidx] = 1.f / (float) n_max;
}
}
}
template<bool add, class V, class V2>
__global__
void multinomial_logistic_loss_grad_kernel(
V* grads, const V* probs, const V2* labels, unsigned int n_patterns, unsigned int n_labels, float fact)
{
const unsigned int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x;
const unsigned int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y;
const unsigned int tidx = ty * n_patterns + tx;
if(ty < n_labels && tx < n_patterns){
const unsigned int label = labels[tx];
float v = fact * ((label == ty) - probs[tidx]);
if(add)
grads[tidx] += v;
else
grads[tidx] = v;
}
}
/**
transposed version.
*/
template<bool add, class V, class V2>
__global__
void multinomial_logistic_loss_grad_kernel_t(
V* grads, const V* probs, const V2* labels,
unsigned int n_patterns, unsigned int n_labels, float fact)
{
// note: X, Y swapped for transposed version
const unsigned int tx = blockIdx.x * LOGREG_GRAD_THREADS_Y + threadIdx.x;
const unsigned int ty = blockIdx.y * LOGREG_GRAD_THREADS_X + threadIdx.y;
const unsigned int tidx = ty * n_labels + tx;
if(ty < n_patterns && tx < n_labels){
const unsigned int label = (unsigned int) (labels[ty] + 0.000001f);
float v = fact * ((label == tx) - probs[tidx]);
if(add)
grads[tidx] += v;
else
grads[tidx] = v;
}
}
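// softmax_derivative: applies the softmax Jacobian to `residual`, i.e. per
// variable dst = softmax_act * (residual - <softmax_act, residual>), where the
// inner product runs over the value (non-variable) dimension; a non-zero
// fact_old additionally blends fact_old times the previous dst back in.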
template<class V, class M, class L>
void softmax_derivative(cuv::tensor<V, M, L>& dst, const cuv::tensor<V, M, L>& softmax_act, const cuv::tensor<V,M,L>& residual, unsigned int vardim, float fact_old){
typedef typename cuv::tensor<V, host_memory_space>::index_type index_type;
const index_type n_variables = dst.shape(vardim);
const index_type n_vals = dst.shape(!vardim);
boost::scoped_ptr<cuv::tensor<V,M,L> > tmp;
if(fact_old != 0.f){
// remember previous value for end
tmp.reset(new cuv::tensor<V,M,L>(dst.copy()));
}
cuv::tensor<V,M> red (n_variables, dst.m_allocator);
cuv::tensor<V,M,L> prod (softmax_act.shape(), dst.m_allocator);
cuv::apply_binary_functor(prod,softmax_act,residual,BF_MULT);
if(vardim==1){
cuv::reduce_to_row (red, prod,RF_ADD, -1.f);
cuv::matrix_op_vec(dst, residual, red, dst.ndim()-1, BF_ADD);
}
else{
cuv::reduce_to_col(red, prod,RF_ADD, -1.f);
cuv::matrix_op_vec(dst, residual, red, 0, BF_ADD);
}
dst *= softmax_act;
if(tmp)
cuv::apply_binary_functor(dst, *tmp, BF_XPBY, fact_old);
}
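// softmax: computes a numerically stable softmax over the value dimension for
// each of the shape(vardim) variables via the log-sum-exp trick: the
// RF_LOGADDEXP reduction yields the (negated) log of the summed exponentials,
// which is broadcast-added to src before exponentiating, so that
// dst = exp(src - logsumexp(src)).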
template<class V, class M, class L>
void softmax(cuv::tensor<V, M,L>& dst, const cuv::tensor<V, M,L>& src, unsigned int vardim){
typedef typename cuv::tensor<V, M, L>::index_type index_type;
const index_type n_variables = dst.shape( vardim);
cuv::tensor<V,M> red(cuv::extents[n_variables]);
if(vardim==1) cuv::reduce_to_row(red, src, RF_LOGADDEXP, -1.f);
else cuv::reduce_to_col(red, src, RF_LOGADDEXP, -1.f);
if(dst.ptr() != src.ptr()){
dst = src.copy();
}
if(vardim==1) cuv::matrix_plus_row(dst,red);
else cuv::matrix_plus_col(dst,red);
cuv::apply_scalar_functor(dst,SF_EXP);
}
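// adagrad_kernel: AdaGrad step with an L1 (sparse) penalty applied by
// soft-thresholding. Per element: sW += dW^2; lr_i = learnrate/(sqrt(sW)+delta);
// w = soft_threshold(w - lr_i*dW, lr_i*sparsedecay). The `decay` argument is
// carried through but not used inside the kernel.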
template<class T>
__global__ void adagrad_kernel(T* Wptr, const T* dWptr, T* sWptr, T learnrate, T delta, T decay, T sparsedecay, unsigned int size) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int off = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < size; i += off){
sWptr[i] += dWptr[i] * dWptr[i];
float lr = learnrate / (sqrt(sWptr[i]) + delta);
/*Wptr[i] = Wptr[i] - lr * (dWptr[i]);*/
float f = Wptr[i] - lr * dWptr[i];
Wptr[i] = sgn(f) * max(0.f, fabs(f) - lr * sparsedecay);
}
}
template<class V, class L>
void adagrad(tensor<V,host_memory_space, L>& W, const tensor<V,host_memory_space, L>& dW, tensor<V,host_memory_space, L>& sW, const float& learnrate, const float& delta, const float& decay, const float& sparsedecay){
unsigned int size = W.size();
V* Wptr = W.ptr();
const V* dWptr = dW.ptr();
V* sWptr = sW.ptr();
for(unsigned int i=0; i < size; i++){
sWptr[i] += dWptr[i] * dWptr[i];
float lr = learnrate / (sqrt(sWptr[i]) + delta);
/*Wptr[i] = Wptr[i] - lr * (dWptr[i]);*/
float f = Wptr[i] - lr * dWptr[i];
Wptr[i] = sgn(f) * max(0.f, fabs(f) - learnrate * sparsedecay/lr);
}
}
template<class V, class L>
void adagrad(tensor<V,dev_memory_space,L>& W, const tensor<V,dev_memory_space,L>& dW, tensor<V,dev_memory_space,L>& sW, const float& learnrate, const float& delta, const float& decay, const float& sparsedecay){
unsigned int size = dW.size();
unsigned int num_threads = 512;
unsigned int num_blocks = min(512,(unsigned int)ceil((float)dW.size() / num_threads));
hipLaunchKernelGGL(( adagrad_kernel), dim3(num_threads), dim3(num_blocks), 0, 0, W.ptr(), dW.ptr(), sW.ptr(), learnrate,delta,decay,sparsedecay, size);
cuvSafeCall(hipDeviceSynchronize());
}
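// rmsprop_kernel: RMSProp step. Per element: sW = grad_avg*sW +
// (1-grad_avg)*dW^2; lr_i = learnrate/(sqrt(sW)+delta); the updated weight
// w - lr_i*dW is then soft-thresholded with threshold learnrate*sparsedecay/lr_i.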
template<class T>
__global__ void rmsprop_kernel(T* Wptr, const T* dWptr, T* sWptr, T learnrate, T delta, T decay, T sparsedecay, unsigned int size, float grad_avg) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int off = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < size; i += off){
sWptr[i] = grad_avg * sWptr[i] + (1.f-grad_avg) * dWptr[i] * dWptr[i];
float lr = learnrate / (sqrt(sWptr[i]) + delta);
/*Wptr[i] = Wptr[i] - lr * (dWptr[i]);*/
float f = Wptr[i] - lr * dWptr[i];
Wptr[i] = sgn(f) * max(0.f, fabs(f) - learnrate * sparsedecay/lr);
}
}
template<class V, class L>
void rmsprop(tensor<V,host_memory_space, L>& W, const tensor<V,host_memory_space, L>& dW, tensor<V,host_memory_space, L>& sW, const float& learnrate, const float& delta, const float& decay, const float& sparsedecay, const float& grad_avg){
unsigned int size = W.size();
V* Wptr = W.ptr();
const V* dWptr = dW.ptr();
V* sWptr = sW.ptr();
for(unsigned int i=0; i < size; i++){
sWptr[i] = grad_avg * sWptr[i] + (1.f-grad_avg) * dWptr[i] * dWptr[i];
float lr = learnrate / (sqrt(sWptr[i]) + delta);
/*Wptr[i] = Wptr[i] - lr * (dWptr[i]);*/
float f = Wptr[i] - lr * dWptr[i];
Wptr[i] = sgn(f) * max(0.f, fabs(f) - learnrate * sparsedecay/lr);
}
}
template<class V, class L>
void rmsprop(tensor<V,dev_memory_space,L>& W, const tensor<V,dev_memory_space,L>& dW, tensor<V,dev_memory_space,L>& sW, const float& learnrate, const float& delta, const float& decay, const float& sparsedecay, const float& grad_avg){
unsigned int size = dW.size();
unsigned int num_threads = 512;
unsigned int num_blocks = min(512,(unsigned int)ceil((float)dW.size() / num_threads));
hipLaunchKernelGGL(( rmsprop_kernel), dim3(num_threads), dim3(num_blocks), 0, 0, W.ptr(), dW.ptr(), sW.ptr(), learnrate,delta,decay,sparsedecay, size, grad_avg);
cuvSafeCall(hipDeviceSynchronize());
}
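// Nesterov-accelerated RMSProp: an RMSProp step followed by a momentum extrapolation against the
// previous weights (oldW). The per-weight learning rate is grown by (1+step_adapt) when the momentum
// term and the combined step agree in sign, shrunk by (1-step_adapt) otherwise, and clipped to
// [lr_min, lr_max].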
template<class T>
__global__ void na_rmsprop(T* Wptr, const T* dWptr, T* oldWptr, T* sWptr, T* lrptr, T momentum, T grad_avg, T step_adapt, T delta, T lr_max, T lr_min, unsigned int size) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int off = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < size; i += off){
sWptr[i] = grad_avg * sWptr[i] + (1.f-grad_avg) * dWptr[i] * dWptr[i];
float upd = lrptr[i] * dWptr[i] / (sqrt(sWptr[i])+delta);
float tmp = Wptr[i] - upd;
float v = momentum*(tmp - oldWptr[i]);
float f = tmp + v;
Wptr[i] = sgn(f) * max(0.f, fabs(f) /*- learnrate * sparsedecay/lr*/);
oldWptr[i] = tmp;
float lr;
if(sgn(v) == sgn(v + upd))
lr = lrptr[i] * (1 + step_adapt);
else
lr = lrptr[i] * (1 - step_adapt);
if(lr > lr_max)
lrptr[i] = lr_max;
else if(lr < lr_min)
lrptr[i] = lr_min;
else
lrptr[i] = lr;
}
}
template<class V, class L>
void na_rmsprop(tensor<V,host_memory_space, L>& W, const tensor<V,host_memory_space, L>& dW, tensor<V,host_memory_space, L>& oldW, tensor<V,host_memory_space, L>& sW, tensor<V,host_memory_space, L>& learnrates, const float& momentum, const float& grad_avg, const float& step_adapt, const float& delta, const float& lr_max, const float& lr_min){
unsigned int size = W.size();
V* Wptr = W.ptr();
const V* dWptr = dW.ptr();
V* oldWptr = oldW.ptr();
V* sWptr = sW.ptr();
V* lrptr = learnrates.ptr();
for(unsigned int i=0; i < size; i++){
sWptr[i] = grad_avg * sWptr[i] + (1.f-grad_avg) * dWptr[i] * dWptr[i];
float upd = lrptr[i] * dWptr[i] / (sqrt(sWptr[i])+delta);
float tmp = Wptr[i] - upd;
float v = momentum*(tmp - oldWptr[i]);
float f = tmp + v;
Wptr[i] = sgn(f) * max(0.f, fabs(f) /*- learnrate * sparsedecay/lr*/);
oldWptr[i] = tmp;
float lr;
if(sgn(v) == sgn(v + upd))
lr = lrptr[i] * (1 + step_adapt);
else
lr = lrptr[i] * (1 - step_adapt);
if(lr > lr_max)
lrptr[i] = lr_max;
else if(lr < lr_min)
lrptr[i] = lr_min;
else
lrptr[i] = lr;
}
}
template<class V, class L>
void na_rmsprop(tensor<V,dev_memory_space,L>& W, const tensor<V,dev_memory_space,L>& dW, tensor<V,dev_memory_space,L>& oldW, tensor<V,dev_memory_space,L>& sW, tensor<V,dev_memory_space,L>& learnrates, const float& momentum, const float& grad_avg, const float& step_adapt, const float& delta, const float& lr_max, const float& lr_min){
unsigned int size = dW.size();
unsigned int num_threads = 512;
unsigned int num_blocks = min(512,(unsigned int)ceil((float)dW.size() / num_threads));
hipLaunchKernelGGL(( na_rmsprop), dim3(num_threads), dim3(num_blocks), 0, 0, W.ptr(), dW.ptr(), oldW.ptr(), sW.ptr(), learnrates.ptr(), momentum, grad_avg, step_adapt, delta, lr_max, lr_min, size);
cuvSafeCall(hipDeviceSynchronize());
}
}
template<class V, class V2, class M, class L>
std::pair<float, float> multinomial_logistic_loss(
cuv::tensor<V, M, L>& softmaxX,
const cuv::tensor<V, M, L>& X,
const cuv::tensor<V2, M, L>& Y,
int pattern_axis,
boost::shared_ptr<allocator> alloc){
int n_patterns = X.shape(pattern_axis);
int n_labels = X.size() / n_patterns;
cuvAssert(Y.ndim() == 1);
cuvAssert(Y.shape(0) == n_patterns);
// find maximum over columns
tensor<V, M, L> red(cuv::extents[n_patterns], alloc);
// determine softmax of X
if(pattern_axis == 0)
reduce_to_col(red, X, RF_MAX, -1.f, 0.f);
else if(pattern_axis == X.ndim() - 1)
reduce_to_row(red, X, RF_MAX, -1.f, 0.f);
else{
cuvAssert(false /* illegal dimension in multinomial_logistic_loss */);
}
matrix_op_vec(softmaxX, X, red, pattern_axis, BF_ADD);
apply_scalar_functor(softmaxX, SF_EXP);
if(pattern_axis == 0){
reduce_to_col(red, softmaxX, RF_ADD);
}else{
reduce_to_row(red, softmaxX, RF_ADD);
}
matrix_op_vec(softmaxX, softmaxX, red, pattern_axis, BF_DIV);
tensor<V, M, L> true_label_log_probs(n_patterns, alloc);
tensor<V, M, L> correct_probs(n_patterns, alloc);
if(pattern_axis == 0){
reduce_to_col(red, softmaxX, RF_MAX);
}else{
reduce_to_row(red, softmaxX, RF_MAX);
}
dim3 threads(LOGREG_THREADS, 1);
dim3 blocks(DIVUP(n_patterns, LOGREG_THREADS), 1);
using namespace impl;
if(pattern_axis == 0){
// TODO this kernel is suboptimal!
hipLaunchKernelGGL(( multinomial_logistic_loss_kernel_t), dim3(blocks), dim3(threads), 0, 0,
true_label_log_probs.ptr(), correct_probs.ptr(),
n_patterns, n_labels,
softmaxX.ptr(), Y.ptr(), red.ptr()
);
}else{
hipLaunchKernelGGL(( multinomial_logistic_loss_kernel), dim3(blocks), dim3(threads), 0, 0,
true_label_log_probs.ptr(), correct_probs.ptr(),
n_patterns, n_labels,
softmaxX.ptr(), Y.ptr(), red.ptr()
);
}
cuvSafeCall(hipDeviceSynchronize());
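// retval.first : negative log-likelihood of the true labels, summed over the batch.
// retval.second: classification error rate (1 - mean accuracy); ties at the maximum probability are
// credited fractionally (1/n_max) by the kernels above.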
std::pair<float, float> retval;
retval.first = -cuv::sum(true_label_log_probs);
retval.second = 1.f - cuv::mean(correct_probs);
return retval;
}
template<class V, class V2, class M, class L>
void multinomial_logistic_loss_grad(
cuv::tensor<V, M, L>& dmll_dX,
const cuv::tensor<V, M, L>& X,
const cuv::tensor<V2, M, L>& Y,
int pattern_axis, float fact_new, bool add
){
int n_patterns = X.shape(pattern_axis);
int n_labels = X.size() / n_patterns;
cuvAssert(X.shape() == dmll_dX.shape());
cuvAssert(Y.ndim() == 1);
cuvAssert(Y.shape(0) == n_patterns);
using namespace impl;
if(pattern_axis == 0){
// swapped X, Y for ``transposed'' kernel
dim3 threads(LOGREG_GRAD_THREADS_Y, LOGREG_GRAD_THREADS_X);
dim3 blocks(DIVUP(n_labels, LOGREG_GRAD_THREADS_Y),
DIVUP(n_patterns, LOGREG_GRAD_THREADS_X));
if(!add){
hipLaunchKernelGGL(( multinomial_logistic_loss_grad_kernel_t<false>), dim3(blocks), dim3(threads), 0, 0, dmll_dX.ptr(),
X.ptr(), Y.ptr(), n_patterns, n_labels, -1.f * fact_new);
}else{
hipLaunchKernelGGL(( multinomial_logistic_loss_grad_kernel_t<true>), dim3(blocks), dim3(threads), 0, 0, dmll_dX.ptr(),
X.ptr(), Y.ptr(), n_patterns, n_labels, -1.f * fact_new);
}
}else{
dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y);
dim3 blocks(DIVUP(n_patterns, LOGREG_GRAD_THREADS_X),
DIVUP(n_labels, LOGREG_GRAD_THREADS_Y));
if(!add)
hipLaunchKernelGGL(( multinomial_logistic_loss_grad_kernel<false>), dim3(blocks), dim3(threads), 0, 0, dmll_dX.ptr(),
X.ptr(), Y.ptr(), n_patterns, n_labels, -1.f * fact_new);
else
hipLaunchKernelGGL(( multinomial_logistic_loss_grad_kernel<true>), dim3(blocks), dim3(threads), 0, 0, dmll_dX.ptr(),
X.ptr(), Y.ptr(), n_patterns, n_labels, -1.f * fact_new);
}
}
template<class V, class M, class L>
void adagrad(tensor<V,M,L>& W, const tensor<V,M,L>& dW, tensor<V,M,L>& sW, const float& learnrate, const float& delta, const float& decay, const float& sparsedecay){
cuvAssert(equal_shape(W,dW));
cuvAssert(equal_shape(W,sW));
impl::adagrad(W,dW,sW,learnrate,delta,decay,sparsedecay);
}
template<class V, class M, class L>
void rmsprop(tensor<V,M,L>& W, const tensor<V,M,L>& dW, tensor<V,M,L>& sW, const float& learnrate, const float& delta, const float& decay, const float& sparsedecay, const float& grad_avg){
cuvAssert(equal_shape(W,dW));
cuvAssert(equal_shape(W,sW));
impl::rmsprop(W,dW,sW,learnrate,delta,decay,sparsedecay,grad_avg);
}
template<class V, class M,class L>
void softmax_derivative(cuv::tensor<V, M,L>& dst, const cuv::tensor<V, M,L>& softmax_act, const cuv::tensor<V,M,L>& residual,unsigned int vardim, float fact_old){
cuvAssert(equal_shape(dst,softmax_act));
cuvAssert(equal_shape(dst,residual));
cuvAssert(vardim == 0 || vardim==1);
impl::softmax_derivative(dst,softmax_act,residual,vardim, fact_old);
}
template<class V, class M, class L>
void softmax(cuv::tensor<V, M,L>& dst, const cuv::tensor<V, M,L>& src,unsigned int vardim){
cuvAssert(equal_shape(dst,src));
cuvAssert(vardim == 0 || vardim==1);
impl::softmax(dst,src,vardim);
}
template<class V, class M, class L>
void na_rmsprop(tensor<V,M,L>& W, const tensor<V,M,L>& dW, tensor<V,M,L>& oldW, tensor<V,M,L>& sW, tensor<V,M,L>& learnrates, const float& momentum, const float& grad_avg, const float& step_adapt, const float& delta, const float& lr_max, const float& lr_min){
cuvAssert(equal_shape(W,dW));
cuvAssert(equal_shape(W,oldW));
cuvAssert(equal_shape(W,sW));
cuvAssert(equal_shape(W,learnrates));
impl::na_rmsprop(W,dW,oldW,sW,learnrates,momentum,grad_avg,step_adapt,delta,lr_max,lr_min);
}
#define TENSOR(V,M,L) cuv::tensor<V,M,L>
#define INSTANTIATE(V,M,L) \
template void softmax_derivative(TENSOR(V,M,L)&, const TENSOR(V,M,L)&, const TENSOR(V,M,L)&,unsigned int,float);\
template void softmax(TENSOR(V,M,L)&, const TENSOR(V,M,L)&,unsigned int); \
template void adagrad(TENSOR(V,M,L)&, const TENSOR(V,M,L)&,TENSOR(V,M,L)&,const float&, const float&, const float&, const float&); \
template void rmsprop(TENSOR(V,M,L)&, const TENSOR(V,M,L)&,TENSOR(V,M,L)&,const float&, const float&, const float&, const float&, const float&); \
template void na_rmsprop(TENSOR(V,M,L)&,const TENSOR(V,M,L)&,TENSOR(V,M,L)&,TENSOR(V,M,L)&,TENSOR(V,M,L)&,const float&, const float&, const float&, const float&, const float&, const float&);
#define INSTANTIATE_MLL(V,V2,M,L) \
template std::pair<float, float> multinomial_logistic_loss(TENSOR(V,M,L)&, const TENSOR(V,M,L)&, const TENSOR(V2,M,L)&, int pattern_axis, boost::shared_ptr<allocator>);\
template void multinomial_logistic_loss_grad(TENSOR(V,M,L)&, const TENSOR(V,M,L)&, const TENSOR(V2,M,L)&, int pattern_axis, float, bool add);
INSTANTIATE(float,host_memory_space,row_major);
INSTANTIATE(float,host_memory_space,column_major);
INSTANTIATE(float,dev_memory_space,row_major);
INSTANTIATE_MLL(float,float,dev_memory_space,row_major);
INSTANTIATE_MLL(float,unsigned int,dev_memory_space,row_major);
} } }
| 2a3e8d45c77e38eb5c8bb3d451e6dea47c676212.cu | #include <boost/scoped_ptr.hpp>
#include <cuv/matrix_ops/matrix_ops.hpp>
#include "opt.hpp"
#define sgn(a) ((a==(typeof(a))0) ? 0.f : copysign(1.f,a))
#define DIVUP(X, Y) (((X)%(Y)!=0) ? X/Y+1 : X/Y)
namespace cuv { namespace libs { namespace opt {
#define LOGREG_THREADS 128
#define LOGREG_GRAD_THREADS_X 128
#define LOGREG_GRAD_THREADS_Y 4
namespace impl{
/**
This is for patterns in the second dimension, and is more efficient.
*/
template<class V, class V2>
__global__
void multinomial_logistic_loss_kernel(
V* true_label_log_probs, V* correct_probs,
const unsigned int n_patterns, const unsigned int n_labels,
const V* probs, const V2* labels, const V* maxprobs){
const int tidx = blockIdx.x * LOGREG_THREADS + threadIdx.x;
if(tidx < n_patterns){
const unsigned int label = labels[tidx];
const float maxp = maxprobs[tidx];
const float labelp = probs[label * n_patterns + tidx];
true_label_log_probs[tidx] = __logf(labelp);
if(labelp != maxp){
correct_probs[tidx] = 0;
}else{
unsigned int n_max = 0;
for(unsigned int i=0; i<n_labels; i++){
n_max += probs[i * n_patterns + tidx] == maxp;
}
correct_probs[tidx] = 1.f / (float) n_max;
}
}
}
/**
this is for patterns in the first dimension, and is inefficient.
*/
template<class V, class V2>
__global__
void multinomial_logistic_loss_kernel_t(
V* true_label_log_probs, V* correct_probs,
const unsigned int n_patterns, const unsigned int n_labels,
const V* probs, const V2* labels, const V* maxprobs){
const int tidx = blockIdx.x * LOGREG_THREADS + threadIdx.x;
if(tidx < n_patterns){
const unsigned int label = labels[tidx];
const float maxp = maxprobs[tidx];
// TODO this is not coalesced!
const float labelp = probs[tidx * n_labels + label];
true_label_log_probs[tidx] = __logf(labelp);
if(labelp != maxp){
correct_probs[tidx] = 0;
}else{
unsigned int n_max = 0;
for(unsigned int i=0; i<n_labels; i++){
n_max += probs[tidx * n_labels + i] == maxp;
}
correct_probs[tidx] = 1.f / (float) n_max;
}
}
}
template<bool add, class V, class V2>
__global__
void multinomial_logistic_loss_grad_kernel(
V* grads, const V* probs, const V2* labels, unsigned int n_patterns, unsigned int n_labels, float fact)
{
const unsigned int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x;
const unsigned int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y;
const unsigned int tidx = ty * n_patterns + tx;
if(ty < n_labels && tx < n_patterns){
const unsigned int label = labels[tx];
float v = fact * ((label == ty) - probs[tidx]);
if(add)
grads[tidx] += v;
else
grads[tidx] = v;
}
}
/**
transposed version.
*/
template<bool add, class V, class V2>
__global__
void multinomial_logistic_loss_grad_kernel_t(
V* grads, const V* probs, const V2* labels,
unsigned int n_patterns, unsigned int n_labels, float fact)
{
// note: X, Y swapped for transposed version
const unsigned int tx = blockIdx.x * LOGREG_GRAD_THREADS_Y + threadIdx.x;
const unsigned int ty = blockIdx.y * LOGREG_GRAD_THREADS_X + threadIdx.y;
const unsigned int tidx = ty * n_labels + tx;
if(ty < n_patterns && tx < n_labels){
const unsigned int label = (unsigned int) (labels[ty] + 0.000001f);
float v = fact * ((label == tx) - probs[tidx]);
if(add)
grads[tidx] += v;
else
grads[tidx] = v;
}
}
template<class V, class M, class L>
void softmax_derivative(cuv::tensor<V, M, L>& dst, const cuv::tensor<V, M, L>& softmax_act, const cuv::tensor<V,M,L>& residual, unsigned int vardim, float fact_old){
typedef typename cuv::tensor<V, host_memory_space>::index_type index_type;
const index_type n_variables = dst.shape(vardim);
const index_type n_vals = dst.shape(!vardim);
boost::scoped_ptr<cuv::tensor<V,M,L> > tmp;
if(fact_old != 0.f){
// remember previous value for end
tmp.reset(new cuv::tensor<V,M,L>(dst.copy()));
}
cuv::tensor<V,M> red (n_variables, dst.m_allocator);
cuv::tensor<V,M,L> prod (softmax_act.shape(), dst.m_allocator);
cuv::apply_binary_functor(prod,softmax_act,residual,BF_MULT);
if(vardim==1){
cuv::reduce_to_row (red, prod,RF_ADD, -1.f);
cuv::matrix_op_vec(dst, residual, red, dst.ndim()-1, BF_ADD);
}
else{
cuv::reduce_to_col(red, prod,RF_ADD, -1.f);
cuv::matrix_op_vec(dst, residual, red, 0, BF_ADD);
}
dst *= softmax_act;
if(tmp)
cuv::apply_binary_functor(dst, *tmp, BF_XPBY, fact_old);
}
template<class V, class M, class L>
void softmax(cuv::tensor<V, M,L>& dst, const cuv::tensor<V, M,L>& src, unsigned int vardim){
typedef typename cuv::tensor<V, M, L>::index_type index_type;
const index_type n_variables = dst.shape( vardim);
cuv::tensor<V,M> red(cuv::extents[n_variables]);
if(vardim==1) cuv::reduce_to_row(red, src, RF_LOGADDEXP, -1.f);
else cuv::reduce_to_col(red, src, RF_LOGADDEXP, -1.f);
if(dst.ptr() != src.ptr()){
dst = src.copy();
}
if(vardim==1) cuv::matrix_plus_row(dst,red);
else cuv::matrix_plus_col(dst,red);
cuv::apply_scalar_functor(dst,SF_EXP);
}
template<class T>
__global__ void adagrad_kernel(T* Wptr, const T* dWptr, T* sWptr, T learnrate, T delta, T decay, T sparsedecay, unsigned int size) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int off = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < size; i += off){
sWptr[i] += dWptr[i] * dWptr[i];
float lr = learnrate / (sqrt(sWptr[i]) + delta);
/*Wptr[i] = Wptr[i] - lr * (dWptr[i]);*/
float f = Wptr[i] - lr * dWptr[i];
Wptr[i] = sgn(f) * max(0.f, fabs(f) - lr * sparsedecay);
}
}
template<class V, class L>
void adagrad(tensor<V,host_memory_space, L>& W, const tensor<V,host_memory_space, L>& dW, tensor<V,host_memory_space, L>& sW, const float& learnrate, const float& delta, const float& decay, const float& sparsedecay){
unsigned int size = W.size();
V* Wptr = W.ptr();
const V* dWptr = dW.ptr();
V* sWptr = sW.ptr();
for(unsigned int i=0; i < size; i++){
sWptr[i] += dWptr[i] * dWptr[i];
float lr = learnrate / (sqrt(sWptr[i]) + delta);
/*Wptr[i] = Wptr[i] - lr * (dWptr[i]);*/
float f = Wptr[i] - lr * dWptr[i];
Wptr[i] = sgn(f) * max(0.f, fabs(f) - learnrate * sparsedecay/lr);
}
}
template<class V, class L>
void adagrad(tensor<V,dev_memory_space,L>& W, const tensor<V,dev_memory_space,L>& dW, tensor<V,dev_memory_space,L>& sW, const float& learnrate, const float& delta, const float& decay, const float& sparsedecay){
unsigned int size = dW.size();
unsigned int num_threads = 512;
unsigned int num_blocks = min(512,(unsigned int)ceil((float)dW.size() / num_threads));
adagrad_kernel<<< num_threads, num_blocks>>>(W.ptr(), dW.ptr(), sW.ptr(), learnrate,delta,decay,sparsedecay, size);
cuvSafeCall(cudaThreadSynchronize());
}
template<class T>
__global__ void rmsprop_kernel(T* Wptr, const T* dWptr, T* sWptr, T learnrate, T delta, T decay, T sparsedecay, unsigned int size, float grad_avg) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int off = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < size; i += off){
sWptr[i] = grad_avg * sWptr[i] + (1.f-grad_avg) * dWptr[i] * dWptr[i];
float lr = learnrate / (sqrt(sWptr[i]) + delta);
/*Wptr[i] = Wptr[i] - lr * (dWptr[i]);*/
float f = Wptr[i] - lr * dWptr[i];
Wptr[i] = sgn(f) * max(0.f, fabs(f) - learnrate * sparsedecay/lr);
}
}
template<class V, class L>
void rmsprop(tensor<V,host_memory_space, L>& W, const tensor<V,host_memory_space, L>& dW, tensor<V,host_memory_space, L>& sW, const float& learnrate, const float& delta, const float& decay, const float& sparsedecay, const float& grad_avg){
unsigned int size = W.size();
V* Wptr = W.ptr();
const V* dWptr = dW.ptr();
V* sWptr = sW.ptr();
for(unsigned int i=0; i < size; i++){
sWptr[i] = grad_avg * sWptr[i] + (1.f-grad_avg) * dWptr[i] * dWptr[i];
float lr = learnrate / (sqrt(sWptr[i]) + delta);
/*Wptr[i] = Wptr[i] - lr * (dWptr[i]);*/
float f = Wptr[i] - lr * dWptr[i];
Wptr[i] = sgn(f) * max(0.f, fabs(f) - learnrate * sparsedecay/lr);
}
}
template<class V, class L>
void rmsprop(tensor<V,dev_memory_space,L>& W, const tensor<V,dev_memory_space,L>& dW, tensor<V,dev_memory_space,L>& sW, const float& learnrate, const float& delta, const float& decay, const float& sparsedecay, const float& grad_avg){
unsigned int size = dW.size();
unsigned int num_threads = 512;
unsigned int num_blocks = min(512,(unsigned int)ceil((float)dW.size() / num_threads));
rmsprop_kernel<<< num_threads, num_blocks>>>(W.ptr(), dW.ptr(), sW.ptr(), learnrate,delta,decay,sparsedecay, size, grad_avg);
cuvSafeCall(cudaThreadSynchronize());
}
template<class T>
__global__ void na_rmsprop(T* Wptr, const T* dWptr, T* oldWptr, T* sWptr, T* lrptr, T momentum, T grad_avg, T step_adapt, T delta, T lr_max, T lr_min, unsigned int size) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int off = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < size; i += off){
sWptr[i] = grad_avg * sWptr[i] + (1.f-grad_avg) * dWptr[i] * dWptr[i];
float upd = lrptr[i] * dWptr[i] / (sqrt(sWptr[i])+delta);
float tmp = Wptr[i] - upd;
float v = momentum*(tmp - oldWptr[i]);
float f = tmp + v;
Wptr[i] = sgn(f) * max(0.f, fabs(f) /*- learnrate * sparsedecay/lr*/);
oldWptr[i] = tmp;
float lr;
if(sgn(v) == sgn(v + upd))
lr = lrptr[i] * (1 + step_adapt);
else
lr = lrptr[i] * (1 - step_adapt);
if(lr > lr_max)
lrptr[i] = lr_max;
else if(lr < lr_min)
lrptr[i] = lr_min;
else
lrptr[i] = lr;
}
}
template<class V, class L>
void na_rmsprop(tensor<V,host_memory_space, L>& W, const tensor<V,host_memory_space, L>& dW, tensor<V,host_memory_space, L>& oldW, tensor<V,host_memory_space, L>& sW, tensor<V,host_memory_space, L>& learnrates, const float& momentum, const float& grad_avg, const float& step_adapt, const float& delta, const float& lr_max, const float& lr_min){
unsigned int size = W.size();
V* Wptr = W.ptr();
const V* dWptr = dW.ptr();
V* oldWptr = oldW.ptr();
V* sWptr = sW.ptr();
V* lrptr = learnrates.ptr();
for(unsigned int i=0; i < size; i++){
sWptr[i] = grad_avg * sWptr[i] + (1.f-grad_avg) * dWptr[i] * dWptr[i];
float upd = lrptr[i] * dWptr[i] / (sqrt(sWptr[i])+delta);
float tmp = Wptr[i] - upd;
float v = momentum*(tmp - oldWptr[i]);
float f = tmp + v;
Wptr[i] = sgn(f) * max(0.f, fabs(f) /*- learnrate * sparsedecay/lr*/);
oldWptr[i] = tmp;
float lr;
if(sgn(v) == sgn(v + upd))
lr = lrptr[i] * (1 + step_adapt);
else
lr = lrptr[i] * (1 - step_adapt);
if(lr > lr_max)
lrptr[i] = lr_max;
else if(lr < lr_min)
lrptr[i] = lr_min;
else
lrptr[i] = lr;
}
}
template<class V, class L>
void na_rmsprop(tensor<V,dev_memory_space,L>& W, const tensor<V,dev_memory_space,L>& dW, tensor<V,dev_memory_space,L>& oldW, tensor<V,dev_memory_space,L>& sW, tensor<V,dev_memory_space,L>& learnrates, const float& momentum, const float& grad_avg, const float& step_adapt, const float& delta, const float& lr_max, const float& lr_min){
unsigned int size = dW.size();
unsigned int num_threads = 512;
unsigned int num_blocks = min(512,(unsigned int)ceil((float)dW.size() / num_threads));
na_rmsprop<<< num_threads, num_blocks>>>(W.ptr(), dW.ptr(), oldW.ptr(), sW.ptr(), learnrates.ptr(), momentum, grad_avg, step_adapt, delta, lr_max, lr_min, size);
cuvSafeCall(cudaThreadSynchronize());
}
}
template<class V, class V2, class M, class L>
std::pair<float, float> multinomial_logistic_loss(
cuv::tensor<V, M, L>& softmaxX,
const cuv::tensor<V, M, L>& X,
const cuv::tensor<V2, M, L>& Y,
int pattern_axis,
boost::shared_ptr<allocator> alloc){
int n_patterns = X.shape(pattern_axis);
int n_labels = X.size() / n_patterns;
cuvAssert(Y.ndim() == 1);
cuvAssert(Y.shape(0) == n_patterns);
// find maximum over columns
tensor<V, M, L> red(cuv::extents[n_patterns], alloc);
// determine softmax of X
if(pattern_axis == 0)
reduce_to_col(red, X, RF_MAX, -1.f, 0.f);
else if(pattern_axis == X.ndim() - 1)
reduce_to_row(red, X, RF_MAX, -1.f, 0.f);
else{
cuvAssert(false /* illegal dimension in multinomial_logistic_loss */);
}
matrix_op_vec(softmaxX, X, red, pattern_axis, BF_ADD);
apply_scalar_functor(softmaxX, SF_EXP);
if(pattern_axis == 0){
reduce_to_col(red, softmaxX, RF_ADD);
}else{
reduce_to_row(red, softmaxX, RF_ADD);
}
matrix_op_vec(softmaxX, softmaxX, red, pattern_axis, BF_DIV);
tensor<V, M, L> true_label_log_probs(n_patterns, alloc);
tensor<V, M, L> correct_probs(n_patterns, alloc);
if(pattern_axis == 0){
reduce_to_col(red, softmaxX, RF_MAX);
}else{
reduce_to_row(red, softmaxX, RF_MAX);
}
dim3 threads(LOGREG_THREADS, 1);
dim3 blocks(DIVUP(n_patterns, LOGREG_THREADS), 1);
using namespace impl;
if(pattern_axis == 0){
// TODO this kernel is suboptimal!
multinomial_logistic_loss_kernel_t<<<blocks, threads>>>(
true_label_log_probs.ptr(), correct_probs.ptr(),
n_patterns, n_labels,
softmaxX.ptr(), Y.ptr(), red.ptr()
);
}else{
multinomial_logistic_loss_kernel<<<blocks, threads>>>(
true_label_log_probs.ptr(), correct_probs.ptr(),
n_patterns, n_labels,
softmaxX.ptr(), Y.ptr(), red.ptr()
);
}
cuvSafeCall(cudaThreadSynchronize());
std::pair<float, float> retval;
retval.first = -cuv::sum(true_label_log_probs);
retval.second = 1.f - cuv::mean(correct_probs);
return retval;
}
template<class V, class V2, class M, class L>
void multinomial_logistic_loss_grad(
cuv::tensor<V, M, L>& dmll_dX,
const cuv::tensor<V, M, L>& X,
const cuv::tensor<V2, M, L>& Y,
int pattern_axis, float fact_new, bool add
){
int n_patterns = X.shape(pattern_axis);
int n_labels = X.size() / n_patterns;
cuvAssert(X.shape() == dmll_dX.shape());
cuvAssert(Y.ndim() == 1);
cuvAssert(Y.shape(0) == n_patterns);
using namespace impl;
if(pattern_axis == 0){
// swapped X, Y for ``transposed'' kernel
dim3 threads(LOGREG_GRAD_THREADS_Y, LOGREG_GRAD_THREADS_X);
dim3 blocks(DIVUP(n_labels, LOGREG_GRAD_THREADS_Y),
DIVUP(n_patterns, LOGREG_GRAD_THREADS_X));
if(!add){
multinomial_logistic_loss_grad_kernel_t<false><<<blocks, threads>>>(dmll_dX.ptr(),
X.ptr(), Y.ptr(), n_patterns, n_labels, -1.f * fact_new);
}else{
multinomial_logistic_loss_grad_kernel_t<true><<<blocks, threads>>>(dmll_dX.ptr(),
X.ptr(), Y.ptr(), n_patterns, n_labels, -1.f * fact_new);
}
}else{
dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y);
dim3 blocks(DIVUP(n_patterns, LOGREG_GRAD_THREADS_X),
DIVUP(n_labels, LOGREG_GRAD_THREADS_Y));
if(!add)
multinomial_logistic_loss_grad_kernel<false><<<blocks, threads>>>(dmll_dX.ptr(),
X.ptr(), Y.ptr(), n_patterns, n_labels, -1.f * fact_new);
else
multinomial_logistic_loss_grad_kernel<true><<<blocks, threads>>>(dmll_dX.ptr(),
X.ptr(), Y.ptr(), n_patterns, n_labels, -1.f * fact_new);
}
}
template<class V, class M, class L>
void adagrad(tensor<V,M,L>& W, const tensor<V,M,L>& dW, tensor<V,M,L>& sW, const float& learnrate, const float& delta, const float& decay, const float& sparsedecay){
cuvAssert(equal_shape(W,dW));
cuvAssert(equal_shape(W,sW));
impl::adagrad(W,dW,sW,learnrate,delta,decay,sparsedecay);
}
template<class V, class M, class L>
void rmsprop(tensor<V,M,L>& W, const tensor<V,M,L>& dW, tensor<V,M,L>& sW, const float& learnrate, const float& delta, const float& decay, const float& sparsedecay, const float& grad_avg){
cuvAssert(equal_shape(W,dW));
cuvAssert(equal_shape(W,sW));
impl::rmsprop(W,dW,sW,learnrate,delta,decay,sparsedecay,grad_avg);
}
template<class V, class M,class L>
void softmax_derivative(cuv::tensor<V, M,L>& dst, const cuv::tensor<V, M,L>& softmax_act, const cuv::tensor<V,M,L>& residual,unsigned int vardim, float fact_old){
cuvAssert(equal_shape(dst,softmax_act));
cuvAssert(equal_shape(dst,residual));
cuvAssert(vardim == 0 || vardim==1);
impl::softmax_derivative(dst,softmax_act,residual,vardim, fact_old);
}
template<class V, class M, class L>
void softmax(cuv::tensor<V, M,L>& dst, const cuv::tensor<V, M,L>& src,unsigned int vardim){
cuvAssert(equal_shape(dst,src));
cuvAssert(vardim == 0 || vardim==1);
impl::softmax(dst,src,vardim);
}
template<class V, class M, class L>
void na_rmsprop(tensor<V,M,L>& W, const tensor<V,M,L>& dW, tensor<V,M,L>& oldW, tensor<V,M,L>& sW, tensor<V,M,L>& learnrates, const float& momentum, const float& grad_avg, const float& step_adapt, const float& delta, const float& lr_max, const float& lr_min){
cuvAssert(equal_shape(W,dW));
cuvAssert(equal_shape(W,oldW));
cuvAssert(equal_shape(W,sW));
cuvAssert(equal_shape(W,learnrates));
impl::na_rmsprop(W,dW,oldW,sW,learnrates,momentum,grad_avg,step_adapt,delta,lr_max,lr_min);
}
#define TENSOR(V,M,L) cuv::tensor<V,M,L>
#define INSTANTIATE(V,M,L) \
template void softmax_derivative(TENSOR(V,M,L)&, const TENSOR(V,M,L)&, const TENSOR(V,M,L)&,unsigned int,float);\
template void softmax(TENSOR(V,M,L)&, const TENSOR(V,M,L)&,unsigned int); \
template void adagrad(TENSOR(V,M,L)&, const TENSOR(V,M,L)&,TENSOR(V,M,L)&,const float&, const float&, const float&, const float&); \
template void rmsprop(TENSOR(V,M,L)&, const TENSOR(V,M,L)&,TENSOR(V,M,L)&,const float&, const float&, const float&, const float&, const float&); \
template void na_rmsprop(TENSOR(V,M,L)&,const TENSOR(V,M,L)&,TENSOR(V,M,L)&,TENSOR(V,M,L)&,TENSOR(V,M,L)&,const float&, const float&, const float&, const float&, const float&, const float&);
#define INSTANTIATE_MLL(V,V2,M,L) \
template std::pair<float, float> multinomial_logistic_loss(TENSOR(V,M,L)&, const TENSOR(V,M,L)&, const TENSOR(V2,M,L)&, int pattern_axis, boost::shared_ptr<allocator>);\
template void multinomial_logistic_loss_grad(TENSOR(V,M,L)&, const TENSOR(V,M,L)&, const TENSOR(V2,M,L)&, int pattern_axis, float, bool add);
INSTANTIATE(float,host_memory_space,row_major);
INSTANTIATE(float,host_memory_space,column_major);
INSTANTIATE(float,dev_memory_space,row_major);
INSTANTIATE_MLL(float,float,dev_memory_space,row_major);
INSTANTIATE_MLL(float,unsigned int,dev_memory_space,row_major);
} } }
|
d6b7b9508acc3ce5c40e0b47c1185d1b48d92ff0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example demonstrates how to call a CUTLASS GEMM kernel and provides a naive reference
matrix multiply kernel to verify its correctness.
The CUTLASS Gemm template is instantiated in the function CutlassSgemmNN. This kernel computes
the general matrix product (GEMM) using single-precision floating-point arithmetic and assumes
all matrices have column-major layout.
The threadblock tile size is chosen as 128x128x8 which offers good performance for large matrices.
See the CUTLASS Parallel for All blog post for more exposition on the tunable parameters available
in CUTLASS.
https://devblogs.nvidia.com/cutlass-linear-algebra-cuda/
Aside from defining and launching the SGEMM kernel, this example does not use any other components
or utilities within CUTLASS. Such utilities are demonstrated elsewhere in other examples and are
prevalent in the CUTLASS unit tests.
This example has deliberately been kept similar to the basic_gemm example from cutlass-1.3 to
highlight the minimum amount of differences needed to transition to cutlass-2.0.
Cutlass-1.3 sgemm: https://github.com/NVIDIA/cutlass/blob/master/examples/00_basic_gemm/basic_gemm.cu
*/
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
// Helper methods to check for errors
#include "helper.h"
//
// CUTLASS includes needed for single-precision GEMM kernel
//
// Defines cutlass::gemm::device::Gemm, the generic Gemm computation template class.
#include "cutlass/gemm/device/gemm.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// This function defines a CUTLASS GEMM kernel instantiation, constructs its parameters object,
// and launches it on the CUDA device.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Define a CUTLASS GEMM template and launch a GEMM kernel.
hipError_t CutlassSgemmNN(
int M,
int N,
int K,
float alpha,
float const *A,
int lda,
float const *B,
int ldb,
float beta,
float *C,
int ldc) {
// Define type definition for single-precision CUTLASS GEMM with column-major
// input matrices and 128x128x8 threadblock tile size (chosen by default).
//
// To keep the interface manageable, several helpers are defined for plausible compositions
// including the following example for single-precision GEMM. Typical values are used as
// default template arguments. See `cutlass/gemm/device/default_gemm_configuration.h` for more details.
//
// To view the full gemm device API interface, see `cutlass/gemm/device/gemm.h`
using ColumnMajor = cutlass::layout::ColumnMajor;
using CutlassGemm = cutlass::gemm::device::Gemm<float, // Data-type of A matrix
ColumnMajor, // Layout of A matrix
float, // Data-type of B matrix
ColumnMajor, // Layout of B matrix
float, // Data-type of C matrix
ColumnMajor>; // Layout of C matrix
// Define a CUTLASS GEMM type
CutlassGemm gemm_operator;
// Construct the CUTLASS GEMM arguments object.
//
// One of CUTLASS's design patterns is to define gemm argument objects that are constructible
// in host code and passed to kernels by value. These may include pointers, strides, scalars,
// and other arguments needed by Gemm and its components.
//
// The benefits of this pattern are (1.) a structured, composable strategy for passing host-constructible
// arguments to kernels and (2.) minimized initialization overhead on kernel entry.
//
CutlassGemm::Arguments args({M , N, K}, // Gemm Problem dimensions
{A, lda}, // Tensor-ref for source matrix A
{B, ldb}, // Tensor-ref for source matrix B
{C, ldc}, // Tensor-ref for source matrix C
{C, ldc}, // Tensor-ref for destination matrix D (may be different memory than source C matrix)
{alpha, beta}); // Scalars used in the Epilogue
//
// Launch the CUTLASS GEMM kernel.
//
cutlass::Status status = gemm_operator(args);
//
// Return a hipError_t if the CUTLASS GEMM operator returned an error code.
//
if (status != cutlass::Status::kSuccess) {
return hipErrorUnknown;
}
// Return success, if no errors were encountered.
return hipSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// The source code after this point in the file is generic CUDA using the CUDA Runtime API
// and simple CUDA kernels to initialize matrices and compute the general matrix product.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to initialize a matrix with small integers.
__global__ void InitializeMatrix_kernel(
float *matrix,
int rows,
int columns,
int seed = 0) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < rows && j < columns) {
int offset = i + j * rows;
// Generate arbitrary elements.
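// (16807 is the classic Park-Miller LCG multiplier; the expression below hashes offset+seed to a
// small integer near zero.)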
int const k = 16807;
int const m = 16;
float value = float(((offset + seed) * k % m) - m / 2);
matrix[offset] = value;
}
}
/// Simple function to initialize a matrix to arbitrary small integers.
hipError_t InitializeMatrix(float *matrix, int rows, int columns, int seed = 0) {
dim3 block(16, 16);
dim3 grid(
(rows + block.x - 1) / block.x,
(columns + block.y - 1) / block.y
);
hipLaunchKernelGGL(( InitializeMatrix_kernel), dim3(grid), dim3(block) , 0, 0, matrix, rows, columns, seed);
return hipGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocates device memory for a matrix then fills with arbitrary small integers.
hipError_t AllocateMatrix(float **matrix, int rows, int columns, int seed = 0) {
hipError_t result;
size_t sizeof_matrix = sizeof(float) * rows * columns;
// Allocate device memory.
result = hipMalloc(reinterpret_cast<void **>(matrix), sizeof_matrix);
if (result != hipSuccess) {
std::cerr << "Failed to allocate matrix: "
<< hipGetErrorString(result) << std::endl;
return result;
}
// Clear the allocation.
result = hipMemset(*matrix, 0, sizeof_matrix);
if (result != hipSuccess) {
std::cerr << "Failed to clear matrix device memory: "
<< hipGetErrorString(result) << std::endl;
return result;
}
// Initialize matrix elements to arbitrary small integers.
result = InitializeMatrix(*matrix, rows, columns, seed);
if (result != hipSuccess) {
std::cerr << "Failed to initialize matrix: "
<< hipGetErrorString(result) << std::endl;
return result;
}
return result;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Naive reference GEMM computation.
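/// One thread per output element: C(i,j) = alpha * sum_k A(i,k) * B(k,j) + beta * C(i,j),
/// with all matrices column-major and leading dimensions lda, ldb, ldc.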
__global__ void ReferenceGemm_kernel(
int M,
int N,
int K,
float alpha,
float const *A,
int lda,
float const *B,
int ldb,
float beta,
float *C,
int ldc) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < M && j < N) {
float accumulator = 0;
for (int k = 0; k < K; ++k) {
accumulator += A[i + k * lda] * B[k + j * ldb];
}
C[i + j * ldc] = alpha * accumulator + beta * C[i + j * ldc];
}
}
/// Reference GEMM computation.
hipError_t ReferenceGemm(
int M,
int N,
int K,
float alpha,
float const *A,
int lda,
float const *B,
int ldb,
float beta,
float *C,
int ldc) {
dim3 block(16, 16);
dim3 grid(
(M + block.x - 1) / block.x,
(N + block.y - 1) / block.y
);
hipLaunchKernelGGL(( ReferenceGemm_kernel), dim3(grid), dim3(block) , 0, 0, M, N, K, alpha, A, lda, B, ldb, beta, C, ldc);
return hipGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocate several matrices in GPU device memory and call a single-precision
/// CUTLASS GEMM kernel.
hipError_t TestCutlassGemm(int M, int N, int K, float alpha, float beta) {
hipError_t result;
//
// Define several matrices to be used as operands to GEMM kernels.
//
// Compute leading dimensions for each matrix.
int lda = M;
int ldb = K;
int ldc = M;
// Compute size in bytes of the C matrix.
size_t sizeof_C = sizeof(float) * ldc * N;
// Define pointers to matrices in GPU device memory.
float *A;
float *B;
float *C_cutlass;
float *C_reference;
//
// Allocate matrices in GPU device memory with arbitrary seeds.
//
result = AllocateMatrix(&A, M, K, 0);
if (result != hipSuccess) {
return result;
}
result = AllocateMatrix(&B, K, N, 17);
if (result != hipSuccess) {
hipFree(A);
return result;
}
result = AllocateMatrix(&C_cutlass, M, N, 101);
if (result != hipSuccess) {
hipFree(A);
hipFree(B);
return result;
}
result = AllocateMatrix(&C_reference, M, N, 101);
if (result != hipSuccess) {
hipFree(A);
hipFree(B);
hipFree(C_cutlass);
return result;
}
result = hipMemcpy(C_reference, C_cutlass, sizeof_C, hipMemcpyDeviceToDevice);
if (result != hipSuccess) {
std::cerr << "Failed to copy C_cutlass matrix to C_reference: "
<< hipGetErrorString(result) << std::endl;
hipFree(C_reference);
hipFree(C_cutlass);
hipFree(B);
hipFree(A);
return result;
}
//
// Launch CUTLASS GEMM.
//
result = CutlassSgemmNN(M, N, K, alpha, A, lda, B, ldb, beta, C_cutlass, ldc);
if (result != hipSuccess) {
std::cerr << "CUTLASS GEMM kernel failed: "
<< hipGetErrorString(result) << std::endl;
hipFree(C_reference);
hipFree(C_cutlass);
hipFree(B);
hipFree(A);
return result;
}
//
// Verify.
//
// Launch reference GEMM
result = ReferenceGemm(M, N, K, alpha, A, lda, B, ldb, beta, C_reference, ldc);
if (result != hipSuccess) {
std::cerr << "Reference GEMM kernel failed: "
<< hipGetErrorString(result) << std::endl;
hipFree(C_reference);
hipFree(C_cutlass);
hipFree(B);
hipFree(A);
return result;
}
// Copy to host and verify equivalence.
std::vector<float> host_cutlass(ldc * N, 0);
std::vector<float> host_reference(ldc * N, 0);
result = hipMemcpy(host_cutlass.data(), C_cutlass, sizeof_C, hipMemcpyDeviceToHost);
if (result != hipSuccess) {
std::cerr << "Failed to copy CUTLASS GEMM results: "
<< hipGetErrorString(result) << std::endl;
hipFree(C_reference);
hipFree(C_cutlass);
hipFree(B);
hipFree(A);
return result;
}
result = hipMemcpy(host_reference.data(), C_reference, sizeof_C, hipMemcpyDeviceToHost);
if (result != hipSuccess) {
std::cerr << "Failed to copy Reference GEMM results: "
<< hipGetErrorString(result) << std::endl;
hipFree(C_reference);
hipFree(C_cutlass);
hipFree(B);
hipFree(A);
return result;
}
//
// Free device memory allocations.
//
hipFree(C_reference);
hipFree(C_cutlass);
hipFree(B);
hipFree(A);
//
// Test for bit equivalence of results.
//
if (host_cutlass != host_reference) {
std::cerr << "CUTLASS results incorrect." << std::endl;
return hipErrorUnknown;
}
return hipSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point to basic_gemm example.
//
// usage:
//
// 00_basic_gemm <M> <N> <K> <alpha> <beta>
//
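// e.g. "00_basic_gemm 1024 512 2048 1.0 0.0" (illustrative values); omitted arguments fall back to
// the 128x128x128 problem with alpha=1, beta=0 set below.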
int main(int argc, const char *arg[]) {
//
// Parse the command line to obtain GEMM dimensions and scalar values.
//
// GEMM problem dimensions.
int problem[3] = { 128, 128, 128 };
for (int i = 1; i < argc && i < 4; ++i) {
std::stringstream ss(arg[i]);
ss >> problem[i - 1];
}
// Scalars used for linear scaling the result of the matrix product.
float scalars[2] = { 1, 0 };
for (int i = 4; i < argc && i < 6; ++i) {
std::stringstream ss(arg[i]);
ss >> scalars[i - 4];
}
//
// Run the CUTLASS GEMM test.
//
hipError_t result = TestCutlassGemm(
problem[0], // GEMM M dimension
problem[1], // GEMM N dimension
problem[2], // GEMM K dimension
scalars[0], // alpha
scalars[1] // beta
);
if (result == hipSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit.
return result == hipSuccess ? 0 : -1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| d6b7b9508acc3ce5c40e0b47c1185d1b48d92ff0.cu | /***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example demonstrates how to call a CUTLASS GEMM kernel and provides a naive reference
matrix multiply kernel to verify its correctness.
The CUTLASS Gemm template is instantiated in the function CutlassSgemmNN. This kernel computes
the general matrix product (GEMM) using single-precision floating-point arithmetic and assumes
all matrices have column-major layout.
The threadblock tile size is chosen as 128x128x8 which offers good performance for large matrices.
See the CUTLASS Parallel for All blog post for more exposition on the tunable parameters available
in CUTLASS.
https://devblogs.nvidia.com/cutlass-linear-algebra-cuda/
Aside from defining and launching the SGEMM kernel, this example does not use any other components
or utilities within CUTLASS. Such utilities are demonstrated elsewhere in other examples and are
prevalent in the CUTLASS unit tests.
This example has deliberately been kept similar to the basic_gemm example from cutlass-1.3 to
highlight the minimum amount of differences needed to transition to cutlass-2.0.
Cutlass-1.3 sgemm: https://github.com/NVIDIA/cutlass/blob/master/examples/00_basic_gemm/basic_gemm.cu
*/
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
// Helper methods to check for errors
#include "helper.h"
//
// CUTLASS includes needed for single-precision GEMM kernel
//
// Defines cutlass::gemm::device::Gemm, the generic Gemm computation template class.
#include "cutlass/gemm/device/gemm.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// This function defines a CUTLASS GEMM kernel instantiation, constructs its parameters object,
// and launches it on the CUDA device.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Define a CUTLASS GEMM template and launch a GEMM kernel.
cudaError_t CutlassSgemmNN(
int M,
int N,
int K,
float alpha,
float const *A,
int lda,
float const *B,
int ldb,
float beta,
float *C,
int ldc) {
// Define type definition for single-precision CUTLASS GEMM with column-major
// input matrices and 128x128x8 threadblock tile size (chosen by default).
//
// To keep the interface manageable, several helpers are defined for plausible compositions
// including the following example for single-precision GEMM. Typical values are used as
// default template arguments. See `cutlass/gemm/device/default_gemm_configuration.h` for more details.
//
// To view the full gemm device API interface, see `cutlass/gemm/device/gemm.h`
using ColumnMajor = cutlass::layout::ColumnMajor;
using CutlassGemm = cutlass::gemm::device::Gemm<float, // Data-type of A matrix
ColumnMajor, // Layout of A matrix
float, // Data-type of B matrix
ColumnMajor, // Layout of B matrix
float, // Data-type of C matrix
ColumnMajor>; // Layout of C matrix
// Define a CUTLASS GEMM type
CutlassGemm gemm_operator;
// Construct the CUTLASS GEMM arguments object.
//
// One of CUTLASS's design patterns is to define gemm argument objects that are constructible
// in host code and passed to kernels by value. These may include pointers, strides, scalars,
// and other arguments needed by Gemm and its components.
//
// The benefits of this pattern are (1.) a structured, composable strategy for passing host-constructible
// arguments to kernels and (2.) minimized initialization overhead on kernel entry.
//
CutlassGemm::Arguments args({M , N, K}, // Gemm Problem dimensions
{A, lda}, // Tensor-ref for source matrix A
{B, ldb}, // Tensor-ref for source matrix B
{C, ldc}, // Tensor-ref for source matrix C
{C, ldc}, // Tensor-ref for destination matrix D (may be different memory than source C matrix)
{alpha, beta}); // Scalars used in the Epilogue
//
// Launch the CUTLASS GEMM kernel.
//
cutlass::Status status = gemm_operator(args);
//
// Return a cudaError_t if the CUTLASS GEMM operator returned an error code.
//
if (status != cutlass::Status::kSuccess) {
return cudaErrorUnknown;
}
// Return success, if no errors were encountered.
return cudaSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// The source code after this point in the file is generic CUDA using the CUDA Runtime API
// and simple CUDA kernels to initialize matrices and compute the general matrix product.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to initialize a matrix with small integers.
__global__ void InitializeMatrix_kernel(
float *matrix,
int rows,
int columns,
int seed = 0) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < rows && j < columns) {
int offset = i + j * rows;
// Generate arbitrary elements.
int const k = 16807;
int const m = 16;
float value = float(((offset + seed) * k % m) - m / 2);
matrix[offset] = value;
}
}
/// Simple function to initialize a matrix to arbitrary small integers.
cudaError_t InitializeMatrix(float *matrix, int rows, int columns, int seed = 0) {
dim3 block(16, 16);
dim3 grid(
(rows + block.x - 1) / block.x,
(columns + block.y - 1) / block.y
);
InitializeMatrix_kernel<<< grid, block >>>(matrix, rows, columns, seed);
return cudaGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocates device memory for a matrix then fills with arbitrary small integers.
cudaError_t AllocateMatrix(float **matrix, int rows, int columns, int seed = 0) {
cudaError_t result;
size_t sizeof_matrix = sizeof(float) * rows * columns;
// Allocate device memory.
result = cudaMalloc(reinterpret_cast<void **>(matrix), sizeof_matrix);
if (result != cudaSuccess) {
std::cerr << "Failed to allocate matrix: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
// Clear the allocation.
result = cudaMemset(*matrix, 0, sizeof_matrix);
if (result != cudaSuccess) {
std::cerr << "Failed to clear matrix device memory: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
// Initialize matrix elements to arbitrary small integers.
result = InitializeMatrix(*matrix, rows, columns, seed);
if (result != cudaSuccess) {
std::cerr << "Failed to initialize matrix: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
return result;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Naive reference GEMM computation.
__global__ void ReferenceGemm_kernel(
int M,
int N,
int K,
float alpha,
float const *A,
int lda,
float const *B,
int ldb,
float beta,
float *C,
int ldc) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < M && j < N) {
float accumulator = 0;
for (int k = 0; k < K; ++k) {
accumulator += A[i + k * lda] * B[k + j * ldb];
}
C[i + j * ldc] = alpha * accumulator + beta * C[i + j * ldc];
}
}
/// Reference GEMM computation.
cudaError_t ReferenceGemm(
int M,
int N,
int K,
float alpha,
float const *A,
int lda,
float const *B,
int ldb,
float beta,
float *C,
int ldc) {
dim3 block(16, 16);
dim3 grid(
(M + block.x - 1) / block.x,
(N + block.y - 1) / block.y
);
ReferenceGemm_kernel<<< grid, block >>>(M, N, K, alpha, A, lda, B, ldb, beta, C, ldc);
return cudaGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocate several matrices in GPU device memory and call a single-precision
/// CUTLASS GEMM kernel.
cudaError_t TestCutlassGemm(int M, int N, int K, float alpha, float beta) {
cudaError_t result;
//
// Define several matrices to be used as operands to GEMM kernels.
//
// Compute leading dimensions for each matrix.
int lda = M;
int ldb = K;
int ldc = M;
// Compute size in bytes of the C matrix.
size_t sizeof_C = sizeof(float) * ldc * N;
// Define pointers to matrices in GPU device memory.
float *A;
float *B;
float *C_cutlass;
float *C_reference;
//
// Allocate matrices in GPU device memory with arbitrary seeds.
//
result = AllocateMatrix(&A, M, K, 0);
if (result != cudaSuccess) {
return result;
}
result = AllocateMatrix(&B, K, N, 17);
if (result != cudaSuccess) {
cudaFree(A);
return result;
}
result = AllocateMatrix(&C_cutlass, M, N, 101);
if (result != cudaSuccess) {
cudaFree(A);
cudaFree(B);
return result;
}
result = AllocateMatrix(&C_reference, M, N, 101);
if (result != cudaSuccess) {
cudaFree(A);
cudaFree(B);
cudaFree(C_cutlass);
return result;
}
result = cudaMemcpy(C_reference, C_cutlass, sizeof_C, cudaMemcpyDeviceToDevice);
if (result != cudaSuccess) {
std::cerr << "Failed to copy C_cutlass matrix to C_reference: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
//
// Launch CUTLASS GEMM.
//
result = CutlassSgemmNN(M, N, K, alpha, A, lda, B, ldb, beta, C_cutlass, ldc);
if (result != cudaSuccess) {
std::cerr << "CUTLASS GEMM kernel failed: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
//
// Verify.
//
// Launch reference GEMM
result = ReferenceGemm(M, N, K, alpha, A, lda, B, ldb, beta, C_reference, ldc);
if (result != cudaSuccess) {
std::cerr << "Reference GEMM kernel failed: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
// Copy to host and verify equivalence.
std::vector<float> host_cutlass(ldc * N, 0);
std::vector<float> host_reference(ldc * N, 0);
result = cudaMemcpy(host_cutlass.data(), C_cutlass, sizeof_C, cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
std::cerr << "Failed to copy CUTLASS GEMM results: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
result = cudaMemcpy(host_reference.data(), C_reference, sizeof_C, cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
std::cerr << "Failed to copy Reference GEMM results: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
//
// Free device memory allocations.
//
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
//
// Test for bit equivalence of results.
//
if (host_cutlass != host_reference) {
std::cerr << "CUTLASS results incorrect." << std::endl;
return cudaErrorUnknown;
}
return cudaSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point to basic_gemm example.
//
// usage:
//
// 00_basic_gemm <M> <N> <K> <alpha> <beta>
//
int main(int argc, const char *arg[]) {
//
// Parse the command line to obtain GEMM dimensions and scalar values.
//
// GEMM problem dimensions.
int problem[3] = { 128, 128, 128 };
for (int i = 1; i < argc && i < 4; ++i) {
std::stringstream ss(arg[i]);
ss >> problem[i - 1];
}
// Scalars used for linear scaling the result of the matrix product.
float scalars[2] = { 1, 0 };
for (int i = 4; i < argc && i < 6; ++i) {
std::stringstream ss(arg[i]);
ss >> scalars[i - 4];
}
//
// Run the CUTLASS GEMM test.
//
cudaError_t result = TestCutlassGemm(
problem[0], // GEMM M dimension
problem[1], // GEMM N dimension
problem[2], // GEMM K dimension
scalars[0], // alpha
scalars[1] // beta
);
if (result == cudaSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit.
return result == cudaSuccess ? 0 : -1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
|
7f547fd39a2f7189f2fe225c2d631d98cb8b228b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* Includes, cuda */
#include <hip/hip_runtime.h>
#include <hipblas.h>
#include <iostream>
const int n = 6;
int main()
{
hipblasStatus_t stat;
hipblasHandle_t handle;
int j;
float *x;
x = new float[n];
for (j = 0; j < n; j++)
{
x[j] = float(j);
}
float *d_x;
hipMalloc((void**)&d_x, n*sizeof(float));
stat = hipblasCreate(&handle);
stat = hipblasSetVector(n, sizeof(float), x, 1, d_x, 1);
int result;
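// Isamax returns the 1-based index of the element with the largest absolute value,
// hence the x[result-1] below.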
stat = hipblasIsamax(handle, n, d_x, 1, &result);
std::cout << "The Largest Value is : " << x[result-1] << std::endl;
hipFree(d_x);
hipblasDestroy(handle);
delete[] x;
char temp;
std::cin >> temp;
return 0;
}
| 7f547fd39a2f7189f2fe225c2d631d98cb8b228b.cu |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* Includes, cuda */
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <iostream>
const int n = 6;
int main()
{
cublasStatus_t stat;
cublasHandle_t handle;
int j;
float *x;
x = new float[n];
for (j = 0; j < n; j++)
{
x[j] = float(j);
}
float *d_x;
cudaMalloc((void**)&d_x, n*sizeof(float));
stat = cublasCreate(&handle);
stat = cublasSetVector(n, sizeof(float), x, 1, d_x, 1);
int result;
stat = cublasIsamax(handle, n, d_x, 1, &result);
std::cout << "The Largest Value is : " << x[result-1] << std::endl;
cudaFree(d_x);
cublasDestroy(handle);
delete[] x;
char temp;
std::cin >> temp;
return 0;
}
|
4e7f95c0b3239dfb14c84bf547a626d1c0095677.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "query_ball_point_gpu.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int b = 2;
int n = XSIZE*YSIZE;
int m = 2;
const float *radius = NULL;
hipMalloc(&radius, XSIZE*YSIZE);
int nsample = 1;
const float *xyz1 = NULL;
hipMalloc(&xyz1, XSIZE*YSIZE);
const float *xyz2 = NULL;
hipMalloc(&xyz2, XSIZE*YSIZE);
int *idx = NULL;
hipMalloc(&idx, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((query_ball_point_gpu), dim3(gridBlock), dim3(threadBlock), 0, 0, b, n, m, radius, nsample, xyz1, xyz2, idx);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((query_ball_point_gpu), dim3(gridBlock), dim3(threadBlock), 0, 0, b, n, m, radius, nsample, xyz1, xyz2, idx);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((query_ball_point_gpu), dim3(gridBlock), dim3(threadBlock), 0, 0, b, n, m, radius, nsample, xyz1, xyz2, idx);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 4e7f95c0b3239dfb14c84bf547a626d1c0095677.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "query_ball_point_gpu.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int b = 2;
int n = XSIZE*YSIZE;
int m = 2;
const float *radius = NULL;
cudaMalloc(&radius, XSIZE*YSIZE);
int nsample = 1;
const float *xyz1 = NULL;
cudaMalloc(&xyz1, XSIZE*YSIZE);
const float *xyz2 = NULL;
cudaMalloc(&xyz2, XSIZE*YSIZE);
int *idx = NULL;
cudaMalloc(&idx, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
query_ball_point_gpu<<<gridBlock,threadBlock>>>(b,n,m,radius,nsample,xyz1,xyz2,idx);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
query_ball_point_gpu<<<gridBlock,threadBlock>>>(b,n,m,radius,nsample,xyz1,xyz2,idx);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
query_ball_point_gpu<<<gridBlock,threadBlock>>>(b,n,m,radius,nsample,xyz1,xyz2,idx);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
cf4ef0ce0a9858ddd7e7f4d0be15a286916060aa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "matmul.cuh"
#include <iostream>
using namespace std;
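// Naive dense matrix multiply C = A * B for n x n row-major matrices:
// one thread per output element, each looping over the shared dimension.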
__global__ void matmul_kernel(const float *A, const float *B, float *C,
size_t n) {
size_t i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n * n) {
size_t row = i / n;
size_t col = i % n;
C[i]=0;
for (size_t r = 0; r < n; r++) {
C[i] += A[row * n + r] * B[r * n + col];
}
}
}
void matmul(const float *A, const float *B, float *C, size_t n,
unsigned int threads_per_block) {
hipLaunchKernelGGL(( matmul_kernel), dim3(((n * n) + threads_per_block - 1) / threads_per_block),
dim3(threads_per_block), 0, 0, A, B, C, n);
hipDeviceSynchronize();
}
| cf4ef0ce0a9858ddd7e7f4d0be15a286916060aa.cu | #include "matmul.cuh"
#include <iostream>
using namespace std;
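// Naive dense matrix multiply C = A * B for n x n row-major matrices:
// one thread per output element, each looping over the shared dimension.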
__global__ void matmul_kernel(const float *A, const float *B, float *C,
size_t n) {
size_t i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n * n) {
size_t row = i / n;
size_t col = i % n;
C[i]=0;
for (size_t r = 0; r < n; r++) {
C[i] += A[row * n + r] * B[r * n + col];
}
}
}
void matmul(const float *A, const float *B, float *C, size_t n,
unsigned int threads_per_block) {
matmul_kernel<<<((n * n) + threads_per_block - 1) / threads_per_block,
threads_per_block>>>(A, B, C, n);
cudaDeviceSynchronize();
}
|
66390a58631e4346d75d16242ca08b835c66017a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Modified from
// https://github.com/sshaoshuai/Pointnet2.PyTorch/tree/master/pointnet2/src/sampling_gpu.cu
#include <stdio.h>
#include <stdlib.h>
#define TOTAL_THREADS 1024
#define THREADS_PER_BLOCK 256
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
inline int opt_n_threads(int work_size) {
const int pow_2 = ::log(static_cast<double>(work_size)) / ::log(2.0);
return max(min(1 << pow_2, TOTAL_THREADS), 1);
}
__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i,
int idx1, int idx2) {
const float v1 = dists[idx1], v2 = dists[idx2];
const int i1 = dists_i[idx1], i2 = dists_i[idx2];
dists[idx1] = max(v1, v2);
dists_i[idx1] = v2 > v1 ? i2 : i1;
}
template <unsigned int block_size>
__global__ void furthest_point_sampling_kernel(
int b, int n, int m, const float *__restrict__ dataset,
float *__restrict__ temp, int *__restrict__ idxs) {
// dataset: (B, N, 3)
// tmp: (B, N)
// output:
// idx: (B, M)
if (m <= 0) return;
__shared__ float dists[block_size];
__shared__ int dists_i[block_size];
int batch_index = blockIdx.x;
dataset += batch_index * n * 3;
temp += batch_index * n;
idxs += batch_index * m;
int tid = threadIdx.x;
const int stride = block_size;
int old = 0;
if (threadIdx.x == 0) idxs[0] = old;
__syncthreads();
for (int j = 1; j < m; j++) {
int besti = 0;
float best = -1;
float x1 = dataset[old * 3 + 0];
float y1 = dataset[old * 3 + 1];
float z1 = dataset[old * 3 + 2];
for (int k = tid; k < n; k += stride) {
float x2, y2, z2;
x2 = dataset[k * 3 + 0];
y2 = dataset[k * 3 + 1];
z2 = dataset[k * 3 + 2];
// float mag = (x2 * x2) + (y2 * y2) + (z2 * z2);
// if (mag <= 1e-3)
// continue;
float d =
(x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1);
float d2 = min(d, temp[k]);
temp[k] = d2;
besti = d2 > best ? k : besti;
best = d2 > best ? d2 : best;
}
dists[tid] = best;
dists_i[tid] = besti;
__syncthreads();
if (block_size >= 1024) {
if (tid < 512) {
__update(dists, dists_i, tid, tid + 512);
}
__syncthreads();
}
if (block_size >= 512) {
if (tid < 256) {
__update(dists, dists_i, tid, tid + 256);
}
__syncthreads();
}
if (block_size >= 256) {
if (tid < 128) {
__update(dists, dists_i, tid, tid + 128);
}
__syncthreads();
}
if (block_size >= 128) {
if (tid < 64) {
__update(dists, dists_i, tid, tid + 64);
}
__syncthreads();
}
if (block_size >= 64) {
if (tid < 32) {
__update(dists, dists_i, tid, tid + 32);
}
__syncthreads();
}
if (block_size >= 32) {
if (tid < 16) {
__update(dists, dists_i, tid, tid + 16);
}
__syncthreads();
}
if (block_size >= 16) {
if (tid < 8) {
__update(dists, dists_i, tid, tid + 8);
}
__syncthreads();
}
if (block_size >= 8) {
if (tid < 4) {
__update(dists, dists_i, tid, tid + 4);
}
__syncthreads();
}
if (block_size >= 4) {
if (tid < 2) {
__update(dists, dists_i, tid, tid + 2);
}
__syncthreads();
}
if (block_size >= 2) {
if (tid < 1) {
__update(dists, dists_i, tid, tid + 1);
}
__syncthreads();
}
old = dists_i[0];
if (tid == 0) idxs[j] = old;
}
}
void furthest_point_sampling_kernel_launcher(int b, int n, int m,
const float *dataset, float *temp,
int *idxs, hipStream_t stream) {
// dataset: (B, N, 3)
// tmp: (B, N)
// output:
// idx: (B, M)
hipError_t err;
unsigned int n_threads = opt_n_threads(n);
switch (n_threads) {
case 1024:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<1024>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 512:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<512>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 256:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<256>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 128:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<128>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 64:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<64>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 32:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<32>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 16:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<16>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 8:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<8>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 4:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<4>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 2:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<2>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
case 1:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<1>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
break;
default:
hipLaunchKernelGGL(( furthest_point_sampling_kernel<512>)
, dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs);
}
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
| 66390a58631e4346d75d16242ca08b835c66017a.cu | // Modified from
// https://github.com/sshaoshuai/Pointnet2.PyTorch/tree/master/pointnet2/src/sampling_gpu.cu
#include <stdio.h>
#include <stdlib.h>
#define TOTAL_THREADS 1024
#define THREADS_PER_BLOCK 256
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
inline int opt_n_threads(int work_size) {
const int pow_2 = std::log(static_cast<double>(work_size)) / std::log(2.0);
return max(min(1 << pow_2, TOTAL_THREADS), 1);
}
__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i,
int idx1, int idx2) {
const float v1 = dists[idx1], v2 = dists[idx2];
const int i1 = dists_i[idx1], i2 = dists_i[idx2];
dists[idx1] = max(v1, v2);
dists_i[idx1] = v2 > v1 ? i2 : i1;
}
template <unsigned int block_size>
__global__ void furthest_point_sampling_kernel(
int b, int n, int m, const float *__restrict__ dataset,
float *__restrict__ temp, int *__restrict__ idxs) {
// dataset: (B, N, 3)
// tmp: (B, N)
// output:
// idx: (B, M)
if (m <= 0) return;
__shared__ float dists[block_size];
__shared__ int dists_i[block_size];
int batch_index = blockIdx.x;
dataset += batch_index * n * 3;
temp += batch_index * n;
idxs += batch_index * m;
int tid = threadIdx.x;
const int stride = block_size;
int old = 0;
if (threadIdx.x == 0) idxs[0] = old;
__syncthreads();
for (int j = 1; j < m; j++) {
int besti = 0;
float best = -1;
float x1 = dataset[old * 3 + 0];
float y1 = dataset[old * 3 + 1];
float z1 = dataset[old * 3 + 2];
for (int k = tid; k < n; k += stride) {
float x2, y2, z2;
x2 = dataset[k * 3 + 0];
y2 = dataset[k * 3 + 1];
z2 = dataset[k * 3 + 2];
// float mag = (x2 * x2) + (y2 * y2) + (z2 * z2);
// if (mag <= 1e-3)
// continue;
float d =
(x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1);
float d2 = min(d, temp[k]);
temp[k] = d2;
besti = d2 > best ? k : besti;
best = d2 > best ? d2 : best;
}
dists[tid] = best;
dists_i[tid] = besti;
__syncthreads();
if (block_size >= 1024) {
if (tid < 512) {
__update(dists, dists_i, tid, tid + 512);
}
__syncthreads();
}
if (block_size >= 512) {
if (tid < 256) {
__update(dists, dists_i, tid, tid + 256);
}
__syncthreads();
}
if (block_size >= 256) {
if (tid < 128) {
__update(dists, dists_i, tid, tid + 128);
}
__syncthreads();
}
if (block_size >= 128) {
if (tid < 64) {
__update(dists, dists_i, tid, tid + 64);
}
__syncthreads();
}
if (block_size >= 64) {
if (tid < 32) {
__update(dists, dists_i, tid, tid + 32);
}
__syncthreads();
}
if (block_size >= 32) {
if (tid < 16) {
__update(dists, dists_i, tid, tid + 16);
}
__syncthreads();
}
if (block_size >= 16) {
if (tid < 8) {
__update(dists, dists_i, tid, tid + 8);
}
__syncthreads();
}
if (block_size >= 8) {
if (tid < 4) {
__update(dists, dists_i, tid, tid + 4);
}
__syncthreads();
}
if (block_size >= 4) {
if (tid < 2) {
__update(dists, dists_i, tid, tid + 2);
}
__syncthreads();
}
if (block_size >= 2) {
if (tid < 1) {
__update(dists, dists_i, tid, tid + 1);
}
__syncthreads();
}
old = dists_i[0];
if (tid == 0) idxs[j] = old;
}
}
void furthest_point_sampling_kernel_launcher(int b, int n, int m,
const float *dataset, float *temp,
int *idxs, cudaStream_t stream) {
// dataset: (B, N, 3)
// tmp: (B, N)
// output:
// idx: (B, M)
cudaError_t err;
unsigned int n_threads = opt_n_threads(n);
switch (n_threads) {
case 1024:
furthest_point_sampling_kernel<1024>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 512:
furthest_point_sampling_kernel<512>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 256:
furthest_point_sampling_kernel<256>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 128:
furthest_point_sampling_kernel<128>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 64:
furthest_point_sampling_kernel<64>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 32:
furthest_point_sampling_kernel<32>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 16:
furthest_point_sampling_kernel<16>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 8:
furthest_point_sampling_kernel<8>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 4:
furthest_point_sampling_kernel<4>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 2:
furthest_point_sampling_kernel<2>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 1:
furthest_point_sampling_kernel<1>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
default:
furthest_point_sampling_kernel<512>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
}
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
|
262658e5e6cbc15241eb563c927bf92f1cfe5fd7.hip | // !!! This is a file automatically generated by hipify!!!
#include "RPS.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <helper_cuda.h> // helper functions for CUDA error checking and initialization
#define CUDA_CALL(x) do { if((x)!=hipSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
#define CURAND_CALL(x) do { if((x)!=HIPRAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
#define PI 3.1415926536f
#define PI2_3 2.0943951023931953f
#define PI4_3 4.1887902047863905f
#define PI2 6.283185307179586f
#define PI_3 1.0471975511965976f
#define GET_SCORE(ARR,X,Y) ((ARR)[(X) + (Y)*width])
#define GET_PIXEL(ARR,X,Y) ((float *)((ARR) + (Y)*pitch) + 4 * (X))
__device__ float3 convert_one_pixel_to_rgb_f(float3 pixel) {
float r, g, b;
float h, s, v;
h = pixel.x;
s = pixel.y;
v = pixel.z;
float f = h / PI_3;
int hi = (int)floorf(f) % 6;
float temp;
f = modff(f,&temp);
float p = v * (1 - s);
float q = v * (1 - s * f);
float t = v * (1 - s * (1 - f));
switch (hi)
{
case 0:
r = v;
g = t;
b = p;
break;
case 1:
r = q;
g = v;
b = p;
break;
case 2:
r = p;
g = v;
b = t;
break;
case 3:
r = p;
g = q;
b = v;
break;
case 4:
r = t;
g = p;
b = v;
break;
default:
r = v;
g = p;
b = q;
break;
}
return float3 { r, g, b };
}
__global__ void kern1(unsigned char* surface, int width, int height, size_t pitch, float t)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
float *pixel;
if (x >= width || y >= height) return;
pixel = (float *)(surface + y*pitch) + 4 * x;
float centerx = width * 0.5f;
float centery = height * 0.5f;
float fXDiff = (x - centerx);
float fYDiff = (y - centery);
float fDistance = sqrtf(fXDiff*fXDiff + fYDiff*fYDiff);
float fAngle = atan2f(fYDiff, fXDiff);
float fColAngle = t + fAngle;
float v = __saturatef(1.0f - fDistance / width);
float3 P = convert_one_pixel_to_rgb_f(float3{ fColAngle ,1.0f,v });
/*pixel[0] = P.x;
pixel[1] = P.y;
pixel[2] = P.z;*/
pixel[0] += 0.01;
pixel[1] += 0.01;
pixel[2] += 0.01;
pixel[3] = 1.0f;
}
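// Scores each pixel against its four wrap-around (toroidal) neighbours using an
// antisymmetric cross-channel term, so the R/G/B "species" beat each other cyclically
// (rock-paper-scissors dynamics). K_MakeNextFrame then diffuses each pixel towards
// its highest-scoring neighbour.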
__global__ void K_CalcScores(const unsigned char* arrPixels,float* arrScore, int width, int height, size_t pitch)
{
int x = blockIdx.x*blockDim.x + threadIdx.x ;
int y = blockIdx.y*blockDim.y + threadIdx.y ;
if (x >= width || y >= height) return;
const float* globalPixelIn = GET_PIXEL(arrPixels,x, y);
const float pixelIn[3] = { globalPixelIn[0],globalPixelIn[1],globalPixelIn[2] };
const float* neighbors[4];
neighbors[0] = (x == 0 ) ? GET_PIXEL(arrPixels,width-1, y) : GET_PIXEL(arrPixels, x-1, y);
neighbors[1] = (x == width - 1) ? GET_PIXEL(arrPixels,0, y) : GET_PIXEL(arrPixels,x+1,y);
neighbors[2] = (y == 0) ? GET_PIXEL(arrPixels,x, height-1) : GET_PIXEL(arrPixels,x,y-1);
neighbors[3] = (y==height-1) ? GET_PIXEL(arrPixels,x, 0) : GET_PIXEL(arrPixels,x,y+1);
float scoreOut = 0.0f;
for (int i = 0; i < 4; ++i)
{
const float this_neighbor[3] = { neighbors[i][0] ,neighbors[i][1],neighbors[i][2] };
scoreOut += this_neighbor[0] * pixelIn[1] - this_neighbor[1] * pixelIn[0]
+ this_neighbor[1] * pixelIn[2] - this_neighbor[2] * pixelIn[1]
+ this_neighbor[2] * pixelIn[0] - this_neighbor[0] * pixelIn[2];
}
GET_SCORE(arrScore,x,y) = scoreOut;
}
__global__ void K_MakeNextFrame(const unsigned char* arrPixels, const float* arrScore, unsigned char *arrFrameOut, int width, int height, size_t pitch, const float fNormPower, const float fDiffusionPower, const float fDiffusionCoeff)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
const float* pixelIn = GET_PIXEL(arrPixels, x, y);
float* pixelOut = GET_PIXEL(arrFrameOut, x, y);
const float* neighbors[5];
neighbors[0] = (x == 0) ? GET_PIXEL(arrPixels, width - 1, y) : GET_PIXEL(arrPixels, x - 1, y);
neighbors[1] = (x == width - 1) ? GET_PIXEL(arrPixels, 0, y) : GET_PIXEL(arrPixels, x + 1, y);
neighbors[2] = (y == 0) ? GET_PIXEL(arrPixels, x, height - 1) : GET_PIXEL(arrPixels, x, y - 1);
neighbors[3] = (y == height - 1) ? GET_PIXEL(arrPixels, x, 0) : GET_PIXEL(arrPixels, x, y + 1);
neighbors[4] = pixelIn;
float neighborScores[4];
neighborScores[0] = (x == 0) ? GET_SCORE(arrScore, width - 1, y) : GET_SCORE(arrScore, x - 1, y);
neighborScores[1] = (x == width - 1) ? GET_SCORE(arrScore, 0, y) : GET_SCORE(arrScore, x + 1, y);
neighborScores[2] = (y == 0) ? GET_SCORE(arrScore, x, height - 1) : GET_SCORE(arrScore, x, y - 1);
neighborScores[3] = (y == height - 1) ? GET_SCORE(arrScore, x, 0) : GET_SCORE(arrScore, x, y + 1);
//choose highest ranking neighbor
float fHighestScore = GET_SCORE(arrScore,x,y);
int iChosenNeighbor = 4;
for (int i = 0; i < 4; ++i)
{
if (1.01f * fHighestScore < neighborScores[i])
{
fHighestScore = neighborScores[i];
iChosenNeighbor = i;
}
}
const float* pixelChosen = neighbors[iChosenNeighbor];
float pixelResult[3];
float fSum = 0;
//diffuse
for (int i = 0;i < 3;++i)
{
pixelResult[i] = powf(pixelChosen[i] * fDiffusionCoeff + pixelIn[i], fDiffusionPower);
fSum += powf(pixelResult[i], fNormPower);
}
//normalize
if (fSum != 0)
{
fSum = powf(fSum, 1.0f/ fNormPower);
for (int i = 0;i < 3;++i)
{
pixelResult[i] /= fSum;
}
}
//assign output
for (int i = 0; i < 3; ++i)
{
pixelOut[i] = pixelResult[i];
}
pixelOut[3] = 1.0f;
}
RPSSim::~RPSSim()
{
hipFree(m_d_lastFrame);
getLastCudaError("hipFree (g_texture_2d) failed");
hipFree(m_d_thisFrame);
getLastCudaError("hipFree (g_texture_2d) failed");
}
RPSSim::RPSSim(const char *strInitStatePath)
{
_ASSERT(0);
}
int RPSSim::RandomizeBuffer(float *d_buffer)
{
size_t nSize = m_height * m_width * 4;
hiprandGenerator_t gen;
CURAND_CALL(hiprandCreateGenerator(&gen,
HIPRAND_RNG_PSEUDO_DEFAULT));
CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(gen,
1234ULL));
CURAND_CALL(hiprandGenerateUniform(gen, d_buffer, nSize));
return 0;
}
RPSSim::RPSSim(int width, int height)
{
m_width = width;
m_height = height;
hipMallocPitch(&m_d_lastFrame, &m_pitch, width * sizeof(float) * 4, height);
getLastCudaError("hipMallocPitch (g_texture_2d) failed");
hipMallocPitch(&m_d_thisFrame, &m_pitch, width * sizeof(float) * 4, height);
getLastCudaError("hipMallocPitch (g_texture_2d) failed");
hipMalloc(&m_d_arrScores, width * height * sizeof(float));
getLastCudaError("hipMalloc (g_texture_2d) failed");
if ( RandomizeBuffer(m_d_thisFrame) )
printf("Error randomizing\n");
}
void* RPSSim::MakeOneRPSFrame(float t, const float fNormPower, const float fDiffusionPower, const float fDiffusionCoeff)
{
hipError_t error = hipSuccess;
dim3 Db = dim3(16, 16); // block dimensions are fixed to be 256 threads
dim3 Dg = dim3((GetWidth() + Db.x - 1) / Db.x, (GetHeight() + Db.y - 1) / Db.y);
float* temp = m_d_lastFrame;
m_d_lastFrame = m_d_thisFrame;
m_d_thisFrame = temp;
//kern1 << <Dg, Db >> >((unsigned char *)surface, width, height, pitch, t);
	K_CalcScores << <Dg, Db >> >((unsigned char *)m_d_lastFrame,m_d_arrScores, GetWidth(), GetHeight(), GetPitch());
	error = hipGetLastError();
	if (error != hipSuccess)
{
printf("K_CalcScores() failed to launch error = %d\n", error);
}
K_MakeNextFrame << <Dg, Db >> > ((unsigned char*)m_d_lastFrame, m_d_arrScores, (unsigned char*)m_d_thisFrame,
GetWidth(), GetHeight(), GetPitch(), fNormPower, fDiffusionPower, fDiffusionCoeff);
error = hipGetLastError();
if (error != hipSuccess)
{
printf("K_MakeNextFrame failed to launch error = %d\n", error);
}
return m_d_thisFrame;
}
| 262658e5e6cbc15241eb563c927bf92f1cfe5fd7.cu | #include "RPS.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#include <curand.h>
#include <helper_cuda.h> // helper functions for CUDA error checking and initialization
#define CUDA_CALL(x) do { if((x)!=cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
#define CURAND_CALL(x) do { if((x)!=CURAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
#define PI 3.1415926536f
#define PI2_3 2.0943951023931953f
#define PI4_3 4.1887902047863905f
#define PI2 6.283185307179586f
#define PI_3 1.0471975511965976f
#define GET_SCORE(ARR,X,Y) ((ARR)[(X) + (Y)*width])
#define GET_PIXEL(ARR,X,Y) ((float *)((ARR) + (Y)*pitch) + 4 * (X))
__device__ float3 convert_one_pixel_to_rgb_f(float3 pixel) {
float r, g, b;
float h, s, v;
h = pixel.x;
s = pixel.y;
v = pixel.z;
float f = h / PI_3;
int hi = (int)floorf(f) % 6;
float temp;
f = modff(f,&temp);
float p = v * (1 - s);
float q = v * (1 - s * f);
float t = v * (1 - s * (1 - f));
switch (hi)
{
case 0:
r = v;
g = t;
b = p;
break;
case 1:
r = q;
g = v;
b = p;
break;
case 2:
r = p;
g = v;
b = t;
break;
case 3:
r = p;
g = q;
b = v;
break;
case 4:
r = t;
g = p;
b = v;
break;
default:
r = v;
g = p;
b = q;
break;
}
return float3 { r, g, b };
}
__global__ void kern1(unsigned char* surface, int width, int height, size_t pitch, float t)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
float *pixel;
if (x >= width || y >= height) return;
pixel = (float *)(surface + y*pitch) + 4 * x;
float centerx = width * 0.5f;
float centery = height * 0.5f;
float fXDiff = (x - centerx);
float fYDiff = (y - centery);
float fDistance = sqrtf(fXDiff*fXDiff + fYDiff*fYDiff);
float fAngle = atan2f(fYDiff, fXDiff);
float fColAngle = t + fAngle;
float v = __saturatef(1.0f - fDistance / width);
float3 P = convert_one_pixel_to_rgb_f(float3{ fColAngle ,1.0f,v });
/*pixel[0] = P.x;
pixel[1] = P.y;
pixel[2] = P.z;*/
pixel[0] += 0.01;
pixel[1] += 0.01;
pixel[2] += 0.01;
pixel[3] = 1.0f;
}
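// Scores each pixel against its four wrap-around (toroidal) neighbours using an
// antisymmetric cross-channel term, so the R/G/B "species" beat each other cyclically
// (rock-paper-scissors dynamics). K_MakeNextFrame then diffuses each pixel towards
// its highest-scoring neighbour.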
__global__ void K_CalcScores(const unsigned char* arrPixels,float* arrScore, int width, int height, size_t pitch)
{
int x = blockIdx.x*blockDim.x + threadIdx.x ;
int y = blockIdx.y*blockDim.y + threadIdx.y ;
if (x >= width || y >= height) return;
const float* globalPixelIn = GET_PIXEL(arrPixels,x, y);
const float pixelIn[3] = { globalPixelIn[0],globalPixelIn[1],globalPixelIn[2] };
const float* neighbors[4];
neighbors[0] = (x == 0 ) ? GET_PIXEL(arrPixels,width-1, y) : GET_PIXEL(arrPixels, x-1, y);
neighbors[1] = (x == width - 1) ? GET_PIXEL(arrPixels,0, y) : GET_PIXEL(arrPixels,x+1,y);
neighbors[2] = (y == 0) ? GET_PIXEL(arrPixels,x, height-1) : GET_PIXEL(arrPixels,x,y-1);
neighbors[3] = (y==height-1) ? GET_PIXEL(arrPixels,x, 0) : GET_PIXEL(arrPixels,x,y+1);
float scoreOut = 0.0f;
for (int i = 0; i < 4; ++i)
{
const float this_neighbor[3] = { neighbors[i][0] ,neighbors[i][1],neighbors[i][2] };
scoreOut += this_neighbor[0] * pixelIn[1] - this_neighbor[1] * pixelIn[0]
+ this_neighbor[1] * pixelIn[2] - this_neighbor[2] * pixelIn[1]
+ this_neighbor[2] * pixelIn[0] - this_neighbor[0] * pixelIn[2];
}
GET_SCORE(arrScore,x,y) = scoreOut;
}
__global__ void K_MakeNextFrame(const unsigned char* arrPixels, const float* arrScore, unsigned char *arrFrameOut, int width, int height, size_t pitch, const float fNormPower, const float fDiffusionPower, const float fDiffusionCoeff)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
const float* pixelIn = GET_PIXEL(arrPixels, x, y);
float* pixelOut = GET_PIXEL(arrFrameOut, x, y);
const float* neighbors[5];
neighbors[0] = (x == 0) ? GET_PIXEL(arrPixels, width - 1, y) : GET_PIXEL(arrPixels, x - 1, y);
neighbors[1] = (x == width - 1) ? GET_PIXEL(arrPixels, 0, y) : GET_PIXEL(arrPixels, x + 1, y);
neighbors[2] = (y == 0) ? GET_PIXEL(arrPixels, x, height - 1) : GET_PIXEL(arrPixels, x, y - 1);
neighbors[3] = (y == height - 1) ? GET_PIXEL(arrPixels, x, 0) : GET_PIXEL(arrPixels, x, y + 1);
neighbors[4] = pixelIn;
float neighborScores[4];
neighborScores[0] = (x == 0) ? GET_SCORE(arrScore, width - 1, y) : GET_SCORE(arrScore, x - 1, y);
neighborScores[1] = (x == width - 1) ? GET_SCORE(arrScore, 0, y) : GET_SCORE(arrScore, x + 1, y);
neighborScores[2] = (y == 0) ? GET_SCORE(arrScore, x, height - 1) : GET_SCORE(arrScore, x, y - 1);
neighborScores[3] = (y == height - 1) ? GET_SCORE(arrScore, x, 0) : GET_SCORE(arrScore, x, y + 1);
//choose highest ranking neighbor
float fHighestScore = GET_SCORE(arrScore,x,y);
int iChosenNeighbor = 4;
for (int i = 0; i < 4; ++i)
{
if (1.01f * fHighestScore < neighborScores[i])
{
fHighestScore = neighborScores[i];
iChosenNeighbor = i;
}
}
const float* pixelChosen = neighbors[iChosenNeighbor];
float pixelResult[3];
float fSum = 0;
//diffuse
for (int i = 0;i < 3;++i)
{
pixelResult[i] = powf(pixelChosen[i] * fDiffusionCoeff + pixelIn[i], fDiffusionPower);
fSum += powf(pixelResult[i], fNormPower);
}
//normalize
if (fSum != 0)
{
fSum = powf(fSum, 1.0f/ fNormPower);
for (int i = 0;i < 3;++i)
{
pixelResult[i] /= fSum;
}
}
//assign output
for (int i = 0; i < 3; ++i)
{
pixelOut[i] = pixelResult[i];
}
pixelOut[3] = 1.0f;
}
RPSSim::~RPSSim()
{
cudaFree(m_d_lastFrame);
getLastCudaError("cudaFree (g_texture_2d) failed");
cudaFree(m_d_thisFrame);
getLastCudaError("cudaFree (g_texture_2d) failed");
}
RPSSim::RPSSim(const char *strInitStatePath)
{
_ASSERT(0);
}
int RPSSim::RandomizeBuffer(float *d_buffer)
{
size_t nSize = m_height * m_width * 4;
curandGenerator_t gen;
CURAND_CALL(curandCreateGenerator(&gen,
CURAND_RNG_PSEUDO_DEFAULT));
CURAND_CALL(curandSetPseudoRandomGeneratorSeed(gen,
1234ULL));
CURAND_CALL(curandGenerateUniform(gen, d_buffer, nSize));
return 0;
}
RPSSim::RPSSim(int width, int height)
{
m_width = width;
m_height = height;
cudaMallocPitch(&m_d_lastFrame, &m_pitch, width * sizeof(float) * 4, height);
getLastCudaError("cudaMallocPitch (g_texture_2d) failed");
cudaMallocPitch(&m_d_thisFrame, &m_pitch, width * sizeof(float) * 4, height);
getLastCudaError("cudaMallocPitch (g_texture_2d) failed");
cudaMalloc(&m_d_arrScores, width * height * sizeof(float));
getLastCudaError("cudaMalloc (g_texture_2d) failed");
if ( RandomizeBuffer(m_d_thisFrame) )
printf("Error randomizing\n");
}
void* RPSSim::MakeOneRPSFrame(float t, const float fNormPower, const float fDiffusionPower, const float fDiffusionCoeff)
{
cudaError_t error = cudaSuccess;
dim3 Db = dim3(16, 16); // block dimensions are fixed to be 256 threads
dim3 Dg = dim3((GetWidth() + Db.x - 1) / Db.x, (GetHeight() + Db.y - 1) / Db.y);
float* temp = m_d_lastFrame;
m_d_lastFrame = m_d_thisFrame;
m_d_thisFrame = temp;
//kern1 << <Dg, Db >> >((unsigned char *)surface, width, height, pitch, t);
	K_CalcScores << <Dg, Db >> >((unsigned char *)m_d_lastFrame,m_d_arrScores, GetWidth(), GetHeight(), GetPitch());
	error = cudaGetLastError();
	if (error != cudaSuccess)
{
printf("K_CalcScores() failed to launch error = %d\n", error);
}
K_MakeNextFrame << <Dg, Db >> > ((unsigned char*)m_d_lastFrame, m_d_arrScores, (unsigned char*)m_d_thisFrame,
GetWidth(), GetHeight(), GetPitch(), fNormPower, fDiffusionPower, fDiffusionCoeff);
error = cudaGetLastError();
if (error != cudaSuccess)
{
printf("K_MakeNextFrame failed to launch error = %d\n", error);
}
return m_d_thisFrame;
}
|
e9dc1eb06520380e60ccdfc3f827ec949f434b8b.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef __MEDIANFILTER_CU_
#define __MEDIANFILTER_CU_
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <iostream>
#define datasize 100
inline void checkCudaErrors(hipError_t err) //cuda error handle function
{
if (hipSuccess != err)
{
fprintf(stderr, "CUDA Runtime API error:%s.\n", hipGetErrorString(err));
return;
}
}
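// 3x3 median filter: each thread gathers its 3x3 neighbourhood (zero-padded at the
// image borders) and selects the median via a partial selection sort of the 9-element window.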
__global__ void MedianFilter(int *In, int *Out, int Width, int Height)
{
int window[9];
int y = blockDim.y * blockIdx.y + threadIdx.y;
int x = blockDim.x * blockIdx.x + threadIdx.x;
	if (x < Width && x >= 0 && y < Height && y >= 0)
{
window[0] = (y == 0 || x == 0) ? 0 : In[(y - 1)* Width + x - 1];
window[1] = (y == 0) ? 0 : In[(y - 1)* Width + x];
window[2] = (y == 0 || x == Width - 1) ? 0 : In[(y - 1)* Width + x + 1];
window[3] = (x == 0) ? 0 : In[y* Width + x - 1];
window[4] = In[y* Width + x];
window[5] = (x == Width - 1) ? 0 : In[y* Width + x + 1];
window[6] = (y == Height - 1 || x == 0) ? 0 : In[(y + 1)* Width + x - 1];
window[7] = (y == Height - 1) ? 0 : In[(y + 1)* Width + x];
window[8] = (y == Height - 1 || x == Width - 1) ? 0 : In[(y + 1)* Width + x + 1];
for (unsigned int j = 0; j < 5; j++)
{
int min = j;
for (unsigned int l = j + 1; l < 9; l++)
if (window[l] < window[min])
min = l;
			const int temp = window[j];
window[j] = window[min];
window[min] = temp;
}
Out[y* Width + x] = window[4];
}
}
extern "C" void MedianFilter_host(int *pixel, int Width, int Height)
{
int *pixelIn, *pixelOut;
dim3 dimBlock(32, 32);
dim3 dimGrid((Width + dimBlock.x - 1) / dimBlock.x, (Height + dimBlock.y - 1) / dimBlock.y);
checkCudaErrors(hipMalloc((void**)&pixelIn, sizeof(int) * Width * Height));
checkCudaErrors(hipMalloc((void**)&pixelOut, sizeof(int) * Width * Height));
checkCudaErrors(hipMemcpy(pixelIn, pixel, sizeof(int) * Width * Height, hipMemcpyHostToDevice));
MedianFilter << <dimGrid, dimBlock >> > (pixelIn, pixelOut, Width, Height);
checkCudaErrors(hipMemcpy(pixel, pixelOut, sizeof(int) * Width * Height, hipMemcpyDeviceToHost));
hipFree(pixelIn);
hipFree(pixelOut);
}
#endif // ! __MEDIANFILTER_KERNEL_CU_ | e9dc1eb06520380e60ccdfc3f827ec949f434b8b.cu | #ifndef __MEDIANFILTER_CU_
#define __MEDIANFILTER_CU_
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <iostream>
#define datasize 100
inline void checkCudaErrors(cudaError err) //cuda error handle function
{
if (cudaSuccess != err)
{
fprintf(stderr, "CUDA Runtime API error:%s.\n", cudaGetErrorString(err));
return;
}
}
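// 3x3 median filter: each thread gathers its 3x3 neighbourhood (zero-padded at the
// image borders) and selects the median via a partial selection sort of the 9-element window.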
__global__ void MedianFilter(int *In, int *Out, int Width, int Height)
{
int window[9];
int y = blockDim.y * blockIdx.y + threadIdx.y;
int x = blockDim.x * blockIdx.x + threadIdx.x;
	if (x < Width && x >= 0 && y < Height && y >= 0)
{
window[0] = (y == 0 || x == 0) ? 0 : In[(y - 1)* Width + x - 1];
window[1] = (y == 0) ? 0 : In[(y - 1)* Width + x];
window[2] = (y == 0 || x == Width - 1) ? 0 : In[(y - 1)* Width + x + 1];
window[3] = (x == 0) ? 0 : In[y* Width + x - 1];
window[4] = In[y* Width + x];
window[5] = (x == Width - 1) ? 0 : In[y* Width + x + 1];
window[6] = (y == Height - 1 || x == 0) ? 0 : In[(y + 1)* Width + x - 1];
window[7] = (y == Height - 1) ? 0 : In[(y + 1)* Width + x];
window[8] = (y == Height - 1 || x == Width - 1) ? 0 : In[(y + 1)* Width + x + 1];
for (unsigned int j = 0; j < 5; j++)
{
int min = j;
for (unsigned int l = j + 1; l < 9; l++)
if (window[l] < window[min])
min = l;
			const int temp = window[j];
window[j] = window[min];
window[min] = temp;
}
Out[y* Width + x] = window[4];
}
}
extern "C" void MedianFilter_host(int *pixel, int Width, int Height)
{
int *pixelIn, *pixelOut;
dim3 dimBlock(32, 32);
dim3 dimGrid((Width + dimBlock.x - 1) / dimBlock.x, (Height + dimBlock.y - 1) / dimBlock.y);
checkCudaErrors(cudaMalloc((void**)&pixelIn, sizeof(int) * Width * Height));
checkCudaErrors(cudaMalloc((void**)&pixelOut, sizeof(int) * Width * Height));
checkCudaErrors(cudaMemcpy(pixelIn, pixel, sizeof(int) * Width * Height, cudaMemcpyHostToDevice));
MedianFilter << <dimGrid, dimBlock >> > (pixelIn, pixelOut, Width, Height);
checkCudaErrors(cudaMemcpy(pixel, pixelOut, sizeof(int) * Width * Height, cudaMemcpyDeviceToHost));
cudaFree(pixelIn);
cudaFree(pixelOut);
}
#endif // ! __MEDIANFILTER_KERNEL_CU_ |
cca7b72f7f3612b43ea63b338d97d8236c4ddb06.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef GPU_KERNEL
#define GPU_KERNEL
#endif
#include "CU_DS.h"
#include "../stencil.h"
#include <stdio.h>
using namespace FGPU;
__device__ int get_linear_size(int *size, int num_dims)
{
if(num_dims == 2)
{
return size[0]*size[1];
}
else
return size[0]*size[1]*size[2];
}
__device__ void get_relative_cor(int *cor, int *tile_cor, int *size, int dim, int relative_number)
{
for(int i = 0; i < dim; i++)
{
cor[i] = tile_cor[i] + relative_number%size[i];
relative_number/=size[i];
}
}
__global__ void compute_cuda_internal
(
int num_tiles,
Ltile *tiles,
void *input,
void *output,
int num_dims,
int unit_size,
int *dims, //contains size info for current grid
int stencil_idx
)
{
const unsigned int tid = threadIdx.x;
const unsigned int bid = blockIdx.x;
const unsigned int num_blocks = gridDim.x;
const unsigned int block_size = blockDim.x;
__shared__ int sdims[3];
__shared__ Ltile current_tile;
__shared__ int linear_size;
if(tid == 0)
{
for(int i = 0; i < 3; i++)
{
sdims[i] = dims[i];
}
}
for(int i = bid; i < num_tiles; i+= num_blocks)
{
if(tid==0)
{
current_tile = tiles[i];
linear_size = get_linear_size(current_tile.size, num_dims);
//printf("current tile: %d, %d, %d, size: %d, %d, %d\n", current_tile.offset[0], current_tile.offset[1], current_tile.offset[2], current_tile.size[0], current_tile.size[1], current_tile.size[2]);
}
__syncthreads();
//process the tile by a thread block
int tile_size = linear_size;
int offset[3];
int global_size[3];
for(int i = 0; i < 3; i++)
{
global_size[i] = sdims[i];
}
for(int j = tid; j < tile_size; j += block_size)
{
get_relative_cor(offset, current_tile.offset, current_tile.size, num_dims, j);
//printf("( %d, %d, %d )\n", offset[0], offset[1], offset[2]);
//if(offset[0]>=dims[0]||offset[1]>=dims[1]||offset[2]>=dims[2])
//{
// printf("( %d, %d, %d )\n", offset[0], offset[1], offset[2]);
//}
//printf("( %d, %d, %d )\n", offset[0], offset[1], offset[2]);
//FGPU::stencil(input, output, offset, global_size);
stencil_function_table[stencil_idx](input, output, offset, global_size);
}
__syncthreads();
}
}
| cca7b72f7f3612b43ea63b338d97d8236c4ddb06.cu | #ifndef GPU_KERNEL
#define GPU_KERNEL
#endif
#include "CU_DS.h"
#include "../stencil.h"
#include <stdio.h>
using namespace FGPU;
__device__ int get_linear_size(int *size, int num_dims)
{
if(num_dims == 2)
{
return size[0]*size[1];
}
else
return size[0]*size[1]*size[2];
}
__device__ void get_relative_cor(int *cor, int *tile_cor, int *size, int dim, int relative_number)
{
for(int i = 0; i < dim; i++)
{
cor[i] = tile_cor[i] + relative_number%size[i];
relative_number/=size[i];
}
}
__global__ void compute_cuda_internal
(
int num_tiles,
Ltile *tiles,
void *input,
void *output,
int num_dims,
int unit_size,
int *dims, //contains size info for current grid
int stencil_idx
)
{
const unsigned int tid = threadIdx.x;
const unsigned int bid = blockIdx.x;
const unsigned int num_blocks = gridDim.x;
const unsigned int block_size = blockDim.x;
__shared__ int sdims[3];
__shared__ Ltile current_tile;
__shared__ int linear_size;
if(tid == 0)
{
for(int i = 0; i < 3; i++)
{
sdims[i] = dims[i];
}
}
for(int i = bid; i < num_tiles; i+= num_blocks)
{
if(tid==0)
{
current_tile = tiles[i];
linear_size = get_linear_size(current_tile.size, num_dims);
//printf("current tile: %d, %d, %d, size: %d, %d, %d\n", current_tile.offset[0], current_tile.offset[1], current_tile.offset[2], current_tile.size[0], current_tile.size[1], current_tile.size[2]);
}
__syncthreads();
//process the tile by a thread block
int tile_size = linear_size;
int offset[3];
int global_size[3];
for(int i = 0; i < 3; i++)
{
global_size[i] = sdims[i];
}
for(int j = tid; j < tile_size; j += block_size)
{
get_relative_cor(offset, current_tile.offset, current_tile.size, num_dims, j);
//printf("( %d, %d, %d )\n", offset[0], offset[1], offset[2]);
//if(offset[0]>=dims[0]||offset[1]>=dims[1]||offset[2]>=dims[2])
//{
// printf("( %d, %d, %d )\n", offset[0], offset[1], offset[2]);
//}
//printf("( %d, %d, %d )\n", offset[0], offset[1], offset[2]);
//FGPU::stencil(input, output, offset, global_size);
stencil_function_table[stencil_idx](input, output, offset, global_size);
}
__syncthreads();
}
}
|
46e656270a70b2cf7ca8cc381b3407474b38a955.hip | // !!! This is a file automatically generated by hipify!!!
#include "WOA_Lib.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hiprand/hiprand_kernel.h>
#include <chrono>
#include <math.h>
#include <random>
#include <stdio.h>
#include <stdlib.h>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/generate.h>
#include <thrust/host_vector.h>
#include <thrust/random.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <time.h>
#include <vector>
#include <string>
#include <sstream>
#include <fstream>
#pragma region GPU method
__global__ void assignSequencedValueWOA(int* data) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
data[i] = i;
};
__global__ void assignSearchAgent(int* indexData, float* searchAgent, int size, int numberOfSearchAgent) {
extern __shared__ float sFloat[];
float* tempSearchAgent = sFloat;
int i = threadIdx.x;
int index = indexData[i];
for (int j = 0; j < size; j++) {
tempSearchAgent[i * size + j] = searchAgent[index * size + j];
}
__syncthreads();
for (int j = 0; j < size; j++) {
searchAgent[i * size + j] = tempSearchAgent[i * size + j];
}
};
__global__ void copyArray(float* newSearchAgent, float* searchAgent) {
int i = blockIdx.x;
int j = threadIdx.x;
searchAgent[(i + 1) * blockDim.x + j] = newSearchAgent[i * blockDim.x + j];
};
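// Whale Optimization Algorithm position updates. Block i updates search agent i+1
// (agent 0 is the current leader and is left untouched); only genes flagged in
// genChangedOption are changed.
// spiralUpdatingPosition: x' = |leader - x| * e^t * cos(2*pi*t) + leader, with t random in (-1, 1].
// huntPrey: x' = x_target - A * |C * x_target - x|, where x_target is the leader or a random agent.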
__global__ void spiralUpdatingPosition(int size, float* searchAgent, float* newSearchAgent, bool* genChangedOption) {
int i = blockIdx.x;
int j = threadIdx.x;
hiprandState_t state;
hiprand_init((unsigned long)clock() + i, 0, 0, &state);
float t = (hiprand_uniform(&state) * 2) - 1;
if (genChangedOption[j]) {
float D = abs(searchAgent[j] - searchAgent[(i+1) * size + j]);
newSearchAgent[i * size + j] = D * exp(t) * cos(2 * M_PI * t) + searchAgent[j];
}
else newSearchAgent[i * size + j] = searchAgent[(i+1) * size + j];
};
__global__ void huntPrey(int size, float* searchAgent, float* newSearchAgent, float C, float A, int indexOtherWhale, bool* genChangedOption){
int i = blockIdx.x;
int j = threadIdx.x;
if (genChangedOption[j]) {
float D = abs(C * searchAgent[indexOtherWhale * size + j] - searchAgent[(i+1) * size + j]);
newSearchAgent[i * size + j] = searchAgent[indexOtherWhale * size + j] - A * D;
}
else newSearchAgent[i * size + j] = searchAgent[(i+1) * size + j];
};
#pragma endregion
#pragma region Constructor
WOA_Lib::WOA_Lib(int size) {
this->size = size;
numberOfSearchAgent = 200;
generation = 10000;
doInitializationClass();
};
WOA_Lib::WOA_Lib(long generation, int size, long numberOfSearchAgent) {
this->size = size;
this->generation = generation;
this->numberOfSearchAgent = numberOfSearchAgent;
doInitializationClass();
};
WOA_Lib::~WOA_Lib() {
}
#pragma endregion
#pragma region List of Accessor and Mutator
long WOA_Lib::getGeneration() { return generation; };
void WOA_Lib::setGeneration(long generation) { this->generation = generation; };
long WOA_Lib::getNumberOfSearchAgent() { return numberOfSearchAgent; };
void WOA_Lib::setNumberOfSearchAgent(long numberOfSearchAgent) { this->numberOfSearchAgent = numberOfSearchAgent; };
float* WOA_Lib::getLeader() { return leader; };
//Used in Initialization for Setting Search Agent
void WOA_Lib::setSearchAgent(float* searchAgentArray) { searchAgent = searchAgentArray; };
float* WOA_Lib::getSearchAgent() { return searchAgent; };
float* WOA_Lib::getFitness() { return fitness; };
float WOA_Lib::getLastBestFitness() { return lastBestFitness; };
void WOA_Lib::setFitness(float* fitnessArray) { fitness = fitnessArray; };
int WOA_Lib::getSize() { return size; };
void WOA_Lib::setSize(int size) { this->size = size; };
float WOA_Lib::getTotalTime() { return totalTime; };
long WOA_Lib::getLastGeneration() { return currentGeneration; };
#pragma endregion
#pragma region Public Method
void WOA_Lib::run() {
t1 = high_resolution_clock::now();
doInitialize();
while (!checkStoppingCriteria()) {
if(currentGeneration%100==0) {
t2 = high_resolution_clock::now();
auto duration = duration_cast<milliseconds>(t2 - t1).count();
totalTime = (float)duration / 1000.00;
printf("%f\n", totalTime);
}
currentGeneration++;
doLoopInitialization();
doGPUOperation();
doSaveHistory();
}
doFreeMemory();
doPrintResults();
};
//Return Best Fitness In Every Generation
float* WOA_Lib::generateBestFitnessHistory() { return bestFitnessPerGeneration.data(); };
//Return Average Fitness In Every Generation
float* WOA_Lib::generateAverageFitnessHistory() { return averageFitnessPerGeneration.data(); };
#pragma endregion
__host__ __device__ float WOA_Lib::randomUniform() {
#ifdef __CUDA_ARCH__
int i = blockDim.x * blockIdx.x + threadIdx.x;
hiprandState_t state;
hiprand_init((unsigned long)clock() + i, 0, 0, &state);
return hiprand_uniform(&state);
#else
	return rand() / (float)RAND_MAX;
#endif
};
__host__ __device__ int WOA_Lib::randomInt(int max) {
#ifdef __CUDA_ARCH__
int i = blockDim.x * blockIdx.x + threadIdx.x;
hiprandState_t state;
hiprand_init((unsigned long)clock() + i, 0, 0, &state);
return hiprand_uniform(&state) * max;
#else
return rand() % (max + 1);
#endif
};
__host__ __device__ int WOA_Lib::randomInt(int min, int max) {
#ifdef __CUDA_ARCH__
int i = blockDim.x * blockIdx.x + threadIdx.x;
hiprandState_t state;
hiprand_init((unsigned long)clock() + i, 0, 0, &state);
return (hiprand_uniform(&state) * (max - min)) + min;
#else
return (rand() % (max + 1 - min)) + min;
#endif
};
__host__ __device__ float WOA_Lib::randomFloat(float max) {
#ifdef __CUDA_ARCH__
int i = blockDim.x * blockIdx.x + threadIdx.x;
hiprandState_t state;
hiprand_init((unsigned long)clock() + i, 0, 0, &state);
return hiprand_uniform(&state) * max;
#else
return (((float)rand()) / (float)RAND_MAX) * max;
#endif
};
__host__ __device__ float WOA_Lib::randomFloat(float min, float max) {
#ifdef __CUDA_ARCH__
int i = blockDim.x * blockIdx.x + threadIdx.x;
hiprandState_t state;
hiprand_init((unsigned long)clock() + i, 0, 0, &state);
return (hiprand_uniform(&state) * (max - min)) + min;
#else
return ((((float)rand()) / (float)RAND_MAX) * (max - min)) + min;
#endif
};
#pragma region Private Method
//Initialization After Constructor
void WOA_Lib::doInitializationClass() {
printf("Do initialization Class..\n");
srand(time(NULL));
hipSetDevice(0);
hipGetDeviceProperties(&deviceProp, 0);
printf("Finished Do initialization Class...\n");
};
//Initialization Before Looping
void WOA_Lib::doInitialize() {
printf("Do initialization..\n");
currentGeneration = -1;
lastBestFitness = 0.0f;
hipMallocManaged(&fitness, sizeof(float)*numberOfSearchAgent);
hipMallocManaged(&searchAgent, sizeof(float)*size*numberOfSearchAgent);
hipMallocManaged(&genChangedOption, sizeof(bool)*size);
hipHostMalloc(&leader, sizeof(float)*size);
doCountVariable();
this->doInitialization();
this->setGenChangedOption(genChangedOption);
this->doFitnessCheck(numberOfSearchAgent);
hipDeviceSynchronize();
doSortingAndAveraging(numberOfSearchAgent);
counterStop = 0;
printf("Finished Do Initialization...\n");
};
void WOA_Lib::doLoopInitialization() {
doCountVariable();
};
void WOA_Lib::doCountVariable() {
float r1 = (float)(rand() / (float)RAND_MAX), r2 = (float)(rand() / (float)RAND_MAX);
if (currentGeneration == -1) a = 2.0;
else a -= (2.0 / generation);
A = 2.0 * a * r1 - a;
C = 2.0 * r2;
p = (float)(rand() / (float)RAND_MAX);
};
//All Cuda Operation Inside Class
void WOA_Lib::doGPUOperation() {
float* newSearchAgent;
hipMallocManaged(&newSearchAgent, (numberOfSearchAgent-1) * sizeof(float) * size);
updateSearchAgentPosition(searchAgent, newSearchAgent);
hipDeviceSynchronize();
hipLaunchKernelGGL(( copyArray), dim3(numberOfSearchAgent-1),dim3(size), 0, 0, newSearchAgent,searchAgent);
hipDeviceSynchronize();
this->doFitnessCheck(numberOfSearchAgent);
hipDeviceSynchronize();
doSortingAndAveraging(numberOfSearchAgent);
hipDeviceSynchronize();
hipFree(newSearchAgent);
};
void WOA_Lib::updateSearchAgentPosition(float* searchAgent, float* newSearchAgent) {
if (p < 0.5) {
if (abs(A) >= 1) huntPrey<< <numberOfSearchAgent-1, size >> > (size, searchAgent, newSearchAgent, C, A, 0, genChangedOption);
		else hipLaunchKernelGGL((huntPrey), dim3(numberOfSearchAgent-1), dim3(size), 0, 0, size, searchAgent, newSearchAgent, C, A, rand() % numberOfSearchAgent, genChangedOption);
}
	else hipLaunchKernelGGL((spiralUpdatingPosition), dim3(numberOfSearchAgent-1), dim3(size), 0, 0, size, searchAgent, newSearchAgent, genChangedOption);
};
//Sorting Every searchAgent Based on Its Fitness's
void WOA_Lib::doSortingAndAveraging(int fitnessSize) {
//printf("Sorting And Averaging..\n");
int *resultedIndexSearchAgent;
hipMallocManaged(&resultedIndexSearchAgent, sizeof(int)*(fitnessSize));
int numBlocks = (int)ceil(numberOfSearchAgent*1.0f / deviceProp.maxThreadsPerBlock*1.0f);
int threadPerBlocks = numberOfSearchAgent* 1.0f / numBlocks * 1.0f;
hipLaunchKernelGGL(( assignSequencedValueWOA), dim3(numBlocks), dim3(threadPerBlocks), 0, 0, resultedIndexSearchAgent);
hipDeviceSynchronize();
if (currentGeneration != -1) {
averageFitnessThisGeneration = thrust::reduce(fitness, fitness + numberOfSearchAgent, 0, thrust::plus<float>());
hipDeviceSynchronize();
averageFitnessThisGeneration /= numberOfSearchAgent;
}
//Do sort_by_key using thrust, then return it to origin fitness and chromosome
hipDeviceSynchronize();
thrust::sort_by_key(fitness, fitness + fitnessSize, resultedIndexSearchAgent, thrust::greater<float>());
hipDeviceSynchronize();
numBlocks = 1;
threadPerBlocks = fitnessSize;
hipLaunchKernelGGL(( assignSearchAgent), dim3(numBlocks), dim3(threadPerBlocks), sizeof(float) * size * fitnessSize , 0, resultedIndexSearchAgent, searchAgent, size, numberOfSearchAgent);
hipDeviceSynchronize();
hipFree(resultedIndexSearchAgent);
};
//Save History of Best and Average Fitness
void WOA_Lib::doSaveHistory() {
this->bestFitnessPerGeneration.push_back(fitness[0]);
this->averageFitnessPerGeneration.push_back(this->averageFitnessThisGeneration);
for (int i = 0; i < size; i++) {
leader[i] = searchAgent[i];
}
};
void WOA_Lib::doPrintResults() {
t2 = high_resolution_clock::now();
auto duration = duration_cast<milliseconds>(t2 - t1).count();
totalTime = (float)duration / 1000.00;
printf("Operation Finished..\n");
printf("Total Execution Time: %f s\n", totalTime);
printf("Operation finished in generation %d...\n", currentGeneration);
printf("Best Fitness in last generation: %f\n", lastBestFitness);
}
void WOA_Lib::doFreeMemory() {
hipFree(fitness);
hipFree(searchAgent);
hipFree(genChangedOption);
};
//Function to Check Stopping Criteria
bool WOA_Lib::checkStoppingCriteria() {
	//If the best fitness has not improved for 100000 consecutive generations, stop the process
if (lastBestFitness == fitness[0]) {
if (counterStop >= 100000) {
return true;
}
counterStop++;
}
else counterStop = 0;
lastBestFitness = fitness[0];
if (currentGeneration >= generation) { return true; }
return false;
};
#pragma endregion | 46e656270a70b2cf7ca8cc381b3407474b38a955.cu | #include "WOA_Lib.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <curand_kernel.h>
#include <chrono>
#include <math.h>
#include <random>
#include <stdio.h>
#include <stdlib.h>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/generate.h>
#include <thrust/host_vector.h>
#include <thrust/random.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <time.h>
#include <vector>
#include <string>
#include <sstream>
#include <fstream>
#pragma region GPU method
__global__ void assignSequencedValueWOA(int* data) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
data[i] = i;
};
__global__ void assignSearchAgent(int* indexData, float* searchAgent, int size, int numberOfSearchAgent) {
extern __shared__ float sFloat[];
float* tempSearchAgent = sFloat;
int i = threadIdx.x;
int index = indexData[i];
for (int j = 0; j < size; j++) {
tempSearchAgent[i * size + j] = searchAgent[index * size + j];
}
__syncthreads();
for (int j = 0; j < size; j++) {
searchAgent[i * size + j] = tempSearchAgent[i * size + j];
}
};
__global__ void copyArray(float* newSearchAgent, float* searchAgent) {
int i = blockIdx.x;
int j = threadIdx.x;
searchAgent[(i + 1) * blockDim.x + j] = newSearchAgent[i * blockDim.x + j];
};
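// Whale Optimization Algorithm position updates. Block i updates search agent i+1
// (agent 0 is the current leader and is left untouched); only genes flagged in
// genChangedOption are changed.
// spiralUpdatingPosition: x' = |leader - x| * e^t * cos(2*pi*t) + leader, with t random in (-1, 1].
// huntPrey: x' = x_target - A * |C * x_target - x|, where x_target is the leader or a random agent.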
__global__ void spiralUpdatingPosition(int size, float* searchAgent, float* newSearchAgent, bool* genChangedOption) {
int i = blockIdx.x;
int j = threadIdx.x;
curandState state;
curand_init((unsigned long)clock() + i, 0, 0, &state);
float t = (curand_uniform(&state) * 2) - 1;
if (genChangedOption[j]) {
float D = abs(searchAgent[j] - searchAgent[(i+1) * size + j]);
newSearchAgent[i * size + j] = D * exp(t) * cos(2 * M_PI * t) + searchAgent[j];
}
else newSearchAgent[i * size + j] = searchAgent[(i+1) * size + j];
};
__global__ void huntPrey(int size, float* searchAgent, float* newSearchAgent, float C, float A, int indexOtherWhale, bool* genChangedOption){
int i = blockIdx.x;
int j = threadIdx.x;
if (genChangedOption[j]) {
float D = abs(C * searchAgent[indexOtherWhale * size + j] - searchAgent[(i+1) * size + j]);
newSearchAgent[i * size + j] = searchAgent[indexOtherWhale * size + j] - A * D;
}
else newSearchAgent[i * size + j] = searchAgent[(i+1) * size + j];
};
#pragma endregion
#pragma region Constructor
WOA_Lib::WOA_Lib(int size) {
this->size = size;
numberOfSearchAgent = 200;
generation = 10000;
doInitializationClass();
};
WOA_Lib::WOA_Lib(long generation, int size, long numberOfSearchAgent) {
this->size = size;
this->generation = generation;
this->numberOfSearchAgent = numberOfSearchAgent;
doInitializationClass();
};
WOA_Lib::~WOA_Lib() {
}
#pragma endregion
#pragma region List of Accessor and Mutator
long WOA_Lib::getGeneration() { return generation; };
void WOA_Lib::setGeneration(long generation) { this->generation = generation; };
long WOA_Lib::getNumberOfSearchAgent() { return numberOfSearchAgent; };
void WOA_Lib::setNumberOfSearchAgent(long numberOfSearchAgent) { this->numberOfSearchAgent = numberOfSearchAgent; };
float* WOA_Lib::getLeader() { return leader; };
//Used in Initialization for Setting Search Agent
void WOA_Lib::setSearchAgent(float* searchAgentArray) { searchAgent = searchAgentArray; };
float* WOA_Lib::getSearchAgent() { return searchAgent; };
float* WOA_Lib::getFitness() { return fitness; };
float WOA_Lib::getLastBestFitness() { return lastBestFitness; };
void WOA_Lib::setFitness(float* fitnessArray) { fitness = fitnessArray; };
int WOA_Lib::getSize() { return size; };
void WOA_Lib::setSize(int size) { this->size = size; };
float WOA_Lib::getTotalTime() { return totalTime; };
long WOA_Lib::getLastGeneration() { return currentGeneration; };
#pragma endregion
#pragma region Public Method
void WOA_Lib::run() {
t1 = high_resolution_clock::now();
doInitialize();
while (!checkStoppingCriteria()) {
if(currentGeneration%100==0) {
t2 = high_resolution_clock::now();
auto duration = duration_cast<milliseconds>(t2 - t1).count();
totalTime = (float)duration / 1000.00;
printf("%f\n", totalTime);
}
currentGeneration++;
doLoopInitialization();
doGPUOperation();
doSaveHistory();
}
doFreeMemory();
doPrintResults();
};
//Return Best Fitness In Every Generation
float* WOA_Lib::generateBestFitnessHistory() { return bestFitnessPerGeneration.data(); };
//Return Average Fitness In Every Generation
float* WOA_Lib::generateAverageFitnessHistory() { return averageFitnessPerGeneration.data(); };
#pragma endregion
__host__ __device__ float WOA_Lib::randomUniform() {
#ifdef __CUDA_ARCH__
int i = blockDim.x * blockIdx.x + threadIdx.x;
curandState state;
curand_init((unsigned long)clock() + i, 0, 0, &state);
return curand_uniform(&state);
#else
	return rand() / (float)RAND_MAX;
#endif
};
__host__ __device__ int WOA_Lib::randomInt(int max) {
#ifdef __CUDA_ARCH__
int i = blockDim.x * blockIdx.x + threadIdx.x;
curandState state;
curand_init((unsigned long)clock() + i, 0, 0, &state);
return curand_uniform(&state) * max;
#else
return rand() % (max + 1);
#endif
};
__host__ __device__ int WOA_Lib::randomInt(int min, int max) {
#ifdef __CUDA_ARCH__
int i = blockDim.x * blockIdx.x + threadIdx.x;
curandState state;
curand_init((unsigned long)clock() + i, 0, 0, &state);
return (curand_uniform(&state) * (max - min)) + min;
#else
return (rand() % (max + 1 - min)) + min;
#endif
};
__host__ __device__ float WOA_Lib::randomFloat(float max) {
#ifdef __CUDA_ARCH__
int i = blockDim.x * blockIdx.x + threadIdx.x;
curandState state;
curand_init((unsigned long)clock() + i, 0, 0, &state);
return curand_uniform(&state) * max;
#else
return (((float)rand()) / (float)RAND_MAX) * max;
#endif
};
__host__ __device__ float WOA_Lib::randomFloat(float min, float max) {
#ifdef __CUDA_ARCH__
int i = blockDim.x * blockIdx.x + threadIdx.x;
curandState state;
curand_init((unsigned long)clock() + i, 0, 0, &state);
return (curand_uniform(&state) * (max - min)) + min;
#else
return ((((float)rand()) / (float)RAND_MAX) * (max - min)) + min;
#endif
};
#pragma region Private Method
//Initialization After Constructor
void WOA_Lib::doInitializationClass() {
printf("Do initialization Class..\n");
srand(time(NULL));
cudaSetDevice(0);
cudaGetDeviceProperties(&deviceProp, 0);
printf("Finished Do initialization Class...\n");
};
//Initialization Before Looping
void WOA_Lib::doInitialize() {
printf("Do initialization..\n");
currentGeneration = -1;
lastBestFitness = 0.0f;
cudaMallocManaged(&fitness, sizeof(float)*numberOfSearchAgent);
cudaMallocManaged(&searchAgent, sizeof(float)*size*numberOfSearchAgent);
cudaMallocManaged(&genChangedOption, sizeof(bool)*size);
cudaMallocHost(&leader, sizeof(float)*size);
doCountVariable();
this->doInitialization();
this->setGenChangedOption(genChangedOption);
this->doFitnessCheck(numberOfSearchAgent);
cudaDeviceSynchronize();
doSortingAndAveraging(numberOfSearchAgent);
counterStop = 0;
printf("Finished Do Initialization...\n");
};
void WOA_Lib::doLoopInitialization() {
doCountVariable();
};
void WOA_Lib::doCountVariable() {
float r1 = (float)(rand() / (float)RAND_MAX), r2 = (float)(rand() / (float)RAND_MAX);
if (currentGeneration == -1) a = 2.0;
else a -= (2.0 / generation);
A = 2.0 * a * r1 - a;
C = 2.0 * r2;
p = (float)(rand() / (float)RAND_MAX);
};
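	// Editorial note: the update above follows the standard WOA coefficient
	// rules. With r1, r2 drawn uniformly from [0,1]:
	//   a : decreased linearly from 2 toward 0 over `generation` iterations
	//   A = 2*a*r1 - a   (so A lies in [-a, a])
	//   C = 2*r2         (so C lies in [0, 2])
	// p in [0,1] is used in updateSearchAgentPosition() to choose between the
	// encircling/search move (p < 0.5) and the spiral update (p >= 0.5).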
//All Cuda Operation Inside Class
void WOA_Lib::doGPUOperation() {
float* newSearchAgent;
cudaMallocManaged(&newSearchAgent, (numberOfSearchAgent-1) * sizeof(float) * size);
updateSearchAgentPosition(searchAgent, newSearchAgent);
cudaDeviceSynchronize();
copyArray<<<numberOfSearchAgent-1,size>>>(newSearchAgent,searchAgent);
cudaDeviceSynchronize();
this->doFitnessCheck(numberOfSearchAgent);
cudaDeviceSynchronize();
doSortingAndAveraging(numberOfSearchAgent);
cudaDeviceSynchronize();
cudaFree(newSearchAgent);
};
void WOA_Lib::updateSearchAgentPosition(float* searchAgent, float* newSearchAgent) {
if (p < 0.5) {
if (abs(A) >= 1) huntPrey<< <numberOfSearchAgent-1, size >> > (size, searchAgent, newSearchAgent, C, A, 0, genChangedOption);
else huntPrey<<<numberOfSearchAgent-1,size>>> (size, searchAgent, newSearchAgent, C, A, rand() % numberOfSearchAgent, genChangedOption);
}
else spiralUpdatingPosition<<<numberOfSearchAgent-1,size>>>(size, searchAgent, newSearchAgent, genChangedOption);
};
//Sorting Every searchAgent Based on Its Fitness's
void WOA_Lib::doSortingAndAveraging(int fitnessSize) {
//printf("Sorting And Averaging..\n");
int *resultedIndexSearchAgent;
cudaMallocManaged(&resultedIndexSearchAgent, sizeof(int)*(fitnessSize));
int numBlocks = (int)ceil(numberOfSearchAgent*1.0f / deviceProp.maxThreadsPerBlock*1.0f);
int threadPerBlocks = numberOfSearchAgent* 1.0f / numBlocks * 1.0f;
assignSequencedValueWOA<<<numBlocks, threadPerBlocks>>>(resultedIndexSearchAgent);
cudaDeviceSynchronize();
if (currentGeneration != -1) {
		averageFitnessThisGeneration = thrust::reduce(fitness, fitness + numberOfSearchAgent, 0.0f, thrust::plus<float>());
cudaDeviceSynchronize();
averageFitnessThisGeneration /= numberOfSearchAgent;
}
//Do sort_by_key using thrust, then return it to origin fitness and chromosome
cudaDeviceSynchronize();
thrust::sort_by_key(fitness, fitness + fitnessSize, resultedIndexSearchAgent, thrust::greater<float>());
cudaDeviceSynchronize();
numBlocks = 1;
threadPerBlocks = fitnessSize;
assignSearchAgent<<<numBlocks, threadPerBlocks, sizeof(float) * size * fitnessSize >>> (resultedIndexSearchAgent, searchAgent, size, numberOfSearchAgent);
cudaDeviceSynchronize();
cudaFree(resultedIndexSearchAgent);
};
//Save History of Best and Average Fitness
void WOA_Lib::doSaveHistory() {
this->bestFitnessPerGeneration.push_back(fitness[0]);
this->averageFitnessPerGeneration.push_back(this->averageFitnessThisGeneration);
for (int i = 0; i < size; i++) {
leader[i] = searchAgent[i];
}
};
void WOA_Lib::doPrintResults() {
t2 = high_resolution_clock::now();
auto duration = duration_cast<milliseconds>(t2 - t1).count();
totalTime = (float)duration / 1000.00;
printf("Operation Finished..\n");
printf("Total Execution Time: %f s\n", totalTime);
printf("Operation finished in generation %d...\n", currentGeneration);
printf("Best Fitness in last generation: %f\n", lastBestFitness);
}
void WOA_Lib::doFreeMemory() {
cudaFree(fitness);
cudaFree(searchAgent);
cudaFree(genChangedOption);
};
//Function to Check Stopping Criteria
bool WOA_Lib::checkStoppingCriteria() {
	//If the best fitness stays the same for 100000 consecutive generations, stop the process
if (lastBestFitness == fitness[0]) {
if (counterStop >= 100000) {
return true;
}
counterStop++;
}
else counterStop = 0;
lastBestFitness = fitness[0];
if (currentGeneration >= generation) { return true; }
return false;
};
#pragma endregion |
f88fcf3462a11c1b425ccf393cc2c260b5359e0d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "helper.cuh"
using namespace std;
Helper::Helper(Parameters &p) : p(p)
{ // Batching affects dimensions. Width and frames are the same for each class. Height changes.
height_1fr = p.numAscansPerBscan / p.batchFrames; // height of 1 frame
height_bfr = p.numAscansPerBscan; // height of a batch of frames
width = p.numCameraPixels;
width_2x = p.numCameraPixels * 2;
width_trm = p.endPixel - p.startPixel + 1; // trim width.
frames = p.numBScans;
// Grid dimensions also change as height changes.
dimLine_w = dim3((width+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK, 1, 1);
dimLine_w2 = dim3((width_2x+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK, 1, 1);
dimLine_wt = dim3((width_trm+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK, 1, 1);
dimGrid_B = dim3(TILE_WIDTH, TILE_WIDTH, 1); //kernel launch block size, 2d
dimLine_B = dim3(THREADS_PER_BLOCK, 1, 1); //kernel launch block size, 1d
w2Recip = 1.f/(float)width_2x; //reciprocal of these is taken so that later we can multiply instead of divide
grayRecip = 1.f/(float)p.grayLevel; //
}
Helper::~Helper()
{
}
void Helper::gpuAssert(hipError_t code, const char *file, int line, bool abort)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
//DP_LOGGER_ERROR << __FUNCTION__<<"\t\tgpuAssert: "<<hipGetErrorString(code);
if (abort) throw invalid_argument("CUDA Error");
}
}
void Helper::cufftAssert(hipfftResult err, const char *file, const int line)
{
if( HIPFFT_SUCCESS != err)
{
		fprintf(stderr, "CUFFT error in file '%s', line %d\nerror %d: %s\nterminating!\n", file, line, err, cudaGetErrorEnum(err));
throw invalid_argument("CUFFT Error");
}
}
const char* Helper::cudaGetErrorEnum(hipfftResult error)
{
switch (error)
{
case HIPFFT_SUCCESS:
return "HIPFFT_SUCCESS";
case HIPFFT_INVALID_PLAN:
return "HIPFFT_INVALID_PLAN";
case HIPFFT_ALLOC_FAILED:
return "HIPFFT_ALLOC_FAILED";
case HIPFFT_INVALID_TYPE:
return "HIPFFT_INVALID_TYPE";
case HIPFFT_INVALID_VALUE:
return "HIPFFT_INVALID_VALUE";
case HIPFFT_INTERNAL_ERROR:
return "HIPFFT_INTERNAL_ERROR";
case HIPFFT_EXEC_FAILED:
return "HIPFFT_EXEC_FAILED";
case HIPFFT_SETUP_FAILED:
return "HIPFFT_SETUP_FAILED";
case HIPFFT_INVALID_SIZE:
return "HIPFFT_INVALID_SIZE";
case HIPFFT_UNALIGNED_DATA:
return "HIPFFT_UNALIGNED_DATA";
}
return "<unknown>";
}
void Helper::columnMean(int h, int w, float2 *my_array, float *result_array, float &columnMeanMax)
{
float sum = 0;
float mean = 0;
int count = 0;
for (int j = 0; j < w; ++j)
{
for (int i = 0; i < h; ++i)
{
sum += my_array[i * w + j].x;
count++;
}
mean = (float)sum / count;
result_array[j] = mean;
if (mean > columnMeanMax)
columnMeanMax = mean;
sum = 0;
count = 0;
}
}
void Helper::FFT(int h, int w, float2 *initial_array, float2 *result_array)
{
int n[2] = {w, h};
hipfftHandle plan;
cufftErrchk( hipfftPlanMany(&plan,1,n,NULL,1,0,NULL,1,0,HIPFFT_C2C,h) );
cufftErrchk( hipfftExecC2C(plan, initial_array, result_array, HIPFFT_FORWARD) );
cufftErrchk( hipfftDestroy(plan) );
}
void Helper::IFT(int h, int w, dim3 dg, dim3 db, float2 *initial_array, float2 *result_array){
int n[2] = {w, h};
hipfftHandle plan;
cufftErrchk( hipfftPlanMany(&plan,1,n,NULL,1,0,NULL,1,0,HIPFFT_C2C,h) );
cufftErrchk( hipfftExecC2C(plan, initial_array, result_array, HIPFFT_BACKWARD) );
cufftErrchk( hipfftDestroy(plan) );
float sclr = (1.f/w);
hipLaunchKernelGGL(( scale_IFT), dim3(dg),dim3(db), 0, 0, h, w, sclr, result_array); gpuErrchk( hipPeekAtLastError() );
}
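// Editorial note: hipFFT/cuFFT inverse transforms are unnormalized, so a
// length-w forward transform followed by the inverse returns the input scaled
// by w; the scale_IFT launch above multiplies by sclr = 1/w to undo that.
// Round-trip sketch: FFT(h, w, x, X); IFT(h, w, dg, db, X, x); // x restored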
void Helper::output_csv(int height, int width, float *my_array, std::string flname)
{
float *temp_array = new float[height*width];
hipMemcpy(temp_array, my_array, (height * width * sizeof(float)), hipMemcpyDeviceToHost);
cout << "OUTPUT begins" << endl;
ofstream output_file(flname);
for (int i = 0; i < height ; i++){
for (int j = 0; j < width; j++)
output_file << temp_array[i * width + j] << ",";
output_file << endl;
}
output_file.close();
cout << "OUTPUT ends" << endl;
delete[] temp_array;
}
void Helper::output_csv(int height, int width, float2 *my_array, std::string flname)
{
float2 *temp_array = new float2[height*width];
hipMemcpy(temp_array, my_array, (height * width * sizeof(float2)), hipMemcpyDeviceToHost);
cout << "OUTPUT begins" << endl;
ofstream output_file(flname);
for (int i = 0; i < height ; i++){
for (int j = 0; j < width; j++)
output_file << temp_array[i * width + j].x << ","; // change to y for imaginary check.
output_file << endl;
}
output_file.close();
cout << "OUTPUT ends" << endl;
delete[] temp_array;
}
/* //Some extra debug functions I wrote.
void output_csv(int height, int width, vector<float> &my_array){
cout << "OUTPUT begins" << endl;
ofstream output_file("C:\\Users\\ans915\\Desktop\\data\\testfile.csv");
for (int i = 0; i < height ; i++){
for (int j = 0; j < width; j++)
output_file << my_array[i * width + j] << ",";
output_file << endl;
}
output_file.close();
cout << "OUTPUT ends" << endl;
}
void check_function(int height, int width, float *my_array){
cout << "Check_function starts" << endl;
vector<float> check_vector;
vector<float> difference_vector(height * width);
ifstream check_file;
check_file.open ("C:\\Users\\ans915\\Documents\\MATLAB\\my_data.txt");
float y;
while (check_file >> y)
check_vector.push_back(y);
check_file.close();
cout << "Check vector length is: " << check_vector.size() << endl;
float max_value = 0;
float min_value = 0;
int count = 0;
int min_index = 0;
int max_index = 0;
int max_error_occurences = 0;
for (int i = 0; i < height; i++){
for (int j = 0; j < width; j++){ //change here (length of j, variable below)
difference_vector[i * width + j] = check_vector[i * width + j] - my_array[i * width + j];
if ((i == 0) && (j < 100)) {
cout << j << " " << check_vector[i * width + j] << " " << my_array[i * width + j] << endl;
}
if (difference_vector[i * width + j] < min_value)
min_value = difference_vector[i * width + j];
if (difference_vector[i * width + j] > max_value)
max_value = difference_vector[i * width + j];
if ((difference_vector[i * width + j] == min_value) || (difference_vector[i * width + j] == max_value))
max_error_occurences++;
if (difference_vector[i * width + j] > difference_vector[max_index])
max_index = i * width + j;
if (difference_vector[i * width + j] < difference_vector[min_index])
min_index = i * width + j;
count++;
}
}
cout << endl << "Max difference value is: " << max_value << endl;
cout << "Max difference occurs at index: " << max_index << endl;
cout << "Min difference value is: " << min_value << endl;
cout << "Min difference occurs at index: " << min_index << endl;
cout << "Occurences: " << max_error_occurences << endl;
cout << "Values compared: " << count << endl << endl;
system("pause");
}
void check_function(int height, int width, float2 *my_array){
cout << "Check_function starts" << endl;
vector<float> check_vector;
vector<float> difference_vector(height * width);
ifstream check_file;
check_file.open ("C:\\Users\\ans915\\Documents\\MATLAB\\my_data.txt");
float y;
while (check_file >> y)
check_vector.push_back(y);
check_file.close();
cout << "Check vector length is: " << check_vector.size() << endl;
//cout << "cpp vector length is: " << my_array.size() << endl; //change here
float max_value = 0;
float min_value = 0;
int count = 0;
int min_index = 0;
int max_index = 0;
int max_error_occurences = 0;
for (int i = 0; i < height; i++){
for (int j = 0; j < width; j++){ //change here (length of j, variable below)
difference_vector[i * width + j] = check_vector[i * width + j] - my_array[i * width + j].x;
if ((i == 0) && (j < 100)) {
cout << j << " " << check_vector[i * width + j] << " " << my_array[i * width + j].x << endl;
}
if (difference_vector[i * width + j] < min_value){
min_value = difference_vector[i * width + j];
}
if (difference_vector[i * width + j] > max_value)
max_value = difference_vector[i * width + j];
if ((difference_vector[i * width + j] == min_value) || (difference_vector[i * width + j] == max_value))
max_error_occurences++;
if (difference_vector[i * width + j] > difference_vector[max_index])
max_index = i * width + j;
if (difference_vector[i * width + j] < difference_vector[min_index])
min_index = i * width + j;
count++;
}
}
float min_percent_error = 0;
float max_percent_error = 0;
float current_percent_error = 0;
for (int i = 0; i < height; i++){
for (int j = 0; j < width; j++){
current_percent_error = difference_vector[i * width + j] / my_array[i * width + j].x * 100;
if (current_percent_error < min_percent_error)
min_percent_error = current_percent_error;
if (current_percent_error > max_percent_error)
max_percent_error = current_percent_error;
}
}
cout << endl << "Max difference value is: " << max_value << " at index: " << max_index << endl;
cout << "Min difference value is: " << min_value << " at index: " << min_index << endl;
cout << "Occurences: " << max_error_occurences << endl;
cout << "Values compared: " << count << endl << endl << endl;
cout << "Min percent error is: " << min_percent_error << endl;
cout << "Max percent error is: " << max_percent_error << endl;
system("pause");
}
void check_function(int height, int width, vector<float> &my_array){
cout << "Check_function starts" << endl;
vector<float> check_vector;
vector<float> difference_vector(height * width);
ifstream check_file;
check_file.open ("C:\\Users\\ans915\\Documents\\MATLAB\\my_data.txt");
float y;
while (check_file >> y)
check_vector.push_back(y);
check_file.close();
cout << "Check vector length is: " << check_vector.size() << endl;
cout << "cpp vector length is: " << my_array.size() << endl; //change here
float max_value = 0;
float min_value = 0;
int count = 0;
int min_index = 0;
int max_index = 0;
int max_error_occurences = 0;
for (int i = 0; i < height; i++){
for (int j = 0; j < width; j++){ //change here (length of j, variable below)
difference_vector[i * width + j] = check_vector[i * width + j] - my_array[i * width + j];
if ((i == 0) && (j < 100)) {
cout << j << " " << check_vector[i * width + j] << " " << my_array[i * width + j] << endl;
}
if (difference_vector[i * width + j] < min_value)
min_value = difference_vector[i * width + j];
if (difference_vector[i * width + j] > max_value)
max_value = difference_vector[i * width + j];
if ((difference_vector[i * width + j] == min_value) || (difference_vector[i * width + j] == max_value))
max_error_occurences++;
if (difference_vector[i * width + j] > difference_vector[max_index])
max_index = i * width + j;
if (difference_vector[i * width + j] < difference_vector[min_index])
min_index = i * width + j;
count++;
}
}
cout << endl << "Max difference value is: " << max_value << endl;
cout << "Max difference occurs at index: " << max_index << endl;
cout << "Min difference value is: " << min_value << endl;
cout << "Min difference occurs at index: " << min_index << endl;
cout << "Occurences: " << max_error_occurences << endl;
cout << "Values compared: " << count << endl << endl;
system("pause");
}
*/ | f88fcf3462a11c1b425ccf393cc2c260b5359e0d.cu | #include "helper.cuh"
using namespace std;
Helper::Helper(Parameters &p) : p(p)
{ // Batching affects dimensions. Width and frames are the same for each class. Height changes.
height_1fr = p.numAscansPerBscan / p.batchFrames; // height of 1 frame
height_bfr = p.numAscansPerBscan; // height of a batch of frames
width = p.numCameraPixels;
width_2x = p.numCameraPixels * 2;
width_trm = p.endPixel - p.startPixel + 1; // trim width.
frames = p.numBScans;
// Grid dimensions also change as height changes.
dimLine_w = dim3((width+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK, 1, 1);
dimLine_w2 = dim3((width_2x+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK, 1, 1);
dimLine_wt = dim3((width_trm+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK, 1, 1);
dimGrid_B = dim3(TILE_WIDTH, TILE_WIDTH, 1); //kernel launch block size, 2d
dimLine_B = dim3(THREADS_PER_BLOCK, 1, 1); //kernel launch block size, 1d
w2Recip = 1.f/(float)width_2x; //reciprocal of these is taken so that later we can multiply instead of divide
grayRecip = 1.f/(float)p.grayLevel; //
}
Helper::~Helper()
{
}
void Helper::gpuAssert(cudaError_t code, const char *file, int line, bool abort)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
//DP_LOGGER_ERROR << __FUNCTION__<<"\t\tgpuAssert: "<<cudaGetErrorString(code);
if (abort) throw invalid_argument("CUDA Error");
}
}
void Helper::cufftAssert(cufftResult err, const char *file, const int line)
{
if( CUFFT_SUCCESS != err)
{
		fprintf(stderr, "CUFFT error in file '%s', line %d\nerror %d: %s\nterminating!\n", file, line, err, cudaGetErrorEnum(err));
throw invalid_argument("CUFFT Error");
}
}
const char* Helper::cudaGetErrorEnum(cufftResult error)
{
switch (error)
{
case CUFFT_SUCCESS:
return "CUFFT_SUCCESS";
case CUFFT_INVALID_PLAN:
return "CUFFT_INVALID_PLAN";
case CUFFT_ALLOC_FAILED:
return "CUFFT_ALLOC_FAILED";
case CUFFT_INVALID_TYPE:
return "CUFFT_INVALID_TYPE";
case CUFFT_INVALID_VALUE:
return "CUFFT_INVALID_VALUE";
case CUFFT_INTERNAL_ERROR:
return "CUFFT_INTERNAL_ERROR";
case CUFFT_EXEC_FAILED:
return "CUFFT_EXEC_FAILED";
case CUFFT_SETUP_FAILED:
return "CUFFT_SETUP_FAILED";
case CUFFT_INVALID_SIZE:
return "CUFFT_INVALID_SIZE";
case CUFFT_UNALIGNED_DATA:
return "CUFFT_UNALIGNED_DATA";
}
return "<unknown>";
}
void Helper::columnMean(int h, int w, float2 *my_array, float *result_array, float &columnMeanMax)
{
float sum = 0;
float mean = 0;
int count = 0;
for (int j = 0; j < w; ++j)
{
for (int i = 0; i < h; ++i)
{
sum += my_array[i * w + j].x;
count++;
}
mean = (float)sum / count;
result_array[j] = mean;
if (mean > columnMeanMax)
columnMeanMax = mean;
sum = 0;
count = 0;
}
}
void Helper::FFT(int h, int w, float2 *initial_array, float2 *result_array)
{
int n[2] = {w, h};
cufftHandle plan;
cufftErrchk( cufftPlanMany(&plan,1,n,NULL,1,0,NULL,1,0,CUFFT_C2C,h) );
cufftErrchk( cufftExecC2C(plan, initial_array, result_array, CUFFT_FORWARD) );
cufftErrchk( cufftDestroy(plan) );
}
void Helper::IFT(int h, int w, dim3 dg, dim3 db, float2 *initial_array, float2 *result_array){
int n[2] = {w, h};
cufftHandle plan;
cufftErrchk( cufftPlanMany(&plan,1,n,NULL,1,0,NULL,1,0,CUFFT_C2C,h) );
cufftErrchk( cufftExecC2C(plan, initial_array, result_array, CUFFT_INVERSE) );
cufftErrchk( cufftDestroy(plan) );
float sclr = (1.f/w);
scale_IFT<<<dg,db>>>(h, w, sclr, result_array); gpuErrchk( cudaPeekAtLastError() );
}
void Helper::output_csv(int height, int width, float *my_array, std::string flname)
{
float *temp_array = new float[height*width];
cudaMemcpy(temp_array, my_array, (height * width * sizeof(float)), cudaMemcpyDeviceToHost);
cout << "OUTPUT begins" << endl;
ofstream output_file(flname);
for (int i = 0; i < height ; i++){
for (int j = 0; j < width; j++)
output_file << temp_array[i * width + j] << ",";
output_file << endl;
}
output_file.close();
cout << "OUTPUT ends" << endl;
delete[] temp_array;
}
void Helper::output_csv(int height, int width, float2 *my_array, std::string flname)
{
float2 *temp_array = new float2[height*width];
cudaMemcpy(temp_array, my_array, (height * width * sizeof(float2)), cudaMemcpyDeviceToHost);
cout << "OUTPUT begins" << endl;
ofstream output_file(flname);
for (int i = 0; i < height ; i++){
for (int j = 0; j < width; j++)
output_file << temp_array[i * width + j].x << ","; // change to y for imaginary check.
output_file << endl;
}
output_file.close();
cout << "OUTPUT ends" << endl;
delete[] temp_array;
}
/* //Some extra debug functions I wrote.
void output_csv(int height, int width, vector<float> &my_array){
cout << "OUTPUT begins" << endl;
ofstream output_file("C:\\Users\\ans915\\Desktop\\data\\testfile.csv");
for (int i = 0; i < height ; i++){
for (int j = 0; j < width; j++)
output_file << my_array[i * width + j] << ",";
output_file << endl;
}
output_file.close();
cout << "OUTPUT ends" << endl;
}
void check_function(int height, int width, float *my_array){
cout << "Check_function starts" << endl;
vector<float> check_vector;
vector<float> difference_vector(height * width);
ifstream check_file;
check_file.open ("C:\\Users\\ans915\\Documents\\MATLAB\\my_data.txt");
float y;
while (check_file >> y)
check_vector.push_back(y);
check_file.close();
cout << "Check vector length is: " << check_vector.size() << endl;
float max_value = 0;
float min_value = 0;
int count = 0;
int min_index = 0;
int max_index = 0;
int max_error_occurences = 0;
for (int i = 0; i < height; i++){
for (int j = 0; j < width; j++){ //change here (length of j, variable below)
difference_vector[i * width + j] = check_vector[i * width + j] - my_array[i * width + j];
if ((i == 0) && (j < 100)) {
cout << j << " " << check_vector[i * width + j] << " " << my_array[i * width + j] << endl;
}
if (difference_vector[i * width + j] < min_value)
min_value = difference_vector[i * width + j];
if (difference_vector[i * width + j] > max_value)
max_value = difference_vector[i * width + j];
if ((difference_vector[i * width + j] == min_value) || (difference_vector[i * width + j] == max_value))
max_error_occurences++;
if (difference_vector[i * width + j] > difference_vector[max_index])
max_index = i * width + j;
if (difference_vector[i * width + j] < difference_vector[min_index])
min_index = i * width + j;
count++;
}
}
cout << endl << "Max difference value is: " << max_value << endl;
cout << "Max difference occurs at index: " << max_index << endl;
cout << "Min difference value is: " << min_value << endl;
cout << "Min difference occurs at index: " << min_index << endl;
cout << "Occurences: " << max_error_occurences << endl;
cout << "Values compared: " << count << endl << endl;
system("pause");
}
void check_function(int height, int width, float2 *my_array){
cout << "Check_function starts" << endl;
vector<float> check_vector;
vector<float> difference_vector(height * width);
ifstream check_file;
check_file.open ("C:\\Users\\ans915\\Documents\\MATLAB\\my_data.txt");
float y;
while (check_file >> y)
check_vector.push_back(y);
check_file.close();
cout << "Check vector length is: " << check_vector.size() << endl;
//cout << "cpp vector length is: " << my_array.size() << endl; //change here
float max_value = 0;
float min_value = 0;
int count = 0;
int min_index = 0;
int max_index = 0;
int max_error_occurences = 0;
for (int i = 0; i < height; i++){
for (int j = 0; j < width; j++){ //change here (length of j, variable below)
difference_vector[i * width + j] = check_vector[i * width + j] - my_array[i * width + j].x;
if ((i == 0) && (j < 100)) {
cout << j << " " << check_vector[i * width + j] << " " << my_array[i * width + j].x << endl;
}
if (difference_vector[i * width + j] < min_value){
min_value = difference_vector[i * width + j];
}
if (difference_vector[i * width + j] > max_value)
max_value = difference_vector[i * width + j];
if ((difference_vector[i * width + j] == min_value) || (difference_vector[i * width + j] == max_value))
max_error_occurences++;
if (difference_vector[i * width + j] > difference_vector[max_index])
max_index = i * width + j;
if (difference_vector[i * width + j] < difference_vector[min_index])
min_index = i * width + j;
count++;
}
}
float min_percent_error = 0;
float max_percent_error = 0;
float current_percent_error = 0;
for (int i = 0; i < height; i++){
for (int j = 0; j < width; j++){
current_percent_error = difference_vector[i * width + j] / my_array[i * width + j].x * 100;
if (current_percent_error < min_percent_error)
min_percent_error = current_percent_error;
if (current_percent_error > max_percent_error)
max_percent_error = current_percent_error;
}
}
cout << endl << "Max difference value is: " << max_value << " at index: " << max_index << endl;
cout << "Min difference value is: " << min_value << " at index: " << min_index << endl;
cout << "Occurences: " << max_error_occurences << endl;
cout << "Values compared: " << count << endl << endl << endl;
cout << "Min percent error is: " << min_percent_error << endl;
cout << "Max percent error is: " << max_percent_error << endl;
system("pause");
}
void check_function(int height, int width, vector<float> &my_array){
cout << "Check_function starts" << endl;
vector<float> check_vector;
vector<float> difference_vector(height * width);
ifstream check_file;
check_file.open ("C:\\Users\\ans915\\Documents\\MATLAB\\my_data.txt");
float y;
while (check_file >> y)
check_vector.push_back(y);
check_file.close();
cout << "Check vector length is: " << check_vector.size() << endl;
cout << "cpp vector length is: " << my_array.size() << endl; //change here
float max_value = 0;
float min_value = 0;
int count = 0;
int min_index = 0;
int max_index = 0;
int max_error_occurences = 0;
for (int i = 0; i < height; i++){
for (int j = 0; j < width; j++){ //change here (length of j, variable below)
difference_vector[i * width + j] = check_vector[i * width + j] - my_array[i * width + j];
if ((i == 0) && (j < 100)) {
cout << j << " " << check_vector[i * width + j] << " " << my_array[i * width + j] << endl;
}
if (difference_vector[i * width + j] < min_value)
min_value = difference_vector[i * width + j];
if (difference_vector[i * width + j] > max_value)
max_value = difference_vector[i * width + j];
if ((difference_vector[i * width + j] == min_value) || (difference_vector[i * width + j] == max_value))
max_error_occurences++;
if (difference_vector[i * width + j] > difference_vector[max_index])
max_index = i * width + j;
if (difference_vector[i * width + j] < difference_vector[min_index])
min_index = i * width + j;
count++;
}
}
cout << endl << "Max difference value is: " << max_value << endl;
cout << "Max difference occurs at index: " << max_index << endl;
cout << "Min difference value is: " << min_value << endl;
cout << "Min difference occurs at index: " << min_index << endl;
cout << "Occurences: " << max_error_occurences << endl;
cout << "Values compared: " << count << endl << endl;
system("pause");
}
*/ |
d118b7335aa316d0a8edca7391ed09188e43f763.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "add_calculation.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
char *dev_a = NULL;
hipMalloc(&dev_a, XSIZE*YSIZE);
char *dev_b = NULL;
hipMalloc(&dev_b, XSIZE*YSIZE);
char *dev_c = NULL;
hipMalloc(&dev_c, XSIZE*YSIZE);
int k = 1;
int j = 1;
int num_matrices = 1;
int matrix_size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((add_calculation), dim3(gridBlock), dim3(threadBlock), 0, 0, dev_a, dev_b, dev_c, k, j, num_matrices, matrix_size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((add_calculation), dim3(gridBlock), dim3(threadBlock), 0, 0, dev_a, dev_b, dev_c, k, j, num_matrices, matrix_size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((add_calculation), dim3(gridBlock), dim3(threadBlock), 0, 0, dev_a, dev_b, dev_c, k, j, num_matrices, matrix_size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | d118b7335aa316d0a8edca7391ed09188e43f763.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "add_calculation.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
char *dev_a = NULL;
cudaMalloc(&dev_a, XSIZE*YSIZE);
char *dev_b = NULL;
cudaMalloc(&dev_b, XSIZE*YSIZE);
char *dev_c = NULL;
cudaMalloc(&dev_c, XSIZE*YSIZE);
int k = 1;
int j = 1;
int num_matrices = 1;
int matrix_size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
add_calculation<<<gridBlock,threadBlock>>>(dev_a,dev_b,dev_c,k,j,num_matrices,matrix_size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
add_calculation<<<gridBlock,threadBlock>>>(dev_a,dev_b,dev_c,k,j,num_matrices,matrix_size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
add_calculation<<<gridBlock,threadBlock>>>(dev_a,dev_b,dev_c,k,j,num_matrices,matrix_size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
0ef4c4c8718a619b73f65d2a0b7c2f9a8ec631b3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
__global__ void testkernel(int *data, int size){
for (int i = 1; i < size; i++) data[0] += data[i];
}
extern "C" {
int cudatestfunc(int *data, int size){
int *d_data;
hipMalloc(&d_data, size*sizeof(int));
hipMemcpy(d_data, data, size*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( testkernel), dim3(1),dim3(1), 0, 0, d_data, size);
int result;
hipMemcpy(&result, d_data, sizeof(int), hipMemcpyDeviceToHost);
cudaCheckErrors("cuda error");
return result;
}
}
| 0ef4c4c8718a619b73f65d2a0b7c2f9a8ec631b3.cu | #include <stdio.h>
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
__global__ void testkernel(int *data, int size){
for (int i = 1; i < size; i++) data[0] += data[i];
}
extern "C" {
int cudatestfunc(int *data, int size){
int *d_data;
cudaMalloc(&d_data, size*sizeof(int));
cudaMemcpy(d_data, data, size*sizeof(int), cudaMemcpyHostToDevice);
testkernel<<<1,1>>>(d_data, size);
int result;
cudaMemcpy(&result, d_data, sizeof(int), cudaMemcpyDeviceToHost);
cudaCheckErrors("cuda error");
return result;
}
}
|
5f653d2ea1d28c1b3e33e9fcaa4a44d58d6ba099.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <algorithm>
#include <hip/hip_fp16.h>
#include <cassert>
#include "ConstantOfShape.hpp"
//sds: for this split, the index here must be 0, meaning there is only one output
//sds: index,The index of the output tensor.
nvinfer1::Dims ConstantOfShapePlugin::getOutputDimensions(int index,
const nvinfer1::Dims *inputDims,
int nbInputs) {
// input shape and input value is weights
assert(nbInputs == 0);
assert(index == 0);// only one output
return output_dims;
}
int ConstantOfShapePlugin::initialize() {
// nvinfer1::Dims dims = this->getInputDims(0);
_numbers = 1;
for( int i=output_dims.nbDims-1; i>=0; i-- ) {
_numbers *= output_dims.d[i];
}
return 0;
}
template<typename T>
__global__ void constant_shape_kernel(unsigned long long n, T value, T * __restrict__ y) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (n); index += blockDim.x * gridDim.x)
{
y[index] = value;
}
}
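// Editorial note: the kernel above uses a grid-stride loop, so any launch
// configuration is functionally correct; enqueue() below sizes the grid to
// cover the output exactly, e.g. for _numbers = 1000 it launches
// (1000 + 511) / 512 = 2 blocks of 512 threads and each thread writes at
// most one element.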
//sds-temp: only supports input[0] as the shape and input[1] as a scalar input
int ConstantOfShapePlugin::enqueue(int batchSize,
const void *const *inputs, void **outputs,
void *workspace, hipStream_t stream) {
//float const* idata1 = reinterpret_cast<float const*>(inputs[0]);
float * odatas = reinterpret_cast<float *>(outputs[0]);
dim3 block(512);
dim3 grid((_numbers + 512 - 1) / 512);
hipLaunchKernelGGL(( constant_shape_kernel), dim3(grid), dim3(block), 0, stream, _numbers, _value, odatas);
gdb_copy_to_cpu("constantOfShape", odatas, _numbers);
return hipGetLastError() != hipSuccess;
}
| 5f653d2ea1d28c1b3e33e9fcaa4a44d58d6ba099.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <algorithm>
#include <cuda_fp16.h>
#include <cassert>
#include "ConstantOfShape.hpp"
//sds: for this split, the index here must be 0, meaning there is only one output
//sds: index,The index of the output tensor.
nvinfer1::Dims ConstantOfShapePlugin::getOutputDimensions(int index,
const nvinfer1::Dims *inputDims,
int nbInputs) {
// input shape and input value is weights
assert(nbInputs == 0);
assert(index == 0);// only one output
return output_dims;
}
int ConstantOfShapePlugin::initialize() {
// nvinfer1::Dims dims = this->getInputDims(0);
_numbers = 1;
for( int i=output_dims.nbDims-1; i>=0; i-- ) {
_numbers *= output_dims.d[i];
}
return 0;
}
template<typename T>
__global__ void constant_shape_kernel(unsigned long long n, T value, T * __restrict__ y) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (n); index += blockDim.x * gridDim.x)
{
y[index] = value;
}
}
//sds-temp: only supports input[0] as the shape and input[1] as a scalar input
int ConstantOfShapePlugin::enqueue(int batchSize,
const void *const *inputs, void **outputs,
void *workspace, cudaStream_t stream) {
//float const* idata1 = reinterpret_cast<float const*>(inputs[0]);
float * odatas = reinterpret_cast<float *>(outputs[0]);
dim3 block(512);
dim3 grid((_numbers + 512 - 1) / 512);
constant_shape_kernel<<<grid, block, 0, stream>>>(_numbers, _value, odatas);
gdb_copy_to_cpu("constantOfShape", odatas, _numbers);
return cudaGetLastError() != cudaSuccess;
}
|
26ebb373e7da6c11405d0ecc30084ab76d7f8296.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mtbs_cu.h"
#include <unistd.h>
#include "stream.h"
hipCtx_t context;
hipFunction_t func_sub_kernel;
__device__ skrun_t *d_skruns;
#define SK_PROTO(name) __device__ int name(void *args[])
#define SK_FUNCS(base) SK_PROTO(base);
SK_FUNCS(loopcalc)
SK_FUNCS(mklc)
SK_FUNCS(gma)
SK_FUNCS(lma)
SK_FUNCS(kmeans)
SK_FUNCS(mandelbrot)
SK_FUNCS(irregular)
SK_FUNCS(mm)
SK_FUNCS(syncsum)
static __device__ int
run_sub_kernel_func(skid_t skid, void *args[])
{
switch (skid) {
case LOOPCALC:
return loopcalc(args);
case MKLC:
return mklc(args);
case GMA:
return gma(args);
case LMA:
return lma(args);
case KMEANS:
return kmeans(args);
case MANDELBROT:
return mandelbrot(args);
case IRREGULAR:
return irregular(args);
case MM:
return mm(args);
case SYNCSUM:
return syncsum(args);
default:
return 0;
}
}
__device__ void
run_sub_kernel(skrun_t *skr)
{
int res;
res = run_sub_kernel_func(skr->skid, (void **)skr->args);
if (get_blockIdxX() == 0 && get_blockIdxY() == 0 && get_threadIdxX() == 0 && get_threadIdxY() == 0) {
skr->res = res;
}
}
extern "C" __global__ void
sub_kernel_func(skrun_t *skr)
{
run_sub_kernel(skr);
}
sk_t
launch_kernel(skid_t skid, vstream_t stream, dim3 dimGrid, dim3 dimBlock, void *args[])
{
skrun_t skrun;
skrun.skid = skid;
skrun.dimGrid = dimGrid;
skrun.dimBlock = dimBlock;
memcpy(skrun.args, args, sizeof(void *) * MAX_ARGS);
skrun.res = 0;
skrun.n_tbs = dimGrid.x * dimGrid.y;
skrun.n_mtbs_per_tb = dimBlock.x * dimBlock.y / N_THREADS_PER_mTB;
return sched->submit_skrun(stream, &skrun);
}
void
wait_kernel(sk_t sk, vstream_t vstream, int *pres)
{
sched->wait_skrun(sk, vstream, pres);
}
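/*
 * Illustrative submission sketch (editorial, not part of mtbs): assuming a
 * vstream_t previously obtained from the framework's stream layer (stream.h),
 * a benchmark sub-kernel is queued and awaited roughly as follows:
 *
 *   void *args[MAX_ARGS] = { ... };                   // benchmark arguments
 *   sk_t sk = launch_kernel(LOOPCALC, vstream,
 *                           dim3(n_tbs), dim3(N_THREADS_PER_mTB), args);
 *   int res;
 *   wait_kernel(sk, vstream, &res);                   // blocks until the result is set
 */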
void
init_skrun(void)
{
hipError_t res;
res = hipModuleGetFunction(&func_sub_kernel, mod, "sub_kernel_func");
if (res != hipSuccess) {
error("failed to get sub_kernel_func: %s\n", get_cuda_error_msg(res));
}
if (sched->init_skrun)
sched->init_skrun();
}
void
fini_skrun(void)
{
if (sched->fini_skrun)
sched->fini_skrun();
}
| 26ebb373e7da6c11405d0ecc30084ab76d7f8296.cu | #include "mtbs_cu.h"
#include <unistd.h>
#include "stream.h"
CUcontext context;
CUfunction func_sub_kernel;
__device__ skrun_t *d_skruns;
#define SK_PROTO(name) __device__ int name(void *args[])
#define SK_FUNCS(base) SK_PROTO(base);
SK_FUNCS(loopcalc)
SK_FUNCS(mklc)
SK_FUNCS(gma)
SK_FUNCS(lma)
SK_FUNCS(kmeans)
SK_FUNCS(mandelbrot)
SK_FUNCS(irregular)
SK_FUNCS(mm)
SK_FUNCS(syncsum)
static __device__ int
run_sub_kernel_func(skid_t skid, void *args[])
{
switch (skid) {
case LOOPCALC:
return loopcalc(args);
case MKLC:
return mklc(args);
case GMA:
return gma(args);
case LMA:
return lma(args);
case KMEANS:
return kmeans(args);
case MANDELBROT:
return mandelbrot(args);
case IRREGULAR:
return irregular(args);
case MM:
return mm(args);
case SYNCSUM:
return syncsum(args);
default:
return 0;
}
}
__device__ void
run_sub_kernel(skrun_t *skr)
{
int res;
res = run_sub_kernel_func(skr->skid, (void **)skr->args);
if (get_blockIdxX() == 0 && get_blockIdxY() == 0 && get_threadIdxX() == 0 && get_threadIdxY() == 0) {
skr->res = res;
}
}
extern "C" __global__ void
sub_kernel_func(skrun_t *skr)
{
run_sub_kernel(skr);
}
sk_t
launch_kernel(skid_t skid, vstream_t stream, dim3 dimGrid, dim3 dimBlock, void *args[])
{
skrun_t skrun;
skrun.skid = skid;
skrun.dimGrid = dimGrid;
skrun.dimBlock = dimBlock;
memcpy(skrun.args, args, sizeof(void *) * MAX_ARGS);
skrun.res = 0;
skrun.n_tbs = dimGrid.x * dimGrid.y;
skrun.n_mtbs_per_tb = dimBlock.x * dimBlock.y / N_THREADS_PER_mTB;
return sched->submit_skrun(stream, &skrun);
}
void
wait_kernel(sk_t sk, vstream_t vstream, int *pres)
{
sched->wait_skrun(sk, vstream, pres);
}
void
init_skrun(void)
{
CUresult res;
res = cuModuleGetFunction(&func_sub_kernel, mod, "sub_kernel_func");
if (res != CUDA_SUCCESS) {
error("failed to get sub_kernel_func: %s\n", get_cuda_error_msg(res));
}
if (sched->init_skrun)
sched->init_skrun();
}
void
fini_skrun(void)
{
if (sched->fini_skrun)
sched->fini_skrun();
}
|
239e84ff3827d008f6a5d741a612b3a9c6077023.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/user/kernels/gather_kernel_util.h"
#include "oneflow/core/kernel/kernel.h"
#include <assert.h>
namespace oneflow {
namespace {
template<typename K, typename IDX>
__device__ IDX GetInOffset(const IDX out_offset, const K* indices, const IDX num_indices,
const IDX gather_dim_size, const IDX inner_dim_size, const IDX offset) {
const IDX outer_dim_elem_cnt = num_indices * inner_dim_size;
const IDX outer_idx = out_offset / outer_dim_elem_cnt;
const IDX indices_idx = out_offset % outer_dim_elem_cnt / inner_dim_size;
const IDX inner_idx = out_offset % inner_dim_size;
assert(indices[indices_idx] >= 0);
const IDX idx = indices[indices_idx] - offset;
if (idx >= 0 && idx < gather_dim_size) {
return outer_idx * gather_dim_size * inner_dim_size + idx * inner_dim_size + inner_idx;
} else {
return -1;
}
}
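// Worked example (editorial): with num_indices = 2, gather_dim_size = 4,
// inner_dim_size = 3, indices = {2, 0} and offset = 0, output element 5
// decomposes into outer_idx = 5 / (2*3) = 0, indices_idx = (5 % 6) / 3 = 1,
// inner_idx = 5 % 3 = 2, so it reads input element 0*4*3 + 0*3 + 2 = 2;
// indices that fall outside [0, gather_dim_size) return -1 and the caller
// writes 0 for that output element.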
template<typename T, typename K, typename IDX>
__global__ void GatherForwardGpu(const IDX elem_cnt, const K* indices, const IDX num_indices,
const T* in, const IDX gather_dim_size, const IDX inner_dim_size,
T* out, const IDX offset) {
CUDA_1D_KERNEL_LOOP_T(IDX, i, elem_cnt) {
const IDX in_offset =
GetInOffset<K, IDX>(i, indices, num_indices, gather_dim_size, inner_dim_size, offset);
if (in_offset < 0) {
out[i] = 0;
} else {
out[i] = in[in_offset];
}
}
}
bool IsSafeUseIndex32(const Shape& flat_in_shape, const int64_t num_indices) {
const int64_t in_elem_cnt = flat_in_shape.elem_cnt();
const int64_t out_elem_cnt = flat_in_shape.At(0) * num_indices * flat_in_shape.At(2);
  return std::max(out_elem_cnt, in_elem_cnt) < GetMaxVal<int32_t>() / 2;
}
} // namespace
template<typename T, typename K>
struct GatherKernelUtilImpl<DeviceType::kGPU, T, K> final {
static void Forward(DeviceCtx* ctx, const K* indices, int64_t num_indices, const T* in,
const Shape& flat_in_shape, T* out, const int64_t offset) {
const int64_t out_elem_cnt = flat_in_shape.At(0) * num_indices * flat_in_shape.At(2);
if (IsSafeUseIndex32(flat_in_shape, num_indices)) {
hipLaunchKernelGGL(( GatherForwardGpu<T, K, int32_t>)
, dim3(BlocksNum4ThreadsNum(out_elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
out_elem_cnt, indices, num_indices, in, flat_in_shape.At(1), flat_in_shape.At(2), out,
offset);
} else {
hipLaunchKernelGGL(( GatherForwardGpu<T, K, int64_t>)
, dim3(BlocksNum4ThreadsNum(out_elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
out_elem_cnt, indices, num_indices, in, flat_in_shape.At(1), flat_in_shape.At(2), out,
offset);
}
}
};
template<typename K>
struct GatherKernelUtilImpl<DeviceType::kGPU, float16, K> final {
static void Forward(DeviceCtx* ctx, const K* indices, int64_t num_indices, const float16* in,
const Shape& flat_in_shape, float16* out, const int64_t offset) {
GatherKernelUtilImpl<DeviceType::kGPU, half, K>::Forward(
ctx, indices, num_indices, reinterpret_cast<const half*>(in), flat_in_shape,
reinterpret_cast<half*>(out), offset);
}
};
#define INITIATE_GATHER_KERNEL_UTIL_GPU_IMPL(in_type_pair, index_type_pair) \
template struct GatherKernelUtilImpl<DeviceType::kGPU, OF_PP_PAIR_FIRST(in_type_pair), \
OF_PP_PAIR_FIRST(index_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INITIATE_GATHER_KERNEL_UTIL_GPU_IMPL, GATHER_DATA_TYPE_SEQ,
INDEX_DATA_TYPE_SEQ);
#undef INITIATE_GATHER_KERNEL_UTIL_GPU_IMPL
} // namespace oneflow
| 239e84ff3827d008f6a5d741a612b3a9c6077023.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/user/kernels/gather_kernel_util.h"
#include "oneflow/core/kernel/kernel.h"
#include <assert.h>
namespace oneflow {
namespace {
template<typename K, typename IDX>
__device__ IDX GetInOffset(const IDX out_offset, const K* indices, const IDX num_indices,
const IDX gather_dim_size, const IDX inner_dim_size, const IDX offset) {
const IDX outer_dim_elem_cnt = num_indices * inner_dim_size;
const IDX outer_idx = out_offset / outer_dim_elem_cnt;
const IDX indices_idx = out_offset % outer_dim_elem_cnt / inner_dim_size;
const IDX inner_idx = out_offset % inner_dim_size;
assert(indices[indices_idx] >= 0);
const IDX idx = indices[indices_idx] - offset;
if (idx >= 0 && idx < gather_dim_size) {
return outer_idx * gather_dim_size * inner_dim_size + idx * inner_dim_size + inner_idx;
} else {
return -1;
}
}
template<typename T, typename K, typename IDX>
__global__ void GatherForwardGpu(const IDX elem_cnt, const K* indices, const IDX num_indices,
const T* in, const IDX gather_dim_size, const IDX inner_dim_size,
T* out, const IDX offset) {
CUDA_1D_KERNEL_LOOP_T(IDX, i, elem_cnt) {
const IDX in_offset =
GetInOffset<K, IDX>(i, indices, num_indices, gather_dim_size, inner_dim_size, offset);
if (in_offset < 0) {
out[i] = 0;
} else {
out[i] = in[in_offset];
}
}
}
bool IsSafeUseIndex32(const Shape& flat_in_shape, const int64_t num_indices) {
const int64_t in_elem_cnt = flat_in_shape.elem_cnt();
const int64_t out_elem_cnt = flat_in_shape.At(0) * num_indices * flat_in_shape.At(2);
return std::max(out_elem_cnt, in_elem_cnt) < GetMaxVal<int32_t>() / 2;
}
} // namespace
template<typename T, typename K>
struct GatherKernelUtilImpl<DeviceType::kGPU, T, K> final {
static void Forward(DeviceCtx* ctx, const K* indices, int64_t num_indices, const T* in,
const Shape& flat_in_shape, T* out, const int64_t offset) {
const int64_t out_elem_cnt = flat_in_shape.At(0) * num_indices * flat_in_shape.At(2);
if (IsSafeUseIndex32(flat_in_shape, num_indices)) {
GatherForwardGpu<T, K, int32_t>
<<<BlocksNum4ThreadsNum(out_elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
out_elem_cnt, indices, num_indices, in, flat_in_shape.At(1), flat_in_shape.At(2), out,
offset);
} else {
GatherForwardGpu<T, K, int64_t>
<<<BlocksNum4ThreadsNum(out_elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
out_elem_cnt, indices, num_indices, in, flat_in_shape.At(1), flat_in_shape.At(2), out,
offset);
}
}
};
template<typename K>
struct GatherKernelUtilImpl<DeviceType::kGPU, float16, K> final {
static void Forward(DeviceCtx* ctx, const K* indices, int64_t num_indices, const float16* in,
const Shape& flat_in_shape, float16* out, const int64_t offset) {
GatherKernelUtilImpl<DeviceType::kGPU, half, K>::Forward(
ctx, indices, num_indices, reinterpret_cast<const half*>(in), flat_in_shape,
reinterpret_cast<half*>(out), offset);
}
};
#define INITIATE_GATHER_KERNEL_UTIL_GPU_IMPL(in_type_pair, index_type_pair) \
template struct GatherKernelUtilImpl<DeviceType::kGPU, OF_PP_PAIR_FIRST(in_type_pair), \
OF_PP_PAIR_FIRST(index_type_pair)>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INITIATE_GATHER_KERNEL_UTIL_GPU_IMPL, GATHER_DATA_TYPE_SEQ,
INDEX_DATA_TYPE_SEQ);
#undef INITIATE_GATHER_KERNEL_UTIL_GPU_IMPL
} // namespace oneflow
|
9f0231592e7f572ed92ce7007973a5bb21baf19f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Mauro Bisson <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <getopt.h>
#include <unistd.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include "cudamacro.h" /* for time() */
#include "utils.h"
#define DIV_UP(a,b) (((a)+((b)-1))/(b))
#define THREADS 128
#define BIT_X_SPIN (4)
#define CRIT_TEMP (2.26918531421f)
#define ALPHA_DEF (0.1f)
#define MIN_TEMP (0.05f*CRIT_TEMP)
#define MIN(a,b) (((a)<(b))?(a):(b))
#define MAX(a,b) (((a)>(b))?(a):(b))
// 2048+: 16, 16, 2, 1
// 1024: 16, 16, 1, 2
// 512: 8, 8, 1, 1
// 256: 4, 8, 1, 1
// 128: 2, 8, 1, 1
#define BLOCK_X (16)
#define BLOCK_Y (16)
#define BMULT_X (2)
#define BMULT_Y (1)
#define MAX_GPU (256)
#define NUMIT_DEF (1)
#define SEED_DEF (463463564571ull)
#define TGT_MAGN_MAX_DIFF (1.0E-3)
#define MAX_EXP_TIME (200)
#define MIN_EXP_TIME (152)
#define MAX_CORR_LEN (128)
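/*
 * Editorial note: with BIT_X_SPIN = 4 each spin occupies 4 bits, so one
 * 64-bit word packs 8*sizeof(unsigned long long)/4 = 16 spins and each
 * ulonglong2 lattice element packs 32 (the SPIN_X_WORD value recomputed
 * inside the kernels below). BLOCK_X/BLOCK_Y and BMULT_X/BMULT_Y choose the
 * per-thread-block tile of such words; the comment above lists the
 * combinations intended for the different lattice sizes.
 */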
__device__ __forceinline__ unsigned int __mypopc(const unsigned int x) {
return __popc(x);
}
__device__ __forceinline__ unsigned long long int __mypopc(const unsigned long long int x) {
return __popcll(x);
}
enum {C_BLACK, C_WHITE};
__device__ __forceinline__ uint2 __mymake_int2(const unsigned int x,
const unsigned int y) {
return make_uint2(x, y);
}
__device__ __forceinline__ ulonglong2 __mymake_int2(const unsigned long long x,
const unsigned long long y) {
return make_ulonglong2(x, y);
}
template<int BDIM_X,
int BDIM_Y,
int LOOP_X,
int LOOP_Y,
int BITXSP,
int COLOR,
typename INT_T,
typename INT2_T>
__global__ void latticeInit_k(const int devid,
const long long seed,
const int it,
const long long begY,
const long long dimX, // ld
INT2_T *__restrict__ vDst) {
const int __i = blockIdx.y*BDIM_Y*LOOP_Y + threadIdx.y;
const int __j = blockIdx.x*BDIM_X*LOOP_X + threadIdx.x;
const int SPIN_X_WORD = 8*sizeof(INT_T)/BITXSP;
const long long tid = ((devid*gridDim.y + blockIdx.y)*gridDim.x + blockIdx.x)*BDIM_X*BDIM_Y +
threadIdx.y*BDIM_X + threadIdx.x;
hiprandStatePhilox4_32_10_t st;
hiprand_init(seed, tid, static_cast<long long>(2*SPIN_X_WORD)*LOOP_X*LOOP_Y*(2*it+COLOR), &st);
INT2_T __tmp[LOOP_Y][LOOP_X];
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__tmp[i][j] = __mymake_int2(INT_T(0),INT_T(0));
}
}
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
#pragma unroll
for(int k = 0; k < 8*sizeof(INT_T); k += BITXSP) {
if (hiprand_uniform(&st) < 0.5f) {
__tmp[i][j].x |= INT_T(1) << k;
}
if (hiprand_uniform(&st) < 0.5f) {
__tmp[i][j].y |= INT_T(1) << k;
}
}
}
}
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
vDst[(begY + __i+i*BDIM_Y)*dimX + __j+j*BDIM_X] = __tmp[i][j];
}
}
return;
}
template<int BDIM_X,
int BDIM_Y,
int LOOP_X,
int LOOP_Y,
int BITXSP,
typename INT_T,
typename INT2_T>
__global__ void hamiltInitB_k(const int devid,
const float tgtProb,
const long long seed,
const long long begY,
const long long dimX, // ld
INT2_T *__restrict__ hamB) {
const int __i = blockIdx.y*BDIM_Y*LOOP_Y + threadIdx.y;
const int __j = blockIdx.x*BDIM_X*LOOP_X + threadIdx.x;
const long long tid = ((devid*gridDim.y + blockIdx.y)*gridDim.x + blockIdx.x)*BDIM_X*BDIM_Y +
threadIdx.y*BDIM_X + threadIdx.x;
hiprandStatePhilox4_32_10_t st;
hiprand_init(seed, tid, 0, &st);
INT2_T __tmp[LOOP_Y][LOOP_X];
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__tmp[i][j] = __mymake_int2(INT_T(0),INT_T(0));
}
}
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
#pragma unroll
for(int k = 0; k < 8*sizeof(INT_T); k += BITXSP) {
#pragma unroll
for(int l = 0; l < BITXSP; l++) {
if (hiprand_uniform(&st) < tgtProb) {
__tmp[i][j].x |= INT_T(1) << (k+l);
}
if (hiprand_uniform(&st) < tgtProb) {
__tmp[i][j].y |= INT_T(1) << (k+l);
}
}
}
}
}
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
hamB[(begY + __i+i*BDIM_Y)*dimX + __j+j*BDIM_X] = __tmp[i][j];
}
}
return;
}
template<int BDIM_X,
int BDIM_Y,
int LOOP_X,
int LOOP_Y,
int BITXSP,
typename INT_T,
typename INT2_T>
__global__ void hamiltInitW_k(const int xsl,
const int ysl,
const long long begY,
const long long dimX,
const INT2_T *__restrict__ hamB,
INT2_T *__restrict__ hamW) {
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int __i = blockIdx.y*BDIM_Y*LOOP_Y + tidy;
const int __j = blockIdx.x*BDIM_X*LOOP_X + tidx;
INT2_T __me[LOOP_Y][LOOP_X];
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__me[i][j] = hamB[(begY+__i+i*BDIM_Y)*dimX + __j+j*BDIM_X];
}
}
INT2_T __up[LOOP_Y][LOOP_X];
INT2_T __ct[LOOP_Y][LOOP_X];
INT2_T __dw[LOOP_Y][LOOP_X];
INT2_T __sd[LOOP_Y][LOOP_X];
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__up[i][j].x = (__me[i][j].x & 0x8888888888888888ull) >> 1;
__up[i][j].y = (__me[i][j].y & 0x8888888888888888ull) >> 1;
__dw[i][j].x = (__me[i][j].x & 0x4444444444444444ull) << 1;
__dw[i][j].y = (__me[i][j].y & 0x4444444444444444ull) << 1;
}
}
const int readBack = !(__i%2); // this kernel reads only BLACK Js
const int BITXWORD = 8*sizeof(INT_T);
if (!readBack) {
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__ct[i][j].x = (__me[i][j].x & 0x2222222222222222ull) >> 1;
__ct[i][j].y = (__me[i][j].y & 0x2222222222222222ull) >> 1;
__ct[i][j].x |= (__me[i][j].x & 0x1111111111111111ull) << (BITXSP+1);
__ct[i][j].y |= (__me[i][j].x & 0x1111111111111111ull) >> (BITXWORD-BITXSP - 1);
__ct[i][j].y |= (__me[i][j].y & 0x1111111111111111ull) << (BITXSP+1);
__sd[i][j].x = (__me[i][j].y & 0x1111111111111111ull) >> (BITXWORD-BITXSP - 1);
__sd[i][j].y = 0;
}
}
} else {
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__ct[i][j].x = (__me[i][j].x & 0x1111111111111111ull) << 1;
__ct[i][j].y = (__me[i][j].y & 0x1111111111111111ull) << 1;
__ct[i][j].y |= (__me[i][j].y & 0x2222222222222222ull) >> (BITXSP+1);
__ct[i][j].x |= (__me[i][j].y & 0x2222222222222222ull) << (BITXWORD-BITXSP - 1);
__ct[i][j].x |= (__me[i][j].x & 0x2222222222222222ull) >> (BITXSP+1);
__sd[i][j].y = (__me[i][j].x & 0x2222222222222222ull) << (BITXWORD-BITXSP - 1);
__sd[i][j].x = 0;
}
}
}
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
const int yoff = begY+__i + i*BDIM_Y;
const int upOff = ( yoff %ysl) == 0 ? yoff+ysl-1 : yoff-1;
const int dwOff = ((yoff+1)%ysl) == 0 ? yoff-ysl+1 : yoff+1;
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
const int xoff = __j + j*BDIM_X;
atomicOr(&hamW[yoff*dimX + xoff].x, __ct[i][j].x);
atomicOr(&hamW[yoff*dimX + xoff].y, __ct[i][j].y);
atomicOr(&hamW[upOff*dimX + xoff].x, __up[i][j].x);
atomicOr(&hamW[upOff*dimX + xoff].y, __up[i][j].y);
atomicOr(&hamW[dwOff*dimX + xoff].x, __dw[i][j].x);
atomicOr(&hamW[dwOff*dimX + xoff].y, __dw[i][j].y);
const int sideOff = readBack ? ( (xoff %xsl) == 0 ? xoff+xsl-1 : xoff-1 ):
( ((xoff+1)%xsl) == 0 ? xoff-xsl+1 : xoff+1);
atomicOr(&hamW[yoff*dimX + sideOff].x, __sd[i][j].x);
atomicOr(&hamW[yoff*dimX + sideOff].y, __sd[i][j].y);
}
}
return;
}
template<int BDIM_X,
int BDIM_Y,
int TILE_X,
int TILE_Y,
int FRAME_X,
int FRAME_Y,
typename INT_T,
typename INT2_T>
__device__ void loadTileOLD(const long long begY,
const long long dimY,
const long long dimX,
const INT2_T *__restrict__ v,
INT2_T tile[][TILE_X+2*FRAME_X]) {
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int blkx = blockIdx.x;
const int blky = blockIdx.y;
const int FULL_X = TILE_X+2*FRAME_X;
const int FULL_Y = TILE_Y+2*FRAME_Y;
#pragma unroll
for(int j = 0; j < FULL_Y; j += BDIM_Y) {
const int yoff = begY + blky*TILE_Y + j+tidy - FRAME_Y;
const int yoffAdj = (yoff < 0) ? dimY+yoff : (yoff >= dimY ? yoff-dimY : yoff);
#pragma unroll
for(int i = 0; i < FULL_X; i += BDIM_X) {
const int xoff = blkx*TILE_X + i+tidx - FRAME_X;
const int xoffAdj = (xoff < 0) ? dimX+xoff : (xoff >= dimX ? xoff-dimX : xoff);
INT2_T __t = v[yoffAdj*dimX + xoffAdj];
if (j+tidy < FULL_Y && i+tidx < FULL_X) {
tile[j+tidy][i+tidx] = __t;
}
}
}
return;
}
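// Optimized tile loader: the bulk (TILE_Y x TILE_X) block is copied with plain
// coalesced loads, while the 1-word halo rows and columns are filled only by
// the tidy == 0 threads, wrapping indices periodically within a single
// (slX, slY) sub-lattice rather than the whole lattice as loadTileOLD does.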
template<int BDIM_X,
int BDIM_Y,
int TILE_X,
int TILE_Y,
int FRAME_X,
int FRAME_Y,
typename INT2_T>
__device__ void loadTile(const int slX,
const int slY,
const long long begY,
const long long dimX,
const INT2_T *__restrict__ v,
INT2_T tile[][TILE_X+2*FRAME_X]) {
const int blkx = blockIdx.x;
const int blky = blockIdx.y;
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int startX = blkx*TILE_X;
const int startY = begY + blky*TILE_Y;
#pragma unroll
for(int j = 0; j < TILE_Y; j += BDIM_Y) {
int yoff = startY + j+tidy;
#pragma unroll
for(int i = 0; i < TILE_X; i += BDIM_X) {
const int xoff = startX + i+tidx;
tile[FRAME_Y + j+tidy][FRAME_X + i+tidx] = v[yoff*dimX + xoff];
}
}
if (tidy == 0) {
int yoff = (startY % slY) == 0 ? startY+slY-1 : startY-1;
#pragma unroll
for(int i = 0; i < TILE_X; i += BDIM_X) {
const int xoff = startX + i+tidx;
tile[0][FRAME_X + i+tidx] = v[yoff*dimX + xoff];
}
yoff = ((startY+TILE_Y) % slY) == 0 ? startY+TILE_Y - slY : startY+TILE_Y;
#pragma unroll
for(int i = 0; i < TILE_X; i += BDIM_X) {
const int xoff = startX + i+tidx;
tile[FRAME_Y + TILE_Y][FRAME_X + i+tidx] = v[yoff*dimX + xoff];
}
// the other branch is slower so skip it if possible
if (BDIM_X <= TILE_Y) {
int xoff = (startX % slX) == 0 ? startX+slX-1 : startX-1;
#pragma unroll
for(int j = 0; j < TILE_Y; j += BDIM_X) {
yoff = startY + j+tidx;
tile[FRAME_Y + j+tidx][0] = v[yoff*dimX + xoff];
}
xoff = ((startX+TILE_X) % slX) == 0 ? startX+TILE_X - slX : startX+TILE_X;
#pragma unroll
for(int j = 0; j < TILE_Y; j += BDIM_X) {
yoff = startY + j+tidx;
tile[FRAME_Y + j+tidx][FRAME_X + TILE_X] = v[yoff*dimX + xoff];
}
} else {
if (tidx < TILE_Y) {
int xoff = (startX % slX) == 0 ? startX+slX-1 : startX-1;
yoff = startY + tidx;
tile[FRAME_Y + tidx][0] = v[yoff*dimX + xoff];
xoff = ((startX+TILE_X) % slX) == 0 ? startX+TILE_X - slX : startX+TILE_X;
tile[FRAME_Y + tidx][FRAME_X + TILE_X] = v[yoff*dimX + xoff];
}
}
}
return;
}
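// Checkerboard Metropolis update for one color. Each INT2_T packs 2*SPIN_X_WORD
// spins at BITXSP bits per spin; the opposite color is staged in shared memory
// by loadTile(), the side word is shifted by BITXSP so that left/right
// neighbours line up nibble by nibble, and the per-nibble neighbour counts end
// up in __ct. A spin then flips when a Philox uniform draw is <= __shExp[spin][count];
// entries above 1 simply make the flip unconditional.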
template<int BDIM_X,
int BDIM_Y,
int LOOP_X,
int LOOP_Y,
int BITXSP,
int COLOR,
typename INT_T,
typename INT2_T>
__global__
void spinUpdateV_2D_k(const int devid,
const long long seed,
const int it,
const int slX, // sublattice size X of one color (in words)
const int slY, // sublattice size Y of one color
const long long begY,
const long long dimX, // ld
const float vExp[][5],
const INT2_T *__restrict__ jDst,
const INT2_T *__restrict__ vSrc,
INT2_T *__restrict__ vDst) {
const int SPIN_X_WORD = 8*sizeof(INT_T)/BITXSP;
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
__shared__ INT2_T shTile[BDIM_Y*LOOP_Y+2][BDIM_X*LOOP_X+2];
loadTile<BDIM_X, BDIM_Y,
BDIM_X*LOOP_X,
BDIM_Y*LOOP_Y,
1, 1, INT2_T>(slX, slY, begY, dimX, vSrc, shTile);
// __shExp[cur_s{0,1}][sum_s{0,1}] = __expf(-2*cur_s{-1,+1}*F{+1,-1}(sum_s{0,1})*INV_TEMP)
__shared__ float __shExp[2][5];
// for small lattices BDIM_X/Y may be smaller than 2/5
#pragma unroll
for(int i = 0; i < 2; i += BDIM_Y) {
#pragma unroll
for(int j = 0; j < 5; j += BDIM_X) {
if (i+tidy < 2 && j+tidx < 5) {
__shExp[i+tidy][j+tidx] = vExp[i+tidy][j+tidx];
}
}
}
__syncthreads();
const int __i = blockIdx.y*BDIM_Y*LOOP_Y + tidy;
const int __j = blockIdx.x*BDIM_X*LOOP_X + tidx;
const long long tid = ((devid*gridDim.y + blockIdx.y)*gridDim.x + blockIdx.x)*BDIM_X*BDIM_Y +
threadIdx.y*BDIM_X + threadIdx.x;
INT2_T __me[LOOP_Y][LOOP_X];
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__me[i][j] = vDst[(begY+__i+i*BDIM_Y)*dimX + __j+j*BDIM_X];
}
}
INT2_T __up[LOOP_Y][LOOP_X];
INT2_T __ct[LOOP_Y][LOOP_X];
INT2_T __dw[LOOP_Y][LOOP_X];
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__up[i][j] = shTile[i*BDIM_Y + tidy][j*BDIM_X + 1+tidx];
__ct[i][j] = shTile[i*BDIM_Y + 1+tidy][j*BDIM_X + 1+tidx];
__dw[i][j] = shTile[i*BDIM_Y + 2+tidy][j*BDIM_X + 1+tidx];
}
}
// BDIM_Y is power of two so row parity won't change across loops
const int readBack = (COLOR == C_BLACK) ? !(__i%2) : (__i%2);
INT2_T __sd[LOOP_Y][LOOP_X];
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__sd[i][j] = (readBack) ? shTile[i*BDIM_Y + 1+tidy][j*BDIM_X + tidx]:
shTile[i*BDIM_Y + 1+tidy][j*BDIM_X + 2+tidx];
}
}
if (readBack) {
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__sd[i][j].x = (__ct[i][j].x << BITXSP) | (__sd[i][j].y >> (8*sizeof(__sd[i][j].y)-BITXSP));
__sd[i][j].y = (__ct[i][j].y << BITXSP) | (__ct[i][j].x >> (8*sizeof(__ct[i][j].x)-BITXSP));
}
}
} else {
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__sd[i][j].y = (__ct[i][j].y >> BITXSP) | (__sd[i][j].x << (8*sizeof(__sd[i][j].x)-BITXSP));
__sd[i][j].x = (__ct[i][j].x >> BITXSP) | (__ct[i][j].y << (8*sizeof(__ct[i][j].y)-BITXSP));
}
}
}
if (jDst != NULL) {
INT2_T __J[LOOP_Y][LOOP_X];
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__J[i][j] = jDst[(begY+__i+i*BDIM_Y)*dimX + __j+j*BDIM_X];
}
}
// apply them
// the 4 bits of J codify: <upJ, downJ, leftJ, rightJ>
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__up[i][j].x ^= (__J[i][j].x & 0x8888888888888888ull) >> 3;
__up[i][j].y ^= (__J[i][j].y & 0x8888888888888888ull) >> 3;
__dw[i][j].x ^= (__J[i][j].x & 0x4444444444444444ull) >> 2;
__dw[i][j].y ^= (__J[i][j].y & 0x4444444444444444ull) >> 2;
if (readBack) {
// __sd[][] holds "left" spins
// __ct[][] holds "right" spins
__sd[i][j].x ^= (__J[i][j].x & 0x2222222222222222ull) >> 1;
__sd[i][j].y ^= (__J[i][j].y & 0x2222222222222222ull) >> 1;
__ct[i][j].x ^= (__J[i][j].x & 0x1111111111111111ull);
__ct[i][j].y ^= (__J[i][j].y & 0x1111111111111111ull);
} else {
// __ct[][] holds "left" spins
// __sd[][] holds "right" spins
__ct[i][j].x ^= (__J[i][j].x & 0x2222222222222222ull) >> 1;
__ct[i][j].y ^= (__J[i][j].y & 0x2222222222222222ull) >> 1;
__sd[i][j].x ^= (__J[i][j].x & 0x1111111111111111ull);
__sd[i][j].y ^= (__J[i][j].y & 0x1111111111111111ull);
}
}
}
}
hiprandStatePhilox4_32_10_t st;
hiprand_init(seed, tid, static_cast<long long>(2*SPIN_X_WORD)*LOOP_X*LOOP_Y*(2*it+COLOR), &st);
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__ct[i][j].x += __up[i][j].x;
__dw[i][j].x += __sd[i][j].x;
__ct[i][j].x += __dw[i][j].x;
__ct[i][j].y += __up[i][j].y;
__dw[i][j].y += __sd[i][j].y;
__ct[i][j].y += __dw[i][j].y;
}
}
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
#pragma unroll
for(int z = 0; z < 8*sizeof(INT_T); z += BITXSP) {
const int2 __src = make_int2((__me[i][j].x >> z) & 0xF,
(__me[i][j].y >> z) & 0xF);
const int2 __sum = make_int2((__ct[i][j].x >> z) & 0xF,
(__ct[i][j].y >> z) & 0xF);
const INT_T ONE = static_cast<INT_T>(1);
if (hiprand_uniform(&st) <= __shExp[__src.x][__sum.x]) {
__me[i][j].x ^= ONE << z;
}
if (hiprand_uniform(&st) <= __shExp[__src.y][__sum.y]) {
__me[i][j].y ^= ONE << z;
}
}
}
}
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
vDst[(begY + __i+i*BDIM_Y)*dimX + __j+j*BDIM_X] = __me[i][j];
}
}
return;
}
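// Block-wide reduction built on warp shuffles: each warp reduces its values
// with __shfl_down_sync, lane 0 of every warp parks its partial sum in shared
// memory, and warp 0 then reduces the partials. Only thread 0 ends up holding
// the full block sum, which is all the callers below need before their
// atomicAdd.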
template<int BDIM_X,
int WSIZE,
typename T>
__device__ __forceinline__ T __block_sum(T v) {
__shared__ T sh[BDIM_X/WSIZE];
const int lid = threadIdx.x%WSIZE;
const int wid = threadIdx.x/WSIZE;
#pragma unroll
for(int i = WSIZE/2; i; i >>= 1) {
v += __shfl_down_sync(0xFFFFFFFF, v, i);
}
if (lid == 0) sh[wid] = v;
__syncthreads();
if (wid == 0) {
v = (lid < (BDIM_X/WSIZE)) ? sh[lid] : 0;
#pragma unroll
for(int i = (BDIM_X/WSIZE)/2; i; i >>= 1) {
v += __shfl_down_sync(0xFFFFFFFF, v, i);
}
}
__syncthreads();
return v;
}
// to be optimized
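// Each spin occupies a BITXSP-bit nibble holding either 0 or 1, so the
// popcount of a word equals the number of "up" spins it contains; getMagn_k
// accumulates up/down counts per thread, reduces them per block and atomically
// adds the block totals to sum[0]/sum[1].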
template<int BDIM_X,
int BITXSP,
typename INT_T,
typename SUM_T>
__global__ void getMagn_k(const long long n,
const INT_T *__restrict__ v,
SUM_T *__restrict__ sum) {
const int SPIN_X_WORD = 8*sizeof(INT_T)/BITXSP;
const long long nth = static_cast<long long>(blockDim.x)*gridDim.x;
const long long tid = static_cast<long long>(blockDim.x)*blockIdx.x + threadIdx.x;
SUM_T __cntP = 0;
SUM_T __cntN = 0;
for(long long i = 0; i < n; i += nth) {
if (i+tid < n) {
const int __c = __mypopc(v[i+tid]);
__cntP += __c;
__cntN += SPIN_X_WORD - __c;
}
}
__cntP = __block_sum<BDIM_X, 32>(__cntP);
__cntN = __block_sum<BDIM_X, 32>(__cntN);
if (threadIdx.x == 0) {
atomicAdd(sum+0, __cntP);
atomicAdd(sum+1, __cntN);
}
return;
}
static void usage(const int SPIN_X_WORD, const char *pname) {
const char *bname = rindex(pname, '/');
if (!bname) {bname = pname;}
else {bname++;}
fprintf(stdout,
"Usage: %1$s [options]\n"
"options:\n"
"\t-x|--x <HORIZ_DIM>\n"
"\t\tSpecifies the horizontal dimension of the entire lattice (black+white spins),\n"
"\t\tper GPU. This dimension must be a multiple of %2$d.\n"
"\n"
"\t-y|--y <VERT_DIM>\n"
"\t\tSpecifies the vertical dimension of the entire lattice (black+white spins), per\n"
"\t\tGPU. This dimension must be a multiple of %3$d.\n"
"\n"
"\t-n|--n <NSTEPS>\n"
"\t\tSpecifies the number of iteration to run.\n"
"\t\tDefualt: %4$d\n"
"\n"
"\t-d|--devs <NUM_DEVICES>\n"
"\t\tSpecifies the number of GPUs to use. Will use devices with ids [0, NUM_DEVS-1].\n"
"\t\tDefualt: 1.\n"
"\n"
"\t-s|--seed <SEED>\n"
"\t\tSpecifies the seed used to generate random numbers.\n"
"\t\tDefault: %5$llu\n"
"\n"
"\t-a|--alpha <ALPHA>\n"
"\t\tSpecifies the temperature in T_CRIT units. If both this option and '-t' are\n"
"\t\tspecified then the '-t' option is used.\n"
"\t\tDefault: %6$f\n"
"\n"
"\t-t|--temp <TEMP>\n"
"\t\tSpecifies the temperature in absolute units. If both this option and '-a' are\n"
"\t\tspecified then this option is used.\n"
"\t\tDefault: %7$f\n"
"\n"
"\t-p|--print <STAT_FREQ>\n"
"\t\tSpecifies the frequency, in no. of iteration, with which the magnetization\n"
"\t\tstatistics is printed. If this option is used together to the '-e' option, this\n"
"\t\toption is ignored.\n"
"\t\tDefault: only at the beginning and at end of the simulation\n"
"\n"
"\t-e|--exppr\n"
"\t\tPrints the magnetization at time steps in the series 0 <= 2^(x/4) < NSTEPS. If\n"
"\t\tthis option is used together to the '-p' option, the latter is ignored.\n"
"\t\tDefault: disabled\n"
"\n"
"\t-c|--corr\n"
"\t\tDumps to a file named corr_{X}x{Y}_T_{TEMP} the correlation of each point\n"
"\t\twith the %8$d points on the right and below. The correlation is computed every\n"
"\t\ttime the magnetization is printed on screen (based on either the '-p' or '-e'\n"
"\t\toption) and it is written in the file one line per measure.\n"
"\t\tDefault: disabled\n"
"\n"
"\t-m|--magn <TGT_MAGN>\n"
"\t\tSpecifies the magnetization value at which the simulation is interrupted. The\n"
"\t\tmagnetization of the system is checked against TGT_MAGN every STAT_FREQ, if the\n"
"\t\t'-p' option is specified, or according to the exponential timestep series, if\n"
"\t\tthe '-e' option is specified. If neither '-p' not '-e' are specified then this\n"
"\t\toption is ignored.\n"
"\t\tDefault: unset\n"
"\n"
"\t-J|--J <PROB>\n"
"\t\tSpecifies the probability [0.0-1.0] that links connecting any two spins are\n"
"\t\tanti-ferromagnetic. \n"
"\t\tDefault: 0.0\n"
"\n"
"\t --xsl <HORIZ_SUB_DIM>\n"
"\t\tSpecifies the horizontal dimension of each sub-lattice (black+white spins), per\n"
"\t\tGPU. This dimension must be a divisor of the horizontal dimension of the entire\n"
"\t\tlattice per GPU (specified with the '-x' option) and a multiple of %2$d.\n"
"\t\tDefault: sub-lattices are disabled.\n"
"\n"
"\t --ysl <VERT_SUB_DIM>\n"
"\t\tSpecifies the vertical dimension of each sub-lattice (black+white spins), per\n"
"\t\tGPU. This dimension must be a divisor of the vertical dimension of the entire\n"
"\t\tlattice per GPU (specified with the '-y' option) and a multiple of %3$d.\n"
"\n"
"\t-o|--o\n"
"\t\tEnables the file dump of the lattice every time the magnetization is printed.\n"
"\t\tDefault: off\n\n",
bname,
2*SPIN_X_WORD*2*BLOCK_X*BMULT_X,
BLOCK_Y*BMULT_Y,
NUMIT_DEF,
SEED_DEF,
ALPHA_DEF,
ALPHA_DEF*CRIT_TEMP,
MAX_CORR_LEN);
exit(EXIT_SUCCESS);
}
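// Note: black and white spins live in one contiguous allocation (black_d == v_d,
// white_d == v_d + llen/2), so in the single-GPU case one getMagn_k launch over
// llen words covers both colors; with multiple GPUs each device reduces its own
// black and white slabs separately.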
static void countSpins(const int ndev,
const int redBlocks,
const size_t llen,
const size_t llenLoc,
const unsigned long long *black_d,
const unsigned long long *white_d,
unsigned long long **sum_d,
unsigned long long *bsum,
unsigned long long *wsum) {
if (ndev == 1) {
CHECK_CUDA(hipMemset(sum_d[0], 0, 2*sizeof(**sum_d)));
hipLaunchKernelGGL(( getMagn_k<THREADS, BIT_X_SPIN>), dim3(redBlocks), dim3(THREADS), 0, 0, llen, black_d, sum_d[0]);
CHECK_ERROR("getMagn_k");
CHECK_CUDA(hipDeviceSynchronize());
} else {
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(hipSetDevice(i));
CHECK_CUDA(hipMemset(sum_d[i], 0, 2*sizeof(**sum_d)));
hipLaunchKernelGGL(( getMagn_k<THREADS, BIT_X_SPIN>), dim3(redBlocks), dim3(THREADS), 0, 0, llenLoc, black_d + i*llenLoc, sum_d[i]);
hipLaunchKernelGGL(( getMagn_k<THREADS, BIT_X_SPIN>), dim3(redBlocks), dim3(THREADS), 0, 0, llenLoc, white_d + i*llenLoc, sum_d[i]);
CHECK_ERROR("getMagn_k");
}
}
bsum[0] = 0;
wsum[0] = 0;
unsigned long long sum_h[MAX_GPU][2];
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(hipMemcpy(sum_h[i], sum_d[i], 2*sizeof(**sum_h), hipMemcpyDeviceToHost));
bsum[0] += sum_h[i][0];
wsum[0] += sum_h[i][1];
}
return;
}
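// Two-point correlation kernel: one block per lattice row. For each distance
// j in [1, corrLen] it adds +1 when a spin matches the spin j sites to the
// right (horizontal term, served from the shared-memory word cache) or j rows
// below (vertical term, read from global memory), and -1 otherwise, with
// periodic wrap; the per-block sums are atomically accumulated into corr[].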
template<int BDIM_X,
int BITXSP,
int N_CORR,
typename INT_T,
typename SUM_T>
__global__ void getCorr2D_k(const int corrLen,
const long long dimX,
const long long dimY,
const long long begY,
const INT_T *__restrict__ black,
const INT_T *__restrict__ white,
SUM_T *__restrict__ corr) {
const int SPIN_X_WORD = 8*sizeof(INT_T)/BITXSP;
const int tid = threadIdx.x;
const long long startY = begY + blockIdx.x;
const int SH_LEN = BDIM_X + DIV_UP(N_CORR/2, SPIN_X_WORD);
__shared__ INT_T __shB[SH_LEN];
__shared__ INT_T __shW[SH_LEN];
__shared__ SUM_T __shC[N_CORR];
#pragma unroll
for(int j = 0; j < N_CORR; j += BDIM_X) {
if (j+tid < N_CORR) {
__shC[j+tid] = 0;
}
}
const int chunkDimX = 2*BDIM_X*SPIN_X_WORD;
for(long long l = 0; l < dimX; l += BDIM_X) {
__syncthreads();
#pragma unroll
for(int j = 0; j < SH_LEN; j += BDIM_X) {
if (j+tid < SH_LEN) {
const int off = (l+j+tid < dimX) ? l+j+tid : l+j+tid - dimX;
__shB[j+tid] = black[startY*dimX + off];
__shW[j+tid] = white[startY*dimX + off];
}
}
__syncthreads();
for(int j = 1; j <= corrLen; j++) {
SUM_T myCorr = 0;
for(long long i = tid; i < chunkDimX; i += BDIM_X) {
// horiz corr
const long long myWrdX = (i/2) / SPIN_X_WORD;
const long long myOffX = (i/2) % SPIN_X_WORD;
INT_T __tmp = ((startY ^ i) & 1) ? __shW[myWrdX] : __shB[myWrdX];
const int mySpin = (__tmp >> (myOffX*BITXSP)) & 0xF;
const long long nextX = i+j;
const long long nextWrdX = (nextX/2) / SPIN_X_WORD;
const long long nextOffX = (nextX/2) % SPIN_X_WORD;
__tmp = ((startY ^ nextX) & 1) ? __shW[nextWrdX] : __shB[nextWrdX];
const int nextSpin = (__tmp >> (nextOffX*BITXSP)) & 0xF;
myCorr += (mySpin == nextSpin) ? SUM_T(1) : SUM_T(-1);
// vert corr
const long long nextY = (startY+j >= dimY) ? startY+j-dimY : startY+j;
__tmp = ((nextY ^ i) & 1) ? white[nextY*dimX + l+myWrdX]:
black[nextY*dimX + l+myWrdX];
const int vertSpin = (__tmp >> (myOffX*BITXSP)) & 0xF;
myCorr += (mySpin == vertSpin) ? SUM_T(1) : SUM_T(-1);
}
myCorr = __block_sum<BDIM_X, 32>(myCorr);
if (!tid) {
__shC[j-1] += myCorr;
}
}
}
__syncthreads();
#pragma unroll
for(int j = 0; j < N_CORR; j += BDIM_X) {
if (j+tid < N_CORR) {
atomicAdd(corr + j+tid, __shC[j+tid]);
}
}
return;
}
template<int BDIM_X,
int BITXSP,
int N_CORR,
typename INT_T,
typename SUM_T>
__global__ void getCorr2DRepl_k(const int corrLen,
const long long dimX,
const long long begY,
const long long slX, // sublattice size X of one color (in words)
const long long slY, // sublattice size Y of one color
const INT_T *__restrict__ black,
const INT_T *__restrict__ white,
SUM_T *__restrict__ corr) {
const int tid = threadIdx.x;
const int SPIN_X_WORD = 8*sizeof(INT_T)/BITXSP;
const long long startY = begY + blockIdx.x;
const long long mySLY = startY / slY;
const long long NSLX = 2ull*dimX*SPIN_X_WORD / slX;
const int SH_LEN = BDIM_X + DIV_UP(N_CORR/2, SPIN_X_WORD);
__shared__ INT_T __shB[SH_LEN];
__shared__ INT_T __shW[SH_LEN];
__shared__ SUM_T __shC[N_CORR];
#pragma unroll
for(int j = 0; j < N_CORR; j += BDIM_X) {
if (j+tid < N_CORR) {
__shC[j+tid] = 0;
}
}
const int chunkDimX = MIN(2*BDIM_X*SPIN_X_WORD, slX);
const int slXLD = (slX/2) / SPIN_X_WORD;
for(long long sl = 0; sl < NSLX; sl++) {
for(long long l = 0; l < slXLD; l += BDIM_X) {
__syncthreads();
#pragma unroll
for(int j = 0; j < SH_LEN; j += BDIM_X) {
if (j+tid < SH_LEN) {
const int off = (l+j+tid) % slXLD;
__shB[j+tid] = black[startY*dimX + sl*slXLD + off];
__shW[j+tid] = white[startY*dimX + sl*slXLD + off];
}
}
__syncthreads();
for(int j = 1; j <= corrLen; j++) {
SUM_T myCorr = 0;
for(long long i = tid; i < chunkDimX; i += BDIM_X) {
// horiz corr
const long long myWrdX = (i/2) / SPIN_X_WORD;
const long long myOffX = (i/2) % SPIN_X_WORD;
INT_T __tmp = ((startY ^ i) & 1) ? __shW[myWrdX] : __shB[myWrdX];
const int mySpin = (__tmp >> (myOffX*BITXSP)) & 0xF;
const long long nextX = i+j;
const long long nextWrdX = (nextX/2) / SPIN_X_WORD;
const long long nextOffX = (nextX/2) % SPIN_X_WORD;
__tmp = ((startY ^ nextX) & 1) ? __shW[nextWrdX] : __shB[nextWrdX];
const int nextSpin = (__tmp >> (nextOffX*BITXSP)) & 0xF;
myCorr += (mySpin == nextSpin) ? SUM_T(1) : SUM_T(-1);
// vert corr
const long long nextY = (startY+j >= (mySLY+1)*slY) ? startY+j-slY : startY+j;
__tmp = ((nextY ^ i) & 1) ? white[nextY*dimX + sl*slXLD + l+myWrdX]:
black[nextY*dimX + sl*slXLD + l+myWrdX];
const int vertSpin = (__tmp >> (myOffX*BITXSP)) & 0xF;
myCorr += (mySpin == vertSpin) ? SUM_T(1) : SUM_T(-1);
}
myCorr = __block_sum<BDIM_X, 32>(myCorr);
if (!tid) {
__shC[j-1] += myCorr;
}
}
}
}
__syncthreads();
#pragma unroll
for(int j = 0; j < N_CORR; j += BDIM_X) {
if (j+tid < N_CORR) {
atomicAdd(corr + j+tid, __shC[j+tid]);
}
}
return;
}
static void computeCorr(const char *fname,
const int ndev,
const int it,
const int lld,
const int useRepl,
const int XSL, // full sub-lattice (B+W) X
                        const int YSL,    // full sub-lattice (B+W) Y
const int X, // per-GPU full lattice (B+W) X
const int Y, // per-GPU full lattice (B+W) Y
const unsigned long long *black_d,
const unsigned long long *white_d,
double **corr_d,
double **corr_h) {
const int n_corr = MAX_CORR_LEN;
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(hipSetDevice(i));
CHECK_CUDA(hipMemset(corr_d[i], 0, n_corr*sizeof(**corr_d)));
if (!useRepl) {
hipLaunchKernelGGL(( getCorr2D_k<THREADS, BIT_X_SPIN, MAX_CORR_LEN>), dim3(Y), dim3(THREADS), 0, 0, n_corr,
lld,
ndev*Y,
i*Y,
black_d,
white_d,
corr_d[i]);
CHECK_ERROR("getCorr2D_k");
} else {
hipLaunchKernelGGL(( getCorr2DRepl_k<THREADS, BIT_X_SPIN, MAX_CORR_LEN>), dim3(Y), dim3(THREADS), 0, 0, n_corr,
lld,
i*Y,
XSL,
YSL,
black_d,
white_d,
corr_d[i]);
CHECK_ERROR("getCorr2DRepl_k");
}
}
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(hipSetDevice(i));
CHECK_CUDA(hipMemcpy(corr_h[i],
corr_d[i],
n_corr*sizeof(**corr_h),
hipMemcpyDeviceToHost));
}
for(int d = 1; d < ndev; d++) {
for(int i = 0; i < n_corr; i++) {
corr_h[0][i] += corr_h[d][i];
}
}
FILE *fp = Fopen(fname, "a");
fprintf(fp,"%10d", it);
for(int i = 0; i < n_corr; i++) {
fprintf(fp," % -12G", corr_h[0][i] / (2.0*X*Y*ndev));
}
fprintf(fp,"\n");
fclose(fp);
return;
}
static void dumpLattice(const char *fprefix,
const int ndev,
const int Y,
const size_t lld,
const size_t llen,
const size_t llenLoc,
const unsigned long long *v_d) {
char fname[256];
if (ndev == 1) {
unsigned long long *v_h = (unsigned long long *)Malloc(llen*sizeof(*v_h));
CHECK_CUDA(hipMemcpy(v_h, v_d, llen*sizeof(*v_h), hipMemcpyDeviceToHost));
unsigned long long *black_h = v_h;
unsigned long long *white_h = v_h + llen/2;
snprintf(fname, sizeof(fname), "%s0.txt", fprefix);
FILE *fp = Fopen(fname, "w");
for(int i = 0; i < Y; i++) {
for(int j = 0; j < lld; j++) {
unsigned long long __b = black_h[i*lld + j];
unsigned long long __w = white_h[i*lld + j];
for(int k = 0; k < 8*sizeof(*v_h); k += BIT_X_SPIN) {
if (i&1) {
fprintf(fp, "%llX", (__w >> k) & 0xF);
fprintf(fp, "%llX", (__b >> k) & 0xF);
} else {
fprintf(fp, "%llX", (__b >> k) & 0xF);
fprintf(fp, "%llX", (__w >> k) & 0xF);
}
}
}
fprintf(fp, "\n");
}
fclose(fp);
free(v_h);
} else {
#pragma omp parallel for schedule(static)
for(int d = 0; d < ndev; d++) {
const unsigned long long *black_h = v_d + d*llenLoc;
const unsigned long long *white_h = v_d + llen/2 + d*llenLoc;
snprintf(fname, sizeof(fname), "%s%d.txt", fprefix, d);
FILE *fp = Fopen(fname, "w");
for(int i = 0; i < Y; i++) {
for(int j = 0; j < lld; j++) {
unsigned long long __b = black_h[i*lld + j];
unsigned long long __w = white_h[i*lld + j];
for(int k = 0; k < 8*sizeof(*black_h); k += BIT_X_SPIN) {
if (i&1) {
fprintf(fp, "%llX", (__w >> k) & 0xF);
fprintf(fp, "%llX", (__b >> k) & 0xF);
} else {
fprintf(fp, "%llX", (__b >> k) & 0xF);
fprintf(fp, "%llX", (__w >> k) & 0xF);
}
}
}
fprintf(fp, "\n");
}
fclose(fp);
}
}
return;
}
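// Builds the exponential print schedule used with the -e option: candidate
// times t = rint(2^(j/4)) are kept only when they are at least twice the
// previous entry (the commented-out condition below would keep every distinct
// value instead), so the list stays short and strictly increasing.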
static void generate_times(unsigned long long nsteps,
unsigned long long *list_times) {
int nt = 0;
list_times[0]=MIN_EXP_TIME;
unsigned long long t = 0;
for(unsigned long long j = 0; j < nsteps && t < nsteps; j++) {
t = rint(pow(2.0, j/4.0));
if (t >= 2*list_times[nt] && nt < MAX_EXP_TIME-1) {
// if (t > list_times[nt] && nt < MAX_EXP_TIME-1) {
nt++;
list_times[nt] = t;
//printf("list_times[%d]: %llu\n", nt, list_times[nt]);
}
}
return;
}
int main(int argc, char **argv) {
unsigned long long *v_d=NULL;
unsigned long long *black_d=NULL;
unsigned long long *white_d=NULL;
unsigned long long *ham_d=NULL;
unsigned long long *hamB_d=NULL;
unsigned long long *hamW_d=NULL;
hipEvent_t start, stop;
float et;
const int SPIN_X_WORD = (8*sizeof(*v_d)) / BIT_X_SPIN;
int X = 0;
int Y = 0;
int dumpOut = 0;
char cname[256];
int corrOut = 0;
double *corr_d[MAX_GPU];
double *corr_h[MAX_GPU];
int nsteps = NUMIT_DEF;
unsigned long long seed = SEED_DEF;
int ndev = 1;
float alpha = -1.0f;
float temp = -1.0f;
float tempUpdStep = 0;
int tempUpdFreq = 0;
int printFreq = 0;
int printExp = 0;
int printExpCur = 0;
unsigned long long printExpSteps[MAX_EXP_TIME];
double tgtMagn = -1.0;
int useGenHamilt = 0;
float hamiltPerc1 = 0.0f;
int useSubLatt = 0;
int XSL = 0;
int YSL = 0;
int NSLX = 1;
int NSLY = 1;
int och;
while(1) {
int option_index = 0;
static struct option long_options[] = {
{ "x", required_argument, 0, 'x'},
{ "y", required_argument, 0, 'y'},
{ "nit", required_argument, 0, 'n'},
{ "seed", required_argument, 0, 's'},
{ "out", no_argument, 0, 'o'},
{ "devs", required_argument, 0, 'd'},
{ "alpha", required_argument, 0, 'a'},
{ "temp", required_argument, 0, 't'},
{ "print", required_argument, 0, 'p'},
{"update", required_argument, 0, 'u'},
{ "magn", required_argument, 0, 'm'},
{ "exppr", no_argument, 0, 'e'},
{ "corr", no_argument, 0, 'c'},
{ "J", required_argument, 0, 'J'},
{ "xsl", required_argument, 0, 1},
{ "ysl", required_argument, 0, 2},
{ "help", required_argument, 0, 'h'},
{ 0, 0, 0, 0}
};
och = getopt_long(argc, argv, "x:y:n:ohs:d:a:t:p:u:m:ecJ:r:", long_options, &option_index);
if (och == -1) break;
switch (och) {
case 0:// handles long opts with non-NULL flag field
break;
case 'x':
X = atoi(optarg);
break;
case 'y':
Y = atoi(optarg);
break;
case 'n':
nsteps = atoi(optarg);
break;
case 'o':
dumpOut = 1;
break;
case 'h':
usage(SPIN_X_WORD, argv[0]);
break;
case 's':
seed = atoll(optarg);
if(seed==0) {
seed=((getpid()*rand())&0x7FFFFFFFF);
}
break;
case 'd':
ndev = atoi(optarg);
break;
case 'a':
alpha = atof(optarg);
break;
case 't':
temp = atof(optarg);
break;
case 'p':
printFreq = atoi(optarg);
break;
case 'e':
printExp = 1;
break;
case 'u':
// format -u FLT,INT
{
char *__tmp0 = strtok(optarg, ",");
if (!__tmp0) {
fprintf(stderr, "cannot find temperature step in parameter...\n");
exit(EXIT_FAILURE);
}
char *__tmp1 = strtok(NULL, ",");
if (!__tmp1) {
fprintf(stderr, "cannot find iteration count in parameter...\n");
exit(EXIT_FAILURE);
}
tempUpdStep = atof(__tmp0);
tempUpdFreq = atoi(__tmp1);
printf("tempUpdStep: %f, tempUpdFreq: %d\n", tempUpdStep, tempUpdFreq);
}
break;
case 'm':
tgtMagn = atof(optarg);
break;
case 'c':
corrOut = 1;
break;
case 'J':
useGenHamilt = 1;
hamiltPerc1 = atof(optarg);
hamiltPerc1 = MIN(MAX(0.0f, hamiltPerc1), 1.0f);
break;
case 1:
useSubLatt = 1;
XSL = atoi(optarg);
break;
case 2:
useSubLatt = 1;
YSL = atoi(optarg);
break;
case '?':
exit(EXIT_FAILURE);
default:
fprintf(stderr, "unknown option: %c\n", och);
exit(EXIT_FAILURE);
}
}
if (!X || !Y) {
if (!X) {
if (Y && !(Y % (2*SPIN_X_WORD*2*BLOCK_X*BMULT_X))) {
X = Y;
} else {
X = 2*SPIN_X_WORD*2*BLOCK_X*BMULT_X;
}
}
if (!Y) {
if (!(X%(BLOCK_Y*BMULT_Y))) {
Y = X;
} else {
Y = BLOCK_Y*BMULT_Y;
}
}
}
if (!X || (X%2) || ((X/2)%(SPIN_X_WORD*2*BLOCK_X*BMULT_X))) {
fprintf(stderr, "\nPlease specify an X dim multiple of %d\n\n", 2*SPIN_X_WORD*2*BLOCK_X*BMULT_X);
usage(SPIN_X_WORD, argv[0]);
exit(EXIT_FAILURE);
}
if (!Y || (Y%(BLOCK_Y*BMULT_Y))) {
fprintf(stderr, "\nPlease specify a Y dim multiple of %d\n\n", BLOCK_Y*BMULT_Y);
usage(SPIN_X_WORD, argv[0]);
exit(EXIT_FAILURE);
}
if (useSubLatt) {
if (!XSL || !YSL) {
if (!XSL) {
if (YSL && !(YSL % (2*SPIN_X_WORD*2*BLOCK_X*BMULT_X))) {
XSL = YSL;
} else {
XSL = 2*SPIN_X_WORD*2*BLOCK_X*BMULT_X;
}
}
if (!YSL) {
if (!(XSL%(BLOCK_Y*BMULT_Y))) {
YSL = XSL;
} else {
YSL = BLOCK_Y*BMULT_Y;
}
}
}
if ((X%XSL) || !XSL || (XSL%2) || ((XSL/2)%(SPIN_X_WORD*2*BLOCK_X*BMULT_X))) {
fprintf(stderr,
"\nPlease specify an X sub-lattice dim multiple of %d and divisor of %d\n\n",
2*SPIN_X_WORD*2*BLOCK_X*BMULT_X, X);
usage(SPIN_X_WORD, argv[0]);
exit(EXIT_FAILURE);
}
if ((Y%YSL) || !YSL || (YSL%(BLOCK_Y*BMULT_Y))) {
fprintf(stderr,
"\nPlease specify a Y sub-lattice dim multiple of %d divisor of %d\n\n",
BLOCK_Y*BMULT_Y, Y);
usage(SPIN_X_WORD, argv[0]);
exit(EXIT_FAILURE);
}
NSLX = X / XSL;
NSLY = Y / YSL;
} else {
XSL = X;
YSL = Y*ndev;
NSLX = 1;
NSLY = 1;
}
if (temp == -1.0f) {
if (alpha == -1.0f) {
temp = ALPHA_DEF*CRIT_TEMP;
} else {
temp = alpha*CRIT_TEMP;
}
}
if (printExp && printFreq) {
printFreq = 0;
}
if (printExp) {
generate_times(nsteps, printExpSteps);
}
hipDeviceProp_t props;
printf("\nUsing GPUs:\n");
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(hipGetDeviceProperties(&props, i));
printf("\t%2d (%s, %d SMs, %d th/SM max, CC %d.%d, ECC %s)\n",
i, props.name, props.multiProcessorCount,
props.maxThreadsPerMultiProcessor,
props.major, props.minor,
props.ECCEnabled?"on":"off");
}
printf("\n");
// we assume all GPUs to be the same so we'll later
// use the props filled for the last GPU...
if (ndev > 1) {
for(int i = 0; i < ndev; i++) {
int attVal = 0;
CHECK_CUDA(hipDeviceGetAttribute(&attVal, hipDeviceAttributeConcurrentManagedAccess, i));
if (!attVal) {
fprintf(stderr,
"error: device %d does not support concurrent managed memory access!\n", i);
exit(EXIT_FAILURE);
}
}
printf("GPUs direct access matrix:\n ");
for(int i = 0; i < ndev; i++) {
printf("%4d", i);
}
int missingLinks = 0;
printf("\n");
for(int i = 0; i < ndev; i++) {
printf("GPU %2d:", i);
CHECK_CUDA(hipSetDevice(i));
for(int j = 0; j < ndev; j++) {
int access = 1;
if (i != j) {
CHECK_CUDA(hipDeviceCanAccessPeer(&access, i, j));
if (access) {
CHECK_CUDA(hipDeviceEnablePeerAccess(j, 0));
} else {
missingLinks++;
}
}
printf("%4c", access ? 'V' : 'X');
}
printf("\n");
}
printf("\n");
if (missingLinks) {
fprintf(stderr,
"error: %d direct memory links among devices missing\n",
missingLinks);
exit(EXIT_FAILURE);
}
}
size_t lld = (X/2)/SPIN_X_WORD;
// length of a single color section per GPU
size_t llenLoc = static_cast<size_t>(Y)*lld;
// total lattice length (all GPUs, all colors)
size_t llen = 2ull*ndev*llenLoc;
dim3 grid(DIV_UP(lld/2, BLOCK_X*BMULT_X),
DIV_UP( Y, BLOCK_Y*BMULT_Y));
dim3 block(BLOCK_X, BLOCK_Y);
printf("Run configuration:\n");
printf("\tspin/word: %d\n", SPIN_X_WORD);
printf("\tspins: %zu\n", llen*SPIN_X_WORD);
printf("\tseed: %llu\n", seed);
printf("\titerations: %d\n", nsteps);
printf("\tblock (X, Y): %d, %d\n", block.x, block.y);
printf("\ttile (X, Y): %d, %d\n", BLOCK_X*BMULT_X, BLOCK_Y*BMULT_Y);
printf("\tgrid (X, Y): %d, %d\n", grid.x, grid.y);
if (printFreq) {
printf("\tprint magn. every %d steps\n", printFreq);
} else if (printExp) {
printf("\tprint magn. following exponential series\n");
} else {
printf("\tprint magn. at 1st and last step\n");
}
if ((printFreq || printExp) && tgtMagn != -1.0) {
printf("\tearly exit if magn. == %lf+-%lf\n", tgtMagn, TGT_MAGN_MAX_DIFF);
}
printf("\ttemp: %f (%f*T_crit)\n", temp, temp/CRIT_TEMP);
if (!tempUpdFreq) {
printf("\ttemp update not set\n");
} else {
printf("\ttemp update: %f / %d iterations\n", tempUpdStep, tempUpdFreq);
}
if (useGenHamilt) {
printf("\tusing Hamiltonian buffer, setting links to -1 with prob %G\n", hamiltPerc1);
} else {
printf("\tnot using Hamiltonian buffer\n");
}
printf("\n");
if (useSubLatt) {
printf("\tusing sub-lattices:\n");
printf("\t\tno. of sub-lattices per GPU: %8d\n", NSLX*NSLY);
printf("\t\tno. of sub-lattices (total): %8d\n", ndev*NSLX*NSLY);
printf("\t\tsub-lattices size: %7d x %7d\n\n", XSL, YSL);
}
printf("\tlocal lattice size: %8d x %8d\n", Y, X);
printf("\ttotal lattice size: %8d x %8d\n", ndev*Y, X);
printf("\tlocal lattice shape: 2 x %8d x %8zu (%12zu %s)\n", Y, lld, llenLoc*2, sizeof(*v_d) == 4 ? "uints" : "ulls");
printf("\ttotal lattice shape: 2 x %8d x %8zu (%12zu %s)\n", ndev*Y, lld, llen, sizeof(*v_d) == 4 ? "uints" : "ulls");
printf("\tmemory: %.2lf MB (%.2lf MB per GPU)\n", (llen*sizeof(*v_d))/(1024.0*1024.0), llenLoc*2*sizeof(*v_d)/(1024.0*1024.0));
const int redBlocks = MIN(DIV_UP(llen, THREADS),
(props.maxThreadsPerMultiProcessor/THREADS)*props.multiProcessorCount);
unsigned long long cntPos;
unsigned long long cntNeg;
unsigned long long *sum_d[MAX_GPU];
if (ndev == 1) {
CHECK_CUDA(hipMalloc(&v_d, llen*sizeof(*v_d)));
CHECK_CUDA(hipMemset(v_d, 0, llen*sizeof(*v_d)));
CHECK_CUDA(hipMalloc(&sum_d[0], 2*sizeof(**sum_d)));
if (useGenHamilt) {
CHECK_CUDA(hipMalloc(&ham_d, llen*sizeof(*ham_d)));
CHECK_CUDA(hipMemset(ham_d, 0, llen*sizeof(*ham_d)));
}
} else {
CHECK_CUDA(hipMallocManaged(&v_d, llen*sizeof(*v_d), hipMemAttachGlobal));
if (useGenHamilt) {
CHECK_CUDA(hipMallocManaged(&ham_d, llen*sizeof(*ham_d), hipMemAttachGlobal));
}
printf("\nSetting up multi-gpu configuration:\n"); fflush(stdout);
//#pragma omp parallel for schedule(static)
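// The idea here seems to be: pin each device's black/white slab to that device
// with hipMemAdviseSetPreferredLocation, and mark only the first and last row
// of every slab with hipMemAdviseSetAccessedBy for the two neighbouring
// devices, so boundary rows can be read remotely without page migration.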
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(hipSetDevice(i));
CHECK_CUDA(hipMalloc(sum_d+i, 2*sizeof(**sum_d)));
CHECK_CUDA(hipMemset(sum_d[i], 0, 2*sizeof(**sum_d)));
// set preferred loc for black/white
CHECK_CUDA(hipMemAdvise(v_d + i*llenLoc, llenLoc*sizeof(*v_d), hipMemAdviseSetPreferredLocation, i));
CHECK_CUDA(hipMemAdvise(v_d + (llen/2) + i*llenLoc, llenLoc*sizeof(*v_d), hipMemAdviseSetPreferredLocation, i));
if (useGenHamilt) {
CHECK_CUDA(hipMemAdvise(ham_d + i*llenLoc, llenLoc*sizeof(*ham_d), hipMemAdviseSetPreferredLocation, i));
CHECK_CUDA(hipMemAdvise(ham_d + (llen/2) + i*llenLoc, llenLoc*sizeof(*ham_d), hipMemAdviseSetPreferredLocation, i));
}
// black boundaries up/down
//fprintf(stderr, "v_d + %12zu + %12zu, %12zu, ..., %2d)\n", i*llenLoc, (Y-1)*lld, lld*sizeof(*v_d), (i+ndev+1)%ndev);
CHECK_CUDA(hipMemAdvise(v_d + i*llenLoc, lld*sizeof(*v_d), hipMemAdviseSetAccessedBy, (i+ndev-1)%ndev));
CHECK_CUDA(hipMemAdvise(v_d + i*llenLoc + (Y-1)*lld, lld*sizeof(*v_d), hipMemAdviseSetAccessedBy, (i+ndev+1)%ndev));
// white boundaries up/down
CHECK_CUDA(hipMemAdvise(v_d + (llen/2) + i*llenLoc, lld*sizeof(*v_d), hipMemAdviseSetAccessedBy, (i+ndev-1)%ndev));
CHECK_CUDA(hipMemAdvise(v_d + (llen/2) + i*llenLoc + (Y-1)*lld, lld*sizeof(*v_d), hipMemAdviseSetAccessedBy, (i+ndev+1)%ndev));
//CHECK_CUDA(hipMemPrefetchAsync(v_d + i*llenLoc, llenLoc*sizeof(*v_d), i, 0));
//CHECK_CUDA(hipMemPrefetchAsync(v_d + (llen/2) + i*llenLoc, llenLoc*sizeof(*v_d), i, 0));
// reset black/white
CHECK_CUDA(hipMemset(v_d + i*llenLoc, 0, llenLoc*sizeof(*v_d)));
CHECK_CUDA(hipMemset(v_d + (llen/2) + i*llenLoc, 0, llenLoc*sizeof(*v_d)));
if (useGenHamilt) {
CHECK_CUDA(hipMemset(ham_d + i*llenLoc, 0, llenLoc*sizeof(*ham_d)));
CHECK_CUDA(hipMemset(ham_d + (llen/2) + i*llenLoc, 0, llenLoc*sizeof(*ham_d)));
}
printf("\tGPU %2d done\n", i); fflush(stdout);
}
}
if(corrOut) {
snprintf(cname, sizeof(cname), "corr_%dx%d_T_%f_%llu", Y, X, temp, seed);
Remove(cname);
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(hipSetDevice(i));
corr_h[i] = (double *)Malloc(MAX_CORR_LEN*sizeof(**corr_h));
CHECK_CUDA(hipMalloc(corr_d+i, MAX_CORR_LEN*sizeof(**corr_d)));
}
}
black_d = v_d;
white_d = v_d + llen/2;
if (useGenHamilt) {
hamB_d = ham_d;
hamW_d = ham_d + llen/2;
}
float *exp_d[MAX_GPU];
float exp_h[2][5];
// precompute possible exponentials
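// exp_h[i][j]: i is the current spin (0 -> -1, 1 -> +1) and j the number of
// neighbours seen as "up" (after any J bits are applied), so the neighbour
// field is h = 2*j-4 and, for temp > 0, the entry is the Metropolis factor
// exp(-2*s*h/T). For example, an up spin with all four neighbours up
// (i=1, j=4) flips with probability exp(-8/T); entries larger than 1 simply
// make the flip unconditional. The temp <= 0 branch apparently encodes the
// zero-temperature limit: positive entries always accept, negative ones never
// do, and ties (j == 2) accept with probability 0.5.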
for(int i = 0; i < 2; i++) {
for(int j = 0; j < 5; j++) {
if(temp > 0) {
exp_h[i][j] = expf((i?-2.0f:2.0f)*static_cast<float>(j*2-4)*(1.0f/temp));
} else {
if(j == 2) {
exp_h[i][j] = 0.5f;
} else {
exp_h[i][j] = (i?-2.0f:2.0f)*static_cast<float>(j*2-4);
}
}
//printf("exp[%2d][%d]: %E\n", i?1:-1, j, exp_h[i][j]);
}
}
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(hipSetDevice(i));
CHECK_CUDA(hipMalloc(exp_d+i, 2*5*sizeof(**exp_d)));
CHECK_CUDA(hipMemcpy(exp_d[i], exp_h, 2*5*sizeof(**exp_d), hipMemcpyHostToDevice));
}
CHECK_CUDA(hipEventCreate(&start));
CHECK_CUDA(hipEventCreate(&stop));
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(hipSetDevice(i));
hipLaunchKernelGGL(( latticeInit_k<BLOCK_X, BLOCK_Y,
BMULT_X, BMULT_Y,
BIT_X_SPIN, C_BLACK,
unsigned long long>), dim3(grid), dim3(block), 0, 0, i,
seed,
0, i*Y, lld/2,
reinterpret_cast<ulonglong2 *>(black_d));
CHECK_ERROR("initLattice_k");
hipLaunchKernelGGL(( latticeInit_k<BLOCK_X, BLOCK_Y,
BMULT_X, BMULT_Y,
BIT_X_SPIN, C_WHITE,
unsigned long long>), dim3(grid), dim3(block), 0, 0, i,
seed,
0, i*Y, lld/2,
reinterpret_cast<ulonglong2 *>(white_d));
CHECK_ERROR("initLattice_k");
if (useGenHamilt) {
hipLaunchKernelGGL(( hamiltInitB_k<BLOCK_X, BLOCK_Y,
BMULT_X, BMULT_Y,
BIT_X_SPIN,
unsigned long long>), dim3(grid), dim3(block), 0, 0, i,
hamiltPerc1,
seed+1, // just use a different seed
i*Y, lld/2,
reinterpret_cast<ulonglong2 *>(hamB_d));
hipLaunchKernelGGL(( hamiltInitW_k<BLOCK_X, BLOCK_Y,
BMULT_X, BMULT_Y,
BIT_X_SPIN,
unsigned long long>), dim3(grid), dim3(block), 0, 0, (XSL/2)/SPIN_X_WORD/2, YSL, i*Y, lld/2,
reinterpret_cast<ulonglong2 *>(hamB_d),
reinterpret_cast<ulonglong2 *>(hamW_d));
}
}
countSpins(ndev, redBlocks, llen, llenLoc, black_d, white_d, sum_d, &cntPos, &cntNeg);
printf("\nInitial magnetization: %9.6lf, up_s: %12llu, dw_s: %12llu\n",
abs(static_cast<double>(cntPos)-static_cast<double>(cntNeg)) / (llen*SPIN_X_WORD),
cntPos, cntNeg);
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(hipSetDevice(i));
CHECK_CUDA(hipDeviceSynchronize());
}
double __t0;
if (ndev == 1) {
CHECK_CUDA(hipEventRecord(start, 0));
} else {
__t0 = Wtime();
}
int j;
for(j = 0; j < nsteps; j++) {
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(hipSetDevice(i));
hipLaunchKernelGGL(( spinUpdateV_2D_k<BLOCK_X, BLOCK_Y,
BMULT_X, BMULT_Y,
BIT_X_SPIN, C_BLACK,
unsigned long long>), dim3(grid), dim3(block), 0, 0, i,
seed,
j+1,
(XSL/2)/SPIN_X_WORD/2, YSL,
i*Y, /*ndev*Y,*/ lld/2,
reinterpret_cast<float (*)[5]>(exp_d[i]),
reinterpret_cast<ulonglong2 *>(hamW_d),
reinterpret_cast<ulonglong2 *>(white_d),
reinterpret_cast<ulonglong2 *>(black_d));
}
if (ndev > 1) {
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(hipSetDevice(i));
CHECK_CUDA(hipDeviceSynchronize());
}
}
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(hipSetDevice(i));
hipLaunchKernelGGL(( spinUpdateV_2D_k<BLOCK_X, BLOCK_Y,
BMULT_X, BMULT_Y,
BIT_X_SPIN, C_WHITE,
unsigned long long>), dim3(grid), dim3(block), 0, 0, i,
seed,
j+1,
(XSL/2)/SPIN_X_WORD/2, YSL,
i*Y, /*ndev*Y,*/ lld/2,
reinterpret_cast<float (*)[5]>(exp_d[i]),
reinterpret_cast<ulonglong2 *>(hamB_d),
reinterpret_cast<ulonglong2 *>(black_d),
reinterpret_cast<ulonglong2 *>(white_d));
}
if (ndev > 1) {
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(hipSetDevice(i));
CHECK_CUDA(hipDeviceSynchronize());
}
}
if (printFreq && ((j+1) % printFreq) == 0) {
countSpins(ndev, redBlocks, llen, llenLoc, black_d, white_d, sum_d, &cntPos, &cntNeg);
const double magn = abs(static_cast<double>(cntPos)-static_cast<double>(cntNeg)) / (llen*SPIN_X_WORD);
printf(" magnetization: %9.6lf, up_s: %12llu, dw_s: %12llu (iter: %8d)\n",
magn, cntPos, cntNeg, j+1);
if (corrOut) {
computeCorr(cname, ndev, j+1, lld, useSubLatt, XSL, YSL, X, Y, black_d, white_d, corr_d, corr_h);
}
if (dumpOut) {
char fname[256];
snprintf(fname, sizeof(fname), "lattice_%dx%d_T_%f_IT_%08d_", Y, X, temp, j+1);
dumpLattice(fname, ndev, Y, lld, llen, llenLoc, v_d);
}
if (tgtMagn != -1.0) {
if (abs(magn-tgtMagn) < TGT_MAGN_MAX_DIFF) {
j++;
break;
}
}
}
//printf("j: %d, printExpSteps[%d]: %d\n", j, printExpCur, printExpSteps[printExpCur]);
if (printExp && printExpSteps[printExpCur] == j) {
printExpCur++;
countSpins(ndev, redBlocks, llen, llenLoc, black_d, white_d, sum_d, &cntPos, &cntNeg);
const double magn = abs(static_cast<double>(cntPos)-static_cast<double>(cntNeg)) / (llen*SPIN_X_WORD);
printf(" magnetization: %9.6lf (^2: %9.6lf), up_s: %12llu, dw_s: %12llu (iter: %8d)\n",
magn, magn*magn, cntPos, cntNeg, j+1);
if (corrOut) {
computeCorr(cname, ndev, j+1, lld, useSubLatt, XSL, YSL, X, Y, black_d, white_d, corr_d, corr_h);
}
if (dumpOut) {
char fname[256];
snprintf(fname, sizeof(fname), "lattice_%dx%d_T_%f_IT_%08d_", Y, X, temp, j+1);
dumpLattice(fname, ndev, Y, lld, llen, llenLoc, v_d);
}
if (tgtMagn != -1.0) {
if (abs(magn-tgtMagn) < TGT_MAGN_MAX_DIFF) {
j++;
break;
}
}
}
if (tempUpdFreq && ((j+1) % tempUpdFreq) == 0) {
temp = MAX(MIN_TEMP, temp+tempUpdStep);
printf("Changing temperature to %f\n", temp);
for(int i = 0; i < 2; i++) {
for(int k = 0; k < 5; k++) {
exp_h[i][k] = expf((i?-2.0f:2.0f)*static_cast<float>(k*2-4)*(1.0f/temp));
printf("exp[%2d][%d]: %E\n", i?1:-1, k, exp_h[i][k]);
}
}
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(hipMemcpy(exp_d[i], exp_h, 2*5*sizeof(**exp_d), hipMemcpyHostToDevice));
}
}
}
if (ndev == 1) {
CHECK_CUDA(hipEventRecord(stop, 0));
CHECK_CUDA(hipEventSynchronize(stop));
} else {
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(hipSetDevice(i));
CHECK_CUDA(hipDeviceSynchronize());
}
__t0 = Wtime()-__t0;
}
countSpins(ndev, redBlocks, llen, llenLoc, black_d, white_d, sum_d, &cntPos, &cntNeg);
printf("Final magnetization: %9.6lf, up_s: %12llu, dw_s: %12llu (iter: %8d)\n\n",
abs(static_cast<double>(cntPos)-static_cast<double>(cntNeg)) / (llen*SPIN_X_WORD),
cntPos, cntNeg, j);
if (ndev == 1) {
CHECK_CUDA(hipEventElapsedTime(&et, start, stop));
} else {
et = __t0*1.0E+3;
}
printf("Kernel execution time for %d update steps: %E ms, %.2lf flips/ns (BW: %.2lf GB/s)\n",
j, et, static_cast<double>(llen*SPIN_X_WORD)*j / (et*1.0E+6),
//(llen*sizeof(*v_d)*2*j/1.0E+9) / (et/1.0E+3));
(2ull*j*
( sizeof(*v_d)*((llen/2) + (llen/2) + (llen/2)) + // src color read, dst color read, dst color write
sizeof(*exp_d)*5*grid.x*grid.y ) /
1.0E+9) / (et/1.0E+3));
if (dumpOut) {
char fname[256];
snprintf(fname, sizeof(fname), "lattice_%dx%d_T_%f_IT_%08d_", Y, X, temp, j);
dumpLattice(fname, ndev, Y, lld, llen, llenLoc, v_d);
}
CHECK_CUDA(hipFree(v_d));
if (useGenHamilt) {
CHECK_CUDA(hipFree(ham_d));
}
if (ndev == 1) {
CHECK_CUDA(hipFree(exp_d[0]));
CHECK_CUDA(hipFree(sum_d[0]));
} else {
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(hipSetDevice(i));
CHECK_CUDA(hipFree(exp_d[i]));
CHECK_CUDA(hipFree(sum_d[i]));
}
}
if (corrOut) {
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(hipSetDevice(i));
CHECK_CUDA(hipFree(corr_d[i]));
free(corr_h[i]);
}
}
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(hipSetDevice(i));
CHECK_CUDA(hipDeviceReset());
}
return 0;
}
| 9f0231592e7f572ed92ce7007973a5bb21baf19f.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Mauro Bisson <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <getopt.h>
#include <unistd.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include "cudamacro.h" /* for time() */
#include "utils.h"
#define DIV_UP(a,b) (((a)+((b)-1))/(b))
#define THREADS 128
#define BIT_X_SPIN (4)
#define CRIT_TEMP (2.26918531421f)
#define ALPHA_DEF (0.1f)
#define MIN_TEMP (0.05f*CRIT_TEMP)
#define MIN(a,b) (((a)<(b))?(a):(b))
#define MAX(a,b) (((a)>(b))?(a):(b))
// 2048+: 16, 16, 2, 1
// 1024: 16, 16, 1, 2
// 512: 8, 8, 1, 1
// 256: 4, 8, 1, 1
// 128: 2, 8, 1, 1
#define BLOCK_X (16)
#define BLOCK_Y (16)
#define BMULT_X (2)
#define BMULT_Y (1)
#define MAX_GPU (256)
#define NUMIT_DEF (1)
#define SEED_DEF (463463564571ull)
#define TGT_MAGN_MAX_DIFF (1.0E-3)
#define MAX_EXP_TIME (200)
#define MIN_EXP_TIME (152)
#define MAX_CORR_LEN (128)
__device__ __forceinline__ unsigned int __mypopc(const unsigned int x) {
return __popc(x);
}
__device__ __forceinline__ unsigned long long int __mypopc(const unsigned long long int x) {
return __popcll(x);
}
enum {C_BLACK, C_WHITE};
__device__ __forceinline__ uint2 __mymake_int2(const unsigned int x,
const unsigned int y) {
return make_uint2(x, y);
}
__device__ __forceinline__ ulonglong2 __mymake_int2(const unsigned long long x,
const unsigned long long y) {
return make_ulonglong2(x, y);
}
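// Random lattice initialization: every thread owns a LOOP_Y x LOOP_X block of
// ulonglong2 words and sets each BITXSP-bit nibble to 1 with probability 0.5,
// drawing from a Philox stream whose offset advances with the iteration and
// color so that successive launches use disjoint portions of the stream.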
template<int BDIM_X,
int BDIM_Y,
int LOOP_X,
int LOOP_Y,
int BITXSP,
int COLOR,
typename INT_T,
typename INT2_T>
__global__ void latticeInit_k(const int devid,
const long long seed,
const int it,
const long long begY,
const long long dimX, // ld
INT2_T *__restrict__ vDst) {
const int __i = blockIdx.y*BDIM_Y*LOOP_Y + threadIdx.y;
const int __j = blockIdx.x*BDIM_X*LOOP_X + threadIdx.x;
const int SPIN_X_WORD = 8*sizeof(INT_T)/BITXSP;
const long long tid = ((devid*gridDim.y + blockIdx.y)*gridDim.x + blockIdx.x)*BDIM_X*BDIM_Y +
threadIdx.y*BDIM_X + threadIdx.x;
curandStatePhilox4_32_10_t st;
curand_init(seed, tid, static_cast<long long>(2*SPIN_X_WORD)*LOOP_X*LOOP_Y*(2*it+COLOR), &st);
INT2_T __tmp[LOOP_Y][LOOP_X];
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__tmp[i][j] = __mymake_int2(INT_T(0),INT_T(0));
}
}
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
#pragma unroll
for(int k = 0; k < 8*sizeof(INT_T); k += BITXSP) {
if (curand_uniform(&st) < 0.5f) {
__tmp[i][j].x |= INT_T(1) << k;
}
if (curand_uniform(&st) < 0.5f) {
__tmp[i][j].y |= INT_T(1) << k;
}
}
}
}
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
vDst[(begY + __i+i*BDIM_Y)*dimX + __j+j*BDIM_X] = __tmp[i][j];
}
}
return;
}
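// Random Hamiltonian, black sub-lattice: each of the BITXSP coupling bits of
// every spin is independently set to 1 (an anti-ferromagnetic link) with
// probability tgtProb, i.e. the value passed via the -J option.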
template<int BDIM_X,
int BDIM_Y,
int LOOP_X,
int LOOP_Y,
int BITXSP,
typename INT_T,
typename INT2_T>
__global__ void hamiltInitB_k(const int devid,
const float tgtProb,
const long long seed,
const long long begY,
const long long dimX, // ld
INT2_T *__restrict__ hamB) {
const int __i = blockIdx.y*BDIM_Y*LOOP_Y + threadIdx.y;
const int __j = blockIdx.x*BDIM_X*LOOP_X + threadIdx.x;
const long long tid = ((devid*gridDim.y + blockIdx.y)*gridDim.x + blockIdx.x)*BDIM_X*BDIM_Y +
threadIdx.y*BDIM_X + threadIdx.x;
curandStatePhilox4_32_10_t st;
curand_init(seed, tid, 0, &st);
INT2_T __tmp[LOOP_Y][LOOP_X];
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__tmp[i][j] = __mymake_int2(INT_T(0),INT_T(0));
}
}
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
#pragma unroll
for(int k = 0; k < 8*sizeof(INT_T); k += BITXSP) {
#pragma unroll
for(int l = 0; l < BITXSP; l++) {
if (curand_uniform(&st) < tgtProb) {
__tmp[i][j].x |= INT_T(1) << (k+l);
}
if (curand_uniform(&st) < tgtProb) {
__tmp[i][j].y |= INT_T(1) << (k+l);
}
}
}
}
}
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
hamB[(begY + __i+i*BDIM_Y)*dimX + __j+j*BDIM_X] = __tmp[i][j];
}
}
return;
}
template<int BDIM_X,
int BDIM_Y,
int LOOP_X,
int LOOP_Y,
int BITXSP,
typename INT_T,
typename INT2_T>
__global__ void hamiltInitW_k(const int xsl,
const int ysl,
const long long begY,
const long long dimX,
const INT2_T *__restrict__ hamB,
INT2_T *__restrict__ hamW) {
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int __i = blockIdx.y*BDIM_Y*LOOP_Y + tidy;
const int __j = blockIdx.x*BDIM_X*LOOP_X + tidx;
INT2_T __me[LOOP_Y][LOOP_X];
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__me[i][j] = hamB[(begY+__i+i*BDIM_Y)*dimX + __j+j*BDIM_X];
}
}
INT2_T __up[LOOP_Y][LOOP_X];
INT2_T __ct[LOOP_Y][LOOP_X];
INT2_T __dw[LOOP_Y][LOOP_X];
INT2_T __sd[LOOP_Y][LOOP_X];
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__up[i][j].x = (__me[i][j].x & 0x8888888888888888ull) >> 1;
__up[i][j].y = (__me[i][j].y & 0x8888888888888888ull) >> 1;
__dw[i][j].x = (__me[i][j].x & 0x4444444444444444ull) << 1;
__dw[i][j].y = (__me[i][j].y & 0x4444444444444444ull) << 1;
}
}
const int readBack = !(__i%2); // this kernel reads only BLACK Js
const int BITXWORD = 8*sizeof(INT_T);
if (!readBack) {
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__ct[i][j].x = (__me[i][j].x & 0x2222222222222222ull) >> 1;
__ct[i][j].y = (__me[i][j].y & 0x2222222222222222ull) >> 1;
__ct[i][j].x |= (__me[i][j].x & 0x1111111111111111ull) << (BITXSP+1);
__ct[i][j].y |= (__me[i][j].x & 0x1111111111111111ull) >> (BITXWORD-BITXSP - 1);
__ct[i][j].y |= (__me[i][j].y & 0x1111111111111111ull) << (BITXSP+1);
__sd[i][j].x = (__me[i][j].y & 0x1111111111111111ull) >> (BITXWORD-BITXSP - 1);
__sd[i][j].y = 0;
}
}
} else {
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__ct[i][j].x = (__me[i][j].x & 0x1111111111111111ull) << 1;
__ct[i][j].y = (__me[i][j].y & 0x1111111111111111ull) << 1;
__ct[i][j].y |= (__me[i][j].y & 0x2222222222222222ull) >> (BITXSP+1);
__ct[i][j].x |= (__me[i][j].y & 0x2222222222222222ull) << (BITXWORD-BITXSP - 1);
__ct[i][j].x |= (__me[i][j].x & 0x2222222222222222ull) >> (BITXSP+1);
__sd[i][j].y = (__me[i][j].x & 0x2222222222222222ull) << (BITXWORD-BITXSP - 1);
__sd[i][j].x = 0;
}
}
}
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
const int yoff = begY+__i + i*BDIM_Y;
const int upOff = ( yoff %ysl) == 0 ? yoff+ysl-1 : yoff-1;
const int dwOff = ((yoff+1)%ysl) == 0 ? yoff-ysl+1 : yoff+1;
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
const int xoff = __j + j*BDIM_X;
atomicOr(&hamW[yoff*dimX + xoff].x, __ct[i][j].x);
atomicOr(&hamW[yoff*dimX + xoff].y, __ct[i][j].y);
atomicOr(&hamW[upOff*dimX + xoff].x, __up[i][j].x);
atomicOr(&hamW[upOff*dimX + xoff].y, __up[i][j].y);
atomicOr(&hamW[dwOff*dimX + xoff].x, __dw[i][j].x);
atomicOr(&hamW[dwOff*dimX + xoff].y, __dw[i][j].y);
const int sideOff = readBack ? ( (xoff %xsl) == 0 ? xoff+xsl-1 : xoff-1 ):
( ((xoff+1)%xsl) == 0 ? xoff-xsl+1 : xoff+1);
atomicOr(&hamW[yoff*dimX + sideOff].x, __sd[i][j].x);
atomicOr(&hamW[yoff*dimX + sideOff].y, __sd[i][j].y);
}
}
return;
}
template<int BDIM_X,
int BDIM_Y,
int TILE_X,
int TILE_Y,
int FRAME_X,
int FRAME_Y,
typename INT_T,
typename INT2_T>
__device__ void loadTileOLD(const long long begY,
const long long dimY,
const long long dimX,
const INT2_T *__restrict__ v,
INT2_T tile[][TILE_X+2*FRAME_X]) {
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int blkx = blockIdx.x;
const int blky = blockIdx.y;
const int FULL_X = TILE_X+2*FRAME_X;
const int FULL_Y = TILE_Y+2*FRAME_Y;
#pragma unroll
for(int j = 0; j < FULL_Y; j += BDIM_Y) {
const int yoff = begY + blky*TILE_Y + j+tidy - FRAME_Y;
const int yoffAdj = (yoff < 0) ? dimY+yoff : (yoff >= dimY ? yoff-dimY : yoff);
#pragma unroll
for(int i = 0; i < FULL_X; i += BDIM_X) {
const int xoff = blkx*TILE_X + i+tidx - FRAME_X;
const int xoffAdj = (xoff < 0) ? dimX+xoff : (xoff >= dimX ? xoff-dimX : xoff);
INT2_T __t = v[yoffAdj*dimX + xoffAdj];
if (j+tidy < FULL_Y && i+tidx < FULL_X) {
tile[j+tidy][i+tidx] = __t;
}
}
}
return;
}
template<int BDIM_X,
int BDIM_Y,
int TILE_X,
int TILE_Y,
int FRAME_X,
int FRAME_Y,
typename INT2_T>
__device__ void loadTile(const int slX,
const int slY,
const long long begY,
const long long dimX,
const INT2_T *__restrict__ v,
INT2_T tile[][TILE_X+2*FRAME_X]) {
const int blkx = blockIdx.x;
const int blky = blockIdx.y;
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int startX = blkx*TILE_X;
const int startY = begY + blky*TILE_Y;
#pragma unroll
for(int j = 0; j < TILE_Y; j += BDIM_Y) {
int yoff = startY + j+tidy;
#pragma unroll
for(int i = 0; i < TILE_X; i += BDIM_X) {
const int xoff = startX + i+tidx;
tile[FRAME_Y + j+tidy][FRAME_X + i+tidx] = v[yoff*dimX + xoff];
}
}
if (tidy == 0) {
int yoff = (startY % slY) == 0 ? startY+slY-1 : startY-1;
#pragma unroll
for(int i = 0; i < TILE_X; i += BDIM_X) {
const int xoff = startX + i+tidx;
tile[0][FRAME_X + i+tidx] = v[yoff*dimX + xoff];
}
yoff = ((startY+TILE_Y) % slY) == 0 ? startY+TILE_Y - slY : startY+TILE_Y;
#pragma unroll
for(int i = 0; i < TILE_X; i += BDIM_X) {
const int xoff = startX + i+tidx;
tile[FRAME_Y + TILE_Y][FRAME_X + i+tidx] = v[yoff*dimX + xoff];
}
// the other branch is slower so skip it if possible
if (BDIM_X <= TILE_Y) {
int xoff = (startX % slX) == 0 ? startX+slX-1 : startX-1;
#pragma unroll
for(int j = 0; j < TILE_Y; j += BDIM_X) {
yoff = startY + j+tidx;
tile[FRAME_Y + j+tidx][0] = v[yoff*dimX + xoff];
}
xoff = ((startX+TILE_X) % slX) == 0 ? startX+TILE_X - slX : startX+TILE_X;
#pragma unroll
for(int j = 0; j < TILE_Y; j += BDIM_X) {
yoff = startY + j+tidx;
tile[FRAME_Y + j+tidx][FRAME_X + TILE_X] = v[yoff*dimX + xoff];
}
} else {
if (tidx < TILE_Y) {
int xoff = (startX % slX) == 0 ? startX+slX-1 : startX-1;
yoff = startY + tidx;
tile[FRAME_Y + tidx][0] = v[yoff*dimX + xoff];
xoff = ((startX+TILE_X) % slX) == 0 ? startX+TILE_X - slX : startX+TILE_X;
tile[FRAME_Y + tidx][FRAME_X + TILE_X] = v[yoff*dimX + xoff];
}
}
}
return;
}
template<int BDIM_X,
int BDIM_Y,
int LOOP_X,
int LOOP_Y,
int BITXSP,
int COLOR,
typename INT_T,
typename INT2_T>
__global__
void spinUpdateV_2D_k(const int devid,
const long long seed,
const int it,
const int slX, // sublattice size X of one color (in words)
const int slY, // sublattice size Y of one color
const long long begY,
const long long dimX, // ld
const float vExp[][5],
const INT2_T *__restrict__ jDst,
const INT2_T *__restrict__ vSrc,
INT2_T *__restrict__ vDst) {
const int SPIN_X_WORD = 8*sizeof(INT_T)/BITXSP;
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
__shared__ INT2_T shTile[BDIM_Y*LOOP_Y+2][BDIM_X*LOOP_X+2];
loadTile<BDIM_X, BDIM_Y,
BDIM_X*LOOP_X,
BDIM_Y*LOOP_Y,
1, 1, INT2_T>(slX, slY, begY, dimX, vSrc, shTile);
// __shExp[cur_s{0,1}][sum_s{0,1}] = __expf(-2*cur_s{-1,+1}*F{+1,-1}(sum_s{0,1})*INV_TEMP)
__shared__ float __shExp[2][5];
// for small lattices BDIM_X/Y may be smaller than 2/5
#pragma unroll
for(int i = 0; i < 2; i += BDIM_Y) {
#pragma unroll
for(int j = 0; j < 5; j += BDIM_X) {
if (i+tidy < 2 && j+tidx < 5) {
__shExp[i+tidy][j+tidx] = vExp[i+tidy][j+tidx];
}
}
}
__syncthreads();
const int __i = blockIdx.y*BDIM_Y*LOOP_Y + tidy;
const int __j = blockIdx.x*BDIM_X*LOOP_X + tidx;
const long long tid = ((devid*gridDim.y + blockIdx.y)*gridDim.x + blockIdx.x)*BDIM_X*BDIM_Y +
threadIdx.y*BDIM_X + threadIdx.x;
INT2_T __me[LOOP_Y][LOOP_X];
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__me[i][j] = vDst[(begY+__i+i*BDIM_Y)*dimX + __j+j*BDIM_X];
}
}
INT2_T __up[LOOP_Y][LOOP_X];
INT2_T __ct[LOOP_Y][LOOP_X];
INT2_T __dw[LOOP_Y][LOOP_X];
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__up[i][j] = shTile[i*BDIM_Y + tidy][j*BDIM_X + 1+tidx];
__ct[i][j] = shTile[i*BDIM_Y + 1+tidy][j*BDIM_X + 1+tidx];
__dw[i][j] = shTile[i*BDIM_Y + 2+tidy][j*BDIM_X + 1+tidx];
}
}
// BDIM_Y is power of two so row parity won't change across loops
const int readBack = (COLOR == C_BLACK) ? !(__i%2) : (__i%2);
INT2_T __sd[LOOP_Y][LOOP_X];
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__sd[i][j] = (readBack) ? shTile[i*BDIM_Y + 1+tidy][j*BDIM_X + tidx]:
shTile[i*BDIM_Y + 1+tidy][j*BDIM_X + 2+tidx];
}
}
if (readBack) {
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__sd[i][j].x = (__ct[i][j].x << BITXSP) | (__sd[i][j].y >> (8*sizeof(__sd[i][j].y)-BITXSP));
__sd[i][j].y = (__ct[i][j].y << BITXSP) | (__ct[i][j].x >> (8*sizeof(__ct[i][j].x)-BITXSP));
}
}
} else {
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__sd[i][j].y = (__ct[i][j].y >> BITXSP) | (__sd[i][j].x << (8*sizeof(__sd[i][j].x)-BITXSP));
__sd[i][j].x = (__ct[i][j].x >> BITXSP) | (__ct[i][j].y << (8*sizeof(__ct[i][j].y)-BITXSP));
}
}
}
if (jDst != NULL) {
INT2_T __J[LOOP_Y][LOOP_X];
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__J[i][j] = jDst[(begY+__i+i*BDIM_Y)*dimX + __j+j*BDIM_X];
}
}
// apply them
// the 4 bits of J codify: <upJ, downJ, leftJ, rightJ>
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__up[i][j].x ^= (__J[i][j].x & 0x8888888888888888ull) >> 3;
__up[i][j].y ^= (__J[i][j].y & 0x8888888888888888ull) >> 3;
__dw[i][j].x ^= (__J[i][j].x & 0x4444444444444444ull) >> 2;
__dw[i][j].y ^= (__J[i][j].y & 0x4444444444444444ull) >> 2;
if (readBack) {
// __sd[][] holds "left" spins
// __ct[][] holds "right" spins
__sd[i][j].x ^= (__J[i][j].x & 0x2222222222222222ull) >> 1;
__sd[i][j].y ^= (__J[i][j].y & 0x2222222222222222ull) >> 1;
__ct[i][j].x ^= (__J[i][j].x & 0x1111111111111111ull);
__ct[i][j].y ^= (__J[i][j].y & 0x1111111111111111ull);
} else {
// __ct[][] holds "left" spins
// __sd[][] holds "right" spins
__ct[i][j].x ^= (__J[i][j].x & 0x2222222222222222ull) >> 1;
__ct[i][j].y ^= (__J[i][j].y & 0x2222222222222222ull) >> 1;
__sd[i][j].x ^= (__J[i][j].x & 0x1111111111111111ull);
__sd[i][j].y ^= (__J[i][j].y & 0x1111111111111111ull);
}
}
}
}
curandStatePhilox4_32_10_t st;
curand_init(seed, tid, static_cast<long long>(2*SPIN_X_WORD)*LOOP_X*LOOP_Y*(2*it+COLOR), &st);
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
__ct[i][j].x += __up[i][j].x;
__dw[i][j].x += __sd[i][j].x;
__ct[i][j].x += __dw[i][j].x;
__ct[i][j].y += __up[i][j].y;
__dw[i][j].y += __sd[i][j].y;
__ct[i][j].y += __dw[i][j].y;
}
}
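	// each 4-bit field of __ct now holds the number of (coupling-adjusted)
	// neighbors (0..4) whose spin bit is 1; together with the spin's own value
	// it indexes the __shExp table below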
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
#pragma unroll
for(int z = 0; z < 8*sizeof(INT_T); z += BITXSP) {
const int2 __src = make_int2((__me[i][j].x >> z) & 0xF,
(__me[i][j].y >> z) & 0xF);
const int2 __sum = make_int2((__ct[i][j].x >> z) & 0xF,
(__ct[i][j].y >> z) & 0xF);
const INT_T ONE = static_cast<INT_T>(1);
if (curand_uniform(&st) <= __shExp[__src.x][__sum.x]) {
__me[i][j].x ^= ONE << z;
}
if (curand_uniform(&st) <= __shExp[__src.y][__sum.y]) {
__me[i][j].y ^= ONE << z;
}
}
}
}
#pragma unroll
for(int i = 0; i < LOOP_Y; i++) {
#pragma unroll
for(int j = 0; j < LOOP_X; j++) {
vDst[(begY + __i+i*BDIM_Y)*dimX + __j+j*BDIM_X] = __me[i][j];
}
}
return;
}
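// block-wide reduction helper: each warp reduces its values with shuffles, warp
// leaders park the partial sums in shared memory, and warp 0 reduces those
// partials; the returned value is only meaningful in thread 0 of the block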
template<int BDIM_X,
int WSIZE,
typename T>
__device__ __forceinline__ T __block_sum(T v) {
__shared__ T sh[BDIM_X/WSIZE];
const int lid = threadIdx.x%WSIZE;
const int wid = threadIdx.x/WSIZE;
#pragma unroll
for(int i = WSIZE/2; i; i >>= 1) {
v += __shfl_down_sync(0xFFFFFFFF, v, i);
}
if (lid == 0) sh[wid] = v;
__syncthreads();
if (wid == 0) {
v = (lid < (BDIM_X/WSIZE)) ? sh[lid] : 0;
#pragma unroll
for(int i = (BDIM_X/WSIZE)/2; i; i >>= 1) {
v += __shfl_down_sync(0xFFFFFFFF, v, i);
}
}
__syncthreads();
return v;
}
// to be optimized
template<int BDIM_X,
int BITXSP,
typename INT_T,
typename SUM_T>
__global__ void getMagn_k(const long long n,
const INT_T *__restrict__ v,
SUM_T *__restrict__ sum) {
const int SPIN_X_WORD = 8*sizeof(INT_T)/BITXSP;
const long long nth = static_cast<long long>(blockDim.x)*gridDim.x;
const long long tid = static_cast<long long>(blockDim.x)*blockIdx.x + threadIdx.x;
SUM_T __cntP = 0;
SUM_T __cntN = 0;
for(long long i = 0; i < n; i += nth) {
if (i+tid < n) {
const int __c = __mypopc(v[i+tid]);
__cntP += __c;
__cntN += SPIN_X_WORD - __c;
}
}
__cntP = __block_sum<BDIM_X, 32>(__cntP);
__cntN = __block_sum<BDIM_X, 32>(__cntN);
if (threadIdx.x == 0) {
atomicAdd(sum+0, __cntP);
atomicAdd(sum+1, __cntN);
}
return;
}
static void usage(const int SPIN_X_WORD, const char *pname) {
const char *bname = rindex(pname, '/');
if (!bname) {bname = pname;}
else {bname++;}
fprintf(stdout,
"Usage: %1$s [options]\n"
"options:\n"
"\t-x|--x <HORIZ_DIM>\n"
"\t\tSpecifies the horizontal dimension of the entire lattice (black+white spins),\n"
"\t\tper GPU. This dimension must be a multiple of %2$d.\n"
"\n"
"\t-y|--y <VERT_DIM>\n"
"\t\tSpecifies the vertical dimension of the entire lattice (black+white spins), per\n"
"\t\tGPU. This dimension must be a multiple of %3$d.\n"
"\n"
"\t-n|--n <NSTEPS>\n"
"\t\tSpecifies the number of iteration to run.\n"
"\t\tDefualt: %4$d\n"
"\n"
"\t-d|--devs <NUM_DEVICES>\n"
"\t\tSpecifies the number of GPUs to use. Will use devices with ids [0, NUM_DEVS-1].\n"
"\t\tDefualt: 1.\n"
"\n"
"\t-s|--seed <SEED>\n"
"\t\tSpecifies the seed used to generate random numbers.\n"
"\t\tDefault: %5$llu\n"
"\n"
"\t-a|--alpha <ALPHA>\n"
"\t\tSpecifies the temperature in T_CRIT units. If both this option and '-t' are\n"
"\t\tspecified then the '-t' option is used.\n"
"\t\tDefault: %6$f\n"
"\n"
"\t-t|--temp <TEMP>\n"
"\t\tSpecifies the temperature in absolute units. If both this option and '-a' are\n"
"\t\tspecified then this option is used.\n"
"\t\tDefault: %7$f\n"
"\n"
"\t-p|--print <STAT_FREQ>\n"
"\t\tSpecifies the frequency, in no. of iteration, with which the magnetization\n"
"\t\tstatistics is printed. If this option is used together to the '-e' option, this\n"
"\t\toption is ignored.\n"
"\t\tDefault: only at the beginning and at end of the simulation\n"
"\n"
"\t-e|--exppr\n"
"\t\tPrints the magnetization at time steps in the series 0 <= 2^(x/4) < NSTEPS. If\n"
"\t\tthis option is used together to the '-p' option, the latter is ignored.\n"
"\t\tDefault: disabled\n"
"\n"
"\t-c|--corr\n"
"\t\tDumps to a file named corr_{X}x{Y}_T_{TEMP} the correlation of each point\n"
"\t\twith the %8$d points on the right and below. The correlation is computed every\n"
"\t\ttime the magnetization is printed on screen (based on either the '-p' or '-e'\n"
"\t\toption) and it is written in the file one line per measure.\n"
"\t\tDefault: disabled\n"
"\n"
"\t-m|--magn <TGT_MAGN>\n"
"\t\tSpecifies the magnetization value at which the simulation is interrupted. The\n"
"\t\tmagnetization of the system is checked against TGT_MAGN every STAT_FREQ, if the\n"
"\t\t'-p' option is specified, or according to the exponential timestep series, if\n"
"\t\tthe '-e' option is specified. If neither '-p' not '-e' are specified then this\n"
"\t\toption is ignored.\n"
"\t\tDefault: unset\n"
"\n"
"\t-J|--J <PROB>\n"
"\t\tSpecifies the probability [0.0-1.0] that links connecting any two spins are\n"
"\t\tanti-ferromagnetic. \n"
"\t\tDefault: 0.0\n"
"\n"
"\t --xsl <HORIZ_SUB_DIM>\n"
"\t\tSpecifies the horizontal dimension of each sub-lattice (black+white spins), per\n"
"\t\tGPU. This dimension must be a divisor of the horizontal dimension of the entire\n"
"\t\tlattice per GPU (specified with the '-x' option) and a multiple of %2$d.\n"
"\t\tDefault: sub-lattices are disabled.\n"
"\n"
"\t --ysl <VERT_SUB_DIM>\n"
"\t\tSpecifies the vertical dimension of each sub-lattice (black+white spins), per\n"
"\t\tGPU. This dimension must be a divisor of the vertical dimension of the entire\n"
"\t\tlattice per GPU (specified with the '-y' option) and a multiple of %3$d.\n"
"\n"
"\t-o|--o\n"
"\t\tEnables the file dump of the lattice every time the magnetization is printed.\n"
"\t\tDefault: off\n\n",
bname,
2*SPIN_X_WORD*2*BLOCK_X*BMULT_X,
BLOCK_Y*BMULT_Y,
NUMIT_DEF,
SEED_DEF,
ALPHA_DEF,
ALPHA_DEF*CRIT_TEMP,
MAX_CORR_LEN);
exit(EXIT_SUCCESS);
}
static void countSpins(const int ndev,
const int redBlocks,
const size_t llen,
const size_t llenLoc,
const unsigned long long *black_d,
const unsigned long long *white_d,
unsigned long long **sum_d,
unsigned long long *bsum,
unsigned long long *wsum) {
if (ndev == 1) {
CHECK_CUDA(cudaMemset(sum_d[0], 0, 2*sizeof(**sum_d)));
getMagn_k<THREADS, BIT_X_SPIN><<<redBlocks, THREADS>>>(llen, black_d, sum_d[0]);
CHECK_ERROR("getMagn_k");
CHECK_CUDA(cudaDeviceSynchronize());
} else {
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(cudaSetDevice(i));
CHECK_CUDA(cudaMemset(sum_d[i], 0, 2*sizeof(**sum_d)));
getMagn_k<THREADS, BIT_X_SPIN><<<redBlocks, THREADS>>>(llenLoc, black_d + i*llenLoc, sum_d[i]);
getMagn_k<THREADS, BIT_X_SPIN><<<redBlocks, THREADS>>>(llenLoc, white_d + i*llenLoc, sum_d[i]);
CHECK_ERROR("getMagn_k");
}
}
bsum[0] = 0;
wsum[0] = 0;
unsigned long long sum_h[MAX_GPU][2];
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(cudaMemcpy(sum_h[i], sum_d[i], 2*sizeof(**sum_h), cudaMemcpyDeviceToHost));
bsum[0] += sum_h[i][0];
wsum[0] += sum_h[i][1];
}
return;
}
template<int BDIM_X,
int BITXSP,
int N_CORR,
typename INT_T,
typename SUM_T>
__global__ void getCorr2D_k(const int corrLen,
const long long dimX,
const long long dimY,
const long long begY,
const INT_T *__restrict__ black,
const INT_T *__restrict__ white,
SUM_T *__restrict__ corr) {
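	// one block per lattice row: for every displacement j in [1, corrLen] the
	// block accumulates +1 for every site whose spin matches its j-th neighbor
	// to the right and below (with periodic wrap), and -1 otherwise; the per-row
	// totals are then atomically added into corr[]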
const int SPIN_X_WORD = 8*sizeof(INT_T)/BITXSP;
const int tid = threadIdx.x;
const long long startY = begY + blockIdx.x;
const int SH_LEN = BDIM_X + DIV_UP(N_CORR/2, SPIN_X_WORD);
__shared__ INT_T __shB[SH_LEN];
__shared__ INT_T __shW[SH_LEN];
__shared__ SUM_T __shC[N_CORR];
#pragma unroll
for(int j = 0; j < N_CORR; j += BDIM_X) {
if (j+tid < N_CORR) {
__shC[j+tid] = 0;
}
}
const int chunkDimX = 2*BDIM_X*SPIN_X_WORD;
for(long long l = 0; l < dimX; l += BDIM_X) {
__syncthreads();
#pragma unroll
for(int j = 0; j < SH_LEN; j += BDIM_X) {
if (j+tid < SH_LEN) {
const int off = (l+j+tid < dimX) ? l+j+tid : l+j+tid - dimX;
__shB[j+tid] = black[startY*dimX + off];
__shW[j+tid] = white[startY*dimX + off];
}
}
__syncthreads();
for(int j = 1; j <= corrLen; j++) {
SUM_T myCorr = 0;
for(long long i = tid; i < chunkDimX; i += BDIM_X) {
// horiz corr
const long long myWrdX = (i/2) / SPIN_X_WORD;
const long long myOffX = (i/2) % SPIN_X_WORD;
INT_T __tmp = ((startY ^ i) & 1) ? __shW[myWrdX] : __shB[myWrdX];
const int mySpin = (__tmp >> (myOffX*BITXSP)) & 0xF;
const long long nextX = i+j;
const long long nextWrdX = (nextX/2) / SPIN_X_WORD;
const long long nextOffX = (nextX/2) % SPIN_X_WORD;
__tmp = ((startY ^ nextX) & 1) ? __shW[nextWrdX] : __shB[nextWrdX];
const int nextSpin = (__tmp >> (nextOffX*BITXSP)) & 0xF;
myCorr += (mySpin == nextSpin) ? SUM_T(1) : SUM_T(-1);
// vert corr
const long long nextY = (startY+j >= dimY) ? startY+j-dimY : startY+j;
__tmp = ((nextY ^ i) & 1) ? white[nextY*dimX + l+myWrdX]:
black[nextY*dimX + l+myWrdX];
const int vertSpin = (__tmp >> (myOffX*BITXSP)) & 0xF;
myCorr += (mySpin == vertSpin) ? SUM_T(1) : SUM_T(-1);
}
myCorr = __block_sum<BDIM_X, 32>(myCorr);
if (!tid) {
__shC[j-1] += myCorr;
}
}
}
__syncthreads();
#pragma unroll
for(int j = 0; j < N_CORR; j += BDIM_X) {
if (j+tid < N_CORR) {
atomicAdd(corr + j+tid, __shC[j+tid]);
}
}
return;
}
template<int BDIM_X,
int BITXSP,
int N_CORR,
typename INT_T,
typename SUM_T>
__global__ void getCorr2DRepl_k(const int corrLen,
const long long dimX,
const long long begY,
const long long slX, // sublattice size X of one color (in words)
const long long slY, // sublattice size Y of one color
const INT_T *__restrict__ black,
const INT_T *__restrict__ white,
SUM_T *__restrict__ corr) {
const int tid = threadIdx.x;
const int SPIN_X_WORD = 8*sizeof(INT_T)/BITXSP;
const long long startY = begY + blockIdx.x;
const long long mySLY = startY / slY;
const long long NSLX = 2ull*dimX*SPIN_X_WORD / slX;
const int SH_LEN = BDIM_X + DIV_UP(N_CORR/2, SPIN_X_WORD);
__shared__ INT_T __shB[SH_LEN];
__shared__ INT_T __shW[SH_LEN];
__shared__ SUM_T __shC[N_CORR];
#pragma unroll
for(int j = 0; j < N_CORR; j += BDIM_X) {
if (j+tid < N_CORR) {
__shC[j+tid] = 0;
}
}
const int chunkDimX = MIN(2*BDIM_X*SPIN_X_WORD, slX);
const int slXLD = (slX/2) / SPIN_X_WORD;
for(long long sl = 0; sl < NSLX; sl++) {
for(long long l = 0; l < slXLD; l += BDIM_X) {
__syncthreads();
#pragma unroll
for(int j = 0; j < SH_LEN; j += BDIM_X) {
if (j+tid < SH_LEN) {
const int off = (l+j+tid) % slXLD;
__shB[j+tid] = black[startY*dimX + sl*slXLD + off];
__shW[j+tid] = white[startY*dimX + sl*slXLD + off];
}
}
__syncthreads();
for(int j = 1; j <= corrLen; j++) {
SUM_T myCorr = 0;
for(long long i = tid; i < chunkDimX; i += BDIM_X) {
// horiz corr
const long long myWrdX = (i/2) / SPIN_X_WORD;
const long long myOffX = (i/2) % SPIN_X_WORD;
INT_T __tmp = ((startY ^ i) & 1) ? __shW[myWrdX] : __shB[myWrdX];
const int mySpin = (__tmp >> (myOffX*BITXSP)) & 0xF;
const long long nextX = i+j;
const long long nextWrdX = (nextX/2) / SPIN_X_WORD;
const long long nextOffX = (nextX/2) % SPIN_X_WORD;
__tmp = ((startY ^ nextX) & 1) ? __shW[nextWrdX] : __shB[nextWrdX];
const int nextSpin = (__tmp >> (nextOffX*BITXSP)) & 0xF;
myCorr += (mySpin == nextSpin) ? SUM_T(1) : SUM_T(-1);
// vert corr
const long long nextY = (startY+j >= (mySLY+1)*slY) ? startY+j-slY : startY+j;
__tmp = ((nextY ^ i) & 1) ? white[nextY*dimX + sl*slXLD + l+myWrdX]:
black[nextY*dimX + sl*slXLD + l+myWrdX];
const int vertSpin = (__tmp >> (myOffX*BITXSP)) & 0xF;
myCorr += (mySpin == vertSpin) ? SUM_T(1) : SUM_T(-1);
}
myCorr = __block_sum<BDIM_X, 32>(myCorr);
if (!tid) {
__shC[j-1] += myCorr;
}
}
}
}
__syncthreads();
#pragma unroll
for(int j = 0; j < N_CORR; j += BDIM_X) {
if (j+tid < N_CORR) {
atomicAdd(corr + j+tid, __shC[j+tid]);
}
}
return;
}
static void computeCorr(const char *fname,
const int ndev,
const int it,
const int lld,
const int useRepl,
const int XSL, // full sub-lattice (B+W) X
                        const int YSL,     // full sub-lattice (B+W) Y
const int X, // per-GPU full lattice (B+W) X
const int Y, // per-GPU full lattice (B+W) Y
const unsigned long long *black_d,
const unsigned long long *white_d,
double **corr_d,
double **corr_h) {
const int n_corr = MAX_CORR_LEN;
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(cudaSetDevice(i));
CHECK_CUDA(cudaMemset(corr_d[i], 0, n_corr*sizeof(**corr_d)));
if (!useRepl) {
getCorr2D_k<THREADS, BIT_X_SPIN, MAX_CORR_LEN><<<Y, THREADS>>>(n_corr,
lld,
ndev*Y,
i*Y,
black_d,
white_d,
corr_d[i]);
CHECK_ERROR("getCorr2D_k");
} else {
getCorr2DRepl_k<THREADS, BIT_X_SPIN, MAX_CORR_LEN><<<Y, THREADS>>>(n_corr,
lld,
i*Y,
XSL,
YSL,
black_d,
white_d,
corr_d[i]);
CHECK_ERROR("getCorr2DRepl_k");
}
}
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(cudaSetDevice(i));
CHECK_CUDA(cudaMemcpy(corr_h[i],
corr_d[i],
n_corr*sizeof(**corr_h),
cudaMemcpyDeviceToHost));
}
for(int d = 1; d < ndev; d++) {
for(int i = 0; i < n_corr; i++) {
corr_h[0][i] += corr_h[d][i];
}
}
FILE *fp = Fopen(fname, "a");
fprintf(fp,"%10d", it);
for(int i = 0; i < n_corr; i++) {
fprintf(fp," % -12G", corr_h[0][i] / (2.0*X*Y*ndev));
}
fprintf(fp,"\n");
fclose(fp);
return;
}
static void dumpLattice(const char *fprefix,
const int ndev,
const int Y,
const size_t lld,
const size_t llen,
const size_t llenLoc,
const unsigned long long *v_d) {
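	// one hexadecimal digit is written per spin, row by row, interleaving the
	// black and white words so that every output line is a complete row of the
	// combined (black+white) lattice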
char fname[256];
if (ndev == 1) {
unsigned long long *v_h = (unsigned long long *)Malloc(llen*sizeof(*v_h));
CHECK_CUDA(cudaMemcpy(v_h, v_d, llen*sizeof(*v_h), cudaMemcpyDeviceToHost));
unsigned long long *black_h = v_h;
unsigned long long *white_h = v_h + llen/2;
snprintf(fname, sizeof(fname), "%s0.txt", fprefix);
FILE *fp = Fopen(fname, "w");
for(int i = 0; i < Y; i++) {
for(int j = 0; j < lld; j++) {
unsigned long long __b = black_h[i*lld + j];
unsigned long long __w = white_h[i*lld + j];
for(int k = 0; k < 8*sizeof(*v_h); k += BIT_X_SPIN) {
if (i&1) {
fprintf(fp, "%llX", (__w >> k) & 0xF);
fprintf(fp, "%llX", (__b >> k) & 0xF);
} else {
fprintf(fp, "%llX", (__b >> k) & 0xF);
fprintf(fp, "%llX", (__w >> k) & 0xF);
}
}
}
fprintf(fp, "\n");
}
fclose(fp);
free(v_h);
} else {
#pragma omp parallel for schedule(static)
for(int d = 0; d < ndev; d++) {
const unsigned long long *black_h = v_d + d*llenLoc;
const unsigned long long *white_h = v_d + llen/2 + d*llenLoc;
snprintf(fname, sizeof(fname), "%s%d.txt", fprefix, d);
FILE *fp = Fopen(fname, "w");
for(int i = 0; i < Y; i++) {
for(int j = 0; j < lld; j++) {
unsigned long long __b = black_h[i*lld + j];
unsigned long long __w = white_h[i*lld + j];
for(int k = 0; k < 8*sizeof(*black_h); k += BIT_X_SPIN) {
if (i&1) {
fprintf(fp, "%llX", (__w >> k) & 0xF);
fprintf(fp, "%llX", (__b >> k) & 0xF);
} else {
fprintf(fp, "%llX", (__b >> k) & 0xF);
fprintf(fp, "%llX", (__w >> k) & 0xF);
}
}
}
fprintf(fp, "\n");
}
fclose(fp);
}
}
return;
}
static void generate_times(unsigned long long nsteps,
unsigned long long *list_times) {
int nt = 0;
list_times[0]=MIN_EXP_TIME;
unsigned long long t = 0;
for(unsigned long long j = 0; j < nsteps && t < nsteps; j++) {
t = rint(pow(2.0, j/4.0));
if (t >= 2*list_times[nt] && nt < MAX_EXP_TIME-1) {
// if (t > list_times[nt] && nt < MAX_EXP_TIME-1) {
nt++;
list_times[nt] = t;
//printf("list_times[%d]: %llu\n", nt, list_times[nt]);
}
}
return;
}
int main(int argc, char **argv) {
unsigned long long *v_d=NULL;
unsigned long long *black_d=NULL;
unsigned long long *white_d=NULL;
unsigned long long *ham_d=NULL;
unsigned long long *hamB_d=NULL;
unsigned long long *hamW_d=NULL;
cudaEvent_t start, stop;
float et;
const int SPIN_X_WORD = (8*sizeof(*v_d)) / BIT_X_SPIN;
int X = 0;
int Y = 0;
int dumpOut = 0;
char cname[256];
int corrOut = 0;
double *corr_d[MAX_GPU];
double *corr_h[MAX_GPU];
int nsteps = NUMIT_DEF;
unsigned long long seed = SEED_DEF;
int ndev = 1;
float alpha = -1.0f;
float temp = -1.0f;
float tempUpdStep = 0;
int tempUpdFreq = 0;
int printFreq = 0;
int printExp = 0;
int printExpCur = 0;
unsigned long long printExpSteps[MAX_EXP_TIME];
double tgtMagn = -1.0;
int useGenHamilt = 0;
float hamiltPerc1 = 0.0f;
int useSubLatt = 0;
int XSL = 0;
int YSL = 0;
int NSLX = 1;
int NSLY = 1;
int och;
while(1) {
int option_index = 0;
static struct option long_options[] = {
{ "x", required_argument, 0, 'x'},
{ "y", required_argument, 0, 'y'},
{ "nit", required_argument, 0, 'n'},
{ "seed", required_argument, 0, 's'},
{ "out", no_argument, 0, 'o'},
{ "devs", required_argument, 0, 'd'},
{ "alpha", required_argument, 0, 'a'},
{ "temp", required_argument, 0, 't'},
{ "print", required_argument, 0, 'p'},
{"update", required_argument, 0, 'u'},
{ "magn", required_argument, 0, 'm'},
{ "exppr", no_argument, 0, 'e'},
{ "corr", no_argument, 0, 'c'},
{ "J", required_argument, 0, 'J'},
{ "xsl", required_argument, 0, 1},
{ "ysl", required_argument, 0, 2},
{ "help", required_argument, 0, 'h'},
{ 0, 0, 0, 0}
};
och = getopt_long(argc, argv, "x:y:n:ohs:d:a:t:p:u:m:ecJ:r:", long_options, &option_index);
if (och == -1) break;
switch (och) {
case 0:// handles long opts with non-NULL flag field
break;
case 'x':
X = atoi(optarg);
break;
case 'y':
Y = atoi(optarg);
break;
case 'n':
nsteps = atoi(optarg);
break;
case 'o':
dumpOut = 1;
break;
case 'h':
usage(SPIN_X_WORD, argv[0]);
break;
case 's':
seed = atoll(optarg);
if(seed==0) {
seed=((getpid()*rand())&0x7FFFFFFFF);
}
break;
case 'd':
ndev = atoi(optarg);
break;
case 'a':
alpha = atof(optarg);
break;
case 't':
temp = atof(optarg);
break;
case 'p':
printFreq = atoi(optarg);
break;
case 'e':
printExp = 1;
break;
case 'u':
// format -u FLT,INT
{
char *__tmp0 = strtok(optarg, ",");
if (!__tmp0) {
fprintf(stderr, "cannot find temperature step in parameter...\n");
exit(EXIT_FAILURE);
}
char *__tmp1 = strtok(NULL, ",");
if (!__tmp1) {
fprintf(stderr, "cannot find iteration count in parameter...\n");
exit(EXIT_FAILURE);
}
tempUpdStep = atof(__tmp0);
tempUpdFreq = atoi(__tmp1);
printf("tempUpdStep: %f, tempUpdFreq: %d\n", tempUpdStep, tempUpdFreq);
}
break;
case 'm':
tgtMagn = atof(optarg);
break;
case 'c':
corrOut = 1;
break;
case 'J':
useGenHamilt = 1;
hamiltPerc1 = atof(optarg);
hamiltPerc1 = MIN(MAX(0.0f, hamiltPerc1), 1.0f);
break;
case 1:
useSubLatt = 1;
XSL = atoi(optarg);
break;
case 2:
useSubLatt = 1;
YSL = atoi(optarg);
break;
case '?':
exit(EXIT_FAILURE);
default:
fprintf(stderr, "unknown option: %c\n", och);
exit(EXIT_FAILURE);
}
}
if (!X || !Y) {
if (!X) {
if (Y && !(Y % (2*SPIN_X_WORD*2*BLOCK_X*BMULT_X))) {
X = Y;
} else {
X = 2*SPIN_X_WORD*2*BLOCK_X*BMULT_X;
}
}
if (!Y) {
if (!(X%(BLOCK_Y*BMULT_Y))) {
Y = X;
} else {
Y = BLOCK_Y*BMULT_Y;
}
}
}
if (!X || (X%2) || ((X/2)%(SPIN_X_WORD*2*BLOCK_X*BMULT_X))) {
fprintf(stderr, "\nPlease specify an X dim multiple of %d\n\n", 2*SPIN_X_WORD*2*BLOCK_X*BMULT_X);
usage(SPIN_X_WORD, argv[0]);
exit(EXIT_FAILURE);
}
if (!Y || (Y%(BLOCK_Y*BMULT_Y))) {
fprintf(stderr, "\nPlease specify a Y dim multiple of %d\n\n", BLOCK_Y*BMULT_Y);
usage(SPIN_X_WORD, argv[0]);
exit(EXIT_FAILURE);
}
if (useSubLatt) {
if (!XSL || !YSL) {
if (!XSL) {
if (YSL && !(YSL % (2*SPIN_X_WORD*2*BLOCK_X*BMULT_X))) {
XSL = YSL;
} else {
XSL = 2*SPIN_X_WORD*2*BLOCK_X*BMULT_X;
}
}
if (!YSL) {
if (!(XSL%(BLOCK_Y*BMULT_Y))) {
YSL = XSL;
} else {
YSL = BLOCK_Y*BMULT_Y;
}
}
}
if ((X%XSL) || !XSL || (XSL%2) || ((XSL/2)%(SPIN_X_WORD*2*BLOCK_X*BMULT_X))) {
fprintf(stderr,
"\nPlease specify an X sub-lattice dim multiple of %d and divisor of %d\n\n",
2*SPIN_X_WORD*2*BLOCK_X*BMULT_X, X);
usage(SPIN_X_WORD, argv[0]);
exit(EXIT_FAILURE);
}
if ((Y%YSL) || !YSL || (YSL%(BLOCK_Y*BMULT_Y))) {
fprintf(stderr,
"\nPlease specify a Y sub-lattice dim multiple of %d divisor of %d\n\n",
BLOCK_Y*BMULT_Y, Y);
usage(SPIN_X_WORD, argv[0]);
exit(EXIT_FAILURE);
}
NSLX = X / XSL;
NSLY = Y / YSL;
} else {
XSL = X;
YSL = Y*ndev;
NSLX = 1;
NSLY = 1;
}
if (temp == -1.0f) {
if (alpha == -1.0f) {
temp = ALPHA_DEF*CRIT_TEMP;
} else {
temp = alpha*CRIT_TEMP;
}
}
if (printExp && printFreq) {
printFreq = 0;
}
if (printExp) {
generate_times(nsteps, printExpSteps);
}
cudaDeviceProp props;
printf("\nUsing GPUs:\n");
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(cudaGetDeviceProperties(&props, i));
printf("\t%2d (%s, %d SMs, %d th/SM max, CC %d.%d, ECC %s)\n",
i, props.name, props.multiProcessorCount,
props.maxThreadsPerMultiProcessor,
props.major, props.minor,
props.ECCEnabled?"on":"off");
}
printf("\n");
	// we assume all GPUs are the same so we'll later
	// use the props filled for the last GPU...
if (ndev > 1) {
for(int i = 0; i < ndev; i++) {
int attVal = 0;
CHECK_CUDA(cudaDeviceGetAttribute(&attVal, cudaDevAttrConcurrentManagedAccess, i));
if (!attVal) {
fprintf(stderr,
"error: device %d does not support concurrent managed memory access!\n", i);
exit(EXIT_FAILURE);
}
}
printf("GPUs direct access matrix:\n ");
for(int i = 0; i < ndev; i++) {
printf("%4d", i);
}
int missingLinks = 0;
printf("\n");
for(int i = 0; i < ndev; i++) {
printf("GPU %2d:", i);
CHECK_CUDA(cudaSetDevice(i));
for(int j = 0; j < ndev; j++) {
int access = 1;
if (i != j) {
CHECK_CUDA(cudaDeviceCanAccessPeer(&access, i, j));
if (access) {
CHECK_CUDA(cudaDeviceEnablePeerAccess(j, 0));
} else {
missingLinks++;
}
}
printf("%4c", access ? 'V' : 'X');
}
printf("\n");
}
printf("\n");
if (missingLinks) {
fprintf(stderr,
"error: %d direct memory links among devices missing\n",
missingLinks);
exit(EXIT_FAILURE);
}
}
size_t lld = (X/2)/SPIN_X_WORD;
// length of a single color section per GPU
size_t llenLoc = static_cast<size_t>(Y)*lld;
// total lattice length (all GPUs, all colors)
size_t llen = 2ull*ndev*llenLoc;
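	// layout of v_d: [black spins, GPU 0..ndev-1][white spins, GPU 0..ndev-1],
	// each per-GPU, per-color section being Y rows of lld words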
dim3 grid(DIV_UP(lld/2, BLOCK_X*BMULT_X),
DIV_UP( Y, BLOCK_Y*BMULT_Y));
dim3 block(BLOCK_X, BLOCK_Y);
printf("Run configuration:\n");
printf("\tspin/word: %d\n", SPIN_X_WORD);
printf("\tspins: %zu\n", llen*SPIN_X_WORD);
printf("\tseed: %llu\n", seed);
printf("\titerations: %d\n", nsteps);
printf("\tblock (X, Y): %d, %d\n", block.x, block.y);
printf("\ttile (X, Y): %d, %d\n", BLOCK_X*BMULT_X, BLOCK_Y*BMULT_Y);
printf("\tgrid (X, Y): %d, %d\n", grid.x, grid.y);
if (printFreq) {
printf("\tprint magn. every %d steps\n", printFreq);
} else if (printExp) {
printf("\tprint magn. following exponential series\n");
} else {
printf("\tprint magn. at 1st and last step\n");
}
if ((printFreq || printExp) && tgtMagn != -1.0) {
printf("\tearly exit if magn. == %lf+-%lf\n", tgtMagn, TGT_MAGN_MAX_DIFF);
}
printf("\ttemp: %f (%f*T_crit)\n", temp, temp/CRIT_TEMP);
if (!tempUpdFreq) {
printf("\ttemp update not set\n");
} else {
printf("\ttemp update: %f / %d iterations\n", tempUpdStep, tempUpdFreq);
}
if (useGenHamilt) {
printf("\tusing Hamiltonian buffer, setting links to -1 with prob %G\n", hamiltPerc1);
} else {
printf("\tnot using Hamiltonian buffer\n");
}
printf("\n");
if (useSubLatt) {
printf("\tusing sub-lattices:\n");
printf("\t\tno. of sub-lattices per GPU: %8d\n", NSLX*NSLY);
printf("\t\tno. of sub-lattices (total): %8d\n", ndev*NSLX*NSLY);
printf("\t\tsub-lattices size: %7d x %7d\n\n", XSL, YSL);
}
printf("\tlocal lattice size: %8d x %8d\n", Y, X);
printf("\ttotal lattice size: %8d x %8d\n", ndev*Y, X);
printf("\tlocal lattice shape: 2 x %8d x %8zu (%12zu %s)\n", Y, lld, llenLoc*2, sizeof(*v_d) == 4 ? "uints" : "ulls");
printf("\ttotal lattice shape: 2 x %8d x %8zu (%12zu %s)\n", ndev*Y, lld, llen, sizeof(*v_d) == 4 ? "uints" : "ulls");
printf("\tmemory: %.2lf MB (%.2lf MB per GPU)\n", (llen*sizeof(*v_d))/(1024.0*1024.0), llenLoc*2*sizeof(*v_d)/(1024.0*1024.0));
const int redBlocks = MIN(DIV_UP(llen, THREADS),
(props.maxThreadsPerMultiProcessor/THREADS)*props.multiProcessorCount);
unsigned long long cntPos;
unsigned long long cntNeg;
unsigned long long *sum_d[MAX_GPU];
if (ndev == 1) {
CHECK_CUDA(cudaMalloc(&v_d, llen*sizeof(*v_d)));
CHECK_CUDA(cudaMemset(v_d, 0, llen*sizeof(*v_d)));
CHECK_CUDA(cudaMalloc(&sum_d[0], 2*sizeof(**sum_d)));
if (useGenHamilt) {
CHECK_CUDA(cudaMalloc(&ham_d, llen*sizeof(*ham_d)));
CHECK_CUDA(cudaMemset(ham_d, 0, llen*sizeof(*ham_d)));
}
} else {
CHECK_CUDA(cudaMallocManaged(&v_d, llen*sizeof(*v_d), cudaMemAttachGlobal));
if (useGenHamilt) {
CHECK_CUDA(cudaMallocManaged(&ham_d, llen*sizeof(*ham_d), cudaMemAttachGlobal));
}
printf("\nSetting up multi-gpu configuration:\n"); fflush(stdout);
//#pragma omp parallel for schedule(static)
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(cudaSetDevice(i));
CHECK_CUDA(cudaMalloc(sum_d+i, 2*sizeof(**sum_d)));
CHECK_CUDA(cudaMemset(sum_d[i], 0, 2*sizeof(**sum_d)));
// set preferred loc for black/white
CHECK_CUDA(cudaMemAdvise(v_d + i*llenLoc, llenLoc*sizeof(*v_d), cudaMemAdviseSetPreferredLocation, i));
CHECK_CUDA(cudaMemAdvise(v_d + (llen/2) + i*llenLoc, llenLoc*sizeof(*v_d), cudaMemAdviseSetPreferredLocation, i));
if (useGenHamilt) {
CHECK_CUDA(cudaMemAdvise(ham_d + i*llenLoc, llenLoc*sizeof(*ham_d), cudaMemAdviseSetPreferredLocation, i));
CHECK_CUDA(cudaMemAdvise(ham_d + (llen/2) + i*llenLoc, llenLoc*sizeof(*ham_d), cudaMemAdviseSetPreferredLocation, i));
}
// black boundaries up/down
//fprintf(stderr, "v_d + %12zu + %12zu, %12zu, ..., %2d)\n", i*llenLoc, (Y-1)*lld, lld*sizeof(*v_d), (i+ndev+1)%ndev);
CHECK_CUDA(cudaMemAdvise(v_d + i*llenLoc, lld*sizeof(*v_d), cudaMemAdviseSetAccessedBy, (i+ndev-1)%ndev));
CHECK_CUDA(cudaMemAdvise(v_d + i*llenLoc + (Y-1)*lld, lld*sizeof(*v_d), cudaMemAdviseSetAccessedBy, (i+ndev+1)%ndev));
// white boundaries up/down
CHECK_CUDA(cudaMemAdvise(v_d + (llen/2) + i*llenLoc, lld*sizeof(*v_d), cudaMemAdviseSetAccessedBy, (i+ndev-1)%ndev));
CHECK_CUDA(cudaMemAdvise(v_d + (llen/2) + i*llenLoc + (Y-1)*lld, lld*sizeof(*v_d), cudaMemAdviseSetAccessedBy, (i+ndev+1)%ndev));
//CHECK_CUDA(cudaMemPrefetchAsync(v_d + i*llenLoc, llenLoc*sizeof(*v_d), i, 0));
//CHECK_CUDA(cudaMemPrefetchAsync(v_d + (llen/2) + i*llenLoc, llenLoc*sizeof(*v_d), i, 0));
// reset black/white
CHECK_CUDA(cudaMemset(v_d + i*llenLoc, 0, llenLoc*sizeof(*v_d)));
CHECK_CUDA(cudaMemset(v_d + (llen/2) + i*llenLoc, 0, llenLoc*sizeof(*v_d)));
if (useGenHamilt) {
CHECK_CUDA(cudaMemset(ham_d + i*llenLoc, 0, llenLoc*sizeof(*ham_d)));
CHECK_CUDA(cudaMemset(ham_d + (llen/2) + i*llenLoc, 0, llenLoc*sizeof(*ham_d)));
}
printf("\tGPU %2d done\n", i); fflush(stdout);
}
}
if(corrOut) {
snprintf(cname, sizeof(cname), "corr_%dx%d_T_%f_%llu", Y, X, temp, seed);
Remove(cname);
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(cudaSetDevice(i));
corr_h[i] = (double *)Malloc(MAX_CORR_LEN*sizeof(**corr_h));
CHECK_CUDA(cudaMalloc(corr_d+i, MAX_CORR_LEN*sizeof(**corr_d)));
}
}
black_d = v_d;
white_d = v_d + llen/2;
if (useGenHamilt) {
hamB_d = ham_d;
hamW_d = ham_d + llen/2;
}
float *exp_d[MAX_GPU];
float exp_h[2][5];
// precompute possible exponentials
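	// exp_h[s][n] is the Boltzmann factor used in the flip test for a spin of
	// value s (0/1) whose four neighbors contain n ones; values greater than 1
	// simply mean the flip is always accepted by the curand_uniform() check,
	// while the non-positive-temperature branch below only avoids calling expf()
	// with a divergent argument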
for(int i = 0; i < 2; i++) {
for(int j = 0; j < 5; j++) {
if(temp > 0) {
exp_h[i][j] = expf((i?-2.0f:2.0f)*static_cast<float>(j*2-4)*(1.0f/temp));
} else {
if(j == 2) {
exp_h[i][j] = 0.5f;
} else {
exp_h[i][j] = (i?-2.0f:2.0f)*static_cast<float>(j*2-4);
}
}
//printf("exp[%2d][%d]: %E\n", i?1:-1, j, exp_h[i][j]);
}
}
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(cudaSetDevice(i));
CHECK_CUDA(cudaMalloc(exp_d+i, 2*5*sizeof(**exp_d)));
CHECK_CUDA(cudaMemcpy(exp_d[i], exp_h, 2*5*sizeof(**exp_d), cudaMemcpyHostToDevice));
}
CHECK_CUDA(cudaEventCreate(&start));
CHECK_CUDA(cudaEventCreate(&stop));
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(cudaSetDevice(i));
latticeInit_k<BLOCK_X, BLOCK_Y,
BMULT_X, BMULT_Y,
BIT_X_SPIN, C_BLACK,
unsigned long long><<<grid, block>>>(i,
seed,
0, i*Y, lld/2,
reinterpret_cast<ulonglong2 *>(black_d));
CHECK_ERROR("initLattice_k");
latticeInit_k<BLOCK_X, BLOCK_Y,
BMULT_X, BMULT_Y,
BIT_X_SPIN, C_WHITE,
unsigned long long><<<grid, block>>>(i,
seed,
0, i*Y, lld/2,
reinterpret_cast<ulonglong2 *>(white_d));
CHECK_ERROR("initLattice_k");
if (useGenHamilt) {
hamiltInitB_k<BLOCK_X, BLOCK_Y,
BMULT_X, BMULT_Y,
BIT_X_SPIN,
unsigned long long><<<grid, block>>>(i,
hamiltPerc1,
seed+1, // just use a different seed
i*Y, lld/2,
reinterpret_cast<ulonglong2 *>(hamB_d));
hamiltInitW_k<BLOCK_X, BLOCK_Y,
BMULT_X, BMULT_Y,
BIT_X_SPIN,
unsigned long long><<<grid, block>>>((XSL/2)/SPIN_X_WORD/2, YSL, i*Y, lld/2,
reinterpret_cast<ulonglong2 *>(hamB_d),
reinterpret_cast<ulonglong2 *>(hamW_d));
}
}
countSpins(ndev, redBlocks, llen, llenLoc, black_d, white_d, sum_d, &cntPos, &cntNeg);
printf("\nInitial magnetization: %9.6lf, up_s: %12llu, dw_s: %12llu\n",
abs(static_cast<double>(cntPos)-static_cast<double>(cntNeg)) / (llen*SPIN_X_WORD),
cntPos, cntNeg);
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(cudaSetDevice(i));
CHECK_CUDA(cudaDeviceSynchronize());
}
double __t0;
if (ndev == 1) {
CHECK_CUDA(cudaEventRecord(start, 0));
} else {
__t0 = Wtime();
}
int j;
for(j = 0; j < nsteps; j++) {
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(cudaSetDevice(i));
spinUpdateV_2D_k<BLOCK_X, BLOCK_Y,
BMULT_X, BMULT_Y,
BIT_X_SPIN, C_BLACK,
unsigned long long><<<grid, block>>>(i,
seed,
j+1,
(XSL/2)/SPIN_X_WORD/2, YSL,
i*Y, /*ndev*Y,*/ lld/2,
reinterpret_cast<float (*)[5]>(exp_d[i]),
reinterpret_cast<ulonglong2 *>(hamW_d),
reinterpret_cast<ulonglong2 *>(white_d),
reinterpret_cast<ulonglong2 *>(black_d));
}
if (ndev > 1) {
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(cudaSetDevice(i));
CHECK_CUDA(cudaDeviceSynchronize());
}
}
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(cudaSetDevice(i));
spinUpdateV_2D_k<BLOCK_X, BLOCK_Y,
BMULT_X, BMULT_Y,
BIT_X_SPIN, C_WHITE,
unsigned long long><<<grid, block>>>(i,
seed,
j+1,
(XSL/2)/SPIN_X_WORD/2, YSL,
i*Y, /*ndev*Y,*/ lld/2,
reinterpret_cast<float (*)[5]>(exp_d[i]),
reinterpret_cast<ulonglong2 *>(hamB_d),
reinterpret_cast<ulonglong2 *>(black_d),
reinterpret_cast<ulonglong2 *>(white_d));
}
if (ndev > 1) {
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(cudaSetDevice(i));
CHECK_CUDA(cudaDeviceSynchronize());
}
}
if (printFreq && ((j+1) % printFreq) == 0) {
countSpins(ndev, redBlocks, llen, llenLoc, black_d, white_d, sum_d, &cntPos, &cntNeg);
const double magn = abs(static_cast<double>(cntPos)-static_cast<double>(cntNeg)) / (llen*SPIN_X_WORD);
printf(" magnetization: %9.6lf, up_s: %12llu, dw_s: %12llu (iter: %8d)\n",
magn, cntPos, cntNeg, j+1);
if (corrOut) {
computeCorr(cname, ndev, j+1, lld, useSubLatt, XSL, YSL, X, Y, black_d, white_d, corr_d, corr_h);
}
if (dumpOut) {
char fname[256];
snprintf(fname, sizeof(fname), "lattice_%dx%d_T_%f_IT_%08d_", Y, X, temp, j+1);
dumpLattice(fname, ndev, Y, lld, llen, llenLoc, v_d);
}
if (tgtMagn != -1.0) {
if (abs(magn-tgtMagn) < TGT_MAGN_MAX_DIFF) {
j++;
break;
}
}
}
//printf("j: %d, printExpSteps[%d]: %d\n", j, printExpCur, printExpSteps[printExpCur]);
if (printExp && printExpSteps[printExpCur] == j) {
printExpCur++;
countSpins(ndev, redBlocks, llen, llenLoc, black_d, white_d, sum_d, &cntPos, &cntNeg);
const double magn = abs(static_cast<double>(cntPos)-static_cast<double>(cntNeg)) / (llen*SPIN_X_WORD);
printf(" magnetization: %9.6lf (^2: %9.6lf), up_s: %12llu, dw_s: %12llu (iter: %8d)\n",
magn, magn*magn, cntPos, cntNeg, j+1);
if (corrOut) {
computeCorr(cname, ndev, j+1, lld, useSubLatt, XSL, YSL, X, Y, black_d, white_d, corr_d, corr_h);
}
if (dumpOut) {
char fname[256];
snprintf(fname, sizeof(fname), "lattice_%dx%d_T_%f_IT_%08d_", Y, X, temp, j+1);
dumpLattice(fname, ndev, Y, lld, llen, llenLoc, v_d);
}
if (tgtMagn != -1.0) {
if (abs(magn-tgtMagn) < TGT_MAGN_MAX_DIFF) {
j++;
break;
}
}
}
if (tempUpdFreq && ((j+1) % tempUpdFreq) == 0) {
temp = MAX(MIN_TEMP, temp+tempUpdStep);
printf("Changing temperature to %f\n", temp);
for(int i = 0; i < 2; i++) {
for(int k = 0; k < 5; k++) {
exp_h[i][k] = expf((i?-2.0f:2.0f)*static_cast<float>(k*2-4)*(1.0f/temp));
printf("exp[%2d][%d]: %E\n", i?1:-1, k, exp_h[i][k]);
}
}
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(cudaMemcpy(exp_d[i], exp_h, 2*5*sizeof(**exp_d), cudaMemcpyHostToDevice));
}
}
}
if (ndev == 1) {
CHECK_CUDA(cudaEventRecord(stop, 0));
CHECK_CUDA(cudaEventSynchronize(stop));
} else {
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(cudaSetDevice(i));
CHECK_CUDA(cudaDeviceSynchronize());
}
__t0 = Wtime()-__t0;
}
countSpins(ndev, redBlocks, llen, llenLoc, black_d, white_d, sum_d, &cntPos, &cntNeg);
printf("Final magnetization: %9.6lf, up_s: %12llu, dw_s: %12llu (iter: %8d)\n\n",
abs(static_cast<double>(cntPos)-static_cast<double>(cntNeg)) / (llen*SPIN_X_WORD),
cntPos, cntNeg, j);
if (ndev == 1) {
CHECK_CUDA(cudaEventElapsedTime(&et, start, stop));
} else {
et = __t0*1.0E+3;
}
printf("Kernel execution time for %d update steps: %E ms, %.2lf flips/ns (BW: %.2lf GB/s)\n",
j, et, static_cast<double>(llen*SPIN_X_WORD)*j / (et*1.0E+6),
//(llen*sizeof(*v_d)*2*j/1.0E+9) / (et/1.0E+3));
(2ull*j*
( sizeof(*v_d)*((llen/2) + (llen/2) + (llen/2)) + // src color read, dst color read, dst color write
sizeof(*exp_d)*5*grid.x*grid.y ) /
1.0E+9) / (et/1.0E+3));
if (dumpOut) {
char fname[256];
snprintf(fname, sizeof(fname), "lattice_%dx%d_T_%f_IT_%08d_", Y, X, temp, j);
dumpLattice(fname, ndev, Y, lld, llen, llenLoc, v_d);
}
CHECK_CUDA(cudaFree(v_d));
if (useGenHamilt) {
CHECK_CUDA(cudaFree(ham_d));
}
if (ndev == 1) {
CHECK_CUDA(cudaFree(exp_d[0]));
CHECK_CUDA(cudaFree(sum_d[0]));
} else {
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(cudaSetDevice(i));
CHECK_CUDA(cudaFree(exp_d[i]));
CHECK_CUDA(cudaFree(sum_d[i]));
}
}
if (corrOut) {
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(cudaSetDevice(i));
CHECK_CUDA(cudaFree(corr_d[i]));
free(corr_h[i]);
}
}
for(int i = 0; i < ndev; i++) {
CHECK_CUDA(cudaSetDevice(i));
CHECK_CUDA(cudaDeviceReset());
}
return 0;
}
|
cbf7ac15a1bef83b022235c19df386051c5d0191.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// -*- Mode: C++ ; c-file-style:"stroustrup"; indent-tabs-mode:nil; -*-
#include <stdio.h>
#include <stdlib.h>
#include "Model.cu"
#define DTYPE @DataType@
// The size of the tile is calculated at compile time by the SL processor.
// But the data array is statically sized.
// So, make these as big as they can get.
// Changed to be large enough to handle Fermi.
// (int)sqrt(1024) = 32
#define TILE_WIDTH 32
#define TILE_HEIGHT 32
/**
* Block of memory shared by threads working on a single tile.
* Contains all necessary cell values and edge values from the
* previous iteration.
*/
__shared__ DTYPE shmem[TILE_HEIGHT][TILE_WIDTH];
__device__ DTYPE get(int x, int y)
{
return shmem[threadIdx.y+y][threadIdx.x+x];
}
#define getNew(xxx, yyy) output[yyy * input_size.x + xxx]
#if ConvergeValue
__device__ int converge_value;
__device__ int ConvergeValue(dim3 input_size, int x, int y, DTYPE *output @ConvergeScalarVariables@)
{
@ConvergeValue@
}
#endif
// Macro to read global read only data from within CellValue code.
#define read(offset)(ro_data[offset])
__device__ DTYPE CellValue(dim3 input_size, int x, int y, DTYPE *ro_data
@ScalarVariables@)
{
@CellValue@
}
#if EdgeValue
__device__ DTYPE EdgeValue(dim3 input_size, int x, int y, DTYPE value)
{
@EdgeValue@
}
#endif
/**
* Each thread runs this kernel to calculate the value at one particular
* cell in one particular iteration.
*/
// We need to declare it C style naming.
// This avoids name mangling and allows us to get attributes about the kernel call from Cuda.
// It's possible to do this with a C++ interface, but that will only run on certain devices.
// This technique is older and therefore more reliable across Cuda devices.
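// In outline: every block loads its tile plus a ghost border of
// pyramid_height*stencil_size cells into shared memory, runs pyramid_height
// iterations in place while the region of valid cells shrinks by one stencil
// radius per iteration, and finally writes back only the cells that stayed
// valid (the usual trapezoid/ghost-zone scheme).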
extern "C" {
void @FunctionName@Kernel(dim3 input_size, dim3 stencil_size,
DTYPE *input, DTYPE *output, int pyramid_height,
DTYPE *ro_data
@ScalarVariables@
@ConvergeScalarVariables@);
}
__global__
void @FunctionName@Kernel(dim3 input_size, dim3 stencil_size,
DTYPE *input, DTYPE *output, int pyramid_height,
DTYPE *ro_data
@ScalarVariables@
@ConvergeScalarVariables@)
{
dim3 border;
int bx, by, tx, ty, x, y, ex, ey, uidx, iter, inside;
DTYPE value;
// (bx, by) is the location in the input of the top left of this block.
border.x = pyramid_height * stencil_size.x;
border.y = pyramid_height * stencil_size.y;
bx = blockIdx.x * (blockDim.x - 2*border.x) - border.x;
by = blockIdx.y * (blockDim.y - 2*border.y) - border.y;
// (x, y) is the location in the input of this thread.
tx = threadIdx.x;
ty = threadIdx.y;
x = bx + tx;
y = by + ty;
#if ConvergeValue
do {
int converge_value_result;
#endif
// (ex, ey) = (x, y) pushed into the boundaries of the input.
ex = x;
ey = y;
if (ex < 0) ex = 0;
if (ey < 0) ey = 0;
if (ex >= input_size.x) ex = input_size.x-1;
if (ey >= input_size.y) ey = input_size.y-1;
// Get current cell value or edge value.
uidx = ey * input_size.x + ex;
value = input[uidx];
inside = ((x == ex) && (y == ey));
#if EdgeValue
if (!inside)
{
value = EdgeValue(input_size, x, y, value @ScalarVariableNames@);
}
#endif
// Store value in shared memory for stencil calculations, and go.
shmem[ty][tx] = value;
iter = 0;
border.x = border.y = 0;
while (true)
{
__syncthreads();
iter++;
if (inside)
{
border.x += stencil_size.x;
border.y += stencil_size.y;
inside = ((tx >= border.x) && (tx < blockDim.x-border.x) &&
(ty >= border.y) && (ty < blockDim.y-border.y));
}
if (inside)
{
value = CellValue(input_size, x, y, ro_data
@ScalarVariableNames@);
}
if (iter >= pyramid_height)
{
if (inside)
output[uidx] = value;
break;
}
__syncthreads();
shmem[ty][tx] = value;
}
#if ConvergeValue
converge_value = @ConvergeType@;
__syncthreads();
converge_value_result = ConvergeValue(input_size, x, y, output @ConvergeScalarVariableNames@);
if (@ConvergeType@) {
if (!converge_value_result) {
converge_value = converge_value_result;
}
} else {
if (converge_value_result) {
converge_value = converge_value_result;
}
}
__syncthreads();
} while (!converge_value);
#endif
}
/**
* Store data between calls to SetData() and run().
* This is basically a hack.
*/
static DTYPE *global_ro_data = NULL;
/**
* Function exported to do the entire stencil computation.
*/
void @FunctionName@(DTYPE *host_data, int x_max, int y_max, int iterations
@ScalarVariables@
@ConvergeScalarVariables@)
{
// User-specific parameters
dim3 input_size(x_max, y_max);
dim3 stencil_size@StencilSize@;
// Host to device
DTYPE *device_input, *device_output;
int num_bytes = input_size.x * input_size.y * sizeof(DTYPE);
hipMalloc((void **) &device_input, num_bytes);
hipMalloc((void **) &device_output, num_bytes);
hipMemcpy(device_input, host_data, num_bytes, hipMemcpyHostToDevice);
#ifdef STATISTICS
struct timeval trainingstarttime, trainingendtime;
unsigned int trainingusec;
gettimeofday(&trainingstarttime, NULL);
#endif
// Setup the structure that holds parameters for the application.
// And from that, get the block size.
char * KernelName = "@FunctionName@Kernel";
dim3 tile_size = initSAProps(@NumDimensions@, input_size, stencil_size, iterations, sizeof(DTYPE), KernelName);
dim3 border, tile_data_size, grid_dims;
// Now ready for the training period.
// Need to get some timings of small kernel runs.
// TODO It would be faster if these could be 0 and 1 heights instead of 1 and 2.
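    // The assumed model: the 1-iteration and 2-iteration timings give an
    // estimate of the fixed launch overhead and of the per-iteration cost;
    // calcPyramidHeight() (presumably provided by the included Model.cu) uses
    // those two samples to pick the pyramid height minimizing time per
    // lattice iteration.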
int pyramid_height = 2;
filldim3(&border, pyramid_height * stencil_size.x, pyramid_height * stencil_size.y);
filldim3(&tile_data_size, tile_size.x - 2*border.x, tile_size.y - 2*border.y);
filldim3(&grid_dims, div_ceil(input_size.x, tile_data_size.x), div_ceil(input_size.y, tile_data_size.y));
unsigned int twoIterTime;
    timeInMicroSeconds(twoIterTime, (hipLaunchKernelGGL(@FunctionName@Kernel, dim3(grid_dims), dim3(tile_size), 0, 0,
input_size, stencil_size, device_input, device_output,
pyramid_height, global_ro_data
@ScalarVariableNames@
@ConvergeScalarVariableNames@)));
pyramid_height = 1;
filldim3(&border, pyramid_height * stencil_size.x, pyramid_height * stencil_size.y);
filldim3(&tile_data_size, tile_size.x - 2*border.x, tile_size.y - 2*border.y);
filldim3(&grid_dims, div_ceil(input_size.x, tile_data_size.x), div_ceil(input_size.y, tile_data_size.y));
unsigned int oneIterTime;
    timeInMicroSeconds(oneIterTime, (hipLaunchKernelGGL(@FunctionName@Kernel, dim3(grid_dims), dim3(tile_size), 0, 0,
input_size, stencil_size, device_input, device_output,
pyramid_height, global_ro_data
@ScalarVariableNames@
@ConvergeScalarVariableNames@)));
#ifdef STATISTICS
/////////////////////////////////////////////////////////////////////////////////////
// Start of code to gather statistics to hone model. Remove in final version.
////////////////////////////////////////////////////////////////////////////////////
fprintf(stderr, "***********************************Start of a new Run****************************************\n");
fprintf(stderr, "Data Size=%d, Tile Size=%d Iteration Count=%d\n", input_size.x, tile_size.x, iterations);
// Precalculate the pyramid height so we can get stats on the calculated value.
int calcMinPyramid = calcPyramidHeight(grid_dims, oneIterTime, twoIterTime);
gettimeofday(&trainingendtime, NULL);
trainingusec = ((trainingendtime.tv_sec - trainingstarttime.tv_sec) * 1000000 +
(trainingendtime.tv_usec - trainingstarttime.tv_usec));
// Get second best for same reason.
int secondMinPyramid = getSecond(calcMinPyramid);
// Gather statistics to help hone model.
double calcMinTime, secondMinTime;
double actualMinTime = 1000000000;
int actualMinPyramid;
// Now let's just try them all to see what the optimal pyramid height is.
for (int i=1; i<tile_size.x/(2 * stencil_size.x); i++)
{
int pyramid_height = i;
// Now we can calculate the other sizes.
dim3 border(pyramid_height * stencil_size.x,
pyramid_height * stencil_size.y);
dim3 tile_data_size(tile_size.x - 2*border.x,
tile_size.y - 2*border.y);
dim3 grid_dims(div_ceil(input_size.x, tile_data_size.x),
div_ceil(input_size.y, tile_data_size.y));
uint32_t time;
        timeInMicroSeconds(time, (hipLaunchKernelGGL(@FunctionName@Kernel, dim3(grid_dims), dim3(tile_size), 0, 0,
input_size, stencil_size, device_input, device_output,
i, global_ro_data
@ScalarVariableNames@
@ConvergeScalarVariableNames@)));
double timePer = ((double)time)/i;
if (i == calcMinPyramid) calcMinTime = timePer;
if (i == secondMinPyramid) secondMinTime = timePer;
if (timePer < actualMinTime)
{
actualMinPyramid = i;
actualMinTime = timePer;
}
// fprintf(stderr, "Pyramid Height=%d, time=%u, Time per iteration=%f.\n", i, time, ((double)time/i));
}
// Now we can output some statistics.
double firstError = ((1. - (actualMinTime/calcMinTime)) * 100.);
double secondError = ((1. - (actualMinTime/secondMinTime)) * 100.);
fprintf(stderr, "Size %d BestHeight %d CalcHeight %d %%Slowdown %4.2f CalcSecond %d %%Slowdown %4.2f MinSlowdown %4.2f\n",
input_size.x, actualMinPyramid, calcMinPyramid, firstError, secondMinPyramid, secondError, MIN(firstError, secondError));
/////////////////////////////////////////////////////////////////////////////////////
// End of code to gather statistics to hone model. Remove in final version.
////////////////////////////////////////////////////////////////////////////////////
#endif
#ifdef STATISTICS
for (int i=1; i<tile_size.x/(2 * stencil_size.x); i++)
{
struct timeval starttime, endtime;
unsigned int usec2;
gettimeofday(&starttime, NULL);
pyramid_height=i;
#else
// Now we can calculate the pyramid height.
pyramid_height = calcPyramidHeight(grid_dims, oneIterTime, twoIterTime);
#endif
// And use the result to calculate various sizes.
filldim3(&border, pyramid_height * stencil_size.x, pyramid_height * stencil_size.y);
filldim3(&tile_data_size, tile_size.x - 2*border.x, tile_size.y - 2*border.y);
filldim3(&grid_dims, div_ceil(input_size.x, tile_data_size.x), div_ceil(input_size.y, tile_data_size.y));
// Run computation
for (int iter = 0; iter < iterations; iter += pyramid_height)
{
if (iter + pyramid_height > iterations)
pyramid_height = iterations - iter;
        hipLaunchKernelGGL(@FunctionName@Kernel, dim3(grid_dims), dim3(tile_size), 0, 0,
input_size, stencil_size, device_input, device_output,
pyramid_height, global_ro_data
@ScalarVariableNames@
@ConvergeScalarVariableNames@);
DTYPE *temp = device_input;
device_input = device_output;
device_output = temp;
}
#ifdef STATISTICS
// Synch the threads to make sure everything is done before taking a timing.
CUDA_SAFE_THREAD_SYNC();
gettimeofday(&endtime, NULL);
usec2 = ((endtime.tv_sec - starttime.tv_sec) * 1000000 +
(endtime.tv_usec - starttime.tv_usec));
fprintf(stderr, "Actual pyramid=%d, Actual iteration time=%Lu, Actual Total time=%lu\n", i, usec2, usec2+trainingusec);
}
#endif
// Device to host
hipMemcpy(host_data, device_input, num_bytes, hipMemcpyDeviceToHost);
hipFree(device_input);
hipFree(device_output);
if (global_ro_data != NULL)
{
hipFree(global_ro_data);
global_ro_data = NULL;
}
}
/**
* Store unnamed data on device.
*/
void @FunctionName@SetData(DTYPE *host_data, int num_elements)
{
int num_bytes = sizeof(DTYPE) * num_elements;
hipMalloc((void **) &global_ro_data, num_bytes);
hipMemcpy(global_ro_data, host_data, num_bytes, hipMemcpyHostToDevice);
}
| cbf7ac15a1bef83b022235c19df386051c5d0191.cu | // -*- Mode: C++ ; c-file-style:"stroustrup"; indent-tabs-mode:nil; -*-
#include <stdio.h>
#include <stdlib.h>
#include "Model.cu"
#define DTYPE @DataType@
// The size of the tile is calculated at compile time by the SL processor.
// But the data array is statically sized.
// So, make these as big as they can get.
// Changed to be large enough to handle Fermi.
// (int)sqrt(1024) = 32
#define TILE_WIDTH 32
#define TILE_HEIGHT 32
/**
* Block of memory shared by threads working on a single tile.
* Contains all necessary cell values and edge values from the
* previous iteration.
*/
__shared__ DTYPE shmem[TILE_HEIGHT][TILE_WIDTH];
__device__ DTYPE get(int x, int y)
{
return shmem[threadIdx.y+y][threadIdx.x+x];
}
#define getNew(xxx, yyy) output[yyy * input_size.x + xxx]
#if ConvergeValue
__device__ int converge_value;
__device__ int ConvergeValue(dim3 input_size, int x, int y, DTYPE *output @ConvergeScalarVariables@)
{
@ConvergeValue@
}
#endif
// Macro to read global read only data from within CellValue code.
#define read(offset)(ro_data[offset])
__device__ DTYPE CellValue(dim3 input_size, int x, int y, DTYPE *ro_data
@ScalarVariables@)
{
@CellValue@
}
#if EdgeValue
__device__ DTYPE EdgeValue(dim3 input_size, int x, int y, DTYPE value)
{
@EdgeValue@
}
#endif
/**
* Each thread runs this kernel to calculate the value at one particular
* cell in one particular iteration.
*/
// We need to declare it C style naming.
// This avoids name mangling and allows us to get attributes about the kernel call from Cuda.
// It's possible to do this with a C++ interface, but that will only run on certain devices.
// This technique is older and therefore more reliable across Cuda devices.
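// In outline: every block loads its tile plus a ghost border of
// pyramid_height*stencil_size cells into shared memory, runs pyramid_height
// iterations in place while the region of valid cells shrinks by one stencil
// radius per iteration, and finally writes back only the cells that stayed
// valid (the usual trapezoid/ghost-zone scheme).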
extern "C" {
void @FunctionName@Kernel(dim3 input_size, dim3 stencil_size,
DTYPE *input, DTYPE *output, int pyramid_height,
DTYPE *ro_data
@ScalarVariables@
@ConvergeScalarVariables@);
}
__global__
void @FunctionName@Kernel(dim3 input_size, dim3 stencil_size,
DTYPE *input, DTYPE *output, int pyramid_height,
DTYPE *ro_data
@ScalarVariables@
@ConvergeScalarVariables@)
{
dim3 border;
int bx, by, tx, ty, x, y, ex, ey, uidx, iter, inside;
DTYPE value;
// (bx, by) is the location in the input of the top left of this block.
border.x = pyramid_height * stencil_size.x;
border.y = pyramid_height * stencil_size.y;
bx = blockIdx.x * (blockDim.x - 2*border.x) - border.x;
by = blockIdx.y * (blockDim.y - 2*border.y) - border.y;
// (x, y) is the location in the input of this thread.
tx = threadIdx.x;
ty = threadIdx.y;
x = bx + tx;
y = by + ty;
#if ConvergeValue
do {
int converge_value_result;
#endif
// (ex, ey) = (x, y) pushed into the boundaries of the input.
ex = x;
ey = y;
if (ex < 0) ex = 0;
if (ey < 0) ey = 0;
if (ex >= input_size.x) ex = input_size.x-1;
if (ey >= input_size.y) ey = input_size.y-1;
// Get current cell value or edge value.
uidx = ey * input_size.x + ex;
value = input[uidx];
inside = ((x == ex) && (y == ey));
#if EdgeValue
if (!inside)
{
value = EdgeValue(input_size, x, y, value @ScalarVariableNames@);
}
#endif
// Store value in shared memory for stencil calculations, and go.
shmem[ty][tx] = value;
iter = 0;
border.x = border.y = 0;
while (true)
{
__syncthreads();
iter++;
if (inside)
{
border.x += stencil_size.x;
border.y += stencil_size.y;
inside = ((tx >= border.x) && (tx < blockDim.x-border.x) &&
(ty >= border.y) && (ty < blockDim.y-border.y));
}
if (inside)
{
value = CellValue(input_size, x, y, ro_data
@ScalarVariableNames@);
}
if (iter >= pyramid_height)
{
if (inside)
output[uidx] = value;
break;
}
__syncthreads();
shmem[ty][tx] = value;
}
#if ConvergeValue
converge_value = @ConvergeType@;
__syncthreads();
converge_value_result = ConvergeValue(input_size, x, y, output @ConvergeScalarVariableNames@);
if (@ConvergeType@) {
if (!converge_value_result) {
converge_value = converge_value_result;
}
} else {
if (converge_value_result) {
converge_value = converge_value_result;
}
}
__syncthreads();
} while (!converge_value);
#endif
}
/**
* Store data between calls to SetData() and run().
* This is basically a hack.
*/
static DTYPE *global_ro_data = NULL;
/**
* Function exported to do the entire stencil computation.
*/
void @FunctionName@(DTYPE *host_data, int x_max, int y_max, int iterations
@ScalarVariables@
@ConvergeScalarVariables@)
{
// User-specific parameters
dim3 input_size(x_max, y_max);
dim3 stencil_size@StencilSize@;
// Host to device
DTYPE *device_input, *device_output;
int num_bytes = input_size.x * input_size.y * sizeof(DTYPE);
cudaMalloc((void **) &device_input, num_bytes);
cudaMalloc((void **) &device_output, num_bytes);
cudaMemcpy(device_input, host_data, num_bytes, cudaMemcpyHostToDevice);
#ifdef STATISTICS
struct timeval trainingstarttime, trainingendtime;
unsigned int trainingusec;
gettimeofday(&trainingstarttime, NULL);
#endif
// Setup the structure that holds parameters for the application.
// And from that, get the block size.
char * KernelName = "@FunctionName@Kernel";
dim3 tile_size = initSAProps(@NumDimensions@, input_size, stencil_size, iterations, sizeof(DTYPE), KernelName);
dim3 border, tile_data_size, grid_dims;
// Now ready for the training period.
// Need to get some timings of small kernel runs.
// TODO It would be faster if these could be 0 and 1 heights instead of 1 and 2.
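    // The assumed model: the 1-iteration and 2-iteration timings give an
    // estimate of the fixed launch overhead and of the per-iteration cost;
    // calcPyramidHeight() (presumably provided by the included Model.cu) uses
    // those two samples to pick the pyramid height minimizing time per
    // lattice iteration.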
int pyramid_height = 2;
filldim3(&border, pyramid_height * stencil_size.x, pyramid_height * stencil_size.y);
filldim3(&tile_data_size, tile_size.x - 2*border.x, tile_size.y - 2*border.y);
filldim3(&grid_dims, div_ceil(input_size.x, tile_data_size.x), div_ceil(input_size.y, tile_data_size.y));
unsigned int twoIterTime;
timeInMicroSeconds(twoIterTime, (@FunctionName@Kernel<<< grid_dims, tile_size >>>(
input_size, stencil_size, device_input, device_output,
pyramid_height, global_ro_data
@ScalarVariableNames@
@ConvergeScalarVariableNames@)));
pyramid_height = 1;
filldim3(&border, pyramid_height * stencil_size.x, pyramid_height * stencil_size.y);
filldim3(&tile_data_size, tile_size.x - 2*border.x, tile_size.y - 2*border.y);
filldim3(&grid_dims, div_ceil(input_size.x, tile_data_size.x), div_ceil(input_size.y, tile_data_size.y));
unsigned int oneIterTime;
timeInMicroSeconds(oneIterTime, (@FunctionName@Kernel<<< grid_dims, tile_size >>>(
input_size, stencil_size, device_input, device_output,
pyramid_height, global_ro_data
@ScalarVariableNames@
@ConvergeScalarVariableNames@)));
#ifdef STATISTICS
/////////////////////////////////////////////////////////////////////////////////////
// Start of code to gather statistics to hone model. Remove in final version.
////////////////////////////////////////////////////////////////////////////////////
fprintf(stderr, "***********************************Start of a new Run****************************************\n");
fprintf(stderr, "Data Size=%d, Tile Size=%d Iteration Count=%d\n", input_size.x, tile_size.x, iterations);
// Precalculate the pyramid height so we can get stats on the calculated value.
int calcMinPyramid = calcPyramidHeight(grid_dims, oneIterTime, twoIterTime);
gettimeofday(&trainingendtime, NULL);
trainingusec = ((trainingendtime.tv_sec - trainingstarttime.tv_sec) * 1000000 +
(trainingendtime.tv_usec - trainingstarttime.tv_usec));
// Get second best for same reason.
int secondMinPyramid = getSecond(calcMinPyramid);
// Gather statistics to help hone model.
double calcMinTime, secondMinTime;
double actualMinTime = 1000000000;
int actualMinPyramid;
// Now let's just try them all to see what the optimal pyramid height is.
for (int i=1; i<tile_size.x/(2 * stencil_size.x); i++)
{
int pyramid_height = i;
// Now we can calculate the other sizes.
dim3 border(pyramid_height * stencil_size.x,
pyramid_height * stencil_size.y);
dim3 tile_data_size(tile_size.x - 2*border.x,
tile_size.y - 2*border.y);
dim3 grid_dims(div_ceil(input_size.x, tile_data_size.x),
div_ceil(input_size.y, tile_data_size.y));
uint32_t time;
timeInMicroSeconds(time, (@FunctionName@Kernel<<< grid_dims, tile_size >>>(
input_size, stencil_size, device_input, device_output,
i, global_ro_data
@ScalarVariableNames@
@ConvergeScalarVariableNames@)));
double timePer = ((double)time)/i;
if (i == calcMinPyramid) calcMinTime = timePer;
if (i == secondMinPyramid) secondMinTime = timePer;
if (timePer < actualMinTime)
{
actualMinPyramid = i;
actualMinTime = timePer;
}
// fprintf(stderr, "Pyramid Height=%d, time=%u, Time per iteration=%f.\n", i, time, ((double)time/i));
}
// Now we can output some statistics.
double firstError = ((1. - (actualMinTime/calcMinTime)) * 100.);
double secondError = ((1. - (actualMinTime/secondMinTime)) * 100.);
fprintf(stderr, "Size %d BestHeight %d CalcHeight %d %%Slowdown %4.2f CalcSecond %d %%Slowdown %4.2f MinSlowdown %4.2f\n",
input_size.x, actualMinPyramid, calcMinPyramid, firstError, secondMinPyramid, secondError, MIN(firstError, secondError));
/////////////////////////////////////////////////////////////////////////////////////
// End of code to gather statistics to hone model. Remove in final version.
////////////////////////////////////////////////////////////////////////////////////
#endif
#ifdef STATISTICS
for (int i=1; i<tile_size.x/(2 * stencil_size.x); i++)
{
struct timeval starttime, endtime;
unsigned int usec2;
gettimeofday(&starttime, NULL);
pyramid_height=i;
#else
// Now we can calculate the pyramid height.
pyramid_height = calcPyramidHeight(grid_dims, oneIterTime, twoIterTime);
#endif
// And use the result to calculate various sizes.
filldim3(&border, pyramid_height * stencil_size.x, pyramid_height * stencil_size.y);
filldim3(&tile_data_size, tile_size.x - 2*border.x, tile_size.y - 2*border.y);
filldim3(&grid_dims, div_ceil(input_size.x, tile_data_size.x), div_ceil(input_size.y, tile_data_size.y));
// Run computation
for (int iter = 0; iter < iterations; iter += pyramid_height)
{
if (iter + pyramid_height > iterations)
pyramid_height = iterations - iter;
@FunctionName@Kernel<<< grid_dims, tile_size >>>(
input_size, stencil_size, device_input, device_output,
pyramid_height, global_ro_data
@ScalarVariableNames@
@ConvergeScalarVariableNames@);
DTYPE *temp = device_input;
device_input = device_output;
device_output = temp;
}
#ifdef STATISTICS
// Synch the threads to make sure everything is done before taking a timing.
CUDA_SAFE_THREAD_SYNC();
gettimeofday(&endtime, NULL);
usec2 = ((endtime.tv_sec - starttime.tv_sec) * 1000000 +
(endtime.tv_usec - starttime.tv_usec));
fprintf(stderr, "Actual pyramid=%d, Actual iteration time=%Lu, Actual Total time=%lu\n", i, usec2, usec2+trainingusec);
}
#endif
// Device to host
cudaMemcpy(host_data, device_input, num_bytes, cudaMemcpyDeviceToHost);
cudaFree(device_input);
cudaFree(device_output);
if (global_ro_data != NULL)
{
cudaFree(global_ro_data);
global_ro_data = NULL;
}
}
/**
* Store unnamed data on device.
*/
void @FunctionName@SetData(DTYPE *host_data, int num_elements)
{
int num_bytes = sizeof(DTYPE) * num_elements;
cudaMalloc((void **) &global_ro_data, num_bytes);
cudaMemcpy(global_ro_data, host_data, num_bytes, cudaMemcpyHostToDevice);
}
|
5e0b523078f9219b67c034dbf298f5800eb8c724.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void cu_dsigmoid_a(const float* src, float* dst, int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
float tmp = __fsub_rd(1.0, src[tid]);
dst[tid] = __fmul_rd(tmp, src[tid]);
tid += stride;
}
} | 5e0b523078f9219b67c034dbf298f5800eb8c724.cu | #include "includes.h"
__global__ void cu_dsigmoid_a(const float* src, float* dst, int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
float tmp = __fsub_rd(1.0, src[tid]);
dst[tid] = __fmul_rd(tmp, src[tid]);
tid += stride;
}
} |
6608248a0a452236ca99ae74c8a38c483e1835d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2023 CMU, Facebook, LANL, MIT, NVIDIA, and Stanford (alphabetical)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "flexflow/ops/aggregate_spec.h"
#include "flexflow/utils/cuda_helper.h"
namespace FlexFlow {
__global__ void
aggspec_forward_kernel(float **exp_preds,
int const *exp_assign,
float *output,
int n, // num experts
int const k, // num chosen experts
int exp_samples, // max samples per expert
int const batch_size,
int out_dim) {
__shared__ float
*chosen_exp_preds[AGGREGATE_SPEC_MAX_K * AGGREGATE_SPEC_MAX_BATCH_SIZE];
// Get pred pointers, single thread per block
if (threadIdx.x == 0) {
int expert_idx[AGGREGATE_SPEC_MAX_N] = {0};
for (int i = 0; i < batch_size; i++) {
for (int j = 0; j < k; j++) {
// Get pointer to chosen expert predictions
int expert = exp_assign[i * k + j];
if (expert_idx[expert] >= exp_samples) {
// dropped sample
chosen_exp_preds[i * k + j] = 0;
continue;
}
chosen_exp_preds[i * k + j] =
exp_preds[expert] + expert_idx[expert] * out_dim;
expert_idx[expert]++;
}
}
}
__syncthreads();
// compute output
CUDA_KERNEL_LOOP(i, k * batch_size * out_dim) {
if (chosen_exp_preds[i / out_dim] != 0) {
output[i] = chosen_exp_preds[i / out_dim][i % out_dim];
} else {
output[i] = 0.0f;
}
}
}
__device__ void aggspec_backward_kernel_gate(float const *output_grad,
float *full_gate_grads,
int const *expert_assign,
bool const *cache_corr,
float const *gate_pred,
int *expert_bal,
float lambda_bal,
int batch_size,
int k,
int n,
int out_dim) {
__shared__ float gate_grad_sum[AGGREGATE_SPEC_MAX_BATCH_SIZE];
// init gate_grad_sum to 0
CUDA_KERNEL_LOOP(i, batch_size) {
gate_grad_sum[i] = 0.0f;
}
__syncthreads();
// get sum of expert errors
/* NOTE: Errors just squared L2 norm of gradients. * batch_size because the
expert gradients are /= batch_size and then it would be /= batch_size^2 here
*/
CUDA_KERNEL_LOOP(i, batch_size * k * out_dim) {
if (cache_corr[i / (k * out_dim)]) {
float res = output_grad[i] * output_grad[i] * batch_size;
float *gate_grad_idx =
full_gate_grads + (i / (out_dim * k)) * n +
expert_assign[(i / (out_dim * k)) * k + (i / out_dim) % k];
atomicAdd(gate_grad_idx, res);
atomicAdd(gate_grad_sum + i / (k * out_dim), res);
}
}
// Compute gate gradients:
// Assigned expert i, sample j: pred(i,j) - err_(i,j)/sum_l err(l,j)
__syncthreads();
CUDA_KERNEL_LOOP(i, k * batch_size) {
if (cache_corr[i / k]) {
full_gate_grads[i / k * n + expert_assign[i]] /= gate_grad_sum[i / k];
full_gate_grads[i / k * n + expert_assign[i]] -= (1.0f - gate_pred[i]);
}
}
// balance term
__syncthreads();
CUDA_KERNEL_LOOP(i, n * batch_size) {
full_gate_grads[i] += lambda_bal * expert_bal[i % n];
}
__syncthreads();
// make 0 mean
CUDA_KERNEL_LOOP(i, n * batch_size) {
int start = (i / n) * n;
float sub = -full_gate_grads[i] / n;
for (int j = 0; j < n; j++) {
atomicAdd(full_gate_grads + start + j, sub);
}
}
}
__device__ void aggspec_backward_kernel_exp(float const *output_grad,
float const *gate_preds,
float **exp_grads,
int batch_size,
int k,
int out_dim) {
// compute expert gradients
CUDA_KERNEL_LOOP(i, k * out_dim * batch_size) {
if (exp_grads[i / out_dim] != 0) {
exp_grads[i / out_dim][i % out_dim] +=
gate_preds[i / out_dim] * output_grad[i];
}
}
}
__global__ void
aggspec_backward_kernel(float **exp_grads,
int const *exp_assign,
int const *true_exp_assign,
float const *gating_net_preds,
float *full_gating_grads,
float const *output_grads,
int n, // num experts
int k, // num chosen experts
int exp_samples, // max samples per expert
float lambda_bal,
int batch_size,
int out_dim) {
__shared__ float
*chosen_exp_grads[AGGREGATE_SPEC_MAX_K * AGGREGATE_SPEC_MAX_BATCH_SIZE];
__shared__ int expert_bal[AGGREGATE_SPEC_MAX_N];
__shared__ bool cache_corr[AGGREGATE_SPEC_MAX_BATCH_SIZE];
// Get pred pointers, single thread per block
if (threadIdx.x == 0) {
// init arrays
for (int i = 0; i < n; i++) {
expert_bal[i] = 0;
}
for (int i = 0; i < batch_size; i++) {
cache_corr[i] = true;
}
// Get pointer to chosen expert grads and expert counts
for (int i = 0; i < batch_size; i++) {
for (int j = 0; j < k; j++) {
int expert = true_exp_assign[k * i + j];
if (expert != exp_assign[k * i + j]) {
cache_corr[i] = false;
}
if (expert_bal[expert] >= exp_samples) {
// dropped sample
chosen_exp_grads[i * k + j] = 0;
expert_bal[expert]++;
continue;
}
chosen_exp_grads[i * k + j] =
exp_grads[expert] + expert_bal[expert] * out_dim;
expert_bal[expert]++;
}
}
}
__syncthreads();
// NOTE: These 2 functions could execute independently in parallel
// get expert gradients
aggspec_backward_kernel_exp(
output_grads, gating_net_preds, chosen_exp_grads, batch_size, k, out_dim);
// get gating net gradients
aggspec_backward_kernel_gate(output_grads,
full_gating_grads,
exp_assign,
cache_corr,
gating_net_preds,
expert_bal,
(lambda_bal * n) / batch_size,
batch_size,
k,
n,
out_dim);
}
/*static*/
void AggregateSpec::forward_kernel_wrapper(AggregateSpecMeta const *m,
float **exp_preds,
int const *acc_gate_assign_ptr,
float *acc_output_ptr,
int n,
int const k,
int rows,
int const batch_size,
int out_dim) {
hipStream_t stream;
checkCUDA(get_legion_stream(&stream));
checkCUDA(hipblasSetStream(m->handle.blas, stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
// call forward kernel
hipMemcpy(m->dev_region_ptrs,
exp_preds,
n * sizeof(float *),
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( aggspec_forward_kernel), dim3(GET_BLOCKS(batch_size * k * out_dim)),
min(CUDA_NUM_THREADS,
(int)(batch_size * k * out_dim)),
0,
stream, m->dev_region_ptrs,
acc_gate_assign_ptr,
acc_output_ptr,
n,
k,
rows,
batch_size,
out_dim);
}
/*static*/
void AggregateSpec::backward_kernel_wrapper(AggregateSpecMeta const *m,
float **exp_grads,
int const *acc_gate_assign_ptr,
int const *acc_true_gate_assign_ptr,
float const *acc_gate_pred_ptr,
float *acc_full_gate_grad_ptr,
float const *acc_output_grad_ptr,
int n,
int const k,
int rows,
float lambda_bal,
int const batch_size,
int out_dim) {
hipStream_t stream;
checkCUDA(get_legion_stream(&stream));
checkCUDA(hipblasSetStream(m->handle.blas, stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
// call backward kernel
hipMemcpy(m->dev_region_ptrs,
exp_grads,
n * sizeof(float *),
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( aggspec_backward_kernel), dim3(GET_BLOCKS(batch_size * k * out_dim)),
min(CUDA_NUM_THREADS,
(int)(batch_size * k * out_dim)),
0,
stream, m->dev_region_ptrs,
acc_gate_assign_ptr,
acc_true_gate_assign_ptr,
acc_gate_pred_ptr,
acc_full_gate_grad_ptr,
acc_output_grad_ptr,
n,
k,
rows,
lambda_bal,
batch_size,
out_dim);
}
AggregateSpecMeta::AggregateSpecMeta(FFHandler handler, int n)
: OpMeta(handler) {
checkCUDA(hipMalloc(&dev_region_ptrs, n * sizeof(float *)));
}
AggregateSpecMeta::~AggregateSpecMeta(void) {
checkCUDA(hipFree(&dev_region_ptrs));
}
}; // namespace FlexFlow
| 6608248a0a452236ca99ae74c8a38c483e1835d2.cu | /* Copyright 2023 CMU, Facebook, LANL, MIT, NVIDIA, and Stanford (alphabetical)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "flexflow/ops/aggregate_spec.h"
#include "flexflow/utils/cuda_helper.h"
namespace FlexFlow {
__global__ void
aggspec_forward_kernel(float **exp_preds,
int const *exp_assign,
float *output,
int n, // num experts
int const k, // num chosen experts
int exp_samples, // max samples per expert
int const batch_size,
int out_dim) {
__shared__ float
*chosen_exp_preds[AGGREGATE_SPEC_MAX_K * AGGREGATE_SPEC_MAX_BATCH_SIZE];
// Get pred pointers, single thread per block
if (threadIdx.x == 0) {
int expert_idx[AGGREGATE_SPEC_MAX_N] = {0};
for (int i = 0; i < batch_size; i++) {
for (int j = 0; j < k; j++) {
// Get pointer to chosen expert predictions
int expert = exp_assign[i * k + j];
if (expert_idx[expert] >= exp_samples) {
// dropped sample
chosen_exp_preds[i * k + j] = 0;
continue;
}
chosen_exp_preds[i * k + j] =
exp_preds[expert] + expert_idx[expert] * out_dim;
expert_idx[expert]++;
}
}
}
__syncthreads();
// compute output
CUDA_KERNEL_LOOP(i, k * batch_size * out_dim) {
if (chosen_exp_preds[i / out_dim] != 0) {
output[i] = chosen_exp_preds[i / out_dim][i % out_dim];
} else {
output[i] = 0.0f;
}
}
}
__device__ void aggspec_backward_kernel_gate(float const *output_grad,
float *full_gate_grads,
int const *expert_assign,
bool const *cache_corr,
float const *gate_pred,
int *expert_bal,
float lambda_bal,
int batch_size,
int k,
int n,
int out_dim) {
__shared__ float gate_grad_sum[AGGREGATE_SPEC_MAX_BATCH_SIZE];
// init gate_grad_sum to 0
CUDA_KERNEL_LOOP(i, batch_size) {
gate_grad_sum[i] = 0.0f;
}
__syncthreads();
// get sum of expert errors
/* NOTE: Errors just squared L2 norm of gradients. * batch_size because the
expert gradients are /= batch_size and then it would be /= batch_size^2 here
*/
CUDA_KERNEL_LOOP(i, batch_size * k * out_dim) {
if (cache_corr[i / (k * out_dim)]) {
float res = output_grad[i] * output_grad[i] * batch_size;
float *gate_grad_idx =
full_gate_grads + (i / (out_dim * k)) * n +
expert_assign[(i / (out_dim * k)) * k + (i / out_dim) % k];
atomicAdd(gate_grad_idx, res);
atomicAdd(gate_grad_sum + i / (k * out_dim), res);
}
}
// Compute gate gradients:
// Assigned expert i, sample j: pred(i,j) - err_(i,j)/sum_l err(l,j)
__syncthreads();
CUDA_KERNEL_LOOP(i, k * batch_size) {
if (cache_corr[i / k]) {
full_gate_grads[i / k * n + expert_assign[i]] /= gate_grad_sum[i / k];
full_gate_grads[i / k * n + expert_assign[i]] -= (1.0f - gate_pred[i]);
}
}
// balance term
__syncthreads();
CUDA_KERNEL_LOOP(i, n * batch_size) {
full_gate_grads[i] += lambda_bal * expert_bal[i % n];
}
__syncthreads();
// make 0 mean
CUDA_KERNEL_LOOP(i, n * batch_size) {
int start = (i / n) * n;
float sub = -full_gate_grads[i] / n;
for (int j = 0; j < n; j++) {
atomicAdd(full_gate_grads + start + j, sub);
}
}
}
__device__ void aggspec_backward_kernel_exp(float const *output_grad,
float const *gate_preds,
float **exp_grads,
int batch_size,
int k,
int out_dim) {
// compute expert gradients
CUDA_KERNEL_LOOP(i, k * out_dim * batch_size) {
if (exp_grads[i / out_dim] != 0) {
exp_grads[i / out_dim][i % out_dim] +=
gate_preds[i / out_dim] * output_grad[i];
}
}
}
__global__ void
aggspec_backward_kernel(float **exp_grads,
int const *exp_assign,
int const *true_exp_assign,
float const *gating_net_preds,
float *full_gating_grads,
float const *output_grads,
int n, // num experts
int k, // num chosen experts
int exp_samples, // max samples per expert
float lambda_bal,
int batch_size,
int out_dim) {
__shared__ float
*chosen_exp_grads[AGGREGATE_SPEC_MAX_K * AGGREGATE_SPEC_MAX_BATCH_SIZE];
__shared__ int expert_bal[AGGREGATE_SPEC_MAX_N];
__shared__ bool cache_corr[AGGREGATE_SPEC_MAX_BATCH_SIZE];
// Get pred pointers, single thread per block
if (threadIdx.x == 0) {
// init arrays
for (int i = 0; i < n; i++) {
expert_bal[i] = 0;
}
for (int i = 0; i < batch_size; i++) {
cache_corr[i] = true;
}
// Get pointer to chosen expert grads and expert counts
for (int i = 0; i < batch_size; i++) {
for (int j = 0; j < k; j++) {
int expert = true_exp_assign[k * i + j];
if (expert != exp_assign[k * i + j]) {
cache_corr[i] = false;
}
if (expert_bal[expert] >= exp_samples) {
// dropped sample
chosen_exp_grads[i * k + j] = 0;
expert_bal[expert]++;
continue;
}
chosen_exp_grads[i * k + j] =
exp_grads[expert] + expert_bal[expert] * out_dim;
expert_bal[expert]++;
}
}
}
__syncthreads();
// NOTE: These 2 functions could execute independently in parallel
// get expert gradients
aggspec_backward_kernel_exp(
output_grads, gating_net_preds, chosen_exp_grads, batch_size, k, out_dim);
// get gating net gradients
aggspec_backward_kernel_gate(output_grads,
full_gating_grads,
exp_assign,
cache_corr,
gating_net_preds,
expert_bal,
(lambda_bal * n) / batch_size,
batch_size,
k,
n,
out_dim);
}
/*static*/
void AggregateSpec::forward_kernel_wrapper(AggregateSpecMeta const *m,
float **exp_preds,
int const *acc_gate_assign_ptr,
float *acc_output_ptr,
int n,
int const k,
int rows,
int const batch_size,
int out_dim) {
cudaStream_t stream;
checkCUDA(get_legion_stream(&stream));
checkCUDA(cublasSetStream(m->handle.blas, stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
// call forward kernel
cudaMemcpy(m->dev_region_ptrs,
exp_preds,
n * sizeof(float *),
cudaMemcpyHostToDevice);
aggspec_forward_kernel<<<GET_BLOCKS(batch_size * k * out_dim),
min(CUDA_NUM_THREADS,
(int)(batch_size * k * out_dim)),
0,
stream>>>(m->dev_region_ptrs,
acc_gate_assign_ptr,
acc_output_ptr,
n,
k,
rows,
batch_size,
out_dim);
}
/*static*/
void AggregateSpec::backward_kernel_wrapper(AggregateSpecMeta const *m,
float **exp_grads,
int const *acc_gate_assign_ptr,
int const *acc_true_gate_assign_ptr,
float const *acc_gate_pred_ptr,
float *acc_full_gate_grad_ptr,
float const *acc_output_grad_ptr,
int n,
int const k,
int rows,
float lambda_bal,
int const batch_size,
int out_dim) {
cudaStream_t stream;
checkCUDA(get_legion_stream(&stream));
checkCUDA(cublasSetStream(m->handle.blas, stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
// call backward kernel
cudaMemcpy(m->dev_region_ptrs,
exp_grads,
n * sizeof(float *),
cudaMemcpyHostToDevice);
aggspec_backward_kernel<<<GET_BLOCKS(batch_size * k * out_dim),
min(CUDA_NUM_THREADS,
(int)(batch_size * k * out_dim)),
0,
stream>>>(m->dev_region_ptrs,
acc_gate_assign_ptr,
acc_true_gate_assign_ptr,
acc_gate_pred_ptr,
acc_full_gate_grad_ptr,
acc_output_grad_ptr,
n,
k,
rows,
lambda_bal,
batch_size,
out_dim);
}
AggregateSpecMeta::AggregateSpecMeta(FFHandler handler, int n)
: OpMeta(handler) {
checkCUDA(cudaMalloc(&dev_region_ptrs, n * sizeof(float *)));
}
AggregateSpecMeta::~AggregateSpecMeta(void) {
checkCUDA(cudaFree(&dev_region_ptrs));
}
}; // namespace FlexFlow
|
b23212c9756ebf1403cdb84a2f78ad0c28d09b65.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <Cuda.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
struct Settings
{
int Width;
int Height;
int Iterations;
float XMin;
float XMax;
float YMin;
float YMax;
float NxFactor;
float NyFactor;
};
__device__ Settings globalSettings;
__device__ void IncreasePixel(Settings* settings, unsigned int* arr, float x, float y)
{
if (x >= settings->XMax || x < settings->XMin)
return;
if (y >= settings->YMax || y < settings->YMin)
return;
int nx = (int)((x - settings->XMin) * settings->NxFactor);
int ny = (int)((y - settings->YMin) * settings->NyFactor);
int idx = nx + ny * settings->Width;
atomicAdd(&arr[idx], 1);
}
__global__ void Init(hiprandState_t* state)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
hiprand_init(idx, 0, 0, &state[idx]);
}
__global__ void SetSettings(Settings* newSettings)
{
globalSettings = *newSettings;
}
__global__ void RunBuddha(unsigned int* array, hiprandState_t *state)
{
Settings settings = globalSettings;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float x = hiprand_uniform(&state[idx]) * 2 * (settings.XMax - settings.XMin) + settings.XMin;
float y = hiprand_uniform(&state[idx]) * 2 * (settings.YMax - settings.YMin) + settings.YMin;
float zr = 0.0;
float zi = 0.0;
float cr = x;
float ci = y;
// check for escape
for (int i = 0; i < settings.Iterations; i++)
{
float zzr = zr * zr - zi * zi;
float zzi = zr * zi + zi * zr;
zr = zzr + cr;
zi = zzi + ci;
if ((zr * zr + zi * zi) > 4)
break;
}
if ((zr * zr + zi * zi) > 4) // did escape
{
zr = 0;
zi = 0;
for (int i = 0; i < settings.Iterations; i++)
{
float zzr = zr * zr - zi * zi;
float zzi = zr * zi + zi * zr;
zr = zzr + cr;
zi = zzi + ci;
if ((zr * zr + zi * zi) > 14)
break;
IncreasePixel(&settings, array, zr, zi);
IncreasePixel(&settings, array, zr, -zi);
}
}
}
| b23212c9756ebf1403cdb84a2f78ad0c28d09b65.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <Cuda.h>
#include <curand.h>
#include <curand_kernel.h>
struct Settings
{
int Width;
int Height;
int Iterations;
float XMin;
float XMax;
float YMin;
float YMax;
float NxFactor;
float NyFactor;
};
__device__ Settings globalSettings;
__device__ void IncreasePixel(Settings* settings, unsigned int* arr, float x, float y)
{
if (x >= settings->XMax || x < settings->XMin)
return;
if (y >= settings->YMax || y < settings->YMin)
return;
int nx = (int)((x - settings->XMin) * settings->NxFactor);
int ny = (int)((y - settings->YMin) * settings->NyFactor);
int idx = nx + ny * settings->Width;
atomicAdd(&arr[idx], 1);
}
__global__ void Init(curandState* state)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
curand_init(idx, 0, 0, &state[idx]);
}
__global__ void SetSettings(Settings* newSettings)
{
globalSettings = *newSettings;
}
__global__ void RunBuddha(unsigned int* array, curandState *state)
{
Settings settings = globalSettings;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float x = curand_uniform(&state[idx]) * 2 * (settings.XMax - settings.XMin) + settings.XMin;
float y = curand_uniform(&state[idx]) * 2 * (settings.YMax - settings.YMin) + settings.YMin;
float zr = 0.0;
float zi = 0.0;
float cr = x;
float ci = y;
// check for escape
for (int i = 0; i < settings.Iterations; i++)
{
float zzr = zr * zr - zi * zi;
float zzi = zr * zi + zi * zr;
zr = zzr + cr;
zi = zzi + ci;
if ((zr * zr + zi * zi) > 4)
break;
}
if ((zr * zr + zi * zi) > 4) // did escape
{
zr = 0;
zi = 0;
for (int i = 0; i < settings.Iterations; i++)
{
float zzr = zr * zr - zi * zi;
float zzi = zr * zi + zi * zr;
zr = zzr + cr;
zi = zzi + ci;
if ((zr * zr + zi * zi) > 14)
break;
IncreasePixel(&settings, array, zr, zi);
IncreasePixel(&settings, array, zr, -zi);
}
}
}
|
7d675b1206311fcf736959409a1ff9eb8cb17cbb.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include <hip/hip_runtime.h>
#include "perform.h"
#include "cuda_common.h"
const int GRID_SIZE = 448;
const int BLOCK_SIZE = 32;
__global__ void sum_dev(double *a, int n, double *out) {
__shared__ double work[BLOCK_SIZE];
const int tidx = threadIdx.x;
const int bidx = blockIdx.x;
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
const int dim = blockDim.x * gridDim.x;
work[tidx] = 0;
for (int i = idx; i < n; i += dim) {
work[tidx] += a[i];
}
__syncthreads();
int w = BLOCK_SIZE;
while (w > 2) {
w /= 2;
if (tidx < w) work[tidx] = work[tidx + w];
__syncthreads();
}
if (tidx == 0) {
out[bidx] = work[0] + work[1];
}
}
double sum(const std::vector<double> &a) {
double *a_dev = NULL;
double ans = 0;
CUDA_CALL(hipMalloc((void**) &a_dev, sizeof(double) * a.size()));
CUDA_CALL(hipMemcpy(
a_dev, a.data(), sizeof(double) * a.size(),
hipMemcpyHostToDevice));
hipLaunchKernelGGL(( sum_dev), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, a_dev, a.size(), a_dev);
CUDA_CHECK();
hipLaunchKernelGGL(( sum_dev), dim3(1), dim3(BLOCK_SIZE), 0, 0, a_dev, GRID_SIZE, a_dev);
CUDA_CHECK();
CUDA_CALL(hipMemcpy(
&ans, a_dev, sizeof(double), hipMemcpyDeviceToHost));
finally:
hipFree(a_dev);
return ans;
}
| 7d675b1206311fcf736959409a1ff9eb8cb17cbb.cu | #include <vector>
#include <cuda.h>
#include "perform.h"
#include "cuda_common.h"
const int GRID_SIZE = 448;
const int BLOCK_SIZE = 32;
__global__ void sum_dev(double *a, int n, double *out) {
__shared__ double work[BLOCK_SIZE];
const int tidx = threadIdx.x;
const int bidx = blockIdx.x;
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
const int dim = blockDim.x * gridDim.x;
work[tidx] = 0;
for (int i = idx; i < n; i += dim) {
work[tidx] += a[i];
}
__syncthreads();
int w = BLOCK_SIZE;
while (w > 2) {
w /= 2;
if (tidx < w) work[tidx] = work[tidx + w];
__syncthreads();
}
if (tidx == 0) {
out[bidx] = work[0] + work[1];
}
}
double sum(const std::vector<double> &a) {
double *a_dev = NULL;
double ans = 0;
CUDA_CALL(cudaMalloc((void**) &a_dev, sizeof(double) * a.size()));
CUDA_CALL(cudaMemcpy(
a_dev, a.data(), sizeof(double) * a.size(),
cudaMemcpyHostToDevice));
sum_dev<<<GRID_SIZE, BLOCK_SIZE>>>(a_dev, a.size(), a_dev);
CUDA_CHECK();
sum_dev<<<1, BLOCK_SIZE>>>(a_dev, GRID_SIZE, a_dev);
CUDA_CHECK();
CUDA_CALL(cudaMemcpy(
&ans, a_dev, sizeof(double), cudaMemcpyDeviceToHost));
finally:
cudaFree(a_dev);
return ans;
}
|
f922625e5311d05eab635a16e888e0bddbfe83f2.hip | // !!! This is a file automatically generated by hipify!!!
#define WIN32_LEAN_AND_MEAN
#include <stdio.h>
#include <assert.h>
#include <string>
#include <hip/hip_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <iostream>
#include <vector>
#include <winsock2.h>
#include <ws2tcpip.h>
#include <windows.h>
#pragma comment (lib, "Ws2_32.lib")
#include "CyQu_bridge.h"
#define DEFAULT_BUFLEN 512
#define DEFAULT_PORT "12008"
using namespace std;
void CySend(SOCKET ClientSocket, string cyData) {
cyData = cyData + "\n";
char *sendData;
sendData = new char[cyData.size() + 1];
memcpy(sendData, cyData.c_str(), cyData.size() + 1);
int iSendResult;
iSendResult = send(ClientSocket, sendData, cyData.size(), 0);
// iSendResult = send( ClientSocket, recvbuf, iResult, 0 );
if (iSendResult == SOCKET_ERROR) {
cout << "Send Failure: " << WSAGetLastError();
closesocket(ClientSocket);
WSACleanup();
return;
}
return;
}
int __cdecl init_Server(void)
{
int CQ_success;
int iResult;
WSADATA wsaData;
SOCKET ListenSocket = INVALID_SOCKET;
SOCKET ClientSocket = INVALID_SOCKET;
struct addrinfo *result = NULL;
struct addrinfo hints;
std::vector<char> myRecv;
char recvbuf[DEFAULT_BUFLEN];
int recvbuflen = DEFAULT_BUFLEN;
// Initialize Winsock
iResult = WSAStartup(MAKEWORD(2,2), &wsaData);
if (iResult != 0) {
printf("WSAStartup failed with error: %d\n", iResult);
return 1;
}
ZeroMemory(&hints, sizeof(hints));
hints.ai_family = AF_INET;
hints.ai_socktype = SOCK_STREAM;
hints.ai_protocol = IPPROTO_TCP;
hints.ai_flags = AI_PASSIVE;
// Resolve the server address and port
iResult = getaddrinfo(NULL, DEFAULT_PORT, &hints, &result);
if ( iResult != 0 ) {
printf("getaddrinfo failed with error: %d\n", iResult);
WSACleanup();
return 1;
}
// Create a SOCKET for connecting to server
ListenSocket = socket(result->ai_family, result->ai_socktype, result->ai_protocol);
if (ListenSocket == INVALID_SOCKET) {
printf("socket failed with error: %ld\n", WSAGetLastError());
freeaddrinfo(result);
WSACleanup();
return 1;
}
// Setup the TCP listening socket
iResult = bind( ListenSocket, result->ai_addr, (int)result->ai_addrlen);
if (iResult == SOCKET_ERROR) {
printf("bind failed with error: %d\n", WSAGetLastError());
freeaddrinfo(result);
closesocket(ListenSocket);
WSACleanup();
return 1;
}
freeaddrinfo(result);
iResult = listen(ListenSocket, SOMAXCONN);
if (iResult == SOCKET_ERROR) {
printf("listen failed with error: %d\n", WSAGetLastError());
closesocket(ListenSocket);
WSACleanup();
return 1;
}
// Accept a client socket
ClientSocket = accept(ListenSocket, NULL, NULL);
if (ClientSocket == INVALID_SOCKET) {
printf("accept failed with error: %d\n", WSAGetLastError());
closesocket(ListenSocket);
WSACleanup();
return 1;
}
// No longer need server socket
closesocket(ListenSocket);
// Receive until the peer shuts down the connection
do {
iResult = recv(ClientSocket, recvbuf, recvbuflen, 0);
if (iResult > 0) {
std::vector<char> vec(recvbuf, recvbuf + iResult);
std::string myRecv(vec.begin(), vec.end());
// END - Terminates connection and program.
if (myRecv == "END\r\n") {
CyQu_EXIT(ClientSocket);
}
std::cout << myRecv;
char split_char = ' ';
std::istringstream split(myRecv);
std::vector<std::string> myCmd2;
for (std::string each; std::getline(split, each, split_char); myCmd2.push_back(each));
if (myCmd2.size() < 2) {
myRecv = "";
myCmd2.resize(0);
continue;
}
// ADD [TABLE] [DATA] - Adds [DATA] to [TABLE] where [DATA] is in 1.1.1 format
if (myCmd2[0] == "ADD") {
CQ_success = CyQu_ADD(ClientSocket, myCmd2[1]);
}
// GET [TABLE] [INDEX] - Retrieves "1.2.3" string of data by [INDEX] starting with integer 1 in the order added.
if (myCmd2[0] == "GET") {
CQ_success = CyQu_GET(ClientSocket, myCmd2[1]);
if (CQ_success == 0) {
cout << "\n Out of Range \n";
CySend(ClientSocket, "CY: OUT OF RANGE");
}
}
// UPDATE [TABLE] - Refreshes [TABLE] arrays in GPU memory, that was inserted by ADD.
if (myCmd2[0] == "UPDATE") {
}
if (myCmd2[0] == "FIND") {
CQ_success = CyQu_FIND(ClientSocket, myCmd2[1]);
}
if (myCmd2[0] == "CLEAR") {
CySend(ClientSocket, "Database Cleared");
CQ_success = CyQu_CLEAR(ClientSocket);
}
// Programmers Notes:
// Using [TABLE] will probably require use of a 2d array and &pointer setup to reference it.
// where [TABLE] is likely defined as an integer index. For now, we will stick to a
// flat array for proof of concept.
// MAKE [TABLE]
// ADD [TABLE] [DATA] - Adds [DATA] to [TABLE] where [DATA] is in 1.1.1 format
// GET [TABLE] [INDEX] - Retrieves "1.2.3" string of data by [INDEX] starting with integer 1 in the order added.
// UPDATE [TABLE] - Refreshes [TABLE] arrays in GPU memory, that was inserted by ADD.
// FIND [TABLE] [DATA] - Searches for [DATA] 1.1.1 which is converted to CPU array, and sent to GPU memory.
// STOP [TABLE] [SEARCH_INDEX] - Stops searching for [DATA] in the provided [SEARCH_INDEX]
// FREE [TABLE] - Frees up [TABLE] array from local memory and GPU memory.
// ACTIVE - Outputs all active search indexes in form of
// :ACTIVE_TOTAL [TOTAL_ACTIVE_SEARCHES]
// :ACTIVE [SEARCH_INDEX] [SEARCH_ARRAY_STRING] [TOTAL_FOUND_RESULTS]
//
// SAVE [TABLE] [FILE] - Saves [TABLE] and outputs [TABLE] and [FILE] saved to.
// LOAD [TABLE] [FILE] - Loads [TABLE] and outputs [TABLE] and [FILE] loaded from.
// END - Terminates connection and program.
}
else if (iResult == 0)
printf("Connection closing...\n");
else {
printf("recv failed with error: %d\n", WSAGetLastError());
closesocket(ClientSocket);
WSACleanup();
return 1;
}
} while (iResult > 0);
// shutdown the connection since we're done
iResult = shutdown(ClientSocket, SD_SEND);
if (iResult == SOCKET_ERROR) {
printf("shutdown failed with error: %d\n", WSAGetLastError());
closesocket(ClientSocket);
WSACleanup();
return 1;
}
// cleanup
closesocket(ClientSocket);
WSACleanup();
return 0;
}
// Main Function.
int main(int argc, const char *argv[]) {
init_Server();
return 0;
}
| f922625e5311d05eab635a16e888e0bddbfe83f2.cu | #define WIN32_LEAN_AND_MEAN
#include <stdio.h>
#include <assert.h>
#include <string>
#include <cuda_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <iostream>
#include <vector>
#include <winsock2.h>
#include <ws2tcpip.h>
#include <windows.h>
#pragma comment (lib, "Ws2_32.lib")
#include "CyQu_bridge.h"
#define DEFAULT_BUFLEN 512
#define DEFAULT_PORT "12008"
using namespace std;
void CySend(SOCKET ClientSocket, string cyData) {
cyData = cyData + "\n";
char *sendData;
sendData = new char[cyData.size() + 1];
memcpy(sendData, cyData.c_str(), cyData.size() + 1);
int iSendResult;
iSendResult = send(ClientSocket, sendData, cyData.size(), 0);
// iSendResult = send( ClientSocket, recvbuf, iResult, 0 );
if (iSendResult == SOCKET_ERROR) {
cout << "Send Failure: " << WSAGetLastError();
closesocket(ClientSocket);
WSACleanup();
return;
}
return;
}
int __cdecl init_Server(void)
{
int CQ_success;
int iResult;
WSADATA wsaData;
SOCKET ListenSocket = INVALID_SOCKET;
SOCKET ClientSocket = INVALID_SOCKET;
struct addrinfo *result = NULL;
struct addrinfo hints;
std::vector<char> myRecv;
char recvbuf[DEFAULT_BUFLEN];
int recvbuflen = DEFAULT_BUFLEN;
// Initialize Winsock
iResult = WSAStartup(MAKEWORD(2,2), &wsaData);
if (iResult != 0) {
printf("WSAStartup failed with error: %d\n", iResult);
return 1;
}
ZeroMemory(&hints, sizeof(hints));
hints.ai_family = AF_INET;
hints.ai_socktype = SOCK_STREAM;
hints.ai_protocol = IPPROTO_TCP;
hints.ai_flags = AI_PASSIVE;
// Resolve the server address and port
iResult = getaddrinfo(NULL, DEFAULT_PORT, &hints, &result);
if ( iResult != 0 ) {
printf("getaddrinfo failed with error: %d\n", iResult);
WSACleanup();
return 1;
}
// Create a SOCKET for connecting to server
ListenSocket = socket(result->ai_family, result->ai_socktype, result->ai_protocol);
if (ListenSocket == INVALID_SOCKET) {
printf("socket failed with error: %ld\n", WSAGetLastError());
freeaddrinfo(result);
WSACleanup();
return 1;
}
// Setup the TCP listening socket
iResult = bind( ListenSocket, result->ai_addr, (int)result->ai_addrlen);
if (iResult == SOCKET_ERROR) {
printf("bind failed with error: %d\n", WSAGetLastError());
freeaddrinfo(result);
closesocket(ListenSocket);
WSACleanup();
return 1;
}
freeaddrinfo(result);
iResult = listen(ListenSocket, SOMAXCONN);
if (iResult == SOCKET_ERROR) {
printf("listen failed with error: %d\n", WSAGetLastError());
closesocket(ListenSocket);
WSACleanup();
return 1;
}
// Accept a client socket
ClientSocket = accept(ListenSocket, NULL, NULL);
if (ClientSocket == INVALID_SOCKET) {
printf("accept failed with error: %d\n", WSAGetLastError());
closesocket(ListenSocket);
WSACleanup();
return 1;
}
// No longer need server socket
closesocket(ListenSocket);
// Receive until the peer shuts down the connection
do {
iResult = recv(ClientSocket, recvbuf, recvbuflen, 0);
if (iResult > 0) {
std::vector<char> vec(recvbuf, recvbuf + iResult);
std::string myRecv(vec.begin(), vec.end());
// END - Terminates connection and program.
if (myRecv == "END\r\n") {
CyQu_EXIT(ClientSocket);
}
std::cout << myRecv;
char split_char = ' ';
std::istringstream split(myRecv);
std::vector<std::string> myCmd2;
for (std::string each; std::getline(split, each, split_char); myCmd2.push_back(each));
if (myCmd2.size() < 2) {
myRecv = "";
myCmd2.resize(0);
continue;
}
// ADD [TABLE] [DATA] - Adds [DATA] to [TABLE] where [DATA] is in 1.1.1 format
if (myCmd2[0] == "ADD") {
CQ_success = CyQu_ADD(ClientSocket, myCmd2[1]);
}
// GET [TABLE] [INDEX] - Retrieves "1.2.3" string of data by [INDEX] starting with integer 1 in the order added.
if (myCmd2[0] == "GET") {
CQ_success = CyQu_GET(ClientSocket, myCmd2[1]);
if (CQ_success == 0) {
cout << "\n Out of Range \n";
CySend(ClientSocket, "CY: OUT OF RANGE");
}
}
// UPDATE [TABLE] - Refreshes [TABLE] arrays in GPU memory, that was inserted by ADD.
if (myCmd2[0] == "UPDATE") {
}
if (myCmd2[0] == "FIND") {
CQ_success = CyQu_FIND(ClientSocket, myCmd2[1]);
}
if (myCmd2[0] == "CLEAR") {
CySend(ClientSocket, "Database Cleared");
CQ_success = CyQu_CLEAR(ClientSocket);
}
// Programmers Notes:
// Using [TABLE] will probably require use of a 2d array and &pointer setup to reference it.
// where [TABLE] is likely defined as an integer index. For now, we will stick to a
// flat array for proof of concept.
// MAKE [TABLE]
// ADD [TABLE] [DATA] - Adds [DATA] to [TABLE] where [DATA] is in 1.1.1 format
// GET [TABLE] [INDEX] - Retrieves "1.2.3" string of data by [INDEX] starting with integer 1 in the order added.
// UPDATE [TABLE] - Refreshes [TABLE] arrays in GPU memory, that was inserted by ADD.
// FIND [TABLE] [DATA] - Searches for [DATA] 1.1.1 which is converted to CPU array, and sent to GPU memory.
// STOP [TABLE] [SEARCH_INDEX] - Stops searching for [DATA] in the provided [SEARCH_INDEX]
// FREE [TABLE] - Frees up [TABLE] array from local memory and GPU memory.
// ACTIVE - Outputs all active search indexes in form of
// :ACTIVE_TOTAL [TOTAL_ACTIVE_SEARCHES]
// :ACTIVE [SEARCH_INDEX] [SEARCH_ARRAY_STRING] [TOTAL_FOUND_RESULTS]
//
// SAVE [TABLE] [FILE] - Saves [TABLE] and outputs [TABLE] and [FILE] saved to.
// LOAD [TABLE] [FILE] - Loads [TABLE] and outputs [TABLE] and [FILE] loaded from.
// END - Terminates connection and program.
}
else if (iResult == 0)
printf("Connection closing...\n");
else {
printf("recv failed with error: %d\n", WSAGetLastError());
closesocket(ClientSocket);
WSACleanup();
return 1;
}
} while (iResult > 0);
// shutdown the connection since we're done
iResult = shutdown(ClientSocket, SD_SEND);
if (iResult == SOCKET_ERROR) {
printf("shutdown failed with error: %d\n", WSAGetLastError());
closesocket(ClientSocket);
WSACleanup();
return 1;
}
// cleanup
closesocket(ClientSocket);
WSACleanup();
return 0;
}
// Main Function.
int main(int argc, const char *argv[]) {
init_Server();
return 0;
}
|
dce465337d7d41d42a43fad3c3c7be2dc6d010fe.hip | // !!! This is a file automatically generated by hipify!!!
// System includes
#include <stdio.h>
#include <math.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
__global__ void myKernel1(kernelParams params, int offsetX, int offsetY, int offsetZ, dim3 gridDim)
{
float *x = (float*)(params.getParameter(0));
long n = params.getParameter<long>(1);
/****************************************************************/
// rebuild blockId
dim3 blockIdx = rebuildBlock(offsetX, offsetY, offsetZ);
/****************************************************************/
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (long i = tid; i < n; i += blockDim.x * gridDim.x) {
x[i] = sqrt(pow(3.14159, (double)x[i]));
}
}
long N = 1048576;
int mainM(int argc, char **argv)
{
/*
srand (time(NULL));
if (checkCmdLineFlag(argc, (const char **)argv, "N"))
{
getCmdLineArgumentValue<long>(argc, (const char **)argv, "N", &N);
}
float *h_myKernel1Data;
h_myKernel1Data = (float*)malloc(N * sizeof(float));
for (long i = 0;i < N;i++)
h_myKernel1Data[i] = rand() * 1000;
float *myKernel1Data;
hipMalloc(&myKernel1Data, N * sizeof(float));
hipError_t error;
error = hipMemcpy(myKernel1Data, h_myKernel1Data, N * sizeof(float), hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
dim3 my1Threads(256, 1);
if (N % my1Threads.x != 0)
{
printf("invalid N\n");
exit(111000);
}
dim3 my1Blocks(sqrt(N / my1Threads.x), sqrt(N / my1Threads.x));
printf("N: %ld, grid(%d,%d), block(%d,%d)\n", N, my1Blocks.x, my1Blocks.y, my1Threads.x, my1Threads.y);
myKernel1<<<my1Blocks, my1Threads>>>(myKernel1Data, N);
error = hipDeviceSynchronize();
hipDeviceReset();
*/
return 0;
} | dce465337d7d41d42a43fad3c3c7be2dc6d010fe.cu |
// System includes
#include <stdio.h>
#include <math.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
__global__ void myKernel1(kernelParams params, int offsetX, int offsetY, int offsetZ, dim3 gridDim)
{
float *x = (float*)(params.getParameter(0));
long n = params.getParameter<long>(1);
/****************************************************************/
// rebuild blockId
dim3 blockIdx = rebuildBlock(offsetX, offsetY, offsetZ);
/****************************************************************/
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (long i = tid; i < n; i += blockDim.x * gridDim.x) {
x[i] = sqrt(pow(3.14159, (double)x[i]));
}
}
long N = 1048576;
int mainM(int argc, char **argv)
{
/*
srand (time(NULL));
if (checkCmdLineFlag(argc, (const char **)argv, "N"))
{
getCmdLineArgumentValue<long>(argc, (const char **)argv, "N", &N);
}
float *h_myKernel1Data;
h_myKernel1Data = (float*)malloc(N * sizeof(float));
for (long i = 0;i < N;i++)
h_myKernel1Data[i] = rand() * 1000;
float *myKernel1Data;
cudaMalloc(&myKernel1Data, N * sizeof(float));
cudaError_t error;
error = cudaMemcpy(myKernel1Data, h_myKernel1Data, N * sizeof(float), cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
dim3 my1Threads(256, 1);
if (N % my1Threads.x != 0)
{
printf("invalid N\n");
exit(111000);
}
dim3 my1Blocks(sqrt(N / my1Threads.x), sqrt(N / my1Threads.x));
printf("N: %ld, grid(%d,%d), block(%d,%d)\n", N, my1Blocks.x, my1Blocks.y, my1Threads.x, my1Threads.y);
myKernel1<<<my1Blocks, my1Threads>>>(myKernel1Data, N);
error = cudaDeviceSynchronize();
cudaDeviceReset();
*/
return 0;
} |
625c0714981de1c60e2d4fb1be5ddf92a7f2ea37.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/****
*
* GPU accelerated Monte Carlo simulation of the 2D Ising model
*
* Copyright (C) 2008 Tobias Preis (http://www.tobiaspreis.de)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, see
* http://www.gnu.org/licenses/.
*
* Related publication:
*
* T. Preis, P. Virnau, W. Paul, and J. J. Schneider,
* Journal of Computational Physics 228, 4468-4477 (2009)
* doi:10.1016/j.jcp.2009.03.018
*
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <cutil.h>
#define FLAG_PRINT_SPINS 0
#define FLAG_ENERGY 0
#define T_START 3.00
#define T_FACTOR 0.9
#define T_END 2.00
#define GLOBAL_ITERATIONS 100
#define RANDOM_A 1664525
#define RANDOM_B 1013904223
#define BLOCK_SIZE 256
const unsigned int N=4*BLOCK_SIZE*BLOCK_SIZE;
const unsigned int n=2*BLOCK_SIZE;
/****
*
* Function declaration
*
*/
void calc(int argc,char** argv);
void cpu_function(double*,int*);
__global__ void device_function_main(int*,int*,int*,float,bool);
/****
*
* Main function
*
*/
int main(int argc,char** argv) {
calc(argc,argv);
}
/****
*
* Calc
*
*/
void calc(int argc,char** argv) {
printf("
----------------------------------------------------------------------- \n");
printf(" *\n");
printf(" * GPU accelerated Monte Carlo simulation of the 2D Ising model\n");
printf(" *\n");
printf(" * Copyright (C) 2008 Tobias Preis (http://www.tobiaspreis.de)\n");
printf(" *\n");
printf(" * This program is free software; you can redistribute it and/or\n");
printf(" * modify it under the terms of the GNU General Public License\n");
printf(" * as published by the Free Software Foundation; either version\n");
printf(" * 3 of the License, or (at your option) any later version.\n");
printf(" *\n");
printf(" * This program is distributed in the hope that it will be useful,\n");
printf(" * but WITHOUT ANY WARRANTY; without even the implied warranty
of\n");
printf(" * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n");
printf(" * GNU General Public License for more details.\n");
printf(" *\n");
printf(" * You should have received a copy of the GNU General Public\n");
printf(" * License along with this program; if not, see\n");
printf(" * http://www.gnu.org/licenses/\n");
printf(" *\n");
printf(" * Related publication:\n");
printf(" *\n");
printf(" * T. Preis, P. Virnau, W. Paul, and J. J. Schneider,\n");
printf(" * Journal of Computational Physics 228, 4468-4477 (2009)\n");
printf(" * doi:10.1016/j.jcp.2009.03.018\n");
printf(" *\n");
printf(" ----------------------------- Ising model
----------------------------- \n");
printf(" Number of Spins: %d \n",N);
printf(" Start Temperature: %f \n",T_START);
printf(" Decreasing Factor: %f \n",T_FACTOR);
printf(" Final Temperature: %f \n",T_END);
printf(" Global Iterations: %d \n",GLOBAL_ITERATIONS);
//Init
CUT_DEVICE_INIT(argc,argv);
srand48(23);
//Allocate and init host memory for output arrays
int num_entries=0;
for(double t=T_START; t>=T_END; t=t*T_FACTOR) num_entries++;
unsigned int mem_out_size=sizeof(float)*num_entries;
float* h_T=(float*) malloc(mem_out_size);
float* h_E=(float*) malloc(mem_out_size);
unsigned int mem_ref_out_size=sizeof(double)*num_entries;
double* h_ref_E=(double*) malloc(mem_ref_out_size);
num_entries=0;
for(double t=T_START; t>=T_END; t=t*T_FACTOR) {
h_T[num_entries]=t;
num_entries++;
}
//Allocate and init host memory for simulation arrays
unsigned int mem_size=sizeof(int)*N;
unsigned int mem_size_random=sizeof(int)*BLOCK_SIZE*BLOCK_SIZE;
int* h_random_data=(int*) malloc(mem_size_random);
int* h_S=(int*) malloc(mem_size);
unsigned int mem_size_out=sizeof(int)*BLOCK_SIZE;
int* h_out=(int*) malloc(mem_size_out);
h_random_data[0]=1;
for(int i=1;i<BLOCK_SIZE*BLOCK_SIZE;i++) {
h_random_data[i]=16807*h_random_data[i-1];
}
for(int i=0;i<N;i++) {
if(drand48()>0.5) h_S[i]=-1;
else h_S[i]=1;
}
//Create and start timer
float gpu_sum=0;
unsigned int timer=0;
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUT_SAFE_CALL(cutStartTimer(timer));
//Allocate device memory for arrays
int* d_random_data;
int* d_S;
int* d_out;
CUDA_SAFE_CALL(hipMalloc((void**) &d_random_data,mem_size_random));
CUDA_SAFE_CALL(hipMalloc((void**) &d_S,mem_size));
CUDA_SAFE_CALL(hipMalloc((void**) &d_out,mem_size_out));
//Stop and destroy timer
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUT_SAFE_CALL(cutStopTimer(timer));
float gpu_dt_malloc=cutGetTimerValue(timer);
gpu_sum+=gpu_dt_malloc;
printf("\n --------------------------------- GPU
--------------------------------- \n");
printf(" Processing time on GPU for allocating: %f (ms) \n",gpu_dt_malloc);
CUT_SAFE_CALL(cutDeleteTimer(timer));
//Create and start timer
timer=0;
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUT_SAFE_CALL(cutStartTimer(timer));
//Copy host memory to device and create mirror of d_S
CUDA_SAFE_CALL(hipMemcpy(d_random_data,h_random_data,mem_size_random,hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_S,h_S,mem_size,hipMemcpyHostToDevice));
//Stop and destroy timer
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUT_SAFE_CALL(cutStopTimer(timer));
float gpu_dt_mem=cutGetTimerValue(timer);
gpu_sum+=gpu_dt_mem;
printf(" Processing time on GPU for memory transfer: %f (ms) \n",gpu_dt_mem);
CUT_SAFE_CALL(cutDeleteTimer(timer));
//Print spins
if(FLAG_PRINT_SPINS) {
CUDA_SAFE_CALL(hipMemcpy(h_S,d_S,mem_size,hipMemcpyDeviceToHost));
for(int i=0;i<BLOCK_SIZE*2;i++) {
for(int j=0;j<BLOCK_SIZE*2;j++) {
if(h_S[i*BLOCK_SIZE*2+j]>0) printf("+ ");
else printf("- ");
}
printf("\n");
}
printf("\n");
}
//Create and start timer
timer=0;
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUT_SAFE_CALL(cutStartTimer(timer));
//Calc energy
num_entries=0;
dim3 threads(BLOCK_SIZE);
dim3 grid(BLOCK_SIZE);
for(float t=T_START;t>=T_END;t=t*T_FACTOR) {
double avg_H=0;
for(int global_iteration=0;global_iteration<GLOBAL_ITERATIONS;global_iteration++) {
hipLaunchKernelGGL(( device_function_main), dim3(grid),dim3(threads), 0, 0, d_S,d_out,d_random_data,t,true);
hipLaunchKernelGGL(( device_function_main), dim3(grid),dim3(threads), 0, 0, d_S,d_out,d_random_data,t,false);
CUDA_SAFE_CALL(hipMemcpy(h_out,d_out,mem_size_out,hipMemcpyDeviceToHost));
int energy_sum=0;
for(int i=0;i<BLOCK_SIZE;i++) energy_sum+=h_out[i];
avg_H+=(float)energy_sum/N;
}
h_E[num_entries]=avg_H/GLOBAL_ITERATIONS;
num_entries++;
}
//Stop and destroy timer
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUT_SAFE_CALL(cutStopTimer(timer));
float gpu_dt_main=cutGetTimerValue(timer);
gpu_sum+=gpu_dt_main;
printf(" Processing time on GPU for main function: %f (ms) \n",gpu_dt_main);
printf(" Total processing time on GPU: %f (ms) \n",gpu_sum);
CUT_SAFE_CALL(cutDeleteTimer(timer));
//Check kernel execution
CUT_CHECK_ERROR("Kernel execution failed");
//Print spins
if(FLAG_PRINT_SPINS) {
CUDA_SAFE_CALL(hipMemcpy(h_S,d_S,mem_size,hipMemcpyDeviceToHost));
for(int i=0;i<BLOCK_SIZE*2;i++) {
for(int j=0;j<BLOCK_SIZE*2;j++) {
if(h_S[i*BLOCK_SIZE*2+j]>0) printf("+ ");
else printf("- ");
}
printf("\n");
}
}
//Create and start timer
timer=0;
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUT_SAFE_CALL(cutStartTimer(timer));
//Reference solution
cpu_function(h_ref_E,h_S);
//Print spins
if(FLAG_PRINT_SPINS) {
printf("\n");
for(int i=0;i<BLOCK_SIZE*2;i++) {
for(int j=0;j<BLOCK_SIZE*2;j++) {
if(h_S[i*BLOCK_SIZE*2+j]>0) printf("+ ");
else printf("- ");
}
printf("\n");
}
}
//Stop and destroy timer
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUT_SAFE_CALL(cutStopTimer(timer));
float cpu_sum=cutGetTimerValue(timer);
printf("\n --------------------------------- CPU
--------------------------------- \n");
printf(" Total processing time on CPU: %f (ms) \n",cpu_sum);
CUT_SAFE_CALL(cutDeleteTimer(timer));
printf("\n Speedup: %fX \n\n",(cpu_sum/gpu_sum));
//Cleaning memory
free(h_T);
free(h_E);
free(h_ref_E);
free(h_random_data);
free(h_S);
free(h_out);
CUDA_SAFE_CALL(hipFree(d_random_data));
CUDA_SAFE_CALL(hipFree(d_S));
CUDA_SAFE_CALL(hipFree(d_out));
}
/****
*
* Device function main
*
*/
__global__ void device_function_main(int* S,int* out,int* R,float t,bool flag) {
//Energy variable
int dH=0;
float exp_dH_4=exp(-(4.0)/t);
float exp_dH_8=exp(-(8.0)/t);
//Allocate shared memory
__shared__ int r[BLOCK_SIZE];
//Load random data
r[threadIdx.x]=R[threadIdx.x+BLOCK_SIZE*blockIdx.x];
__syncthreads();
if(flag) {
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update top left
if(blockIdx.x==0) { //Top
if(threadIdx.x==0) { //Left
dH=2*S[2*threadIdx.x]*(
S[2*threadIdx.x+1]+
S[2*threadIdx.x-1+2*BLOCK_SIZE]+
S[2*threadIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+N-2*BLOCK_SIZE]);
}
else {
dH=2*S[2*threadIdx.x]*(
S[2*threadIdx.x+1]+
S[2*threadIdx.x-1]+
S[2*threadIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+N-2*BLOCK_SIZE]);
}
}
else {
if(threadIdx.x==0) { //Left
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-1+2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
else {
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
}
if(dH==4) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4) {
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x];
}
}
else if(dH==8) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8) {
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x];
}
}
else {
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x];
}
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update bottom right
if(blockIdx.x==BLOCK_SIZE-1) { //Bottom
if(threadIdx.x==BLOCK_SIZE-1) { //Right
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
else {
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
}
else {
if(threadIdx.x==BLOCK_SIZE-1) { //Right
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
else {
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
}
if(dH==4) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4) {
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else if(dH==8) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8) {
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else {
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
__syncthreads();
}
else {
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update top right
if(blockIdx.x==0) { //Top
if(threadIdx.x==BLOCK_SIZE-1) { //Right
dH=2*S[2*threadIdx.x+1]*(
S[2*threadIdx.x+2-2*BLOCK_SIZE]+
S[2*threadIdx.x]+
S[2*threadIdx.x+1+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+N-2*BLOCK_SIZE]);
}
else {
dH=2*S[2*threadIdx.x+1]*(
S[2*threadIdx.x+2]+
S[2*threadIdx.x]+
S[2*threadIdx.x+1+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+N-2*BLOCK_SIZE]);
}
}
else {
if(threadIdx.x==BLOCK_SIZE-1) { //Right
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2-2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
else {
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
}
if(dH==4) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4) {
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x];
}
}
else if(dH==8) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8) {
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x];
}
}
else {
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x];
}
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update bottom left
if(blockIdx.x==BLOCK_SIZE-1) { //Bottom
if(threadIdx.x==0) { //Left
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)-1]+
S[2*threadIdx.x]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
else {
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE-1]+
S[2*threadIdx.x]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
}
else {
if(threadIdx.x==0) { //Left
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)-1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
else {
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE-1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
}
if(dH==4) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4) {
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else if(dH==8) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8) {
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else {
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
//Transfer random data back to global memory
R[threadIdx.x+BLOCK_SIZE*blockIdx.x]=r[threadIdx.x];
if(!flag) {
//For reduction shared memory array r is used
if(FLAG_ENERGY) {
//Calc energy
if(blockIdx.x==BLOCK_SIZE-1) { //Bottom
if(threadIdx.x==BLOCK_SIZE-1) { //Right
dH=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+1-2*BLOCK_SIZE]+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1+2*BLOCK_SIZE]+S[2*threadIdx.x])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+S[2*threadIdx.x+1]);
}
else {
dH=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1+2*BLOCK_SIZE]+S[2*threadIdx.x])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2+2*BLOCK_SIZE]+S[2*threadIdx.x+1]);
}
}
else {
if(threadIdx.x==BLOCK_SIZE-1) { //Right
dH=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+1-2*BLOCK_SIZE]+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1+2*BLOCK_SIZE]+S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]);
}
else {
dH=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1+2*BLOCK_SIZE]+S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2+2*BLOCK_SIZE]+S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]);
}
}
__syncthreads();
}
else {
//Calc magnetisation
dH=S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]
+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]
+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]
+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
__syncthreads();
}
//Save partial results back to shared memory in new structure
r[threadIdx.x]=dH;
//Reduction on GPU
for(unsigned int dx=1;dx<BLOCK_SIZE;dx*=2) {
if(threadIdx.x%(2*dx)==0) {
r[threadIdx.x]+=r[threadIdx.x+dx];
}
__syncthreads();
}
//Save in out
if(threadIdx.x==0) out[blockIdx.x]=r[0];
}
}
/****
*
* CPU function
*
*/
void cpu_function(double* E, int* S) {
int random=23;
int num_entries=0;
for(double t=T_START;t>=T_END;t=t*T_FACTOR) {
double avg_H=0;
double exp_dH_4=exp(-(4.0)/t);
double exp_dH_8=exp(-(8.0)/t);
for(int global_iteration=0;global_iteration<GLOBAL_ITERATIONS;++global_iteration) {
if(FLAG_ENERGY) {
//Energy
double H=0;
for(int x=0;x<n;++x) {
for(int y=0;y<n;++y) {
int xr=x+1,yd=y+1;
if(xr==n) xr=0;
if(yd==n) yd=0;
H+=-S[y*n+x]*(S[y*n+xr]+S[yd*n+x]);
}
}
avg_H+=H/N;
}
else {
//Magnetisation
double H=0;
for(int x=0;x<N;++x) {
H+=S[x];
}
avg_H+=H/N;
}
for(int x=0;x<n;++x) {
for(int y=0;y<n;++y) {
if((y*(n+1)+x)%2==0) {
int xl=x-1,yl=y,xu=x,yu=y-1,xr=x+1,yr=y,xd=x,yd=y+1;
if(x==0) {
xl=n-1;
}
else if(x==n-1) {
xr=0;
}
if(y==0) {
yu=n-1;
}
else if(y==n-1) {
yd=0;
}
//Initial local energy
int dH=2*S[y*n+x]*(
S[yl*n+xl]+
S[yr*n+xr]+
S[yu*n+xu]+
S[yd*n+xd]
);
if(dH==4) {
random=RANDOM_A*random+RANDOM_B;
if(fabs(random*4.656612e-10)<exp_dH_4) {
S[y*n+x]=-S[y*n+x];
}
}
else if(dH==8) {
random=RANDOM_A*random+RANDOM_B;
if(fabs(random*4.656612e-10)<exp_dH_8) {
S[y*n+x]=-S[y*n+x];
}
}
else {
S[y*n+x]=-S[y*n+x];
}
}
}
}
for(int x=0;x<n;++x) {
for(int y=0;y<n;++y) {
if((y*(n+1)+x)%2==1) {
int xl=x-1,yl=y,xu=x,yu=y-1,xr=x+1,yr=y,xd=x,yd=y+1;
if(x==0) {
xl=n-1;
}
else if(x==n-1) {
xr=0;
}
if(y==0) {
yu=n-1;
}
else if(y==n-1) {
yd=0;
}
//Initial local energy
int dH=2*S[y*n+x]*(
S[yl*n+xl]+
S[yr*n+xr]+
S[yu*n+xu]+
S[yd*n+xd]
);
if(dH==4) {
random=RANDOM_A*random+RANDOM_B;
if(fabs(random*4.656612e-10)<exp_dH_4) {
S[y*n+x]=-S[y*n+x];
}
}
else if(dH==8) {
random=RANDOM_A*random+RANDOM_B;
if(fabs(random*4.656612e-10)<exp_dH_8) {
S[y*n+x]=-S[y*n+x];
}
}
else {
S[y*n+x]=-S[y*n+x];
}
}
}
}
}
E[num_entries]=avg_H/GLOBAL_ITERATIONS;
num_entries++;
}
} | 625c0714981de1c60e2d4fb1be5ddf92a7f2ea37.cu | /****
*
* GPU accelerated Monte Carlo simulation of the 2D Ising model
*
* Copyright (C) 2008 Tobias Preis (http://www.tobiaspreis.de)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, see
* http://www.gnu.org/licenses/.
*
* Related publication:
*
* T. Preis, P. Virnau, W. Paul, and J. J. Schneider,
* Journal of Computational Physics 228, 4468-4477 (2009)
* doi:10.1016/j.jcp.2009.03.018
*
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <cutil.h>
#define FLAG_PRINT_SPINS 0
#define FLAG_ENERGY 0
#define T_START 3.00
#define T_FACTOR 0.9
#define T_END 2.00
#define GLOBAL_ITERATIONS 100
#define RANDOM_A 1664525
#define RANDOM_B 1013904223
#define BLOCK_SIZE 256
const unsigned int N=4*BLOCK_SIZE*BLOCK_SIZE;
const unsigned int n=2*BLOCK_SIZE;
/****
*
* Function declaration
*
*/
void calc(int argc,char** argv);
void cpu_function(double*,int*);
__global__ void device_function_main(int*,int*,int*,float,bool);
/****
*
* Main function
*
*/
int main(int argc,char** argv) {
calc(argc,argv);
}
/****
*
* Calc
*
*/
void calc(int argc,char** argv) {
printf("
----------------------------------------------------------------------- \n");
printf(" *\n");
printf(" * GPU accelerated Monte Carlo simulation of the 2D Ising model\n");
printf(" *\n");
printf(" * Copyright (C) 2008 Tobias Preis (http://www.tobiaspreis.de)\n");
printf(" *\n");
printf(" * This program is free software; you can redistribute it and/or\n");
printf(" * modify it under the terms of the GNU General Public License\n");
printf(" * as published by the Free Software Foundation; either version\n");
printf(" * 3 of the License, or (at your option) any later version.\n");
printf(" *\n");
printf(" * This program is distributed in the hope that it will be useful,\n");
printf(" * but WITHOUT ANY WARRANTY; without even the implied warranty
of\n");
printf(" * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n");
printf(" * GNU General Public License for more details.\n");
printf(" *\n");
printf(" * You should have received a copy of the GNU General Public\n");
printf(" * License along with this program; if not, see\n");
printf(" * http://www.gnu.org/licenses/\n");
printf(" *\n");
printf(" * Related publication:\n");
printf(" *\n");
printf(" * T. Preis, P. Virnau, W. Paul, and J. J. Schneider,\n");
printf(" * Journal of Computational Physics 228, 4468-4477 (2009)\n");
printf(" * doi:10.1016/j.jcp.2009.03.018\n");
printf(" *\n");
printf(" ----------------------------- Ising model
----------------------------- \n");
printf(" Number of Spins: %d \n",N);
printf(" Start Temperature: %f \n",T_START);
printf(" Decreasing Factor: %f \n",T_FACTOR);
printf(" Final Temperature: %f \n",T_END);
printf(" Global Iterations: %d \n",GLOBAL_ITERATIONS);
//Init
CUT_DEVICE_INIT(argc,argv);
srand48(23);
//Allocate and init host memory for output arrays
int num_entries=0;
for(double t=T_START; t>=T_END; t=t*T_FACTOR) num_entries++;
unsigned int mem_out_size=sizeof(float)*num_entries;
float* h_T=(float*) malloc(mem_out_size);
float* h_E=(float*) malloc(mem_out_size);
unsigned int mem_ref_out_size=sizeof(double)*num_entries;
double* h_ref_E=(double*) malloc(mem_ref_out_size);
num_entries=0;
for(double t=T_START; t>=T_END; t=t*T_FACTOR) {
h_T[num_entries]=t;
num_entries++;
}
//Allocate and init host memory for simulation arrays
unsigned int mem_size=sizeof(int)*N;
unsigned int mem_size_random=sizeof(int)*BLOCK_SIZE*BLOCK_SIZE;
int* h_random_data=(int*) malloc(mem_size_random);
int* h_S=(int*) malloc(mem_size);
unsigned int mem_size_out=sizeof(int)*BLOCK_SIZE;
int* h_out=(int*) malloc(mem_size_out);
h_random_data[0]=1;
for(int i=1;i<BLOCK_SIZE*BLOCK_SIZE;i++) {
h_random_data[i]=16807*h_random_data[i-1];
}
for(int i=0;i<N;i++) {
if(drand48()>0.5) h_S[i]=-1;
else h_S[i]=1;
}
//Create and start timer
float gpu_sum=0;
unsigned int timer=0;
CUDA_SAFE_CALL(cudaThreadSynchronize());
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUT_SAFE_CALL(cutStartTimer(timer));
//Allocate device memory for arrays
int* d_random_data;
int* d_S;
int* d_out;
CUDA_SAFE_CALL(cudaMalloc((void**) &d_random_data,mem_size_random));
CUDA_SAFE_CALL(cudaMalloc((void**) &d_S,mem_size));
CUDA_SAFE_CALL(cudaMalloc((void**) &d_out,mem_size_out));
//Stop and destroy timer
CUDA_SAFE_CALL(cudaThreadSynchronize());
CUT_SAFE_CALL(cutStopTimer(timer));
float gpu_dt_malloc=cutGetTimerValue(timer);
gpu_sum+=gpu_dt_malloc;
printf("\n --------------------------------- GPU
--------------------------------- \n");
printf(" Processing time on GPU for allocating: %f (ms) \n",gpu_dt_malloc);
CUT_SAFE_CALL(cutDeleteTimer(timer));
//Create and start timer
timer=0;
CUDA_SAFE_CALL(cudaThreadSynchronize());
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUT_SAFE_CALL(cutStartTimer(timer));
//Copy host memory to device and create mirror of d_S
CUDA_SAFE_CALL(cudaMemcpy(d_random_data,h_random_data,mem_size_random,cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_S,h_S,mem_size,cudaMemcpyHostToDevice));
//Stop and destroy timer
CUDA_SAFE_CALL(cudaThreadSynchronize());
CUT_SAFE_CALL(cutStopTimer(timer));
float gpu_dt_mem=cutGetTimerValue(timer);
gpu_sum+=gpu_dt_mem;
printf(" Processing time on GPU for memory transfer: %f (ms) \n",gpu_dt_mem);
CUT_SAFE_CALL(cutDeleteTimer(timer));
//Print spins
if(FLAG_PRINT_SPINS) {
CUDA_SAFE_CALL(cudaMemcpy(h_S,d_S,mem_size,cudaMemcpyDeviceToHost));
for(int i=0;i<BLOCK_SIZE*2;i++) {
for(int j=0;j<BLOCK_SIZE*2;j++) {
if(h_S[i*BLOCK_SIZE*2+j]>0) printf("+ ");
else printf("- ");
}
printf("\n");
}
printf("\n");
}
//Create and start timer
timer=0;
CUDA_SAFE_CALL(cudaThreadSynchronize());
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUT_SAFE_CALL(cutStartTimer(timer));
//Calc energy
num_entries=0;
dim3 threads(BLOCK_SIZE);
dim3 grid(BLOCK_SIZE);
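// BLOCK_SIZE blocks of BLOCK_SIZE threads = N/4 threads in total, i.e. one
// thread per 2x2 cell of the 2*BLOCK_SIZE x 2*BLOCK_SIZE lattice; every
// iteration calls the kernel twice, once per checkerboard sub-lattice (flag).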
for(float t=T_START;t>=T_END;t=t*T_FACTOR) {
double avg_H=0;
for(int global_iteration=0;global_iteration<GLOBAL_ITERATIONS;global_iteration++) {
device_function_main<<<grid,threads>>>(d_S,d_out,d_random_data,t,true);
device_function_main<<<grid,threads>>>(d_S,d_out,d_random_data,t,false);
CUDA_SAFE_CALL(cudaMemcpy(h_out,d_out,mem_size_out,cudaMemcpyDeviceToHost));
int energy_sum=0;
for(int i=0;i<BLOCK_SIZE;i++) energy_sum+=h_out[i];
avg_H+=(float)energy_sum/N;
}
h_E[num_entries]=avg_H/GLOBAL_ITERATIONS;
num_entries++;
}
//Stop and destroy timer
CUDA_SAFE_CALL(cudaThreadSynchronize());
CUT_SAFE_CALL(cutStopTimer(timer));
float gpu_dt_main=cutGetTimerValue(timer);
gpu_sum+=gpu_dt_main;
printf(" Processing time on GPU for main function: %f (ms) \n",gpu_dt_main);
printf(" Total processing time on GPU: %f (ms) \n",gpu_sum);
CUT_SAFE_CALL(cutDeleteTimer(timer));
//Check kernel execution
CUT_CHECK_ERROR("Kernel execution failed");
//Print spins
if(FLAG_PRINT_SPINS) {
CUDA_SAFE_CALL(cudaMemcpy(h_S,d_S,mem_size,cudaMemcpyDeviceToHost));
for(int i=0;i<BLOCK_SIZE*2;i++) {
for(int j=0;j<BLOCK_SIZE*2;j++) {
if(h_S[i*BLOCK_SIZE*2+j]>0) printf("+ ");
else printf("- ");
}
printf("\n");
}
}
//Create and start timer
timer=0;
CUDA_SAFE_CALL(cudaThreadSynchronize());
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUT_SAFE_CALL(cutStartTimer(timer));
//Reference solution
cpu_function(h_ref_E,h_S);
//Print spins
if(FLAG_PRINT_SPINS) {
printf("\n");
for(int i=0;i<BLOCK_SIZE*2;i++) {
for(int j=0;j<BLOCK_SIZE*2;j++) {
if(h_S[i*BLOCK_SIZE*2+j]>0) printf("+ ");
else printf("- ");
}
printf("\n");
}
}
//Stop and destroy timer
CUDA_SAFE_CALL(cudaThreadSynchronize());
CUT_SAFE_CALL(cutStopTimer(timer));
float cpu_sum=cutGetTimerValue(timer);
printf("\n --------------------------------- CPU
--------------------------------- \n");
printf(" Total processing time on CPU: %f (ms) \n",cpu_sum);
CUT_SAFE_CALL(cutDeleteTimer(timer));
printf("\n Speedup: %fX \n\n",(cpu_sum/gpu_sum));
//Cleaning memory
free(h_T);
free(h_E);
free(h_ref_E);
free(h_random_data);
free(h_S);
free(h_out);
CUDA_SAFE_CALL(cudaFree(d_random_data));
CUDA_SAFE_CALL(cudaFree(d_S));
CUDA_SAFE_CALL(cudaFree(d_out));
}
/****
*
* Device function main
*
*/
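// One thread per 2x2 cell of spins: 'flag' selects which checkerboard
// sub-lattice of the cell is updated, so neighbouring spins are never flipped
// in the same pass. Since dH=2*S*(sum of 4 neighbours) can only be 0, +/-4 or
// +/-8, only exp(-4/t) and exp(-8/t) are needed for the Metropolis acceptance
// test; random numbers come from a per-thread LCG (r=RANDOM_A*r+RANDOM_B)
// staged in shared memory and written back to global memory at the end.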
__global__ void device_function_main(int* S,int* out,int* R,float t,bool flag) {
//Energy variable
int dH=0;
float exp_dH_4=exp(-(4.0)/t);
float exp_dH_8=exp(-(8.0)/t);
//Allocate shared memory
__shared__ int r[BLOCK_SIZE];
//Load random data
r[threadIdx.x]=R[threadIdx.x+BLOCK_SIZE*blockIdx.x];
__syncthreads();
if(flag) {
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update top left
if(blockIdx.x==0) { //Top
if(threadIdx.x==0) { //Left
dH=2*S[2*threadIdx.x]*(
S[2*threadIdx.x+1]+
S[2*threadIdx.x-1+2*BLOCK_SIZE]+
S[2*threadIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+N-2*BLOCK_SIZE]);
}
else {
dH=2*S[2*threadIdx.x]*(
S[2*threadIdx.x+1]+
S[2*threadIdx.x-1]+
S[2*threadIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+N-2*BLOCK_SIZE]);
}
}
else {
if(threadIdx.x==0) { //Left
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-1+2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
else {
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
}
if(dH==4) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4) {
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x];
}
}
else if(dH==8) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8) {
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x];
}
}
else {
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x];
}
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update bottom right
if(blockIdx.x==BLOCK_SIZE-1) { //Bottom
if(threadIdx.x==BLOCK_SIZE-1) { //Right
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
else {
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
}
else {
if(threadIdx.x==BLOCK_SIZE-1) { //Right
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
else {
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
}
if(dH==4) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4) {
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else if(dH==8) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8) {
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else {
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
__syncthreads();
}
else {
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update top right
if(blockIdx.x==0) { //Top
if(threadIdx.x==BLOCK_SIZE-1) { //Right
dH=2*S[2*threadIdx.x+1]*(
S[2*threadIdx.x+2-2*BLOCK_SIZE]+
S[2*threadIdx.x]+
S[2*threadIdx.x+1+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+N-2*BLOCK_SIZE]);
}
else {
dH=2*S[2*threadIdx.x+1]*(
S[2*threadIdx.x+2]+
S[2*threadIdx.x]+
S[2*threadIdx.x+1+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+N-2*BLOCK_SIZE]);
}
}
else {
if(threadIdx.x==BLOCK_SIZE-1) { //Right
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2-2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
else {
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
}
if(dH==4) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4) {
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x];
}
}
else if(dH==8) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8) {
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x];
}
}
else {
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x];
}
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update bottom left
if(blockIdx.x==BLOCK_SIZE-1) { //Bottom
if(threadIdx.x==0) { //Left
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)-1]+
S[2*threadIdx.x]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
else {
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE-1]+
S[2*threadIdx.x]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
}
else {
if(threadIdx.x==0) { //Left
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)-1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
else {
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE-1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
}
if(dH==4) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4) {
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else if(dH==8) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8) {
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else {
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
//Transfer random data back to global memory
R[threadIdx.x+BLOCK_SIZE*blockIdx.x]=r[threadIdx.x];
if(!flag) {
//For reduction shared memory array r is used
if(FLAG_ENERGY) {
//Calc energy
if(blockIdx.x==BLOCK_SIZE-1) { //Bottom
if(threadIdx.x==BLOCK_SIZE-1) { //Right
dH=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+1-2*BLOCK_SIZE]+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1+2*BLOCK_SIZE]+S[2*threadIdx.x])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+S[2*threadIdx.x+1]);
}
else {
dH=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1+2*BLOCK_SIZE]+S[2*threadIdx.x])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2+2*BLOCK_SIZE]+S[2*threadIdx.x+1]);
}
}
else {
if(threadIdx.x==BLOCK_SIZE-1) { //Right
dH=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+1-2*BLOCK_SIZE]+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1+2*BLOCK_SIZE]+S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]);
}
else {
dH=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1+2*BLOCK_SIZE]+S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2+2*BLOCK_SIZE]+S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]);
}
}
__syncthreads();
}
else {
//Calc magnetisation
dH=S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]
+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]
+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]
+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
__syncthreads();
}
//Save partial results back to shared memory in new structure
r[threadIdx.x]=dH;
//Reduction on GPU
for(unsigned int dx=1;dx<BLOCK_SIZE;dx*=2) {
if(threadIdx.x%(2*dx)==0) {
r[threadIdx.x]+=r[threadIdx.x+dx];
}
__syncthreads();
}
//Save in out
if(threadIdx.x==0) out[blockIdx.x]=r[0];
}
}
/****
*
* CPU function
*
*/
void cpu_function(double* E, int* S) {
int random=23;
int num_entries=0;
for(double t=T_START;t>=T_END;t=t*T_FACTOR) {
double avg_H=0;
double exp_dH_4=exp(-(4.0)/t);
double exp_dH_8=exp(-(8.0)/t);
for(int global_iteration=0;global_iteration<GLOBAL_ITERATIONS;++global_iteration) {
if(FLAG_ENERGY) {
//Energy
double H=0;
for(int x=0;x<n;++x) {
for(int y=0;y<n;++y) {
int xr=x+1,yd=y+1;
if(xr==n) xr=0;
if(yd==n) yd=0;
H+=-S[y*n+x]*(S[y*n+xr]+S[yd*n+x]);
}
}
avg_H+=H/N;
}
else {
//Magnetisation
double H=0;
for(int x=0;x<N;++x) {
H+=S[x];
}
avg_H+=H/N;
}
for(int x=0;x<n;++x) {
for(int y=0;y<n;++y) {
if((y*(n+1)+x)%2==0) {
int xl=x-1,yl=y,xu=x,yu=y-1,xr=x+1,yr=y,xd=x,yd=y+1;
if(x==0) {
xl=n-1;
}
else if(x==n-1) {
xr=0;
}
if(y==0) {
yu=n-1;
}
else if(y==n-1) {
yd=0;
}
//Initial local energy
int dH=2*S[y*n+x]*(
S[yl*n+xl]+
S[yr*n+xr]+
S[yu*n+xu]+
S[yd*n+xd]
);
if(dH==4) {
random=RANDOM_A*random+RANDOM_B;
if(fabs(random*4.656612e-10)<exp_dH_4) {
S[y*n+x]=-S[y*n+x];
}
}
else if(dH==8) {
random=RANDOM_A*random+RANDOM_B;
if(fabs(random*4.656612e-10)<exp_dH_8) {
S[y*n+x]=-S[y*n+x];
}
}
else {
S[y*n+x]=-S[y*n+x];
}
}
}
}
for(int x=0;x<n;++x) {
for(int y=0;y<n;++y) {
if((y*(n+1)+x)%2==1) {
int xl=x-1,yl=y,xu=x,yu=y-1,xr=x+1,yr=y,xd=x,yd=y+1;
if(x==0) {
xl=n-1;
}
else if(x==n-1) {
xr=0;
}
if(y==0) {
yu=n-1;
}
else if(y==n-1) {
yd=0;
}
//Initial local energy
int dH=2*S[y*n+x]*(
S[yl*n+xl]+
S[yr*n+xr]+
S[yu*n+xu]+
S[yd*n+xd]
);
if(dH==4) {
random=RANDOM_A*random+RANDOM_B;
if(fabs(random*4.656612e-10)<exp_dH_4) {
S[y*n+x]=-S[y*n+x];
}
}
else if(dH==8) {
random=RANDOM_A*random+RANDOM_B;
if(fabs(random*4.656612e-10)<exp_dH_8) {
S[y*n+x]=-S[y*n+x];
}
}
else {
S[y*n+x]=-S[y*n+x];
}
}
}
}
}
E[num_entries]=avg_H/GLOBAL_ITERATIONS;
num_entries++;
}
} |
09072c45b25c1435df404a4d10457b67158f9002.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2020 insaneyilin All Rights Reserved.
*
*
*/
#include "../common/common.h"
#define SIZE (100*1024*1024)
__global__ void histo_kernel(unsigned char *buffer,
long size, unsigned int *histo) {
// calculate the starting index and the offset to the next
// block that each thread will be processing
int i = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
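// Grid-stride loop: the fixed-size grid sweeps the whole buffer, and
// atomicAdd serialises concurrent increments to the same histogram bin.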
while (i < size) {
atomicAdd(&histo[buffer[i]], 1);
i += stride;
}
}
int main(int argc, char **argv) {
// allocate buffer with random values
unsigned char *buffer =
(unsigned char*)big_random_block(SIZE);
for (int i = 0; i < SIZE; ++i) {
buffer[i] = buffer[i] % 256;
}
unsigned int gpu_histo[256];
for (int i = 0; i < 256; ++i) {
gpu_histo[i] = 0;
}
// ------ GPU histogram ------
hipEvent_t start;
hipEvent_t stop;
CHECK_CUDA_ERROR(hipEventCreate(&start));
CHECK_CUDA_ERROR(hipEventCreate(&stop));
CHECK_CUDA_ERROR(hipEventRecord(start, 0));
unsigned char *dev_buffer;
unsigned int *dev_histo;
CHECK_CUDA_ERROR(hipMalloc((void**)&dev_buffer, SIZE));
CHECK_CUDA_ERROR(hipMemcpy(dev_buffer, buffer, SIZE,
hipMemcpyHostToDevice));
CHECK_CUDA_ERROR(hipMalloc((void**)&dev_histo,
256 * sizeof(int)));
CHECK_CUDA_ERROR(hipMemset(dev_histo, 0,
256 * sizeof(int)));
// kernel launch - 2x the number of mps gave best timing
hipDeviceProp_t prop;
CHECK_CUDA_ERROR(hipGetDeviceProperties(&prop, 0));
int blocks = prop.multiProcessorCount;
hipLaunchKernelGGL(( histo_kernel), dim3(blocks * 2), dim3(256), 0, 0, dev_buffer, SIZE, dev_histo);
CHECK_CUDA_ERROR(hipMemcpy(gpu_histo, dev_histo,
256 * sizeof(int),
hipMemcpyDeviceToHost));
// get stop time, and display the timing results
CHECK_CUDA_ERROR(hipEventRecord(stop, 0));
CHECK_CUDA_ERROR(hipEventSynchronize(stop));
float elapsed_time;
CHECK_CUDA_ERROR(hipEventElapsedTime(&elapsed_time, start, stop));
printf("GPU histogram time: %3.1f ms\n", elapsed_time);
long histo_count = 0;
for (int i = 0; i < 256; ++i) {
histo_count += gpu_histo[i];
// printf("%d ", histo[i]);
}
// printf("\n");
printf("GPU Histogram Sum: %ld\n", histo_count);
// ------ CPU histogram ------
unsigned int cpu_histo[256];
for (int i = 0; i < 256; ++i) {
cpu_histo[i] = 0;
}
clock_t cpu_start;
clock_t cpu_stop;
cpu_start = clock();
for (int i = 0; i < SIZE; ++i) {
++cpu_histo[buffer[i]];
}
cpu_stop = clock();
float cpu_elapsed_time = (float)(cpu_stop - cpu_start) /
(float)CLOCKS_PER_SEC * 1000.0f;
printf("CPU histogram time: %3.1f ms\n", cpu_elapsed_time);
histo_count = 0;
for (int i = 0; i < 256; ++i) {
histo_count += cpu_histo[i];
// printf("%d ", histo[i]);
}
// printf("\n");
printf("CPU Histogram Sum: %ld\n", histo_count);
for (int i = 0; i < 256; ++i) {
if (gpu_histo[i] != cpu_histo[i]) {
printf("ERROR! gpu histogram is different with cpu histogram\n");
}
}
printf("gpu histogram is the same with cpu histogram\n");
CHECK_CUDA_ERROR(hipEventDestroy(start));
CHECK_CUDA_ERROR(hipEventDestroy(stop));
hipFree(dev_histo);
hipFree(dev_buffer);
free(buffer);
return 0;
} | 09072c45b25c1435df404a4d10457b67158f9002.cu | /*
* Copyright 2020 insaneyilin All Rights Reserved.
*
*
*/
#include "../common/common.h"
#define SIZE (100*1024*1024)
__global__ void histo_kernel(unsigned char *buffer,
long size, unsigned int *histo) {
// calculate the starting index and the offset to the next
// block that each thread will be processing
int i = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while (i < size) {
atomicAdd(&histo[buffer[i]], 1);
i += stride;
}
}
int main(int argc, char **argv) {
// allocate buffer with random values
unsigned char *buffer =
(unsigned char*)big_random_block(SIZE);
for (int i = 0; i < SIZE; ++i) {
buffer[i] = buffer[i] % 256;
}
unsigned int gpu_histo[256];
for (int i = 0; i < 256; ++i) {
gpu_histo[i] = 0;
}
// ------ GPU histogram ------
cudaEvent_t start;
cudaEvent_t stop;
CHECK_CUDA_ERROR(cudaEventCreate(&start));
CHECK_CUDA_ERROR(cudaEventCreate(&stop));
CHECK_CUDA_ERROR(cudaEventRecord(start, 0));
unsigned char *dev_buffer;
unsigned int *dev_histo;
CHECK_CUDA_ERROR(cudaMalloc((void**)&dev_buffer, SIZE));
CHECK_CUDA_ERROR(cudaMemcpy(dev_buffer, buffer, SIZE,
cudaMemcpyHostToDevice));
CHECK_CUDA_ERROR(cudaMalloc((void**)&dev_histo,
256 * sizeof(int)));
CHECK_CUDA_ERROR(cudaMemset(dev_histo, 0,
256 * sizeof(int)));
// kernel launch - 2x the number of mps gave best timing
cudaDeviceProp prop;
CHECK_CUDA_ERROR(cudaGetDeviceProperties(&prop, 0));
int blocks = prop.multiProcessorCount;
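// Twice as many blocks as SMs (see note above); the grid-stride loop in
// histo_kernel covers the remaining elements of the buffer.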
histo_kernel<<<blocks * 2, 256>>>(dev_buffer, SIZE, dev_histo);
CHECK_CUDA_ERROR(cudaMemcpy(gpu_histo, dev_histo,
256 * sizeof(int),
cudaMemcpyDeviceToHost));
// get stop time, and display the timing results
CHECK_CUDA_ERROR(cudaEventRecord(stop, 0));
CHECK_CUDA_ERROR(cudaEventSynchronize(stop));
float elapsed_time;
CHECK_CUDA_ERROR(cudaEventElapsedTime(&elapsed_time, start, stop));
printf("GPU histogram time: %3.1f ms\n", elapsed_time);
long histo_count = 0;
for (int i = 0; i < 256; ++i) {
histo_count += gpu_histo[i];
// printf("%d ", histo[i]);
}
// printf("\n");
printf("GPU Histogram Sum: %ld\n", histo_count);
// ------ CPU histogram ------
unsigned int cpu_histo[256];
for (int i = 0; i < 256; ++i) {
cpu_histo[i] = 0;
}
clock_t cpu_start;
clock_t cpu_stop;
cpu_start = clock();
for (int i = 0; i < SIZE; ++i) {
++cpu_histo[buffer[i]];
}
cpu_stop = clock();
float cpu_elapsed_time = (float)(cpu_stop - cpu_start) /
(float)CLOCKS_PER_SEC * 1000.0f;
printf("CPU histogram time: %3.1f ms\n", cpu_elapsed_time);
histo_count = 0;
for (int i = 0; i < 256; ++i) {
histo_count += cpu_histo[i];
// printf("%d ", histo[i]);
}
// printf("\n");
printf("CPU Histogram Sum: %ld\n", histo_count);
for (int i = 0; i < 256; ++i) {
if (gpu_histo[i] != cpu_histo[i]) {
printf("ERROR! gpu histogram is different with cpu histogram\n");
}
}
printf("gpu histogram is the same with cpu histogram\n");
CHECK_CUDA_ERROR(cudaEventDestroy(start));
CHECK_CUDA_ERROR(cudaEventDestroy(stop));
cudaFree(dev_histo);
cudaFree(dev_buffer);
free(buffer);
return 0;
} |
2599e40a103b0e52bd4865fc14ec435fb1dd5a25.hip | // !!! This is a file automatically generated by hipify!!!
// =================================================================
//
// File: example6.cu
// Author: Pedro Perez
// Description: This file implements the multiplication of a matrix
// by a vector using CUDA.
//
// Copyright (c) 2020 by Tecnologico de Monterrey.
// All Rights Reserved. May be reproduced for any non-commercial
// purpose.
//
// =================================================================
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <hip/hip_runtime.h>
#include "utils.h"
#define RENS 10000
#define COLS 10000
#define THREADS 256
#define BLOCKS MMIN(32, (((RENS * COLS) / THREADS) + 1))
__global__ void matrix_vector(int *m, int *b, int *c) {
int tid = threadIdx.x + (blockIdx.x * blockDim.x);
int j, sum = 0;
while (tid < RENS){
sum = 0;
for(j = 0; j < COLS; j++) {
sum += (m[(tid * COLS) + j] * b[tid]);
}
c[tid] = sum;
tid += blockDim.x * gridDim.x;
}
}
int main(int argc, char* argv[]) {
int i, j, *m, *b, *c;
int *d_m, *d_b, *d_c;
double ms;
m = (int*) malloc(sizeof(int) * RENS* COLS);
b = (int*) malloc(sizeof(int) * RENS);
c = (int*) malloc(sizeof(int) * RENS);
for (i = 0; i < RENS; i++) {
for (j = 0; j < COLS; j++) {
m[(i * COLS) + j] = (j + 1);
}
b[i] = 1;
}
hipMalloc((void**)&d_m, sizeof(int) * RENS* COLS);
hipMalloc((void**)&d_b, sizeof(int) * RENS);
hipMalloc((void**)&d_c, sizeof(int) * RENS);
hipMemcpy(d_m, m, sizeof(int) * RENS* COLS, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, sizeof(int) * RENS, hipMemcpyHostToDevice);
printf("Starting...\n");
ms = 0;
for (i = 0; i < N; i++) {
start_timer();
hipLaunchKernelGGL(( matrix_vector), dim3(BLOCKS), dim3(THREADS), 0, 0, d_m, d_b, d_c);
ms += stop_timer();
}
hipMemcpy(c, d_c, sizeof(int) * RENS, hipMemcpyDeviceToHost);
display_array("c:", c);
printf("avg time = %.5lf ms\n", (ms / N));
hipFree(d_m); hipFree(d_b); hipFree(d_c);
free(m); free(b); free(c);
return 0;
}
| 2599e40a103b0e52bd4865fc14ec435fb1dd5a25.cu | // =================================================================
//
// File: example6.cu
// Author: Pedro Perez
// Description: This file implements the multiplication of a matrix
// by a vector using CUDA.
//
// Copyright (c) 2020 by Tecnologico de Monterrey.
// All Rights Reserved. May be reproduced for any non-commercial
// purpose.
//
// =================================================================
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <cuda_runtime.h>
#include "utils.h"
#define RENS 10000
#define COLS 10000
#define THREADS 256
#define BLOCKS MMIN(32, (((RENS * COLS) / THREADS) + 1))
__global__ void matrix_vector(int *m, int *b, int *c) {
int tid = threadIdx.x + (blockIdx.x * blockDim.x);
int j, sum = 0;
while (tid < RENS){
sum = 0;
for(j = 0; j < COLS; j++) {
sum += (m[(tid * COLS) + j] * b[tid]);
}
c[tid] = sum;
tid += blockDim.x * gridDim.x;
}
}
int main(int argc, char* argv[]) {
int i, j, *m, *b, *c;
int *d_m, *d_b, *d_c;
double ms;
m = (int*) malloc(sizeof(int) * RENS* COLS);
b = (int*) malloc(sizeof(int) * RENS);
c = (int*) malloc(sizeof(int) * RENS);
for (i = 0; i < RENS; i++) {
for (j = 0; j < COLS; j++) {
m[(i * COLS) + j] = (j + 1);
}
b[i] = 1;
}
cudaMalloc((void**)&d_m, sizeof(int) * RENS* COLS);
cudaMalloc((void**)&d_b, sizeof(int) * RENS);
cudaMalloc((void**)&d_c, sizeof(int) * RENS);
cudaMemcpy(d_m, m, sizeof(int) * RENS* COLS, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(int) * RENS, cudaMemcpyHostToDevice);
printf("Starting...\n");
ms = 0;
for (i = 0; i < N; i++) {
start_timer();
matrix_vector<<<BLOCKS, THREADS>>>(d_m, d_b, d_c);
ms += stop_timer();
}
cudaMemcpy(c, d_c, sizeof(int) * RENS, cudaMemcpyDeviceToHost);
display_array("c:", c);
printf("avg time = %.5lf ms\n", (ms / N));
cudaFree(d_m); cudaFree(d_b); cudaFree(d_c);
free(m); free(b); free(c);
return 0;
}
|
1c722940847ec83299764c5f09c4475e6043498d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ int translate_idx_inv( int ii, int d1, int d2, int d3, int scale_factor, int off_x, int off_y) {
int x, y, z, w;
w = ii % d3;
ii = ii/d3;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
w = w*scale_factor+off_x;
z = z*scale_factor+off_y;
d2 *= scale_factor;
d3 *= scale_factor;
return (((x*d1+y)*d2)+z)*d3+w;
}
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor) {
int x, y, z, w;
w = ii % d3;
ii = ii/d3;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
w = w/scale_factor;
z = z/scale_factor;
d2 /= scale_factor;
d3 /= scale_factor;
return (((x*d1+y)*d2)+z)*d3+w;
}
__device__ __forceinline__ size_t idx(const size_t nc, const size_t height, const size_t width, const size_t y, const size_t x) {
return (nc * height + y) * width + x;
}
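// downscale: each gradInput element accumulates the gradients of the
// scale_factor x scale_factor block it maps to in gradOutput, i.e. the
// backward pass of a nearest-neighbour-style upsampling layer.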
__global__ void downscale(float *gradInput_data, const float *gradOutput_data, long no_elements, int scale_factor, int d1, int d2, int d3) {
long ii = threadIdx.x + blockDim.x * blockIdx.x;
ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;
if (ii >= no_elements) return;
for (int i=0; i < scale_factor; i++){
for(int j=0; j < scale_factor; j++){
int ipidx = translate_idx_inv(ii, d1, d2, d3, scale_factor, i, j);
gradInput_data[ii] += gradOutput_data[ipidx];
}
}
} | 1c722940847ec83299764c5f09c4475e6043498d.cu | #include "includes.h"
__device__ int translate_idx_inv( int ii, int d1, int d2, int d3, int scale_factor, int off_x, int off_y) {
int x, y, z, w;
w = ii % d3;
ii = ii/d3;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
w = w*scale_factor+off_x;
z = z*scale_factor+off_y;
d2 *= scale_factor;
d3 *= scale_factor;
return (((x*d1+y)*d2)+z)*d3+w;
}
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor) {
int x, y, z, w;
w = ii % d3;
ii = ii/d3;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
w = w/scale_factor;
z = z/scale_factor;
d2 /= scale_factor;
d3 /= scale_factor;
return (((x*d1+y)*d2)+z)*d3+w;
}
__device__ __forceinline__ size_t idx(const size_t nc, const size_t height, const size_t width, const size_t y, const size_t x) {
return (nc * height + y) * width + x;
}
__global__ void downscale(float *gradInput_data, const float *gradOutput_data, long no_elements, int scale_factor, int d1, int d2, int d3) {
long ii = threadIdx.x + blockDim.x * blockIdx.x;
ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;
if (ii >= no_elements) return;
for (int i=0; i < scale_factor; i++){
for(int j=0; j < scale_factor; j++){
int ipidx = translate_idx_inv(ii, d1, d2, d3, scale_factor, i, j);
gradInput_data[ii] += gradOutput_data[ipidx];
}
}
} |
05c134d5989c30490966ffd90f029a1df1159522.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Apologies to whoever will have to read this code, I just discovered precompiler macros and I went crazy with it..
*/
#include <chrono>
#include <iostream>
#include <random>
#include <cmath>
#include <atomic>
#include <stdio.h>
#include "Timer.cuh"
#include "CheckError.cuh"
#include <omp.h>
using namespace timer;
// Set PRINT to 1 for debug output
#define PRINT 0
#define FROM_debug 0
#define TO_debug 16
// Set ZEROCOPY to 1 to use Zero Copy Memory Mode, UNIFIED to 1 to use Unified Memory, COPY to 1 to use Copy
#define ZEROCOPY 0
#define UNIFIED 0
#define COPY 1
// Set RESULTCHECK to 1 to verify the result with a single CPU thread
#define RESULTCHECK 1
// Set CPU to 1 to use the CPU concurrently
#define CPU 1
// Set OPENMP to 1 to use more than 1 thread for the CPU
#define OPENMP 1
#define TILE 1024
unsigned int N = 2;
const int POW = 3; // Maximum is 30, anything higher and the system will use swap, making the Cuda kernels crash
const int RUNS = 1;
const int SUMS = 2;
const int BLOCK_SIZE_X = TILE;
const int BLOCK_SIZE_Y = 1;
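// sum_gpu_left updates the odd-indexed elements of the left half of the array
// using values from the right half; sum_gpu_right mirrors this for the even
// indices of the right half. The CPU loops in main() touch the complementary
// elements, so CPU and GPU can work on the same buffer concurrently under
// zero-copy or unified memory.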
__global__
void sum_gpu_left(float* matrix, const int N) {
int row = blockIdx.x * blockDim.x + threadIdx.x;
if (row < N/2) {
if (row % 2 != 0) {
for (int i = 1; i < N/2; i+=2) {
for (int l = 0; l < 2; l++) {
matrix[row] += sqrt(float(matrix[i + N/2]));
}
}
}
}
}
__global__
void sum_gpu_right(float* matrix, const int N) {
int row = blockIdx.x * blockDim.x + threadIdx.x;
if (row >= N/2 && row < N) {
if (row % 2 == 0) {
for (int i = N/2; i < N; i+=2) {
for (int l = 0; l < 2; l++) {
matrix[row] += sqrt(float(matrix[i - N/2]));
}
}
}
}
}
void sum_cpu_only(float * matrix){
#if CPU
for (int i = 0; i < SUMS; i++) {
if (i % 2 != 0) {
for (int j = 0; j < N/2; j++) {
if (j % 2 != 0) {
for (int f = 1; f < N/2; f+=2) {
for (int l = 0; l < 2; l++) {
matrix[j] += sqrt(float(matrix[f+N/2]));
}
}
}
}
for (int j = N/2; j < N; j++) {
if (j % 2 == 0) {
for (int r = 0; r < 1000; r++) {
matrix[j] = sqrt((j+matrix[j])*(matrix[j] / 2.3));
}
}
}
} else {
for (int j = N/2; j < N; j++) {
if (j % 2 == 0) {
for (int f = N/2; f < N; f+=2) {
for (int l = 0; l < 2; l++) {
matrix[j] += sqrt(float(matrix[f-N/2]));
}
}
}
}
for (int j = 0; j < N/2; j++) {
if (j % 2 != 0) {
for (int r = 0; r < 1000; r++) {
matrix[j] = sqrt((j+matrix[j])*(matrix[j] / 2.3));
}
}
}
}
#if PRINT
printf("RUN %d\n", i);
printf("Values from index %d to %d\n", FROM_debug, TO_debug);
printf("H: ");
for (int i = FROM_debug; i < TO_debug; i++) {
if (i % (N/2) == 0) printf("| ");
printf("%.2f ", matrix[i]);
}
printf("\n");
#endif
}
#else
for (int i = 0; i < SUMS; i++) {
for (int j = 0; j < N/2; j++) {
for (int f = 1; f < N/2; f+=2) {
if (j % 2 != 0) {
for (int l = 0; l < 2; l++) {
matrix[j] += sqrt(float(matrix[f+N/2]));
}
}
}
}
for (int j = N/2; j < N; j++) {
for (int f = N/2; f < N; f+=2) {
if (j % 2 == 0) {
for (int l = 0; l < 2; l++) {
matrix[j] += sqrt(float(matrix[f-N/2]));
}
}
}
}
}
#endif
}
int main() {
N = (unsigned int) pow(N, POW);
int grid = N / BLOCK_SIZE_X;
// -------------------------------------------------------------------------
// DEVICE INIT
dim3 DimGrid(grid, 1, 1);
if (N % grid) DimGrid.x++;
dim3 DimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y, 1);
// -------------------------------------------------------------------------
// HOST MEMORY ALLOCATION
float * h_matrix = new float[N];
std::vector<float> results; // Stores computation times for CPU+GPU
std::vector<float> cpu_results; // Stores CPU (only) computation times
std::vector<float> gpu_results; // Stores GPU (only) computation times
// -------------------------------------------------------------------------
#if ZEROCOPY
hipSetDeviceFlags(hipDeviceMapHost);
#endif
for (int z = 0; z < RUNS; z++) {
std::cout << "Run " << z << " --------------------------- ";
if (ZEROCOPY) std::cout << "ZC" << std::endl;
else if(UNIFIED) std::cout << "UM" << std::endl;
else if(COPY) std::cout << "CP" << std::endl;
Timer<HOST> TM;
Timer<HOST> TM_host;
Timer<DEVICE> TM_device;
// -------------------------------------------------------------------------
// DEVICE MEMORY ALLOCATION
float * d_matrix_host;
float * d_matrix;
#if ZEROCOPY
// Zero Copy Allocation
SAFE_CALL(hipHostMalloc((void **)&d_matrix_host, N * sizeof(float), hipHostMallocMapped));
SAFE_CALL(hipHostGetDevicePointer((void **)&d_matrix, (void *) d_matrix_host , 0));
#elif UNIFIED
// Unified Memory Allocation
SAFE_CALL(hipMallocManaged(&d_matrix, N * sizeof(float)));
#elif COPY
// Standard Copy
float * d_matrix_device;
SAFE_CALL(hipMalloc(&d_matrix_device, N * sizeof(float)));
d_matrix = new float[N];
#endif
// -------------------------------------------------------------------------
// MATRIX INITILIZATION
std::cout << "Starting Initialization..." << std::endl;
unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
std::default_random_engine generator(seed);
std::uniform_int_distribution<int> distribution(1, 100);
#if PRINT
int count = 1;
printf("Progress: 0 \%\t");
fflush(stdout);
float arr[8] = {86.0, 47.0, 55.0, 72.0, 53.0, 38.0, 97.0, 93.0};
#endif
for (int i = 0; i < N; i++) {
#if PRINT
float cur_prog = (float) i / (float) N;
if ( cur_prog >= 0.1 * (float) count) {
printf("\rProgress: %.0f \%\t", cur_prog * (float) 100);
fflush(stdout);
count++;
}
#endif
//int temp = distribution(generator);
int temp = arr[i];
h_matrix[i] = temp;
d_matrix[i] = temp;
}
#if PRINT
printf("\r \r");
#endif
// -------------------------------------------------------------------------
// INITILIZATION PRINT (DEBUG)
#if PRINT
printf("Values from index %d to %d\n", FROM_debug, TO_debug);
printf("H: ");
for (int i = FROM_debug; i < TO_debug; i++) {
if (i % (N/2) == 0) printf("| ");
printf("%.2f ", h_matrix[i]);
}
printf("\n");
printf("D: ");
for (int i = FROM_debug; i < TO_debug; i++) {
if (i % (N/2) == 0) printf("| ");
printf("%.2f ", d_matrix[i]);
}
printf("\n");
#endif
std::cout << "Initialization Finished" << std::endl;
// -------------------------------------------------------------------------
// CPU ONLY EXECUTION
#if RESULTCHECK
std::cout << "Starting computation (1T - NO GPU)..." << std::endl;
sum_cpu_only(h_matrix);
#endif
// -------------------------------------------------------------------------
// DEVICE EXECUTION
std::cout << "Starting computation (GPU+CPU)..." << std::endl;
TM.start();
#if CPU
for (int i = 0; i < SUMS; i++) {
if (i % 2 != 0) {
#if COPY
SAFE_CALL(hipMemcpy(d_matrix_device, d_matrix, N * sizeof(int), hipMemcpyHostToDevice));
TM_device.start();
sum_gpu_left << < DimGrid, DimBlock >> > (d_matrix_device, N);
TM_device.stop();
CHECK_CUDA_ERROR
SAFE_CALL(hipMemcpy(d_matrix, d_matrix_device, N * sizeof(int), hipMemcpyDeviceToHost));
#else
TM_device.start();
sum_gpu_left << < DimGrid, DimBlock >> > (d_matrix, N);
TM_device.stop();
#endif
#if UNIFIED
// This macro includes hipDeviceSynchronize(), which makes the program work on the data in lockstep
CHECK_CUDA_ERROR
#endif
TM_host.start();
#if OPENMP
#pragma omp parallel for
#endif
for (int j = N/2; j < N; j++) {
if (j % 2 == 0) {
//__sync_fetch_and_add(&d_matrix[j], 1);
for (int r = 0; r < 1000; r++) {
d_matrix[j] = sqrt((j+d_matrix[j])*(d_matrix[j] / 2.3));
}
//printf("cpu right: %d\n", j);
}
}
TM_host.stop();
} else {
#if COPY
SAFE_CALL(hipMemcpy(d_matrix_device, d_matrix, N * sizeof(int), hipMemcpyHostToDevice));
TM_device.start();
sum_gpu_right << < DimGrid, DimBlock >> > (d_matrix_device, N);
TM_device.stop();
CHECK_CUDA_ERROR
SAFE_CALL(hipMemcpy(d_matrix, d_matrix_device, N * sizeof(int), hipMemcpyDeviceToHost));
#else
TM_device.start();
sum_gpu_right << < DimGrid, DimBlock >> > (d_matrix, N);
TM_device.stop();
#endif
#if UNIFIED
CHECK_CUDA_ERROR
#endif
TM_host.start();
#if OPENMP
#pragma omp parallel for
#endif
for (int j = 0; j < N/2; j++) {
if (j % 2 != 0) {
//__sync_fetch_and_add(&d_matrix[j], 1);
for (int r = 0; r < 1000; r++) {
d_matrix[j] = sqrt((j+d_matrix[j])*(d_matrix[j] / 2.3));
}
//printf("cpu left: %d\n", j);
}
}
TM_host.stop();
}
// Synchronization needed to avoid race conditions (after the CPU and GPU have done their sides, we need to sync)
#if ZEROCOPY
CHECK_CUDA_ERROR
#endif
// -------------------------------------------------------------------------
// PARTIAL RESULT PRINT (DEBUG)
#if PRINT
printf("RUN %d\n", i);
printf("Values from index %d to %d\n", FROM_debug, TO_debug);
printf("D: ");
for (int i = FROM_debug; i < TO_debug; i++) {
if (i % (N/2) == 0) printf("| ");
printf("%.2f ", d_matrix[i]);
}
printf("\n");
#endif
// -------------------------------------------------------------------------
}
#else
#if COPY
SAFE_CALL(hipMemcpy(d_matrix_device, d_matrix, N * sizeof(int), hipMemcpyHostToDevice));
#endif
for (int i = 0; i < SUMS; i++) {
#if COPY
sum_gpu_left << < DimGrid, DimBlock >> > (d_matrix_device, N);
sum_gpu_right << < DimGrid, DimBlock >> > (d_matrix_device, N);
#else
sum_gpu_left << < DimGrid, DimBlock >> > (d_matrix, N);
sum_gpu_right << < DimGrid, DimBlock >> > (d_matrix, N);
#endif
}
#endif
#if COPY && !CPU
SAFE_CALL(hipMemcpy(d_matrix, d_matrix_device, N * sizeof(int), hipMemcpyDeviceToHost));
#endif
CHECK_CUDA_ERROR
TM.stop();
// -------------------------------------------------------------------------
// RESULT PRINT (DEBUG)
#if PRINT
printf("Values from index %d to %d\n", FROM_debug, TO_debug);
printf("H: ");
for (int i = FROM_debug; i < TO_debug; i++) {
if (i % (N/2) == 0) printf("| ");
printf("%.2f ", h_matrix[i]);
}
printf("\n");
printf("D: ");
for (int i = FROM_debug; i < TO_debug; i++) {
if (i % (N/2) == 0) printf("| ");
printf("%.2f ", d_matrix[i]);
}
printf("\n");
#endif
cpu_results.push_back(TM_host.total_duration());
gpu_results.push_back(TM_device.total_duration());
results.push_back(TM.total_duration());
// -------------------------------------------------------------------------
// RESULT CHECK
#if RESULTCHECK
for (int i = 0; i < N; i++) {
if (h_matrix[i] != d_matrix[i]) {
std::cerr << ">< wrong result at: "
<< (i)
<< "\n\thost: " << h_matrix[i]
<< "\n\tdevice: " << d_matrix[i] << "\n";
#if PRINT
int err_min = i-5;
int err_max = i+5;
if (err_min < 0) err_min = 0;
if (err_max > N) err_max = N;
printf("Values from index %d to %d\n", err_min, err_max);
printf("\tH: ");
for (int j = err_min; j < err_max; j++) {
printf("%.2f ", h_matrix[j]);
}
printf("\n");
printf("\tD: ");
for (int j = err_min; j < err_max; j++) {
printf("%.2f ", d_matrix[j]);
}
printf("\n\n");
#endif
hipDeviceReset();
std::exit(EXIT_FAILURE);
}
}
std::cout << "<> Correct\n\n";
#endif
// -------------------------------------------------------------------------
// DEVICE MEMORY DEALLOCATION
#if ZEROCOPY
SAFE_CALL(hipHostFree(d_matrix));
#elif UNIFIED
SAFE_CALL(hipFree(d_matrix));
#elif COPY
SAFE_CALL(hipFree(d_matrix_device));
#endif
}
// -------------------------------------------------------------------------
hipDeviceReset();
delete[] h_matrix;
// -------------------------------------------------------------------------
std::cout << "Average ";
if (ZEROCOPY) std::cout << "ZC";
else if(UNIFIED) std::cout << "UM";
else if(COPY) std::cout << "CP";
std::cout << " Run time: " << std::accumulate(results.begin(), results.end(), 0) / float(RUNS) << " ms - ";
std::cout << "CPU time only " << std::accumulate(cpu_results.begin(), cpu_results.end(), 0) / float(RUNS) << " ms - ";
std::cout << "GPU kernel time " << std::accumulate(gpu_results.begin(), gpu_results.end(), 0) / float(RUNS*SUMS) << " ms" << std::endl;
}
| 05c134d5989c30490966ffd90f029a1df1159522.cu | /*
* Apologies to whoever will have to read this code, I just discovered precompiler macros and I went crazy with it..
*/
#include <chrono>
#include <iostream>
#include <random>
#include <cmath>
#include <atomic>
#include <stdio.h>
#include "Timer.cuh"
#include "CheckError.cuh"
#include <omp.h>
using namespace timer;
// Set PRINT to 1 for debug output
#define PRINT 0
#define FROM_debug 0
#define TO_debug 16
// Set ZEROCOPY to 1 to use Zero Copy Memory Mode, UNIFIED to 1 to use Unified Memory, COPY to 1 to use Copy
#define ZEROCOPY 0
#define UNIFIED 0
#define COPY 1
// Set RESULTCHECK to 1 to verify the result with a single CPU thread
#define RESULTCHECK 1
// Set CPU to 1 to use the CPU concurrently
#define CPU 1
// Set OPENMP to 1 to use more than 1 thread for the CPU
#define OPENMP 1
#define TILE 1024
unsigned int N = 2;
const int POW = 3; // Maximum is 30, anything higher and the system will use swap, making the Cuda kernels crash
const int RUNS = 1;
const int SUMS = 2;
const int BLOCK_SIZE_X = TILE;
const int BLOCK_SIZE_Y = 1;
__global__
void sum_gpu_left(float* matrix, const int N) {
int row = blockIdx.x * blockDim.x + threadIdx.x;
if (row < N/2) {
if (row % 2 != 0) {
for (int i = 1; i < N/2; i+=2) {
for (int l = 0; l < 2; l++) {
matrix[row] += sqrt(float(matrix[i + N/2]));
}
}
}
}
}
__global__
void sum_gpu_right(float* matrix, const int N) {
int row = blockIdx.x * blockDim.x + threadIdx.x;
if (row >= N/2 && row < N) {
if (row % 2 == 0) {
for (int i = N/2; i < N; i+=2) {
for (int l = 0; l < 2; l++) {
matrix[row] += sqrt(float(matrix[i - N/2]));
}
}
}
}
}
void sum_cpu_only(float * matrix){
#if CPU
for (int i = 0; i < SUMS; i++) {
if (i % 2 != 0) {
for (int j = 0; j < N/2; j++) {
if (j % 2 != 0) {
for (int f = 1; f < N/2; f+=2) {
for (int l = 0; l < 2; l++) {
matrix[j] += sqrt(float(matrix[f+N/2]));
}
}
}
}
for (int j = N/2; j < N; j++) {
if (j % 2 == 0) {
for (int r = 0; r < 1000; r++) {
matrix[j] = sqrt((j+matrix[j])*(matrix[j] / 2.3));
}
}
}
} else {
for (int j = N/2; j < N; j++) {
if (j % 2 == 0) {
for (int f = N/2; f < N; f+=2) {
for (int l = 0; l < 2; l++) {
matrix[j] += sqrt(float(matrix[f-N/2]));
}
}
}
}
for (int j = 0; j < N/2; j++) {
if (j % 2 != 0) {
for (int r = 0; r < 1000; r++) {
matrix[j] = sqrt((j+matrix[j])*(matrix[j] / 2.3));
}
}
}
}
#if PRINT
printf("RUN %d\n", i);
printf("Values from index %d to %d\n", FROM_debug, TO_debug);
printf("H: ");
for (int i = FROM_debug; i < TO_debug; i++) {
if (i % (N/2) == 0) printf("| ");
printf("%.2f ", matrix[i]);
}
printf("\n");
#endif
}
#else
for (int i = 0; i < SUMS; i++) {
for (int j = 0; j < N/2; j++) {
for (int f = 1; f < N/2; f+=2) {
if (j % 2 != 0) {
for (int l = 0; l < 2; l++) {
matrix[j] += sqrt(float(matrix[f+N/2]));
}
}
}
}
for (int j = N/2; j < N; j++) {
for (int f = N/2; f < N; f+=2) {
if (j % 2 == 0) {
for (int l = 0; l < 2; l++) {
matrix[j] += sqrt(float(matrix[f-N/2]));
}
}
}
}
}
#endif
}
int main() {
N = (unsigned int) pow(N, POW);
int grid = N / BLOCK_SIZE_X;
// -------------------------------------------------------------------------
// DEVICE INIT
dim3 DimGrid(grid, 1, 1);
if (N % grid) DimGrid.x++;
dim3 DimBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y, 1);
// -------------------------------------------------------------------------
// HOST MEMORY ALLOCATION
float * h_matrix = new float[N];
std::vector<float> results; // Stores computation times for CPU+GPU
std::vector<float> cpu_results; // Stores CPU (only) computation times
std::vector<float> gpu_results; // Stores GPU (only) computation times
// -------------------------------------------------------------------------
#if ZEROCOPY
cudaSetDeviceFlags(cudaDeviceMapHost);
#endif
for (int z = 0; z < RUNS; z++) {
std::cout << "Run " << z << " --------------------------- ";
if (ZEROCOPY) std::cout << "ZC" << std::endl;
else if(UNIFIED) std::cout << "UM" << std::endl;
else if(COPY) std::cout << "CP" << std::endl;
Timer<HOST> TM;
Timer<HOST> TM_host;
Timer<DEVICE> TM_device;
// -------------------------------------------------------------------------
// DEVICE MEMORY ALLOCATION
float * d_matrix_host;
float * d_matrix;
#if ZEROCOPY
// Zero Copy Allocation
SAFE_CALL(cudaHostAlloc((void **)&d_matrix_host, N * sizeof(float), cudaHostAllocMapped));
SAFE_CALL(cudaHostGetDevicePointer((void **)&d_matrix, (void *) d_matrix_host , 0));
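// d_matrix now aliases the pinned host allocation, so kernels read and write
// host memory directly instead of operating on a separate device copy.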
#elif UNIFIED
// Unified Memory Allocation
SAFE_CALL(cudaMallocManaged(&d_matrix, N * sizeof(float)));
#elif COPY
// Standard Copy
float * d_matrix_device;
SAFE_CALL(cudaMalloc(&d_matrix_device, N * sizeof(float)));
d_matrix = new float[N];
#endif
// -------------------------------------------------------------------------
// MATRIX INITILIZATION
std::cout << "Starting Initialization..." << std::endl;
unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
std::default_random_engine generator(seed);
std::uniform_int_distribution<int> distribution(1, 100);
#if PRINT
int count = 1;
printf("Progress: 0 \%\t");
fflush(stdout);
float arr[8] = {86.0, 47.0, 55.0, 72.0, 53.0, 38.0, 97.0, 93.0};
#endif
for (int i = 0; i < N; i++) {
#if PRINT
float cur_prog = (float) i / (float) N;
if ( cur_prog >= 0.1 * (float) count) {
printf("\rProgress: %.0f \%\t", cur_prog * (float) 100);
fflush(stdout);
count++;
}
#endif
//int temp = distribution(generator);
int temp = arr[i];
h_matrix[i] = temp;
d_matrix[i] = temp;
}
#if PRINT
printf("\r \r");
#endif
// -------------------------------------------------------------------------
// INITILIZATION PRINT (DEBUG)
#if PRINT
printf("Values from index %d to %d\n", FROM_debug, TO_debug);
printf("H: ");
for (int i = FROM_debug; i < TO_debug; i++) {
if (i % (N/2) == 0) printf("| ");
printf("%.2f ", h_matrix[i]);
}
printf("\n");
printf("D: ");
for (int i = FROM_debug; i < TO_debug; i++) {
if (i % (N/2) == 0) printf("| ");
printf("%.2f ", d_matrix[i]);
}
printf("\n");
#endif
std::cout << "Initialization Finished" << std::endl;
// -------------------------------------------------------------------------
// CPU ONLY EXECUTION
#if RESULTCHECK
std::cout << "Starting computation (1T - NO GPU)..." << std::endl;
sum_cpu_only(h_matrix);
#endif
// -------------------------------------------------------------------------
// DEVICE EXECUTION
std::cout << "Starting computation (GPU+CPU)..." << std::endl;
TM.start();
#if CPU
for (int i = 0; i < SUMS; i++) {
if (i % 2 != 0) {
#if COPY
SAFE_CALL(cudaMemcpy(d_matrix_device, d_matrix, N * sizeof(int), cudaMemcpyHostToDevice));
TM_device.start();
sum_gpu_left << < DimGrid, DimBlock >> > (d_matrix_device, N);
TM_device.stop();
CHECK_CUDA_ERROR
SAFE_CALL(cudaMemcpy(d_matrix, d_matrix_device, N * sizeof(int), cudaMemcpyDeviceToHost));
#else
TM_device.start();
sum_gpu_left << < DimGrid, DimBlock >> > (d_matrix, N);
TM_device.stop();
#endif
#if UNIFIED
// This macro includes cudaDeviceSynchronize(), which makes the program work on the data in lockstep
CHECK_CUDA_ERROR
#endif
TM_host.start();
#if OPENMP
#pragma omp parallel for
#endif
for (int j = N/2; j < N; j++) {
if (j % 2 == 0) {
//__sync_fetch_and_add(&d_matrix[j], 1);
for (int r = 0; r < 1000; r++) {
d_matrix[j] = sqrt((j+d_matrix[j])*(d_matrix[j] / 2.3));
}
//printf("cpu right: %d\n", j);
}
}
TM_host.stop();
} else {
#if COPY
SAFE_CALL(cudaMemcpy(d_matrix_device, d_matrix, N * sizeof(int), cudaMemcpyHostToDevice));
TM_device.start();
sum_gpu_right << < DimGrid, DimBlock >> > (d_matrix_device, N);
TM_device.stop();
CHECK_CUDA_ERROR
SAFE_CALL(cudaMemcpy(d_matrix, d_matrix_device, N * sizeof(int), cudaMemcpyDeviceToHost));
#else
TM_device.start();
sum_gpu_right << < DimGrid, DimBlock >> > (d_matrix, N);
TM_device.stop();
#endif
#if UNIFIED
CHECK_CUDA_ERROR
#endif
TM_host.start();
#if OPENMP
#pragma omp parallel for
#endif
for (int j = 0; j < N/2; j++) {
if (j % 2 != 0) {
//__sync_fetch_and_add(&d_matrix[j], 1);
for (int r = 0; r < 1000; r++) {
d_matrix[j] = sqrt((j+d_matrix[j])*(d_matrix[j] / 2.3));
}
//printf("cpu left: %d\n", j);
}
}
TM_host.stop();
}
// Synchronization needed to avoid race conditions (after the CPU and GPU have done their sides, we need to sync)
#if ZEROCOPY
CHECK_CUDA_ERROR
#endif
// -------------------------------------------------------------------------
// PARTIAL RESULT PRINT (DEBUG)
#if PRINT
printf("RUN %d\n", i);
printf("Values from index %d to %d\n", FROM_debug, TO_debug);
printf("D: ");
for (int i = FROM_debug; i < TO_debug; i++) {
if (i % (N/2) == 0) printf("| ");
printf("%.2f ", d_matrix[i]);
}
printf("\n");
#endif
// -------------------------------------------------------------------------
}
#else
#if COPY
SAFE_CALL(cudaMemcpy(d_matrix_device, d_matrix, N * sizeof(int), cudaMemcpyHostToDevice));
#endif
for (int i = 0; i < SUMS; i++) {
#if COPY
sum_gpu_left << < DimGrid, DimBlock >> > (d_matrix_device, N);
sum_gpu_right << < DimGrid, DimBlock >> > (d_matrix_device, N);
#else
sum_gpu_left << < DimGrid, DimBlock >> > (d_matrix, N);
sum_gpu_right << < DimGrid, DimBlock >> > (d_matrix, N);
#endif
}
#endif
#if COPY && !CPU
SAFE_CALL(cudaMemcpy(d_matrix, d_matrix_device, N * sizeof(int), cudaMemcpyDeviceToHost));
#endif
CHECK_CUDA_ERROR
TM.stop();
// -------------------------------------------------------------------------
// RESULT PRINT (DEBUG)
#if PRINT
printf("Values from index %d to %d\n", FROM_debug, TO_debug);
printf("H: ");
for (int i = FROM_debug; i < TO_debug; i++) {
if (i % (N/2) == 0) printf("| ");
printf("%.2f ", h_matrix[i]);
}
printf("\n");
printf("D: ");
for (int i = FROM_debug; i < TO_debug; i++) {
if (i % (N/2) == 0) printf("| ");
printf("%.2f ", d_matrix[i]);
}
printf("\n");
#endif
cpu_results.push_back(TM_host.total_duration());
gpu_results.push_back(TM_device.total_duration());
results.push_back(TM.total_duration());
// -------------------------------------------------------------------------
// RESULT CHECK
#if RESULTCHECK
for (int i = 0; i < N; i++) {
if (h_matrix[i] != d_matrix[i]) {
std::cerr << ">< wrong result at: "
<< (i)
<< "\n\thost: " << h_matrix[i]
<< "\n\tdevice: " << d_matrix[i] << "\n";
#if PRINT
int err_min = i-5;
int err_max = i+5;
if (err_min < 0) err_min = 0;
if (err_max > N) err_max = N;
printf("Values from index %d to %d\n", err_min, err_max);
printf("\tH: ");
for (int j = err_min; j < err_max; j++) {
printf("%.2f ", h_matrix[j]);
}
printf("\n");
printf("\tD: ");
for (int j = err_min; j < err_max; j++) {
printf("%.2f ", d_matrix[j]);
}
printf("\n\n");
#endif
cudaDeviceReset();
std::exit(EXIT_FAILURE);
}
}
std::cout << "<> Correct\n\n";
#endif
// -------------------------------------------------------------------------
// DEVICE MEMORY DEALLOCATION
#if ZEROCOPY
SAFE_CALL(cudaFreeHost(d_matrix));
#elif UNIFIED
SAFE_CALL(cudaFree(d_matrix));
#elif COPY
SAFE_CALL(cudaFree(d_matrix_device));
#endif
}
// -------------------------------------------------------------------------
cudaDeviceReset();
delete[] h_matrix;
// -------------------------------------------------------------------------
std::cout << "Average ";
if (ZEROCOPY) std::cout << "ZC";
else if(UNIFIED) std::cout << "UM";
else if(COPY) std::cout << "CP";
std::cout << " Run time: " << std::accumulate(results.begin(), results.end(), 0) / float(RUNS) << " ms - ";
std::cout << "CPU time only " << std::accumulate(cpu_results.begin(), cpu_results.end(), 0) / float(RUNS) << " ms - ";
std::cout << "GPU kernel time " << std::accumulate(gpu_results.begin(), gpu_results.end(), 0) / float(RUNS*SUMS) << " ms" << std::endl;
}
|
301badee4c1243076805728c502163b907495aef.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by brian on 11/20/18.
//
#include "complexCUDA.cuh"
#include <iostream>
#include <hip/hip_runtime.h>
#include <cmath>
const float PI = 3.14159265358979f;
Complex::Complex() : real(0.0f), imag(0.0f) {}
Complex::Complex(float r) : real(r), imag(0.0f) {}
Complex::Complex(float r, float i) : real(r), imag(i) {}
//add complex numbers
Complex Complex::operator+(const Complex &b) const {
return Complex(real + b.real, imag + b.imag);
}
// subtract complex numbers
Complex Complex::operator-(const Complex &b) const {
return Complex(real - b.real, imag - b.imag);
}
Complex Complex::operator*(const Complex &b) const {
return Complex(real*b.real-imag*b.imag, real*b.imag+imag*b.real);
}
Complex Complex::mag() const {
return Complex(sqrt(real*real+imag*imag));
}
// tan^-1(b/a) in degrees
Complex Complex::angle() const {
return Complex(atan2(imag,real) * 360/(2*PI));
}
Complex Complex::conj() const {
return Complex(real, -imag);
}
std::ostream& operator<< (std::ostream& os, const Complex& rhs) {
Complex c(rhs);
if(fabsf(rhs.imag) < 1e-10) c.imag = 0.0f;
if(fabsf(rhs.real) < 1e-10) c.real = 0.0f;
if(c.imag == 0) {
os << c.real;
}
else {
os << "(" << c.real << "," << c.imag << ")";
}
return os;
} | 301badee4c1243076805728c502163b907495aef.cu | //
// Created by brian on 11/20/18.
//
#include "complexCUDA.cuh"
#include <iostream>
#include <cuda.h>
#include <cmath>
const float PI = 3.14159265358979f;
Complex::Complex() : real(0.0f), imag(0.0f) {}
Complex::Complex(float r) : real(r), imag(0.0f) {}
Complex::Complex(float r, float i) : real(r), imag(i) {}
//add complex numbers
Complex Complex::operator+(const Complex &b) const {
return Complex(real + b.real, imag + b.imag);
}
// subtract complex numbers
Complex Complex::operator-(const Complex &b) const {
return Complex(real - b.real, imag - b.imag);
}
Complex Complex::operator*(const Complex &b) const {
return Complex(real*b.real-imag*b.imag, real*b.imag+imag*b.real);
}
Complex Complex::mag() const {
return Complex(sqrt(real*real+imag*imag));
}
// tan^-1(b/a) in degrees
Complex Complex::angle() const {
return Complex(atan2(imag,real) * 360/(2*PI));
}
Complex Complex::conj() const {
return Complex(real, -imag);
}
std::ostream& operator<< (std::ostream& os, const Complex& rhs) {
Complex c(rhs);
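    // Snap components below 1e-10 to zero so floating-point round-off prints as 0
    // (and a zero imaginary part is omitted from the output).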
if(fabsf(rhs.imag) < 1e-10) c.imag = 0.0f;
if(fabsf(rhs.real) < 1e-10) c.real = 0.0f;
if(c.imag == 0) {
os << c.real;
}
else {
os << "(" << c.real << "," << c.imag << ")";
}
return os;
} |
2940a6f9530265b09d14c58a9f7036d9132781dd.hip | // !!! This is a file automatically generated by hipify!!!
//general parts
#include <stdio.h>
#include <vector>
#include <memory>
#include <string.h>
#include <chrono>
#include <thread>
#include <iostream>
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
//CUDA parts
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hipfft.h>
#include "user_benchmark_cuFFT.h"
void user_benchmark_cuFFT(bool file_output, FILE* output, cuFFTUserSystemParameters* userParams)
{
const int num_runs = 3;
double benchmark_result[2] = { 0,0 };//averaged result = sum(system_size/iteration_time)/num_benchmark_samples
uint64_t storageComplexSize;
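	// P selects the precision of each complex component: 0 = single, 1 = double,
	// 2 = 2-byte components (presumably half precision); only 0 and 1 are used by the plans below.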
switch (userParams->P) {
case 0:
storageComplexSize = (2 * sizeof(float));
break;
case 1:
storageComplexSize = (2 * sizeof(double));
break;
case 2:
storageComplexSize = (2 * 2);
break;
}
for (int n = 0; n < 2; n++) {
double run_time[num_runs][2];
for (int r = 0; r < num_runs; r++) {
hipfftHandle plan;
hipfftHandle plan2;
void* dataC;
int dims[3];
int FFTdim = 1;
if (userParams->Y > 1) FFTdim++;
if (userParams->Z > 1) FFTdim++;
switch (FFTdim) {
case 1:
dims[0] = userParams->X;
dims[1] = 1;
dims[2] = 1;
break;
case 2:
dims[0] = userParams->Y;
dims[1] = userParams->X;
dims[2] = 1;
break;
case 3:
dims[0] = userParams->Z;
dims[1] = userParams->Y;
dims[2] = userParams->X;
break;
}
uint64_t bufferSize;
if (userParams->R2C)
bufferSize = (uint64_t)(storageComplexSize / 2) * (userParams->X + 2) * userParams->Y * userParams->Z * userParams->B;
else
bufferSize = (uint64_t)storageComplexSize * userParams->X * userParams->Y * userParams->Z * userParams->B;
hipMalloc((void**)&dataC, bufferSize);
if (hipGetLastError() != hipSuccess) {
fprintf(stderr, "Cuda error: Failed to allocate\n");
return;
}
//forward + inverse
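			// Index [0] of the embed/stride/dist arrays describes the forward transform,
			// index [1] the inverse. For R2C the real data is padded to (X + 2) values along
			// the innermost dimension, so the real-side distance (idist[0] / odist[1]) is
			// twice the complex-side one.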
int iembed[2][3];
int istride[2] = { 1, 1 };
int idist[2] = {bufferSize / userParams->B / storageComplexSize, bufferSize / userParams->B / storageComplexSize};
if (userParams->R2C) idist[0] *= 2;
int oembed[2][3];
int ostride[2] = { 1, 1 };
int odist[2] = { bufferSize / userParams->B / storageComplexSize, bufferSize / userParams->B / storageComplexSize };
if (userParams->R2C) odist[1] *= 2;
switch (FFTdim) {
case 1:
iembed[0][0] = (userParams->R2C) ? dims[0] + 2 : dims[0];
oembed[0][0] = (userParams->R2C) ? (dims[0] + 2) / 2 : dims[0];
iembed[1][0] = (userParams->R2C) ? (dims[0] + 2) / 2 : dims[0];
oembed[1][0] = (userParams->R2C) ? dims[0] + 2 : dims[0];
break;
case 2:
iembed[0][0] = dims[0];
iembed[0][1] = (userParams->R2C) ? dims[1] + 2 : dims[1];
oembed[0][0] = dims[0];
oembed[0][1] = (userParams->R2C) ? (dims[1] + 2) / 2 : dims[1];
iembed[1][0] = dims[0];
iembed[1][1] = (userParams->R2C) ? (dims[1] + 2) / 2 : dims[1];
oembed[1][0] = dims[0];
oembed[1][1] = (userParams->R2C) ? dims[1] + 2 : dims[1];
break;
case 3:
iembed[0][0] = idist[0];
iembed[0][1] = dims[1];
iembed[0][2] = (userParams->R2C) ? dims[2] + 2 : dims[2];
oembed[0][0] = odist[0];
oembed[0][1] = dims[1];
oembed[0][2] = (userParams->R2C) ? (dims[2] + 2)/2 : dims[2];
iembed[1][0] = idist[0];
iembed[1][1] = dims[1];
iembed[1][2] = (userParams->R2C) ? (dims[2] + 2)/2 : dims[2];
oembed[1][0] = odist[0];
oembed[1][1] = dims[1];
oembed[1][2] = (userParams->R2C) ? dims[2] + 2 : dims[2];
break;
}
switch (userParams->P) {
case 0:
if (userParams->R2C) {
hipfftPlanMany(&plan, FFTdim, dims, iembed[0], istride[0], idist[0], oembed[0], ostride[0], odist[0], HIPFFT_R2C, userParams->B);
hipfftPlanMany(&plan2, FFTdim, dims, iembed[1], istride[1], idist[1], oembed[1], ostride[1], odist[1], HIPFFT_C2R, userParams->B);
}
else {
hipfftPlanMany(&plan, FFTdim, dims, iembed[0], istride[0], idist[0], oembed[0], ostride[0], odist[0], HIPFFT_C2C, userParams->B);
}
break;
case 1:
if (userParams->R2C) {
hipfftPlanMany(&plan, FFTdim, dims, iembed[0], istride[0], idist[0], oembed[0], ostride[0], odist[0], HIPFFT_D2Z, userParams->B);
hipfftPlanMany(&plan2, FFTdim, dims, iembed[1], istride[1], idist[1], oembed[1], ostride[1], odist[1], HIPFFT_Z2D, userParams->B);
}
else
hipfftPlanMany(&plan, FFTdim, dims, iembed[0], istride[0], idist[0], oembed[0], ostride[0], odist[0], HIPFFT_Z2Z, userParams->B);
break;
}
float totTime = 0;
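			// Each iteration performs an in-place forward + inverse pair; totTime ends up
			// as the average wall-clock time per iteration in milliseconds.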
std::chrono::steady_clock::time_point timeSubmit = std::chrono::steady_clock::now();
for (int i = 0; i < userParams->N; i++) {
switch (userParams->P) {
case 0:
if (userParams->R2C){
hipfftExecR2C(plan, (hipfftReal*) dataC, (hipfftComplex*) dataC);
hipfftExecC2R(plan2, (hipfftComplex*) dataC, (hipfftReal*) dataC);
}else{
hipfftExecC2C(plan, (hipfftComplex*) dataC, (hipfftComplex*) dataC, -1);
hipfftExecC2C(plan, (hipfftComplex*) dataC, (hipfftComplex*) dataC, 1);
}
break;
case 1:
if (userParams->R2C){
hipfftExecD2Z(plan, (hipfftDoubleReal*) dataC, (hipfftDoubleComplex*) dataC);
hipfftExecZ2D(plan2, (hipfftDoubleComplex*) dataC, (hipfftDoubleReal*) dataC);
}else{
hipfftExecZ2Z(plan, (hipfftDoubleComplex*) dataC, (hipfftDoubleComplex*) dataC, -1);
hipfftExecZ2Z(plan, (hipfftDoubleComplex*) dataC, (hipfftDoubleComplex*) dataC, 1);
}
break;
}
}
hipDeviceSynchronize();
std::chrono::steady_clock::time_point timeEnd = std::chrono::steady_clock::now();
totTime = (std::chrono::duration_cast<std::chrono::microseconds>(timeEnd - timeSubmit).count() * 0.001) / userParams->N;
run_time[r][0] = totTime;
if (n > 0) {
if (r == num_runs - 1) {
double std_error = 0;
double avg_time = 0;
for (uint64_t t = 0; t < num_runs; t++) {
avg_time += run_time[t][0];
}
avg_time /= num_runs;
for (uint64_t t = 0; t < num_runs; t++) {
std_error += (run_time[t][0] - avg_time) * (run_time[t][0] - avg_time);
}
std_error = sqrt(std_error / num_runs);
if (file_output)
fprintf(output, "cuFFT System: %" PRIu64 "x%" PRIu64 "x%" PRIu64 " Batch: %" PRIu64 " Buffer: %" PRIu64 " MB avg_time_per_step: %0.3f ms std_error: %0.3f num_iter: %" PRIu64 " benchmark: %" PRIu64 " scaled bandwidth: %0.1f\n", userParams->X, userParams->Y, userParams->Z, userParams->B, bufferSize / 1024 / 1024, avg_time, std_error, userParams->N, (uint64_t)(((double)bufferSize / 1024) / avg_time), ((double)bufferSize / 1024.0 / 1024.0 / 1.024 * 4 * FFTdim / avg_time));
printf("cuFFT System: %" PRIu64 "x%" PRIu64 "x%" PRIu64 " Batch: %" PRIu64 " Buffer: %" PRIu64 " MB avg_time_per_step: %0.3f ms std_error: %0.3f num_iter: %" PRIu64 " benchmark: %" PRIu64 " scaled bandwidth: %0.1f\n", userParams->X, userParams->Y, userParams->Z, userParams->B, bufferSize / 1024 / 1024, avg_time, std_error, userParams->N, (uint64_t)(((double)bufferSize / 1024) / avg_time), ((double)bufferSize / 1024.0 / 1024.0 / 1.024 * 4 * FFTdim / avg_time));
benchmark_result[0] += ((double)bufferSize / 1024) / avg_time;
}
}
hipfftDestroy(plan);
if (userParams->R2C)
hipfftDestroy(plan2);
hipFree(dataC);
hipDeviceSynchronize();
}
}
}
| 2940a6f9530265b09d14c58a9f7036d9132781dd.cu | //general parts
#include <stdio.h>
#include <vector>
#include <memory>
#include <string.h>
#include <chrono>
#include <thread>
#include <iostream>
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
//CUDA parts
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cufft.h>
#include "user_benchmark_cuFFT.h"
void user_benchmark_cuFFT(bool file_output, FILE* output, cuFFTUserSystemParameters* userParams)
{
const int num_runs = 3;
double benchmark_result[2] = { 0,0 };//averaged result = sum(system_size/iteration_time)/num_benchmark_samples
uint64_t storageComplexSize;
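	// P selects the precision of each complex component: 0 = single, 1 = double,
	// 2 = 2-byte components (presumably half precision); only 0 and 1 are used by the plans below.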
switch (userParams->P) {
case 0:
storageComplexSize = (2 * sizeof(float));
break;
case 1:
storageComplexSize = (2 * sizeof(double));
break;
case 2:
storageComplexSize = (2 * 2);
break;
}
for (int n = 0; n < 2; n++) {
double run_time[num_runs][2];
for (int r = 0; r < num_runs; r++) {
cufftHandle plan;
cufftHandle plan2;
void* dataC;
int dims[3];
int FFTdim = 1;
if (userParams->Y > 1) FFTdim++;
if (userParams->Z > 1) FFTdim++;
switch (FFTdim) {
case 1:
dims[0] = userParams->X;
dims[1] = 1;
dims[2] = 1;
break;
case 2:
dims[0] = userParams->Y;
dims[1] = userParams->X;
dims[2] = 1;
break;
case 3:
dims[0] = userParams->Z;
dims[1] = userParams->Y;
dims[2] = userParams->X;
break;
}
uint64_t bufferSize;
if (userParams->R2C)
bufferSize = (uint64_t)(storageComplexSize / 2) * (userParams->X + 2) * userParams->Y * userParams->Z * userParams->B;
else
bufferSize = (uint64_t)storageComplexSize * userParams->X * userParams->Y * userParams->Z * userParams->B;
cudaMalloc((void**)&dataC, bufferSize);
if (cudaGetLastError() != cudaSuccess) {
fprintf(stderr, "Cuda error: Failed to allocate\n");
return;
}
//forward + inverse
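			// Index [0] of the embed/stride/dist arrays describes the forward transform,
			// index [1] the inverse. For R2C the real data is padded to (X + 2) values along
			// the innermost dimension, so the real-side distance (idist[0] / odist[1]) is
			// twice the complex-side one.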
int iembed[2][3];
int istride[2] = { 1, 1 };
int idist[2] = {bufferSize / userParams->B / storageComplexSize, bufferSize / userParams->B / storageComplexSize};
if (userParams->R2C) idist[0] *= 2;
int oembed[2][3];
int ostride[2] = { 1, 1 };
int odist[2] = { bufferSize / userParams->B / storageComplexSize, bufferSize / userParams->B / storageComplexSize };
if (userParams->R2C) odist[1] *= 2;
switch (FFTdim) {
case 1:
iembed[0][0] = (userParams->R2C) ? dims[0] + 2 : dims[0];
oembed[0][0] = (userParams->R2C) ? (dims[0] + 2) / 2 : dims[0];
iembed[1][0] = (userParams->R2C) ? (dims[0] + 2) / 2 : dims[0];
oembed[1][0] = (userParams->R2C) ? dims[0] + 2 : dims[0];
break;
case 2:
iembed[0][0] = dims[0];
iembed[0][1] = (userParams->R2C) ? dims[1] + 2 : dims[1];
oembed[0][0] = dims[0];
oembed[0][1] = (userParams->R2C) ? (dims[1] + 2) / 2 : dims[1];
iembed[1][0] = dims[0];
iembed[1][1] = (userParams->R2C) ? (dims[1] + 2) / 2 : dims[1];
oembed[1][0] = dims[0];
oembed[1][1] = (userParams->R2C) ? dims[1] + 2 : dims[1];
break;
case 3:
iembed[0][0] = idist[0];
iembed[0][1] = dims[1];
iembed[0][2] = (userParams->R2C) ? dims[2] + 2 : dims[2];
oembed[0][0] = odist[0];
oembed[0][1] = dims[1];
oembed[0][2] = (userParams->R2C) ? (dims[2] + 2)/2 : dims[2];
iembed[1][0] = idist[0];
iembed[1][1] = dims[1];
iembed[1][2] = (userParams->R2C) ? (dims[2] + 2)/2 : dims[2];
oembed[1][0] = odist[0];
oembed[1][1] = dims[1];
oembed[1][2] = (userParams->R2C) ? dims[2] + 2 : dims[2];
break;
}
switch (userParams->P) {
case 0:
if (userParams->R2C) {
cufftPlanMany(&plan, FFTdim, dims, iembed[0], istride[0], idist[0], oembed[0], ostride[0], odist[0], CUFFT_R2C, userParams->B);
cufftPlanMany(&plan2, FFTdim, dims, iembed[1], istride[1], idist[1], oembed[1], ostride[1], odist[1], CUFFT_C2R, userParams->B);
}
else {
cufftPlanMany(&plan, FFTdim, dims, iembed[0], istride[0], idist[0], oembed[0], ostride[0], odist[0], CUFFT_C2C, userParams->B);
}
break;
case 1:
if (userParams->R2C) {
cufftPlanMany(&plan, FFTdim, dims, iembed[0], istride[0], idist[0], oembed[0], ostride[0], odist[0], CUFFT_D2Z, userParams->B);
cufftPlanMany(&plan2, FFTdim, dims, iembed[1], istride[1], idist[1], oembed[1], ostride[1], odist[1], CUFFT_Z2D, userParams->B);
}
else
cufftPlanMany(&plan, FFTdim, dims, iembed[0], istride[0], idist[0], oembed[0], ostride[0], odist[0], CUFFT_Z2Z, userParams->B);
break;
}
float totTime = 0;
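			// Each iteration performs an in-place forward + inverse pair; totTime ends up
			// as the average wall-clock time per iteration in milliseconds.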
std::chrono::steady_clock::time_point timeSubmit = std::chrono::steady_clock::now();
for (int i = 0; i < userParams->N; i++) {
switch (userParams->P) {
case 0:
if (userParams->R2C){
cufftExecR2C(plan, (cufftReal*) dataC, (cufftComplex*) dataC);
cufftExecC2R(plan2, (cufftComplex*) dataC, (cufftReal*) dataC);
}else{
cufftExecC2C(plan, (cufftComplex*) dataC, (cufftComplex*) dataC, -1);
cufftExecC2C(plan, (cufftComplex*) dataC, (cufftComplex*) dataC, 1);
}
break;
case 1:
if (userParams->R2C){
cufftExecD2Z(plan, (cufftDoubleReal*) dataC, (cufftDoubleComplex*) dataC);
cufftExecZ2D(plan2, (cufftDoubleComplex*) dataC, (cufftDoubleReal*) dataC);
}else{
cufftExecZ2Z(plan, (cufftDoubleComplex*) dataC, (cufftDoubleComplex*) dataC, -1);
cufftExecZ2Z(plan, (cufftDoubleComplex*) dataC, (cufftDoubleComplex*) dataC, 1);
}
break;
}
}
cudaDeviceSynchronize();
std::chrono::steady_clock::time_point timeEnd = std::chrono::steady_clock::now();
totTime = (std::chrono::duration_cast<std::chrono::microseconds>(timeEnd - timeSubmit).count() * 0.001) / userParams->N;
run_time[r][0] = totTime;
if (n > 0) {
if (r == num_runs - 1) {
double std_error = 0;
double avg_time = 0;
for (uint64_t t = 0; t < num_runs; t++) {
avg_time += run_time[t][0];
}
avg_time /= num_runs;
for (uint64_t t = 0; t < num_runs; t++) {
std_error += (run_time[t][0] - avg_time) * (run_time[t][0] - avg_time);
}
std_error = sqrt(std_error / num_runs);
if (file_output)
fprintf(output, "cuFFT System: %" PRIu64 "x%" PRIu64 "x%" PRIu64 " Batch: %" PRIu64 " Buffer: %" PRIu64 " MB avg_time_per_step: %0.3f ms std_error: %0.3f num_iter: %" PRIu64 " benchmark: %" PRIu64 " scaled bandwidth: %0.1f\n", userParams->X, userParams->Y, userParams->Z, userParams->B, bufferSize / 1024 / 1024, avg_time, std_error, userParams->N, (uint64_t)(((double)bufferSize / 1024) / avg_time), ((double)bufferSize / 1024.0 / 1024.0 / 1.024 * 4 * FFTdim / avg_time));
printf("cuFFT System: %" PRIu64 "x%" PRIu64 "x%" PRIu64 " Batch: %" PRIu64 " Buffer: %" PRIu64 " MB avg_time_per_step: %0.3f ms std_error: %0.3f num_iter: %" PRIu64 " benchmark: %" PRIu64 " scaled bandwidth: %0.1f\n", userParams->X, userParams->Y, userParams->Z, userParams->B, bufferSize / 1024 / 1024, avg_time, std_error, userParams->N, (uint64_t)(((double)bufferSize / 1024) / avg_time), ((double)bufferSize / 1024.0 / 1024.0 / 1.024 * 4 * FFTdim / avg_time));
benchmark_result[0] += ((double)bufferSize / 1024) / avg_time;
}
}
cufftDestroy(plan);
if (userParams->R2C)
cufftDestroy(plan2);
cudaFree(dataC);
cudaDeviceSynchronize();
}
}
}
|
b4402bf4c1dda30f9c9a88180255e50ae72ae8d4.hip | // !!! This is a file automatically generated by hipify!!!
#include <mat.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <math.h>
#include <iostream>
#include <rocblas.h>
#include "cokus.cpp"
#include "cuda_util.h"
#include <hip/hip_runtime.h>
using namespace std;
#define CUDA_CALL(x) do{ if( (x) != hipSuccess){\
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}}while(0);
bool InitCUDA(){
int count;
hipGetDeviceCount(&count);
if(count==0){
fprintf(stderr,"There is no device.\n");
return false;
}
int i;
for (i =0; i<count;i++){
hipDeviceProp_t prop;
if(hipGetDeviceProperties(&prop,i)==hipSuccess){
if(prop.major>=1){
break;
}
}
}
if(i==count){
fprintf(stderr,"There is no device supporting CUDA 1.x.\n");
return false;
}
hipSetDevice(i);
return true;
}
// Find the maximum value and return its index
int max(int array[],int n){
int m=array[0];
int index=0;
for(int i=1;i<n;i++){
if(m<array[i]){
m=array[i];
index=i;
}
}
return index;
}
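// Per-thread partial insertion sort: each thread owns one test sample and keeps the
// k smallest distances in q[], recording the matching training-row indices in
// index[] (column-major with stride a).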
__global__ static void sort(int iter,double * distance,double * index,int a,int m,int n,int k){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
//int mid;
// double Mid;
double mid;
double * q = new double [k];
if (tid < a){
int threadNum = blockDim.x * gridDim.x;
int id = tid + iter * threadNum;
for (int i=0;i<k;i++){
index[id+i*a]=i;
}
q[0] = distance[id];
for (int i=1;i<k;i++){
q[i]=distance[id + i*a];
int j=i;
while(j>0){
if(q[j]<q[j-1]){
mid=q[j];
q[j]=q[j-1];
q[j-1]=mid;
mid=index[id + j*a];
index[id + j*a]=index[id + (j-1)*a];
index[id + (j-1)*a]=mid;
j--;
}
else
break;
}
}
for (int i=k;i<m;i++){
if (distance[id + i*a]<q[k-1]){
q[k-1]=distance[id + i*a];
index[id + (k-1)*a]=i;
int j=k-1;
while(j>0){
if (q[j]<q[j-1]){
mid = q[j];
q[j] = q[j-1];
q[j-1] =mid;
mid = index[id + j*a];
index[id + j*a]=index[id + (j-1)*a];
index[id +(j-1)*a]=mid;
j--;
}
else
break;
}
}
}
}
	delete [] q;
	return;
}
// Compute the distances in parallel
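// One thread per test sample: sum the squared differences over the first n-1 columns
// (the last column of train holds the label) and store the Euclidean distance in
// distance[], column-major with stride a.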
__global__ static void calculate_dis(int iter,double * train,double *test,double *distance,int m,int n,int a,int b){
double mid;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < a){
int threadNum = blockDim.x * gridDim.x;
int id = tid + iter * threadNum;
for (int i=0;i<m;i++){
mid=0;
for (int j=0;j<n-1;j++){
//mid=0;
mid+=(test[j * a + id]-train[j * m + i])*(test[j * a + id]-train[j * m + i]);
//distance[i * a + id]=sqrt(mid);
}
distance[i * a +id] = sqrt(mid);
}
}
return;
}
//knn
int * knn(double * train,int m,int n,double * test,int a,int b,int k,int nclass){
double * gpu_train,*gpu_test;
double *gpu_distance;
double * gpu_index;
clock_t start,end;
SAFE_CALL(hipMalloc((void**) &gpu_train, sizeof(double) * m * n ));
SAFE_CALL(hipMemcpy(gpu_train,train,sizeof(double) * m * n,hipMemcpyHostToDevice));
SAFE_CALL(hipMalloc((void **) &gpu_test, sizeof(double) * a *b));
SAFE_CALL(hipMemcpy(gpu_test,test,sizeof(double) * a * b,hipMemcpyHostToDevice));
	// Initialize predict_label
int * predict_label = new int [a];
for(int i=0;i<a;i++){
predict_label[i]=0;
}
	// The distance array stores the distances between feature points; initialize distance
double * distance0 = new double [a*m];
SAFE_CALL(hipMalloc((void **) &gpu_distance, sizeof(double) * a * m));
	// The labels array stores the labels of the corresponding feature points in the training set
int ** labels=new int *[a];
for(int i=0;i<a;i++){
labels[i]=new int [k];
}
for(int i=0;i<a;i++)
for(int j=0;j<k;j++)
labels[i][j]=0;
	// Sort the distances
int gridSize = 150;
int blockSize = 512;
int threadNum = gridSize * blockSize;
fprintf(stdout,"Start calculating distances:\n");
int i;
hipDeviceSynchronize();
start = clock();
for (i=0;i<a/threadNum;i++){
hipLaunchKernelGGL(( calculate_dis), dim3(gridSize) , dim3(blockSize), 0, 0, i,gpu_train,gpu_test,gpu_distance,m,n,a,b);
}
if (a%threadNum != 0){
hipLaunchKernelGGL(( calculate_dis), dim3(gridSize) , dim3(blockSize), 0, 0, i,gpu_train,gpu_test,gpu_distance,m,n,a,b);
}
hipDeviceSynchronize();
SAFE_CALL(hipMemcpy(distance0,gpu_distance,sizeof(double)*a*m,hipMemcpyDeviceToHost));
hipDeviceSynchronize();
end = clock();
double usetime = double(end - start);
fprintf(stdout,"Calculating distances finished! Usetime:%lf(s)\n",usetime/CLOCKS_PER_SEC);
for(int i=0;i<20;i++)
fprintf(stdout,"%lf %lf %lf %lf\n",distance0[i],distance0[a+i],distance0[2*a+i],distance0[3*a+i]);
hipDeviceSynchronize();
hipFree(gpu_test);
hipFree(gpu_train);
hipDeviceSynchronize();
fprintf(stdout,"CudaFree completed!\n");
double * index = new double [a*k];
SAFE_CALL(hipMalloc((void**) &gpu_index, sizeof(double) * a * k));
// hipDeviceSynchronize();
start = clock();
fprintf(stdout,"Start sorting distances:\n");
int ii;
for (ii=0;ii<a/threadNum;ii++){
hipLaunchKernelGGL(( sort), dim3(gridSize),dim3(blockSize), 0, 0, ii,gpu_distance,gpu_index,a,m,n,k);
}
if (a%threadNum != 0){
hipLaunchKernelGGL(( sort), dim3(gridSize),dim3(blockSize), 0, 0, ii,gpu_distance,gpu_index,a,m,n,k);
}
hipDeviceSynchronize();
end = clock();
usetime = double(end - start);
fprintf(stdout,"Sorting distances finished! Usetime:%lf\n",usetime/CLOCKS_PER_SEC);
hipDeviceSynchronize();
SAFE_CALL(hipMemcpy(index,gpu_index,sizeof(double) * a * k,hipMemcpyDeviceToHost));
// hipMemcpy(labels,gpu_labels,sizeof(double) * a * m,hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipFree(gpu_distance);
hipFree(gpu_index);
hipDeviceSynchronize();
int *count=new int[nclass];
// for(int i=0;i<20;i++)
// fprintf(stdout,"%d %d %d %d\n",int(index[i]),int(index[i+a]),int(index[i+2*a]),int(index[i+3*a]));
start = clock();
int mm=0;
for (int i=0;i<a;i++){
for(int j=0;j<k;j++){
mm = int(index[i+j*a] + (n-1) * m);
labels[i][j] = int(train [mm]);
}
}
	// Generate the predicted labels
for(int i=0;i<a;i++){
for(int x=0;x<nclass;x++)
count[x]=0;
for(int y=0;y<nclass;y++){
for(int j=0;j<k;j++){
if(labels[i][j]==(y+1))
count[y]++;
}
}
int idx=max(count,nclass);
predict_label[i]=idx+1;
}
end = clock();
usetime = double(end - start);
fprintf(stdout,"Usetime of generate predit_label:%lf\n",usetime/CLOCKS_PER_SEC);
return predict_label;
}
int main(int argc, char * argv[])
{
if(!InitCUDA()){
return 0;
}
printf("CUDA initialized.\n");
clock_t start,end;
int k,a,b,m,n,nclass;
double *trainset,*testset;
if(argc!=4){
fprintf(stderr, "4 input arguments required!");
}
MATFile * datamat = matOpen(argv[1], "r");
mxArray * train = matGetVariable(datamat,"trainset");
mxArray * test = matGetVariable(datamat,"testset");
//MATFile * testmat = matOpen(argv[2], "r");
//mxArray * test = matGetVariable(testmat,"DS");
trainset = (double*)mxGetData(train);
testset = (double*)mxGetData(test);
//get the number of rows and columns of trainset
m=mxGetM(train);
n=mxGetN(train);
// hipMemcpy(gpu_m,m,sizeof(int),hipMemcpyHostToDevice);
// hipMemcpy(gpu_n,n,sizeof(int),hipMemcpyHostToDevice);
//Matrix train_set
/*double ** train_set=new double *[m];
for(int i=0;i<m;i++){
train_set[i]=new double[n];
}
for(int i=0;i<m;i++){
for(int j=0;j<n;j++){
train_set[i][j]=trainset[j*m+i];
}
}*/
//trainset = (double **)mxGetData(train);
fprintf(stdout,"number of rows of trainset:%d\n",m);
fprintf(stdout,"number of columns of trainset:%d\n",n);
//fprintf(stdout,"Value of train_set[0][4] is:%lf\n",train_set[0][4]);
//get the number of rows and columns of testset
a=mxGetM(test);
b=mxGetN(test);
//Matrix test_set
/*double ** test_set = new double * [a];
for (int i=0;i<a;i++){
test_set[i]=new double [b];
}
for (int i=0;i<a;i++){
for (int j=0;j<b;j++){
test_set[i][j] = testset[j*a+i];
}
}*/
fprintf(stdout,"Number of rows of testset:%d\n",a);
fprintf(stdout,"Number of columns of testset:%d\n",b);
//fprintf(stdout,"Value of test_set[0][3] is:%lf\n",test_set[0][3]);
if(b!=n && b!=(n-1)){
fprintf(stderr, "Number of testset's columns should be equal to number of trainset's column!");
}
//Get the value of k
k = (int)atoi(argv[2]);
if(k<=0)
fprintf(stderr, "Value of k must be greater than zero!");
//Get the number of classes
nclass = (int)atoi(argv[3]);
	// Initialize predict_label
int * predict_label = new int [a];
for(int i=0;i<a;i++){
predict_label[i]=0;
}
//fprintf(stdout,"Initialation finished!!!\n");
start=clock();
predict_label = knn(trainset,m,n,testset,a,b,k,nclass);
end=clock();
double usetime=(double)(end-start);
//fprintf(stdout,"Predicting labels for testset has finished!\n");
fprintf(stdout,"Using time of knnclassifier is:%lf(s)\n",usetime/CLOCKS_PER_SEC);
int out=a;
if(a>100)
out=100;
for (int i=0;i<out;i++){
fprintf(stdout,"predict label for testset[%d] is %d\n",i,predict_label[i]);
}
double accuracy=0.0;
int right = 0;
if (b==n){
for (int i=0;i<a;i++){
if(predict_label[i] == int(testset[i + (b-1)*a]))
right++;
}
accuracy=double(right)/double(a);
}
fprintf(stdout,"Presicion of knnclassifier is:%.2lf%%\n",100*accuracy);
return 0;
}
| b4402bf4c1dda30f9c9a88180255e50ae72ae8d4.cu | #include <mat.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <math.h>
#include <iostream>
#include <cublas_v2.h>
#include "cokus.cpp"
#include "cuda_util.h"
#include <cuda_runtime.h>
using namespace std;
#define CUDA_CALL(x) do{ if( (x) != cudaSuccess){\
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}}while(0);
bool InitCUDA(){
int count;
cudaGetDeviceCount(&count);
if(count==0){
fprintf(stderr,"There is no device.\n");
return false;
}
int i;
for (i =0; i<count;i++){
cudaDeviceProp prop;
if(cudaGetDeviceProperties(&prop,i)==cudaSuccess){
if(prop.major>=1){
break;
}
}
}
if(i==count){
fprintf(stderr,"There is no device supporting CUDA 1.x.\n");
return false;
}
cudaSetDevice(i);
return true;
}
// Find the maximum value and return its index
int max(int array[],int n){
int m=array[0];
int index=0;
for(int i=1;i<n;i++){
if(m<array[i]){
m=array[i];
index=i;
}
}
return index;
}
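// Per-thread partial insertion sort: each thread owns one test sample and keeps the
// k smallest distances in q[], recording the matching training-row indices in
// index[] (column-major with stride a).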
__global__ static void sort(int iter,double * distance,double * index,int a,int m,int n,int k){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
//int mid;
// double Mid;
double mid;
double * q = new double [k];
if (tid < a){
int threadNum = blockDim.x * gridDim.x;
int id = tid + iter * threadNum;
for (int i=0;i<k;i++){
index[id+i*a]=i;
}
q[0] = distance[id];
for (int i=1;i<k;i++){
q[i]=distance[id + i*a];
int j=i;
while(j>0){
if(q[j]<q[j-1]){
mid=q[j];
q[j]=q[j-1];
q[j-1]=mid;
mid=index[id + j*a];
index[id + j*a]=index[id + (j-1)*a];
index[id + (j-1)*a]=mid;
j--;
}
else
break;
}
}
for (int i=k;i<m;i++){
if (distance[id + i*a]<q[k-1]){
q[k-1]=distance[id + i*a];
index[id + (k-1)*a]=i;
int j=k-1;
while(j>0){
if (q[j]<q[j-1]){
mid = q[j];
q[j] = q[j-1];
q[j-1] =mid;
mid = index[id + j*a];
index[id + j*a]=index[id + (j-1)*a];
index[id +(j-1)*a]=mid;
j--;
}
else
break;
}
}
}
}
	delete [] q;
	return;
}
// Compute the distances in parallel
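// One thread per test sample: sum the squared differences over the first n-1 columns
// (the last column of train holds the label) and store the Euclidean distance in
// distance[], column-major with stride a.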
__global__ static void calculate_dis(int iter,double * train,double *test,double *distance,int m,int n,int a,int b){
double mid;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < a){
int threadNum = blockDim.x * gridDim.x;
int id = tid + iter * threadNum;
for (int i=0;i<m;i++){
mid=0;
for (int j=0;j<n-1;j++){
//mid=0;
mid+=(test[j * a + id]-train[j * m + i])*(test[j * a + id]-train[j * m + i]);
//distance[i * a + id]=sqrt(mid);
}
distance[i * a +id] = sqrt(mid);
}
}
return;
}
//knn
int * knn(double * train,int m,int n,double * test,int a,int b,int k,int nclass){
double * gpu_train,*gpu_test;
double *gpu_distance;
double * gpu_index;
clock_t start,end;
SAFE_CALL(cudaMalloc((void**) &gpu_train, sizeof(double) * m * n ));
SAFE_CALL(cudaMemcpy(gpu_train,train,sizeof(double) * m * n,cudaMemcpyHostToDevice));
SAFE_CALL(cudaMalloc((void **) &gpu_test, sizeof(double) * a *b));
SAFE_CALL(cudaMemcpy(gpu_test,test,sizeof(double) * a * b,cudaMemcpyHostToDevice));
	// Initialize predict_label
int * predict_label = new int [a];
for(int i=0;i<a;i++){
predict_label[i]=0;
}
	// The distance array stores the distances between feature points; initialize distance
double * distance0 = new double [a*m];
SAFE_CALL(cudaMalloc((void **) &gpu_distance, sizeof(double) * a * m));
	// The labels array stores the labels of the corresponding feature points in the training set
int ** labels=new int *[a];
for(int i=0;i<a;i++){
labels[i]=new int [k];
}
for(int i=0;i<a;i++)
for(int j=0;j<k;j++)
labels[i][j]=0;
	// Sort the distances
int gridSize = 150;
int blockSize = 512;
int threadNum = gridSize * blockSize;
fprintf(stdout,"Start calculating distances:\n");
int i;
cudaDeviceSynchronize();
start = clock();
for (i=0;i<a/threadNum;i++){
calculate_dis<<<gridSize , blockSize>>>(i,gpu_train,gpu_test,gpu_distance,m,n,a,b);
}
if (a%threadNum != 0){
calculate_dis<<<gridSize , blockSize>>>(i,gpu_train,gpu_test,gpu_distance,m,n,a,b);
}
cudaDeviceSynchronize();
SAFE_CALL(cudaMemcpy(distance0,gpu_distance,sizeof(double)*a*m,cudaMemcpyDeviceToHost));
cudaDeviceSynchronize();
end = clock();
double usetime = double(end - start);
fprintf(stdout,"Calculating distances finished! Usetime:%lf(s)\n",usetime/CLOCKS_PER_SEC);
for(int i=0;i<20;i++)
fprintf(stdout,"%lf %lf %lf %lf\n",distance0[i],distance0[a+i],distance0[2*a+i],distance0[3*a+i]);
cudaDeviceSynchronize();
cudaFree(gpu_test);
cudaFree(gpu_train);
cudaDeviceSynchronize();
fprintf(stdout,"CudaFree completed!\n");
double * index = new double [a*k];
SAFE_CALL(cudaMalloc((void**) &gpu_index, sizeof(double) * a * k));
// cudaDeviceSynchronize();
start = clock();
fprintf(stdout,"Start sorting distances:\n");
int ii;
for (ii=0;ii<a/threadNum;ii++){
sort<<<gridSize,blockSize>>>(ii,gpu_distance,gpu_index,a,m,n,k);
}
if (a%threadNum != 0){
sort<<<gridSize,blockSize>>>(ii,gpu_distance,gpu_index,a,m,n,k);
}
cudaDeviceSynchronize();
end = clock();
usetime = double(end - start);
fprintf(stdout,"Sorting distances finished! Usetime:%lf\n",usetime/CLOCKS_PER_SEC);
cudaDeviceSynchronize();
SAFE_CALL(cudaMemcpy(index,gpu_index,sizeof(double) * a * k,cudaMemcpyDeviceToHost));
// cudaMemcpy(labels,gpu_labels,sizeof(double) * a * m,cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cudaFree(gpu_distance);
cudaFree(gpu_index);
cudaDeviceSynchronize();
int *count=new int[nclass];
// for(int i=0;i<20;i++)
// fprintf(stdout,"%d %d %d %d\n",int(index[i]),int(index[i+a]),int(index[i+2*a]),int(index[i+3*a]));
start = clock();
int mm=0;
for (int i=0;i<a;i++){
for(int j=0;j<k;j++){
mm = int(index[i+j*a] + (n-1) * m);
labels[i][j] = int(train [mm]);
}
}
	// Generate the predicted labels
for(int i=0;i<a;i++){
for(int x=0;x<nclass;x++)
count[x]=0;
for(int y=0;y<nclass;y++){
for(int j=0;j<k;j++){
if(labels[i][j]==(y+1))
count[y]++;
}
}
int idx=max(count,nclass);
predict_label[i]=idx+1;
}
end = clock();
usetime = double(end - start);
fprintf(stdout,"Usetime of generate predit_label:%lf\n",usetime/CLOCKS_PER_SEC);
return predict_label;
}
int main(int argc, char * argv[])
{
if(!InitCUDA()){
return 0;
}
printf("CUDA initialized.\n");
clock_t start,end;
int k,a,b,m,n,nclass;
double *trainset,*testset;
if(argc!=4){
fprintf(stderr, "4 input arguments required!");
}
MATFile * datamat = matOpen(argv[1], "r");
mxArray * train = matGetVariable(datamat,"trainset");
mxArray * test = matGetVariable(datamat,"testset");
//MATFile * testmat = matOpen(argv[2], "r");
//mxArray * test = matGetVariable(testmat,"DS");
trainset = (double*)mxGetData(train);
testset = (double*)mxGetData(test);
//get the number of rows and columns of trainset
m=mxGetM(train);
n=mxGetN(train);
// cudaMemcpy(gpu_m,m,sizeof(int),cudaMemcpyHostToDevice);
// cudaMemcpy(gpu_n,n,sizeof(int),cudaMemcpyHostToDevice);
//Matrix train_set
/*double ** train_set=new double *[m];
for(int i=0;i<m;i++){
train_set[i]=new double[n];
}
for(int i=0;i<m;i++){
for(int j=0;j<n;j++){
train_set[i][j]=trainset[j*m+i];
}
}*/
//trainset = (double **)mxGetData(train);
fprintf(stdout,"number of rows of trainset:%d\n",m);
fprintf(stdout,"number of columns of trainset:%d\n",n);
//fprintf(stdout,"Value of train_set[0][4] is:%lf\n",train_set[0][4]);
//get the number of rows and columns of testset
a=mxGetM(test);
b=mxGetN(test);
//Matrix test_set
/*double ** test_set = new double * [a];
for (int i=0;i<a;i++){
test_set[i]=new double [b];
}
for (int i=0;i<a;i++){
for (int j=0;j<b;j++){
test_set[i][j] = testset[j*a+i];
}
}*/
fprintf(stdout,"Number of rows of testset:%d\n",a);
fprintf(stdout,"Number of columns of testset:%d\n",b);
//fprintf(stdout,"Value of test_set[0][3] is:%lf\n",test_set[0][3]);
if(b!=n && b!=(n-1)){
fprintf(stderr, "Number of testset's columns should be equal to number of trainset's column!");
}
//Get the value of k
k = (int)atoi(argv[2]);
if(k<=0)
fprintf(stderr, "Value of k must be greater than zero!");
//Get the number of classes
nclass = (int)atoi(argv[3]);
	// Initialize predict_label
int * predict_label = new int [a];
for(int i=0;i<a;i++){
predict_label[i]=0;
}
//fprintf(stdout,"Initialation finished!!!\n");
start=clock();
predict_label = knn(trainset,m,n,testset,a,b,k,nclass);
end=clock();
double usetime=(double)(end-start);
//fprintf(stdout,"Predicting labels for testset has finished!\n");
fprintf(stdout,"Using time of knnclassifier is:%lf(s)\n",usetime/CLOCKS_PER_SEC);
int out=a;
if(a>100)
out=100;
for (int i=0;i<out;i++){
fprintf(stdout,"predict label for testset[%d] is %d\n",i,predict_label[i]);
}
double accuracy=0.0;
int right = 0;
if (b==n){
for (int i=0;i<a;i++){
if(predict_label[i] == int(testset[i + (b-1)*a]))
right++;
}
accuracy=double(right)/double(a);
}
fprintf(stdout,"Presicion of knnclassifier is:%.2lf%%\n",100*accuracy);
return 0;
}
|
a2ef595a9168ae1feedac6a6cd9b032670ba00df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
__global__ void
magma_zlobpcg_shift_kernel(
magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
magmaDoubleComplex * x )
{
int idx = threadIdx.x; // thread in row
int row = blockIdx.y * gridDim.x + blockIdx.x; // global block index
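    // Each thread caches one of the num_vecs entries of its row in tmp; after the
    // barrier, threads with idx >= shift write the value back shift positions to the
    // left, discarding the dropped residuals.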
if ( row<num_rows) {
magmaDoubleComplex tmp = x[idx];
__syncthreads();
if ( idx > shift-1 ) {
idx-=shift;
x[idx] = tmp;
__syncthreads();
}
}
}
/**
Purpose
-------
For a Block-LOBPCG, the set of residuals (entries consecutive in memory)
shrinks and the vectors are shifted in case shift residuals drop below
threshold. The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x2[0] x3[0] x1[1] x2[1] x3[1] x1[2] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
shift magma_int_t
shift number
@param[in,out]
x magmaDoubleComplex_ptr
input/output vector x
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zaux
********************************************************************/
extern "C" magma_int_t
magma_zlobpcg_shift(
magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
magmaDoubleComplex_ptr x,
magma_queue_t queue )
{
magma_int_t num_threads = num_vecs;
// every thread handles one row containing the
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
int Ms = num_threads * sizeof( magmaDoubleComplex );
if ( Ms > 1024*8 )
printf("error: too much shared memory requested.\n");
dim3 block( num_threads, 1, 1 );
int dimgrid1 = int( sqrt( double( num_rows )));
int dimgrid2 = magma_ceildiv( num_rows, dimgrid1 );
dim3 grid( dimgrid1, dimgrid2, 1);
hipLaunchKernelGGL(( magma_zlobpcg_shift_kernel), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
num_rows, num_vecs, shift, x );
return MAGMA_SUCCESS;
}
| a2ef595a9168ae1feedac6a6cd9b032670ba00df.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
__global__ void
magma_zlobpcg_shift_kernel(
magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
magmaDoubleComplex * x )
{
int idx = threadIdx.x; // thread in row
int row = blockIdx.y * gridDim.x + blockIdx.x; // global block index
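    // Each thread caches one of the num_vecs entries of its row in tmp; after the
    // barrier, threads with idx >= shift write the value back shift positions to the
    // left, discarding the dropped residuals.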
if ( row<num_rows) {
magmaDoubleComplex tmp = x[idx];
__syncthreads();
if ( idx > shift-1 ) {
idx-=shift;
x[idx] = tmp;
__syncthreads();
}
}
}
/**
Purpose
-------
For a Block-LOBPCG, the set of residuals (entries consecutive in memory)
shrinks and the vectors are shifted in case shift residuals drop below
threshold. The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x2[0] x3[0] x1[1] x2[1] x3[1] x1[2] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
shift magma_int_t
shift number
@param[in,out]
x magmaDoubleComplex_ptr
input/output vector x
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zaux
********************************************************************/
extern "C" magma_int_t
magma_zlobpcg_shift(
magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
magmaDoubleComplex_ptr x,
magma_queue_t queue )
{
magma_int_t num_threads = num_vecs;
// every thread handles one row containing the
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
int Ms = num_threads * sizeof( magmaDoubleComplex );
if ( Ms > 1024*8 )
printf("error: too much shared memory requested.\n");
dim3 block( num_threads, 1, 1 );
int dimgrid1 = int( sqrt( double( num_rows )));
int dimgrid2 = magma_ceildiv( num_rows, dimgrid1 );
dim3 grid( dimgrid1, dimgrid2, 1);
magma_zlobpcg_shift_kernel<<< grid, block, Ms, queue->cuda_stream() >>>
( num_rows, num_vecs, shift, x );
return MAGMA_SUCCESS;
}
|
b7c0d375ac4e23d9642523a52967b77f3ac5d9b7.hip | // !!! This is a file automatically generated by hipify!!!
#include"cuda_runtime.h"
#include"device_launch_parameters.h"
#include<stdio.h>
#include<stdlib.h>
#include "hip/hip_texture_types.h"
#include<math.h>
#include "hip/hip_runtime.h"
//#include "cpu_anim.h" //texture
#define size 256
texture<float, hipTextureType2D, hipReadModeElementType> texRef;
__global__ void transformKernel(float* input, float* output, int width, int height, float theta)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float u = x / (float)width;
float v = y / (float)height;
	// Coordinate transform: rotate (u, v) about the image center
u -= 0.5f;
v -= 0.5f;
float tu = u * cosf(theta) - v * sinf(theta) + 0.5f;
float tv = v * cosf(theta) + u * sinf(theta) + 0.5f;
int col = tu*width;
int row = tv*height;
//output[y*width + x] = input[0];
output[y*width + x] = tex2D(texRef, tu, tv);
}
extern "C"
void testTexture()
{
int width = 3840, height = 1920;
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipArray*cuArray;
hipMallocArray(&cuArray, &channelDesc, width, height);
float*h_data = (float*)malloc(width*height * sizeof(float));
for (int i = 0; i<height; ++i)
{
for (int j = 0; j<width; ++j)
{
h_data[i*width + j] = i*width + j;
}
}
hipMemcpyToArray(cuArray, 0, 0, h_data, width*height * sizeof(float), hipMemcpyHostToDevice);
texRef.addressMode[0] = hipAddressModeWrap;
texRef.addressMode[1] = hipAddressModeWrap;
texRef.filterMode = hipFilterModeLinear;
texRef.normalized = true;
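	// With normalized coordinates and hipFilterModeLinear, tex2D(texRef, tu, tv)
	// performs hardware bilinear interpolation for coordinates in [0,1); the wrap
	// address mode handles samples that the rotation pushes outside the image.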
hipBindTextureToArray(texRef, cuArray, channelDesc);
float*output;
hipMalloc(&output, width*height * sizeof(float));
dim3 dimBlock(16, 16);
dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x, (height + dimBlock.y - 1) / dimBlock.y);
float angle = 30;
float *input = NULL;
hipMalloc(&input, width*height * sizeof(float));
hipMemcpy(input, h_data, width*height * sizeof(float), hipMemcpyHostToDevice);
transformKernel << <dimGrid, dimBlock >> >(input, output, width, height, angle);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, NULL);
for (int i = 0; i < 1000; i++)
{
transformKernel << <dimGrid, dimBlock >> >(input, output, width, height, angle);
hipGetLastError();
}
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
float costtime;
hipEventElapsedTime(&costtime, start, stop);
printf("kernel run time: %f ms\n", costtime);
float*hostPtr = (float*)malloc(sizeof(float)*width*height);
hipMemcpy(hostPtr, output, sizeof(float)*width*height, hipMemcpyDeviceToHost);
/*for (int i = 0; i<height; ++i)
{
for (int j = 0; j<width; ++j)
{
printf("%f\n", hostPtr[i*width + j]);
}
printf("\n");
}*/
free(hostPtr);
hipFreeArray(cuArray);
hipFree(output);
system("pause");
} | b7c0d375ac4e23d9642523a52967b77f3ac5d9b7.cu | #include"cuda_runtime.h"
#include"device_launch_parameters.h"
#include<stdio.h>
#include<stdlib.h>
#include "cuda_texture_types.h"
#include<math.h>
#include "cuda.h"
//#include "cpu_anim.h" //调用texture的时候必须加上这个头文件
#define size 256
texture<float, cudaTextureType2D, cudaReadModeElementType> texRef;
__global__ void transformKernel(float* input, float* output, int width, int height, float theta)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float u = x / (float)width;
float v = y / (float)height;
	// Coordinate transform: rotate (u, v) about the image center
u -= 0.5f;
v -= 0.5f;
float tu = u * cosf(theta) - v * sinf(theta) + 0.5f;
float tv = v * cosf(theta) + u * sinf(theta) + 0.5f;
int col = tu*width;
int row = tv*height;
//output[y*width + x] = input[0];
output[y*width + x] = tex2D(texRef, tu, tv);
}
extern "C"
void testTexture()
{
int width = 3840, height = 1920;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaArray*cuArray;
cudaMallocArray(&cuArray, &channelDesc, width, height);
float*h_data = (float*)malloc(width*height * sizeof(float));
for (int i = 0; i<height; ++i)
{
for (int j = 0; j<width; ++j)
{
h_data[i*width + j] = i*width + j;
}
}
cudaMemcpyToArray(cuArray, 0, 0, h_data, width*height * sizeof(float), cudaMemcpyHostToDevice);
texRef.addressMode[0] = cudaAddressModeWrap;
texRef.addressMode[1] = cudaAddressModeWrap;
texRef.filterMode = cudaFilterModeLinear;
texRef.normalized = true;
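	// With normalized coordinates and cudaFilterModeLinear, tex2D(texRef, tu, tv)
	// performs hardware bilinear interpolation for coordinates in [0,1); the wrap
	// address mode handles samples that the rotation pushes outside the image.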
cudaBindTextureToArray(texRef, cuArray, channelDesc);
float*output;
cudaMalloc(&output, width*height * sizeof(float));
dim3 dimBlock(16, 16);
dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x, (height + dimBlock.y - 1) / dimBlock.y);
float angle = 30;
float *input = NULL;
cudaMalloc(&input, width*height * sizeof(float));
cudaMemcpy(input, h_data, width*height * sizeof(float), cudaMemcpyHostToDevice);
transformKernel << <dimGrid, dimBlock >> >(input, output, width, height, angle);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, NULL);
for (int i = 0; i < 1000; i++)
{
transformKernel << <dimGrid, dimBlock >> >(input, output, width, height, angle);
cudaGetLastError();
}
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
float costtime;
cudaEventElapsedTime(&costtime, start, stop);
printf("kernel run time: %f ms\n", costtime);
float*hostPtr = (float*)malloc(sizeof(float)*width*height);
cudaMemcpy(hostPtr, output, sizeof(float)*width*height, cudaMemcpyDeviceToHost);
/*for (int i = 0; i<height; ++i)
{
for (int j = 0; j<width; ++j)
{
printf("%f\n", hostPtr[i*width + j]);
}
printf("\n");
}*/
free(hostPtr);
cudaFreeArray(cuArray);
cudaFree(output);
system("pause");
} |
a9b2c39ebf48afc3abe1b2fbeed0cb3ad66dfd68.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017-2019 XGBoost contributors
*/
#include <thrust/device_vector.h>
#include <dmlc/filesystem.h>
#include <xgboost/base.h>
#include <random>
#include <string>
#include <vector>
#include "../helpers.h"
#include "gtest/gtest.h"
#include "../../../src/data/sparse_page_source.h"
#include "../../../src/gbm/gbtree_model.h"
#include "../../../src/tree/updater_gpu_hist.cu"
#include "../../../src/tree/updater_gpu_common.cuh"
#include "../../../src/common/common.h"
#include "../../../src/tree/constraints.cuh"
namespace xgboost {
namespace tree {
TEST(GpuHist, DeviceHistogram) {
// Ensures that node allocates correctly after reaching `kStopGrowingSize`.
dh::SaveCudaContext{
[&]() {
dh::safe_cuda(hipSetDevice(0));
constexpr size_t kNBins = 128;
constexpr size_t kNNodes = 4;
constexpr size_t kStopGrowing = kNNodes * kNBins * 2u;
DeviceHistogram<GradientPairPrecise, kStopGrowing> histogram;
histogram.Init(0, kNBins);
for (size_t i = 0; i < kNNodes; ++i) {
histogram.AllocateHistogram(i);
}
histogram.Reset();
ASSERT_EQ(histogram.Data().size(), kStopGrowing);
// Use allocated memory but do not erase nidx_map.
for (size_t i = 0; i < kNNodes; ++i) {
histogram.AllocateHistogram(i);
}
for (size_t i = 0; i < kNNodes; ++i) {
ASSERT_TRUE(histogram.HistogramExists(i));
}
// Erase existing nidx_map.
for (size_t i = kNNodes; i < kNNodes * 2; ++i) {
histogram.AllocateHistogram(i);
}
for (size_t i = 0; i < kNNodes; ++i) {
ASSERT_FALSE(histogram.HistogramExists(i));
}
}
};
}
std::vector<GradientPairPrecise> GetHostHistGpair() {
// 24 bins, 3 bins for each feature (column).
std::vector<GradientPairPrecise> hist_gpair = {
{0.8314f, 0.7147f}, {1.7989f, 3.7312f}, {3.3846f, 3.4598f},
{2.9277f, 3.5886f}, {1.8429f, 2.4152f}, {1.2443f, 1.9019f},
{1.6380f, 2.9174f}, {1.5657f, 2.5107f}, {2.8111f, 2.4776f},
{2.1322f, 3.0651f}, {3.2927f, 3.8540f}, {0.5899f, 0.9866f},
{1.5185f, 1.6263f}, {2.0686f, 3.1844f}, {2.4278f, 3.0950f},
{1.5105f, 2.1403f}, {2.6922f, 4.2217f}, {1.8122f, 1.5437f},
{0.0000f, 0.0000f}, {4.3245f, 5.7955f}, {1.6903f, 2.1103f},
{2.4012f, 4.4754f}, {3.6136f, 3.4303f}, {0.0000f, 0.0000f}
};
return hist_gpair;
}
template <typename GradientSumT>
void TestBuildHist(bool use_shared_memory_histograms) {
int const kNRows = 16, kNCols = 8;
TrainParam param;
std::vector<std::pair<std::string, std::string>> args {
{"max_depth", "6"},
{"max_leaves", "0"},
};
param.Init(args);
auto page = BuildEllpackPage(kNRows, kNCols);
BatchParam batch_param{};
GPUHistMakerDevice<GradientSumT> maker(0, page.get(), kNRows, param, kNCols, kNCols, batch_param);
maker.InitHistogram();
xgboost::SimpleLCG gen;
xgboost::SimpleRealUniformDistribution<bst_float> dist(0.0f, 1.0f);
std::vector<GradientPair> h_gpair(kNRows);
for (auto &gpair : h_gpair) {
bst_float grad = dist(&gen);
bst_float hess = dist(&gen);
gpair = GradientPair(grad, hess);
}
thrust::host_vector<common::CompressedByteT> h_gidx_buffer (page->gidx_buffer.size());
common::CompressedByteT* d_gidx_buffer_ptr = page->gidx_buffer.data();
dh::safe_cuda(hipMemcpy(h_gidx_buffer.data(), d_gidx_buffer_ptr,
sizeof(common::CompressedByteT) * page->gidx_buffer.size(),
hipMemcpyDeviceToHost));
maker.row_partitioner.reset(new RowPartitioner(0, kNRows));
maker.hist.AllocateHistogram(0);
dh::CopyVectorToDeviceSpan(maker.gpair, h_gpair);
maker.use_shared_memory_histograms = use_shared_memory_histograms;
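  // Build the root-node histogram with either the shared-memory or the global-memory
  // path; both must reproduce the reference values from GetHostHistGpair().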
maker.BuildHist(0);
DeviceHistogram<GradientSumT> d_hist = maker.hist;
auto node_histogram = d_hist.GetNodeHistogram(0);
// d_hist.data stored in float, not gradient pair
thrust::host_vector<GradientSumT> h_result (d_hist.Data().size() / 2);
size_t data_size =
sizeof(GradientSumT) /
(sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT));
data_size *= d_hist.Data().size();
dh::safe_cuda(hipMemcpy(h_result.data(), node_histogram.data(), data_size,
hipMemcpyDeviceToHost));
std::vector<GradientPairPrecise> solution = GetHostHistGpair();
std::cout << std::fixed;
for (size_t i = 0; i < h_result.size(); ++i) {
EXPECT_NEAR(h_result[i].GetGrad(), solution[i].GetGrad(), 0.01f);
EXPECT_NEAR(h_result[i].GetHess(), solution[i].GetHess(), 0.01f);
}
}
TEST(GpuHist, BuildHistGlobalMem) {
TestBuildHist<GradientPairPrecise>(false);
TestBuildHist<GradientPair>(false);
}
TEST(GpuHist, BuildHistSharedMem) {
TestBuildHist<GradientPairPrecise>(true);
TestBuildHist<GradientPair>(true);
}
HistogramCutsWrapper GetHostCutMatrix () {
HistogramCutsWrapper cmat;
cmat.SetPtrs({0, 3, 6, 9, 12, 15, 18, 21, 24});
cmat.SetMins({0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f, 0.2f, 0.2f});
// 24 cut fields, 3 cut fields for each feature (column).
// Each row of the cut represents the cuts for a data column.
cmat.SetValues({0.30f, 0.67f, 1.64f,
0.32f, 0.77f, 1.95f,
0.29f, 0.70f, 1.80f,
0.32f, 0.75f, 1.85f,
0.18f, 0.59f, 1.69f,
0.25f, 0.74f, 2.00f,
0.26f, 0.74f, 1.98f,
0.26f, 0.71f, 1.83f});
return cmat;
}
// TODO(trivialfis): This test is over simplified.
TEST(GpuHist, EvaluateSplits) {
constexpr int kNRows = 16;
constexpr int kNCols = 8;
TrainParam param;
std::vector<std::pair<std::string, std::string>> args {
{"max_depth", "1"},
{"max_leaves", "0"},
// Disable all other parameters.
{"colsample_bynode", "1"},
{"colsample_bylevel", "1"},
{"colsample_bytree", "1"},
{"min_child_weight", "0.01"},
{"reg_alpha", "0"},
{"reg_lambda", "0"},
{"max_delta_step", "0"}
};
param.Init(args);
for (size_t i = 0; i < kNCols; ++i) {
param.monotone_constraints.emplace_back(0);
}
int max_bins = 4;
// Initialize GPUHistMakerDevice
auto page = BuildEllpackPage(kNRows, kNCols);
BatchParam batch_param{};
GPUHistMakerDevice<GradientPairPrecise>
maker(0, page.get(), kNRows, param, kNCols, kNCols, batch_param);
// Initialize GPUHistMakerDevice::node_sum_gradients
maker.node_sum_gradients = {{6.4f, 12.8f}};
// Initialize GPUHistMakerDevice::cut
auto cmat = GetHostCutMatrix();
// Copy cut matrix to device.
maker.ba.Allocate(0,
&(page->matrix.info.feature_segments), cmat.Ptrs().size(),
&(page->matrix.info.min_fvalue), cmat.MinValues().size(),
&(page->matrix.info.gidx_fvalue_map), 24,
&(maker.monotone_constraints), kNCols);
dh::CopyVectorToDeviceSpan(page->matrix.info.feature_segments, cmat.Ptrs());
dh::CopyVectorToDeviceSpan(page->matrix.info.gidx_fvalue_map, cmat.Values());
dh::CopyVectorToDeviceSpan(maker.monotone_constraints, param.monotone_constraints);
dh::CopyVectorToDeviceSpan(page->matrix.info.min_fvalue, cmat.MinValues());
// Initialize GPUHistMakerDevice::hist
maker.hist.Init(0, (max_bins - 1) * kNCols);
maker.hist.AllocateHistogram(0);
// Each row of hist_gpair represents gpairs for one feature.
// Each entry represents a bin.
std::vector<GradientPairPrecise> hist_gpair = GetHostHistGpair();
std::vector<bst_float> hist;
for (auto pair : hist_gpair) {
hist.push_back(pair.GetGrad());
hist.push_back(pair.GetHess());
}
ASSERT_EQ(maker.hist.Data().size(), hist.size());
thrust::copy(hist.begin(), hist.end(),
maker.hist.Data().begin());
maker.column_sampler.Init(kNCols,
param.colsample_bynode,
param.colsample_bylevel,
param.colsample_bytree,
false);
RegTree tree;
MetaInfo info;
info.num_row_ = kNRows;
info.num_col_ = kNCols;
maker.node_value_constraints.resize(1);
maker.node_value_constraints[0].lower_bound = -1.0;
maker.node_value_constraints[0].upper_bound = 1.0;
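  // Both entries of the nidx pair refer to the root node, so the two returned
  // candidates should agree: feature 7 split at roughly 0.26.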
std::vector<DeviceSplitCandidate> res = maker.EvaluateSplits({0, 0 }, tree, kNCols);
ASSERT_EQ(res[0].findex, 7);
ASSERT_EQ(res[1].findex, 7);
ASSERT_NEAR(res[0].fvalue, 0.26, xgboost::kRtEps);
ASSERT_NEAR(res[1].fvalue, 0.26, xgboost::kRtEps);
}
void TestHistogramIndexImpl() {
// Test if the compressed histogram index matches when using a sparse
// dmatrix with and without using external memory
int constexpr kNRows = 1000, kNCols = 10;
// Build 2 matrices and build a histogram maker with that
tree::GPUHistMakerSpecialised<GradientPairPrecise> hist_maker, hist_maker_ext;
std::unique_ptr<DMatrix> hist_maker_dmat(
CreateSparsePageDMatrixWithRC(kNRows, kNCols, 0, true));
dmlc::TemporaryDirectory tempdir;
std::unique_ptr<DMatrix> hist_maker_ext_dmat(
CreateSparsePageDMatrixWithRC(kNRows, kNCols, 128UL, true, tempdir));
std::vector<std::pair<std::string, std::string>> training_params = {
{"max_depth", "10"},
{"max_leaves", "0"}
};
GenericParameter generic_param(CreateEmptyGenericParam(0));
hist_maker.Configure(training_params, &generic_param);
hist_maker.InitDataOnce(hist_maker_dmat.get());
hist_maker_ext.Configure(training_params, &generic_param);
hist_maker_ext.InitDataOnce(hist_maker_ext_dmat.get());
// Extract the device maker from the histogram makers and from that its compressed
// histogram index
const auto &maker = hist_maker.maker;
std::vector<common::CompressedByteT> h_gidx_buffer(maker->page->gidx_buffer.size());
dh::CopyDeviceSpanToVector(&h_gidx_buffer, maker->page->gidx_buffer);
const auto &maker_ext = hist_maker_ext.maker;
std::vector<common::CompressedByteT> h_gidx_buffer_ext(maker_ext->page->gidx_buffer.size());
dh::CopyDeviceSpanToVector(&h_gidx_buffer_ext, maker_ext->page->gidx_buffer);
ASSERT_EQ(maker->page->matrix.info.n_bins, maker_ext->page->matrix.info.n_bins);
ASSERT_EQ(maker->page->gidx_buffer.size(), maker_ext->page->gidx_buffer.size());
ASSERT_EQ(h_gidx_buffer, h_gidx_buffer_ext);
}
TEST(GpuHist, TestHistogramIndex) {
TestHistogramIndexImpl();
}
// gamma is an alias of min_split_loss
int32_t TestMinSplitLoss(DMatrix* dmat, float gamma, HostDeviceVector<GradientPair>* gpair) {
Args args {
{"max_depth", "1"},
{"max_leaves", "0"},
// Disable all other parameters.
{"colsample_bynode", "1"},
{"colsample_bylevel", "1"},
{"colsample_bytree", "1"},
{"min_child_weight", "0.01"},
{"reg_alpha", "0"},
{"reg_lambda", "0"},
{"max_delta_step", "0"},
// test gamma
{"gamma", std::to_string(gamma)}
};
tree::GPUHistMakerSpecialised<GradientPairPrecise> hist_maker;
GenericParameter generic_param(CreateEmptyGenericParam(0));
hist_maker.Configure(args, &generic_param);
RegTree tree;
hist_maker.Update(gpair, dmat, {&tree});
auto n_nodes = tree.NumExtraNodes();
return n_nodes;
}
HostDeviceVector<GradientPair> GenerateRandomGradients(const size_t n_rows) {
xgboost::SimpleLCG gen;
xgboost::SimpleRealUniformDistribution<bst_float> dist(0.0f, 1.0f);
std::vector<GradientPair> h_gpair(n_rows);
for (auto &gpair : h_gpair) {
bst_float grad = dist(&gen);
bst_float hess = dist(&gen);
gpair = GradientPair(grad, hess);
}
HostDeviceVector<GradientPair> gpair(h_gpair);
return gpair;
}
TEST(GpuHist, MinSplitLoss) {
constexpr size_t kRows = 32;
constexpr size_t kCols = 16;
constexpr float kSparsity = 0.6;
auto dmat = CreateDMatrix(kRows, kCols, kSparsity, 3);
auto gpair = GenerateRandomGradients(kRows);
{
int32_t n_nodes = TestMinSplitLoss((*dmat).get(), 0.01, &gpair);
    // This is not strictly verified, meaning the number `2` is whatever GPU_Hist returned
    // when writing this test; it is only used to check that a larger gamma (below) does
    // prevent the tree from growing.
ASSERT_EQ(n_nodes, 2);
}
{
int32_t n_nodes = TestMinSplitLoss((*dmat).get(), 100.0, &gpair);
// No new nodes with gamma == 100.
ASSERT_EQ(n_nodes, static_cast<decltype(n_nodes)>(0));
}
delete dmat;
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair,
DMatrix* dmat,
size_t gpu_page_size,
RegTree* tree,
HostDeviceVector<bst_float>* preds) {
constexpr size_t kMaxBin = 2;
if (gpu_page_size > 0) {
// Loop over the batches and count the records
int64_t batch_count = 0;
int64_t row_count = 0;
for (const auto& batch : dmat->GetBatches<EllpackPage>({0, kMaxBin, 0, gpu_page_size})) {
EXPECT_LT(batch.Size(), dmat->Info().num_row_);
batch_count++;
row_count += batch.Size();
}
EXPECT_GE(batch_count, 2);
EXPECT_EQ(row_count, dmat->Info().num_row_);
}
Args args{
{"max_depth", "2"},
{"max_bin", std::to_string(kMaxBin)},
{"min_child_weight", "0.0"},
{"reg_alpha", "0"},
{"reg_lambda", "0"}
};
tree::GPUHistMakerSpecialised<GradientPairPrecise> hist_maker;
GenericParameter generic_param(CreateEmptyGenericParam(0));
generic_param.gpu_page_size = gpu_page_size;
hist_maker.Configure(args, &generic_param);
hist_maker.Update(gpair, dmat, {tree});
hist_maker.UpdatePredictionCache(dmat, preds);
}
TEST(GpuHist, ExternalMemory) {
constexpr size_t kRows = 6;
constexpr size_t kCols = 2;
constexpr size_t kPageSize = 1;
// Create an in-memory DMatrix.
std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, 0, true));
// Create a DMatrix with multiple batches.
dmlc::TemporaryDirectory tmpdir;
std::unique_ptr<DMatrix>
dmat_ext(CreateSparsePageDMatrixWithRC(kRows, kCols, kPageSize, true, tmpdir));
auto gpair = GenerateRandomGradients(kRows);
// Build a tree using the in-memory DMatrix.
RegTree tree;
HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
UpdateTree(&gpair, dmat.get(), 0, &tree, &preds);
// Build another tree using multiple ELLPACK pages.
RegTree tree_ext;
HostDeviceVector<bst_float> preds_ext(kRows, 0.0, 0);
UpdateTree(&gpair, dmat_ext.get(), kPageSize, &tree_ext, &preds_ext);
// Make sure the predictions are the same.
auto preds_h = preds.ConstHostVector();
auto preds_ext_h = preds_ext.ConstHostVector();
for (int i = 0; i < kRows; i++) {
ASSERT_FLOAT_EQ(preds_h[i], preds_ext_h[i]);
}
}
} // namespace tree
} // namespace xgboost
| a9b2c39ebf48afc3abe1b2fbeed0cb3ad66dfd68.cu | /*!
* Copyright 2017-2019 XGBoost contributors
*/
#include <thrust/device_vector.h>
#include <dmlc/filesystem.h>
#include <xgboost/base.h>
#include <random>
#include <string>
#include <vector>
#include "../helpers.h"
#include "gtest/gtest.h"
#include "../../../src/data/sparse_page_source.h"
#include "../../../src/gbm/gbtree_model.h"
#include "../../../src/tree/updater_gpu_hist.cu"
#include "../../../src/tree/updater_gpu_common.cuh"
#include "../../../src/common/common.h"
#include "../../../src/tree/constraints.cuh"
namespace xgboost {
namespace tree {
TEST(GpuHist, DeviceHistogram) {
// Ensures that node allocates correctly after reaching `kStopGrowingSize`.
dh::SaveCudaContext{
[&]() {
dh::safe_cuda(cudaSetDevice(0));
constexpr size_t kNBins = 128;
constexpr size_t kNNodes = 4;
constexpr size_t kStopGrowing = kNNodes * kNBins * 2u;
DeviceHistogram<GradientPairPrecise, kStopGrowing> histogram;
histogram.Init(0, kNBins);
for (size_t i = 0; i < kNNodes; ++i) {
histogram.AllocateHistogram(i);
}
histogram.Reset();
ASSERT_EQ(histogram.Data().size(), kStopGrowing);
// Use allocated memory but do not erase nidx_map.
for (size_t i = 0; i < kNNodes; ++i) {
histogram.AllocateHistogram(i);
}
for (size_t i = 0; i < kNNodes; ++i) {
ASSERT_TRUE(histogram.HistogramExists(i));
}
// Erase existing nidx_map.
for (size_t i = kNNodes; i < kNNodes * 2; ++i) {
histogram.AllocateHistogram(i);
}
for (size_t i = 0; i < kNNodes; ++i) {
ASSERT_FALSE(histogram.HistogramExists(i));
}
}
};
}
std::vector<GradientPairPrecise> GetHostHistGpair() {
// 24 bins, 3 bins for each feature (column).
std::vector<GradientPairPrecise> hist_gpair = {
{0.8314f, 0.7147f}, {1.7989f, 3.7312f}, {3.3846f, 3.4598f},
{2.9277f, 3.5886f}, {1.8429f, 2.4152f}, {1.2443f, 1.9019f},
{1.6380f, 2.9174f}, {1.5657f, 2.5107f}, {2.8111f, 2.4776f},
{2.1322f, 3.0651f}, {3.2927f, 3.8540f}, {0.5899f, 0.9866f},
{1.5185f, 1.6263f}, {2.0686f, 3.1844f}, {2.4278f, 3.0950f},
{1.5105f, 2.1403f}, {2.6922f, 4.2217f}, {1.8122f, 1.5437f},
{0.0000f, 0.0000f}, {4.3245f, 5.7955f}, {1.6903f, 2.1103f},
{2.4012f, 4.4754f}, {3.6136f, 3.4303f}, {0.0000f, 0.0000f}
};
return hist_gpair;
}
template <typename GradientSumT>
void TestBuildHist(bool use_shared_memory_histograms) {
int const kNRows = 16, kNCols = 8;
TrainParam param;
std::vector<std::pair<std::string, std::string>> args {
{"max_depth", "6"},
{"max_leaves", "0"},
};
param.Init(args);
auto page = BuildEllpackPage(kNRows, kNCols);
BatchParam batch_param{};
GPUHistMakerDevice<GradientSumT> maker(0, page.get(), kNRows, param, kNCols, kNCols, batch_param);
maker.InitHistogram();
xgboost::SimpleLCG gen;
xgboost::SimpleRealUniformDistribution<bst_float> dist(0.0f, 1.0f);
std::vector<GradientPair> h_gpair(kNRows);
for (auto &gpair : h_gpair) {
bst_float grad = dist(&gen);
bst_float hess = dist(&gen);
gpair = GradientPair(grad, hess);
}
thrust::host_vector<common::CompressedByteT> h_gidx_buffer (page->gidx_buffer.size());
common::CompressedByteT* d_gidx_buffer_ptr = page->gidx_buffer.data();
dh::safe_cuda(cudaMemcpy(h_gidx_buffer.data(), d_gidx_buffer_ptr,
sizeof(common::CompressedByteT) * page->gidx_buffer.size(),
cudaMemcpyDeviceToHost));
maker.row_partitioner.reset(new RowPartitioner(0, kNRows));
maker.hist.AllocateHistogram(0);
dh::CopyVectorToDeviceSpan(maker.gpair, h_gpair);
maker.use_shared_memory_histograms = use_shared_memory_histograms;
maker.BuildHist(0);
DeviceHistogram<GradientSumT> d_hist = maker.hist;
auto node_histogram = d_hist.GetNodeHistogram(0);
// d_hist.data stored in float, not gradient pair
thrust::host_vector<GradientSumT> h_result (d_hist.Data().size() / 2);
size_t data_size =
sizeof(GradientSumT) /
(sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT));
data_size *= d_hist.Data().size();
dh::safe_cuda(cudaMemcpy(h_result.data(), node_histogram.data(), data_size,
cudaMemcpyDeviceToHost));
std::vector<GradientPairPrecise> solution = GetHostHistGpair();
std::cout << std::fixed;
for (size_t i = 0; i < h_result.size(); ++i) {
EXPECT_NEAR(h_result[i].GetGrad(), solution[i].GetGrad(), 0.01f);
EXPECT_NEAR(h_result[i].GetHess(), solution[i].GetHess(), 0.01f);
}
}
TEST(GpuHist, BuildHistGlobalMem) {
TestBuildHist<GradientPairPrecise>(false);
TestBuildHist<GradientPair>(false);
}
TEST(GpuHist, BuildHistSharedMem) {
TestBuildHist<GradientPairPrecise>(true);
TestBuildHist<GradientPair>(true);
}
HistogramCutsWrapper GetHostCutMatrix () {
HistogramCutsWrapper cmat;
cmat.SetPtrs({0, 3, 6, 9, 12, 15, 18, 21, 24});
cmat.SetMins({0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f, 0.2f, 0.2f});
// 24 cut fields, 3 cut fields for each feature (column).
// Each row of the cut represents the cuts for a data column.
cmat.SetValues({0.30f, 0.67f, 1.64f,
0.32f, 0.77f, 1.95f,
0.29f, 0.70f, 1.80f,
0.32f, 0.75f, 1.85f,
0.18f, 0.59f, 1.69f,
0.25f, 0.74f, 2.00f,
0.26f, 0.74f, 1.98f,
0.26f, 0.71f, 1.83f});
return cmat;
}
// TODO(trivialfis): This test is oversimplified.
TEST(GpuHist, EvaluateSplits) {
constexpr int kNRows = 16;
constexpr int kNCols = 8;
TrainParam param;
std::vector<std::pair<std::string, std::string>> args {
{"max_depth", "1"},
{"max_leaves", "0"},
// Disable all other parameters.
{"colsample_bynode", "1"},
{"colsample_bylevel", "1"},
{"colsample_bytree", "1"},
{"min_child_weight", "0.01"},
{"reg_alpha", "0"},
{"reg_lambda", "0"},
{"max_delta_step", "0"}
};
param.Init(args);
for (size_t i = 0; i < kNCols; ++i) {
param.monotone_constraints.emplace_back(0);
}
int max_bins = 4;
// Initialize GPUHistMakerDevice
auto page = BuildEllpackPage(kNRows, kNCols);
BatchParam batch_param{};
GPUHistMakerDevice<GradientPairPrecise>
maker(0, page.get(), kNRows, param, kNCols, kNCols, batch_param);
// Initialize GPUHistMakerDevice::node_sum_gradients
maker.node_sum_gradients = {{6.4f, 12.8f}};
// Initialize GPUHistMakerDevice::cut
auto cmat = GetHostCutMatrix();
// Copy cut matrix to device.
maker.ba.Allocate(0,
&(page->matrix.info.feature_segments), cmat.Ptrs().size(),
&(page->matrix.info.min_fvalue), cmat.MinValues().size(),
&(page->matrix.info.gidx_fvalue_map), 24,
&(maker.monotone_constraints), kNCols);
dh::CopyVectorToDeviceSpan(page->matrix.info.feature_segments, cmat.Ptrs());
dh::CopyVectorToDeviceSpan(page->matrix.info.gidx_fvalue_map, cmat.Values());
dh::CopyVectorToDeviceSpan(maker.monotone_constraints, param.monotone_constraints);
dh::CopyVectorToDeviceSpan(page->matrix.info.min_fvalue, cmat.MinValues());
// Initialize GPUHistMakerDevice::hist
maker.hist.Init(0, (max_bins - 1) * kNCols);
maker.hist.AllocateHistogram(0);
// Each row of hist_gpair represents gpairs for one feature.
// Each entry represents a bin.
std::vector<GradientPairPrecise> hist_gpair = GetHostHistGpair();
std::vector<bst_float> hist;
for (auto pair : hist_gpair) {
hist.push_back(pair.GetGrad());
hist.push_back(pair.GetHess());
}
ASSERT_EQ(maker.hist.Data().size(), hist.size());
thrust::copy(hist.begin(), hist.end(),
maker.hist.Data().begin());
maker.column_sampler.Init(kNCols,
param.colsample_bynode,
param.colsample_bylevel,
param.colsample_bytree,
false);
RegTree tree;
MetaInfo info;
info.num_row_ = kNRows;
info.num_col_ = kNCols;
maker.node_value_constraints.resize(1);
maker.node_value_constraints[0].lower_bound = -1.0;
maker.node_value_constraints[0].upper_bound = 1.0;
std::vector<DeviceSplitCandidate> res = maker.EvaluateSplits({0, 0 }, tree, kNCols);
ASSERT_EQ(res[0].findex, 7);
ASSERT_EQ(res[1].findex, 7);
ASSERT_NEAR(res[0].fvalue, 0.26, xgboost::kRtEps);
ASSERT_NEAR(res[1].fvalue, 0.26, xgboost::kRtEps);
}
void TestHistogramIndexImpl() {
// Test if the compressed histogram index matches when using a sparse
// dmatrix with and without using external memory
int constexpr kNRows = 1000, kNCols = 10;
// Build 2 matrices and build a histogram maker with that
tree::GPUHistMakerSpecialised<GradientPairPrecise> hist_maker, hist_maker_ext;
std::unique_ptr<DMatrix> hist_maker_dmat(
CreateSparsePageDMatrixWithRC(kNRows, kNCols, 0, true));
dmlc::TemporaryDirectory tempdir;
std::unique_ptr<DMatrix> hist_maker_ext_dmat(
CreateSparsePageDMatrixWithRC(kNRows, kNCols, 128UL, true, tempdir));
std::vector<std::pair<std::string, std::string>> training_params = {
{"max_depth", "10"},
{"max_leaves", "0"}
};
GenericParameter generic_param(CreateEmptyGenericParam(0));
hist_maker.Configure(training_params, &generic_param);
hist_maker.InitDataOnce(hist_maker_dmat.get());
hist_maker_ext.Configure(training_params, &generic_param);
hist_maker_ext.InitDataOnce(hist_maker_ext_dmat.get());
// Extract the device maker from the histogram makers and from that its compressed
// histogram index
const auto &maker = hist_maker.maker;
std::vector<common::CompressedByteT> h_gidx_buffer(maker->page->gidx_buffer.size());
dh::CopyDeviceSpanToVector(&h_gidx_buffer, maker->page->gidx_buffer);
const auto &maker_ext = hist_maker_ext.maker;
std::vector<common::CompressedByteT> h_gidx_buffer_ext(maker_ext->page->gidx_buffer.size());
dh::CopyDeviceSpanToVector(&h_gidx_buffer_ext, maker_ext->page->gidx_buffer);
ASSERT_EQ(maker->page->matrix.info.n_bins, maker_ext->page->matrix.info.n_bins);
ASSERT_EQ(maker->page->gidx_buffer.size(), maker_ext->page->gidx_buffer.size());
ASSERT_EQ(h_gidx_buffer, h_gidx_buffer_ext);
}
TEST(GpuHist, TestHistogramIndex) {
TestHistogramIndexImpl();
}
// gamma is an alias of min_split_loss
int32_t TestMinSplitLoss(DMatrix* dmat, float gamma, HostDeviceVector<GradientPair>* gpair) {
Args args {
{"max_depth", "1"},
{"max_leaves", "0"},
// Disable all other parameters.
{"colsample_bynode", "1"},
{"colsample_bylevel", "1"},
{"colsample_bytree", "1"},
{"min_child_weight", "0.01"},
{"reg_alpha", "0"},
{"reg_lambda", "0"},
{"max_delta_step", "0"},
// test gamma
{"gamma", std::to_string(gamma)}
};
tree::GPUHistMakerSpecialised<GradientPairPrecise> hist_maker;
GenericParameter generic_param(CreateEmptyGenericParam(0));
hist_maker.Configure(args, &generic_param);
RegTree tree;
hist_maker.Update(gpair, dmat, {&tree});
auto n_nodes = tree.NumExtraNodes();
return n_nodes;
}
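// Fills a gradient vector with one uniformly distributed (gradient, hessian) pair per row,
// drawn from xgboost's SimpleLCG test generator.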
HostDeviceVector<GradientPair> GenerateRandomGradients(const size_t n_rows) {
xgboost::SimpleLCG gen;
xgboost::SimpleRealUniformDistribution<bst_float> dist(0.0f, 1.0f);
std::vector<GradientPair> h_gpair(n_rows);
for (auto &gpair : h_gpair) {
bst_float grad = dist(&gen);
bst_float hess = dist(&gen);
gpair = GradientPair(grad, hess);
}
HostDeviceVector<GradientPair> gpair(h_gpair);
return gpair;
}
TEST(GpuHist, MinSplitLoss) {
constexpr size_t kRows = 32;
constexpr size_t kCols = 16;
constexpr float kSparsity = 0.6;
auto dmat = CreateDMatrix(kRows, kCols, kSparsity, 3);
auto gpair = GenerateRandomGradients(kRows);
{
int32_t n_nodes = TestMinSplitLoss((*dmat).get(), 0.01, &gpair);
// This is not strictly verified: the number `2` is simply what GPU Hist returned
// when this test was written, and it is only used to check that the larger gamma
// (below) does prevent the tree from growing.
ASSERT_EQ(n_nodes, 2);
}
{
int32_t n_nodes = TestMinSplitLoss((*dmat).get(), 100.0, &gpair);
// No new nodes with gamma == 100.
ASSERT_EQ(n_nodes, static_cast<decltype(n_nodes)>(0));
}
delete dmat;
}
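// Trains a single tree with the GPU hist updater and fills `preds` from its prediction
// cache; when gpu_page_size > 0 the test first verifies that the DMatrix really is split
// into multiple ELLPACK batches.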
void UpdateTree(HostDeviceVector<GradientPair>* gpair,
DMatrix* dmat,
size_t gpu_page_size,
RegTree* tree,
HostDeviceVector<bst_float>* preds) {
constexpr size_t kMaxBin = 2;
if (gpu_page_size > 0) {
// Loop over the batches and count the records
int64_t batch_count = 0;
int64_t row_count = 0;
for (const auto& batch : dmat->GetBatches<EllpackPage>({0, kMaxBin, 0, gpu_page_size})) {
EXPECT_LT(batch.Size(), dmat->Info().num_row_);
batch_count++;
row_count += batch.Size();
}
EXPECT_GE(batch_count, 2);
EXPECT_EQ(row_count, dmat->Info().num_row_);
}
Args args{
{"max_depth", "2"},
{"max_bin", std::to_string(kMaxBin)},
{"min_child_weight", "0.0"},
{"reg_alpha", "0"},
{"reg_lambda", "0"}
};
tree::GPUHistMakerSpecialised<GradientPairPrecise> hist_maker;
GenericParameter generic_param(CreateEmptyGenericParam(0));
generic_param.gpu_page_size = gpu_page_size;
hist_maker.Configure(args, &generic_param);
hist_maker.Update(gpair, dmat, {tree});
hist_maker.UpdatePredictionCache(dmat, preds);
}
TEST(GpuHist, ExternalMemory) {
constexpr size_t kRows = 6;
constexpr size_t kCols = 2;
constexpr size_t kPageSize = 1;
// Create an in-memory DMatrix.
std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, 0, true));
// Create a DMatrix with multiple batches.
dmlc::TemporaryDirectory tmpdir;
std::unique_ptr<DMatrix>
dmat_ext(CreateSparsePageDMatrixWithRC(kRows, kCols, kPageSize, true, tmpdir));
auto gpair = GenerateRandomGradients(kRows);
// Build a tree using the in-memory DMatrix.
RegTree tree;
HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
UpdateTree(&gpair, dmat.get(), 0, &tree, &preds);
// Build another tree using multiple ELLPACK pages.
RegTree tree_ext;
HostDeviceVector<bst_float> preds_ext(kRows, 0.0, 0);
UpdateTree(&gpair, dmat_ext.get(), kPageSize, &tree_ext, &preds_ext);
// Make sure the predictions are the same.
auto preds_h = preds.ConstHostVector();
auto preds_ext_h = preds_ext.ConstHostVector();
for (int i = 0; i < kRows; i++) {
ASSERT_FLOAT_EQ(preds_h[i], preds_ext_h[i]);
}
}
} // namespace tree
} // namespace xgboost
|
8cfbae6c7b3a2338559e7aa747cca199232e5ed0.hip | // !!! This is a file automatically generated by hipify!!!
#include "memory.h"
int createcontribAspace(Data* data)
{
int fullsize;
int dataSize;
fullsize = data->mapInfo.width * data->mapInfo.height;
dataSize = fullsize * sizeof(int);
data->contribA = (int *) malloc(dataSize);
fprintf(data->outlog,"Host memory allocation for contribA \n");
return 0;
}
int clearcontribAspace(Data* data)
{
free(data->contribA);
//free(data->watershed_id); // need to clear this?
return 0;
}
int createfilenamespace(Data* data)
{
data->heightfile = (char*) malloc(sizeof(char) *100);
data->diff_file= (char*) malloc(sizeof(char) *100);
data->FDfile = (char*) malloc(sizeof(char) *100);
data->FAfile = (char*) malloc(sizeof(char) *100);
data->Precipfile = (char*) malloc(sizeof(char) *100);
data->Tempfile = (char*) malloc(sizeof(char) *100);
data->erofile = (char*) malloc(sizeof(char) *100);
data-> incifile = (char*) malloc(sizeof(char) *100);
data->gelifile = (char*) malloc(sizeof(char) *100);
data->depofile = (char*) malloc(sizeof(char) *100);
data->slopefile = (char*) malloc(sizeof(char) *100);
data->finesfile = (char*) malloc(sizeof(char) *100);
data->stonesfile = (char*) malloc(sizeof(char) *100);
data->totbiofile = (char*) malloc(sizeof(char) *100);
data->soilTfile = (char*) malloc(sizeof(char) *100);
data->nutfile = (char*) malloc(sizeof(char) *100);
data->soilMfile = (char*) malloc(sizeof(char) *100);
data->soilBfile = (char*) malloc(sizeof(char) *100);
data->wCfile = (char*) malloc(sizeof(char) *100);
data->wPfile = (char*) malloc(sizeof(char) *100);
data->catchmap = (char*) malloc(sizeof(char) *100);
data->catchmask = (char*) malloc(sizeof(char) *100);
data->contrib = (char*) malloc(sizeof(char) *100);
data->rivermaskfile = (char*) malloc(sizeof(char) *100);
data->flatfile = (char*) malloc(sizeof(char) *100);
data-> logfile = (char*) malloc(sizeof(char) *100);
data->outfilename = (char*) malloc(sizeof(char) *100);
data->matrixDIR = (char*) malloc(sizeof(char) *100);
data->modelcode = (char*) malloc(sizeof(char) *100);
data->outputfilefile = (char*) malloc(sizeof(char) *100);
data->bedrockfile = (char*) malloc(sizeof(char) *100);
data->demfile = (char*) malloc(sizeof(char) *100);
data->clim_file = (char*) malloc(sizeof(char) *100);
data->dummystring = (char*) malloc(sizeof(char) *100);
data->Burnfile = (char*) malloc(sizeof(char) *100);
return(1);
}
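// Allocates page-locked (pinned) host memory for every per-cell process grid so that
// later host/device transfers can run at full speed; freed in deleteProcessMatrices().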
int createProcessMatrices(Data* data)
{
int fullsize;
int dataSize;
int dataSizeInt;
fullsize = data->mapInfo.width * data->mapInfo.height;
dataSize = fullsize * sizeof(double);
dataSizeInt = fullsize * sizeof(int);
// these are the static grids in which data is stored from one iteration to the next ie. these are ONLY freed at the end of the simulation
checkCudaErrors(hipHostMalloc((void **)&data->prevdem, dataSize));
checkCudaErrors(hipHostMalloc((void **)&data->diffdem, dataSize));
checkCudaErrors(hipHostMalloc((void **)&data->fd, dataSizeInt ));
checkCudaErrors(hipHostMalloc((void **)&data->SFD, dataSizeInt ));
checkCudaErrors(hipHostMalloc((void**)&data->prevfd, dataSizeInt));
fprintf(data->outlog, "Flow direction (fd and SFD) space on host allocated %s\n", hipGetErrorString(hipGetLastError()));
checkCudaErrors(hipHostMalloc((void **)&data->fa, dataSize));
fprintf(data->outlog, "Flow accumulation space on host allocated %s\n", hipGetErrorString(hipGetLastError()));
checkCudaErrors(hipHostMalloc((void **)&data->SlopePtr, dataSize));
checkCudaErrors(hipHostMalloc((void **)&data->flatmask, dataSizeInt ));
// room to store the slopes and proportions in all directions
int fullsize8;
fullsize8 = dataSize * 8;
checkCudaErrors(hipHostMalloc((void **)&data->Slopes, fullsize8 ));
checkCudaErrors(hipHostMalloc((void **)&data->prop, fullsize8 ));
checkCudaErrors(hipHostMalloc((void **)&data->runoffweight, dataSize));
checkCudaErrors(hipHostMalloc((void **)&data->stonePtr, dataSize));
checkCudaErrors(hipHostMalloc((void **)&data->finesPtr, dataSize));
checkCudaErrors(hipHostMalloc((void **)&data->soilMPtr, dataSize));
checkCudaErrors(hipHostMalloc((void **)&data->soilBPtr, dataSize));
//checkCudaErrors(hipHostMalloc((void **)&data->soilTPtr, dataSize));
checkCudaErrors(hipHostMalloc((void **)&data->nutPtr, dataSize));
checkCudaErrors(hipHostMalloc((void **)&data->TotBPtr, dataSize));
checkCudaErrors(hipHostMalloc((void **)&data->eroPtr, dataSize));
checkCudaErrors(hipHostMalloc((void **)&data->geliPtr, dataSize));
checkCudaErrors(hipHostMalloc((void **)&data->inciPtr, dataSize));
checkCudaErrors(hipHostMalloc((void **)&data->depoPtr, dataSize));
checkCudaErrors(hipHostMalloc((void **)&data->dz, dataSize));
checkCudaErrors(hipHostMalloc((void **)&data->weatherC, dataSize));
checkCudaErrors(hipHostMalloc((void **)&data->weatherP, dataSize));
fprintf(data->outlog, "All hosts matrices memory allocated %s\n", hipGetErrorString(hipGetLastError()));
return 0;
}
int deleteProcessMatrices(Data* data)
{
//checkCudaErrors(hipHostFree(data->dem));
checkCudaErrors(hipHostFree(data->prevdem));
checkCudaErrors(hipHostFree(data->diffdem));
checkCudaErrors(hipHostFree(data->dz));
checkCudaErrors(hipHostFree(data->fd));
checkCudaErrors(hipHostFree(data->fa));
checkCudaErrors(hipHostFree(data->SlopePtr));
checkCudaErrors(hipHostFree(data->rainmat));
checkCudaErrors(hipHostFree(data->tempmat));
checkCudaErrors(hipHostFree(data->last_rainmat));
checkCudaErrors(hipHostFree(data->last_tempmat));
checkCudaErrors(hipHostFree(data->mask));
checkCudaErrors(hipHostFree(data->flatmask));
//checkCudaErrors(hipHostFree(data->Flow_C));
// free the slope and prop matrices
checkCudaErrors(hipHostFree(data->Slopes));
checkCudaErrors(hipHostFree(data->prop));
checkCudaErrors(hipHostFree(data->runoffweight));
checkCudaErrors(hipHostFree(data->stonePtr));
checkCudaErrors(hipHostFree(data->finesPtr));
checkCudaErrors(hipHostFree(data->soilMPtr));
checkCudaErrors(hipHostFree(data->soilBPtr));
checkCudaErrors(hipHostFree(data->soilTPtr));
checkCudaErrors(hipHostFree(data->nutPtr));
checkCudaErrors(hipHostFree(data->TotBPtr));
checkCudaErrors(hipHostFree(data->eroPtr));
checkCudaErrors(hipHostFree(data->geliPtr));
checkCudaErrors(hipHostFree(data->inciPtr));
checkCudaErrors(hipHostFree(data->depoPtr));
// checkCudaErrors(hipHostFree(data->dz));
checkCudaErrors(hipHostFree(data->weatherC));
checkCudaErrors(hipHostFree(data->weatherP));
fprintf(data->outlog, "All hosts matrices memory freed \n");
return 0;
}
//////////////////////////////////////////////////////////////////////////////
// Setup store for catchment data ( needed for summary outputs etc)
//////////////////////////////////////////////////////////////////////////////
int createCatchmentSpace(Data* data, Catchment* Catchments) {
//allocate space for catchment data and selective list and set values to zero
Catchments->watershed_id = (int *) calloc(sizeof(int) , data->mapInfo.height * data->mapInfo.width);
Catchments->mask = (int *) calloc(sizeof(int), data->mapInfo.height * data->mapInfo.width); // all mask values set to zero
fprintf(data->outlog, "Catchment space allocated \n");
return 0;
}
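// Seeds soil thickness from elevation: cells with positive elevation get 5*(dem-400)/1400
// (0 at elevation 400, rising to 5 at elevation 1800); all other cells are set to zero.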
void createSoilTfromformula(Data* data){
int cell;
checkCudaErrors(hipHostMalloc((void **)&data->soilTPtr, data->mapInfo.width * data->mapInfo.height * sizeof(double)));
// what if the lowest cell is currently in the grid!
for (int i = 0; i < data->mapInfo.height; i++){
for (int j = 0; j < data->mapInfo.width; j++) {
cell = i*data->mapInfo.width + j;
if ( (data->dem[cell]) > 0 ) { data->soilTPtr[cell] = ( (data->dem[cell] - 400) / 1400 ) * 5;}
else { data->soilTPtr[cell] = 0.0;}
}
}
printf( "Soil Thickness Data Created \n");
return;
}
int createmask(Data* data)
{
int width = data->mapInfo.width;
int height = data->mapInfo.height ;
int fullsize = width * height;
double nodataold;
nodataold = data->mapInfo.nodata;
printf("DEM old no data value = %.6f will be reset to -9999\n", data->mapInfo.nodata);
checkCudaErrors(hipHostMalloc((void **)&data->mask, fullsize*sizeof(int) ));
for (int x = 0; x < fullsize; x++) {
data->mask[x] = 1;
if (data->dem[x] == -9999)
{
data->mask[x] = 0;
//data->dem[x] = -9999; //reset the no data value to -9999
}
}
data->mapInfo.nodata = -9999;
return 1;
}
| 8cfbae6c7b3a2338559e7aa747cca199232e5ed0.cu | #include "memory.h"
int createcontribAspace(Data* data)
{
int fullsize;
int dataSize;
fullsize = data->mapInfo.width * data->mapInfo.height;
dataSize = fullsize * sizeof(int);
data->contribA = (int *) malloc(dataSize);
fprintf(data->outlog,"Host memory allocation for contribA \n");
return 0;
}
int clearcontribAspace(Data* data)
{
free(data->contribA);
//free(data->watershed_id); // need to clear this?
return 0;
}
int createfilenamespace(Data* data)
{
data->heightfile = (char*) malloc(sizeof(char) *100);
data->diff_file= (char*) malloc(sizeof(char) *100);
data->FDfile = (char*) malloc(sizeof(char) *100);
data->FAfile = (char*) malloc(sizeof(char) *100);
data->Precipfile = (char*) malloc(sizeof(char) *100);
data->Tempfile = (char*) malloc(sizeof(char) *100);
data->erofile = (char*) malloc(sizeof(char) *100);
data-> incifile = (char*) malloc(sizeof(char) *100);
data->gelifile = (char*) malloc(sizeof(char) *100);
data->depofile = (char*) malloc(sizeof(char) *100);
data->slopefile = (char*) malloc(sizeof(char) *100);
data->finesfile = (char*) malloc(sizeof(char) *100);
data->stonesfile = (char*) malloc(sizeof(char) *100);
data->totbiofile = (char*) malloc(sizeof(char) *100);
data->soilTfile = (char*) malloc(sizeof(char) *100);
data->nutfile = (char*) malloc(sizeof(char) *100);
data->soilMfile = (char*) malloc(sizeof(char) *100);
data->soilBfile = (char*) malloc(sizeof(char) *100);
data->wCfile = (char*) malloc(sizeof(char) *100);
data->wPfile = (char*) malloc(sizeof(char) *100);
data->catchmap = (char*) malloc(sizeof(char) *100);
data->catchmask = (char*) malloc(sizeof(char) *100);
data->contrib = (char*) malloc(sizeof(char) *100);
data->rivermaskfile = (char*) malloc(sizeof(char) *100);
data->flatfile = (char*) malloc(sizeof(char) *100);
data-> logfile = (char*) malloc(sizeof(char) *100);
data->outfilename = (char*) malloc(sizeof(char) *100);
data->matrixDIR = (char*) malloc(sizeof(char) *100);
data->modelcode = (char*) malloc(sizeof(char) *100);
data->outputfilefile = (char*) malloc(sizeof(char) *100);
data->bedrockfile = (char*) malloc(sizeof(char) *100);
data->demfile = (char*) malloc(sizeof(char) *100);
data->clim_file = (char*) malloc(sizeof(char) *100);
data->dummystring = (char*) malloc(sizeof(char) *100);
data->Burnfile = (char*) malloc(sizeof(char) *100);
return(1);
}
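// Allocates page-locked (pinned) host memory for every per-cell process grid so that
// later host/device transfers can run at full speed; freed in deleteProcessMatrices().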
int createProcessMatrices(Data* data)
{
int fullsize;
int dataSize;
int dataSizeInt;
fullsize = data->mapInfo.width * data->mapInfo.height;
dataSize = fullsize * sizeof(double);
dataSizeInt = fullsize * sizeof(int);
// these are the static grids in which data is stored from one iteration to the next ie. these are ONLY freed at the end of the simulation
checkCudaErrors(cudaMallocHost((void **)&data->prevdem, dataSize));
checkCudaErrors(cudaMallocHost((void **)&data->diffdem, dataSize));
checkCudaErrors(cudaMallocHost((void **)&data->fd, dataSizeInt ));
checkCudaErrors(cudaMallocHost((void **)&data->SFD, dataSizeInt ));
checkCudaErrors(cudaMallocHost((void**)&data->prevfd, dataSizeInt));
fprintf(data->outlog, "Flow direction (fd and SFD) space on host allocated %s\n", cudaGetErrorString(cudaGetLastError()));
checkCudaErrors(cudaMallocHost((void **)&data->fa, dataSize));
fprintf(data->outlog, "Flow accumulation space on host allocated %s\n", cudaGetErrorString(cudaGetLastError()));
checkCudaErrors(cudaMallocHost((void **)&data->SlopePtr, dataSize));
checkCudaErrors(cudaMallocHost((void **)&data->flatmask, dataSizeInt ));
// room to store the slopes and proportions in all directions
int fullsize8;
fullsize8 = dataSize * 8;
checkCudaErrors(cudaMallocHost((void **)&data->Slopes, fullsize8 ));
checkCudaErrors(cudaMallocHost((void **)&data->prop, fullsize8 ));
checkCudaErrors(cudaMallocHost((void **)&data->runoffweight, dataSize));
checkCudaErrors(cudaMallocHost((void **)&data->stonePtr, dataSize));
checkCudaErrors(cudaMallocHost((void **)&data->finesPtr, dataSize));
checkCudaErrors(cudaMallocHost((void **)&data->soilMPtr, dataSize));
checkCudaErrors(cudaMallocHost((void **)&data->soilBPtr, dataSize));
//checkCudaErrors(cudaMallocHost((void **)&data->soilTPtr, dataSize));
checkCudaErrors(cudaMallocHost((void **)&data->nutPtr, dataSize));
checkCudaErrors(cudaMallocHost((void **)&data->TotBPtr, dataSize));
checkCudaErrors(cudaMallocHost((void **)&data->eroPtr, dataSize));
checkCudaErrors(cudaMallocHost((void **)&data->geliPtr, dataSize));
checkCudaErrors(cudaMallocHost((void **)&data->inciPtr, dataSize));
checkCudaErrors(cudaMallocHost((void **)&data->depoPtr, dataSize));
checkCudaErrors(cudaMallocHost((void **)&data->dz, dataSize));
checkCudaErrors(cudaMallocHost((void **)&data->weatherC, dataSize));
checkCudaErrors(cudaMallocHost((void **)&data->weatherP, dataSize));
fprintf(data->outlog, "All hosts matrices memory allocated %s\n", cudaGetErrorString(cudaGetLastError()));
return 0;
}
int deleteProcessMatrices(Data* data)
{
//checkCudaErrors(cudaFreeHost(data->dem));
checkCudaErrors(cudaFreeHost(data->prevdem));
checkCudaErrors(cudaFreeHost(data->diffdem));
checkCudaErrors(cudaFreeHost(data->dz));
checkCudaErrors(cudaFreeHost(data->fd));
checkCudaErrors(cudaFreeHost(data->fa));
checkCudaErrors(cudaFreeHost(data->SlopePtr));
checkCudaErrors(cudaFreeHost(data->rainmat));
checkCudaErrors(cudaFreeHost(data->tempmat));
checkCudaErrors(cudaFreeHost(data->last_rainmat));
checkCudaErrors(cudaFreeHost(data->last_tempmat));
checkCudaErrors(cudaFreeHost(data->mask));
checkCudaErrors(cudaFreeHost(data->flatmask));
//checkCudaErrors(cudaFreeHost(data->Flow_C));
// free the slope and prop matrices
checkCudaErrors(cudaFreeHost(data->Slopes));
checkCudaErrors(cudaFreeHost(data->prop));
checkCudaErrors(cudaFreeHost(data->runoffweight));
checkCudaErrors(cudaFreeHost(data->stonePtr));
checkCudaErrors(cudaFreeHost(data->finesPtr));
checkCudaErrors(cudaFreeHost(data->soilMPtr));
checkCudaErrors(cudaFreeHost(data->soilBPtr));
checkCudaErrors(cudaFreeHost(data->soilTPtr));
checkCudaErrors(cudaFreeHost(data->nutPtr));
checkCudaErrors(cudaFreeHost(data->TotBPtr));
checkCudaErrors(cudaFreeHost(data->eroPtr));
checkCudaErrors(cudaFreeHost(data->geliPtr));
checkCudaErrors(cudaFreeHost(data->inciPtr));
checkCudaErrors(cudaFreeHost(data->depoPtr));
// checkCudaErrors(cudaFreeHost(data->dz));
checkCudaErrors(cudaFreeHost(data->weatherC));
checkCudaErrors(cudaFreeHost(data->weatherP));
fprintf(data->outlog, "All hosts matrices memory freed \n");
return 0;
}
//////////////////////////////////////////////////////////////////////////////
// Setup store for catchment data ( needed for summary outputs etc)
//////////////////////////////////////////////////////////////////////////////
int createCatchmentSpace(Data* data, Catchment* Catchments) {
//allocate space for catchment data and selective list and set values to zero
Catchments->watershed_id = (int *) calloc(sizeof(int) , data->mapInfo.height * data->mapInfo.width);
Catchments->mask = (int *) calloc(sizeof(int), data->mapInfo.height * data->mapInfo.width); // all mask values set to zero
fprintf(data->outlog, "Catchment space allocated \n");
return 0;
}
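// Seeds soil thickness from elevation: cells with positive elevation get 5*(dem-400)/1400
// (0 at elevation 400, rising to 5 at elevation 1800); all other cells are set to zero.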
void createSoilTfromformula(Data* data){
int cell;
checkCudaErrors(cudaMallocHost((void **)&data->soilTPtr, data->mapInfo.width * data->mapInfo.height * sizeof(double)));
// what if the lowest cell is currently in the grid!
for (int i = 0; i < data->mapInfo.height; i++){
for (int j = 0; j < data->mapInfo.width; j++) {
cell = i*data->mapInfo.width + j;
if ( (data->dem[cell]) > 0 ) { data->soilTPtr[cell] = ( (data->dem[cell] - 400) / 1400 ) * 5;}
else { data->soilTPtr[cell] = 0.0;}
}
}
printf( "Soil Thickness Data Created \n");
return;
}
int createmask(Data* data)
{
int width = data->mapInfo.width;
int height = data->mapInfo.height ;
int fullsize = width * height;
double nodataold;
nodataold = data->mapInfo.nodata;
printf("DEM old no data value = %.6f will be reset to -9999\n", data->mapInfo.nodata);
checkCudaErrors(cudaMallocHost((void **)&data->mask, fullsize*sizeof(int) ));
for (int x = 0; x < fullsize; x++) {
data->mask[x] = 1;
if (data->dem[x] == -9999)
{
data->mask[x] = 0;
//data->dem[x] = -9999; //reset the no data value to -9999
}
}
data->mapInfo.nodata = -9999;
return 1;
}
|
d2b53e08537c66e48bd54c6bd5921a5c6bcc299b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* spGPU - Sparse matrices on GPU library.
*
* Copyright (C) 2010 - 2015
* Davide Barbieri - University of Rome Tor Vergata
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 3 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "cudadebug.h"
#include "cudalang.h"
#include "hip/hip_complex.h"
extern "C"
{
#include "core.h"
#include "dia.h"
}
#include "debug.h"
#define VALUE_TYPE hipDoubleComplex
#define TYPE_SYMBOL Z
#define TEX_FETCH_TYPE int4
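// The include below instantiates the templated DIA SpMV implementation for double-complex
// values, driven by the VALUE_TYPE / TYPE_SYMBOL / TEX_FETCH_TYPE macros defined above.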
#include "dia_spmv_base.cuh"
| d2b53e08537c66e48bd54c6bd5921a5c6bcc299b.cu | /*
* spGPU - Sparse matrices on GPU library.
*
* Copyright (C) 2010 - 2015
* Davide Barbieri - University of Rome Tor Vergata
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 3 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "cudadebug.h"
#include "cudalang.h"
#include "cuComplex.h"
extern "C"
{
#include "core.h"
#include "dia.h"
}
#include "debug.h"
#define VALUE_TYPE cuDoubleComplex
#define TYPE_SYMBOL Z
#define TEX_FETCH_TYPE int4
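// The include below instantiates the templated DIA SpMV implementation for double-complex
// values, driven by the VALUE_TYPE / TYPE_SYMBOL / TEX_FETCH_TYPE macros defined above.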
#include "dia_spmv_base.cuh"
|
181a2f1a57b846d531408243c21f05528ce33864.hip | // !!! This is a file automatically generated by hipify!!!
/*
* spmm_csc_driver.cu
* Copyright (C) 2018
* P Sadayappan (saday) <[email protected]>
* Aravind SUKUMARAN RAJAM (asr) <[email protected]>
*
* Distributed under terms of the GNU LGPL3 license.
*/
#include "mm_helper.hpp"
#include "sparse_representation.hpp"
#include <iostream>
#include <cstdlib>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#define TILE_WIDTH 32
void check_dmat(double* a, double *b, unsigned int n, unsigned int K, bool quit_on_err = true ) {
for (unsigned int i = 0; i < n; ++i) {
for (unsigned int k = 0; k < K; ++k) {
if(std::abs(a[i * K + k] - b[i * K + k]) > 1e-1) {
std::cerr << "Possible error at " << i << std::endl;
if(quit_on_err) {
exit(-1);
}
}
}
}
if(quit_on_err)
std::cout << "Verification succeeded\n";
else
std::cout << "Check error messages to see if verification succeeded. (No error msg == success)\n";
}
static unsigned int g_seed = 0X4B1D;
inline int fastrand() {
g_seed = (214013 * g_seed + 2531011);
return (g_seed >> 16) & 0x7FFF;
}
void init_dmat(double *a, unsigned int n, unsigned int K, double offset) {
for (unsigned int i = 0; i < n; ++i) {
for (unsigned int k = 0; k < K; ++k) {
a[i * K + k] = i * K + k + offset;
//a[i * K + j] = fastrand() + offset;
}
}
}
void print_dmat(double *a, unsigned int n, unsigned int K) {
for (unsigned int i = 0; i < n; ++i) {
for (unsigned int j = 0; j < K; ++j) {
std::cout << a[i * K + j] << ' ';
}
std::cout << '\n';
}
}
void host_csc_spmm(CSC mat, double * dmat_in, double * dmat_out, unsigned int K) {
for (unsigned int r = 0; r < mat.nrows; ++r) {
for (unsigned int k = 0; k < K; ++k) {
dmat_out[r * K + k] = 0;
}
}
for (unsigned int c = 0; c < mat.ncols; ++c) {
unsigned int col_start = mat.col_indx[c];
unsigned int col_end = mat.col_indx[c + 1];
for (unsigned int r = col_start; r < col_end; ++r) {
unsigned int row_id = mat.row_id[r];
double val = mat.values[r];
for (unsigned int k = 0; k < K; ++k) {
dmat_out[row_id * K + k] += val * dmat_in[c * K + k];
}
}
}
}
//Emin Code start
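// CSC SpMM kernel: each thread owns one (sparse column, dense column k) pair, walks the
// nonzeros of that sparse column, and scatters value * dmat_in[col][k] into the matching
// output row with atomicAdd, since different sparse columns can update the same output row.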
__global__ void dev_csc_spmm(unsigned int * deviceCSCcol_indx , unsigned int * deviceCSCrow_id , double * deviceCSCvalues,
double * dmat_in_device, double* dmat_out_device , int K , unsigned int device_ncols , unsigned int device_nrows){
const int row=blockIdx.y * blockDim.y + threadIdx.y;
const int col= blockIdx.x * blockDim.x + threadIdx.x ;
unsigned int numberOfColCSC = device_ncols ;
if ( (col < numberOfColCSC) && (row < K) ) {
//printf(" thread %d , block %d \n", col , row);
//__syncthreads();
double sum=0;
int rowId;
// int row_start = A.row_indx[iy] ;
unsigned int col_start = deviceCSCcol_indx[col];
//printf(" row_start = %d thread %d , block %d \n", row_start, col , row);
// int row_end = A.row_indx[iy + 1] ;
unsigned int col_end = deviceCSCcol_indx[col+1] ;
//printf(" row_end = %d thread %d , block %d \n", row_end, col , row);
for ( int element = col_start; element < col_end; element++) {
/* code */
//colId= A.col_id[i] ;
rowId = deviceCSCrow_id[element] ;
//printf(" rolId = %d thread %d , block %d \n", rowId, col , row);
double value = deviceCSCvalues[element] ;
double value2 = dmat_in_device[col * K + row] ;
//Lets try atomic operation
sum = value * value2;
atomicAdd(&dmat_out_device[rowId * K + row] ,sum );
//printf(" sum = %d ,thread %d , block %d", sum, col , row);
//__syncthreads();
}
}
}
int main(int argc, char *argv[]) {
if(argc < 3) {
std::cerr << "usage ./exec inputfile K " << std::endl;
exit(-1);
}
unsigned int K = std::atoi(argv[2]);
CSC mat = read_matrix_market_to_CSC(argv[1]);
std::cout << mat.nrows << ' ' << mat.ncols << ' ' << mat.nnz << ' ' << K << '\n';
//Cuda Events
// events for timing
hipEvent_t startEvent, stopEvent;
hipEventCreate(&startEvent) ;
hipEventCreate(&stopEvent) ;
//int TILE_WIDTH = K+1;
hipEvent_t startEventMemKer , stopEventMemKer ;
hipEventCreate(&startEventMemKer);
hipEventCreate(&stopEventMemKer) ;
double *dmat_in = (double*)malloc(mat.ncols * K * sizeof(double));
double *dmat_out = (double*)malloc(mat.nrows * K * sizeof(double));
double *dmat_out_D = (double*)malloc(mat.nrows * K * sizeof(double));
double *dmat_out_GPU = (double*)malloc(mat.nrows * K * sizeof(double));
init_dmat(dmat_in, mat.ncols, K, 1.0);
//print_dmat(dmat_in, mat.ncols, K);
host_csc_spmm(mat, dmat_in, dmat_out, K);
unsigned int* deviceCSCcol_indx;
unsigned int* deviceCSCrow_id;
double* deviceCSCvalues;
hipMalloc((void**) &deviceCSCcol_indx ,(mat.ncols +1) * sizeof(unsigned int)) ;
hipMalloc((void**) &deviceCSCrow_id , mat.nnz * sizeof(unsigned int)) ;
hipMalloc((void**) &deviceCSCvalues , mat.nnz * sizeof(double)) ;
double *dmat_in_device ;
hipMalloc((void**) &dmat_in_device , mat.ncols * K * sizeof(double)) ;
double *dmat_out_device ;
hipMalloc((void**) &dmat_out_device, mat.nrows * K * sizeof(double)) ;
// hipMalloc does not zero memory and the kernel accumulates with atomicAdd,
// so clear the output buffer before launch.
hipMemset(dmat_out_device, 0, mat.nrows * K * sizeof(double)) ;
hipEventRecord(startEventMemKer, 0);
hipMemcpy(deviceCSCcol_indx , mat.col_indx , (mat.ncols+1) * sizeof(unsigned int) , hipMemcpyHostToDevice) ;
hipMemcpy(deviceCSCrow_id, mat.row_id , mat.nnz * sizeof(unsigned int) , hipMemcpyHostToDevice) ;
hipMemcpy(deviceCSCvalues , mat.values , mat.nnz * sizeof(double) , hipMemcpyHostToDevice) ;
//copy to device
hipMemcpy( dmat_in_device , dmat_in , mat.ncols * K * sizeof(double) , hipMemcpyHostToDevice ) ;
//Initialize the Grid and Block Dimension
//dim3 dimGrid((K-1) / TILE_WIDTH + 1 , (mat.ncols -1)/TILE_WIDTH +1 , 1 ) ;
//
//dim3 dimGrid( (K-1) / TILE_WIDTH +1 , (mat.ncols -1)/TILE_WIDTH+1 , 1 ) ;
//dim3 dimGrid( (K-1) / TILE_WIDTH +1 , (mat.ncols -1)/TILE_WIDTH+1 , 1 ) ;
dim3 dimGrid( (mat.ncols -1)/TILE_WIDTH+1 , (K-1) / TILE_WIDTH +1 , 1 ) ;
dim3 dimBlock(TILE_WIDTH , TILE_WIDTH , 1) ;
hipEventRecord(startEvent, 0);
hipLaunchKernelGGL(( dev_csc_spmm), dim3(dimGrid) , dim3(dimBlock), 0, 0, deviceCSCcol_indx, deviceCSCrow_id, deviceCSCvalues , dmat_in_device , dmat_out_device , K , mat.ncols, mat.nrows) ;
hipEventRecord(stopEvent, 0) ;
hipEventSynchronize(startEvent);
hipEventSynchronize(stopEvent);
float timeforKernel;
hipEventElapsedTime(&timeforKernel, startEvent, stopEvent) ;
printf(" Time for Kernel : %f\n", timeforKernel);
hipMemcpy(dmat_out_GPU , dmat_out_device ,mat.nrows * K * sizeof(double) , hipMemcpyDeviceToHost ) ;
hipEventRecord(stopEventMemKer, 0) ;
hipEventSynchronize(startEventMemKer);
hipEventSynchronize(stopEventMemKer);
float timeforMemKernel;
hipEventElapsedTime(&timeforMemKernel, startEventMemKer, stopEventMemKer) ;
printf(" Time for Mem Cpy and Kernel : %f\n", timeforMemKernel);
//std::cout << "replace one argument to the below function with the values from gpu " << std::endl;
//std::cout << "replace one argument to the below function with the values from gpu " << std::endl;
//std::cout << "CPU\n";
//print_dmat(dmat_out, mat.nrows , K);
//std::cout << "GPU\n";
//print_dmat(dmat_out_GPU, mat.nrows , K);
check_dmat(dmat_out, dmat_out_GPU, mat.nrows, K);
//Lets compute GFLOP
unsigned int twoKnnz= 2 * K * mat.nnz ;
printf(" 2 * K * nnz : %d\n", twoKnnz);
float GFLOP = (twoKnnz / timeforMemKernel) / 1000000;
printf(" GFLOP : %f\n", GFLOP);
//print_dmat(dmat_out, mat.nrows, K);
free(mat.col_indx);
free(mat.row_id);
free(mat.values);
hipFree(deviceCSCcol_indx) ;
hipFree(deviceCSCrow_id) ;
hipFree(deviceCSCvalues) ;
hipEventDestroy(startEvent) ;
hipEventDestroy(stopEvent ) ;
hipEventDestroy(startEventMemKer);
hipEventDestroy(stopEventMemKer) ;
return 0;
}
| 181a2f1a57b846d531408243c21f05528ce33864.cu | /*
* spmm_csc_driver.cu
* Copyright (C) 2018
* P Sadayappan (saday) <[email protected]>
* Aravind SUKUMARAN RAJAM (asr) <[email protected]>
*
* Distributed under terms of the GNU LGPL3 license.
*/
#include "mm_helper.hpp"
#include "sparse_representation.hpp"
#include <iostream>
#include <cstdlib>
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#define TILE_WIDTH 32
void check_dmat(double* a, double *b, unsigned int n, unsigned int K, bool quit_on_err = true ) {
for (unsigned int i = 0; i < n; ++i) {
for (unsigned int k = 0; k < K; ++k) {
if(std::abs(a[i * K + k] - b[i * K + k]) > 1e-1) {
std::cerr << "Possible error at " << i << std::endl;
if(quit_on_err) {
exit(-1);
}
}
}
}
if(quit_on_err)
std::cout << "Verification succeeded\n";
else
std::cout << "Check error messages to see if verification succeeded. (No error msg == success)\n";
}
static unsigned int g_seed = 0X4B1D;
inline int fastrand() {
g_seed = (214013 * g_seed + 2531011);
return (g_seed >> 16) & 0x7FFF;
}
void init_dmat(double *a, unsigned int n, unsigned int K, double offset) {
for (unsigned int i = 0; i < n; ++i) {
for (unsigned int k = 0; k < K; ++k) {
a[i * K + k] = i * K + k + offset;
//a[i * K + j] = fastrand() + offset;
}
}
}
void print_dmat(double *a, unsigned int n, unsigned int K) {
for (unsigned int i = 0; i < n; ++i) {
for (unsigned int j = 0; j < K; ++j) {
std::cout << a[i * K + j] << ' ';
}
std::cout << '\n';
}
}
void host_csc_spmm(CSC mat, double * dmat_in, double * dmat_out, unsigned int K) {
for (unsigned int r = 0; r < mat.nrows; ++r) {
for (unsigned int k = 0; k < K; ++k) {
dmat_out[r * K + k] = 0;
}
}
for (unsigned int c = 0; c < mat.ncols; ++c) {
unsigned int col_start = mat.col_indx[c];
unsigned int col_end = mat.col_indx[c + 1];
for (unsigned int r = col_start; r < col_end; ++r) {
unsigned int row_id = mat.row_id[r];
double val = mat.values[r];
for (unsigned int k = 0; k < K; ++k) {
dmat_out[row_id * K + k] += val * dmat_in[c * K + k];
}
}
}
}
//Emin Code start
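// CSC SpMM kernel: each thread owns one (sparse column, dense column k) pair, walks the
// nonzeros of that sparse column, and scatters value * dmat_in[col][k] into the matching
// output row with atomicAdd, since different sparse columns can update the same output row.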
__global__ void dev_csc_spmm(unsigned int * deviceCSCcol_indx , unsigned int * deviceCSCrow_id , double * deviceCSCvalues,
double * dmat_in_device, double* dmat_out_device , int K , unsigned int device_ncols , unsigned int device_nrows){
const int row=blockIdx.y * blockDim.y + threadIdx.y;
const int col= blockIdx.x * blockDim.x + threadIdx.x ;
unsigned int numberOfColCSC = device_ncols ;
if ( (col < numberOfColCSC) && (row < K) ) {
//printf(" thread %d , block %d \n", col , row);
//__syncthreads();
double sum=0;
int rowId;
// int row_start = A.row_indx[iy] ;
unsigned int col_start = deviceCSCcol_indx[col];
//printf(" row_start = %d thread %d , block %d \n", row_start, col , row);
// int row_end = A.row_indx[iy + 1] ;
unsigned int col_end = deviceCSCcol_indx[col+1] ;
//printf(" row_end = %d thread %d , block %d \n", row_end, col , row);
for ( int element = col_start; element < col_end; element++) {
/* code */
//colId= A.col_id[i] ;
rowId = deviceCSCrow_id[element] ;
//printf(" rolId = %d thread %d , block %d \n", rowId, col , row);
double value = deviceCSCvalues[element] ;
double value2 = dmat_in_device[col * K + row] ;
//Lets try atomic operation
sum = value * value2;
atomicAdd(&dmat_out_device[rowId * K + row] ,sum );
//printf(" sum = %d ,thread %d , block %d", sum, col , row);
//__syncthreads();
}
}
}
int main(int argc, char *argv[]) {
if(argc < 3) {
std::cerr << "usage ./exec inputfile K " << std::endl;
exit(-1);
}
unsigned int K = std::atoi(argv[2]);
CSC mat = read_matrix_market_to_CSC(argv[1]);
std::cout << mat.nrows << ' ' << mat.ncols << ' ' << mat.nnz << ' ' << K << '\n';
//Cuda Events
// events for timing
cudaEvent_t startEvent, stopEvent;
cudaEventCreate(&startEvent) ;
cudaEventCreate(&stopEvent) ;
//int TILE_WIDTH = K+1;
cudaEvent_t startEventMemKer , stopEventMemKer ;
cudaEventCreate(&startEventMemKer);
cudaEventCreate(&stopEventMemKer) ;
double *dmat_in = (double*)malloc(mat.ncols * K * sizeof(double));
double *dmat_out = (double*)malloc(mat.nrows * K * sizeof(double));
double *dmat_out_D = (double*)malloc(mat.nrows * K * sizeof(double));
double *dmat_out_GPU = (double*)malloc(mat.nrows * K * sizeof(double));
init_dmat(dmat_in, mat.ncols, K, 1.0);
//print_dmat(dmat_in, mat.ncols, K);
host_csc_spmm(mat, dmat_in, dmat_out, K);
unsigned int* deviceCSCcol_indx;
unsigned int* deviceCSCrow_id;
double* deviceCSCvalues;
cudaMalloc((void**) &deviceCSCcol_indx ,(mat.ncols +1) * sizeof(unsigned int)) ;
cudaMalloc((void**) &deviceCSCrow_id , mat.nnz * sizeof(unsigned int)) ;
cudaMalloc((void**) &deviceCSCvalues , mat.nnz * sizeof(double)) ;
double *dmat_in_device ;
cudaMalloc((void**) &dmat_in_device , mat.ncols * K * sizeof(double)) ;
double *dmat_out_device ;
cudaMalloc((void**) &dmat_out_device, mat.nrows * K * sizeof(double)) ;
// cudaMalloc does not zero memory and the kernel accumulates with atomicAdd,
// so clear the output buffer before launch.
cudaMemset(dmat_out_device, 0, mat.nrows * K * sizeof(double)) ;
cudaEventRecord(startEventMemKer, 0);
cudaMemcpy(deviceCSCcol_indx , mat.col_indx , (mat.ncols+1) * sizeof(unsigned int) , cudaMemcpyHostToDevice) ;
cudaMemcpy(deviceCSCrow_id, mat.row_id , mat.nnz * sizeof(unsigned int) , cudaMemcpyHostToDevice) ;
cudaMemcpy(deviceCSCvalues , mat.values , mat.nnz * sizeof(double) , cudaMemcpyHostToDevice) ;
//copy to device
cudaMemcpy( dmat_in_device , dmat_in , mat.ncols * K * sizeof(double) , cudaMemcpyHostToDevice ) ;
//Initialize the Grid and Block Dimension
//dim3 dimGrid((K-1) / TILE_WIDTH + 1 , (mat.ncols -1)/TILE_WIDTH +1 , 1 ) ;
//
//dim3 dimGrid( (K-1) / TILE_WIDTH +1 , (mat.ncols -1)/TILE_WIDTH+1 , 1 ) ;
//dim3 dimGrid( (K-1) / TILE_WIDTH +1 , (mat.ncols -1)/TILE_WIDTH+1 , 1 ) ;
dim3 dimGrid( (mat.ncols -1)/TILE_WIDTH+1 , (K-1) / TILE_WIDTH +1 , 1 ) ;
dim3 dimBlock(TILE_WIDTH , TILE_WIDTH , 1) ;
cudaEventRecord(startEvent, 0);
dev_csc_spmm<<<dimGrid , dimBlock>>>(deviceCSCcol_indx, deviceCSCrow_id, deviceCSCvalues , dmat_in_device , dmat_out_device , K , mat.ncols, mat.nrows) ;
cudaEventRecord(stopEvent, 0) ;
cudaEventSynchronize(startEvent);
cudaEventSynchronize(stopEvent);
float timeforKernel;
cudaEventElapsedTime(&timeforKernel, startEvent, stopEvent) ;
printf(" Time for Kernel : %f\n", timeforKernel);
cudaMemcpy(dmat_out_GPU , dmat_out_device ,mat.nrows * K * sizeof(double) , cudaMemcpyDeviceToHost ) ;
cudaEventRecord(stopEventMemKer, 0) ;
cudaEventSynchronize(startEventMemKer);
cudaEventSynchronize(stopEventMemKer);
float timeforMemKernel;
cudaEventElapsedTime(&timeforMemKernel, startEventMemKer, stopEventMemKer) ;
printf(" Time for Mem Cpy and Kernel : %f\n", timeforMemKernel);
//std::cout << "replace one argument to the below function with the values from gpu " << std::endl;
//std::cout << "replace one argument to the below function with the values from gpu " << std::endl;
//std::cout << "CPU\n";
//print_dmat(dmat_out, mat.nrows , K);
//std::cout << "GPU\n";
//print_dmat(dmat_out_GPU, mat.nrows , K);
check_dmat(dmat_out, dmat_out_GPU, mat.nrows, K);
//Lets compute GFLOP
unsigned int twoKnnz= 2 * K * mat.nnz ;
printf(" 2 * K * nnz : %d\n", twoKnnz);
float GFLOP = (twoKnnz / timeforMemKernel) / 1000000;
printf(" GFLOP : %f\n", GFLOP);
//print_dmat(dmat_out, mat.nrows, K);
free(mat.col_indx);
free(mat.row_id);
free(mat.values);
cudaFree(deviceCSCcol_indx) ;
cudaFree(deviceCSCrow_id) ;
cudaFree(deviceCSCvalues) ;
cudaEventDestroy(startEvent) ;
cudaEventDestroy(stopEvent ) ;
cudaEventDestroy(startEventMemKer);
cudaEventDestroy(stopEventMemKer) ;
return 0;
}
|
87a3a7ea48892f40a5d7930b1e89eebd3e31cab8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void GoniometricFunctionKernel(float* input, float* output, const int size, const int type)
{
int id = blockDim.x * blockIdx.y * gridDim.x
+ blockDim.x * blockIdx.x
+ threadIdx.x;
if(id < size)
{ // Sine = 0, Cosine = 1, Tan = 2, Tanh = 3, Sinh = 4, Cosh = 5, Asin = 6, Acos = 7, Atan2 = 10 (reads interleaved y,x input pairs); see MyGonioType in MyTransform.cs
switch (type)
{
case 0:
output[id] = sinf(input[id]);
break;
case 1:
output[id] = cosf(input[id]);
break;
case 2:
output[id] = tanf(input[id]);
break;
case 3:
output[id] = tanhf(input[id]);
break;
case 4:
output[id] = sinhf(input[id]);
break;
case 5:
output[id] = coshf(input[id]);
break;
case 6:
output[id] = asinf(input[id]);
break;
case 7:
output[id] = acosf(input[id]);
break;
case 10:
output[id] = atan2f(input[2*id], input[2*id+1]);
break;
}
}
} | 87a3a7ea48892f40a5d7930b1e89eebd3e31cab8.cu | #include "includes.h"
__global__ void GoniometricFunctionKernel(float* input, float* output, const int size, const int type)
{
int id = blockDim.x * blockIdx.y * gridDim.x
+ blockDim.x * blockIdx.x
+ threadIdx.x;
if(id < size)
{ // Sine = 0, Cosine = 1, Tan = 2, Tanh = 3, Sinh = 4, Cosh = 5, Asin = 6, Acos = 7, Atan2 = 10 (reads interleaved y,x input pairs); see MyGonioType in MyTransform.cs
switch (type)
{
case 0:
output[id] = sinf(input[id]);
break;
case 1:
output[id] = cosf(input[id]);
break;
case 2:
output[id] = tanf(input[id]);
break;
case 3:
output[id] = tanhf(input[id]);
break;
case 4:
output[id] = sinhf(input[id]);
break;
case 5:
output[id] = coshf(input[id]);
break;
case 6:
output[id] = asinf(input[id]);
break;
case 7:
output[id] = acosf(input[id]);
break;
case 10:
output[id] = atan2f(input[2*id], input[2*id+1]);
break;
}
}
} |
99578e800c49fa8799c7b77ec8f26b8154293c55.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
{
__global__ void DYbinaryentropy_32(const int lengthX, const float *x, const float *y, const float *t, float *z)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<lengthX)
{
z[i] += t[0]*((y[i]-x[i])/(y[i]*(1.0-y[i])))/lengthX;
}
}
} | 99578e800c49fa8799c7b77ec8f26b8154293c55.cu | extern "C"
{
__global__ void DYbinaryentropy_32(const int lengthX, const float *x, const float *y, const float *t, float *z)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<lengthX)
{
z[i] += t[0]*((y[i]-x[i])/(y[i]*(1.0-y[i])))/lengthX;
}
}
} |
e8168bbcb3e262a324197a2894e3615df9d36272.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_tea_leaf_axpby_kernel;
int xdim0_tea_leaf_axpby_kernel_h = -1;
__constant__ int xdim1_tea_leaf_axpby_kernel;
int xdim1_tea_leaf_axpby_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y) (x+xdim0_tea_leaf_axpby_kernel*(y))
#define OPS_ACC1(x,y) (x+xdim1_tea_leaf_axpby_kernel*(y))
//user function
__device__
void tea_leaf_axpby_kernel_gpu(double * u, const double * p, const double * alpha, const double * beta) {
u[OPS_ACC0(0,0)] = (*alpha) * u[OPS_ACC0(0,0)] + (*beta)*p[OPS_ACC1(0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
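// GPU wrapper generated by ops.py: each thread handles one (x, y) grid point, offsets the
// argument pointers to that point, and applies the user kernel above only when the point
// lies inside the (size0, size1) range.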
__global__ void ops_tea_leaf_axpby_kernel(
double* __restrict arg0,
const double* __restrict arg1,
const double arg2,
const double arg3,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_tea_leaf_axpby_kernel;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_tea_leaf_axpby_kernel;
if (idx_x < size0 && idx_y < size1) {
tea_leaf_axpby_kernel_gpu(arg0, arg1, &arg2, &arg3);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_tea_leaf_axpby_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
#else
void ops_par_loop_tea_leaf_axpby_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[4] = { arg0, arg1, arg2, arg3};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,4,range,27)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(27,"tea_leaf_axpby_kernel");
OPS_kernels[27].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<2; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
if (xdim0 != xdim0_tea_leaf_axpby_kernel_h || xdim1 != xdim1_tea_leaf_axpby_kernel_h) {
hipMemcpyToSymbol( xdim0_tea_leaf_axpby_kernel, &xdim0, sizeof(int) );
xdim0_tea_leaf_axpby_kernel_h = xdim0;
hipMemcpyToSymbol( xdim1_tea_leaf_axpby_kernel, &xdim1, sizeof(int) );
xdim1_tea_leaf_axpby_kernel_h = xdim1;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[4];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args,4,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[27].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0)
hipLaunchKernelGGL(( ops_tea_leaf_axpby_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
*(double *)arg2.data, *(double *)arg3.data,x_size, y_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[27].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[27].mpi_time += t2-t1;
OPS_kernels[27].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[27].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_tea_leaf_axpby_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 27;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 27;
for ( int i=0; i<4; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 4;
desc->args = (ops_arg*)malloc(4*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(1*sizeof(double));
memcpy(tmp, arg2.data,1*sizeof(double));
desc->args[2].data = tmp;
desc->args[3] = arg3;
tmp = (char*)malloc(1*sizeof(double));
memcpy(tmp, arg3.data,1*sizeof(double));
desc->args[3].data = tmp;
desc->function = ops_par_loop_tea_leaf_axpby_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(27,"tea_leaf_axpby_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
| e8168bbcb3e262a324197a2894e3615df9d36272.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_tea_leaf_axpby_kernel;
int xdim0_tea_leaf_axpby_kernel_h = -1;
__constant__ int xdim1_tea_leaf_axpby_kernel;
int xdim1_tea_leaf_axpby_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y) (x+xdim0_tea_leaf_axpby_kernel*(y))
#define OPS_ACC1(x,y) (x+xdim1_tea_leaf_axpby_kernel*(y))
//user function
__device__
void tea_leaf_axpby_kernel_gpu(double * u, const double * p, const double * alpha, const double * beta) {
u[OPS_ACC0(0,0)] = (*alpha) * u[OPS_ACC0(0,0)] + (*beta)*p[OPS_ACC1(0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
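// GPU wrapper generated by ops.py: each thread handles one (x, y) grid point, offsets the
// argument pointers to that point, and applies the user kernel above only when the point
// lies inside the (size0, size1) range.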
__global__ void ops_tea_leaf_axpby_kernel(
double* __restrict arg0,
const double* __restrict arg1,
const double arg2,
const double arg3,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_tea_leaf_axpby_kernel;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_tea_leaf_axpby_kernel;
if (idx_x < size0 && idx_y < size1) {
tea_leaf_axpby_kernel_gpu(arg0, arg1, &arg2, &arg3);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_tea_leaf_axpby_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
#else
void ops_par_loop_tea_leaf_axpby_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[4] = { arg0, arg1, arg2, arg3};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,4,range,27)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(27,"tea_leaf_axpby_kernel");
OPS_kernels[27].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<2; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
if (xdim0 != xdim0_tea_leaf_axpby_kernel_h || xdim1 != xdim1_tea_leaf_axpby_kernel_h) {
cudaMemcpyToSymbol( xdim0_tea_leaf_axpby_kernel, &xdim0, sizeof(int) );
xdim0_tea_leaf_axpby_kernel_h = xdim0;
cudaMemcpyToSymbol( xdim1_tea_leaf_axpby_kernel, &xdim1, sizeof(int) );
xdim1_tea_leaf_axpby_kernel_h = xdim1;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[4];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args,4,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[27].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0)
ops_tea_leaf_axpby_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
*(double *)arg2.data, *(double *)arg3.data,x_size, y_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[27].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[27].mpi_time += t2-t1;
OPS_kernels[27].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[27].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_tea_leaf_axpby_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 27;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 27;
for ( int i=0; i<4; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 4;
desc->args = (ops_arg*)malloc(4*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(1*sizeof(double));
memcpy(tmp, arg2.data,1*sizeof(double));
desc->args[2].data = tmp;
desc->args[3] = arg3;
tmp = (char*)malloc(1*sizeof(double));
memcpy(tmp, arg3.data,1*sizeof(double));
desc->args[3].data = tmp;
desc->function = ops_par_loop_tea_leaf_axpby_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(27,"tea_leaf_axpby_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
948cc624e6b123b311846bde9e84b7b87e31491a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
///
/// \file pooling_layer.cu
///
#include "pooling_layer.hpp"
using namespace std;
template <typename Dtype>
PoolingLayer<Dtype>::PoolingLayer(PoolParam *lcp){
this->_lcp = lcp;
_num_box = _lcp->getBoxNumHeight()*_lcp->getBoxNumWidth();
hipblasCreate(&this->handle);
}
template <typename Dtype>
PoolingLayer<Dtype>::~PoolingLayer() {
delete this-> _y;
delete this->_dE_dy;
if(_lcp->getPoolType() == MAX_POOLING )
delete _max_pos;
if((_lcp->getOutHeight() > MAX_THREAD_SIZE \
|| _lcp->getOutWidth() > MAX_THREAD_SIZE) \
&& (_lcp->getOverlapHeight() > 0 || _lcp->getOverlapWidth() > 0))
delete unranged_dE_dx;
hipblasDestroy(this->handle);
}
template <typename Dtype>
void PoolingLayer<Dtype>::initCuda() {
this->_y = new Matrix<Dtype>(_lcp->getMinibatchSize(), \
_lcp->getOutHeight()*_lcp->getOutWidth()* _lcp->getOutChannel());
this->_dE_dy = new Matrix<Dtype>(this->_y);
if(_lcp->getPoolType() == MAX_POOLING ){
_max_pos = new Matrix<int>(_lcp->getMinibatchSize(), \
_lcp->getOutHeight()*_lcp->getOutWidth()* _lcp->getOutChannel());
}
if((_lcp->getOutHeight() > MAX_THREAD_SIZE \
|| _lcp->getOutWidth() > MAX_THREAD_SIZE) \
&& (_lcp->getOverlapHeight() > 0 || _lcp->getOverlapWidth() > 0)){
unranged_dE_dx = new Matrix<Dtype>(_lcp->getMinibatchSize(), \
_lcp->getBoxInHeight()*_lcp->getBoxInWidth() \
* _lcp->getBoxNumHeight()*_lcp->getBoxNumWidth() \
* _lcp->getOutChannel());
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::computeOutput(Matrix<Dtype>* x){
this->_y->zeros();
dim3 blocks = dim3(_lcp->getMinibatchSize(), _lcp->getInChannel() * _num_box);
dim3 threads = dim3(_lcp->getThreadWidth(), _lcp->getThreadHeight());
if(_lcp->getPoolType() == MAX_POOLING ){
hipLaunchKernelGGL(( max_pooling), dim3(blocks), dim3(threads), 0, 0, x->getDevData(), \
this->_y->getDevData(), _max_pos->getDevData(), \
_lcp->getInHeight(), _lcp->getInWidth(), \
_lcp->getInChannel(), \
_lcp->getOutHeight(), _lcp->getOutWidth(), \
_lcp->getFilterHeight(), _lcp->getFilterWidth(), \
_lcp->getStrideHeight(), _lcp->getStrideWidth(), \
_lcp->getBoxOutHeight(), _lcp->getBoxOutWidth(), \
_lcp->getBoxNumHeight(), _lcp->getBoxNumWidth());
}else if(_lcp->getPoolType() == AVG_POOLING){
hipLaunchKernelGGL(( avg_pooling), dim3(blocks), dim3(threads), 0, 0, x->getDevData(), \
this->_y->getDevData(), \
_lcp->getInHeight(), _lcp->getInWidth(), \
_lcp->getInChannel(), \
_lcp->getOutHeight(), _lcp->getOutWidth(), \
_lcp->getFilterHeight(), _lcp->getFilterWidth(), \
_lcp->getStrideHeight(), _lcp->getStrideWidth(), \
_lcp->getBoxOutHeight(), _lcp->getBoxOutWidth(), \
_lcp->getBoxNumHeight(), _lcp->getBoxNumWidth());
}else{
cout << "Pooling type is invalid !\n";
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
cudaCheckError();
}
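// Editor's note (not in the original source): the backward pass below has two
// paths. When the output plane fits in one thread block, or the pooling
// windows do not overlap, gradients are written straight into dE_dx. When the
// output is larger than MAX_THREAD_SIZE *and* the windows overlap, each box
// first writes its partial gradients into unranged_dE_dx, and compactOverlap
// then accumulates the overlapping contributions into dE_dx.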
template <typename Dtype>
void PoolingLayer<Dtype>::computeDerivsOfInput(Matrix<Dtype>* dE_dx){
dim3 blocks = dim3(_lcp->getMinibatchSize(), _lcp->getInChannel() * _num_box);
dim3 threads = dim3(_lcp->getThreadWidth(), _lcp->getThreadHeight());
int box_in_height = MAX_THREAD_SIZE > _lcp->getOutHeight() \
? _lcp->getInHeight() : _lcp->getBoxInHeight();
int box_in_width = MAX_THREAD_SIZE > _lcp->getOutWidth() \
? _lcp->getInWidth() : _lcp->getBoxInWidth();
Dtype* p_dE_dx;
if((_lcp->getOutHeight() > MAX_THREAD_SIZE \
|| _lcp->getOutWidth() > MAX_THREAD_SIZE) \
&& (_lcp->getOverlapHeight() > 0 || _lcp->getOverlapWidth() > 0)){
unranged_dE_dx->zeros();
p_dE_dx = unranged_dE_dx->getDevData();
}else{
dE_dx->zeros();
p_dE_dx = dE_dx->getDevData();
}
if(_lcp->getPoolType() == MAX_POOLING ){
hipLaunchKernelGGL(( compute_dE_dy_max), dim3(blocks), dim3(threads), \
sizeof(Dtype)*box_in_height*box_in_width, 0, \
this->_dE_dy->getDevData(), \
p_dE_dx, _max_pos->getDevData(), \
box_in_height, box_in_width, \
_lcp->getBoxOutHeight(), _lcp->getBoxOutWidth(), \
_lcp->getInChannel(), \
_lcp->getOutHeight(), _lcp->getOutWidth(), \
_lcp->getFilterHeight(), _lcp->getFilterWidth(), \
_lcp->getStrideHeight(), _lcp->getStrideWidth(), \
_lcp->getBoxNumHeight(), _lcp->getBoxNumWidth());
hipDeviceSynchronize();
cudaCheckError();
}else if(_lcp->getPoolType() == AVG_POOLING){
hipLaunchKernelGGL(( compute_dE_dy_avg), dim3(blocks), dim3(threads), \
sizeof(Dtype)*box_in_height*box_in_width, 0, \
this->_dE_dy->getDevData(), p_dE_dx, \
box_in_height, box_in_width, \
_lcp->getBoxOutHeight(), _lcp->getBoxOutWidth(), \
_lcp->getInChannel(), \
_lcp->getOutHeight(), _lcp->getOutWidth(), \
_lcp->getFilterHeight(), _lcp->getFilterWidth(), \
_lcp->getStrideHeight(), _lcp->getStrideWidth(), \
_lcp->getBoxNumHeight(), _lcp->getBoxNumWidth());
hipDeviceSynchronize();
cudaCheckError();
}else{
cout << "Pooling type is invalid !\n";
exit(EXIT_FAILURE);
}
if((_lcp->getOutHeight() > MAX_THREAD_SIZE \
|| _lcp->getOutWidth() > MAX_THREAD_SIZE) \
&& (_lcp->getOverlapHeight() > 0 || _lcp->getOverlapWidth() > 0)){
dE_dx->zeros();
hipLaunchKernelGGL(( compactOverlap), dim3(_lcp->getMinibatchSize()), dim3(_lcp->getInChannel()), 0, 0, \
unranged_dE_dx->getDevData(), dE_dx->getDevData(), \
_lcp->getInHeight(), _lcp->getInWidth(), \
_lcp->getInChannel(), _lcp->getOverlapHeight(), \
_lcp->getOverlapWidth(), \
box_in_height, box_in_width, \
_lcp->getBoxNumHeight(), _lcp->getBoxNumWidth());
hipDeviceSynchronize();
cudaCheckError();
}
}
| 948cc624e6b123b311846bde9e84b7b87e31491a.cu | ///
/// \file pooling_layer.cu
///
#include "pooling_layer.hpp"
using namespace std;
template <typename Dtype>
PoolingLayer<Dtype>::PoolingLayer(PoolParam *lcp){
this->_lcp = lcp;
_num_box = _lcp->getBoxNumHeight()*_lcp->getBoxNumWidth();
cublasCreate(&this->handle);
}
template <typename Dtype>
PoolingLayer<Dtype>::~PoolingLayer() {
delete this-> _y;
delete this->_dE_dy;
if(_lcp->getPoolType() == MAX_POOLING )
delete _max_pos;
if((_lcp->getOutHeight() > MAX_THREAD_SIZE \
|| _lcp->getOutWidth() > MAX_THREAD_SIZE) \
&& (_lcp->getOverlapHeight() > 0 || _lcp->getOverlapWidth() > 0))
delete unranged_dE_dx;
cublasDestroy(this->handle);
}
template <typename Dtype>
void PoolingLayer<Dtype>::initCuda() {
this->_y = new Matrix<Dtype>(_lcp->getMinibatchSize(), \
_lcp->getOutHeight()*_lcp->getOutWidth()* _lcp->getOutChannel());
this->_dE_dy = new Matrix<Dtype>(this->_y);
if(_lcp->getPoolType() == MAX_POOLING ){
_max_pos = new Matrix<int>(_lcp->getMinibatchSize(), \
_lcp->getOutHeight()*_lcp->getOutWidth()* _lcp->getOutChannel());
}
if((_lcp->getOutHeight() > MAX_THREAD_SIZE \
|| _lcp->getOutWidth() > MAX_THREAD_SIZE) \
&& (_lcp->getOverlapHeight() > 0 || _lcp->getOverlapWidth() > 0)){
unranged_dE_dx = new Matrix<Dtype>(_lcp->getMinibatchSize(), \
_lcp->getBoxInHeight()*_lcp->getBoxInWidth() \
* _lcp->getBoxNumHeight()*_lcp->getBoxNumWidth() \
* _lcp->getOutChannel());
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::computeOutput(Matrix<Dtype>* x){
this->_y->zeros();
dim3 blocks = dim3(_lcp->getMinibatchSize(), _lcp->getInChannel() * _num_box);
dim3 threads = dim3(_lcp->getThreadWidth(), _lcp->getThreadHeight());
if(_lcp->getPoolType() == MAX_POOLING ){
max_pooling<<<blocks, threads>>>(x->getDevData(), \
this->_y->getDevData(), _max_pos->getDevData(), \
_lcp->getInHeight(), _lcp->getInWidth(), \
_lcp->getInChannel(), \
_lcp->getOutHeight(), _lcp->getOutWidth(), \
_lcp->getFilterHeight(), _lcp->getFilterWidth(), \
_lcp->getStrideHeight(), _lcp->getStrideWidth(), \
_lcp->getBoxOutHeight(), _lcp->getBoxOutWidth(), \
_lcp->getBoxNumHeight(), _lcp->getBoxNumWidth());
}else if(_lcp->getPoolType() == AVG_POOLING){
avg_pooling<<<blocks, threads>>>(x->getDevData(), \
this->_y->getDevData(), \
_lcp->getInHeight(), _lcp->getInWidth(), \
_lcp->getInChannel(), \
_lcp->getOutHeight(), _lcp->getOutWidth(), \
_lcp->getFilterHeight(), _lcp->getFilterWidth(), \
_lcp->getStrideHeight(), _lcp->getStrideWidth(), \
_lcp->getBoxOutHeight(), _lcp->getBoxOutWidth(), \
_lcp->getBoxNumHeight(), _lcp->getBoxNumWidth());
}else{
cout << "Pooling type is invalid !\n";
exit(EXIT_FAILURE);
}
cudaThreadSynchronize();
cudaCheckError();
}
template <typename Dtype>
void PoolingLayer<Dtype>::computeDerivsOfInput(Matrix<Dtype>* dE_dx){
dim3 blocks = dim3(_lcp->getMinibatchSize(), _lcp->getInChannel() * _num_box);
dim3 threads = dim3(_lcp->getThreadWidth(), _lcp->getThreadHeight());
int box_in_height = MAX_THREAD_SIZE > _lcp->getOutHeight() \
? _lcp->getInHeight() : _lcp->getBoxInHeight();
int box_in_width = MAX_THREAD_SIZE > _lcp->getOutWidth() \
? _lcp->getInWidth() : _lcp->getBoxInWidth();
Dtype* p_dE_dx;
if((_lcp->getOutHeight() > MAX_THREAD_SIZE \
|| _lcp->getOutWidth() > MAX_THREAD_SIZE) \
&& (_lcp->getOverlapHeight() > 0 || _lcp->getOverlapWidth() > 0)){
unranged_dE_dx->zeros();
p_dE_dx = unranged_dE_dx->getDevData();
}else{
dE_dx->zeros();
p_dE_dx = dE_dx->getDevData();
}
if(_lcp->getPoolType() == MAX_POOLING ){
compute_dE_dy_max<<<blocks, threads, \
sizeof(Dtype)*box_in_height*box_in_width>>>( \
this->_dE_dy->getDevData(), \
p_dE_dx, _max_pos->getDevData(), \
box_in_height, box_in_width, \
_lcp->getBoxOutHeight(), _lcp->getBoxOutWidth(), \
_lcp->getInChannel(), \
_lcp->getOutHeight(), _lcp->getOutWidth(), \
_lcp->getFilterHeight(), _lcp->getFilterWidth(), \
_lcp->getStrideHeight(), _lcp->getStrideWidth(), \
_lcp->getBoxNumHeight(), _lcp->getBoxNumWidth());
cudaThreadSynchronize();
cudaCheckError();
}else if(_lcp->getPoolType() == AVG_POOLING){
compute_dE_dy_avg<<<blocks, threads, \
sizeof(Dtype)*box_in_height*box_in_width>>>( \
this->_dE_dy->getDevData(), p_dE_dx, \
box_in_height, box_in_width, \
_lcp->getBoxOutHeight(), _lcp->getBoxOutWidth(), \
_lcp->getInChannel(), \
_lcp->getOutHeight(), _lcp->getOutWidth(), \
_lcp->getFilterHeight(), _lcp->getFilterWidth(), \
_lcp->getStrideHeight(), _lcp->getStrideWidth(), \
_lcp->getBoxNumHeight(), _lcp->getBoxNumWidth());
cudaThreadSynchronize();
cudaCheckError();
}else{
cout << "Pooling type is invalid !\n";
exit(EXIT_FAILURE);
}
if((_lcp->getOutHeight() > MAX_THREAD_SIZE \
|| _lcp->getOutWidth() > MAX_THREAD_SIZE) \
&& (_lcp->getOverlapHeight() > 0 || _lcp->getOverlapWidth() > 0)){
dE_dx->zeros();
compactOverlap<<<_lcp->getMinibatchSize(), _lcp->getInChannel()>>>( \
unranged_dE_dx->getDevData(), dE_dx->getDevData(), \
_lcp->getInHeight(), _lcp->getInWidth(), \
_lcp->getInChannel(), _lcp->getOverlapHeight(), \
_lcp->getOverlapWidth(), \
box_in_height, box_in_width, \
_lcp->getBoxNumHeight(), _lcp->getBoxNumWidth());
cudaThreadSynchronize();
cudaCheckError();
}
}
|
d95bf229d249ce08110f460adb77d7c6a4cbbcb4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@precisions normal z -> c d s
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
#define PRECISION_z
// copied from dznrm2.cu in trunk/magmablas
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, double* x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
// end sum_reduce
__global__ void
magma_zlobpcg_res_kernel( magma_int_t num_rows,
magma_int_t num_vecs,
double *evals,
magmaDoubleComplex *X,
magmaDoubleComplex *R,
double *res){
int row = blockIdx.x * blockDim.x + threadIdx.x; // global row index
if( row<num_rows){
for( int i=0; i<num_vecs; i++ ){
R[row + i*num_rows] = R[row + i*num_rows]
+ MAGMA_Z_MAKE( -evals[i], 0.0 )
* X[ row + i*num_rows ];
}
}
}
/*
magmablas_dznrm2_kernel( int m, magmaDoubleComplex *da, int ldda, double *dxnorm )
{
const int i = threadIdx.x;
magmaDoubleComplex *dx = da + blockIdx.x * ldda;
__shared__ double sum[ BLOCK_SIZE ];
double re, lsum;
// get norm of dx
lsum = 0;
for( int j = i; j < m; j += BLOCK_SIZE ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_Z_REAL( dx[j] );
double im = MAGMA_Z_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[i] = lsum;
sum_reduce< BLOCK_SIZE >( i, sum );
if (i==0)
res[blockIdx.x] = sqrt(sum[0]);
}
*/
/**
Purpose
-------
    This routine computes, for Block-LOBPCG, the set of residuals.
R = Ax - x evalues
It replaces:
for(int i=0; i < n; i++){
magma_zaxpy(m, MAGMA_Z_MAKE(-evalues[i],0),blockX+i*m,1,blockR+i*m,1);
}
The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x1[1] x1[2] x1[3] x1[4] x2[0] x2[1] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
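    For illustration (added note, not part of the original MAGMA text): with
    num_rows = 5 and num_vecs = 3 as drawn above, column i starts at offset
    i*num_rows, so entry x2[3] is stored at X[3 + 1*5] = X[8], which matches
    the X[ row + i*num_rows ] indexing used by magma_zlobpcg_res_kernel.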
Arguments
---------
@param
num_rows magma_int_t
number of rows
@param
num_vecs magma_int_t
number of vectors
@param
evalues double*
array of eigenvalues/approximations
@param
X magmaDoubleComplex*
block of eigenvector approximations
@param
R magmaDoubleComplex*
block of residuals
@param
res double*
array of residuals
@ingroup magmasparse_zaux
********************************************************************/
extern "C" magma_int_t
magma_zlobpcg_res( magma_int_t num_rows,
magma_int_t num_vecs,
double *evalues,
magmaDoubleComplex *X,
magmaDoubleComplex *R,
double *res ){
// every thread handles one row
magma_int_t block_size = BLOCK_SIZE;
dim3 block( block_size );
dim3 grid( (num_rows+block_size-1)/block_size );
hipLaunchKernelGGL(( magma_zlobpcg_res_kernel), dim3(grid), dim3(block), 0, magma_stream ,
num_rows, num_vecs, evalues, X, R, res );
return MAGMA_SUCCESS;
}
| d95bf229d249ce08110f460adb77d7c6a4cbbcb4.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@precisions normal z -> c d s
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
#define PRECISION_z
// copied from dznrm2.cu in trunk/magmablas
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, double* x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
// end sum_reduce
__global__ void
magma_zlobpcg_res_kernel( magma_int_t num_rows,
magma_int_t num_vecs,
double *evals,
magmaDoubleComplex *X,
magmaDoubleComplex *R,
double *res){
int row = blockIdx.x * blockDim.x + threadIdx.x; // global row index
if( row<num_rows){
for( int i=0; i<num_vecs; i++ ){
R[row + i*num_rows] = R[row + i*num_rows]
+ MAGMA_Z_MAKE( -evals[i], 0.0 )
* X[ row + i*num_rows ];
}
}
}
/*
magmablas_dznrm2_kernel( int m, magmaDoubleComplex *da, int ldda, double *dxnorm )
{
const int i = threadIdx.x;
magmaDoubleComplex *dx = da + blockIdx.x * ldda;
__shared__ double sum[ BLOCK_SIZE ];
double re, lsum;
// get norm of dx
lsum = 0;
for( int j = i; j < m; j += BLOCK_SIZE ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_Z_REAL( dx[j] );
double im = MAGMA_Z_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[i] = lsum;
sum_reduce< BLOCK_SIZE >( i, sum );
if (i==0)
res[blockIdx.x] = sqrt(sum[0]);
}
*/
/**
Purpose
-------
    This routine computes, for Block-LOBPCG, the set of residuals.
R = Ax - x evalues
It replaces:
for(int i=0; i < n; i++){
magma_zaxpy(m, MAGMA_Z_MAKE(-evalues[i],0),blockX+i*m,1,blockR+i*m,1);
}
The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x1[1] x1[2] x1[3] x1[4] x2[0] x2[1] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param
num_rows magma_int_t
number of rows
@param
num_vecs magma_int_t
number of vectors
@param
evalues double*
array of eigenvalues/approximations
@param
X magmaDoubleComplex*
block of eigenvector approximations
@param
R magmaDoubleComplex*
block of residuals
@param
res double*
array of residuals
@ingroup magmasparse_zaux
********************************************************************/
extern "C" magma_int_t
magma_zlobpcg_res( magma_int_t num_rows,
magma_int_t num_vecs,
double *evalues,
magmaDoubleComplex *X,
magmaDoubleComplex *R,
double *res ){
// every thread handles one row
magma_int_t block_size = BLOCK_SIZE;
dim3 block( block_size );
dim3 grid( (num_rows+block_size-1)/block_size );
magma_zlobpcg_res_kernel<<< grid, block, 0, magma_stream >>>
( num_rows, num_vecs, evalues, X, R, res );
return MAGMA_SUCCESS;
}
|
b9de438d475c94d33e1c49e0ebe03c0d3db5636d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <assert.h>
#include "pixel_shuffle.h"
#include "pixel_shuffle_common.h"
using kernel_params_ncdhw32_2x2x2_256 = Pixel_shuffle_kernel_params<2, 2, 2, int8_t, 256, 8, 32>;
using kernel_params_ncdhw32_2x2x2_128 = Pixel_shuffle_kernel_params<2, 2, 2, int8_t, 128, 8, 32>;
using kernel_params_ncdhw_fp32_2x2x2_256 = Pixel_shuffle_kernel_params<2, 2, 2, float, 256, 8, 32>;
////////////////////////////////////////////////////////////////////////////////////////////////////
static inline __host__ __device__ int div_up(int m, int n)
{
return (m + n - 1) / n;
}
template <typename Kernel_params, int Do_scales = 0>
__global__ void __launch_bounds__(Kernel_params::THREADS_PER_CTA, 1)
pixel_shuffle_ncdhw32_to_ncdhw32(PixelShuffleParams params)
{
// blockIdx.x for O*P
// blockIdx.y for k
// blockIdx.z for batch
const int tid = threadIdx.x;
const int WARP_SIZE = Kernel_params::WARP_SIZE;
const int warp_id = tid / WARP_SIZE;
const int lane_id = tid % WARP_SIZE;
const int R = Kernel_params::R;
const int S = Kernel_params::S;
const int T = Kernel_params::T;
const int RST = R * S * T;
int opq = params.o * params.p * params.q;
int pq = params.p * params.q;
int dhw = opq / RST;
int hw = pq / (S * T);
int o = blockIdx.x / params.p;
int p = blockIdx.x % params.p;
const int cta_k = blockIdx.y;
const int k = cta_k * 32 + lane_id;
const int is_valid_k = (k < params.k);
if (!is_valid_k) return;
int d = o / R;
int h = p / S;
// The base pointer to load from.
const int8_t *gmem_src = &reinterpret_cast<const int8_t *>(params.gmem_src)[ blockIdx.z * params.k * opq
+ d * hw * 32
+ h * params.q / T * 32];
int8_t *gmem_dst = &reinterpret_cast<int8_t *>(params.gmem_dst)[ blockIdx.z * params.output_stride
+ cta_k * 32 * opq
+ blockIdx.x * params.q * 32
//+ o * pq * 32 + p * params.q * 32
+ lane_id];
int8_t nx[Kernel_params::ELEMENTS_PER_LDG] = {0};
for (int iq = 0; iq < params.q; iq += Kernel_params::NUM_WARPS * Kernel_params::ELEMENTS_PER_LDG)
{
#pragma unroll
for (int i = 0; i < Kernel_params::ELEMENTS_PER_LDG; i++)
{
int q = iq + warp_id * Kernel_params::ELEMENTS_PER_LDG + i;
int is_valid_q = (q < params.q);
int w = q / T;
int c = k * RST + (o % R) * (S*T) + (p % S) * T + q % T;
int is_valid_c = (c < params.k * RST);
if (is_valid_c && is_valid_q)
{
nx[i] = gmem_src[ (c / 32) * dhw * 32 + w * 32 + c % 32];
if (Do_scales)
{
float x = __int2float_rn(nx[i]) * params.scale;
nx[i] = __float_as_int(min(max(x + 12582912.0F, 12582785.0F), 12583039.0F));
//nx[i] = __float2int_rn(fminf(fmaxf(x, INT8_MIN), INT8_MAX));
}
}
}
// vectorizing stores through "int" just a bit faster than below, since we need to transpose the warp in smem
#pragma unroll
for (int i = 0; i < Kernel_params::ELEMENTS_PER_LDG; i++)
{
int q = iq + warp_id * Kernel_params::ELEMENTS_PER_LDG + i;
if (q >= params.q) continue;
gmem_dst[q * 32] = nx[i];
}
}
}
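// Worked example (added for illustration; values chosen by the editor): for the
// 2x2x2 shuffle (R = S = T = 2, RST = 8), output element (k=5, o=3, p=2, q=7)
// reads input channel c = 5*8 + (3%2)*4 + (2%2)*2 + (7%2) = 45 at spatial
// position (d,h,w) = (o/2, p/2, q/2) = (1,1,3); i.e. the eight input channels
// 40..47 are spread over the 2x2x2 output neighbourhood of channel 5.
// The constant 12582912.0f in the scaled path is 1.5*2^23: adding it (and
// clamping to 127 either side of it) appears to be the usual trick for getting
// a round-to-nearest, saturated int8 value from the low bits of the float
// without an explicit float-to-int conversion.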
////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Kernel_params>
__global__ void __launch_bounds__(Kernel_params::THREADS_PER_CTA, 1)
pixel_shuffle_ncdhw_to_ncdhw(PixelShuffleParams params)
{
// blockIdx.x for O*P*Q / block_dim
// blockIdx.y for k
// blockIdx.z for batch
const int R = Kernel_params::R;
const int S = Kernel_params::S;
const int T = Kernel_params::T;
const int RST = R * S * T;
int opq = params.o * params.p * params.q;
int pq = params.p * params.q;
int dhw = opq / RST;
int hw = pq / (S * T);
int opq_idx = blockIdx.x * Kernel_params::THREADS_PER_CTA + threadIdx.x;
if (opq_idx >= opq) return;
int pq_reminder = opq_idx % (params.p * params.q);
int o = opq_idx / (params.p * params.q);
int p = pq_reminder / params.q;
int q = pq_reminder % params.q;
const int cta_k = blockIdx.y;
const int k = cta_k;
int d = o / R;
int h = p / S;
int w = q / T;
// The base pointer to load from.
const typename Kernel_params::Data_Type *gmem_src = &reinterpret_cast<const typename Kernel_params::Data_Type *>(params.gmem_src)[ blockIdx.z * params.k * opq
+ d * hw
+ h * params.q / T + w];
typename Kernel_params::Data_Type *gmem_dst = &reinterpret_cast<typename Kernel_params::Data_Type *>(params.gmem_dst)[ blockIdx.z * params.output_stride
+ cta_k * opq
+ opq_idx];
int c = k * RST + (o % R) * (S*T) + (p % S) * T + q % T;
int is_valid_c = (c < params.k * RST);
if (is_valid_c)
{
*gmem_dst = gmem_src[c * dhw];
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
int pixel_shuffle_ncdhw32_to_ncdhw32_dispatch(PixelShuffleParams params, hipStream_t stream)
{
using kernel_params = kernel_params_ncdhw32_2x2x2_128;
assert(params.k >= 32);
if (params.r == 2 && params.s == 2 && params.t == 2)
{
dim3 grid(params.o * params.p, div_up(params.k, 32), params.n);
//CHECK_CUDA(hipFuncSetCacheConfig(pixel_shuffle_ncdhw32_to_ncdhw32<T,R,S>, hipFuncCachePreferL1));
if (params.scale == 1.f)
{
hipLaunchKernelGGL(( pixel_shuffle_ncdhw32_to_ncdhw32<kernel_params, 0>), dim3(grid)
, dim3(kernel_params::THREADS_PER_CTA)
, 0, stream, params);
}
else
{
hipLaunchKernelGGL(( pixel_shuffle_ncdhw32_to_ncdhw32<kernel_params, 1>), dim3(grid)
, dim3(kernel_params::THREADS_PER_CTA)
, 0, stream, params);
}
}
else
{
fprintf(stderr, "%d, %d, %d pixel shuffle is not supported\n", params.r, params.s, params.t);
assert(0);
}
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
int pixel_shuffle_ncdhw_to_ncdhw_dispatch(PixelShuffleParams params, hipStream_t stream)
{
assert(params.k >= 32);
if (params.r == 2 && params.s == 2 && params.t == 2)
{
dim3 grid(div_up(params.o * params.p * params.q, kernel_params_ncdhw_fp32_2x2x2_256::THREADS_PER_CTA), params.k, params.n);
//CHECK_CUDA(hipFuncSetCacheConfig(pixel_shuffle_ncdhw32_to_ncdhw32<T,R,S>, hipFuncCachePreferL1));
hipLaunchKernelGGL(( pixel_shuffle_ncdhw_to_ncdhw<kernel_params_ncdhw_fp32_2x2x2_256>), dim3(grid)
, dim3(kernel_params_ncdhw_fp32_2x2x2_256::THREADS_PER_CTA)
, 0, stream, params);
}
else
{
fprintf(stderr, "%d, %d, %d pixel shuffle is not supported\n", params.r, params.s, params.t);
assert(0);
}
return 0;
} | b9de438d475c94d33e1c49e0ebe03c0d3db5636d.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <assert.h>
#include "pixel_shuffle.h"
#include "pixel_shuffle_common.h"
using kernel_params_ncdhw32_2x2x2_256 = Pixel_shuffle_kernel_params<2, 2, 2, int8_t, 256, 8, 32>;
using kernel_params_ncdhw32_2x2x2_128 = Pixel_shuffle_kernel_params<2, 2, 2, int8_t, 128, 8, 32>;
using kernel_params_ncdhw_fp32_2x2x2_256 = Pixel_shuffle_kernel_params<2, 2, 2, float, 256, 8, 32>;
////////////////////////////////////////////////////////////////////////////////////////////////////
static inline __host__ __device__ int div_up(int m, int n)
{
return (m + n - 1) / n;
}
template <typename Kernel_params, int Do_scales = 0>
__global__ void __launch_bounds__(Kernel_params::THREADS_PER_CTA, 1)
pixel_shuffle_ncdhw32_to_ncdhw32(PixelShuffleParams params)
{
// blockIdx.x for O*P
// blockIdx.y for k
// blockIdx.z for batch
const int tid = threadIdx.x;
const int WARP_SIZE = Kernel_params::WARP_SIZE;
const int warp_id = tid / WARP_SIZE;
const int lane_id = tid % WARP_SIZE;
const int R = Kernel_params::R;
const int S = Kernel_params::S;
const int T = Kernel_params::T;
const int RST = R * S * T;
int opq = params.o * params.p * params.q;
int pq = params.p * params.q;
int dhw = opq / RST;
int hw = pq / (S * T);
int o = blockIdx.x / params.p;
int p = blockIdx.x % params.p;
const int cta_k = blockIdx.y;
const int k = cta_k * 32 + lane_id;
const int is_valid_k = (k < params.k);
if (!is_valid_k) return;
int d = o / R;
int h = p / S;
// The base pointer to load from.
const int8_t *gmem_src = &reinterpret_cast<const int8_t *>(params.gmem_src)[ blockIdx.z * params.k * opq
+ d * hw * 32
+ h * params.q / T * 32];
int8_t *gmem_dst = &reinterpret_cast<int8_t *>(params.gmem_dst)[ blockIdx.z * params.output_stride
+ cta_k * 32 * opq
+ blockIdx.x * params.q * 32
//+ o * pq * 32 + p * params.q * 32
+ lane_id];
int8_t nx[Kernel_params::ELEMENTS_PER_LDG] = {0};
for (int iq = 0; iq < params.q; iq += Kernel_params::NUM_WARPS * Kernel_params::ELEMENTS_PER_LDG)
{
#pragma unroll
for (int i = 0; i < Kernel_params::ELEMENTS_PER_LDG; i++)
{
int q = iq + warp_id * Kernel_params::ELEMENTS_PER_LDG + i;
int is_valid_q = (q < params.q);
int w = q / T;
int c = k * RST + (o % R) * (S*T) + (p % S) * T + q % T;
int is_valid_c = (c < params.k * RST);
if (is_valid_c && is_valid_q)
{
nx[i] = gmem_src[ (c / 32) * dhw * 32 + w * 32 + c % 32];
if (Do_scales)
{
float x = __int2float_rn(nx[i]) * params.scale;
nx[i] = __float_as_int(min(max(x + 12582912.0F, 12582785.0F), 12583039.0F));
//nx[i] = __float2int_rn(fminf(fmaxf(x, INT8_MIN), INT8_MAX));
}
}
}
// vectorizing stores through "int" just a bit faster than below, since we need to transpose the warp in smem
#pragma unroll
for (int i = 0; i < Kernel_params::ELEMENTS_PER_LDG; i++)
{
int q = iq + warp_id * Kernel_params::ELEMENTS_PER_LDG + i;
if (q >= params.q) continue;
gmem_dst[q * 32] = nx[i];
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Kernel_params>
__global__ void __launch_bounds__(Kernel_params::THREADS_PER_CTA, 1)
pixel_shuffle_ncdhw_to_ncdhw(PixelShuffleParams params)
{
// blockIdx.x for O*P*Q / block_dim
// blockIdx.y for k
// blockIdx.z for batch
const int R = Kernel_params::R;
const int S = Kernel_params::S;
const int T = Kernel_params::T;
const int RST = R * S * T;
int opq = params.o * params.p * params.q;
int pq = params.p * params.q;
int dhw = opq / RST;
int hw = pq / (S * T);
int opq_idx = blockIdx.x * Kernel_params::THREADS_PER_CTA + threadIdx.x;
if (opq_idx >= opq) return;
int pq_reminder = opq_idx % (params.p * params.q);
int o = opq_idx / (params.p * params.q);
int p = pq_reminder / params.q;
int q = pq_reminder % params.q;
const int cta_k = blockIdx.y;
const int k = cta_k;
int d = o / R;
int h = p / S;
int w = q / T;
// The base pointer to load from.
const typename Kernel_params::Data_Type *gmem_src = &reinterpret_cast<const typename Kernel_params::Data_Type *>(params.gmem_src)[ blockIdx.z * params.k * opq
+ d * hw
+ h * params.q / T + w];
typename Kernel_params::Data_Type *gmem_dst = &reinterpret_cast<typename Kernel_params::Data_Type *>(params.gmem_dst)[ blockIdx.z * params.output_stride
+ cta_k * opq
+ opq_idx];
int c = k * RST + (o % R) * (S*T) + (p % S) * T + q % T;
int is_valid_c = (c < params.k * RST);
if (is_valid_c)
{
*gmem_dst = gmem_src[c * dhw];
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
int pixel_shuffle_ncdhw32_to_ncdhw32_dispatch(PixelShuffleParams params, cudaStream_t stream)
{
using kernel_params = kernel_params_ncdhw32_2x2x2_128;
assert(params.k >= 32);
if (params.r == 2 && params.s == 2 && params.t == 2)
{
dim3 grid(params.o * params.p, div_up(params.k, 32), params.n);
//CHECK_CUDA(cudaFuncSetCacheConfig(pixel_shuffle_ncdhw32_to_ncdhw32<T,R,S>, cudaFuncCachePreferL1));
if (params.scale == 1.f)
{
pixel_shuffle_ncdhw32_to_ncdhw32<kernel_params, 0><<< grid
, kernel_params::THREADS_PER_CTA
, 0, stream>>> (params);
}
else
{
pixel_shuffle_ncdhw32_to_ncdhw32<kernel_params, 1><<< grid
, kernel_params::THREADS_PER_CTA
, 0, stream>>> (params);
}
}
else
{
fprintf(stderr, "%d, %d, %d pixel shuffle is not supported\n", params.r, params.s, params.t);
assert(0);
}
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
int pixel_shuffle_ncdhw_to_ncdhw_dispatch(PixelShuffleParams params, cudaStream_t stream)
{
assert(params.k >= 32);
if (params.r == 2 && params.s == 2 && params.t == 2)
{
dim3 grid(div_up(params.o * params.p * params.q, kernel_params_ncdhw_fp32_2x2x2_256::THREADS_PER_CTA), params.k, params.n);
//CHECK_CUDA(cudaFuncSetCacheConfig(pixel_shuffle_ncdhw32_to_ncdhw32<T,R,S>, cudaFuncCachePreferL1));
pixel_shuffle_ncdhw_to_ncdhw<kernel_params_ncdhw_fp32_2x2x2_256><<< grid
, kernel_params_ncdhw_fp32_2x2x2_256::THREADS_PER_CTA
, 0, stream>>> (params);
}
else
{
fprintf(stderr, "%d, %d, %d pixel shuffle is not supported\n", params.r, params.s, params.t);
assert(0);
}
return 0;
} |
e70e16e4c1b9fd25fa2727fd54a9adb24307e287.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zlascl_2x2.cu normal z -> s, Fri Jan 30 19:00:09 2015
@author Ichitaro Yamazaki
*/
#include "common_magma.h"
#define NB 64
#define A(i,j) (A[(i) + (j)*lda])
#define W(i,j) (W[(i) + (j)*ldw])
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
slascl_2x2_lower(int m, const float* W, int ldw, float* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
float D21 = W( 1, 0 );
float D11 = MAGMA_S_DIV( W( 1, 1 ), D21 );
float D22 = MAGMA_S_DIV( W( 0, 0 ), MAGMA_S_CNJG( D21 ) );
float T = 1.0 / ( MAGMA_S_REAL( D11*D22 ) - 1.0 );
D21 = MAGMA_S_DIV( MAGMA_S_MAKE(T,0.0), D21 );
if (ind < m) {
A( ind, 0 ) = MAGMA_S_CNJG( D21 )*( D11*W( 2+ind, 0 )-W( 2+ind, 1 ) );
A( ind, 1 ) = D21*( D22*W( 2+ind, 1 )-W( 2+ind, 0 ) );
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
slascl_2x2_upper(int m, const float *W, int ldw, float* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
float D21 = W( m, 1 );
float D11 = MAGMA_S_DIV( W( m+1, 1 ), MAGMA_S_CNJG( D21 ) );
float D22 = MAGMA_S_DIV( W( m, 0 ), D21 );
float T = 1.0 / ( MAGMA_S_REAL( D11*D22 ) - 1.0 );
D21 = MAGMA_S_DIV( MAGMA_S_MAKE(T,0.0), D21 );
if (ind < m) {
A( ind, 0 ) = D21*( D11*W( ind, 0 )-W( ind, 1 ) );
A( ind, 1 ) = MAGMA_S_CNJG( D21 )*( D22*W( ind, 1 )-W( ind, 0 ) );
}
}
/**
Purpose
-------
SLASCL_2x2 scales the M by M real matrix A by the 2-by-2 pivot.
TYPE specifies that A may be upper or lower triangular.
Arguments
---------
\param[in]
type magma_type_t
            TYPE indicates the storage type of the input matrix A.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
\param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
\param[in]
dW REAL vector, dimension (2*lddw)
The matrix containing the 2-by-2 pivot.
\param[in]
lddw INTEGER
            The leading dimension of the array W.  LDDW >= max(1,M).
\param[in,out]
dA REAL array, dimension (LDDA,N)
The matrix to be scaled by dW. See TYPE for the
storage type.
\param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
\param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slascl_2x2_q(
magma_type_t type, magma_int_t m,
const float *dW, magma_int_t lddw,
float *dA, magma_int_t ldda,
magma_int_t *info, magma_queue_t queue )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( ldda < max(1,m) )
*info = -4;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 grid( (m + NB - 1)/NB );
dim3 threads( NB );
if (type == MagmaLower) {
hipLaunchKernelGGL(( slascl_2x2_lower) , dim3(grid), dim3(threads), 0, queue , m, dW, lddw, dA, ldda);
}
else {
hipLaunchKernelGGL(( slascl_2x2_upper) , dim3(grid), dim3(threads), 0, queue , m, dW, lddw, dA, ldda);
}
}
/**
@see magmablas_slascl2_q
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slascl_2x2(
magma_type_t type, magma_int_t m,
float *dW, magma_int_t lddw,
float *dA, magma_int_t ldda,
magma_int_t *info )
{
magmablas_slascl_2x2_q( type, m, dW, lddw, dA, ldda, info, magma_stream );
}
| e70e16e4c1b9fd25fa2727fd54a9adb24307e287.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zlascl_2x2.cu normal z -> s, Fri Jan 30 19:00:09 2015
@author Ichitaro Yamazaki
*/
#include "common_magma.h"
#define NB 64
#define A(i,j) (A[(i) + (j)*lda])
#define W(i,j) (W[(i) + (j)*ldw])
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
slascl_2x2_lower(int m, const float* W, int ldw, float* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
float D21 = W( 1, 0 );
float D11 = MAGMA_S_DIV( W( 1, 1 ), D21 );
float D22 = MAGMA_S_DIV( W( 0, 0 ), MAGMA_S_CNJG( D21 ) );
float T = 1.0 / ( MAGMA_S_REAL( D11*D22 ) - 1.0 );
D21 = MAGMA_S_DIV( MAGMA_S_MAKE(T,0.0), D21 );
if (ind < m) {
A( ind, 0 ) = MAGMA_S_CNJG( D21 )*( D11*W( 2+ind, 0 )-W( 2+ind, 1 ) );
A( ind, 1 ) = D21*( D22*W( 2+ind, 1 )-W( 2+ind, 0 ) );
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
slascl_2x2_upper(int m, const float *W, int ldw, float* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
float D21 = W( m, 1 );
float D11 = MAGMA_S_DIV( W( m+1, 1 ), MAGMA_S_CNJG( D21 ) );
float D22 = MAGMA_S_DIV( W( m, 0 ), D21 );
float T = 1.0 / ( MAGMA_S_REAL( D11*D22 ) - 1.0 );
D21 = MAGMA_S_DIV( MAGMA_S_MAKE(T,0.0), D21 );
if (ind < m) {
A( ind, 0 ) = D21*( D11*W( ind, 0 )-W( ind, 1 ) );
A( ind, 1 ) = MAGMA_S_CNJG( D21 )*( D22*W( ind, 1 )-W( ind, 0 ) );
}
}
/**
Purpose
-------
SLASCL_2x2 scales the M by M real matrix A by the 2-by-2 pivot.
TYPE specifies that A may be upper or lower triangular.
Arguments
---------
\param[in]
type magma_type_t
            TYPE indicates the storage type of the input matrix A.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
\param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
\param[in]
dW REAL vector, dimension (2*lddw)
The matrix containing the 2-by-2 pivot.
\param[in]
lddw INTEGER
            The leading dimension of the array W.  LDDW >= max(1,M).
\param[in,out]
dA REAL array, dimension (LDDA,N)
The matrix to be scaled by dW. See TYPE for the
storage type.
\param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
\param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slascl_2x2_q(
magma_type_t type, magma_int_t m,
const float *dW, magma_int_t lddw,
float *dA, magma_int_t ldda,
magma_int_t *info, magma_queue_t queue )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( ldda < max(1,m) )
*info = -4;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 grid( (m + NB - 1)/NB );
dim3 threads( NB );
if (type == MagmaLower) {
slascl_2x2_lower <<< grid, threads, 0, queue >>> (m, dW, lddw, dA, ldda);
}
else {
slascl_2x2_upper <<< grid, threads, 0, queue >>> (m, dW, lddw, dA, ldda);
}
}
/**
@see magmablas_slascl2_q
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_slascl_2x2(
magma_type_t type, magma_int_t m,
float *dW, magma_int_t lddw,
float *dA, magma_int_t ldda,
magma_int_t *info )
{
magmablas_slascl_2x2_q( type, m, dW, lddw, dA, ldda, info, magma_stream );
}
|
347bb2b5cd941d229c19656ecc5216ddc87eb371.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//This code allocates a widthxheight 2D array of floats and
//shows how to loop over the array elements in device code.
//Device code
__global__ void MyKernel(float* devPtr, size_t pitch, int width, int height)
{
for (int r = 0; r < height; r++)
{
float* row = (float*)((char*)devPtr + r*pitch);
for (int c = 0; c < width; c++)
{
float element = row[c];
}
}
}
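// Note (added): with a pitched allocation, element (r, c) lives at byte offset
// r*pitch + c*sizeof(float); pitch is at least width*sizeof(float) and is
// usually rounded up by the runtime for alignment, which is why rows are
// addressed through (char*)devPtr + r*pitch rather than devPtr + r*width.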
//Host code
int main()
{
int width = 64, height = 64;
float* devPtr; //devPtr is a pointer.
size_t pitch;
hipMallocPitch(&devPtr, &pitch, width*sizeof(float), height);
hipLaunchKernelGGL(( MyKernel), dim3(100), dim3(512), 0, 0, devPtr, pitch, width, height);
return 0;
}
 | 347bb2b5cd941d229c19656ecc5216ddc87eb371.cu | //This code allocates a width x height 2D array of floats and
//shows how to loop over the array elements in device code.
//Device code
__global__ void MyKernel(float* devPtr, size_t pitch, int width, int height)
{
for (int r = 0; r < height; r++)
{
float* row = (float*)((char*)devPtr + r*pitch);
for (int c = 0; c < width; c++)
{
float element = row[c];
}
}
}
//Host code
int main()
{
int width = 64, height = 64;
float* devPtr; //devPtr is a pointer.
size_t pitch;
cudaMallocPitch(&devPtr, &pitch, width*sizeof(float), height);
MyKernel<<<100, 512>>>(devPtr, pitch, width, height);
return 0;
}
|
60bb9488be7a9677658fe47a4d328d129a860705.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// axpy kernel for matrices stored in the MAGMA format
__global__ void
zgedensereimsplit_kernel(
int num_rows,
int num_cols,
magma_index_t* rowidx,
magmaDoubleComplex * A,
magmaDoubleComplex * ReA,
magmaDoubleComplex * ImA )
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=0; j<num_cols; j++ ){
ReA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_REAL( A[ j ] ), 0.0 );
ImA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_IMAG( A[ j ] ), 0.0 );
}
}
}
/**
Purpose
-------
    This routine takes an input matrix A in DENSE format, located on the GPU,
    and splits it into two matrices ReA and ImA containing the real and the
imaginary contributions of A.
The output matrices are allocated within the routine.
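    For example (added note): if an entry of A is 1.0 + 2.0i, the kernel stores
    1.0 + 0.0i into ReA and 2.0 + 0.0i into ImA, i.e. both outputs are
    complex-typed matrices that carry only real values.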
Arguments
---------
@param[in]
A magma_z_matrix
input matrix A.
@param[out]
ReA magma_z_matrix*
                output matrix containing the real contributions.
@param[out]
ImA magma_z_matrix*
                output matrix containing the imaginary contributions.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C"
magma_int_t
magma_zgedensereimsplit(
magma_z_matrix A,
magma_z_matrix *ReA,
magma_z_matrix *ImA,
magma_queue_t queue )
{
magma_zmtransfer( A, ReA, Magma_DEV, Magma_DEV, queue );
magma_zmtransfer( A, ImA, Magma_DEV, Magma_DEV, queue );
int m = A.num_rows;
int n = A.num_cols;
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( zgedensereimsplit_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, A.row, A.dval, ReA->dval, ImA->dval );
return MAGMA_SUCCESS;
}
| 60bb9488be7a9677658fe47a4d328d129a860705.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// axpy kernel for matrices stored in the MAGMA format
__global__ void
zgedensereimsplit_kernel(
int num_rows,
int num_cols,
magma_index_t* rowidx,
magmaDoubleComplex * A,
magmaDoubleComplex * ReA,
magmaDoubleComplex * ImA )
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=0; j<num_cols; j++ ){
ReA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_REAL( A[ j ] ), 0.0 );
ImA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_IMAG( A[ j ] ), 0.0 );
}
}
}
/**
Purpose
-------
    This routine takes an input matrix A in DENSE format, located on the GPU,
    and splits it into two matrices ReA and ImA containing the real and the
imaginary contributions of A.
The output matrices are allocated within the routine.
Arguments
---------
@param[in]
A magma_z_matrix
input matrix A.
@param[out]
ReA magma_z_matrix*
                output matrix containing the real contributions.
@param[out]
ImA magma_z_matrix*
                output matrix containing the imaginary contributions.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C"
magma_int_t
magma_zgedensereimsplit(
magma_z_matrix A,
magma_z_matrix *ReA,
magma_z_matrix *ImA,
magma_queue_t queue )
{
magma_zmtransfer( A, ReA, Magma_DEV, Magma_DEV, queue );
magma_zmtransfer( A, ImA, Magma_DEV, Magma_DEV, queue );
int m = A.num_rows;
int n = A.num_cols;
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
zgedensereimsplit_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, A.row, A.dval, ReA->dval, ImA->dval );
return MAGMA_SUCCESS;
}
|
dbcf740e2cfd0f42ed9be8026def812336f4e079.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// -------------------------------------------------------------
// CUDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision: 5636 $
// $Date: 2009-07-02 13:39:38 +1000 (Thu, 02 Jul 2009) $
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt
// in the root directory of this source distribution.
// -------------------------------------------------------------
/**
* @file
* scan_app.cu
*
* @brief CUDPP application-level scan routines
*/
/** \defgroup cudpp_app CUDPP Application-Level API
* The CUDPP Application-Level API contains functions
* that run on the host CPU and invoke GPU routines in
* the CUDPP \link cudpp_kernel Kernel-Level API\endlink.
* Application-Level API functions are used by
* CUDPP \link publicInterface Public Interface\endlink
* functions to implement CUDPP's core functionality.
* @{
*/
/** @name Scan Functions
* @{
*/
// #include <cstdlib>
// #include <cstdio>
// #include <assert.h>
#include "cuda_util.h"
#include "cudpp.h"
#include "cudpp_util.h"
//#include "cudpp_plan.h"
#include "cudpp_scan.h"
#include "scan_kernel.cuh"
#include "vector_kernel.cuh"
/** @brief Perform recursive scan on arbitrary size arrays
*
* This is the CPU-side workhorse function of the scan engine. This function
* invokes the CUDA kernels which perform the scan on individual blocks.
*
* Scans of large arrays must be split (possibly recursively) into a hierarchy of block scans,
* where each block is scanned by a single CUDA thread block. At each recursive level of the
* scanArrayRecursive first invokes a kernel to scan all blocks of that level, and if the level
* has more than one block, it calls itself recursively. On returning from each recursive level,
* the total sum of each block from the level below is added to all elements of the corresponding
* block in this level. See "Parallel Prefix Sum (Scan) in CUDA" for more information (see
* \ref references ).
*
* Template parameter \a T is the datatype; \a isBackward specifies backward or forward scan;
* \a isExclusive specifies exclusive or inclusive scan, and \a op specifies the binary associative
* operator to be used.
*
* @param[out] d_out The output array for the scan results
* @param[in] d_in The input array to be scanned
* @param[out] d_blockSums Array of arrays of per-block sums (one array per recursive level, allocated
* by allocScanStorage())
* @param[in] numElements The number of elements in the array to scan
* @param[in] numRows The number of rows in the array to scan
* @param[in] rowPitches Array of row pitches (one array per recursive level, allocated by
* allocScanStorage())
* @param[in] level The current recursive level of the scan
*/
template <class T, bool isBackward, bool isExclusive, class Op>
void scanArrayRecursive(T *d_out,
const T *d_in,
T **d_blockSums,
size_t numElements,
size_t numRows,
const size_t *rowPitches,
int level)
{
unsigned int numBlocks =
max(1, (unsigned int)ceil((double)numElements / ((double)SCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE)));
unsigned int sharedEltsPerBlock = SCAN_CTA_SIZE * 2;
unsigned int sharedMemSize = sizeof(T) * sharedEltsPerBlock;
// divide pitch by four since scan's load/store addresses are for vec4 elements
unsigned int rowPitch = 1;
unsigned int blockSumRowPitch = 1;
if (numRows > 1)
{
rowPitch = (unsigned int)(rowPitches[level] / 4);
blockSumRowPitch = (unsigned int)((numBlocks > 1) ? rowPitches[level+1] / 4 : 0);
}
bool fullBlock = (numElements == numBlocks * SCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE);
// setup execution parameters
dim3 grid(numBlocks, (unsigned int)numRows, 1);
dim3 threads(SCAN_CTA_SIZE, 1, 1);
// make sure there are no CUDA errors before we start
CUDA_CHECK_ERROR("scanArray before kernels");
unsigned int traitsCode = 0;
if (numBlocks > 1) traitsCode |= 1;
if (numRows > 1) traitsCode |= 2;
if (fullBlock) traitsCode |= 4;
switch (traitsCode)
{
case 0: // single block, single row, non-full block
hipLaunchKernelGGL(( scan4<T, ScanTraits<T, Op, isBackward, isExclusive, false, false, false> >)
, dim3(grid), dim3(threads), sharedMemSize , 0,
d_out, d_in, 0, (unsigned)numElements, rowPitch, blockSumRowPitch);
break;
case 1: // multiblock, single row, non-full block
hipLaunchKernelGGL(( scan4< T, ScanTraits<T, Op, isBackward, isExclusive, false, true, false> >)
, dim3(grid), dim3(threads), sharedMemSize , 0,
d_out, d_in, d_blockSums[level], (unsigned)numElements, rowPitch, blockSumRowPitch);
break;
case 2: // single block, multirow, non-full block
hipLaunchKernelGGL(( scan4<T, ScanTraits<T, Op, isBackward, isExclusive, true, false, false> >)
, dim3(grid), dim3(threads), sharedMemSize , 0,
d_out, d_in, 0, (unsigned)numElements, rowPitch, blockSumRowPitch);
break;
case 3: // multiblock, multirow, non-full block
hipLaunchKernelGGL(( scan4<T, ScanTraits<T, Op, isBackward, isExclusive, true, true, false> >)
, dim3(grid), dim3(threads), sharedMemSize , 0,
d_out, d_in, d_blockSums[level], (unsigned)numElements, rowPitch, blockSumRowPitch);
break;
case 4: // single block, single row, full block
hipLaunchKernelGGL(( scan4<T, ScanTraits<T, Op, isBackward, isExclusive, false, false, true> >)
, dim3(grid), dim3(threads), sharedMemSize , 0,
d_out, d_in, 0, (unsigned)numElements, rowPitch, blockSumRowPitch);
break;
case 5: // multiblock, single row, full block
hipLaunchKernelGGL(( scan4< T, ScanTraits<T, Op, isBackward, isExclusive, false, true, true> >)
, dim3(grid), dim3(threads), sharedMemSize , 0,
d_out, d_in, d_blockSums[level], (unsigned)numElements, rowPitch, blockSumRowPitch);
break;
case 6: // single block, multirow, full block
hipLaunchKernelGGL(( scan4<T, ScanTraits<T, Op, isBackward, isExclusive, true, false, true> >)
, dim3(grid), dim3(threads), sharedMemSize , 0,
d_out, d_in, 0, (unsigned)numElements, rowPitch, blockSumRowPitch);
break;
case 7: // multiblock, multirow, full block
hipLaunchKernelGGL(( scan4<T, ScanTraits<T, Op, isBackward, isExclusive, true, true, true> >)
, dim3(grid), dim3(threads), sharedMemSize , 0,
d_out, d_in, d_blockSums[level], (unsigned)numElements, rowPitch, blockSumRowPitch);
break;
}
CUDA_CHECK_ERROR("prescan");
if (numBlocks > 1)
{
// After scanning all the sub-blocks, we are mostly done. But
// now we need to take all of the last values of the
// sub-blocks and scan those. This will give us a new value
        // that must be added to each block to get the final results.
scanArrayRecursive<T, isBackward, true, Op>
((T*)d_blockSums[level], (const T*)d_blockSums[level],
(T**)d_blockSums, numBlocks, numRows, rowPitches, level + 1); // recursive (CPU) call
if (fullBlock)
hipLaunchKernelGGL(( vectorAddUniform4<T, Op, SCAN_ELTS_PER_THREAD, true>)
, dim3(grid), dim3(threads) , 0, 0, d_out,
(T*)d_blockSums[level],
(unsigned)numElements,
rowPitch*4,
blockSumRowPitch*4,
0, 0);
else
hipLaunchKernelGGL(( vectorAddUniform4<T, Op, SCAN_ELTS_PER_THREAD, false>)
, dim3(grid), dim3(threads) , 0, 0, d_out,
(T*)d_blockSums[level],
(unsigned)numElements,
rowPitch*4,
blockSumRowPitch*4,
0, 0);
CUDA_CHECK_ERROR("vectorAddUniform");
}
}
// global
#ifdef __cplusplus
extern "C"
{
#endif
/** @brief Allocate intermediate arrays used by scan.
*
* Scans of large arrays must be split (possibly recursively) into a hierarchy
* of block scans, where each block is scanned by a single CUDA thread block.
* At each recursive level of the scan, we need an array in which to store the
* total sums of all blocks in that level. This function computes the amount
* of storage needed and allocates it.
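 *
 * For example (added note, same 1024-elements-per-block assumption as in
 * scanArrayRecursive above): for 1,000,000 CUDPP_UINT elements,
 * ceil(1000000/1024) = 977 blocks are needed, so a single 977-element block
 * sums array is allocated and m_numLevelsAllocated ends up as 1.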
*
* @param plan Pointer to CUDPPScanPlan object containing options and number
* of elements, which is used to compute storage requirements, and
* within which intermediate storage is allocated.
*/
void allocScanStorage(CUDPPScanPlan *plan)
{
plan->m_numEltsAllocated = plan->m_numElements;
size_t numElts = plan->m_numElements;
size_t level = 0;
do
{
size_t numBlocks =
max(1, (unsigned int)ceil((double)numElts / ((double)SCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE)));
if (numBlocks > 1)
{
level++;
}
numElts = numBlocks;
} while (numElts > 1);
size_t elementSize = 0;
switch(plan->m_config.datatype)
{
case CUDPP_CHAR:
plan->m_blockSums = (void**) malloc(level * sizeof(char*));
elementSize = sizeof(char);
break;
case CUDPP_UCHAR:
plan->m_blockSums = (void**) malloc(level * sizeof(unsigned char*));
elementSize = sizeof(unsigned char);
break;
case CUDPP_SHORT:
plan->m_blockSums = (void**) malloc(level * sizeof(short*));
elementSize = sizeof(short);
break;
case CUDPP_USHORT:
plan->m_blockSums = (void**) malloc(level * sizeof(unsigned short*));
elementSize = sizeof(unsigned short);
break;
case CUDPP_INT:
plan->m_blockSums = (void**) malloc(level * sizeof(int*));
elementSize = sizeof(int);
break;
case CUDPP_UINT:
plan->m_blockSums = (void**) malloc(level * sizeof(unsigned int*));
elementSize = sizeof(unsigned int);
break;
case CUDPP_FLOAT:
plan->m_blockSums = (void**) malloc(level * sizeof(float*));
elementSize = sizeof(float);
break;
case CUDPP_DOUBLE:
plan->m_blockSums = (void**) malloc(level * sizeof(double*));
elementSize = sizeof(double);
break;
case CUDPP_LONGLONG:
plan->m_blockSums = (void**) malloc(level * sizeof(long long*));
elementSize = sizeof(long long);
break;
case CUDPP_ULONGLONG:
plan->m_blockSums = (void**) malloc(level * sizeof(unsigned long long*));
elementSize = sizeof(unsigned long long);
break;
default:
break;
}
plan->m_numLevelsAllocated = level;
numElts = plan->m_numElements;
size_t numRows = plan->m_numRows;
plan->m_numRowsAllocated = numRows;
plan->m_rowPitches = 0;
if (numRows > 1)
{
plan->m_rowPitches = (size_t*) malloc((level + 1) * sizeof(size_t));
plan->m_rowPitches[0] = plan->m_rowPitch;
}
level = 0;
do
{
size_t numBlocks =
max(1, (unsigned int)ceil((double)numElts / ((double)SCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE)));
if (numBlocks > 1)
{
// Use hipMallocPitch for multi-row block sums to ensure alignment
if (numRows > 1)
{
size_t dpitch;
CUDA_SAFE_CALL( hipMallocPitch((void**) &(plan->m_blockSums[level]),
&dpitch,
numBlocks * elementSize,
numRows));
plan->m_rowPitches[level+1] = dpitch / elementSize;
level++;
}
else
{
CUDA_SAFE_CALL(hipMalloc((void**) &(plan->m_blockSums[level++]),
numBlocks * elementSize));
}
}
numElts = numBlocks;
} while (numElts > 1);
CUDA_CHECK_ERROR("allocScanStorage");
}
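/* Illustrative sketch added for clarity (not part of the original CUDPP
   source): how the level count above plays out for a hypothetical input,
   assuming SCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE == 1024 (the real constants
   come from scan_kernel.cuh and may differ):

       numElts = 1,000,000 -> numBlocks = ceil(1e6 / 1024) = 977   (level -> 1)
       numElts = 977       -> numBlocks = ceil(977 / 1024) = 1     (loop ends)

   so a single intermediate block-sums array of 977 elements is allocated on
   the device for plan->m_blockSums[0]. */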
/** @brief Deallocate intermediate block sums arrays in a CUDPPScanPlan object.
*
* These arrays must have been allocated by allocScanStorage(), which is called
* by the constructor of cudppScanPlan().
*
* @param plan Pointer to CUDPPScanPlan object initialized by allocScanStorage().
*/
void freeScanStorage(CUDPPScanPlan *plan)
{
for (unsigned int i = 0; i < plan->m_numLevelsAllocated; i++)
{
hipFree(plan->m_blockSums[i]);
}
CUDA_CHECK_ERROR("freeScanStorage");
free((void**)plan->m_blockSums);
if (plan->m_numRows > 1)
free((void*)plan->m_rowPitches);
plan->m_blockSums = 0;
plan->m_numEltsAllocated = 0;
plan->m_numLevelsAllocated = 0;
}
#ifdef __cplusplus
}
#endif
template <typename T, bool isBackward, bool isExclusive>
void cudppScanDispatchOperator(void *d_out,
const void *d_in,
size_t numElements,
size_t numRows,
const CUDPPScanPlan *plan)
{
switch(plan->m_config.op)
{
case CUDPP_ADD:
scanArrayRecursive<T, isBackward, isExclusive, OperatorAdd<T> >
((T*)d_out, (const T*)d_in,
(T**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0);
break;
case CUDPP_MULTIPLY:
scanArrayRecursive<T, isBackward, isExclusive, OperatorMultiply<T> >
((T*)d_out, (const T*)d_in,
(T**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0);
break;
case CUDPP_MAX:
scanArrayRecursive<T, isBackward, isExclusive, OperatorMax<T> >
((T*)d_out, (const T*)d_in,
(T**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0);
break;
case CUDPP_MIN:
scanArrayRecursive<T, isBackward, isExclusive, OperatorMin<T> >
((T*)d_out, (const T*)d_in,
(T**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0);
break;
default:
break;
}
}
template <bool isBackward, bool isExclusive>
void cudppScanDispatchType(void *d_out,
const void *d_in,
size_t numElements,
size_t numRows,
const CUDPPScanPlan *plan)
{
switch(plan->m_config.datatype)
{
case CUDPP_CHAR:
cudppScanDispatchOperator<char,
isBackward,
isExclusive>(d_out, d_in, numElements,
numRows, plan);
break;
case CUDPP_UCHAR:
cudppScanDispatchOperator<unsigned char,
isBackward,
isExclusive>(d_out, d_in, numElements,
numRows, plan);
break;
case CUDPP_SHORT:
cudppScanDispatchOperator<short,
isBackward,
isExclusive>(d_out, d_in, numElements,
numRows, plan);
break;
case CUDPP_USHORT:
cudppScanDispatchOperator<unsigned short,
isBackward,
isExclusive>(d_out, d_in, numElements,
numRows, plan);
break;
case CUDPP_INT:
cudppScanDispatchOperator<int,
isBackward,
isExclusive>(d_out, d_in, numElements,
numRows, plan);
break;
case CUDPP_UINT:
cudppScanDispatchOperator<unsigned int,
isBackward,
isExclusive>(d_out, d_in, numElements,
numRows, plan);
break;
case CUDPP_FLOAT:
cudppScanDispatchOperator<float,
isBackward,
isExclusive>(d_out, d_in, numElements,
numRows, plan);
break;
case CUDPP_DOUBLE:
cudppScanDispatchOperator<double,
isBackward,
isExclusive>(d_out, d_in, numElements,
numRows, plan);
break;
case CUDPP_LONGLONG:
cudppScanDispatchOperator<long long,
isBackward,
isExclusive>(d_out, d_in, numElements,
numRows, plan);
break;
case CUDPP_ULONGLONG:
cudppScanDispatchOperator<unsigned long long,
isBackward,
isExclusive>(d_out, d_in, numElements,
numRows, plan);
break;
default:
break;
}
}
#ifdef __cplusplus
extern "C"
{
#endif
/** @brief Dispatch function to perform a scan (prefix sum) on an
* array with the specified configuration.
*
* This is the dispatch routine which calls scanArrayRecursive() with
* appropriate template parameters and arguments to achieve the scan as
* specified in \a plan.
*
* @param[out] d_out The output array of scan results
* @param[in] d_in The input array
* @param[in] numElements The number of elements to scan
* @param[in] numRows The number of rows to scan in parallel
* @param[in] plan Pointer to CUDPPScanPlan object containing scan options
* and intermediate storage
*/
void cudppScanDispatch(void *d_out,
const void *d_in,
size_t numElements,
size_t numRows,
const CUDPPScanPlan *plan)
{
if (CUDPP_OPTION_EXCLUSIVE & plan->m_config.options)
{
if (CUDPP_OPTION_BACKWARD & plan->m_config.options)
{
cudppScanDispatchType<true, true>(d_out, d_in, numElements,
numRows, plan);
}
else
{
cudppScanDispatchType<false, true>(d_out, d_in, numElements,
numRows, plan);
}
}
else
{
if (CUDPP_OPTION_BACKWARD & plan->m_config.options)
{
cudppScanDispatchType<true, false>(d_out, d_in, numElements,
numRows, plan);
}
else
{
cudppScanDispatchType<false, false>(d_out, d_in, numElements,
numRows, plan);
}
}
}
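/* Illustrative usage sketch added for clarity (not part of the original
   source). Assuming a CUDPPScanPlan has already been configured elsewhere
   (e.g. through CUDPP's public plan-creation entry point, cudppPlan()) with
   op = CUDPP_ADD, datatype = CUDPP_FLOAT and options = CUDPP_OPTION_EXCLUSIVE,
   a forward exclusive float scan of a single row reduces to:

       // d_in / d_out are device pointers holding numElements floats
       cudppScanDispatch(d_out, d_in, numElements, 1, plan);

   which resolves to cudppScanDispatchType<false, true>() above and finally
   to scanArrayRecursive<float, false, true, OperatorAdd<float> >(). */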
#ifdef __cplusplus
}
#endif
/** @} */ // end scan functions
/** @} */ // end cudpp_app
| dbcf740e2cfd0f42ed9be8026def812336f4e079.cu | // -------------------------------------------------------------
// CUDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision: 5636 $
// $Date: 2009-07-02 13:39:38 +1000 (Thu, 02 Jul 2009) $
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt
// in the root directory of this source distribution.
// -------------------------------------------------------------
/**
* @file
* scan_app.cu
*
* @brief CUDPP application-level scan routines
*/
/** \defgroup cudpp_app CUDPP Application-Level API
* The CUDPP Application-Level API contains functions
* that run on the host CPU and invoke GPU routines in
* the CUDPP \link cudpp_kernel Kernel-Level API\endlink.
* Application-Level API functions are used by
* CUDPP \link publicInterface Public Interface\endlink
* functions to implement CUDPP's core functionality.
* @{
*/
/** @name Scan Functions
* @{
*/
// #include <cstdlib>
// #include <cstdio>
// #include <assert.h>
#include "cuda_util.h"
#include "cudpp.h"
#include "cudpp_util.h"
//#include "cudpp_plan.h"
#include "cudpp_scan.h"
#include "scan_kernel.cuh"
#include "vector_kernel.cuh"
/** @brief Perform recursive scan on arbitrary size arrays
*
* This is the CPU-side workhorse function of the scan engine. This function
* invokes the CUDA kernels which perform the scan on individual blocks.
*
* Scans of large arrays must be split (possibly recursively) into a hierarchy of block scans,
* where each block is scanned by a single CUDA thread block. At each recursive level of the
* scanArrayRecursive first invokes a kernel to scan all blocks of that level, and if the level
* has more than one block, it calls itself recursively. On returning from each recursive level,
* the total sum of each block from the level below is added to all elements of the corresponding
* block in this level. See "Parallel Prefix Sum (Scan) in CUDA" for more information (see
* \ref references ).
*
* Template parameter \a T is the datatype; \a isBackward specifies backward or forward scan;
* \a isExclusive specifies exclusive or inclusive scan, and \a op specifies the binary associative
* operator to be used.
*
* @param[out] d_out The output array for the scan results
* @param[in] d_in The input array to be scanned
* @param[out] d_blockSums Array of arrays of per-block sums (one array per recursive level, allocated
* by allocScanStorage())
* @param[in] numElements The number of elements in the array to scan
* @param[in] numRows The number of rows in the array to scan
* @param[in] rowPitches Array of row pitches (one array per recursive level, allocated by
* allocScanStorage())
* @param[in] level The current recursive level of the scan
*/
template <class T, bool isBackward, bool isExclusive, class Op>
void scanArrayRecursive(T *d_out,
const T *d_in,
T **d_blockSums,
size_t numElements,
size_t numRows,
const size_t *rowPitches,
int level)
{
unsigned int numBlocks =
max(1, (unsigned int)ceil((double)numElements / ((double)SCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE)));
unsigned int sharedEltsPerBlock = SCAN_CTA_SIZE * 2;
unsigned int sharedMemSize = sizeof(T) * sharedEltsPerBlock;
// divide pitch by four since scan's load/store addresses are for vec4 elements
unsigned int rowPitch = 1;
unsigned int blockSumRowPitch = 1;
if (numRows > 1)
{
rowPitch = (unsigned int)(rowPitches[level] / 4);
blockSumRowPitch = (unsigned int)((numBlocks > 1) ? rowPitches[level+1] / 4 : 0);
}
bool fullBlock = (numElements == numBlocks * SCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE);
// setup execution parameters
dim3 grid(numBlocks, (unsigned int)numRows, 1);
dim3 threads(SCAN_CTA_SIZE, 1, 1);
// make sure there are no CUDA errors before we start
CUDA_CHECK_ERROR("scanArray before kernels");
unsigned int traitsCode = 0;
if (numBlocks > 1) traitsCode |= 1;
if (numRows > 1) traitsCode |= 2;
if (fullBlock) traitsCode |= 4;
switch (traitsCode)
{
case 0: // single block, single row, non-full block
scan4<T, ScanTraits<T, Op, isBackward, isExclusive, false, false, false> >
<<< grid, threads, sharedMemSize >>>
(d_out, d_in, 0, (unsigned)numElements, rowPitch, blockSumRowPitch);
break;
case 1: // multiblock, single row, non-full block
scan4< T, ScanTraits<T, Op, isBackward, isExclusive, false, true, false> >
<<< grid, threads, sharedMemSize >>>
(d_out, d_in, d_blockSums[level], (unsigned)numElements, rowPitch, blockSumRowPitch);
break;
case 2: // single block, multirow, non-full block
scan4<T, ScanTraits<T, Op, isBackward, isExclusive, true, false, false> >
<<< grid, threads, sharedMemSize >>>
(d_out, d_in, 0, (unsigned)numElements, rowPitch, blockSumRowPitch);
break;
case 3: // multiblock, multirow, non-full block
scan4<T, ScanTraits<T, Op, isBackward, isExclusive, true, true, false> >
<<< grid, threads, sharedMemSize >>>
(d_out, d_in, d_blockSums[level], (unsigned)numElements, rowPitch, blockSumRowPitch);
break;
case 4: // single block, single row, full block
scan4<T, ScanTraits<T, Op, isBackward, isExclusive, false, false, true> >
<<< grid, threads, sharedMemSize >>>
(d_out, d_in, 0, (unsigned)numElements, rowPitch, blockSumRowPitch);
break;
case 5: // multiblock, single row, full block
scan4< T, ScanTraits<T, Op, isBackward, isExclusive, false, true, true> >
<<< grid, threads, sharedMemSize >>>
(d_out, d_in, d_blockSums[level], (unsigned)numElements, rowPitch, blockSumRowPitch);
break;
case 6: // single block, multirow, full block
scan4<T, ScanTraits<T, Op, isBackward, isExclusive, true, false, true> >
<<< grid, threads, sharedMemSize >>>
(d_out, d_in, 0, (unsigned)numElements, rowPitch, blockSumRowPitch);
break;
case 7: // multiblock, multirow, full block
scan4<T, ScanTraits<T, Op, isBackward, isExclusive, true, true, true> >
<<< grid, threads, sharedMemSize >>>
(d_out, d_in, d_blockSums[level], (unsigned)numElements, rowPitch, blockSumRowPitch);
break;
}
CUDA_CHECK_ERROR("prescan");
if (numBlocks > 1)
{
// After scanning all the sub-blocks, we are mostly done. But
// now we need to take all of the last values of the
// sub-blocks and scan those. This will give us a new value
        // that must be added to each block to get the final results.
scanArrayRecursive<T, isBackward, true, Op>
((T*)d_blockSums[level], (const T*)d_blockSums[level],
(T**)d_blockSums, numBlocks, numRows, rowPitches, level + 1); // recursive (CPU) call
if (fullBlock)
vectorAddUniform4<T, Op, SCAN_ELTS_PER_THREAD, true>
<<< grid, threads >>>(d_out,
(T*)d_blockSums[level],
(unsigned)numElements,
rowPitch*4,
blockSumRowPitch*4,
0, 0);
else
vectorAddUniform4<T, Op, SCAN_ELTS_PER_THREAD, false>
<<< grid, threads >>>(d_out,
(T*)d_blockSums[level],
(unsigned)numElements,
rowPitch*4,
blockSumRowPitch*4,
0, 0);
CUDA_CHECK_ERROR("vectorAddUniform");
}
}
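/* Worked example added for clarity (not part of the original source):
   decoding the traitsCode dispatch above for a hypothetical single-row scan,
   assuming SCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE == 1024:

       numElements = 4096, numRows = 1
       numBlocks = 4  -> bit 0 set   (multiblock)
       numRows   = 1  -> bit 1 clear (single row)
       fullBlock      -> bit 2 set   (4096 == 4 * 1024)
       traitsCode = 5 -> "multiblock, single row, full block" branch,

   which also takes the numBlocks > 1 path above: a recursive scan of the
   block sums followed by the vectorAddUniform4 uniform-add pass. */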
// global
#ifdef __cplusplus
extern "C"
{
#endif
/** @brief Allocate intermediate arrays used by scan.
*
* Scans of large arrays must be split (possibly recursively) into a hierarchy
* of block scans, where each block is scanned by a single CUDA thread block.
* At each recursive level of the scan, we need an array in which to store the
* total sums of all blocks in that level. This function computes the amount
* of storage needed and allocates it.
*
* @param plan Pointer to CUDPPScanPlan object containing options and number
* of elements, which is used to compute storage requirements, and
* within which intermediate storage is allocated.
*/
void allocScanStorage(CUDPPScanPlan *plan)
{
plan->m_numEltsAllocated = plan->m_numElements;
size_t numElts = plan->m_numElements;
size_t level = 0;
do
{
size_t numBlocks =
max(1, (unsigned int)ceil((double)numElts / ((double)SCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE)));
if (numBlocks > 1)
{
level++;
}
numElts = numBlocks;
} while (numElts > 1);
size_t elementSize = 0;
switch(plan->m_config.datatype)
{
case CUDPP_CHAR:
plan->m_blockSums = (void**) malloc(level * sizeof(char*));
elementSize = sizeof(char);
break;
case CUDPP_UCHAR:
plan->m_blockSums = (void**) malloc(level * sizeof(unsigned char*));
elementSize = sizeof(unsigned char);
break;
case CUDPP_SHORT:
plan->m_blockSums = (void**) malloc(level * sizeof(short*));
elementSize = sizeof(short);
break;
case CUDPP_USHORT:
plan->m_blockSums = (void**) malloc(level * sizeof(unsigned short*));
elementSize = sizeof(unsigned short);
break;
case CUDPP_INT:
plan->m_blockSums = (void**) malloc(level * sizeof(int*));
elementSize = sizeof(int);
break;
case CUDPP_UINT:
plan->m_blockSums = (void**) malloc(level * sizeof(unsigned int*));
elementSize = sizeof(unsigned int);
break;
case CUDPP_FLOAT:
plan->m_blockSums = (void**) malloc(level * sizeof(float*));
elementSize = sizeof(float);
break;
case CUDPP_DOUBLE:
plan->m_blockSums = (void**) malloc(level * sizeof(double*));
elementSize = sizeof(double);
break;
case CUDPP_LONGLONG:
plan->m_blockSums = (void**) malloc(level * sizeof(long long*));
elementSize = sizeof(long long);
break;
case CUDPP_ULONGLONG:
plan->m_blockSums = (void**) malloc(level * sizeof(unsigned long long*));
elementSize = sizeof(unsigned long long);
break;
default:
break;
}
plan->m_numLevelsAllocated = level;
numElts = plan->m_numElements;
size_t numRows = plan->m_numRows;
plan->m_numRowsAllocated = numRows;
plan->m_rowPitches = 0;
if (numRows > 1)
{
plan->m_rowPitches = (size_t*) malloc((level + 1) * sizeof(size_t));
plan->m_rowPitches[0] = plan->m_rowPitch;
}
level = 0;
do
{
size_t numBlocks =
max(1, (unsigned int)ceil((double)numElts / ((double)SCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE)));
if (numBlocks > 1)
{
// Use cudaMallocPitch for multi-row block sums to ensure alignment
if (numRows > 1)
{
size_t dpitch;
CUDA_SAFE_CALL( cudaMallocPitch((void**) &(plan->m_blockSums[level]),
&dpitch,
numBlocks * elementSize,
numRows));
plan->m_rowPitches[level+1] = dpitch / elementSize;
level++;
}
else
{
CUDA_SAFE_CALL(cudaMalloc((void**) &(plan->m_blockSums[level++]),
numBlocks * elementSize));
}
}
numElts = numBlocks;
} while (numElts > 1);
CUDA_CHECK_ERROR("allocScanStorage");
}
/** @brief Deallocate intermediate block sums arrays in a CUDPPScanPlan object.
*
* These arrays must have been allocated by allocScanStorage(), which is called
* by the constructor of cudppScanPlan().
*
* @param plan Pointer to CUDPPScanPlan object initialized by allocScanStorage().
*/
void freeScanStorage(CUDPPScanPlan *plan)
{
for (unsigned int i = 0; i < plan->m_numLevelsAllocated; i++)
{
cudaFree(plan->m_blockSums[i]);
}
CUDA_CHECK_ERROR("freeScanStorage");
free((void**)plan->m_blockSums);
if (plan->m_numRows > 1)
free((void*)plan->m_rowPitches);
plan->m_blockSums = 0;
plan->m_numEltsAllocated = 0;
plan->m_numLevelsAllocated = 0;
}
#ifdef __cplusplus
}
#endif
template <typename T, bool isBackward, bool isExclusive>
void cudppScanDispatchOperator(void *d_out,
const void *d_in,
size_t numElements,
size_t numRows,
const CUDPPScanPlan *plan)
{
switch(plan->m_config.op)
{
case CUDPP_ADD:
scanArrayRecursive<T, isBackward, isExclusive, OperatorAdd<T> >
((T*)d_out, (const T*)d_in,
(T**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0);
break;
case CUDPP_MULTIPLY:
scanArrayRecursive<T, isBackward, isExclusive, OperatorMultiply<T> >
((T*)d_out, (const T*)d_in,
(T**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0);
break;
case CUDPP_MAX:
scanArrayRecursive<T, isBackward, isExclusive, OperatorMax<T> >
((T*)d_out, (const T*)d_in,
(T**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0);
break;
case CUDPP_MIN:
scanArrayRecursive<T, isBackward, isExclusive, OperatorMin<T> >
((T*)d_out, (const T*)d_in,
(T**)plan->m_blockSums,
numElements, numRows, plan->m_rowPitches, 0);
break;
default:
break;
}
}
template <bool isBackward, bool isExclusive>
void cudppScanDispatchType(void *d_out,
const void *d_in,
size_t numElements,
size_t numRows,
const CUDPPScanPlan *plan)
{
switch(plan->m_config.datatype)
{
case CUDPP_CHAR:
cudppScanDispatchOperator<char,
isBackward,
isExclusive>(d_out, d_in, numElements,
numRows, plan);
break;
case CUDPP_UCHAR:
cudppScanDispatchOperator<unsigned char,
isBackward,
isExclusive>(d_out, d_in, numElements,
numRows, plan);
break;
case CUDPP_SHORT:
cudppScanDispatchOperator<short,
isBackward,
isExclusive>(d_out, d_in, numElements,
numRows, plan);
break;
case CUDPP_USHORT:
cudppScanDispatchOperator<unsigned short,
isBackward,
isExclusive>(d_out, d_in, numElements,
numRows, plan);
break;
case CUDPP_INT:
cudppScanDispatchOperator<int,
isBackward,
isExclusive>(d_out, d_in, numElements,
numRows, plan);
break;
case CUDPP_UINT:
cudppScanDispatchOperator<unsigned int,
isBackward,
isExclusive>(d_out, d_in, numElements,
numRows, plan);
break;
case CUDPP_FLOAT:
cudppScanDispatchOperator<float,
isBackward,
isExclusive>(d_out, d_in, numElements,
numRows, plan);
break;
case CUDPP_DOUBLE:
cudppScanDispatchOperator<double,
isBackward,
isExclusive>(d_out, d_in, numElements,
numRows, plan);
break;
case CUDPP_LONGLONG:
cudppScanDispatchOperator<long long,
isBackward,
isExclusive>(d_out, d_in, numElements,
numRows, plan);
break;
case CUDPP_ULONGLONG:
cudppScanDispatchOperator<unsigned long long,
isBackward,
isExclusive>(d_out, d_in, numElements,
numRows, plan);
break;
default:
break;
}
}
#ifdef __cplusplus
extern "C"
{
#endif
/** @brief Dispatch function to perform a scan (prefix sum) on an
* array with the specified configuration.
*
* This is the dispatch routine which calls scanArrayRecursive() with
* appropriate template parameters and arguments to achieve the scan as
* specified in \a plan.
*
* @param[out] d_out The output array of scan results
* @param[in] d_in The input array
* @param[in] numElements The number of elements to scan
* @param[in] numRows The number of rows to scan in parallel
* @param[in] plan Pointer to CUDPPScanPlan object containing scan options
* and intermediate storage
*/
void cudppScanDispatch(void *d_out,
const void *d_in,
size_t numElements,
size_t numRows,
const CUDPPScanPlan *plan)
{
if (CUDPP_OPTION_EXCLUSIVE & plan->m_config.options)
{
if (CUDPP_OPTION_BACKWARD & plan->m_config.options)
{
cudppScanDispatchType<true, true>(d_out, d_in, numElements,
numRows, plan);
}
else
{
cudppScanDispatchType<false, true>(d_out, d_in, numElements,
numRows, plan);
}
}
else
{
if (CUDPP_OPTION_BACKWARD & plan->m_config.options)
{
cudppScanDispatchType<true, false>(d_out, d_in, numElements,
numRows, plan);
}
else
{
cudppScanDispatchType<false, false>(d_out, d_in, numElements,
numRows, plan);
}
}
}
#ifdef __cplusplus
}
#endif
/** @} */ // end scan functions
/** @} */ // end cudpp_app
|
b3bfaa59f490dcc918401d2bf92f165cd7a127d4.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (C) 2016 Sergey Demyanov.
contact: [email protected]
http://www.demyanov.net
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "mat_gpu.h"
#include "cuda_util.h"
void MatGPU::SetTensorDesc(const Dim &dims) {
mexAssertMsg(size() == dims[0] * dims[1] * dims[2] * dims[3],
"In SetTensorDesc dims assert");
if (dims[0] > 0 || dims[1] > 0 || dims[2] > 0 || dims[3] > 0) {
if (tensor_desc_ == NULL) {
CUDNN_CALL(cudnnCreateTensorDescriptor(&tensor_desc_));
}
CUDNN_CALL(cudnnSetTensor4dDescriptor(
tensor_desc_, CUDNN_LAYOUT, CUDNN_TYPE,
dims[0], dims[1], dims[2], dims[3]
));
}
}
void MatGPU::SetFilterDesc(const Dim &dims) {
mexAssertMsg(size() == dims[0] * dims[1] * dims[2] * dims[3],
"In SetFilterDesc dims assert");
if (dims[0] > 0 || dims[1] > 0 || dims[2] > 0 || dims[3] > 0) {
if (filter_desc_ == NULL) {
CUDNN_CALL(cudnnCreateFilterDescriptor(&filter_desc_));
}
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filter_desc_, CUDNN_TYPE, CUDNN_LAYOUT,
dims[0], dims[1], dims[2], dims[3]
));
}
}
cudnnTensorDescriptor_t MatGPU::GetTensorDesc() {
mexAssertMsg(tensor_desc_ != NULL, "Empty tensor descriptor");
return tensor_desc_;
}
cudnnFilterDescriptor_t MatGPU::GetFilterDesc() {
mexAssertMsg(filter_desc_ != NULL, "Empty filter descriptor");
return filter_desc_;
}
void MatGPU::ClearTensorDesc() {
if (tensor_desc_ != NULL) {
CUDNN_CALL(cudnnDestroyTensorDescriptor(tensor_desc_));
tensor_desc_ = NULL;
}
}
void MatGPU::ClearFilterDesc() {
if (filter_desc_ != NULL) {
CUDNN_CALL(cudnnDestroyFilterDescriptor(filter_desc_));
filter_desc_ = NULL;
}
}
// static
hipEvent_t MatGPU::_start, MatGPU::_stop;
void MatGPU::StartCudaTimer() {
if (print < 2) return;
hipEventRecord(_start, 0);
}
void MatGPU::MeasureCudaTime(std::string msg) {
if (print < 2) return;
float elapsedTime;
hipEventRecord(_stop, 0);
hipEventSynchronize(_stop);
hipEventElapsedTime(&elapsedTime, _start, _stop);
mexPrintMsg(msg, elapsedTime);
}
hiprandGenerator_t MatGPU::_randGen;
hipStream_t MatGPU::_defaultStream;
hipblasHandle_t MatGPU::_cublasHandle;
cudnnHandle_t MatGPU::_cudnnHandle;
size_t MatGPU::_cudnnMemoryLimit;
MatGPU MatGPU::_workspace;
int MatGPU::getDeviceID() {
int id;
CUDA_CALL(hipGetDevice(&id));
return id;
}
void MatGPU::InitCuda(int gpu) {
int num;
CUDA_CALL(hipGetDeviceCount(&num));
mexAssertMsg(gpu < num, "Requested GPU index is not available");
CUDA_CALL(hipSetDevice(gpu));
hipDeviceProp_t prop;
CUDA_CALL(hipGetDeviceProperties(&prop, getDeviceID()));
mexPrintMsg("Executing on", prop.name);
CUDA_CALL(hipStreamCreate(&_defaultStream));
CURAND_CALL(hiprandCreateGenerator(&_randGen, HIPRAND_RNG_PSEUDO_DEFAULT));
CUBLAS_CALL(hipblasCreate(&_cublasHandle));
CUDNN_CALL(cudnnCreate(&_cudnnHandle));
hipEventCreate(&_start);
hipEventCreate(&_stop);
}
void MatGPU::InitRand(int seed) {
CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(_randGen, seed));
}
void MatGPU::CudaReset() {
hipEventDestroy(_start);
hipEventDestroy(_stop);
_workspace.clear();
CUDNN_CALL(cudnnDestroy(_cudnnHandle));
CUBLAS_CALL(hipblasDestroy(_cublasHandle));
CURAND_CALL(hiprandDestroyGenerator(_randGen));
CUDA_CALL(hipStreamDestroy(_defaultStream));
CUDA_CALL(hipDeviceReset());
}
void MatGPU::SetMemoryLimit(size_t memory) {
// converting from megabytes to bytes
_cudnnMemoryLimit = memory * 1024 * 1024;
}
// data access
ftype MatGPU::operator () (size_t i, size_t j) const {
ftype *val_ptr = new ftype [1];
CUDA_CALL(hipMemcpy(val_ptr, &data(i, j), sizeof(ftype), hipMemcpyDeviceToHost));
ftype val = val_ptr[0];
delete [] val_ptr;
return val;
}
/*
ftype MatGPU::operator () (size_t ind) const {
ftype *val_ptr = new ftype [1];
CUDA_CALL(hipMemcpy(val_ptr, &data(ind), sizeof(ftype), hipMemcpyDeviceToHost));
ftype val = val_ptr[0];
delete [] val_ptr;
return val;
}
MatGPU MatGPU::operator () (size_t ind) {
MatGPU val_mat;
val_mat.attach(&data(ind), 1, 1);
return val_mat;
}
*/
size_t MatGPU::BytesNum() const {
return size() * sizeof(ftype);
}
// memory functions
MatGPU::MatGPU() {
init();
}
MatGPU::MatGPU(size_t size1, size_t size2) {
init();
resize(size1, size2);
}
MatGPU::MatGPU(const MatGPU &b) {
init();
if (b.empty()) return;
resize(b.size1_, b.size2_);
(*this) = b;
}
MatGPU::MatGPU(MatGPU &&b) {
init();
Swap(*this, b);
}
MatGPU::~MatGPU() {
clear();
}
MatGPU& MatGPU::init() {
data_ = NULL;
size1_ = 0;
size2_ = 0;
order_ = kInternalOrder;
owner_ = false;
tensor_desc_ = NULL;
filter_desc_ = NULL;
return *this;
}
// copies only the content, not other parameters!!
MatGPU& MatGPU::operator = (const MatGPU &b) {
//mexPrintMsg("Array assignment");
mexAssertMsg(size1_ == b.size1_ && size2_ == b.size2_,
"In MatGPU::operator = the arrays have different size");
if (order_ == b.order_) {
CUDA_CALL(hipMemcpy(data_, b.data_, BytesNum(), hipMemcpyDeviceToDevice));
} else if (b.order_ != kInternalOrder) { // order_ == kInternalOrder
MatGPU br;
br.attach(b.data_, b.size2_, b.size1_, kInternalOrder);
Trans(br, *this);
} else { // b.order_ == kInternalOrder, order_ != kInternalOrder
MatGPU br;
br.attach(data_, size2_, size1_, kInternalOrder);
Trans(b, br);
}
/*
if (b.tensor_desc_ != NULL) {
SetTensorDesc(b.tensor_shape());
} else {
tensor_desc_ = NULL;
}
if (b.filter_desc_ != NULL) {
SetFilterDesc(b.filter_shape());
} else {
filter_desc_ = NULL;
} */
return *this;
}
MatGPU& MatGPU::resize(size_t size1, size_t size2) {
  // required for all cuda operations
  mexAssertMsg(size2 == 0 || size1 <= INT_MAX / size2, "Matrix is too large!");
if (size1 * size2 != size()) {
clear();
if (size1 * size2 > 0) {
//mexPrintInt("rs1", size1);
//mexPrintInt("rs2", size2);
CUDA_CALL(hipMalloc(&data_, size1 * size2 * sizeof(ftype)));
owner_ = true;
}
}
if (size1 != size1_ || size2 != size2_) {
ClearTensorDesc();
ClearFilterDesc();
}
size1_ = size1;
size2_ = size2;
return *this;
}
MatGPU& MatGPU::resize_tensor(Dim dims) {
resize(dims[0], dims[1] * dims[2] * dims[3]);
SetTensorDesc(dims);
return *this;
}
MatGPU& MatGPU::resize_filter(Dim dims) {
resize(dims[0], dims[1] * dims[2] * dims[3]);
SetFilterDesc(dims);
return *this;
}
MatGPU& MatGPU::reshape(size_t size1, size_t size2) {
mexAssertMsg(size() == size1 * size2,
"In MatGPU::reshape the sizes do not correspond");
resize(size1, size2);
return *this;
}
MatGPU& MatGPU::reshape_tensor(Dim dims) {
mexAssertMsg(size() == dims[0] * dims[1] * dims[2] * dims[3],
"In MatGPU::reshape_tensor the dimensions do not correspond");
resize_tensor(dims);
return *this;
}
MatGPU& MatGPU::reshape_filter(Dim dims) {
  mexAssertMsg(size() == dims[0] * dims[1] * dims[2] * dims[3],
    "In MatGPU::reshape_filter the dimensions do not correspond");
resize_filter(dims);
return *this;
}
MatGPU& MatGPU::attach(const MatGPU &b) {
return attach(b.data_, b.size1_, b.size2_, b.order_);
}
MatGPU& MatGPU::attach(const MatGPU &b, size_t offset, size_t size1, size_t size2, bool order) {
//mexAssertMsg(b.size1_ == 1 || b.size2_ == 1, "In MatGPU::attach with offset one of sizes should be 1");
mexAssertMsg(offset + size1 * size2 <= b.size(),
"In MatGPU::attach the sizes don't correspond each other");
return attach(b.data_ + offset, size1, size2, order);
}
MatGPU& MatGPU::attach(ftype *ptr, size_t size1, size_t size2) {
return attach(ptr, size1, size2, kInternalOrder);
}
MatGPU& MatGPU::attach(ftype *ptr, size_t size1, size_t size2, bool order) {
//mexAssertMsg(order == false, "In MatGPU::attach order should be always false");
clear();
data_ = ptr;
size1_ = size1;
size2_ = size2;
order_ = order;
return *this;
}
MatGPU& MatGPU::clear() {
ClearTensorDesc();
ClearFilterDesc();
if (owner_) {
mexAssert(data_ != NULL);
mexAssert(size() > 0);
//mexPrintInt("clear s1", size1_);
//mexPrintInt("clear s2", size2_);
CUDA_CALL(hipFree(data_));
owner_ = false;
}
init();
//mexPrintMsg("Array clear end");
return *this;
}
// be careful of using it as it does not guarantee
// that it is not used somewhere else at the same time
MatGPU& MatGPU::GetFromWorkspace(size_t size1, size_t size2) {
if (size1 * size2 > MatGPU::_workspace.size()) {
MatGPU::_workspace.resize(size1, size2);
}
attach(MatGPU::_workspace, 0, size1, size2, kInternalOrder);
return *this;
}
void Swap(MatGPU &a, MatGPU &b) {
ftype *data_tmp = b.data_;
b.data_ = a.data_;
a.data_ = data_tmp;
size_t size1_tmp = b.size1_;
b.size1_ = a.size1_;
a.size1_ = size1_tmp;
size_t size2_tmp = b.size2_;
b.size2_ = a.size2_;
a.size2_ = size2_tmp;
bool order_tmp = b.order_;
b.order_ = a.order_;
a.order_ = order_tmp;
bool owner_tmp = b.owner_;
b.owner_ = a.owner_;
a.owner_ = owner_tmp;
cudnnTensorDescriptor_t tensor_desc_tmp_ = b.tensor_desc_;
b.tensor_desc_ = a.tensor_desc_;
a.tensor_desc_ = tensor_desc_tmp_;
cudnnFilterDescriptor_t filter_desc_tmp_ = b.filter_desc_;
b.filter_desc_ = a.filter_desc_;
a.filter_desc_ = filter_desc_tmp_;
}
// data functions
MatGPU& MatGPU::ident() {
cuda_ident(*this);
return *this;
}
MatGPU& MatGPU::assign(ftype val) {
cuda_assval(*this, val);
return *this;
}
MatGPU& MatGPU::rand() {
if (!empty()) {
CURAND_CALL(hiprandGenerateUniform(_randGen, data_, size()));
}
return *this;
}
MatGPU& MatGPU::randnorm() {
if (!empty()) {
CURAND_CALL(hiprandGenerateNormal(_randGen, data_, size(), 0, 1));
}
return *this;
}
MatGPU& MatGPU::linear(ftype ca, ftype cb, const MatGPU &b, bool b_tr) {
mexAssertMsg(cb == 0 || data_ != b.data_, "In linear pointers should be different");
mexAssertMsg(order_ == b.order_, "In linear orders should be the same");
hipblasOperation_t a_op = HIPBLAS_OP_N, b_op;
if (!b_tr) {
mexAssertMsg(size1_ == b.size1_ && size2_ == b.size2_,
"In linear sizes does not correspond to each other");
b_op = HIPBLAS_OP_N;
} else {
mexAssertMsg(size1_ == b.size2_ && size2_ == b.size1_,
"In linear sizes does not correspond to each other");
b_op = HIPBLAS_OP_T;
}
int as1, as2, bs1;
if (order_ == false) {
as1 = size1_; as2 = size2_;
bs1 = b.size1_;
} else {
as1 = size2_; as2 = size1_;
bs1 = b.size2_;
}
hipStream_t stream = MatGPU::_defaultStream;
hipblasHandle_t handle = MatGPU::_cublasHandle;
const ftype scale1 = ca, scale2 = cb;
CUBLAS_CALL(hipblasSetStream(handle, stream));
CUBLAS_CALL(hipblasSgeam(handle, a_op, b_op, as1, as2,
&scale1, data_, as1,
&scale2, b.data_, bs1,
data_, as1));
return *this;
}
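/* Added note (not in the original source): linear() computes, in cuBLAS
   terms, *this = ca * (*this) + cb * op(b) via cublasSgeam, so it doubles as
   a general axpby/transpose primitive. The commented-out alternatives
   elsewhere in this file use it as:

       a.linear(1,  1, b, false);   // a += b
       a.linear(1, -1, b, false);   // a -= b
       b.linear(0,  1, a, true);    // b = transpose of a (see Trans() below)
*/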
MatGPU& MatGPU::operator += (const MatGPU &b) {
cuda_addmat(*this, b);
//linear(1, 1, b, false);
return *this;
}
MatGPU& MatGPU::operator -= (const MatGPU &b) {
cuda_submat(*this, b);
//linear(1, -1, b, false);
return *this;
}
MatGPU& MatGPU::operator *= (const MatGPU &b) {
cuda_multmat(*this, b);
return *this;
}
MatGPU& MatGPU::operator /= (const MatGPU &b) {
cuda_divmat(*this, b);
return *this;
}
MatGPU& MatGPU::operator += (ftype c) {
cuda_addval(*this, c);
return *this;
}
MatGPU& MatGPU::operator -= (ftype c) {
cuda_subval(*this, c);
return *this;
}
MatGPU& MatGPU::operator *= (ftype c) {
cuda_multval(*this, c);
//linear(c, 0, *this, false);
return *this;
}
MatGPU& MatGPU::operator /= (ftype c) {
cuda_divval(*this, c);
//linear(1.0/c, 0, *this, false);
return *this;
}
MatGPU& MatGPU::Sign() {
cuda_sign(*this);
return *this;
}
MatGPU& MatGPU::Sqrt() {
cuda_sqrt(*this);
return *this;
}
MatGPU& MatGPU::Log() {
cuda_log(*this);
return *this;
}
MatGPU& MatGPU::Exp() {
cuda_exp(*this);
return *this;
}
MatGPU& MatGPU::Sigmoid() {
cuda_sigmoid(*this);
return *this;
}
MatGPU& MatGPU::SigmDer(const MatGPU& a) {
cuda_sigmder(*this, a);
return *this;
}
MatGPU& MatGPU::SoftMax() {
mexAssert(kInternalOrder == true);
cudnnTensorDescriptor_t src_desc = GetTensorDesc();
const ftype scale_res = 1.0, scale_cur = 0.0;
CUDNN_CALL(cudnnSoftmaxForward(
MatGPU::_cudnnHandle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL,
&scale_res, src_desc, data_, &scale_cur, src_desc, data_
));
return *this;
}
MatGPU& MatGPU::SoftDer(MatGPU& b) {
//computeSoftmaxGrad(a, *this, *this);
mexAssert(kInternalOrder == true);
cudnnTensorDescriptor_t src_desc = GetTensorDesc();
cudnnTensorDescriptor_t par_desc = b.GetTensorDesc();
const ftype scale_res = 1.0, scale_cur = 0.0;
CUDNN_CALL(cudnnSoftmaxBackward(
MatGPU::_cudnnHandle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL,
&scale_res, par_desc, b.data_, src_desc, data_, &scale_cur, src_desc, data_
));
return *this;
}
MatGPU& MatGPU::CondAssign(const MatGPU &condmat, bool incase, ftype threshold, ftype a) {
cuda_condassign(*this, condmat, incase, threshold, a);
return *this;
}
MatGPU& MatGPU::CondAdd(const MatGPU &condmat, bool incase, ftype threshold, ftype a) {
cuda_condadd(*this, condmat, incase, threshold, a);
return *this;
}
MatGPU& MatGPU::CondMult(const MatGPU &condmat, bool incase, ftype threshold, ftype a) {
cuda_condmult(*this, condmat, incase, threshold, a);
return *this;
}
MatGPU& MatGPU::AddVect(MatGPU &vect, int dim) {
//cuda_addvect(*this, vect, dim);
mexAssertMsg(data_ != vect.data_, "In AddVect pointers should be different");
if (dim == 1) {
mexAssertMsg(vect.size1_ == 1 && vect.size2_ == size2_,
"In AddVect the sizes don't correspond");
} else if (dim == 2) {
mexAssertMsg(vect.size1_ == size1_ && vect.size2_ == 1,
"In AddVect the sizes don't correspond");
} else {
mexAssertMsg(false, "In MatGPU::AddVect the dimension parameter must be either 1 or 2");
}
Dim dims = {(int) size1_, (int) size2_, 1, 1};
Dim vect_dims = {(int) vect.size1_, (int) vect.size2_, 1, 1};
reshape_tensor(dims);
vect.reshape_tensor(vect_dims);
AddTensor(vect);
return *this;
}
MatGPU& MatGPU::MultVect(const MatGPU &vect, int dim) {
//cuda_multvect(*this, vect, dim);
mexAssertMsg(data_ != vect.data_, "In MultVect pointers should be different");
hipStream_t stream = MatGPU::_defaultStream;
hipblasHandle_t handle = MatGPU::_cublasHandle;
CUBLAS_CALL(hipblasSetStream(handle, stream));
int as1, as2;
hipblasSideMode_t side_mode;
if (order_ == false) {
as1 = size1_; as2 = size2_;
} else {
as1 = size2_; as2 = size1_;
}
if (dim == 1) {
mexAssertMsg(vect.size1_ == 1 && vect.size2_ == size2_,
"In MultVect the sizes don't correspond");
if (order_ == true) {
side_mode = HIPBLAS_SIDE_LEFT;
} else {
side_mode = HIPBLAS_SIDE_RIGHT;
}
} else if (dim == 2) {
mexAssertMsg(vect.size1_ == size1_ && vect.size2_ == 1,
"In MultVect the sizes don't correspond");
if (order_ == true) {
side_mode = HIPBLAS_SIDE_RIGHT;
} else {
side_mode = HIPBLAS_SIDE_LEFT;
}
} else {
mexAssertMsg(false, "In MatGPU::MultVect the dimension parameter must be either 1 or 2");
}
CUBLAS_CALL(hipblasSdgmm(handle, side_mode, as1, as2,
data_, as1, vect.data_, 1, data_, as1));
return *this;
}
MatGPU& MatGPU::Reorder(bool order) {
//mexAssertMsg(order_ == order, "In MatGPU::reorder order should be the same");
if (order_ != order) {
if (size1_ > 1 && size2_ > 1) {
MatGPU mr(size1_, size2_);
mr.order_ = order;
mr = (*this); // reorder
order_ = order;
(*this) = mr;
} else {
order_ = order;
}
}
return *this;
}
MatGPU& MatGPU::ReorderMaps(bool cur_order, bool order) {
if (cur_order != order) {
mexAssertMsg(order_ == true, "In ReorderMaps the order should be true");
std::vector< std::vector<MatGPU> > maps = InitMaps();
for (size_t i = 0; i < maps.size(); ++i) {
for (size_t j = 0; j < maps[i].size(); ++j) {
// setting the correct order instead of default kInternalOrder
maps[i][j].set_order(cur_order);
// actual reordering
maps[i][j].Reorder(order);
}
}
}
return *this;
}
MatGPU& MatGPU::Validate() {
cuda_validate(*this);
return *this;
}
// const functions
Dim MatGPU::tensor_shape() const {
mexAssertMsg(tensor_desc_ != NULL, "Tensor descriptor is not defined");
Dim shape, stride;
cudnnDataType_t data_type = CUDNN_TYPE;
CUDNN_CALL(cudnnGetTensor4dDescriptor(
tensor_desc_, &data_type,
&shape[0], &shape[1], &shape[2], &shape[3],
&stride[0], &stride[1], &stride[2], &stride[3]
));
return shape;
}
Dim MatGPU::filter_shape() const {
mexAssertMsg(filter_desc_ != NULL, "Filter descriptor is not defined");
Dim shape;
cudnnDataType_t data_type = CUDNN_TYPE;
cudnnTensorFormat_t tensor_format = CUDNN_LAYOUT;
CUDNN_CALL(cudnnGetFilter4dDescriptor(
filter_desc_, &data_type, &tensor_format,
&shape[0], &shape[1], &shape[2], &shape[3]
));
return shape;
}
std::vector< std::vector<MatGPU> > MatGPU::InitMaps() const {
mexAssertMsg(order_ == true, "In InitMaps the order should be true");
mexAssertMsg(tensor_desc_ == NULL || filter_desc_ == NULL, "Both descriptors are defined");
Dim dims;
if (tensor_desc_ != NULL) {
dims = tensor_shape();
} else if (filter_desc_ != NULL) {
dims = filter_shape();
} else {
mexAssertMsg(false, "Neither of descriptors is defined");
}
mexAssertMsg(size() == dims[0] * dims[1] * dims[2] * dims[3],
"In InitMaps dims assert");
// splitting the 2nd dimension
size_t batchsize = dims[0], channels = dims[1], numel = dims[2] * dims[3];
size_t pixels_num = channels * numel;
std::vector< std::vector<MatGPU> > matrices(batchsize);
for (size_t k = 0; k < batchsize; ++k) {
matrices[k].resize(channels);
for (size_t j = 0; j < channels; ++j) {
matrices[k][j].attach(data_ + k * pixels_num + j * numel,
dims[2], dims[3], kInternalOrder);
}
}
return matrices;
}
ftype MatGPU::sum() const {
return cuda_sum(*this);
}
// CPU <-> GPU functions
MatGPU& MatGPU::operator = (const MatCPU &a) {
// no resize in order to ensure that b.data_ is fixed
mexAssertMsg(!a.empty() && !empty(), "In HostToDevice one of the arrays is empty");
mexAssertMsg(a.size1() == size1_ && a.size2() == size2_,
"In HostToDevice the sizes of matrices do not correspond");
// conversion is to get access to protected members
const MatGPU *a_ptr = static_cast<const MatGPU*>(&a);
if (a.order() == order_) {
CUDA_CALL(hipMemcpy(data_, a_ptr->data_, BytesNum(), hipMemcpyHostToDevice));
} else {
MatGPU br(a.size1(), a.size2());
br.order_ = a.order();
br = a; // previous case
(*this) = br; // reorder
}
return *this;
}
void DeviceToHost(const MatGPU &b, MatCPU &a) {
// no resize in order to ensure that b.data_ is fixed
mexAssertMsg(!a.empty() && !b.empty(), "In DeviceToHost one of the arrays is empty");
mexAssertMsg(a.size1() == b.size1_ && a.size2() == b.size2_,
"In DeviceToHost the sizes of matrices do not correspond");
// conversion is to get access to protected members
MatGPU *a_ptr = static_cast<MatGPU*>(&a);
if (a.order() == b.order()) {
CUDA_CALL(hipMemcpy(a_ptr->data_, b.data_, b.BytesNum(), hipMemcpyDeviceToHost));
} else {
MatGPU br(a.size1(), a.size2());
br.order_ = a.order();
br = b; // reorder
a = br; // previous case
}
}
void SubSet(MatCPU &a, MatGPU &b, size_t offset, bool dir) {
if (print >= 3) {
MatGPU::StartCudaTimer();
}
MatGPU *a_ptr = static_cast<MatGPU*>(&a);
mexAssertMsg(a_ptr->order_ == true, "In SubSet 'a.order_' should be true");
mexAssertMsg(b.order_ == true, "In SubSet 'b.order_' should be true");
mexAssertMsg(offset + b.size1_ <= a_ptr->size1_ && b.size2_ == a_ptr->size2_,
"In SubSet the sizes don't correspond each other");
MatCPU as;
as.attach(a_ptr->data_ + offset * a_ptr->size2_, b.size1_, b.size2_, true);
if (dir) {
b = as; // HostToDevice
} else {
DeviceToHost(b, as);
}
if (print >= 3) {
MatGPU::MeasureCudaTime("SubSet");
}
}
// friend functions
void Sum(MatGPU &a, MatGPU &vect, int dim) {
//cuda_sumvect(a, vect, dim);
mexAssertMsg(a.data_ != vect.data_, "In Sum pointers should be different");
hipStream_t stream = MatGPU::_defaultStream;
hipblasHandle_t handle = MatGPU::_cublasHandle;
CUBLAS_CALL(hipblasSetStream(handle, stream));
const ftype scale1 = 1.0, scale2 = 0.0;
hipblasOperation_t op;
MatGPU ones_vect;
if (dim == 1) {
mexAssertMsg(vect.size1_ == 1 && vect.size2_ == a.size2_,
"In Sum the sizes do not correspond each other");
if (a.order_ == false) {
op = HIPBLAS_OP_T;
} else {
op = HIPBLAS_OP_N;
}
ones_vect.GetFromWorkspace(1, a.size1_);
} else if (dim == 2) {
mexAssertMsg(vect.size1_ == a.size1_ && vect.size2_ == 1,
"In Sum the sizes do not correspond each other");
if (a.order_ == false) {
op = HIPBLAS_OP_N;
} else {
op = HIPBLAS_OP_T;
}
ones_vect.GetFromWorkspace(a.size2_, 1);
} else {
mexAssertMsg(false, "In MatGPU::Sum the dimension parameter must be either 1 or 2");
}
int as1, as2;
if (a.order_ == false) {
as1 = a.size1_;
as2 = a.size2_;
} else {
as1 = a.size2_;
as2 = a.size1_;
}
ones_vect.assign(1);
CUBLAS_CALL(hipblasSgemv(handle, op, as1, as2,
&scale1, a.data_, as1,
ones_vect.data_, 1,
&scale2, vect.data_, 1));
}
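/* Added note (not in the original source): the reduction above is expressed
   as a matrix-vector product with a vector of ones, e.g. for dim == 1
   (column sums) it effectively computes vect = ones(1, size1) * a via
   cublasSgemv, with the transpose flag chosen to match the storage order. */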
void Mean(MatGPU &a, MatGPU &vect, int dim) {
Sum(a, vect, dim);
if (dim == 1) {
vect /= (ftype) a.size1_;
} else if (dim == 2) {
vect /= (ftype) a.size2_;
} else {
mexAssertMsg(false, "In MatGPU::Mean the dimension parameter must be either 1 or 2");
}
}
void Trans(const MatGPU &a, MatGPU &b) {
//cuda_trans(a, b);
b.linear(0, 1, a, true);
}
// layer transformation functions
void Prod(const MatGPU &a, bool a_tr, const MatGPU &b, bool b_tr, MatGPU &c) {
mexAssertMsg(a.order_ == b.order_ && b.order_ == c.order_, "In Prod the orders should be the same");
hipStream_t stream = MatGPU::_defaultStream;
hipblasHandle_t handle = MatGPU::_cublasHandle;
const ftype scale_res = 1.0, scale_cur = 0.0;
CUBLAS_CALL(hipblasSetStream(handle, stream));
hipblasOperation_t a_op, b_op;
if (!a_tr) {
a_op = HIPBLAS_OP_N;
} else {
a_op = HIPBLAS_OP_T;
}
if (!b_tr) {
b_op = HIPBLAS_OP_N;
} else {
b_op = HIPBLAS_OP_T;
}
int as1, as2, bs1, bs2;
if (a.order_ == false) { // Alex kernels
if (!a_tr) { // a
as1 = a.size1_; as2 = a.size2_;
} else { // aT
as1 = a.size2_; as2 = a.size1_;
}
if (!b_tr) { // b
bs1 = b.size1_; bs2 = b.size2_;
} else { // bT
bs1 = b.size2_; bs2 = b.size1_;
}
mexAssertMsg(as2 == bs1, "In Prod the sizes of matrices do not correspond");
mexAssertMsg(c.size1_ == as1 && c.size2_ == bs2, "In Prod the size of output matrix is wrong");
CUBLAS_CALL(hipblasSgemm(handle, a_op, b_op, as1, bs2, as2,
&scale_res, a.data_, a.size1_, b.data_, b.size1_,
&scale_cur, c.data_, c.size1_));
} else { // cuDNN kernels
if (!a_tr) { // a
as1 = a.size2_; as2 = a.size1_;
} else { // aT
as1 = a.size1_; as2 = a.size2_;
}
if (!b_tr) { // b
bs1 = b.size2_; bs2 = b.size1_;
} else { // bT
bs1 = b.size1_; bs2 = b.size2_;
}
mexAssertMsg(as1 == bs2, "In Prod the sizes of matrices do not correspond");
mexAssertMsg(c.size1_ == as2 && c.size2_ == bs1, "In Prod the size of output matrix is wrong");
CUBLAS_CALL(hipblasSgemm(handle, b_op, a_op, bs1, as2, bs2,
&scale_res, b.data_, b.size2_, a.data_, a.size2_,
&scale_cur, c.data_, c.size2_));
}
}
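/* Added note (not in the original source): cuBLAS assumes column-major
   storage, so in the row-major (cuDNN, order_ == true) branch above the
   operands are swapped: a row-major matrix reinterpreted as column-major is
   its transpose, and C = A * B is recovered from the identity
   transpose(C) = transpose(B) * transpose(A). A minimal hypothetical sketch
   of a fully-connected forward pass built on this (names are illustrative):

       // activs: batchsize x numInputs, weights: numInputs x numOutputs,
       // targets: batchsize x numOutputs (all row-major MatGPU objects)
       Prod(activs, false, weights, false, targets);
*/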
// filter functions
MatGPU& MatGPU::AddTensor(MatGPU &tensor) {
mexAssert(kInternalOrder == true);
cudnnTensorDescriptor_t desc = GetTensorDesc();
cudnnTensorDescriptor_t tns_desc = tensor.GetTensorDesc();
const ftype scale_res = 1.0, scale_cur = 1.0;
CUDNN_CALL(cudnnAddTensor(MatGPU::_cudnnHandle,
&scale_res, tns_desc, tensor.data_,
&scale_cur, desc, data_
));
return *this;
}
void ConvolutionForward(MatGPU& activs, MatGPU& filters, MatGPU& targets,
const cudnnConvolutionDescriptor_t &conv_desc) {
mexAssert(kInternalOrder == true);
if (print >= 3) {
MatGPU::StartCudaTimer();
}
cudnnTensorDescriptor_t act_desc = activs.GetTensorDesc();
cudnnFilterDescriptor_t flt_desc = filters.GetFilterDesc();
cudnnTensorDescriptor_t trg_desc = targets.GetTensorDesc();
Dim dims;
CUDNN_CALL(cudnnGetConvolution2dForwardOutputDim(
conv_desc, act_desc, flt_desc, &dims[0], &dims[1], &dims[2], &dims[3]
));
Dim trg_shape = targets.tensor_shape();
mexAssertMsg(trg_shape[0] == dims[0] && trg_shape[1] == dims[1] &&
trg_shape[2] == dims[2] && trg_shape[3] == dims[3],
"ConvolutionForward shape assert");
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
MatGPU::_cudnnHandle, act_desc, flt_desc, conv_desc, trg_desc,
CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,
MatGPU::_cudnnMemoryLimit, &algo
));
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
MatGPU::_cudnnHandle, act_desc, flt_desc, conv_desc, trg_desc,
algo, &ws_size
));
MatGPU workspace;
workspace.GetFromWorkspace(1, ws_size / sizeof(ftype));
const ftype scale_res = 1.0, scale_cur = 0.0;
CUDNN_CALL(cudnnConvolutionForward(MatGPU::_cudnnHandle,
&scale_res, act_desc, activs.data_, flt_desc, filters.data_,
conv_desc, algo, workspace.data_, ws_size,
&scale_cur, trg_desc, targets.data_
));
if (print >= 3) {
MatGPU::MeasureCudaTime("FilterActs");
}
}
void ConvolutionBackwardData(MatGPU& derivs, MatGPU& filters, MatGPU& targets,
const cudnnConvolutionDescriptor_t &conv_desc) {
mexAssert(kInternalOrder == true);
if (print >= 3) {
MatGPU::StartCudaTimer();
}
cudnnTensorDescriptor_t trg_desc = targets.GetTensorDesc();
cudnnFilterDescriptor_t flt_desc = filters.GetFilterDesc();
cudnnTensorDescriptor_t der_desc = derivs.GetTensorDesc();
Dim dims;
CUDNN_CALL(cudnnGetConvolution2dForwardOutputDim(
conv_desc, trg_desc, flt_desc, &dims[0], &dims[1], &dims[2], &dims[3]
));
Dim der_shape = derivs.tensor_shape();
mexAssertMsg(der_shape[0] == dims[0] && der_shape[1] == dims[1] &&
der_shape[2] == dims[2] && der_shape[3] == dims[3],
"ConvolutionBackwardData shape assert");
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
MatGPU::_cudnnHandle, flt_desc, der_desc, conv_desc, trg_desc,
CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT,
MatGPU::_cudnnMemoryLimit, &algo
));
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
MatGPU::_cudnnHandle, flt_desc, der_desc, conv_desc, trg_desc,
algo, &ws_size
));
MatGPU workspace;
workspace.GetFromWorkspace(1, ws_size / sizeof(ftype));
const ftype scale_res = 1.0, scale_cur = 0.0;
CUDNN_CALL(cudnnConvolutionBackwardData(MatGPU::_cudnnHandle,
&scale_res, flt_desc, filters.data_, der_desc, derivs.data_,
conv_desc, algo, workspace.data_, ws_size,
&scale_cur, trg_desc, targets.data_
));
if (print >= 3) {
MatGPU::MeasureCudaTime("ImgActs");
}
}
void ConvolutionBackwardFilter(MatGPU& activs, MatGPU& derivs, MatGPU& targets,
const cudnnConvolutionDescriptor_t &conv_desc) {
mexAssert(kInternalOrder == true);
cudnnTensorDescriptor_t act_desc = activs.GetTensorDesc();
cudnnFilterDescriptor_t trg_desc = targets.GetFilterDesc();
cudnnTensorDescriptor_t der_desc = derivs.GetTensorDesc();
Dim dims;
CUDNN_CALL(cudnnGetConvolution2dForwardOutputDim(
conv_desc, act_desc, trg_desc, &dims[0], &dims[1], &dims[2], &dims[3]
));
Dim der_shape = derivs.tensor_shape();
mexAssertMsg(der_shape[0] == dims[0] && der_shape[1] == dims[1] &&
der_shape[2] == dims[2] && der_shape[3] == dims[3],
"ConvolutionBackwardFilter shape assert");
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
MatGPU::_cudnnHandle, act_desc, der_desc, conv_desc, trg_desc,
CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT,
MatGPU::_cudnnMemoryLimit, &algo
));
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
MatGPU::_cudnnHandle, act_desc, der_desc, conv_desc, trg_desc,
algo, &ws_size
));
MatGPU workspace;
workspace.GetFromWorkspace(1, ws_size / sizeof(ftype));
const ftype scale_res = 1.0, scale_cur = 0.0;
CUDNN_CALL(cudnnConvolutionBackwardFilter(MatGPU::_cudnnHandle,
&scale_res, act_desc, activs.data_, der_desc, derivs.data_,
conv_desc, algo, workspace.data_, ws_size,
&scale_cur, trg_desc, targets.data_
));
}
void ConvolutionBackwardBias(MatGPU& derivs, MatGPU &targets) {
mexAssert(kInternalOrder == true);
cudnnTensorDescriptor_t der_desc = derivs.GetTensorDesc();
cudnnTensorDescriptor_t trg_desc = targets.GetTensorDesc();
const ftype scale_res = 1.0, scale_cur = 0.0;
CUDNN_CALL(cudnnConvolutionBackwardBias(MatGPU::_cudnnHandle,
&scale_res, der_desc, derivs.data_,
&scale_cur, trg_desc, targets.data_
));
}
// scaling functions
void Pooling(MatGPU& activs, MatGPU& targets,
cudnnPoolingDescriptor_t pool_desc) {
mexAssert(kInternalOrder == true);
cudnnTensorDescriptor_t act_desc = activs.GetTensorDesc();
cudnnTensorDescriptor_t trg_desc = targets.GetTensorDesc();
Dim dims;
CUDNN_CALL(cudnnGetPooling2dForwardOutputDim(
pool_desc, act_desc, &dims[0], &dims[1], &dims[2], &dims[3]
));
Dim trg_shape = targets.tensor_shape();
mexAssertMsg(trg_shape[0] == dims[0] && trg_shape[1] == dims[1] &&
trg_shape[2] == dims[2] && trg_shape[3] == dims[3],
"Pooling shape assert");
const ftype scale_res = 1.0, scale_cur = 0.0;
CUDNN_CALL(cudnnPoolingForward(MatGPU::_cudnnHandle, pool_desc,
&scale_res, act_desc, activs.data_,
&scale_cur, trg_desc, targets.data_
));
}
void PoolingUndo(MatGPU& activs, MatGPU& pool_activs,
MatGPU& pool_derivs, MatGPU& derivs,
cudnnPoolingDescriptor_t pool_desc, bool dir) {
// dir == true -> backward, derivs are targets
// dir == false -> forward, pool_derivs are targets
mexAssert(kInternalOrder == true);
mexAssertMsg(activs.size1_ == derivs.size1_ &&
activs.size2_ == derivs.size2_,
"In 'PoolingUndo' activs size assert");
mexAssertMsg(pool_activs.size1_ == pool_derivs.size1_ &&
pool_activs.size2_ == pool_derivs.size2_,
"In 'PoolingUndo' pool_activs.size assert");
cudnnTensorDescriptor_t act_desc = activs.GetTensorDesc();
cudnnTensorDescriptor_t trg_desc = derivs.GetTensorDesc();
cudnnTensorDescriptor_t pool_act_desc = pool_activs.GetTensorDesc();
cudnnTensorDescriptor_t pool_der_desc = pool_derivs.GetTensorDesc();
Dim dims;
CUDNN_CALL(cudnnGetPooling2dForwardOutputDim(
pool_desc, act_desc, &dims[0], &dims[1], &dims[2], &dims[3]
));
Dim pool_act_shape = pool_activs.tensor_shape();
mexAssertMsg(pool_act_shape[0] == dims[0] && pool_act_shape[1] == dims[1] &&
pool_act_shape[2] == dims[2] && pool_act_shape[3] == dims[3],
"PoolingUndo shape assert");
if (dir == true) {
const ftype scale_res = 1.0, scale_cur = 0.0;
CUDNN_CALL(cudnnPoolingBackward(MatGPU::_cudnnHandle, pool_desc,
&scale_res, pool_act_desc, pool_activs.data_,
pool_der_desc, pool_derivs.data_, act_desc, activs.data_,
&scale_cur, trg_desc, derivs.data_
));
} else {
Dim prev_dims = activs.tensor_shape();
Pair scale, padding, stride;
cudnnPoolingMode_t pool_mode = CUDNN_POOLING_MAX;
cudnnNanPropagation_t nan_prop_mode = CUDNN_PROPAGATE_NAN;
CUDNN_CALL(cudnnGetPooling2dDescriptor(pool_desc,
&pool_mode, &nan_prop_mode,
&scale[0], &scale[1], &padding[0], &padding[1], &stride[0], &stride[1]
));
_maxPoolThirdPass(activs, pool_activs, derivs, pool_derivs,
prev_dims[2], prev_dims[3], dims[2], dims[3],
scale, padding, stride);
}
}
void AffineTransform(const MatGPU &images, MatGPU &targets,
const MatGPU &shift_mat, const MatGPU &scale_mat,
const MatGPU &mirror_mat, const MatGPU &angle_mat,
ftype defval, bool dir) {
Dim img_dims = images.tensor_shape();
Dim trg_dims = targets.tensor_shape();
_affineTransform(images, targets,
img_dims[2], img_dims[3], trg_dims[2], trg_dims[3],
shift_mat, scale_mat, mirror_mat, angle_mat, defval, dir);
}
/*
void VaryColors(MatGPU &images, const Dim &dims,
const MatGPU &eigenvectors, ftype noise_std) {
int batchsize = images.size1();
int channels = images.size2() / (dims[2] * dims[3]);
MatGPU noise_mat, add_mat;
MatGPU::swapWithBuffer(noise_mat, -7);
noise_mat.resize(batchsize, channels);
// hack, because randnorm does not work for odd numbers. Start.
if (noise_mat.size1() * noise_mat.size2() % 2 > 0) {
MatGPU rndmat;
rndmat.attach(
noise_mat, 0,
noise_mat.size1() * noise_mat.size2() - 1, 1, noise_mat.order()
);
rndmat.randnorm() *= noise_std;
rndmat.attach(
noise_mat, noise_mat.size1() * noise_mat.size2() - 1,
1, 1, noise_mat.order()
);
(rndmat.rand() -= 0.5) *= noise_std;
} else {
noise_mat.randnorm() *= noise_std;
}
// hack, because randnorm does not work for odd numbers. End.
MatGPU::swapWithBuffer(add_mat, -8);
add_mat.resize(batchsize, channels);
Prod(noise_mat, false, eigenvectors, true, add_mat);
_varyColors(images, add_mat);
MatGPU::swapWithBuffer(noise_mat, -7);
MatGPU::swapWithBuffer(add_mat, -8);
} */
| b3bfaa59f490dcc918401d2bf92f165cd7a127d4.cu | /*
Copyright (C) 2016 Sergey Demyanov.
contact: [email protected]
http://www.demyanov.net
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "mat_gpu.h"
#include "cuda_util.h"
void MatGPU::SetTensorDesc(const Dim &dims) {
mexAssertMsg(size() == dims[0] * dims[1] * dims[2] * dims[3],
"In SetTensorDesc dims assert");
if (dims[0] > 0 || dims[1] > 0 || dims[2] > 0 || dims[3] > 0) {
if (tensor_desc_ == NULL) {
CUDNN_CALL(cudnnCreateTensorDescriptor(&tensor_desc_));
}
CUDNN_CALL(cudnnSetTensor4dDescriptor(
tensor_desc_, CUDNN_LAYOUT, CUDNN_TYPE,
dims[0], dims[1], dims[2], dims[3]
));
}
}
void MatGPU::SetFilterDesc(const Dim &dims) {
mexAssertMsg(size() == dims[0] * dims[1] * dims[2] * dims[3],
"In SetFilterDesc dims assert");
if (dims[0] > 0 || dims[1] > 0 || dims[2] > 0 || dims[3] > 0) {
if (filter_desc_ == NULL) {
CUDNN_CALL(cudnnCreateFilterDescriptor(&filter_desc_));
}
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filter_desc_, CUDNN_TYPE, CUDNN_LAYOUT,
dims[0], dims[1], dims[2], dims[3]
));
}
}
cudnnTensorDescriptor_t MatGPU::GetTensorDesc() {
mexAssertMsg(tensor_desc_ != NULL, "Empty tensor descriptor");
return tensor_desc_;
}
cudnnFilterDescriptor_t MatGPU::GetFilterDesc() {
mexAssertMsg(filter_desc_ != NULL, "Empty filter descriptor");
return filter_desc_;
}
void MatGPU::ClearTensorDesc() {
if (tensor_desc_ != NULL) {
CUDNN_CALL(cudnnDestroyTensorDescriptor(tensor_desc_));
tensor_desc_ = NULL;
}
}
void MatGPU::ClearFilterDesc() {
if (filter_desc_ != NULL) {
CUDNN_CALL(cudnnDestroyFilterDescriptor(filter_desc_));
filter_desc_ = NULL;
}
}
// static
cudaEvent_t MatGPU::_start, MatGPU::_stop;
void MatGPU::StartCudaTimer() {
if (print < 2) return;
cudaEventRecord(_start, 0);
}
void MatGPU::MeasureCudaTime(std::string msg) {
if (print < 2) return;
float elapsedTime;
cudaEventRecord(_stop, 0);
cudaEventSynchronize(_stop);
cudaEventElapsedTime(&elapsedTime, _start, _stop);
mexPrintMsg(msg, elapsedTime);
}
curandGenerator_t MatGPU::_randGen;
cudaStream_t MatGPU::_defaultStream;
cublasHandle_t MatGPU::_cublasHandle;
cudnnHandle_t MatGPU::_cudnnHandle;
size_t MatGPU::_cudnnMemoryLimit;
MatGPU MatGPU::_workspace;
int MatGPU::getDeviceID() {
int id;
CUDA_CALL(cudaGetDevice(&id));
return id;
}
void MatGPU::InitCuda(int gpu) {
int num;
CUDA_CALL(cudaGetDeviceCount(&num));
mexAssertMsg(gpu < num, "Requested GPU index is not available");
CUDA_CALL(cudaSetDevice(gpu));
cudaDeviceProp prop;
CUDA_CALL(cudaGetDeviceProperties(&prop, getDeviceID()));
mexPrintMsg("Executing on", prop.name);
CUDA_CALL(cudaStreamCreate(&_defaultStream));
CURAND_CALL(curandCreateGenerator(&_randGen, CURAND_RNG_PSEUDO_DEFAULT));
CUBLAS_CALL(cublasCreate(&_cublasHandle));
CUDNN_CALL(cudnnCreate(&_cudnnHandle));
cudaEventCreate(&_start);
cudaEventCreate(&_stop);
}
void MatGPU::InitRand(int seed) {
CURAND_CALL(curandSetPseudoRandomGeneratorSeed(_randGen, seed));
}
void MatGPU::CudaReset() {
cudaEventDestroy(_start);
cudaEventDestroy(_stop);
_workspace.clear();
CUDNN_CALL(cudnnDestroy(_cudnnHandle));
CUBLAS_CALL(cublasDestroy(_cublasHandle));
CURAND_CALL(curandDestroyGenerator(_randGen));
CUDA_CALL(cudaStreamDestroy(_defaultStream));
CUDA_CALL(cudaDeviceReset());
}
void MatGPU::SetMemoryLimit(size_t memory) {
// converting from megabytes to bytes
_cudnnMemoryLimit = memory * 1024 * 1024;
}
// data access
ftype MatGPU::operator () (size_t i, size_t j) const {
ftype *val_ptr = new ftype [1];
CUDA_CALL(cudaMemcpy(val_ptr, &data(i, j), sizeof(ftype), cudaMemcpyDeviceToHost));
ftype val = val_ptr[0];
delete [] val_ptr;
return val;
}
/*
ftype MatGPU::operator () (size_t ind) const {
ftype *val_ptr = new ftype [1];
CUDA_CALL(cudaMemcpy(val_ptr, &data(ind), sizeof(ftype), cudaMemcpyDeviceToHost));
ftype val = val_ptr[0];
delete [] val_ptr;
return val;
}
MatGPU MatGPU::operator () (size_t ind) {
MatGPU val_mat;
val_mat.attach(&data(ind), 1, 1);
return val_mat;
}
*/
size_t MatGPU::BytesNum() const {
return size() * sizeof(ftype);
}
// memory functions
MatGPU::MatGPU() {
init();
}
MatGPU::MatGPU(size_t size1, size_t size2) {
init();
resize(size1, size2);
}
MatGPU::MatGPU(const MatGPU &b) {
init();
if (b.empty()) return;
resize(b.size1_, b.size2_);
(*this) = b;
}
MatGPU::MatGPU(MatGPU &&b) {
init();
Swap(*this, b);
}
MatGPU::~MatGPU() {
clear();
}
MatGPU& MatGPU::init() {
data_ = NULL;
size1_ = 0;
size2_ = 0;
order_ = kInternalOrder;
owner_ = false;
tensor_desc_ = NULL;
filter_desc_ = NULL;
return *this;
}
// copies only the content, not other parameters!!
MatGPU& MatGPU::operator = (const MatGPU &b) {
//mexPrintMsg("Array assignment");
mexAssertMsg(size1_ == b.size1_ && size2_ == b.size2_,
"In MatGPU::operator = the arrays have different size");
if (order_ == b.order_) {
CUDA_CALL(cudaMemcpy(data_, b.data_, BytesNum(), cudaMemcpyDeviceToDevice));
} else if (b.order_ != kInternalOrder) { // order_ == kInternalOrder
MatGPU br;
br.attach(b.data_, b.size2_, b.size1_, kInternalOrder);
Trans(br, *this);
} else { // b.order_ == kInternalOrder, order_ != kInternalOrder
MatGPU br;
br.attach(data_, size2_, size1_, kInternalOrder);
Trans(b, br);
}
/*
if (b.tensor_desc_ != NULL) {
SetTensorDesc(b.tensor_shape());
} else {
tensor_desc_ = NULL;
}
if (b.filter_desc_ != NULL) {
SetFilterDesc(b.filter_shape());
} else {
filter_desc_ = NULL;
} */
return *this;
}
MatGPU& MatGPU::resize(size_t size1, size_t size2) {
// required for all cuda operations (guard size2 == 0 to avoid division by zero)
mexAssertMsg(size2 == 0 || size1 <= INT_MAX / size2, "Matrix is too large!");
if (size1 * size2 != size()) {
clear();
if (size1 * size2 > 0) {
//mexPrintInt("rs1", size1);
//mexPrintInt("rs2", size2);
CUDA_CALL(cudaMalloc(&data_, size1 * size2 * sizeof(ftype)));
owner_ = true;
}
}
if (size1 != size1_ || size2 != size2_) {
ClearTensorDesc();
ClearFilterDesc();
}
size1_ = size1;
size2_ = size2;
return *this;
}
MatGPU& MatGPU::resize_tensor(Dim dims) {
resize(dims[0], dims[1] * dims[2] * dims[3]);
SetTensorDesc(dims);
return *this;
}
MatGPU& MatGPU::resize_filter(Dim dims) {
resize(dims[0], dims[1] * dims[2] * dims[3]);
SetFilterDesc(dims);
return *this;
}
MatGPU& MatGPU::reshape(size_t size1, size_t size2) {
mexAssertMsg(size() == size1 * size2,
"In MatGPU::reshape the sizes do not correspond");
resize(size1, size2);
return *this;
}
MatGPU& MatGPU::reshape_tensor(Dim dims) {
mexAssertMsg(size() == dims[0] * dims[1] * dims[2] * dims[3],
"In MatGPU::reshape_tensor the dimensions do not correspond");
resize_tensor(dims);
return *this;
}
MatGPU& MatGPU::reshape_filter(Dim dims) {
mexAssertMsg(size() == dims[0] * dims[1] * dims[2] * dims[3],
"In MatGPU::reshape_tensor the dimensions do not correspond");
resize_filter(dims);
return *this;
}
MatGPU& MatGPU::attach(const MatGPU &b) {
return attach(b.data_, b.size1_, b.size2_, b.order_);
}
MatGPU& MatGPU::attach(const MatGPU &b, size_t offset, size_t size1, size_t size2, bool order) {
//mexAssertMsg(b.size1_ == 1 || b.size2_ == 1, "In MatGPU::attach with offset one of sizes should be 1");
mexAssertMsg(offset + size1 * size2 <= b.size(),
"In MatGPU::attach the sizes don't correspond each other");
return attach(b.data_ + offset, size1, size2, order);
}
MatGPU& MatGPU::attach(ftype *ptr, size_t size1, size_t size2) {
return attach(ptr, size1, size2, kInternalOrder);
}
MatGPU& MatGPU::attach(ftype *ptr, size_t size1, size_t size2, bool order) {
//mexAssertMsg(order == false, "In MatGPU::attach order should be always false");
clear();
data_ = ptr;
size1_ = size1;
size2_ = size2;
order_ = order;
return *this;
}
MatGPU& MatGPU::clear() {
ClearTensorDesc();
ClearFilterDesc();
if (owner_) {
mexAssert(data_ != NULL);
mexAssert(size() > 0);
//mexPrintInt("clear s1", size1_);
//mexPrintInt("clear s2", size2_);
CUDA_CALL(cudaFree(data_));
owner_ = false;
}
init();
//mexPrintMsg("Array clear end");
return *this;
}
// Be careful when using it: there is no guarantee that the workspace
// is not being used somewhere else at the same time.
MatGPU& MatGPU::GetFromWorkspace(size_t size1, size_t size2) {
if (size1 * size2 > MatGPU::_workspace.size()) {
MatGPU::_workspace.resize(size1, size2);
}
attach(MatGPU::_workspace, 0, size1, size2, kInternalOrder);
return *this;
}
void Swap(MatGPU &a, MatGPU &b) {
ftype *data_tmp = b.data_;
b.data_ = a.data_;
a.data_ = data_tmp;
size_t size1_tmp = b.size1_;
b.size1_ = a.size1_;
a.size1_ = size1_tmp;
size_t size2_tmp = b.size2_;
b.size2_ = a.size2_;
a.size2_ = size2_tmp;
bool order_tmp = b.order_;
b.order_ = a.order_;
a.order_ = order_tmp;
bool owner_tmp = b.owner_;
b.owner_ = a.owner_;
a.owner_ = owner_tmp;
cudnnTensorDescriptor_t tensor_desc_tmp_ = b.tensor_desc_;
b.tensor_desc_ = a.tensor_desc_;
a.tensor_desc_ = tensor_desc_tmp_;
cudnnFilterDescriptor_t filter_desc_tmp_ = b.filter_desc_;
b.filter_desc_ = a.filter_desc_;
a.filter_desc_ = filter_desc_tmp_;
}
// data functions
MatGPU& MatGPU::ident() {
cuda_ident(*this);
return *this;
}
MatGPU& MatGPU::assign(ftype val) {
cuda_assval(*this, val);
return *this;
}
MatGPU& MatGPU::rand() {
if (!empty()) {
CURAND_CALL(curandGenerateUniform(_randGen, data_, size()));
}
return *this;
}
MatGPU& MatGPU::randnorm() {
if (!empty()) {
CURAND_CALL(curandGenerateNormal(_randGen, data_, size(), 0, 1));
}
return *this;
}
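// linear() computes *this = ca * (*this) + cb * b (or cb * b^T when b_tr)
// with a single cublasSgeam call; the as1/bs1 bookkeeping below maps the
// row-major (order_ == true) layout onto cuBLAS's column-major convention.
// Trans() below reuses it with ca = 0, cb = 1, b_tr = true.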
MatGPU& MatGPU::linear(ftype ca, ftype cb, const MatGPU &b, bool b_tr) {
mexAssertMsg(cb == 0 || data_ != b.data_, "In linear pointers should be different");
mexAssertMsg(order_ == b.order_, "In linear orders should be the same");
cublasOperation_t a_op = CUBLAS_OP_N, b_op;
if (!b_tr) {
mexAssertMsg(size1_ == b.size1_ && size2_ == b.size2_,
"In linear sizes does not correspond to each other");
b_op = CUBLAS_OP_N;
} else {
mexAssertMsg(size1_ == b.size2_ && size2_ == b.size1_,
"In linear sizes does not correspond to each other");
b_op = CUBLAS_OP_T;
}
int as1, as2, bs1;
if (order_ == false) {
as1 = size1_; as2 = size2_;
bs1 = b.size1_;
} else {
as1 = size2_; as2 = size1_;
bs1 = b.size2_;
}
cudaStream_t stream = MatGPU::_defaultStream;
cublasHandle_t handle = MatGPU::_cublasHandle;
const ftype scale1 = ca, scale2 = cb;
CUBLAS_CALL(cublasSetStream(handle, stream));
CUBLAS_CALL(cublasSgeam(handle, a_op, b_op, as1, as2,
&scale1, data_, as1,
&scale2, b.data_, bs1,
data_, as1));
return *this;
}
MatGPU& MatGPU::operator += (const MatGPU &b) {
cuda_addmat(*this, b);
//linear(1, 1, b, false);
return *this;
}
MatGPU& MatGPU::operator -= (const MatGPU &b) {
cuda_submat(*this, b);
//linear(1, -1, b, false);
return *this;
}
MatGPU& MatGPU::operator *= (const MatGPU &b) {
cuda_multmat(*this, b);
return *this;
}
MatGPU& MatGPU::operator /= (const MatGPU &b) {
cuda_divmat(*this, b);
return *this;
}
MatGPU& MatGPU::operator += (ftype c) {
cuda_addval(*this, c);
return *this;
}
MatGPU& MatGPU::operator -= (ftype c) {
cuda_subval(*this, c);
return *this;
}
MatGPU& MatGPU::operator *= (ftype c) {
cuda_multval(*this, c);
//linear(c, 0, *this, false);
return *this;
}
MatGPU& MatGPU::operator /= (ftype c) {
cuda_divval(*this, c);
//linear(1.0/c, 0, *this, false);
return *this;
}
MatGPU& MatGPU::Sign() {
cuda_sign(*this);
return *this;
}
MatGPU& MatGPU::Sqrt() {
cuda_sqrt(*this);
return *this;
}
MatGPU& MatGPU::Log() {
cuda_log(*this);
return *this;
}
MatGPU& MatGPU::Exp() {
cuda_exp(*this);
return *this;
}
MatGPU& MatGPU::Sigmoid() {
cuda_sigmoid(*this);
return *this;
}
MatGPU& MatGPU::SigmDer(const MatGPU& a) {
cuda_sigmder(*this, a);
return *this;
}
MatGPU& MatGPU::SoftMax() {
mexAssert(kInternalOrder == true);
cudnnTensorDescriptor_t src_desc = GetTensorDesc();
const ftype scale_res = 1.0, scale_cur = 0.0;
CUDNN_CALL(cudnnSoftmaxForward(
MatGPU::_cudnnHandle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL,
&scale_res, src_desc, data_, &scale_cur, src_desc, data_
));
return *this;
}
MatGPU& MatGPU::SoftDer(MatGPU& b) {
//computeSoftmaxGrad(a, *this, *this);
mexAssert(kInternalOrder == true);
cudnnTensorDescriptor_t src_desc = GetTensorDesc();
cudnnTensorDescriptor_t par_desc = b.GetTensorDesc();
const ftype scale_res = 1.0, scale_cur = 0.0;
CUDNN_CALL(cudnnSoftmaxBackward(
MatGPU::_cudnnHandle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL,
&scale_res, par_desc, b.data_, src_desc, data_, &scale_cur, src_desc, data_
));
return *this;
}
MatGPU& MatGPU::CondAssign(const MatGPU &condmat, bool incase, ftype threshold, ftype a) {
cuda_condassign(*this, condmat, incase, threshold, a);
return *this;
}
MatGPU& MatGPU::CondAdd(const MatGPU &condmat, bool incase, ftype threshold, ftype a) {
cuda_condadd(*this, condmat, incase, threshold, a);
return *this;
}
MatGPU& MatGPU::CondMult(const MatGPU &condmat, bool incase, ftype threshold, ftype a) {
cuda_condmult(*this, condmat, incase, threshold, a);
return *this;
}
MatGPU& MatGPU::AddVect(MatGPU &vect, int dim) {
//cuda_addvect(*this, vect, dim);
mexAssertMsg(data_ != vect.data_, "In AddVect pointers should be different");
if (dim == 1) {
mexAssertMsg(vect.size1_ == 1 && vect.size2_ == size2_,
"In AddVect the sizes don't correspond");
} else if (dim == 2) {
mexAssertMsg(vect.size1_ == size1_ && vect.size2_ == 1,
"In AddVect the sizes don't correspond");
} else {
mexAssertMsg(false, "In MatGPU::AddVect the dimension parameter must be either 1 or 2");
}
Dim dims = {(int) size1_, (int) size2_, 1, 1};
Dim vect_dims = {(int) vect.size1_, (int) vect.size2_, 1, 1};
reshape_tensor(dims);
vect.reshape_tensor(vect_dims);
AddTensor(vect);
return *this;
}
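// MultVect scales *this along one dimension: dim == 1 multiplies column j
// by vect(j) (vect is 1 x size2_), dim == 2 multiplies row i by vect(i)
// (vect is size1_ x 1). It is implemented as a diagonal-matrix product via
// cublasSdgmm; the LEFT/RIGHT side is chosen from both dim and the storage order.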
MatGPU& MatGPU::MultVect(const MatGPU &vect, int dim) {
//cuda_multvect(*this, vect, dim);
mexAssertMsg(data_ != vect.data_, "In MultVect pointers should be different");
cudaStream_t stream = MatGPU::_defaultStream;
cublasHandle_t handle = MatGPU::_cublasHandle;
CUBLAS_CALL(cublasSetStream(handle, stream));
int as1, as2;
cublasSideMode_t side_mode;
if (order_ == false) {
as1 = size1_; as2 = size2_;
} else {
as1 = size2_; as2 = size1_;
}
if (dim == 1) {
mexAssertMsg(vect.size1_ == 1 && vect.size2_ == size2_,
"In MultVect the sizes don't correspond");
if (order_ == true) {
side_mode = CUBLAS_SIDE_LEFT;
} else {
side_mode = CUBLAS_SIDE_RIGHT;
}
} else if (dim == 2) {
mexAssertMsg(vect.size1_ == size1_ && vect.size2_ == 1,
"In MultVect the sizes don't correspond");
if (order_ == true) {
side_mode = CUBLAS_SIDE_RIGHT;
} else {
side_mode = CUBLAS_SIDE_LEFT;
}
} else {
mexAssertMsg(false, "In MatGPU::MultVect the dimension parameter must be either 1 or 2");
}
CUBLAS_CALL(cublasSdgmm(handle, side_mode, as1, as2,
data_, as1, vect.data_, 1, data_, as1));
return *this;
}
MatGPU& MatGPU::Reorder(bool order) {
//mexAssertMsg(order_ == order, "In MatGPU::reorder order should be the same");
if (order_ != order) {
if (size1_ > 1 && size2_ > 1) {
MatGPU mr(size1_, size2_);
mr.order_ = order;
mr = (*this); // reorder
order_ = order;
(*this) = mr;
} else {
order_ = order;
}
}
return *this;
}
MatGPU& MatGPU::ReorderMaps(bool cur_order, bool order) {
if (cur_order != order) {
mexAssertMsg(order_ == true, "In ReorderMaps the order should be true");
std::vector< std::vector<MatGPU> > maps = InitMaps();
for (size_t i = 0; i < maps.size(); ++i) {
for (size_t j = 0; j < maps[i].size(); ++j) {
// setting the correct order instead of default kInternalOrder
maps[i][j].set_order(cur_order);
// actual reordering
maps[i][j].Reorder(order);
}
}
}
return *this;
}
MatGPU& MatGPU::Validate() {
cuda_validate(*this);
return *this;
}
// const functions
Dim MatGPU::tensor_shape() const {
mexAssertMsg(tensor_desc_ != NULL, "Tensor descriptor is not defined");
Dim shape, stride;
cudnnDataType_t data_type = CUDNN_TYPE;
CUDNN_CALL(cudnnGetTensor4dDescriptor(
tensor_desc_, &data_type,
&shape[0], &shape[1], &shape[2], &shape[3],
&stride[0], &stride[1], &stride[2], &stride[3]
));
return shape;
}
Dim MatGPU::filter_shape() const {
mexAssertMsg(filter_desc_ != NULL, "Filter descriptor is not defined");
Dim shape;
cudnnDataType_t data_type = CUDNN_TYPE;
cudnnTensorFormat_t tensor_format = CUDNN_LAYOUT;
CUDNN_CALL(cudnnGetFilter4dDescriptor(
filter_desc_, &data_type, &tensor_format,
&shape[0], &shape[1], &shape[2], &shape[3]
));
return shape;
}
std::vector< std::vector<MatGPU> > MatGPU::InitMaps() const {
mexAssertMsg(order_ == true, "In InitMaps the order should be true");
mexAssertMsg(tensor_desc_ == NULL || filter_desc_ == NULL, "Both descriptors are defined");
Dim dims;
if (tensor_desc_ != NULL) {
dims = tensor_shape();
} else if (filter_desc_ != NULL) {
dims = filter_shape();
} else {
mexAssertMsg(false, "Neither of descriptors is defined");
}
mexAssertMsg(size() == dims[0] * dims[1] * dims[2] * dims[3],
"In InitMaps dims assert");
// splitting the 2nd dimension
size_t batchsize = dims[0], channels = dims[1], numel = dims[2] * dims[3];
size_t pixels_num = channels * numel;
std::vector< std::vector<MatGPU> > matrices(batchsize);
for (size_t k = 0; k < batchsize; ++k) {
matrices[k].resize(channels);
for (size_t j = 0; j < channels; ++j) {
matrices[k][j].attach(data_ + k * pixels_num + j * numel,
dims[2], dims[3], kInternalOrder);
}
}
return matrices;
}
ftype MatGPU::sum() const {
return cuda_sum(*this);
}
// CPU <-> GPU functions
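// Host <-> device copies. Both directions assume the destination is already
// allocated with matching sizes (no resize here); when the storage orders
// differ, the copy is staged through a temporary device matrix and reordered
// on the GPU.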
MatGPU& MatGPU::operator = (const MatCPU &a) {
// no resize in order to ensure that b.data_ is fixed
mexAssertMsg(!a.empty() && !empty(), "In HostToDevice one of the arrays is empty");
mexAssertMsg(a.size1() == size1_ && a.size2() == size2_,
"In HostToDevice the sizes of matrices do not correspond");
// conversion is to get access to protected members
const MatGPU *a_ptr = static_cast<const MatGPU*>(&a);
if (a.order() == order_) {
CUDA_CALL(cudaMemcpy(data_, a_ptr->data_, BytesNum(), cudaMemcpyHostToDevice));
} else {
MatGPU br(a.size1(), a.size2());
br.order_ = a.order();
br = a; // previous case
(*this) = br; // reorder
}
return *this;
}
void DeviceToHost(const MatGPU &b, MatCPU &a) {
// no resize in order to ensure that b.data_ is fixed
mexAssertMsg(!a.empty() && !b.empty(), "In DeviceToHost one of the arrays is empty");
mexAssertMsg(a.size1() == b.size1_ && a.size2() == b.size2_,
"In DeviceToHost the sizes of matrices do not correspond");
// conversion is to get access to protected members
MatGPU *a_ptr = static_cast<MatGPU*>(&a);
if (a.order() == b.order()) {
CUDA_CALL(cudaMemcpy(a_ptr->data_, b.data_, b.BytesNum(), cudaMemcpyDeviceToHost));
} else {
MatGPU br(a.size1(), a.size2());
br.order_ = a.order();
br = b; // reorder
a = br; // previous case
}
}
void SubSet(MatCPU &a, MatGPU &b, size_t offset, bool dir) {
if (print >= 3) {
MatGPU::StartCudaTimer();
}
MatGPU *a_ptr = static_cast<MatGPU*>(&a);
mexAssertMsg(a_ptr->order_ == true, "In SubSet 'a.order_' should be true");
mexAssertMsg(b.order_ == true, "In SubSet 'b.order_' should be true");
mexAssertMsg(offset + b.size1_ <= a_ptr->size1_ && b.size2_ == a_ptr->size2_,
"In SubSet the sizes don't correspond each other");
MatCPU as;
as.attach(a_ptr->data_ + offset * a_ptr->size2_, b.size1_, b.size2_, true);
if (dir) {
b = as; // HostToDevice
} else {
DeviceToHost(b, as);
}
if (print >= 3) {
MatGPU::MeasureCudaTime("SubSet");
}
}
// friend functions
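// Sum reduces a along one dimension (dim == 1: column sums into a 1 x size2_
// vector, dim == 2: row sums into a size1_ x 1 vector) by multiplying with an
// all-ones vector via cublasSgemv; the ones vector is borrowed from the shared
// workspace, so the GetFromWorkspace caveat above applies.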
void Sum(MatGPU &a, MatGPU &vect, int dim) {
//cuda_sumvect(a, vect, dim);
mexAssertMsg(a.data_ != vect.data_, "In Sum pointers should be different");
cudaStream_t stream = MatGPU::_defaultStream;
cublasHandle_t handle = MatGPU::_cublasHandle;
CUBLAS_CALL(cublasSetStream(handle, stream));
const ftype scale1 = 1.0, scale2 = 0.0;
cublasOperation_t op;
MatGPU ones_vect;
if (dim == 1) {
mexAssertMsg(vect.size1_ == 1 && vect.size2_ == a.size2_,
"In Sum the sizes do not correspond each other");
if (a.order_ == false) {
op = CUBLAS_OP_T;
} else {
op = CUBLAS_OP_N;
}
ones_vect.GetFromWorkspace(1, a.size1_);
} else if (dim == 2) {
mexAssertMsg(vect.size1_ == a.size1_ && vect.size2_ == 1,
"In Sum the sizes do not correspond each other");
if (a.order_ == false) {
op = CUBLAS_OP_N;
} else {
op = CUBLAS_OP_T;
}
ones_vect.GetFromWorkspace(a.size2_, 1);
} else {
mexAssertMsg(false, "In MatGPU::Sum the dimension parameter must be either 1 or 2");
}
int as1, as2;
if (a.order_ == false) {
as1 = a.size1_;
as2 = a.size2_;
} else {
as1 = a.size2_;
as2 = a.size1_;
}
ones_vect.assign(1);
CUBLAS_CALL(cublasSgemv(handle, op, as1, as2,
&scale1, a.data_, as1,
ones_vect.data_, 1,
&scale2, vect.data_, 1));
}
void Mean(MatGPU &a, MatGPU &vect, int dim) {
Sum(a, vect, dim);
if (dim == 1) {
vect /= (ftype) a.size1_;
} else if (dim == 2) {
vect /= (ftype) a.size2_;
} else {
mexAssertMsg(false, "In MatGPU::Mean the dimension parameter must be either 1 or 2");
}
}
void Trans(const MatGPU &a, MatGPU &b) {
//cuda_trans(a, b);
b.linear(0, 1, a, true);
}
// layer transformation functions
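// Prod computes c = op(a) * op(b). When order_ == true (the row-major cuDNN
// layout) the call is issued as c^T = op(b)^T * op(a)^T by swapping the
// operands, so no explicit transposes are needed.
// Example (sketch): with MatGPU a(m, k), b(k, n), c(m, n) in the same order,
// Prod(a, false, b, false, c) leaves c = a * b, while Prod(a, true, b, false, c)
// would expect a to be stored as k x m.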
void Prod(const MatGPU &a, bool a_tr, const MatGPU &b, bool b_tr, MatGPU &c) {
mexAssertMsg(a.order_ == b.order_ && b.order_ == c.order_, "In Prod the orders should be the same");
cudaStream_t stream = MatGPU::_defaultStream;
cublasHandle_t handle = MatGPU::_cublasHandle;
const ftype scale_res = 1.0, scale_cur = 0.0;
CUBLAS_CALL(cublasSetStream(handle, stream));
cublasOperation_t a_op, b_op;
if (!a_tr) {
a_op = CUBLAS_OP_N;
} else {
a_op = CUBLAS_OP_T;
}
if (!b_tr) {
b_op = CUBLAS_OP_N;
} else {
b_op = CUBLAS_OP_T;
}
int as1, as2, bs1, bs2;
if (a.order_ == false) { // Alex kernels
if (!a_tr) { // a
as1 = a.size1_; as2 = a.size2_;
} else { // aT
as1 = a.size2_; as2 = a.size1_;
}
if (!b_tr) { // b
bs1 = b.size1_; bs2 = b.size2_;
} else { // bT
bs1 = b.size2_; bs2 = b.size1_;
}
mexAssertMsg(as2 == bs1, "In Prod the sizes of matrices do not correspond");
mexAssertMsg(c.size1_ == as1 && c.size2_ == bs2, "In Prod the size of output matrix is wrong");
CUBLAS_CALL(cublasSgemm(handle, a_op, b_op, as1, bs2, as2,
&scale_res, a.data_, a.size1_, b.data_, b.size1_,
&scale_cur, c.data_, c.size1_));
} else { // cuDNN kernels
if (!a_tr) { // a
as1 = a.size2_; as2 = a.size1_;
} else { // aT
as1 = a.size1_; as2 = a.size2_;
}
if (!b_tr) { // b
bs1 = b.size2_; bs2 = b.size1_;
} else { // bT
bs1 = b.size1_; bs2 = b.size2_;
}
mexAssertMsg(as1 == bs2, "In Prod the sizes of matrices do not correspond");
mexAssertMsg(c.size1_ == as2 && c.size2_ == bs1, "In Prod the size of output matrix is wrong");
CUBLAS_CALL(cublasSgemm(handle, b_op, a_op, bs1, as2, bs2,
&scale_res, b.data_, b.size2_, a.data_, a.size2_,
&scale_cur, c.data_, c.size2_));
}
}
// filter functions
MatGPU& MatGPU::AddTensor(MatGPU &tensor) {
mexAssert(kInternalOrder == true);
cudnnTensorDescriptor_t desc = GetTensorDesc();
cudnnTensorDescriptor_t tns_desc = tensor.GetTensorDesc();
const ftype scale_res = 1.0, scale_cur = 1.0;
CUDNN_CALL(cudnnAddTensor(MatGPU::_cudnnHandle,
&scale_res, tns_desc, tensor.data_,
&scale_cur, desc, data_
));
return *this;
}
void ConvolutionForward(MatGPU& activs, MatGPU& filters, MatGPU& targets,
const cudnnConvolutionDescriptor_t &conv_desc) {
mexAssert(kInternalOrder == true);
if (print >= 3) {
MatGPU::StartCudaTimer();
}
cudnnTensorDescriptor_t act_desc = activs.GetTensorDesc();
cudnnFilterDescriptor_t flt_desc = filters.GetFilterDesc();
cudnnTensorDescriptor_t trg_desc = targets.GetTensorDesc();
Dim dims;
CUDNN_CALL(cudnnGetConvolution2dForwardOutputDim(
conv_desc, act_desc, flt_desc, &dims[0], &dims[1], &dims[2], &dims[3]
));
Dim trg_shape = targets.tensor_shape();
mexAssertMsg(trg_shape[0] == dims[0] && trg_shape[1] == dims[1] &&
trg_shape[2] == dims[2] && trg_shape[3] == dims[3],
"ConvolutionForward shape assert");
cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
MatGPU::_cudnnHandle, act_desc, flt_desc, conv_desc, trg_desc,
CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,
MatGPU::_cudnnMemoryLimit, &algo
));
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
MatGPU::_cudnnHandle, act_desc, flt_desc, conv_desc, trg_desc,
algo, &ws_size
));
MatGPU workspace;
workspace.GetFromWorkspace(1, ws_size / sizeof(ftype));
const ftype scale_res = 1.0, scale_cur = 0.0;
CUDNN_CALL(cudnnConvolutionForward(MatGPU::_cudnnHandle,
&scale_res, act_desc, activs.data_, flt_desc, filters.data_,
conv_desc, algo, workspace.data_, ws_size,
&scale_cur, trg_desc, targets.data_
));
if (print >= 3) {
MatGPU::MeasureCudaTime("FilterActs");
}
}
void ConvolutionBackwardData(MatGPU& derivs, MatGPU& filters, MatGPU& targets,
const cudnnConvolutionDescriptor_t &conv_desc) {
mexAssert(kInternalOrder == true);
if (print >= 3) {
MatGPU::StartCudaTimer();
}
cudnnTensorDescriptor_t trg_desc = targets.GetTensorDesc();
cudnnFilterDescriptor_t flt_desc = filters.GetFilterDesc();
cudnnTensorDescriptor_t der_desc = derivs.GetTensorDesc();
Dim dims;
CUDNN_CALL(cudnnGetConvolution2dForwardOutputDim(
conv_desc, trg_desc, flt_desc, &dims[0], &dims[1], &dims[2], &dims[3]
));
Dim der_shape = derivs.tensor_shape();
mexAssertMsg(der_shape[0] == dims[0] && der_shape[1] == dims[1] &&
der_shape[2] == dims[2] && der_shape[3] == dims[3],
"ConvolutionBackwardData shape assert");
cudnnConvolutionBwdDataAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
MatGPU::_cudnnHandle, flt_desc, der_desc, conv_desc, trg_desc,
CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT,
MatGPU::_cudnnMemoryLimit, &algo
));
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
MatGPU::_cudnnHandle, flt_desc, der_desc, conv_desc, trg_desc,
algo, &ws_size
));
MatGPU workspace;
workspace.GetFromWorkspace(1, ws_size / sizeof(ftype));
const ftype scale_res = 1.0, scale_cur = 0.0;
CUDNN_CALL(cudnnConvolutionBackwardData(MatGPU::_cudnnHandle,
&scale_res, flt_desc, filters.data_, der_desc, derivs.data_,
conv_desc, algo, workspace.data_, ws_size,
&scale_cur, trg_desc, targets.data_
));
if (print >= 3) {
MatGPU::MeasureCudaTime("ImgActs");
}
}
void ConvolutionBackwardFilter(MatGPU& activs, MatGPU& derivs, MatGPU& targets,
const cudnnConvolutionDescriptor_t &conv_desc) {
mexAssert(kInternalOrder == true);
cudnnTensorDescriptor_t act_desc = activs.GetTensorDesc();
cudnnFilterDescriptor_t trg_desc = targets.GetFilterDesc();
cudnnTensorDescriptor_t der_desc = derivs.GetTensorDesc();
Dim dims;
CUDNN_CALL(cudnnGetConvolution2dForwardOutputDim(
conv_desc, act_desc, trg_desc, &dims[0], &dims[1], &dims[2], &dims[3]
));
Dim der_shape = derivs.tensor_shape();
mexAssertMsg(der_shape[0] == dims[0] && der_shape[1] == dims[1] &&
der_shape[2] == dims[2] && der_shape[3] == dims[3],
"ConvolutionBackwardFilter shape assert");
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
MatGPU::_cudnnHandle, act_desc, der_desc, conv_desc, trg_desc,
CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT,
MatGPU::_cudnnMemoryLimit, &algo
));
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
MatGPU::_cudnnHandle, act_desc, der_desc, conv_desc, trg_desc,
algo, &ws_size
));
MatGPU workspace;
workspace.GetFromWorkspace(1, ws_size / sizeof(ftype));
const ftype scale_res = 1.0, scale_cur = 0.0;
CUDNN_CALL(cudnnConvolutionBackwardFilter(MatGPU::_cudnnHandle,
&scale_res, act_desc, activs.data_, der_desc, derivs.data_,
conv_desc, algo, workspace.data_, ws_size,
&scale_cur, trg_desc, targets.data_
));
}
void ConvolutionBackwardBias(MatGPU& derivs, MatGPU &targets) {
mexAssert(kInternalOrder == true);
cudnnTensorDescriptor_t der_desc = derivs.GetTensorDesc();
cudnnTensorDescriptor_t trg_desc = targets.GetTensorDesc();
const ftype scale_res = 1.0, scale_cur = 0.0;
CUDNN_CALL(cudnnConvolutionBackwardBias(MatGPU::_cudnnHandle,
&scale_res, der_desc, derivs.data_,
&scale_cur, trg_desc, targets.data_
));
};
// scaling functions
void Pooling(MatGPU& activs, MatGPU& targets,
cudnnPoolingDescriptor_t pool_desc) {
mexAssert(kInternalOrder == true);
cudnnTensorDescriptor_t act_desc = activs.GetTensorDesc();
cudnnTensorDescriptor_t trg_desc = targets.GetTensorDesc();
Dim dims;
CUDNN_CALL(cudnnGetPooling2dForwardOutputDim(
pool_desc, act_desc, &dims[0], &dims[1], &dims[2], &dims[3]
));
Dim trg_shape = targets.tensor_shape();
mexAssertMsg(trg_shape[0] == dims[0] && trg_shape[1] == dims[1] &&
trg_shape[2] == dims[2] && trg_shape[3] == dims[3],
"Pooling shape assert");
const ftype scale_res = 1.0, scale_cur = 0.0;
CUDNN_CALL(cudnnPoolingForward(MatGPU::_cudnnHandle, pool_desc,
&scale_res, act_desc, activs.data_,
&scale_cur, trg_desc, targets.data_
));
}
void PoolingUndo(MatGPU& activs, MatGPU& pool_activs,
MatGPU& pool_derivs, MatGPU& derivs,
cudnnPoolingDescriptor_t pool_desc, bool dir) {
// dir == true -> backward, derivs are targets
// dir == false -> forward, pool_derivs are targets
mexAssert(kInternalOrder == true);
mexAssertMsg(activs.size1_ == derivs.size1_ &&
activs.size2_ == derivs.size2_,
"In 'PoolingUndo' activs size assert");
mexAssertMsg(pool_activs.size1_ == pool_derivs.size1_ &&
pool_activs.size2_ == pool_derivs.size2_,
"In 'PoolingUndo' pool_activs.size assert");
cudnnTensorDescriptor_t act_desc = activs.GetTensorDesc();
cudnnTensorDescriptor_t trg_desc = derivs.GetTensorDesc();
cudnnTensorDescriptor_t pool_act_desc = pool_activs.GetTensorDesc();
cudnnTensorDescriptor_t pool_der_desc = pool_derivs.GetTensorDesc();
Dim dims;
CUDNN_CALL(cudnnGetPooling2dForwardOutputDim(
pool_desc, act_desc, &dims[0], &dims[1], &dims[2], &dims[3]
));
Dim pool_act_shape = pool_activs.tensor_shape();
mexAssertMsg(pool_act_shape[0] == dims[0] && pool_act_shape[1] == dims[1] &&
pool_act_shape[2] == dims[2] && pool_act_shape[3] == dims[3],
"PoolingUndo shape assert");
if (dir == true) {
const ftype scale_res = 1.0, scale_cur = 0.0;
CUDNN_CALL(cudnnPoolingBackward(MatGPU::_cudnnHandle, pool_desc,
&scale_res, pool_act_desc, pool_activs.data_,
pool_der_desc, pool_derivs.data_, act_desc, activs.data_,
&scale_cur, trg_desc, derivs.data_
));
} else {
Dim prev_dims = activs.tensor_shape();
Pair scale, padding, stride;
cudnnPoolingMode_t pool_mode = CUDNN_POOLING_MAX;
cudnnNanPropagation_t nan_prop_mode = CUDNN_PROPAGATE_NAN;
CUDNN_CALL(cudnnGetPooling2dDescriptor(pool_desc,
&pool_mode, &nan_prop_mode,
&scale[0], &scale[1], &padding[0], &padding[1], &stride[0], &stride[1]
));
_maxPoolThirdPass(activs, pool_activs, derivs, pool_derivs,
prev_dims[2], prev_dims[3], dims[2], dims[3],
scale, padding, stride);
}
}
void AffineTransform(const MatGPU &images, MatGPU &targets,
const MatGPU &shift_mat, const MatGPU &scale_mat,
const MatGPU &mirror_mat, const MatGPU &angle_mat,
ftype defval, bool dir) {
Dim img_dims = images.tensor_shape();
Dim trg_dims = targets.tensor_shape();
_affineTransform(images, targets,
img_dims[2], img_dims[3], trg_dims[2], trg_dims[3],
shift_mat, scale_mat, mirror_mat, angle_mat, defval, dir);
}
/*
void VaryColors(MatGPU &images, const Dim &dims,
const MatGPU &eigenvectors, ftype noise_std) {
int batchsize = images.size1();
int channels = images.size2() / (dims[2] * dims[3]);
MatGPU noise_mat, add_mat;
MatGPU::swapWithBuffer(noise_mat, -7);
noise_mat.resize(batchsize, channels);
// hack, because randnorm does not work for odd numbers. Start.
if (noise_mat.size1() * noise_mat.size2() % 2 > 0) {
MatGPU rndmat;
rndmat.attach(
noise_mat, 0,
noise_mat.size1() * noise_mat.size2() - 1, 1, noise_mat.order()
);
rndmat.randnorm() *= noise_std;
rndmat.attach(
noise_mat, noise_mat.size1() * noise_mat.size2() - 1,
1, 1, noise_mat.order()
);
(rndmat.rand() -= 0.5) *= noise_std;
} else {
noise_mat.randnorm() *= noise_std;
}
// hack, because randnorm does not work for odd numbers. End.
MatGPU::swapWithBuffer(add_mat, -8);
add_mat.resize(batchsize, channels);
Prod(noise_mat, false, eigenvectors, true, add_mat);
_varyColors(images, add_mat);
MatGPU::swapWithBuffer(noise_mat, -7);
MatGPU::swapWithBuffer(add_mat, -8);
} */
|
f41e21b68bf6031a6fdee6c37a334198566bb0e9.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright Contributors to the Open Shading Language project.
// SPDX-License-Identifier: BSD-3-Clause
// https://github.com/AcademySoftwareFoundation/OpenShadingLanguage
#include <optix.h>
#if (OPTIX_VERSION < 70000)
#include <optixu/optixu_math_namespace.h>
#else
#include <optix.h>
#include <hip/hip_runtime.h>
#endif
#include <OSL/device_string.h>
#include <OSL/oslclosure.h>
#include "rend_lib.h"
#include "util.h"
#if (OPTIX_VERSION < 70000)
// Ray payload
rtDeclareVariable (PRD_radiance, prd_radiance, rtPayload, );
// ray/hit variables
rtDeclareVariable (float3, shading_normal, attribute shading_normal, );
rtDeclareVariable (float3, geometric_normal, attribute geometric_normal,);
rtDeclareVariable (float3, texcoord, attribute texcoord, );
rtDeclareVariable (float, surface_area, attribute surface_area, );
rtDeclareVariable (float3, dPdu, attribute dPdu, );
rtDeclareVariable (float3, dPdv, attribute dPdv, );
rtDeclareVariable (int, obj_id, attribute obj_id, );
rtDeclareVariable (int, lgt_idx, attribute lgt_idx, );
// ray/hit variables
rtDeclareVariable (uint2, launch_index, rtLaunchIndex, );
rtDeclareVariable (uint2, launch_dim, rtLaunchDim, );
rtDeclareVariable (optix::Ray, ray, rtCurrentRay, );
rtDeclareVariable (float, t_hit, rtIntersectionDistance, );
// Buffers
rtBuffer<float3,2> output_buffer;
// Function pointers for the OSL shader
rtDeclareVariable (rtCallableProgramId<void (void*, void*, void*, void*, int)>, osl_init_func, , );
rtDeclareVariable (rtCallableProgramId<void (void*, void*, void*, void*, int)>, osl_group_func, ,);
RT_PROGRAM void any_hit_shadow()
{
rtTerminateRay();
}
static __device__
void globals_from_hit(ShaderGlobals& sg)
{
// Setup the ShaderGlobals
sg.I = ray.direction;
sg.N = normalize(rtTransformNormal (RT_OBJECT_TO_WORLD, shading_normal));
sg.Ng = normalize(rtTransformNormal (RT_OBJECT_TO_WORLD, geometric_normal));
sg.P = ray.origin + t_hit * ray.direction;
sg.dPdu = dPdu;
sg.u = texcoord.x;
sg.v = texcoord.y;
sg.Ci = NULL;
sg.surfacearea = surface_area;
sg.backfacing = (dot(sg.N, sg.I) > 0.0f);
if (sg.backfacing) {
sg.N = -sg.N;
sg.Ng = -sg.Ng;
}
// NB: These variables are not used in the current iteration of the sample
sg.raytype = CAMERA;
sg.flipHandedness = 0;
}
static __device__
float3 process_closure(const OSL::ClosureColor* closure_tree)
{
OSL::Color3 result = OSL::Color3 (0.0f);
if (!closure_tree) {
return make_float3(result.x, result.y, result.z);
}
// The depth of the closure tree must not exceed the stack size.
// A stack size of 8 is probably quite generous for relatively
// balanced trees.
const int STACK_SIZE = 8;
// Non-recursive traversal stack
int stack_idx = 0;
const OSL::ClosureColor* ptr_stack[STACK_SIZE];
OSL::Color3 weight_stack[STACK_SIZE];
// Shading accumulator
OSL::Color3 weight = OSL::Color3(1.0f);
const void* cur = closure_tree;
while (cur) {
switch (((OSL::ClosureColor*)cur)->id) {
case OSL::ClosureColor::ADD: {
ptr_stack [stack_idx ] = ((OSL::ClosureAdd*) cur)->closureB;
weight_stack[stack_idx++] = weight;
cur = ((OSL::ClosureAdd*) cur)->closureA;
break;
}
case OSL::ClosureColor::MUL: {
weight *= ((OSL::ClosureMul*) cur)->weight;
cur = ((OSL::ClosureMul*) cur)->closure;
break;
}
case EMISSION_ID: {
cur = NULL;
break;
}
case DIFFUSE_ID:
case OREN_NAYAR_ID:
case PHONG_ID:
case WARD_ID:
case REFLECTION_ID:
case REFRACTION_ID:
case FRESNEL_REFLECTION_ID: {
result += ((OSL::ClosureComponent*) cur)->w * weight;
cur = NULL;
break;
}
case MICROFACET_ID: {
const char* mem = (const char*)((OSL::ClosureComponent*) cur)->data();
const char* dist_str = *(const char**) &mem[0];
#if 0
if (launch_index.x == launch_dim.x / 2 && launch_index.y == launch_dim.y / 2)
printf ("microfacet, dist: %s\n", HDSTR(dist_str).c_str());
#endif
if (HDSTR(dist_str) == OSL::DeviceStrings::default_)
return make_float3(0.0f, 1.0f, 1.0f);
return make_float3(1.0f, 0.0f, 1.0f);
}
default:
cur = NULL;
break;
}
if (cur == NULL && stack_idx > 0) {
cur = ptr_stack [--stack_idx];
weight = weight_stack[ stack_idx];
}
}
return make_float3(result.x, result.y, result.z);
}
RT_PROGRAM void closest_hit_osl()
{
// TODO: Fixed-sized allocations can easily be exceeded by arbitrary shader
// networks, so there should be (at least) some mechanism to issue a
// warning or error if the closure or param storage can possibly be
// exceeded.
alignas(8) char closure_pool[256];
alignas(8) char params [256];
ShaderGlobals sg;
globals_from_hit (sg);
// Pack the "closure pool" into one of the ShaderGlobals pointers
*(int*) &closure_pool[0] = 0;
sg.renderstate = &closure_pool[0];
// Create some run-time options structs. The OSL shader fills in the structs
// as it executes, based on the options specified in the shader source.
NoiseOptCUDA noiseopt;
TextureOptCUDA textureopt;
TraceOptCUDA traceopt;
// Pack the pointers to the options structs in a faux "context",
// which is a rough stand-in for the host ShadingContext.
ShadingContextCUDA shading_context = {
&noiseopt, &textureopt, &traceopt
};
sg.context = &shading_context;
// Run the OSL group and init functions
osl_init_func (&sg, params);
osl_group_func(&sg, params);
prd_radiance.result = process_closure ((OSL::ClosureColor*) sg.Ci);
}
#else //#if (OPTIX_VERSION < 70000)
#include "../render_params.h"
extern "C" {
__device__ __constant__ RenderParams render_params;
}
extern"C" __global__ void __anyhit__any_hit_shadow ()
{
optixTerminateRay();
}
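// globals_from_hit: a direct callable selected by the hit kind (0 = quad,
// 1 = sphere, see below) fills local_sg with the object-space geometry of the
// hit primitive; the normals are then transformed to world space and flipped
// for backfacing hits. Note that t_hit is read from optixGetRayTmin() here,
// while the distance of the current hit is normally optixGetRayTmax().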
static __device__
void globals_from_hit (ShaderGlobals& sg)
{
const GenericRecord *record = reinterpret_cast<GenericRecord *> (optixGetSbtDataPointer());
ShaderGlobals local_sg;
// hit-kind 0: quad hit
// 1: sphere hit
optixDirectCall<void, unsigned int, float, float3, float3, ShaderGlobals *>(
optixGetHitKind(),
optixGetPrimitiveIndex(),
optixGetRayTmax(),
optixGetWorldRayOrigin(),
optixGetWorldRayDirection(),
&local_sg);
// Setup the ShaderGlobals
const float3 ray_direction = optixGetWorldRayDirection();
const float3 ray_origin = optixGetWorldRayOrigin();
const float t_hit = optixGetRayTmin();
sg.I = ray_direction;
sg.N = normalize (optixTransformNormalFromObjectToWorldSpace (local_sg.N));
sg.Ng = normalize (optixTransformNormalFromObjectToWorldSpace (local_sg.Ng));
sg.P = ray_origin + t_hit * ray_direction;
sg.dPdu = local_sg.dPdu;
sg.dPdv = local_sg.dPdv;
sg.u = local_sg.u;
sg.v = local_sg.v;
sg.Ci = NULL;
sg.surfacearea = local_sg.surfacearea;
sg.backfacing = dot (sg.N, sg.I) > 0.0f;
sg.shaderID = local_sg.shaderID;
if (sg.backfacing) {
sg.N = -sg.N;
sg.Ng = -sg.Ng;
}
// NB: These variables are not used in the current iteration of the sample
sg.raytype = CAMERA;
sg.flipHandedness = 0;
}
static __device__
float3 process_closure (const OSL::ClosureColor* closure_tree)
{
OSL::Color3 result = OSL::Color3 (0.0f);
if (!closure_tree) {
return make_float3 (result.x, result.y, result.z);
}
// The depth of the closure tree must not exceed the stack size.
// A stack size of 8 is probably quite generous for relatively
// balanced trees.
const int STACK_SIZE = 8;
// Non-recursive traversal stack
int stack_idx = 0;
const OSL::ClosureColor* ptr_stack[STACK_SIZE];
OSL::Color3 weight_stack[STACK_SIZE];
// Shading accumulator
OSL::Color3 weight = OSL::Color3 (1.0f);
const void* cur = closure_tree;
while (cur) {
switch (((OSL::ClosureColor*)cur)->id) {
case OSL::ClosureColor::ADD: {
ptr_stack [stack_idx ] = ((OSL::ClosureAdd*) cur)->closureB;
weight_stack[stack_idx++] = weight;
cur = ((OSL::ClosureAdd*) cur)->closureA;
break;
}
case OSL::ClosureColor::MUL: {
weight *= ((OSL::ClosureMul*) cur)->weight;
cur = ((OSL::ClosureMul*) cur)->closure;
break;
}
case EMISSION_ID: {
cur = NULL;
break;
}
case DIFFUSE_ID:
case OREN_NAYAR_ID:
case PHONG_ID:
case WARD_ID:
case REFLECTION_ID:
case REFRACTION_ID:
case FRESNEL_REFLECTION_ID: {
result += ((OSL::ClosureComponent*) cur)->w * weight;
cur = NULL;
break;
}
case MICROFACET_ID: {
const char* mem = (const char*)((OSL::ClosureComponent*) cur)->data();
const char* dist_str = *(const char**) &mem[0];
if (HDSTR(dist_str) == STRING_PARAMS(default))
return make_float3(0.0f, 1.0f, 1.0f);
else
return make_float3(1.0f, 0.0f, 1.0f);
break;
}
default:
cur = NULL;
break;
}
if (cur == NULL && stack_idx > 0) {
cur = ptr_stack [--stack_idx];
weight = weight_stack[ stack_idx];
}
}
return make_float3(result.x, result.y, result.z);
}
extern "C" __global__ void __closesthit__closest_hit_osl()
{
// TODO: Fixed-sized allocations can easily be exceeded by arbitrary shader
// networks, so there should be (at least) some mechanism to issue a
// warning or error if the closure or param storage can possibly be
// exceeded.
alignas(8) char closure_pool[256];
alignas(8) char params [256];
ShaderGlobals sg;
globals_from_hit (sg);
// Pack the "closure pool" into one of the ShaderGlobals pointers
*(int*) &closure_pool[0] = 0;
sg.renderstate = &closure_pool[0];
// Create some run-time options structs. The OSL shader fills in the structs
// as it executes, based on the options specified in the shader source.
NoiseOptCUDA noiseopt;
TextureOptCUDA textureopt;
TraceOptCUDA traceopt;
// Pack the pointers to the options structs in a faux "context",
// which is a rough stand-in for the host ShadingContext.
ShadingContextCUDA shading_context = {
&noiseopt, &textureopt, &traceopt
};
sg.context = &shading_context;
// Run the OSL group and init functions
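// Direct-callable layout assumed here: slots 0 and 1 hold the quad/sphere
// hit callables used in globals_from_hit, and every material contributes an
// (init, group) pair, so shader i resolves to indices 2 + 2*i and 2 + 2*i + 1.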
const unsigned int shaderInitOpIdx = 2u + 2u * sg.shaderID + 0u;
const unsigned int shaderGroupIdx = 2u + 2u * sg.shaderID + 1u;
optixDirectCall<void, ShaderGlobals*, void *, void*, void*, int>(shaderInitOpIdx, &sg, params, nullptr, nullptr, 0); // call osl_init_func
optixDirectCall<void, ShaderGlobals*, void *, void*, void*, int>(shaderGroupIdx , &sg, params, nullptr, nullptr, 0); // call osl_group_func
float3 result = process_closure ((OSL::ClosureColor*) sg.Ci);
uint3 launch_dims = optixGetLaunchDimensions();
uint3 launch_index = optixGetLaunchIndex();
float3* output_buffer = reinterpret_cast<float3 *>(render_params.output_buffer);
int pixel = launch_index.y * launch_dims.x + launch_index.x;
output_buffer[pixel] = make_float3(result.x, result.y, result.z);
}
#endif //#if (OPTIX_VERSION < 70000)
| f41e21b68bf6031a6fdee6c37a334198566bb0e9.cu | // Copyright Contributors to the Open Shading Language project.
// SPDX-License-Identifier: BSD-3-Clause
// https://github.com/AcademySoftwareFoundation/OpenShadingLanguage
#include <optix.h>
#if (OPTIX_VERSION < 70000)
#include <optixu/optixu_math_namespace.h>
#else
#include <optix.h>
#include <cuda_runtime.h>
#endif
#include <OSL/device_string.h>
#include <OSL/oslclosure.h>
#include "rend_lib.h"
#include "util.h"
#if (OPTIX_VERSION < 70000)
// Ray payload
rtDeclareVariable (PRD_radiance, prd_radiance, rtPayload, );
// ray/hit variables
rtDeclareVariable (float3, shading_normal, attribute shading_normal, );
rtDeclareVariable (float3, geometric_normal, attribute geometric_normal,);
rtDeclareVariable (float3, texcoord, attribute texcoord, );
rtDeclareVariable (float, surface_area, attribute surface_area, );
rtDeclareVariable (float3, dPdu, attribute dPdu, );
rtDeclareVariable (float3, dPdv, attribute dPdv, );
rtDeclareVariable (int, obj_id, attribute obj_id, );
rtDeclareVariable (int, lgt_idx, attribute lgt_idx, );
// ray/hit variables
rtDeclareVariable (uint2, launch_index, rtLaunchIndex, );
rtDeclareVariable (uint2, launch_dim, rtLaunchDim, );
rtDeclareVariable (optix::Ray, ray, rtCurrentRay, );
rtDeclareVariable (float, t_hit, rtIntersectionDistance, );
// Buffers
rtBuffer<float3,2> output_buffer;
// Function pointers for the OSL shader
rtDeclareVariable (rtCallableProgramId<void (void*, void*, void*, void*, int)>, osl_init_func, , );
rtDeclareVariable (rtCallableProgramId<void (void*, void*, void*, void*, int)>, osl_group_func, ,);
RT_PROGRAM void any_hit_shadow()
{
rtTerminateRay();
}
static __device__
void globals_from_hit(ShaderGlobals& sg)
{
// Setup the ShaderGlobals
sg.I = ray.direction;
sg.N = normalize(rtTransformNormal (RT_OBJECT_TO_WORLD, shading_normal));
sg.Ng = normalize(rtTransformNormal (RT_OBJECT_TO_WORLD, geometric_normal));
sg.P = ray.origin + t_hit * ray.direction;
sg.dPdu = dPdu;
sg.u = texcoord.x;
sg.v = texcoord.y;
sg.Ci = NULL;
sg.surfacearea = surface_area;
sg.backfacing = (dot(sg.N, sg.I) > 0.0f);
if (sg.backfacing) {
sg.N = -sg.N;
sg.Ng = -sg.Ng;
}
// NB: These variables are not used in the current iteration of the sample
sg.raytype = CAMERA;
sg.flipHandedness = 0;
}
static __device__
float3 process_closure(const OSL::ClosureColor* closure_tree)
{
OSL::Color3 result = OSL::Color3 (0.0f);
if (!closure_tree) {
return make_float3(result.x, result.y, result.z);
}
// The depth of the closure tree must not exceed the stack size.
// A stack size of 8 is probably quite generous for relatively
// balanced trees.
const int STACK_SIZE = 8;
// Non-recursive traversal stack
int stack_idx = 0;
const OSL::ClosureColor* ptr_stack[STACK_SIZE];
OSL::Color3 weight_stack[STACK_SIZE];
// Shading accumulator
OSL::Color3 weight = OSL::Color3(1.0f);
const void* cur = closure_tree;
while (cur) {
switch (((OSL::ClosureColor*)cur)->id) {
case OSL::ClosureColor::ADD: {
ptr_stack [stack_idx ] = ((OSL::ClosureAdd*) cur)->closureB;
weight_stack[stack_idx++] = weight;
cur = ((OSL::ClosureAdd*) cur)->closureA;
break;
}
case OSL::ClosureColor::MUL: {
weight *= ((OSL::ClosureMul*) cur)->weight;
cur = ((OSL::ClosureMul*) cur)->closure;
break;
}
case EMISSION_ID: {
cur = NULL;
break;
}
case DIFFUSE_ID:
case OREN_NAYAR_ID:
case PHONG_ID:
case WARD_ID:
case REFLECTION_ID:
case REFRACTION_ID:
case FRESNEL_REFLECTION_ID: {
result += ((OSL::ClosureComponent*) cur)->w * weight;
cur = NULL;
break;
}
case MICROFACET_ID: {
const char* mem = (const char*)((OSL::ClosureComponent*) cur)->data();
const char* dist_str = *(const char**) &mem[0];
#if 0
if (launch_index.x == launch_dim.x / 2 && launch_index.y == launch_dim.y / 2)
printf ("microfacet, dist: %s\n", HDSTR(dist_str).c_str());
#endif
if (HDSTR(dist_str) == OSL::DeviceStrings::default_)
return make_float3(0.0f, 1.0f, 1.0f);
return make_float3(1.0f, 0.0f, 1.0f);
}
default:
cur = NULL;
break;
}
if (cur == NULL && stack_idx > 0) {
cur = ptr_stack [--stack_idx];
weight = weight_stack[ stack_idx];
}
}
return make_float3(result.x, result.y, result.z);
}
RT_PROGRAM void closest_hit_osl()
{
// TODO: Fixed-sized allocations can easily be exceeded by arbitrary shader
// networks, so there should be (at least) some mechanism to issue a
// warning or error if the closure or param storage can possibly be
// exceeded.
alignas(8) char closure_pool[256];
alignas(8) char params [256];
ShaderGlobals sg;
globals_from_hit (sg);
// Pack the "closure pool" into one of the ShaderGlobals pointers
*(int*) &closure_pool[0] = 0;
sg.renderstate = &closure_pool[0];
// Create some run-time options structs. The OSL shader fills in the structs
// as it executes, based on the options specified in the shader source.
NoiseOptCUDA noiseopt;
TextureOptCUDA textureopt;
TraceOptCUDA traceopt;
// Pack the pointers to the options structs in a faux "context",
// which is a rough stand-in for the host ShadingContext.
ShadingContextCUDA shading_context = {
&noiseopt, &textureopt, &traceopt
};
sg.context = &shading_context;
// Run the OSL group and init functions
osl_init_func (&sg, params);
osl_group_func(&sg, params);
prd_radiance.result = process_closure ((OSL::ClosureColor*) sg.Ci);
}
#else //#if (OPTIX_VERSION < 70000)
#include "../render_params.h"
extern "C" {
__device__ __constant__ RenderParams render_params;
}
extern"C" __global__ void __anyhit__any_hit_shadow ()
{
optixTerminateRay();
}
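// globals_from_hit: a direct callable selected by the hit kind (0 = quad,
// 1 = sphere, see below) fills local_sg with the object-space geometry of the
// hit primitive; the normals are then transformed to world space and flipped
// for backfacing hits. Note that t_hit is read from optixGetRayTmin() here,
// while the distance of the current hit is normally optixGetRayTmax().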
static __device__
void globals_from_hit (ShaderGlobals& sg)
{
const GenericRecord *record = reinterpret_cast<GenericRecord *> (optixGetSbtDataPointer());
ShaderGlobals local_sg;
// hit-kind 0: quad hit
// 1: sphere hit
optixDirectCall<void, unsigned int, float, float3, float3, ShaderGlobals *>(
optixGetHitKind(),
optixGetPrimitiveIndex(),
optixGetRayTmax(),
optixGetWorldRayOrigin(),
optixGetWorldRayDirection(),
&local_sg);
// Setup the ShaderGlobals
const float3 ray_direction = optixGetWorldRayDirection();
const float3 ray_origin = optixGetWorldRayOrigin();
const float t_hit = optixGetRayTmin();
sg.I = ray_direction;
sg.N = normalize (optixTransformNormalFromObjectToWorldSpace (local_sg.N));
sg.Ng = normalize (optixTransformNormalFromObjectToWorldSpace (local_sg.Ng));
sg.P = ray_origin + t_hit * ray_direction;
sg.dPdu = local_sg.dPdu;
sg.dPdv = local_sg.dPdv;
sg.u = local_sg.u;
sg.v = local_sg.v;
sg.Ci = NULL;
sg.surfacearea = local_sg.surfacearea;
sg.backfacing = dot (sg.N, sg.I) > 0.0f;
sg.shaderID = local_sg.shaderID;
if (sg.backfacing) {
sg.N = -sg.N;
sg.Ng = -sg.Ng;
}
// NB: These variables are not used in the current iteration of the sample
sg.raytype = CAMERA;
sg.flipHandedness = 0;
}
static __device__
float3 process_closure (const OSL::ClosureColor* closure_tree)
{
OSL::Color3 result = OSL::Color3 (0.0f);
if (!closure_tree) {
return make_float3 (result.x, result.y, result.z);
}
// The depth of the closure tree must not exceed the stack size.
// A stack size of 8 is probably quite generous for relatively
// balanced trees.
const int STACK_SIZE = 8;
// Non-recursive traversal stack
int stack_idx = 0;
const OSL::ClosureColor* ptr_stack[STACK_SIZE];
OSL::Color3 weight_stack[STACK_SIZE];
// Shading accumulator
OSL::Color3 weight = OSL::Color3 (1.0f);
const void* cur = closure_tree;
while (cur) {
switch (((OSL::ClosureColor*)cur)->id) {
case OSL::ClosureColor::ADD: {
ptr_stack [stack_idx ] = ((OSL::ClosureAdd*) cur)->closureB;
weight_stack[stack_idx++] = weight;
cur = ((OSL::ClosureAdd*) cur)->closureA;
break;
}
case OSL::ClosureColor::MUL: {
weight *= ((OSL::ClosureMul*) cur)->weight;
cur = ((OSL::ClosureMul*) cur)->closure;
break;
}
case EMISSION_ID: {
cur = NULL;
break;
}
case DIFFUSE_ID:
case OREN_NAYAR_ID:
case PHONG_ID:
case WARD_ID:
case REFLECTION_ID:
case REFRACTION_ID:
case FRESNEL_REFLECTION_ID: {
result += ((OSL::ClosureComponent*) cur)->w * weight;
cur = NULL;
break;
}
case MICROFACET_ID: {
const char* mem = (const char*)((OSL::ClosureComponent*) cur)->data();
const char* dist_str = *(const char**) &mem[0];
if (HDSTR(dist_str) == STRING_PARAMS(default))
return make_float3(0.0f, 1.0f, 1.0f);
else
return make_float3(1.0f, 0.0f, 1.0f);
break;
}
default:
cur = NULL;
break;
}
if (cur == NULL && stack_idx > 0) {
cur = ptr_stack [--stack_idx];
weight = weight_stack[ stack_idx];
}
}
return make_float3(result.x, result.y, result.z);
}
extern "C" __global__ void __closesthit__closest_hit_osl()
{
// TODO: Fixed-sized allocations can easily be exceeded by arbitrary shader
// networks, so there should be (at least) some mechanism to issue a
// warning or error if the closure or param storage can possibly be
// exceeded.
alignas(8) char closure_pool[256];
alignas(8) char params [256];
ShaderGlobals sg;
globals_from_hit (sg);
// Pack the "closure pool" into one of the ShaderGlobals pointers
*(int*) &closure_pool[0] = 0;
sg.renderstate = &closure_pool[0];
// Create some run-time options structs. The OSL shader fills in the structs
// as it executes, based on the options specified in the shader source.
NoiseOptCUDA noiseopt;
TextureOptCUDA textureopt;
TraceOptCUDA traceopt;
// Pack the pointers to the options structs in a faux "context",
// which is a rough stand-in for the host ShadingContext.
ShadingContextCUDA shading_context = {
&noiseopt, &textureopt, &traceopt
};
sg.context = &shading_context;
// Run the OSL group and init functions
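// Direct-callable layout assumed here: slots 0 and 1 hold the quad/sphere
// hit callables used in globals_from_hit, and every material contributes an
// (init, group) pair, so shader i resolves to indices 2 + 2*i and 2 + 2*i + 1.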
const unsigned int shaderInitOpIdx = 2u + 2u * sg.shaderID + 0u;
const unsigned int shaderGroupIdx = 2u + 2u * sg.shaderID + 1u;
optixDirectCall<void, ShaderGlobals*, void *, void*, void*, int>(shaderInitOpIdx, &sg, params, nullptr, nullptr, 0); // call osl_init_func
optixDirectCall<void, ShaderGlobals*, void *, void*, void*, int>(shaderGroupIdx , &sg, params, nullptr, nullptr, 0); // call osl_group_func
float3 result = process_closure ((OSL::ClosureColor*) sg.Ci);
uint3 launch_dims = optixGetLaunchDimensions();
uint3 launch_index = optixGetLaunchIndex();
float3* output_buffer = reinterpret_cast<float3 *>(render_params.output_buffer);
int pixel = launch_index.y * launch_dims.x + launch_index.x;
output_buffer[pixel] = make_float3(result.x, result.y, result.z);
}
#endif //#if (OPTIX_VERSION < 70000)
|
a1083f8d72a995e8bee51433e9138d768e1c16df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"fd.h"
#include"../gpu.h"
#include"../param/const.h"
#include"../param/param.h"
#include"../material/material.h"
#include"../field/field.h"
#include"../cpml/cpml.h"
#include"../slide/slide.h"
#include<cstdio>
#include<cstdlib>
//#define topabsorb false
#define s_data(i,j) s_data[j][i]
#define CHECK_IF_NEED_UPDATE \
if(usepunch){\
if(\
current_step <= first_arrive_step[(iz/BDIMY)*(nx/BDIMX)+(ix/BDIMX)] ||\
current_step >= last_step [(iz/BDIMY)*(nx/BDIMX)+(ix/BDIMX)] ){ \
return;\
}\
}
#define TxDIFF \
Tx_x=d_coef[ord][0]*( s_data(tx,ty) - s_data(tx-1,ty) )\
-d_coef[ord][1]*( s_data(tx+1,ty) - s_data(tx-2,ty) )\
+d_coef[ord][2]*( s_data(tx+2,ty) - s_data(tx-3,ty) )\
-d_coef[ord][3]*( s_data(tx+3,ty) - s_data(tx-4,ty) );
#define TzDIFF \
Tz_z=d_coef[ord][0]*( s_data(tx,ty+1) - s_data(tx,ty) )\
-d_coef[ord][1]*( s_data(tx,ty+2) - s_data(tx,ty-1) )\
+d_coef[ord][2]*( s_data(tx,ty+3) - s_data(tx,ty-2) )\
-d_coef[ord][3]*( s_data(tx,ty+4) - s_data(tx,ty-3) );
#define DxV \
dxV=d_coef[ord][0]*( s_data(tx+1,ty) - s_data(tx,ty) )\
-d_coef[ord][1]*( s_data(tx+2,ty) - s_data(tx-1,ty) )\
+d_coef[ord][2]*( s_data(tx+3,ty) - s_data(tx-2,ty) )\
-d_coef[ord][3]*( s_data(tx+4,ty) - s_data(tx-3,ty) );
#define DzV \
dzV=d_coef[ord][0]*( s_data(tx,ty) - s_data(tx,ty-1) )\
-d_coef[ord][1]*( s_data(tx,ty+1) - s_data(tx,ty-2) )\
+d_coef[ord][2]*( s_data(tx,ty+2) - s_data(tx,ty-3) )\
-d_coef[ord][3]*( s_data(tx,ty+3) - s_data(tx,ty-4) );
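// ZERO_HOLO clears the shared-memory halo cells (each thread zeroes four
// mirrored positions, which covers the full tile when the block is at least
// 2*radius wide in each dimension), so halo reads that fall outside the grid
// see zeros; EDGE_SHARE then loads a BDIMX x BDIMY block of d_F plus a halo
// of width radius on each side into shared memory.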
#define ZERO_HOLO \
s_data[threadIdx.y][threadIdx.x]=0.0;\
s_data[threadIdx.y][BDIMX+2*radius-1-threadIdx.x]=0.0;\
s_data[BDIMY+2*radius-1-threadIdx.y][threadIdx.x]=0.0;\
s_data[BDIMY+2*radius-1-threadIdx.y][BDIMX+2*radius-1-threadIdx.x]=0.0;
#define EDGE_SHARE(d_F) \
if(threadIdx.y<radius && iz>radius){\
s_data[threadIdx.y][tx] = d_F[in_idx-radius*nx];\
}\
if(threadIdx.y<radius && iz+BDIMY<nz){\
s_data[threadIdx.y+BDIMY+radius][tx] = d_F[in_idx+BDIMY*nx];\
}\
if(threadIdx.x<radius && ix>radius){\
s_data[ty][threadIdx.x] = d_F[in_idx-radius];\
}\
if(threadIdx.x<radius && ix+BDIMX<nx){\
s_data[ty][threadIdx.x+BDIMX+radius] = d_F[in_idx+BDIMX];\
}\
s_data[ty][tx] = d_F[in_idx];
__constant__ float d_coef[5][4];
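// cudaupdate_stress advances Tx and Tz by one time step from V: the velocity
// tile is staged in shared memory and differentiated with the staggered-grid
// coefficients in d_coef (the per-order table is assumed to be filled by the
// host, e.g. via hipMemcpyToSymbol), with the difference order tapered down
// near the grid edges. Inside the CPML layers of width npml the memory
// variables psi_* are updated with the b/c/k coefficients; when usepunch is
// set, whole BDIMX x BDIMY tiles outside their [first_arrive_step, last_step]
// window are skipped.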
__global__ void cudaupdate_stress(
bool usetable,
int * userestrict mat_index,
bool usepunch,
int current_step,
int * userestrict first_arrive_step,
int * userestrict last_step,
float * userestrict Tx,
float * userestrict Tz,
float * userestrict V,
float * userestrict MUX,
float * userestrict MUZ,
float * userestrict psi_V_x,
float * userestrict psi_V_z,
float * userestrict b_V_x,
float * userestrict b_V_z,
float * userestrict c_V_x,
float * userestrict c_V_z,
float * userestrict k_V_x,
float * userestrict k_V_z,
int nx,
int nz,
int npml,
int izshift)
{
__shared__ float s_data[BDIMY+2*radius][BDIMX+2*radius];
int ix = blockIdx.x*blockDim.x + threadIdx.x;
int iz = blockIdx.y*blockDim.y + threadIdx.y + izshift;
CHECK_IF_NEED_UPDATE;
int ord;
int idx;
int in_idx = iz*nx + ix; // index for global memory
int tx=threadIdx.x+radius; //index for shared memory
int ty=threadIdx.y+radius;
float dxV,dzV;
float MUX_ix_iz;
float MUZ_ix_iz;
if(usetable){
MUX_ix_iz= MUX[mat_index[in_idx]];
MUZ_ix_iz= MUZ[mat_index[in_idx]];
}else{
MUX_ix_iz= MUX[in_idx];
MUZ_ix_iz= MUZ[in_idx];
}
ZERO_HOLO;
__syncthreads();
ord=min(ix,iz);
// ord=ix; // maybe the top layer can also use 8th order; this reduces dispersion
ord=min(ord,nx-1-ix);
ord=min(ord,nz-1-iz);
ord=min(ord,ORD8);
EDGE_SHARE(V);
__syncthreads();
DxV;
DzV;
if(ix==0 || ix>=nx-1 || iz>=nz-1) return;
if(ix<npml){
//left pml
idx=ix+iz*2*npml;
psi_V_x[idx] = b_V_x[ix] * psi_V_x[idx] + c_V_x[ix] * dxV;
Tx[in_idx] += MUX_ix_iz*( dxV * k_V_x[ix] + psi_V_x[idx] );
}else if(ix>=nx-npml){
//right pml
idx=npml+nx-1-ix+iz*2*npml;
psi_V_x[idx] = b_V_x[ix] * psi_V_x[idx] + c_V_x[ix] * dxV;
Tx[in_idx] += MUX_ix_iz*( dxV * k_V_x[ix] + psi_V_x[idx] );
}else{
Tx[in_idx] += MUX_ix_iz*( dxV );
}
if(iz<npml && topabsorb){
//top pml
idx=(iz*nx)+ix;
psi_V_z[idx] = b_V_z[iz] * psi_V_z[idx] + c_V_z[iz] * dzV;
Tz[in_idx] += MUZ_ix_iz*( dzV * k_V_z[iz] + psi_V_z[idx] ) ;
}else if(iz>=nz-npml){
// bottom
idx=(npml+nz-1-iz)*nx+ix;
psi_V_z[idx] = b_V_z[iz] * psi_V_z[idx] + c_V_z[iz] * dzV;
Tz[in_idx] += MUZ_ix_iz*( dzV * k_V_z[iz] + psi_V_z[idx] ) ;
}else{
Tz[in_idx] += MUZ_ix_iz*( dzV ) ;
}
}
__global__ void cudaupdate_velocity(
bool usetable,
int * userestrict mat_index,
bool usepunch,
int current_step,
int * userestrict first_arrive_step,
int * userestrict last_step,
float * userestrict Tx,
float * userestrict Tz,
float * userestrict V,
float * userestrict BV,
float * userestrict psi_Tx_x,
float * userestrict psi_Tz_z,
float * userestrict b_Tx_x,
float * userestrict b_Tz_z,
float * userestrict c_Tx_x,
float * userestrict c_Tz_z,
float * userestrict k_Tx_x,
float * userestrict k_Tz_z,
int nx,
int nz,
int npml,
int izshift)
{
__shared__ float s_data[BDIMY+2*radius][BDIMX+2*radius];
int ix = blockIdx.x*blockDim.x + threadIdx.x;
int iz = blockIdx.y*blockDim.y + threadIdx.y+izshift;
CHECK_IF_NEED_UPDATE;
int ord;
int idx;
int in_idx = iz*nx + ix; // index for reading input
int tx=threadIdx.x+radius;
int ty=threadIdx.y+radius;
float Tx_x,Tz_z;
float BV_ix_iz;
ZERO_HOLO;
__syncthreads();
if(usetable){
BV_ix_iz= BV[mat_index[in_idx]];
}else{
BV_ix_iz= BV[in_idx];
}
/* stress imaging */
if(iz==0) Tz[in_idx]=-Tz[in_idx+nx];
ord=min(ix,iz);
// ord=ix; // maybe the top layer can also use 8th order; this reduces dispersion
ord=min(ord,nx-1-ix);
ord=min(ord,nz-1-iz);
ord=min(ord,ORD8);
EDGE_SHARE(Tx);
__syncthreads();
TxDIFF;
__syncthreads();
EDGE_SHARE(Tz);
__syncthreads();
TzDIFF;
if(ix<npml){
//left pml
idx=ix+iz*2*npml;
psi_Tx_x[idx] = b_Tx_x[ix] * psi_Tx_x[idx] + c_Tx_x[ix] * Tx_x;
V[in_idx] += BV_ix_iz*( Tx_x * k_Tx_x[ix] + psi_Tx_x[idx] );
}else if(ix>=nx-npml){
//right pml
idx=npml+nx-1-ix+iz*2*npml;
psi_Tx_x[idx] = b_Tx_x[ix] * psi_Tx_x[idx] + c_Tx_x[ix] * Tx_x;
V[in_idx] += BV_ix_iz*( Tx_x * k_Tx_x[ix] + psi_Tx_x[idx] );
}else{
V[in_idx] += BV_ix_iz*( Tx_x );
}
if(iz<npml && topabsorb){
//top pml
idx=(iz*nx)+ix;
psi_Tz_z[idx] = b_Tz_z[iz] * psi_Tz_z[idx] + c_Tz_z[iz] * Tz_z;
V[in_idx] += BV_ix_iz*(Tz_z * k_Tz_z[iz] + psi_Tz_z[idx] );
}else if(iz>=nz-npml){
// bottom
idx=(npml+nz-1-iz)*nx+ix;
psi_Tz_z[idx] = b_Tz_z[iz] * psi_Tz_z[idx] + c_Tz_z[iz] * Tz_z;
V[in_idx] += BV_ix_iz*(Tz_z * k_Tz_z[iz] + psi_Tz_z[idx] );
}else{
V[in_idx] += BV_ix_iz*( Tz_z );
}
}
void cu_step_stress(int deviceid,PARAM ¶m,FIELD & fld, MATERIAL &mat,CPML &cpml,SLIDE &slide)
{
int nz1,nz2,tnz;
int nx=fld.nx;
int nz=fld.nz;
int npml=cpml.npml;
nz1=param.a_nz1[deviceid];
nz2=param.a_nz2[deviceid];
tnz=nz2-nz1+1;
dim3 nblocks((nx+BDIMX-1)/BDIMX,(tnz+BDIMY-1)/BDIMY);
dim3 blocksize(BDIMX,BDIMY);
bool usepunch=slide.usepunch==1;
if(mat.usetable){
hipLaunchKernelGGL(( cudaupdate_stress), dim3(nblocks),dim3(blocksize), 0, 0,
mat.usetable,
mat.index,
usepunch,
slide.current_step,
slide.first_arrive_step,
slide.last_step,
fld.Tx,
fld.Tz,
fld.V,
mat.tbl_MUX,
mat.tbl_MUZ,
cpml.psi.V_x,
cpml.psi.V_z,
cpml.b.V_x,
cpml.b.V_z,
cpml.c.V_x,
cpml.c.V_z,
cpml.k.V_x,
cpml.k.V_z,
nx,
nz,
npml,
nz1);
}else{
hipLaunchKernelGGL(( cudaupdate_stress), dim3(nblocks),dim3(blocksize), 0, 0,
mat.usetable,
mat.index,
usepunch,
slide.current_step,
slide.first_arrive_step,
slide.last_step,
fld.Tx,
fld.Tz,
fld.V,
mat.MUX,
mat.MUZ,
cpml.psi.V_x,
cpml.psi.V_z,
cpml.b.V_x,
cpml.b.V_z,
cpml.c.V_x,
cpml.c.V_z,
cpml.k.V_x,
cpml.k.V_z,
nx,
nz,
npml,
nz1);
}
CUT_CHECK_ERROR("Error in step_forward_stress");
}
void cu_step_velocity(int deviceid,PARAM ¶m,FIELD & fld, MATERIAL &mat,CPML &cpml,SLIDE &slide)
{
int nz1,nz2,tnz;
int nx=fld.nx;
int nz=fld.nz;
int npml=cpml.npml;
nz1=param.a_nz1[deviceid];
nz2=param.a_nz2[deviceid];
tnz=nz2-nz1+1;
dim3 nblocks((nx+BDIMX-1)/BDIMX,(tnz+BDIMY-1)/BDIMY);
dim3 blocksize(BDIMX,BDIMY);
bool usepunch=slide.usepunch==1;
if(mat.usetable){
hipLaunchKernelGGL(( cudaupdate_velocity), dim3(nblocks),dim3(blocksize), 0, 0,
mat.usetable,
mat.index,
usepunch,
slide.current_step,
slide.first_arrive_step,
slide.last_step,
fld.Tx,
fld.Tz,
fld.V,
mat.tbl_BV,
cpml.psi.Tx_x,
cpml.psi.Tz_z,
cpml.b.Tx_x,
cpml.b.Tz_z,
cpml.c.Tx_x,
cpml.c.Tz_z,
cpml.k.Tx_x,
cpml.k.Tz_z,
nx,
nz,
npml,
nz1
);
}else{
hipLaunchKernelGGL(( cudaupdate_velocity), dim3(nblocks),dim3(blocksize), 0, 0,
mat.usetable,
mat.index,
usepunch,
slide.current_step,
slide.first_arrive_step,
slide.last_step,
fld.Tx,
fld.Tz,
fld.V,
mat.BV,
cpml.psi.Tx_x,
cpml.psi.Tz_z,
cpml.b.Tx_x,
cpml.b.Tz_z,
cpml.c.Tx_x,
cpml.c.Tz_z,
cpml.k.Tx_x,
cpml.k.Tz_z,
nx,
nz,
npml,
nz1
);
};
CUT_CHECK_ERROR("Error in step_forward_velocity");
}
| a1083f8d72a995e8bee51433e9138d768e1c16df.cu | #include"fd.h"
#include"../gpu.h"
#include"../param/const.h"
#include"../param/param.h"
#include"../material/material.h"
#include"../field/field.h"
#include"../cpml/cpml.h"
#include"../slide/slide.h"
#include<cstdio>
#include<cstdlib>
//#define topabsorb false
#define s_data(i,j) s_data[j][i]
#define CHECK_IF_NEED_UPDATE \
if(usepunch){\
if(\
current_step <= first_arrive_step[(iz/BDIMY)*(nx/BDIMX)+(ix/BDIMX)] ||\
current_step >= last_step [(iz/BDIMY)*(nx/BDIMX)+(ix/BDIMX)] ){ \
return;\
}\
}
#define TxDIFF \
Tx_x=d_coef[ord][0]*( s_data(tx,ty) - s_data(tx-1,ty) )\
-d_coef[ord][1]*( s_data(tx+1,ty) - s_data(tx-2,ty) )\
+d_coef[ord][2]*( s_data(tx+2,ty) - s_data(tx-3,ty) )\
-d_coef[ord][3]*( s_data(tx+3,ty) - s_data(tx-4,ty) );
#define TzDIFF \
Tz_z=d_coef[ord][0]*( s_data(tx,ty+1) - s_data(tx,ty) )\
-d_coef[ord][1]*( s_data(tx,ty+2) - s_data(tx,ty-1) )\
+d_coef[ord][2]*( s_data(tx,ty+3) - s_data(tx,ty-2) )\
-d_coef[ord][3]*( s_data(tx,ty+4) - s_data(tx,ty-3) );
#define DxV \
dxV=d_coef[ord][0]*( s_data(tx+1,ty) - s_data(tx,ty) )\
-d_coef[ord][1]*( s_data(tx+2,ty) - s_data(tx-1,ty) )\
+d_coef[ord][2]*( s_data(tx+3,ty) - s_data(tx-2,ty) )\
-d_coef[ord][3]*( s_data(tx+4,ty) - s_data(tx-3,ty) );
#define DzV \
dzV=d_coef[ord][0]*( s_data(tx,ty) - s_data(tx,ty-1) )\
-d_coef[ord][1]*( s_data(tx,ty+1) - s_data(tx,ty-2) )\
+d_coef[ord][2]*( s_data(tx,ty+2) - s_data(tx,ty-3) )\
-d_coef[ord][3]*( s_data(tx,ty+3) - s_data(tx,ty-4) );
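// ZERO_HOLO clears the shared-memory halo cells (each thread zeroes four
// mirrored positions, which covers the full tile when the block is at least
// 2*radius wide in each dimension), so halo reads that fall outside the grid
// see zeros; EDGE_SHARE then loads a BDIMX x BDIMY block of d_F plus a halo
// of width radius on each side into shared memory.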
#define ZERO_HOLO \
s_data[threadIdx.y][threadIdx.x]=0.0;\
s_data[threadIdx.y][BDIMX+2*radius-1-threadIdx.x]=0.0;\
s_data[BDIMY+2*radius-1-threadIdx.y][threadIdx.x]=0.0;\
s_data[BDIMY+2*radius-1-threadIdx.y][BDIMX+2*radius-1-threadIdx.x]=0.0;
#define EDGE_SHARE(d_F) \
if(threadIdx.y<radius && iz>radius){\
s_data[threadIdx.y][tx] = d_F[in_idx-radius*nx];\
}\
if(threadIdx.y<radius && iz+BDIMY<nz){\
s_data[threadIdx.y+BDIMY+radius][tx] = d_F[in_idx+BDIMY*nx];\
}\
if(threadIdx.x<radius && ix>radius){\
s_data[ty][threadIdx.x] = d_F[in_idx-radius];\
}\
if(threadIdx.x<radius && ix+BDIMX<nx){\
s_data[ty][threadIdx.x+BDIMX+radius] = d_F[in_idx+BDIMX];\
}\
s_data[ty][tx] = d_F[in_idx];
__constant__ float d_coef[5][4];
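// d_coef[ord][0..3] are the staggered-grid finite-difference coefficients used by the *DIFF
// macros above; the kernels clamp ord to the distance from the nearest boundary (up to ORD8),
// so shorter -- presumably zero-padded -- coefficient rows take over near the edges.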
__global__ void cudaupdate_stress(
bool usetable,
int * userestrict mat_index,
bool usepunch,
int current_step,
int * userestrict first_arrive_step,
int * userestrict last_step,
float * userestrict Tx,
float * userestrict Tz,
float * userestrict V,
float * userestrict MUX,
float * userestrict MUZ,
float * userestrict psi_V_x,
float * userestrict psi_V_z,
float * userestrict b_V_x,
float * userestrict b_V_z,
float * userestrict c_V_x,
float * userestrict c_V_z,
float * userestrict k_V_x,
float * userestrict k_V_z,
int nx,
int nz,
int npml,
int izshift)
{
__shared__ float s_data[BDIMY+2*radius][BDIMX+2*radius];
int ix = blockIdx.x*blockDim.x + threadIdx.x;
int iz = blockIdx.y*blockDim.y + threadIdx.y + izshift;
CHECK_IF_NEED_UPDATE;
int ord;
int idx;
int in_idx = iz*nx + ix; // index for global memory
int tx=threadIdx.x+radius; //index for shared memory
int ty=threadIdx.y+radius;
float dxV,dzV;
float MUX_ix_iz;
float MUZ_ix_iz;
if(usetable){
MUX_ix_iz= MUX[mat_index[in_idx]];
MUZ_ix_iz= MUZ[mat_index[in_idx]];
}else{
MUX_ix_iz= MUX[in_idx];
MUZ_ix_iz= MUZ[in_idx];
}
ZERO_HOLO;
__syncthreads();
ord=min(ix,iz);
// ord=ix; // maybe the top layer can also use 8th order; this reduces dispersion
ord=min(ord,nx-1-ix);
ord=min(ord,nz-1-iz);
ord=min(ord,ORD8);
EDGE_SHARE(V);
__syncthreads();
DxV;
DzV;
if(ix==0 || ix>=nx-1 || iz>=nz-1) return;
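// CPML absorbing boundaries: inside the pml strips the auxiliary memory variable psi is
// advanced by the recursion psi = b*psi + c*dxV (resp. dzV), and the stress increment uses
// dxV*k + psi in place of the bare derivative used in the interior.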
if(ix<npml){
//left pml
idx=ix+iz*2*npml;
psi_V_x[idx] = b_V_x[ix] * psi_V_x[idx] + c_V_x[ix] * dxV;
Tx[in_idx] += MUX_ix_iz*( dxV * k_V_x[ix] + psi_V_x[idx] );
}else if(ix>=nx-npml){
//right pml
idx=npml+nx-1-ix+iz*2*npml;
psi_V_x[idx] = b_V_x[ix] * psi_V_x[idx] + c_V_x[ix] * dxV;
Tx[in_idx] += MUX_ix_iz*( dxV * k_V_x[ix] + psi_V_x[idx] );
}else{
Tx[in_idx] += MUX_ix_iz*( dxV );
}
if(iz<npml && topabsorb){
//top pml
idx=(iz*nx)+ix;
psi_V_z[idx] = b_V_z[iz] * psi_V_z[idx] + c_V_z[iz] * dzV;
Tz[in_idx] += MUZ_ix_iz*( dzV * k_V_z[iz] + psi_V_z[idx] ) ;
}else if(iz>=nz-npml){
// bottom
idx=(npml+nz-1-iz)*nx+ix;
psi_V_z[idx] = b_V_z[iz] * psi_V_z[idx] + c_V_z[iz] * dzV;
Tz[in_idx] += MUZ_ix_iz*( dzV * k_V_z[iz] + psi_V_z[idx] ) ;
}else{
Tz[in_idx] += MUZ_ix_iz*( dzV ) ;
}
}
__global__ void cudaupdate_velocity(
bool usetable,
int * userestrict mat_index,
bool usepunch,
int current_step,
int * userestrict first_arrive_step,
int * userestrict last_step,
float * userestrict Tx,
float * userestrict Tz,
float * userestrict V,
float * userestrict BV,
float * userestrict psi_Tx_x,
float * userestrict psi_Tz_z,
float * userestrict b_Tx_x,
float * userestrict b_Tz_z,
float * userestrict c_Tx_x,
float * userestrict c_Tz_z,
float * userestrict k_Tx_x,
float * userestrict k_Tz_z,
int nx,
int nz,
int npml,
int izshift)
{
__shared__ float s_data[BDIMY+2*radius][BDIMX+2*radius];
int ix = blockIdx.x*blockDim.x + threadIdx.x;
int iz = blockIdx.y*blockDim.y + threadIdx.y+izshift;
CHECK_IF_NEED_UPDATE;
int ord;
int idx;
int in_idx = iz*nx + ix; // index for reading input
int tx=threadIdx.x+radius;
int ty=threadIdx.y+radius;
float Tx_x,Tz_z;
float BV_ix_iz;
ZERO_HOLO;
__syncthreads();
if(usetable){
BV_ix_iz= BV[mat_index[in_idx]];
}else{
BV_ix_iz= BV[in_idx];
}
/* stress imaging */
if(iz==0) Tz[in_idx]=-Tz[in_idx+nx];
ord=min(ix,iz);
// ord=ix; // maybe the top layer can also use 8th order; this reduces dispersion
ord=min(ord,nx-1-ix);
ord=min(ord,nz-1-iz);
ord=min(ord,ORD8);
EDGE_SHARE(Tx);
__syncthreads();
TxDIFF;
__syncthreads();
EDGE_SHARE(Tz);
__syncthreads();
TzDIFF;
if(ix<npml){
//left pml
idx=ix+iz*2*npml;
psi_Tx_x[idx] = b_Tx_x[ix] * psi_Tx_x[idx] + c_Tx_x[ix] * Tx_x;
V[in_idx] += BV_ix_iz*( Tx_x * k_Tx_x[ix] + psi_Tx_x[idx] );
}else if(ix>=nx-npml){
//right pml
idx=npml+nx-1-ix+iz*2*npml;
psi_Tx_x[idx] = b_Tx_x[ix] * psi_Tx_x[idx] + c_Tx_x[ix] * Tx_x;
V[in_idx] += BV_ix_iz*( Tx_x * k_Tx_x[ix] + psi_Tx_x[idx] );
}else{
V[in_idx] += BV_ix_iz*( Tx_x );
}
if(iz<npml && topabsorb){
//top pml
idx=(iz*nx)+ix;
psi_Tz_z[idx] = b_Tz_z[iz] * psi_Tz_z[idx] + c_Tz_z[iz] * Tz_z;
V[in_idx] += BV_ix_iz*(Tz_z * k_Tz_z[iz] + psi_Tz_z[idx] );
}else if(iz>=nz-npml){
// bottom
idx=(npml+nz-1-iz)*nx+ix;
psi_Tz_z[idx] = b_Tz_z[iz] * psi_Tz_z[idx] + c_Tz_z[iz] * Tz_z;
V[in_idx] += BV_ix_iz*(Tz_z * k_Tz_z[iz] + psi_Tz_z[idx] );
}else{
V[in_idx] += BV_ix_iz*( Tz_z );
}
}
void cu_step_stress(int deviceid,PARAM ¶m,FIELD & fld, MATERIAL &mat,CPML &cpml,SLIDE &slide)
{
int nz1,nz2,tnz;
int nx=fld.nx;
int nz=fld.nz;
int npml=cpml.npml;
nz1=param.a_nz1[deviceid];
nz2=param.a_nz2[deviceid];
tnz=nz2-nz1+1;
dim3 nblocks((nx+BDIMX-1)/BDIMX,(tnz+BDIMY-1)/BDIMY);
dim3 blocksize(BDIMX,BDIMY);
bool usepunch=slide.usepunch==1;
if(mat.usetable){
cudaupdate_stress<<<nblocks,blocksize>>>(
mat.usetable,
mat.index,
usepunch,
slide.current_step,
slide.first_arrive_step,
slide.last_step,
fld.Tx,
fld.Tz,
fld.V,
mat.tbl_MUX,
mat.tbl_MUZ,
cpml.psi.V_x,
cpml.psi.V_z,
cpml.b.V_x,
cpml.b.V_z,
cpml.c.V_x,
cpml.c.V_z,
cpml.k.V_x,
cpml.k.V_z,
nx,
nz,
npml,
nz1);
}else{
cudaupdate_stress<<<nblocks,blocksize>>>(
mat.usetable,
mat.index,
usepunch,
slide.current_step,
slide.first_arrive_step,
slide.last_step,
fld.Tx,
fld.Tz,
fld.V,
mat.MUX,
mat.MUZ,
cpml.psi.V_x,
cpml.psi.V_z,
cpml.b.V_x,
cpml.b.V_z,
cpml.c.V_x,
cpml.c.V_z,
cpml.k.V_x,
cpml.k.V_z,
nx,
nz,
npml,
nz1);
}
CUT_CHECK_ERROR("Error in step_forward_stress");
}
void cu_step_velocity(int deviceid,PARAM ¶m,FIELD & fld, MATERIAL &mat,CPML &cpml,SLIDE &slide)
{
int nz1,nz2,tnz;
int nx=fld.nx;
int nz=fld.nz;
int npml=cpml.npml;
nz1=param.a_nz1[deviceid];
nz2=param.a_nz2[deviceid];
tnz=nz2-nz1+1;
dim3 nblocks((nx+BDIMX-1)/BDIMX,(tnz+BDIMY-1)/BDIMY);
dim3 blocksize(BDIMX,BDIMY);
bool usepunch=slide.usepunch==1;
if(mat.usetable){
cudaupdate_velocity<<<nblocks,blocksize>>>(
mat.usetable,
mat.index,
usepunch,
slide.current_step,
slide.first_arrive_step,
slide.last_step,
fld.Tx,
fld.Tz,
fld.V,
mat.tbl_BV,
cpml.psi.Tx_x,
cpml.psi.Tz_z,
cpml.b.Tx_x,
cpml.b.Tz_z,
cpml.c.Tx_x,
cpml.c.Tz_z,
cpml.k.Tx_x,
cpml.k.Tz_z,
nx,
nz,
npml,
nz1
);
}else{
cudaupdate_velocity<<<nblocks,blocksize>>>(
mat.usetable,
mat.index,
usepunch,
slide.current_step,
slide.first_arrive_step,
slide.last_step,
fld.Tx,
fld.Tz,
fld.V,
mat.BV,
cpml.psi.Tx_x,
cpml.psi.Tz_z,
cpml.b.Tx_x,
cpml.b.Tz_z,
cpml.c.Tx_x,
cpml.c.Tz_z,
cpml.k.Tx_x,
cpml.k.Tz_z,
nx,
nz,
npml,
nz1
);
};
CUT_CHECK_ERROR("Error in step_forward_velocity");
}
|
485a290582da365353c984f04410cd1ec0335159.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*********************************************//**
* initialise an array [real] with an assigned
* value init_val.
*********************************************/
#include "gpu_predict.h"
__global__
void kernel_init_array(real *vec, const real init_val, const int vec_len)
{
int i, index;
int ix = blockIdx.x * blockDim.x + threadIdx.x;
for( i = 0; i < CUDA_BLOCK; ++i )
{
index = ix * CUDA_BLOCK + i;
if( index < vec_len)
{
vec[index] = init_val;
}
}
}
void gpu_init_array(real *vec, const int init_val, const int vec_len)
{
int nthread, nblock;
if( CUDA_BLOCK * vec_len < MAX_NUM_THREAD)
{
nthread = ceil( vec_len / CUDA_BLOCK );
nblock = 1;
}
else
{
if( vec_len / CUDA_BLOCK > MAX_NUM_BLOCK * MAX_NUM_THREAD ) // largest block size for sm_2.0
{
printf("gpu_init_array: vector length / CUDA_BLOCK = %d > MAX_NUM_BLOCK * MAX_NUM_THREAD.\n", vec_len/CUDA_BLOCK);
exit(EXIT_FAILURE);
}
nthread = MAX_NUM_THREAD;
nblock = ceil( float(vec_len) / nthread / float(CUDA_BLOCK) );
}
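// Example with hypothetical values (the real CUDA_BLOCK / MAX_NUM_THREAD come from
// gpu_predict.h): CUDA_BLOCK = 8, MAX_NUM_THREAD = 1024 and vec_len = 1e6 give
// nblock = ceil(1e6 / 1024 / 8) = 123 blocks of 1024 threads, each thread writing
// CUDA_BLOCK consecutive elements. Note that in the small-vector branch above,
// vec_len / CUDA_BLOCK is integer division, so a vec_len that is not a multiple of
// CUDA_BLOCK can leave the last few elements uninitialised.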
hipLaunchKernelGGL(( kernel_init_array), dim3(nblock), dim3(nthread) , 0, 0, vec, init_val, vec_len);
}
| 485a290582da365353c984f04410cd1ec0335159.cu | /*********************************************//**
* initialise an array [real] with an assigned
* value init_val.
*********************************************/
#include "gpu_predict.h"
__global__
void kernel_init_array(real *vec, const real init_val, const int vec_len)
{
int i, index;
int ix = blockIdx.x * blockDim.x + threadIdx.x;
for( i = 0; i < CUDA_BLOCK; ++i )
{
index = ix * CUDA_BLOCK + i;
if( index < vec_len)
{
vec[index] = init_val;
}
}
}
void gpu_init_array(real *vec, const int init_val, const int vec_len)
{
int nthread, nblock;
if( CUDA_BLOCK * vec_len < MAX_NUM_THREAD)
{
nthread = ceil( vec_len / CUDA_BLOCK );
nblock = 1;
}
else
{
if( vec_len / CUDA_BLOCK > MAX_NUM_BLOCK * MAX_NUM_THREAD ) // largest block size for sm_2.0
{
printf("gpu_init_array: vector length / CUDA_BLOCK = %d > MAX_NUM_BLOCK * MAX_NUM_THREAD.\n", vec_len/CUDA_BLOCK);
exit(EXIT_FAILURE);
}
nthread = MAX_NUM_THREAD;
nblock = ceil( float(vec_len) / nthread / float(CUDA_BLOCK) );
}
kernel_init_array<<< nblock, nthread >>>(vec, init_val, vec_len);
}
|
1ed610abe59eba100a517ece05933adc7327fb02.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*-----------
*
* layerTransform.cu
*
* This is the source file of a kernel to transform a layer of a layered 2D texture.
*
* This kernel is from CUDA samples. simpleLayeredTexture.cu
*
* streamsOptBenchmark/layerTransform.cu
*
* By Hao Li
*
*------------
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, kernels
// declare texture reference for layered 2D float texture
// Note: The "dim" field in the texture reference template is now deprecated.
// Instead, please use a texture type macro such as hipTextureType1D, etc.
texture<float, hipTextureType2DLayered> tex;
////////////////////////////////////////////////////////////////////////////////
//! Transform a layer of a layered 2D texture using texture lookups
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
__global__ void LayerTransformKernel(float *g_idata, float *g_odata, int width, int height, int layer)
{
for(int l = 0; l < 1000000; l++)
{
for(int i = 0; i < layer; i++){
// calculate this thread's data point
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
// 0.5f offset and division are necessary to access the original data points
// in the texture (such that bilinear interpolation will not be activated).
// For details, see also CUDA Programming Guide, Appendix D
float u = (x+0.5f) / (float) width;
float v = (y+0.5f) / (float) height;
g_odata[i*width*height + y*width + x] = g_idata[i*width*height + y*width + x];
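// note: the store above is immediately overwritten by the texture read below; it appears
// to be kept only to generate extra global-memory traffic for this benchmark kernel.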
// read from texture, do expected transformation and write to global memory
// g_odata[layer*width*height + y*width + x] = -tex2DLayered(tex, u, v, layer) + layer;
g_odata[i*width*height + y*width + x] = -tex2DLayered(tex, u, v, i) + i;
}
}
}
// int main(int argc, char **argv)
// {
// // generate input data for layered texture
// unsigned int width=512, height=512, num_layers = 5;
// unsigned int size = width * height * num_layers * sizeof(float);
// float *h_data = (float *) malloc(size);
// for (unsigned int layer = 0; layer < num_layers; layer++)
// for (int i = 0; i < (int)(width * height); i++)
// {
// h_data[layer*width*height + i] = (float)i;
// }
// // this is the expected transformation of the input data (the expected output)
// float *h_data_ref = (float *) malloc(size);
// for (unsigned int layer = 0; layer < num_layers; layer++)
// for (int i = 0; i < (int)(width * height); i++)
// {
// h_data_ref[layer*width*height + i] = -h_data[layer*width*height + i] + layer;
// }
// // allocate device memory for result
// float *d_data = NULL;
// (hipMalloc((void **) &d_data, size));
// // allocate array and copy image data
// hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
// hipArray *cu_3darray;
// (hipMalloc3DArray(&cu_3darray, &channelDesc, make_hipExtent(width, height, num_layers), hipArrayLayered));
// hipMemcpy3DParms myparms = {0};
// myparms.srcPos = make_hipPos(0,0,0);
// myparms.dstPos = make_hipPos(0,0,0);
// myparms.srcPtr = make_hipPitchedPtr(h_data, width * sizeof(float), width, height);
// myparms.dstArray = cu_3darray;
// myparms.extent = make_hipExtent(width, height, num_layers);
// myparms.kind = hipMemcpyHostToDevice;
// (hipMemcpy3D(&myparms));
// // set texture parameters
// tex.addressMode[0] = hipAddressModeWrap;
// tex.addressMode[1] = hipAddressModeWrap;
// tex.filterMode = hipFilterModeLinear;
// tex.normalized = true; // access with normalized texture coordinates
// // Bind the array to the texture
// (hipBindTextureToArray(tex, cu_3darray, channelDesc));
// dim3 dimBlock(8, 8, 1);
// dim3 dimGrid(width / dimBlock.x, height / dimBlock.y, 1);
// printf("Covering 2D data array of %d x %d: Grid size is %d x %d, each block has 8 x 8 threads\n",
// width, height, dimGrid.x, dimGrid.y);
// transformKernel<<< dimGrid, dimBlock >>>(d_data, width, height, 0); // warmup (for better timing)
// (hipDeviceSynchronize());
// // execute the kernel
// for (unsigned int layer = 0; layer < num_layers; layer++)
// transformKernel<<< dimGrid, dimBlock, 0 >>>(d_data, width, height, layer);
// (hipDeviceSynchronize());
// // allocate mem for the result on host side
// float *h_odata = (float *) malloc(size);
// // copy result from device to host
// (hipMemcpy(h_odata, d_data, size, hipMemcpyDeviceToHost));
// // cleanup memory
// free(h_data);
// free(h_data_ref);
// free(h_odata);
// (hipFree(d_data));
// (hipFreeArray(cu_3darray));
// // hipDeviceReset causes the driver to clean up all state. While
// // not mandatory in normal operation, it is good practice. It is also
// // needed to ensure correct operation when the application is being
// // profiled. Calling hipDeviceReset causes all profile data to be
// // flushed before the application exits
// hipDeviceReset();
// return 0;
// }
| 1ed610abe59eba100a517ece05933adc7327fb02.cu | /*-----------
*
* layerTransform.cu
*
* This is the source file of a kernel to transform a layer of a layered 2D texture.
*
* This kernel is from CUDA samples. simpleLayeredTexture.cu
*
* streamsOptBenchmark/layerTransform.cu
*
* By Hao Li
*
*------------
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, kernels
// declare texture reference for layered 2D float texture
// Note: The "dim" field in the texture reference template is now deprecated.
// Instead, please use a texture type macro such as cudaTextureType1D, etc.
texture<float, cudaTextureType2DLayered> tex;
////////////////////////////////////////////////////////////////////////////////
//! Transform a layer of a layered 2D texture using texture lookups
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
__global__ void LayerTransformKernel(float *g_idata, float *g_odata, int width, int height, int layer)
{
for(int l = 0; l < 1000000; l++)
{
for(int i = 0; i < layer; i++){
// calculate this thread's data point
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
// 0.5f offset and division are necessary to access the original data points
// in the texture (such that bilinear interpolation will not be activated).
// For details, see also CUDA Programming Guide, Appendix D
float u = (x+0.5f) / (float) width;
float v = (y+0.5f) / (float) height;
g_odata[i*width*height + y*width + x] = g_idata[i*width*height + y*width + x];
// read from texture, do expected transformation and write to global memory
// g_odata[layer*width*height + y*width + x] = -tex2DLayered(tex, u, v, layer) + layer;
g_odata[i*width*height + y*width + x] = -tex2DLayered(tex, u, v, i) + i;
}
}
}
// int main(int argc, char **argv)
// {
// // generate input data for layered texture
// unsigned int width=512, height=512, num_layers = 5;
// unsigned int size = width * height * num_layers * sizeof(float);
// float *h_data = (float *) malloc(size);
// for (unsigned int layer = 0; layer < num_layers; layer++)
// for (int i = 0; i < (int)(width * height); i++)
// {
// h_data[layer*width*height + i] = (float)i;
// }
// // this is the expected transformation of the input data (the expected output)
// float *h_data_ref = (float *) malloc(size);
// for (unsigned int layer = 0; layer < num_layers; layer++)
// for (int i = 0; i < (int)(width * height); i++)
// {
// h_data_ref[layer*width*height + i] = -h_data[layer*width*height + i] + layer;
// }
// // allocate device memory for result
// float *d_data = NULL;
// (cudaMalloc((void **) &d_data, size));
// // allocate array and copy image data
// cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
// cudaArray *cu_3darray;
// (cudaMalloc3DArray(&cu_3darray, &channelDesc, make_cudaExtent(width, height, num_layers), cudaArrayLayered));
// cudaMemcpy3DParms myparms = {0};
// myparms.srcPos = make_cudaPos(0,0,0);
// myparms.dstPos = make_cudaPos(0,0,0);
// myparms.srcPtr = make_cudaPitchedPtr(h_data, width * sizeof(float), width, height);
// myparms.dstArray = cu_3darray;
// myparms.extent = make_cudaExtent(width, height, num_layers);
// myparms.kind = cudaMemcpyHostToDevice;
// (cudaMemcpy3D(&myparms));
// // set texture parameters
// tex.addressMode[0] = cudaAddressModeWrap;
// tex.addressMode[1] = cudaAddressModeWrap;
// tex.filterMode = cudaFilterModeLinear;
// tex.normalized = true; // access with normalized texture coordinates
// // Bind the array to the texture
// (cudaBindTextureToArray(tex, cu_3darray, channelDesc));
// dim3 dimBlock(8, 8, 1);
// dim3 dimGrid(width / dimBlock.x, height / dimBlock.y, 1);
// printf("Covering 2D data array of %d x %d: Grid size is %d x %d, each block has 8 x 8 threads\n",
// width, height, dimGrid.x, dimGrid.y);
// transformKernel<<< dimGrid, dimBlock >>>(d_data, width, height, 0); // warmup (for better timing)
// (cudaDeviceSynchronize());
// // execute the kernel
// for (unsigned int layer = 0; layer < num_layers; layer++)
// transformKernel<<< dimGrid, dimBlock, 0 >>>(d_data, width, height, layer);
// (cudaDeviceSynchronize());
// // allocate mem for the result on host side
// float *h_odata = (float *) malloc(size);
// // copy result from device to host
// (cudaMemcpy(h_odata, d_data, size, cudaMemcpyDeviceToHost));
// // cleanup memory
// free(h_data);
// free(h_data_ref);
// free(h_odata);
// (cudaFree(d_data));
// (cudaFreeArray(cu_3darray));
// // cudaDeviceReset causes the driver to clean up all state. While
// // not mandatory in normal operation, it is good practice. It is also
// // needed to ensure correct operation when the application is being
// // profiled. Calling cudaDeviceReset causes all profile data to be
// // flushed before the application exits
// cudaDeviceReset();
// return 0;
// }
|
054e8cdbe97679d912ff7ceb4b91e8dde77af881.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#define CHECK(call) \
{ \
const hipError_t error = call; \
\
if (error != hipSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, hipGetErrorString(error)); \
exit(1); \
} \
}
void checkResult(float *hostRef, float *gpuRef, const int N) {
double epsilon = 1.0E-8;
bool match = 1;
for (int i=0; i<N; i++) {
if (abs(hostRef[i]-gpuRef[i]) > epsilon) {
match = 0;
printf("Arrrays do not match!\n");
printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i);
break;
}
}
if (match) printf("Arrays match.\n\n");
}
void initialData(float *ip, int size) {
// generate data from random number
time_t t;
srand((unsigned) time(&t));
for (int i=0; i<size; i++) {
ip[i] = (float)(rand() & 0xFF)/10.0f;
}
}
double cpuSecond() {
struct timeval tp;
gettimeofday(&tp,NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
void sumArraysOnHost(float *A, float *B, float *C, const int N) {
for (int idx=0; idx<N; idx++)
C[idx] = A[idx] + B[idx];
}
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N) {
//int i = threadIdx.x;
int i = blockIdx.x * blockDim.x + threadIdx.x; //general case
if (i < N) C[i] = B[i] + A[i];
}
int main(int argc, char **argv) {
printf("This program is adding two Arrays on the host and then on the device.\n");
printf("The purpose in to use a timer.\n\n");
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// set up data size of vectors
int nElem = 1<<24;
printf("Vector size %d\n\n", nElem);
// malloc host memory
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float * )malloc(nBytes);
double iStart, iElaps;
// initialize data at host side
iStart = cpuSecond();
initialData(h_A, nElem);
initialData(h_B, nElem);
iElaps = cpuSecond() - iStart;
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add vector at host side for result checks
iStart = cpuSecond();
sumArraysOnHost(h_A, h_B, hostRef, nElem);
iElaps = cpuSecond() - iStart;
printf("Execution on CPU. Time elapsed %f" \
"sec\n\n", iElaps);
// malloc device global memory
float *d_A, *d_B, *d_C;
hipMalloc((float **)&d_A, nBytes);
hipMalloc((float **)&d_B, nBytes);
hipMalloc((float **)&d_C, nBytes);
// transfer data from host to device
hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice);
//invoke kernel at host side
int iLen = 1024;
dim3 block (iLen);
dim3 grid ((nElem + block.x - 1) / block.x);
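// with nElem = 1<<24 and block.x = 1024 this gives grid.x = 2^24 / 1024 = 16384 blocks (exact division)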
iStart = cpuSecond();
hipLaunchKernelGGL(( sumArraysOnGPU), dim3(grid), dim3(block) , 0, 0, d_A, d_B, d_C, nElem);
hipDeviceSynchronize();
iElaps = cpuSecond() - iStart;
printf("Execution on GPU, configuration <<<%d, %d>>> Time elapsed %f" \
"sec\n", grid.x, block.x, iElaps);
// copy kernel result back to the host side
hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost);
// check device results
checkResult(hostRef, gpuRef, nElem);
// free device global memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
return(0);
}
| 054e8cdbe97679d912ff7ceb4b91e8dde77af881.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include <sys/time.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
\
if (error != cudaSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
void checkResult(float *hostRef, float *gpuRef, const int N) {
double epsilon = 1.0E-8;
bool match = 1;
for (int i=0; i<N; i++) {
if (abs(hostRef[i]-gpuRef[i]) > epsilon) {
match = 0;
printf("Arrrays do not match!\n");
printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i);
break;
}
}
if (match) printf("Arrays match.\n\n");
}
void initialData(float *ip, int size) {
// generate data from random number
time_t t;
srand((unsigned) time(&t));
for (int i=0; i<size; i++) {
ip[i] = (float)(rand() & 0xFF)/10.0f;
}
}
double cpuSecond() {
struct timeval tp;
gettimeofday(&tp,NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
void sumArraysOnHost(float *A, float *B, float *C, const int N) {
for (int idx=0; idx<N; idx++)
C[idx] = A[idx] + B[idx];
}
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N) {
//int i = threadIdx.x;
int i = blockIdx.x * blockDim.x + threadIdx.x; //general case
if (i < N) C[i] = B[i] + A[i];
}
int main(int argc, char **argv) {
printf("This program is adding two Arrays on the host and then on the device.\n");
printf("The purpose in to use a timer.\n\n");
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// set up data size of vectors
int nElem = 1<<24;
printf("Vector size %d\n\n", nElem);
// malloc host memory
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float * )malloc(nBytes);
double iStart, iElaps;
// initialize data at host side
iStart = cpuSecond();
initialData(h_A, nElem);
initialData(h_B, nElem);
iElaps = cpuSecond() - iStart;
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add vector at host side for result checks
iStart = cpuSecond();
sumArraysOnHost(h_A, h_B, hostRef, nElem);
iElaps = cpuSecond() - iStart;
printf("Execution on CPU. Time elapsed %f" \
"sec\n\n", iElaps);
// malloc device global memory
float *d_A, *d_B, *d_C;
cudaMalloc((float **)&d_A, nBytes);
cudaMalloc((float **)&d_B, nBytes);
cudaMalloc((float **)&d_C, nBytes);
// transfer data from host to device
cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);
//invoke kernel at host side
int iLen = 1024;
dim3 block (iLen);
dim3 grid ((nElem + block.x - 1) / block.x);
iStart = cpuSecond();
sumArraysOnGPU<<< grid, block >>>(d_A, d_B, d_C, nElem);
cudaDeviceSynchronize();
iElaps = cpuSecond() - iStart;
printf("Execution on GPU, configuration <<<%d, %d>>> Time elapsed %f" \
"sec\n", grid.x, block.x, iElaps);
// copy kernel result back to the host side
cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
// check device results
checkResult(hostRef, gpuRef, nElem);
// free device global memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
return(0);
}
|
b86fa8f88b64af01034c55f99a63c145d41c8eb2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<bits/stdc++.h>
using namespace std;
#define BLOCK_SIZE 16
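// Tiled matrix multiply: each thread block computes one BLOCK_SIZE x BLOCK_SIZE tile of C,
// streaming matching tiles of A and B through shared memory and accumulating partial dot
// products over gridDim.x tile steps; out-of-range tile elements are padded with zero.
// Note: with n = 10000 and entries up to n-1, the int accumulator `temp` can overflow
// (row-column sums reach ~1e12), so a 64-bit accumulator would be needed for exact results.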
__global__ void matrix_multiplication(int *dev_a, int *dev_b, int *dev_c, int n){
__shared__ int tile_a[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int tile_b[BLOCK_SIZE][BLOCK_SIZE];
int row = blockIdx.y*BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x*BLOCK_SIZE + threadIdx.x;
int temp = 0;
int idx;
for (int i=0; i<gridDim.x; ++i){
idx = row*n + i*BLOCK_SIZE + threadIdx.x;
if (idx >= n*n){
tile_a[threadIdx.y][threadIdx.x] = 0;
}
else{
tile_a[threadIdx.y][threadIdx.x] = dev_a[idx];
}
idx = (i*BLOCK_SIZE + threadIdx.y)*n + col;
if (idx >= n*n){
tile_b[threadIdx.y][threadIdx.x] = 0;
}
else{
tile_b[threadIdx.y][threadIdx.x] = dev_b[idx];
}
__syncthreads();
for (int j=0; j<BLOCK_SIZE; ++j){
temp += tile_a[threadIdx.y][j]*tile_b[j][threadIdx.x];
}
__syncthreads();
}
if (row<n && col<n){
dev_c[row*n+col] = temp;
}
}
int main(int argc, char const *argv[]){
int n;
srand(1);
int *a, *b, *c;
n=10000;
hipHostMalloc((void **) &a, sizeof(int)*n*n);
hipHostMalloc((void **) &b, sizeof(int)*n*n);
hipHostMalloc((void **) &c, sizeof(int)*n*n);
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
a[i * n + j] = rand() % n;
b[i * n + j] = rand() % n;
}
}
float time_taken;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
int *dev_a, *dev_b, *dev_c;
hipMalloc((void **) &dev_a, sizeof(int)*n*n);
hipMalloc((void **) &dev_b, sizeof(int)*n*n);
hipMalloc((void **) &dev_c, sizeof(int)*n*n);
hipMemcpy(dev_a, a, sizeof(int)*n*n, hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, sizeof(int)*n*n, hipMemcpyHostToDevice);
unsigned int grid_rows = (n+BLOCK_SIZE-1)/BLOCK_SIZE;
unsigned int grid_cols = (n+BLOCK_SIZE-1)/BLOCK_SIZE;
dim3 dimGrid(grid_cols, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
hipLaunchKernelGGL(( matrix_multiplication), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_a, dev_b, dev_c, n);
hipMemcpy(c, dev_c, sizeof(int)*n*n, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time_taken, start, stop);
printf("Time elapsed in matrix multiplication on GPU: %f ms.\n",time_taken);
hipHostFree(a);
hipHostFree(b);
hipHostFree(c);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
} | b86fa8f88b64af01034c55f99a63c145d41c8eb2.cu | #include<bits/stdc++.h>
using namespace std;
#define BLOCK_SIZE 16
__global__ void matrix_multiplication(int *dev_a, int *dev_b, int *dev_c, int n){
__shared__ int tile_a[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int tile_b[BLOCK_SIZE][BLOCK_SIZE];
int row = blockIdx.y*BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x*BLOCK_SIZE + threadIdx.x;
int temp = 0;
int idx;
for (int i=0; i<gridDim.x; ++i){
idx = row*n + i*BLOCK_SIZE + threadIdx.x;
if (idx >= n*n){
tile_a[threadIdx.y][threadIdx.x] = 0;
}
else{
tile_a[threadIdx.y][threadIdx.x] = dev_a[idx];
}
idx = (i*BLOCK_SIZE + threadIdx.y)*n + col;
if (idx >= n*n){
tile_b[threadIdx.y][threadIdx.x] = 0;
}
else{
tile_b[threadIdx.y][threadIdx.x] = dev_b[idx];
}
__syncthreads();
for (int j=0; j<BLOCK_SIZE; ++j){
temp += tile_a[threadIdx.y][j]*tile_b[j][threadIdx.x];
}
__syncthreads();
}
if (row<n && col<n){
dev_c[row*n+col] = temp;
}
}
int main(int argc, char const *argv[]){
int n;
srand(1);
int *a, *b, *c;
n=10000;
cudaMallocHost((void **) &a, sizeof(int)*n*n);
cudaMallocHost((void **) &b, sizeof(int)*n*n);
cudaMallocHost((void **) &c, sizeof(int)*n*n);
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
a[i * n + j] = rand() % n;
b[i * n + j] = rand() % n;
}
}
float time_taken;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
int *dev_a, *dev_b, *dev_c;
cudaMalloc((void **) &dev_a, sizeof(int)*n*n);
cudaMalloc((void **) &dev_b, sizeof(int)*n*n);
cudaMalloc((void **) &dev_c, sizeof(int)*n*n);
cudaMemcpy(dev_a, a, sizeof(int)*n*n, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, sizeof(int)*n*n, cudaMemcpyHostToDevice);
unsigned int grid_rows = (n+BLOCK_SIZE-1)/BLOCK_SIZE;
unsigned int grid_cols = (n+BLOCK_SIZE-1)/BLOCK_SIZE;
dim3 dimGrid(grid_cols, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
matrix_multiplication<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c, n);
cudaMemcpy(c, dev_c, sizeof(int)*n*n, cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time_taken, start, stop);
printf("Time elapsed in matrix multiplication on GPU: %f ms.\n",time_taken);
cudaFreeHost(a);
cudaFreeHost(b);
cudaFreeHost(c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
} |
92c401b856f83379c45406180eeb96d8727b4de4.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@author Hartwig Anzt
@author Goran Flegar
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#include "shuffle.cuh"
#include <hip/hip_runtime_api.h>
#define PRECISION_z
#define COMPLEX
#define BLOCKSIZE 32
#define WARP_SIZE 32
#define WRP 32
#define WRQ 4
#include <hip/hip_runtime.h> // for TORCH_HIP_VERSION
#if (TORCH_HIP_VERSION >= 7000) // only for cuda>6000
const int MaxBlockSize = 32;
template <int block_size>
__device__ void
magma_zlowerisai_regs_inv_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaDoubleComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaDoubleComplex *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row ]-1;
magmaDoubleComplex rB; // registers for trsv
magmaDoubleComplex dA[ block_size ]; // registers for trisystem
magmaDoubleComplex rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_Z_ZERO;
}
// generate the triangular systems
int t = Mcol[ mstart + tid ];
int k = Arow[ t+1 ] - 1;
int alim = Arow[ t ]-1;
int l = Mrow[ row+1 ]-1;
int idx = block_size-1;
while( k > alim && l > mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k--;
l--;
idx--;
} else if( acol > mcol ){// need to check next element
k--;
} else { // element does not exist, i.e. l < LC.col[k]
l--; // check next element in the sparsity pattern
idx--; // leave this element equal to zero
}
}
// second: solve the triangular systems - in registers
// we know what the RHS looks like
rB = ( tid == 0 ) ? MAGMA_Z_ONE : MAGMA_Z_ZERO;
// Triangular solve in regs.
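// Each of the block_size lanes owns one row of the dense triangular block: at step k lane k
// finishes its unknown (rB /= rA) and magmablas_zshfl broadcasts it across the warp so the
// lanes below (tid > k) can eliminate that column from their right-hand sides.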
#pragma unroll
for (int k = 0; k < block_size; k++)
{
rA = dA[ k ];
if (k%block_size == tid)
rB /= rA;
magmaDoubleComplex top = magmablas_zshfl(rB, k%block_size);
if ( tid > k)
rB -= (top*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
template <int block_size>
__device__ __forceinline__ void
magma_zlowerisai_regs_inv_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaDoubleComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaDoubleComplex *Mval )
{
if (N == block_size) {
magma_zlowerisai_regs_inv_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_zlowerisai_regs_inv_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
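// The recursive template above peels block_size down at compile time until it matches the
// runtime size N, so every admissible size gets a fully unrolled solver; the <0>
// specialization below terminates the recursion (sizes above MaxBlockSize are silently skipped).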
template <>
__device__ __forceinline__ void
magma_zlowerisai_regs_inv_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaDoubleComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaDoubleComplex *Mval )
{
;
// out of range - do nothing.
// printf("%% error: size out of range: %d\n", N);
}
__global__ void
magma_zlowerisai_regs_inv_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaDoubleComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaDoubleComplex *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
magma_zlowerisai_regs_inv_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <int block_size>
__device__ void
magma_zupperisai_regs_inv_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaDoubleComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaDoubleComplex *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row ]-1;
magmaDoubleComplex rB; // registers for trsv
magmaDoubleComplex dA[ block_size ]; // registers for trisystem
magmaDoubleComplex rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_Z_ZERO;
}
// generate the triangular systems
int t = Mcol[ mstart + tid ];
int k = Arow[ t+1 ] - 1;
int alim = Arow[ t ]-1;
int l = Mrow[ row+1 ]-1;
int idx = block_size-1;
while( k > alim && l > mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k--;
l--;
idx--;
} else if( acol > mcol ){// need to check next element
k--;
} else { // element does not exist, i.e. l < LC.col[k]
l--; // check next element in the sparsity pattern
idx--; // leave this element equal to zero
}
}
// second: solve the triangular systems - in registers
// we know what the RHS looks like
rB = ( tid == block_size-1 ) ? MAGMA_Z_ONE : MAGMA_Z_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = block_size-1; k >-1; k--)
{
rA = dA[ k ];
if (k%block_size == tid)
rB /= rA;
magmaDoubleComplex bottom = magmablas_zshfl(rB, k%block_size);
if ( tid < k)
rB -= (bottom*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
template <int block_size>
__device__ __forceinline__ void
magma_zupperisai_regs_inv_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaDoubleComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaDoubleComplex *Mval )
{
if (N == block_size) {
magma_zupperisai_regs_inv_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_zupperisai_regs_inv_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <>
__device__ __forceinline__ void
magma_zupperisai_regs_inv_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaDoubleComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaDoubleComplex *Mval )
{
;
// out of range - do nothing.
// printf("%% error: size out of range: %d\n", N);
}
__global__ void
magma_zupperisai_regs_inv_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaDoubleComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaDoubleComplex *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
magma_zupperisai_regs_inv_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
#endif
/**
Purpose
-------
This routine is designed to combine all kernels into one.
Arguments
---------
@param[in]
uplotype magma_uplo_t
lower or upper triangular
@param[in]
transtype magma_trans_t
possibility for transposed matrix
@param[in]
diagtype magma_diag_t
unit diagonal or not
@param[in]
L magma_z_matrix
triangular factor for which the ISAI matrix is computed.
Col-Major CSR storage.
@param[in,out]
M magma_z_matrix*
SPAI preconditioner CSR col-major
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zaux
********************************************************************/
extern "C" magma_int_t
magma_zisai_generator_regs(
magma_uplo_t uplotype,
magma_trans_t transtype,
magma_diag_t diagtype,
magma_z_matrix L,
magma_z_matrix *M,
magma_queue_t queue )
{
magma_int_t info = 0;
#if (TORCH_HIP_VERSION >= 7000)
magma_int_t arch = magma_getdevice_arch();
hipDeviceSetCacheConfig( hipFuncCachePreferL1 );
int r2bs1 = 32;
int r2bs2 = 4;
int necessary_blocks = magma_ceildiv(L.num_rows, r2bs2);
int r2dg1 = min( int( sqrt( double( necessary_blocks ))), 65535 );
int r2dg2 = min(magma_ceildiv( necessary_blocks, r2dg1 ), 65535);
int r2dg3 = magma_ceildiv( necessary_blocks, r2dg1*r2dg2 );
dim3 r2block( r2bs1, r2bs2, 1 );
dim3 r2grid( r2dg1, r2dg2, r2dg3 );
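// The blocks needed to cover all rows are folded into a 3-D grid, with the first two
// dimensions capped at 65535, so the launch stays within grid-size limits for very large systems.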
if (arch >= 300) {
if (uplotype == MagmaLower) { //printf("in here lower new kernel\n");
hipLaunchKernelGGL(( magma_zlowerisai_regs_inv_switch), dim3(r2grid), dim3(r2block), 0, queue->cuda_stream() ,
L.num_rows,
L.row,
L.col,
L.val,
M->row,
M->col,
M->val );
}
else { // printf("in here upper new kernel\n");
hipLaunchKernelGGL(( magma_zupperisai_regs_inv_switch), dim3(r2grid), dim3(r2block), 0, queue->cuda_stream() ,
L.num_rows,
L.row,
L.col,
L.val,
M->row,
M->col,
M->val );
}
}
else {
printf( "%% error: ISAI preconditioner requires CUDA ARCHITECTURE >= 300.\n" );
info = MAGMA_ERR_NOT_SUPPORTED;
}
#else
// CUDA < 7000
printf( "%% error: ISAI preconditioner requires CUDA >= 7.0.\n" );
info = MAGMA_ERR_NOT_SUPPORTED;
#endif
return info;
}
| 92c401b856f83379c45406180eeb96d8727b4de4.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@author Hartwig Anzt
@author Goran Flegar
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#include "shuffle.cuh"
#include <cuda_profiler_api.h>
#define PRECISION_z
#define COMPLEX
#define BLOCKSIZE 32
#define WARP_SIZE 32
#define WRP 32
#define WRQ 4
#include <cuda.h> // for CUDA_VERSION
#if (CUDA_VERSION >= 7000) // only for cuda>6000
const int MaxBlockSize = 32;
template <int block_size>
__device__ void
magma_zlowerisai_regs_inv_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaDoubleComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaDoubleComplex *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row ]-1;
magmaDoubleComplex rB; // registers for trsv
magmaDoubleComplex dA[ block_size ]; // registers for trisystem
magmaDoubleComplex rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_Z_ZERO;
}
// generate the triangular systems
int t = Mcol[ mstart + tid ];
int k = Arow[ t+1 ] - 1;
int alim = Arow[ t ]-1;
int l = Mrow[ row+1 ]-1;
int idx = block_size-1;
while( k > alim && l > mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k--;
l--;
idx--;
} else if( acol > mcol ){// need to check next element
k--;
} else { // element does not exist, i.e. l < LC.col[k]
l--; // check next element in the sparsity pattern
idx--; // leave this element equal to zero
}
}
// second: solve the triangular systems - in registers
// we know what the RHS looks like
rB = ( tid == 0 ) ? MAGMA_Z_ONE : MAGMA_Z_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < block_size; k++)
{
rA = dA[ k ];
if (k%block_size == tid)
rB /= rA;
magmaDoubleComplex top = magmablas_zshfl(rB, k%block_size);
if ( tid > k)
rB -= (top*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
template <int block_size>
__device__ __forceinline__ void
magma_zlowerisai_regs_inv_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaDoubleComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaDoubleComplex *Mval )
{
if (N == block_size) {
magma_zlowerisai_regs_inv_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_zlowerisai_regs_inv_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <>
__device__ __forceinline__ void
magma_zlowerisai_regs_inv_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaDoubleComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaDoubleComplex *Mval )
{
;
// out of range - do nothing.
// printf("%% error: size out of range: %d\n", N);
}
__global__ void
magma_zlowerisai_regs_inv_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaDoubleComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaDoubleComplex *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
magma_zlowerisai_regs_inv_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <int block_size>
__device__ void
magma_zupperisai_regs_inv_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaDoubleComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaDoubleComplex *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row ]-1;
magmaDoubleComplex rB; // registers for trsv
magmaDoubleComplex dA[ block_size ]; // registers for trisystem
magmaDoubleComplex rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_Z_ZERO;
}
// generate the triangular systems
int t = Mcol[ mstart + tid ];
int k = Arow[ t+1 ] - 1;
int alim = Arow[ t ]-1;
int l = Mrow[ row+1 ]-1;
int idx = block_size-1;
while( k > alim && l > mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k--;
l--;
idx--;
} else if( acol > mcol ){// need to check next element
k--;
} else { // element does not exist, i.e. l < LC.col[k]
l--; // check next element in the sparsity pattern
idx--; // leave this element equal to zero
}
}
// second: solve the triangular systems - in registers
// we know what the RHS looks like
rB = ( tid == block_size-1 ) ? MAGMA_Z_ONE : MAGMA_Z_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = block_size-1; k >-1; k--)
{
rA = dA[ k ];
if (k%block_size == tid)
rB /= rA;
magmaDoubleComplex bottom = magmablas_zshfl(rB, k%block_size);
if ( tid < k)
rB -= (bottom*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
template <int block_size>
__device__ __forceinline__ void
magma_zupperisai_regs_inv_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaDoubleComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaDoubleComplex *Mval )
{
if (N == block_size) {
magma_zupperisai_regs_inv_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_zupperisai_regs_inv_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <>
__device__ __forceinline__ void
magma_zupperisai_regs_inv_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaDoubleComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaDoubleComplex *Mval )
{
;
// out of range - do nothing.
// printf("%% error: size out of range: %d\n", N);
}
__global__ void
magma_zupperisai_regs_inv_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaDoubleComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaDoubleComplex *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
magma_zupperisai_regs_inv_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
#endif
/**
Purpose
-------
This routine is designed to combine all kernels into one.
Arguments
---------
@param[in]
uplotype magma_uplo_t
lower or upper triangular
@param[in]
transtype magma_trans_t
possibility for transposed matrix
@param[in]
diagtype magma_diag_t
unit diagonal or not
@param[in]
L magma_z_matrix
triangular factor for which the ISAI matrix is computed.
Col-Major CSR storage.
@param[in,out]
M magma_z_matrix*
SPAI preconditioner CSR col-major
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zaux
********************************************************************/
extern "C" magma_int_t
magma_zisai_generator_regs(
magma_uplo_t uplotype,
magma_trans_t transtype,
magma_diag_t diagtype,
magma_z_matrix L,
magma_z_matrix *M,
magma_queue_t queue )
{
magma_int_t info = 0;
#if (CUDA_VERSION >= 7000)
magma_int_t arch = magma_getdevice_arch();
cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 );
int r2bs1 = 32;
int r2bs2 = 4;
int necessary_blocks = magma_ceildiv(L.num_rows, r2bs2);
int r2dg1 = min( int( sqrt( double( necessary_blocks ))), 65535 );
int r2dg2 = min(magma_ceildiv( necessary_blocks, r2dg1 ), 65535);
int r2dg3 = magma_ceildiv( necessary_blocks, r2dg1*r2dg2 );
dim3 r2block( r2bs1, r2bs2, 1 );
dim3 r2grid( r2dg1, r2dg2, r2dg3 );
if (arch >= 300) {
if (uplotype == MagmaLower) { //printf("in here lower new kernel\n");
magma_zlowerisai_regs_inv_switch<<< r2grid, r2block, 0, queue->cuda_stream() >>>(
L.num_rows,
L.row,
L.col,
L.val,
M->row,
M->col,
M->val );
}
else { // printf("in here upper new kernel\n");
magma_zupperisai_regs_inv_switch<<< r2grid, r2block, 0, queue->cuda_stream() >>>(
L.num_rows,
L.row,
L.col,
L.val,
M->row,
M->col,
M->val );
}
}
else {
printf( "%% error: ISAI preconditioner requires CUDA ARCHITECTURE >= 300.\n" );
info = MAGMA_ERR_NOT_SUPPORTED;
}
#else
// CUDA < 7000
printf( "%% error: ISAI preconditioner requires CUDA >= 7.0.\n" );
info = MAGMA_ERR_NOT_SUPPORTED;
#endif
return info;
}
|
eccb1dbafda5ea83153212319262ab96c41b911e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//********************************************//
// MAC0219/5742 - EP3 //
// EP3 - Mandelbrot //
// Bruna Bazaluk, Felipe Serras, Ricardo Kojo //
//********************************************//
//*File containing the functions for GPU processing.*//
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <thrust/complex.h> // Complex-number handling for CPU and GPU
#include <png.h>
using namespace std;
#define ITERATIONS 1000
//Declares the headers of external files to be used:
inline void setColorValue(png_byte *ptr, double val);
int printImage(string file_name, int w, int h, float *buffer_image);
float maximize(float *array, int array_size);
// GPU version of the buffer-image creation function, which determines whether each complex
// number belongs to the Mandelbrot set. It is very similar to the CPU version; the main
// difference is in how the pixels are traversed:
__global__ void mbrot_func_gpu(float c0_r, float c0_i, float c1_r, float c1_i, int w, int h, int iteractions, float *buffer_image)
{
// The buffer image is assumed to have been allocated already, since it must be allocated in GPU memory:
float d_x = (c1_r - c0_r) / (float)w;
float d_y = (c1_i - c0_i) / (float)h;
// For each call, the loop index and stride are computed from the thread number and the number of the
// GPU block executing it. This guarantees that no two threads do the same work:
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < w * h; i += stride)
{
int y = i / w;
int x = i % w;
thrust::complex<float> current;
current.real(0);
current.imag(0);
thrust::complex<float> last;
last.real(0);
last.imag(0);
thrust::complex<float> c;
c.real(c0_r + (x * d_x));
c.imag(c0_i + (y * d_y));
// printf("%d ",i);
float abs = 0.0;
bool mandel = 1;
for (int t = 1; t < iteractions; ++t)
{
current = last * last + c;
abs = thrust::abs(current);
if (abs > 2)
{
mandel = 0;
buffer_image[y * w + x] = (float)t;
break; // color based on the t at which it stopped
}
last = current;
}
if (mandel)
{
buffer_image[y * w + x] = 0.0;
}
}
}
// GPU version of the buffer normalization:
__global__ void normalizeBuffer_gpu(float *buffer_image, int buffer_size, float buffer_max)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < buffer_size; i += stride)
{
buffer_image[i] = buffer_image[i] / buffer_max;
}
}
//Main function for GPU processing:
float *main_gpu(float C0_REAL, float C0_IMAG, float C1_REAL, float C1_IMAG, int WIDTH, int HEIGHT, int THREADS, string SAIDA)
{
int blockSize = THREADS;
int numBlocks = (WIDTH * HEIGHT + blockSize - 1) / blockSize;
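// e.g. (hypothetical inputs) WIDTH = HEIGHT = 1024 and THREADS = 256 give
// numBlocks = (1048576 + 255) / 256 = 4096; together with the grid-stride loop in the
// kernel this covers every pixel even when the launch is smaller than the image.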
float *buffer_image;
hipMallocManaged(&buffer_image, WIDTH * HEIGHT * sizeof(float)); // Allocate GPU memory for the buffer image
if (buffer_image == NULL)
{
cerr << "Falha ao criar o Buffer da imagem." << endl;
return buffer_image;
}
// Generate the buffer image:
hipLaunchKernelGGL(( mbrot_func_gpu), dim3(numBlocks), dim3(blockSize), 0, 0, C0_REAL, C0_IMAG, C1_REAL, C1_IMAG, WIDTH, HEIGHT, ITERATIONS, buffer_image);
hipDeviceSynchronize(); // Wait for the computation to finish before continuing the sequential part
hipDeviceSynchronize(); // Wait a little longer.
float *buffer_image_cpu = (float *)malloc(WIDTH * HEIGHT * sizeof(float));
hipMemcpy(buffer_image_cpu, buffer_image, WIDTH * HEIGHT * sizeof(float), hipMemcpyDeviceToHost);
hipFree(buffer_image); // Free the CUDA memory allocated for the buffer
return buffer_image_cpu; // Time to say goodbye.
} | eccb1dbafda5ea83153212319262ab96c41b911e.cu | //********************************************//
// MAC0219/5742 - EP3 //
// EP3 - Mandelbrot //
// Bruna Bazaluk, Felipe Serras, Ricardo Kojo //
//********************************************//
//*File containing the functions for GPU processing.*//
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <thrust/complex.h> // Complex-number handling for CPU and GPU
#include <png.h>
using namespace std;
#define ITERATIONS 1000
//Declares the headers of external files to be used:
inline void setColorValue(png_byte *ptr, double val);
int printImage(string file_name, int w, int h, float *buffer_image);
float maximize(float *array, int array_size);
// GPU version of the buffer-image creation function, which determines whether each complex
// number belongs to the Mandelbrot set. It is very similar to the CPU version; the main
// difference is in how the pixels are traversed:
__global__ void mbrot_func_gpu(float c0_r, float c0_i, float c1_r, float c1_i, int w, int h, int iteractions, float *buffer_image)
{
// The buffer image is assumed to have been allocated already, since it must be allocated in GPU memory:
float d_x = (c1_r - c0_r) / (float)w;
float d_y = (c1_i - c0_i) / (float)h;
// For each call, the loop index and stride are computed from the thread number and the number of the
// GPU block executing it. This guarantees that no two threads do the same work:
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < w * h; i += stride)
{
int y = i / w;
int x = i % w;
thrust::complex<float> current;
current.real(0);
current.imag(0);
thrust::complex<float> last;
last.real(0);
last.imag(0);
thrust::complex<float> c;
c.real(c0_r + (x * d_x));
c.imag(c0_i + (y * d_y));
// printf("%d ",i);
float abs = 0.0;
bool mandel = 1;
for (int t = 1; t < iteractions; ++t)
{
current = last * last + c;
abs = thrust::abs(current);
if (abs > 2)
{
mandel = 0;
buffer_image[y * w + x] = (float)t;
break; // color based on the t at which it stopped
}
last = current;
}
if (mandel)
{
buffer_image[y * w + x] = 0.0;
}
}
}
// GPU version of the buffer normalization:
__global__ void normalizeBuffer_gpu(float *buffer_image, int buffer_size, float buffer_max)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < buffer_size; i += stride)
{
buffer_image[i] = buffer_image[i] / buffer_max;
}
}
//Main function for GPU processing:
float *main_gpu(float C0_REAL, float C0_IMAG, float C1_REAL, float C1_IMAG, int WIDTH, int HEIGHT, int THREADS, string SAIDA)
{
int blockSize = THREADS;
int numBlocks = (WIDTH * HEIGHT + blockSize - 1) / blockSize;
float *buffer_image;
cudaMallocManaged(&buffer_image, WIDTH * HEIGHT * sizeof(float)); // Allocate GPU memory for the buffer image
if (buffer_image == NULL)
{
cerr << "Falha ao criar o Buffer da imagem." << endl;
return buffer_image;
}
// Generate the buffer image:
mbrot_func_gpu<<<numBlocks, blockSize>>>(C0_REAL, C0_IMAG, C1_REAL, C1_IMAG, WIDTH, HEIGHT, ITERATIONS, buffer_image);
cudaDeviceSynchronize(); // Wait for the computation to finish before continuing the sequential part
cudaDeviceSynchronize(); // Wait a little longer.
float *buffer_image_cpu = (float *)malloc(WIDTH * HEIGHT * sizeof(float));
cudaMemcpy(buffer_image_cpu, buffer_image, WIDTH * HEIGHT * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(buffer_image); // Free the CUDA memory allocated for the buffer
return buffer_image_cpu; // Time to say goodbye.
} |
5271a21918dee398e856356f9be4d973857e4689.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_tea_leaf_zeqxty_kernel;
int xdim0_tea_leaf_zeqxty_kernel_h = -1;
int ydim0_tea_leaf_zeqxty_kernel_h = -1;
__constant__ int xdim1_tea_leaf_zeqxty_kernel;
int xdim1_tea_leaf_zeqxty_kernel_h = -1;
int ydim1_tea_leaf_zeqxty_kernel_h = -1;
__constant__ int xdim2_tea_leaf_zeqxty_kernel;
int xdim2_tea_leaf_zeqxty_kernel_h = -1;
int ydim2_tea_leaf_zeqxty_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#define OPS_ACC0(x,y) (x+xdim0_tea_leaf_zeqxty_kernel*(y))
#define OPS_ACC1(x,y) (x+xdim1_tea_leaf_zeqxty_kernel*(y))
#define OPS_ACC2(x,y) (x+xdim2_tea_leaf_zeqxty_kernel*(y))
//user function
__device__
void tea_leaf_zeqxty_kernel_gpu(double * z, const double * x, const double * y) {
z[OPS_ACC0(0,0)] = x[OPS_ACC1(0,0)] * y[OPS_ACC2(0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
__global__ void ops_tea_leaf_zeqxty_kernel(
double* __restrict arg0,
const double* __restrict arg1,
const double* __restrict arg2,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_tea_leaf_zeqxty_kernel;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_tea_leaf_zeqxty_kernel;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_tea_leaf_zeqxty_kernel;
if (idx_x < size0 && idx_y < size1) {
tea_leaf_zeqxty_kernel_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_tea_leaf_zeqxty_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args,3,range,41)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(41,"tea_leaf_zeqxty_kernel");
OPS_kernels[41].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<2; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
if (xdim0 != xdim0_tea_leaf_zeqxty_kernel_h || xdim1 != xdim1_tea_leaf_zeqxty_kernel_h || xdim2 != xdim2_tea_leaf_zeqxty_kernel_h) {
hipMemcpyToSymbol( xdim0_tea_leaf_zeqxty_kernel, &xdim0, sizeof(int) );
xdim0_tea_leaf_zeqxty_kernel_h = xdim0;
hipMemcpyToSymbol( xdim1_tea_leaf_zeqxty_kernel, &xdim1, sizeof(int) );
xdim1_tea_leaf_zeqxty_kernel_h = xdim1;
hipMemcpyToSymbol( xdim2_tea_leaf_zeqxty_kernel, &xdim2, sizeof(int) );
xdim2_tea_leaf_zeqxty_kernel_h = xdim2;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
char *p_a[3];
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 *
(start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 *
(start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]);
p_a[2] = (char *)args[2].data_d + base2;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[41].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_tea_leaf_zeqxty_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2],x_size, y_size);
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[41].time += t1-t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[41].mpi_time += t2-t1;
OPS_kernels[41].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[41].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[41].transfer += ops_compute_transfer(dim, start, end, &arg2);
}
}
| 5271a21918dee398e856356f9be4d973857e4689.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_tea_leaf_zeqxty_kernel;
int xdim0_tea_leaf_zeqxty_kernel_h = -1;
int ydim0_tea_leaf_zeqxty_kernel_h = -1;
__constant__ int xdim1_tea_leaf_zeqxty_kernel;
int xdim1_tea_leaf_zeqxty_kernel_h = -1;
int ydim1_tea_leaf_zeqxty_kernel_h = -1;
__constant__ int xdim2_tea_leaf_zeqxty_kernel;
int xdim2_tea_leaf_zeqxty_kernel_h = -1;
int ydim2_tea_leaf_zeqxty_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#define OPS_ACC0(x,y) (x+xdim0_tea_leaf_zeqxty_kernel*(y))
#define OPS_ACC1(x,y) (x+xdim1_tea_leaf_zeqxty_kernel*(y))
#define OPS_ACC2(x,y) (x+xdim2_tea_leaf_zeqxty_kernel*(y))
//user function
__device__
void tea_leaf_zeqxty_kernel_gpu(double * z, const double * x, const double * y) {
z[OPS_ACC0(0,0)] = x[OPS_ACC1(0,0)] * y[OPS_ACC2(0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
__global__ void ops_tea_leaf_zeqxty_kernel(
double* __restrict arg0,
const double* __restrict arg1,
const double* __restrict arg2,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_tea_leaf_zeqxty_kernel;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_tea_leaf_zeqxty_kernel;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_tea_leaf_zeqxty_kernel;
if (idx_x < size0 && idx_y < size1) {
tea_leaf_zeqxty_kernel_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_tea_leaf_zeqxty_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args,3,range,41)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(41,"tea_leaf_zeqxty_kernel");
OPS_kernels[41].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<2; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
if (xdim0 != xdim0_tea_leaf_zeqxty_kernel_h || xdim1 != xdim1_tea_leaf_zeqxty_kernel_h || xdim2 != xdim2_tea_leaf_zeqxty_kernel_h) {
cudaMemcpyToSymbol( xdim0_tea_leaf_zeqxty_kernel, &xdim0, sizeof(int) );
xdim0_tea_leaf_zeqxty_kernel_h = xdim0;
cudaMemcpyToSymbol( xdim1_tea_leaf_zeqxty_kernel, &xdim1, sizeof(int) );
xdim1_tea_leaf_zeqxty_kernel_h = xdim1;
cudaMemcpyToSymbol( xdim2_tea_leaf_zeqxty_kernel, &xdim2, sizeof(int) );
xdim2_tea_leaf_zeqxty_kernel_h = xdim2;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
char *p_a[3];
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 *
(start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 *
(start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]);
p_a[2] = (char *)args[2].data_d + base2;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[41].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
ops_tea_leaf_zeqxty_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2],x_size, y_size);
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[41].time += t1-t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[41].mpi_time += t2-t1;
OPS_kernels[41].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[41].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[41].transfer += ops_compute_transfer(dim, start, end, &arg2);
}
}
|
5783ab63aa6368635ee1cb021364a06e31081132.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/sinh_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SinHForward(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = sinh(in[index]);
}
}
template <typename Dtype>
void SinHLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SinHForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
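// Backward pass: since d/dx sinh(x) = cosh(x), the chain rule gives bottom_diff = cosh(bottom_data) * top_diff.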
template <typename Dtype>
__global__ void SinHBackward(const int n, const Dtype* top_diff,
const Dtype* bottom_data, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, n) {
bottom_diff[index] = cosh(bottom_data[index])*top_diff[index];
}
}
template <typename Dtype>
void SinHLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SinHBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, bottom_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SinHLayer);
} // namespace caffe
| 5783ab63aa6368635ee1cb021364a06e31081132.cu | #include <vector>
#include "caffe/layers/sinh_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SinHForward(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = sinh(in[index]);
}
}
template <typename Dtype>
void SinHLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
SinHForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
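// Backward pass: since d/dx sinh(x) = cosh(x), the chain rule gives bottom_diff = cosh(bottom_data) * top_diff.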
template <typename Dtype>
__global__ void SinHBackward(const int n, const Dtype* top_diff,
const Dtype* bottom_data, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, n) {
bottom_diff[index] = cosh(bottom_data[index])*top_diff[index];
}
}
template <typename Dtype>
void SinHLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
SinHBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SinHLayer);
} // namespace caffe
|
f71e8a93b7ec8ad3600c55cdfcc5e855c65f51cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/batch_svd/batch_qr.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Wajih Halim Boukaram
* @date 2018-11-14
**/
#include <stdio.h>
#include <rocblas.h>
#include <algorithm>
#include "kblas.h"
#include "kblas_struct.h"
#include "kblas_gpu_util.ch"
#include "qr_kernels_hip.cuh"
#ifdef HLIB_PROFILING_ENABLED
#include "perf_counter.h"
#endif
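// QR_LOAD reads matrix entries through the GPU's read-only data cache via __ldg.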
#define QR_LOAD(m) __ldg(&(m))
#define KBLAS_QR_CHECK_RET(func) { if( (func) != KBLAS_Success ) return KBLAS_UnknownError; }
#define HLIB_R_COLS_PER_THREAD 8
#define HLIB_R_MAX_THREAD_Y 8
// Apply the generated householder vectors at the current panel to the trailing submatrix
template<class T, class T_ptr, int BLOCK_SIZE, int APPLY_FORWARD, class Dim_Type>
__global__
void batch_apply_hh_panel(
T_ptr m_batch, Dim_Type ldm_batch, int stride, T_ptr tau_batch, int stride_tau,
Dim_Type rows_batch, Dim_Type cols_batch, int row_offset, int col_offset,
int smem_entries, int panel_rows, int num_ops
)
{
extern __shared__ char sdata[];
const int HH_CB = QR_Config<T>::HH_CB;
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int op_id = thread_id / BLOCK_SIZE;
if(op_id >= num_ops) return;
int local_op_id = threadIdx.x / BLOCK_SIZE;
int local_tid = threadIdx.x % BLOCK_SIZE;
T* s_tau = (T*)sdata + smem_entries * local_op_id;
T* smem = s_tau + HH_CB;
int rows = getOperationDim(rows_batch, op_id);
int cols = getOperationDim(cols_batch, op_id);
int ldm = getOperationDim(ldm_batch, op_id);
if(row_offset >= rows || col_offset >= cols)
return;
int trailing_blocks = cols - col_offset - HH_CB;
if(trailing_blocks <= 0) return;
trailing_blocks = iDivUp(trailing_blocks, HH_CB);
T* m_panel = getOperationPtr<T>(m_batch, op_id, stride) + row_offset + col_offset * ldm;
T* tau_panel = getOperationPtr<T>(tau_batch, op_id, stride_tau) + col_offset;
T* trailing_panel = m_panel;
int nopadding = (local_tid < panel_rows && local_tid + row_offset < rows);
// Load in tau from global memory
if(local_tid < HH_CB && local_tid + col_offset < cols)
s_tau[local_tid] = QR_LOAD(tau_panel[local_tid]);
if(BLOCK_SIZE != WARP_SIZE) __syncthreads();
// Store the matrix panel in registers
T matrix_row[HH_CB], panel_row[HH_CB];
// Load the panel that we're applying
#pragma unroll
for(int i = 0; i < HH_CB; i++)
panel_row[i] = (nopadding ? QR_LOAD(m_panel[local_tid + i * ldm]) : 0);
int column_index = col_offset;
for(int block = 0; block < trailing_blocks; block++)
{
trailing_panel += HH_CB * ldm;
column_index += HH_CB;
// Load the trailing panel in
#pragma unroll
for(int i = 0; i < HH_CB; i++)
{
matrix_row[i] = 0;
if(nopadding && column_index + i < cols)
matrix_row[i] = QR_LOAD(trailing_panel[local_tid + i * ldm]);
}
if(APPLY_FORWARD)
{
#pragma unroll
for(int i = 0; i < HH_CB; i++)
{
T v = (local_tid > i && nopadding ? panel_row[i] : 0);
if(local_tid == i) v = 1;
qr_apply_househoulder_panel<T, HH_CB, BLOCK_SIZE>(matrix_row, v, s_tau[i], local_tid, smem);
}
}
else
{
#pragma unroll
for(int i = HH_CB - 1; i >= 0; i--)
{
T v = (local_tid > i && nopadding ? panel_row[i] : 0);
if(local_tid == i) v = 1;
qr_apply_househoulder_panel<T, HH_CB, BLOCK_SIZE>(matrix_row, v, s_tau[i], local_tid, smem);
}
}
// Flush the current panel so we can load in the next one
if(nopadding)
{
#pragma unroll
for(int i = 0; i < HH_CB; i++)
if(column_index + i < cols)
trailing_panel[local_tid + i * ldm] = matrix_row[i];
}
}
}
template<class T, class T_ptr, int BLOCK_SIZE, class Dim_Type>
__global__
void batch_qr_panel(
T_ptr __restrict__ m_batch, Dim_Type ldm_batch, int stride, T_ptr tau_batch, int stride_tau,
Dim_Type rows_batch, Dim_Type cols_batch, int row_offset, int col_offset,
int smem_entries, int panel_rows, int num_ops
)
{
extern __shared__ char sdata[];
const int HH_CB = QR_Config<T>::HH_CB;
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int op_id = thread_id / BLOCK_SIZE;
if(op_id >= num_ops) return;
int rows = getOperationDim(rows_batch, op_id);
int cols = getOperationDim(cols_batch, op_id);
int ldm = getOperationDim(ldm_batch, op_id);
if(row_offset >= rows || col_offset >= cols)
return;
int local_op_id = threadIdx.x / BLOCK_SIZE;
int local_tid = threadIdx.x % BLOCK_SIZE;
// Shared memory
T* s_tau = (T*)sdata + smem_entries * local_op_id;
T* s_pivot = s_tau + HH_CB;
T* smem_reduction = s_pivot + 1;
// Global memory
T* m_panel = getOperationPtr<T>(m_batch, op_id, stride) + row_offset + col_offset * ldm;
T* tau_panel = getOperationPtr<T>(tau_batch, op_id, stride_tau) + col_offset;
// Store the matrix panel in registers
T matrix_row[HH_CB];
// Threads with id beyond the remaining rows will be padded with zeros
int nopadding = (local_tid < panel_rows && local_tid + row_offset < rows);
// Load the current panel in
#pragma unroll
for(int i = 0; i < HH_CB; i++)
matrix_row[i] = (nopadding && col_offset + i < cols ? m_panel[local_tid + i * ldm] : 0);
// Factor the panel, generating the current block of R and reflectors
qr_householder_panel<T, HH_CB, BLOCK_SIZE>(matrix_row, s_tau, local_tid, smem_reduction, s_pivot);
// Flush the data to global memory
if(nopadding)
{
#pragma unroll
for(int i = 0; i < HH_CB; i++)
if(col_offset + i < cols)
m_panel[local_tid + i * ldm] = matrix_row[i];
}
if(local_tid < HH_CB && local_tid + col_offset < cols)
tau_panel[local_tid] = s_tau[local_tid];
}
template<class T, class T_ptr, int BLOCK_SIZE>
__global__
void batch_unpackQ_panel(
T_ptr __restrict__ m_batch, int ldm, int stride, T_ptr tau_batch, int stride_tau,
int rows, int cols, int row_offset, int col_offset,
int smem_entries, int panel_rows, int num_ops
)
{
extern __shared__ char sdata[];
const int HH_CB = QR_Config<T>::HH_CB;
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int op_id = thread_id / BLOCK_SIZE;
if(op_id >= num_ops) return;
int local_op_id = threadIdx.x / BLOCK_SIZE;
int local_tid = threadIdx.x % BLOCK_SIZE;
// Shared memory
T* s_tau = (T*)sdata + smem_entries * local_op_id;
T* smem_reduction = s_tau + HH_CB;
// Global memory
T* m_panel = getOperationPtr<T>(m_batch, op_id, stride) + row_offset + col_offset * ldm;
T* tau_panel = getOperationPtr<T>(tau_batch, op_id, stride_tau) + col_offset;
// Store the matrix panel in registers
T matrix_row[HH_CB];
// Threads with id beyond the remaining rows will be padded with zeros
int nopadding = (local_tid < panel_rows);
// Load the current panel in
#pragma unroll
for(int i = 0; i < HH_CB; i++)
matrix_row[i] = (nopadding && col_offset + i < cols ? m_panel[local_tid + i * ldm] : 0);
if(local_tid < HH_CB && local_tid + col_offset < cols) s_tau[local_tid] = tau_panel[local_tid];
if(BLOCK_SIZE != WARP_SIZE) __syncthreads();
// Factor the panel, generating the current block of R and reflectors
qr_unpackQ_panel<T, HH_CB, BLOCK_SIZE>(matrix_row, s_tau, local_tid, smem_reduction);
// Flush the data to global memory
if(nopadding)
{
#pragma unroll
for(int i = 0; i < HH_CB; i++)
if(col_offset + i < cols)
m_panel[local_tid + i * ldm] = matrix_row[i];
}
}
// Annihilate the block (A2) below the current panel (A1)
template<class T, class T_ptr, int BLOCK_SIZE, class Dim_Type>
__global__
void batch_dtsqrt_panel(
T_ptr __restrict__ m_batch, Dim_Type ldm_batch, int stride, T_ptr tau_batch, int stride_tau,
Dim_Type rows_batch, Dim_Type cols_batch, int A1_row_off, int A1_col_off, int A2_row_off, int A2_rows,
int smem_entries_per_op, int num_ops
)
{
extern __shared__ char sdata[];
const int HH_CB = QR_Config<T>::HH_CB;
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int op_id = thread_id / BLOCK_SIZE;
if(op_id >= num_ops) return;
int rows = getOperationDim(rows_batch, op_id);
int cols = getOperationDim(cols_batch, op_id);
int ldm = getOperationDim(ldm_batch, op_id);
if(A1_row_off >= rows || A2_row_off >= rows || A1_col_off >= cols)
return;
int local_op_id = threadIdx.x / BLOCK_SIZE;
int local_tid = threadIdx.x % BLOCK_SIZE;
// Shared memory
T* s_tau = (T*)sdata + smem_entries_per_op * local_op_id;
T* A1_smem = s_tau + HH_CB;
T* reduction_smem = A1_smem + HH_CB * HH_CB;
// Global memory
T* m_global = getOperationPtr<T>(m_batch, op_id, stride);
T* tau_global = getOperationPtr<T>(tau_batch, op_id, stride_tau);
T* A1_global = m_global + A1_row_off + A1_col_off * ldm;
T* A2_global = m_global + A2_row_off + A1_col_off * ldm;
// Store A2 in registers
T A2_matrix_row[HH_CB];
// Load the HH_CB x HH_CB A1 block into shared memory
if(local_tid < HH_CB && A1_row_off + local_tid < rows)
{
#pragma unroll
for(int i = 0; i < HH_CB; i++)
{
A1_smem[local_tid + i * HH_CB] = 0;
if(A1_col_off + i < cols)
A1_smem[local_tid + i * HH_CB] = A1_global[local_tid + i * ldm];
}
}
__syncthreads();
int nopadding = (local_tid < A2_rows && A2_row_off + local_tid < rows);
// Load the A2_rows x HH_CB A2 block into registers
#pragma unroll
for(int i = 0; i < HH_CB; i++)
A2_matrix_row[i] = (nopadding && A1_col_off + i < cols ? A2_global[local_tid + i * ldm] : 0);
// Eliminate the A2 block
dtsqrt_panel<T, HH_CB, BLOCK_SIZE>(A2_matrix_row, s_tau, local_tid, reduction_smem, A1_smem);
// Dump everything back to global memory
if(local_tid < HH_CB)
{
if(local_tid + A1_col_off < cols)
tau_global[local_tid] = s_tau[local_tid];
#pragma unroll
for(int i = 0; i < HH_CB; i++)
if(A1_col_off + i < cols)
A1_global[local_tid + i * ldm] = A1_smem[local_tid + i * HH_CB];
}
if(nopadding)
{
#pragma unroll
for(int i = 0; i < HH_CB; i++)
if(A1_col_off + i < cols)
A2_global[local_tid + i * ldm] = A2_matrix_row[i];
}
}
// Apply the generated householder vectors that annihilated the block at (V) to the trailing block row (A2) and the block row (A1) from the panel above it
template<class T, class T_ptr, int BLOCK_SIZE, class Dim_Type>
__global__
void batch_apply_dtsqrt_panel(
T_ptr __restrict__ m_batch, Dim_Type ldm_batch, int stride, T_ptr tau_batch, int stride_tau,
Dim_Type rows_batch, Dim_Type cols_batch, int V_row_off, int V_col_off, int V_rows, int A1_row_off,
int smem_entries_per_op, int num_ops
)
{
extern __shared__ char sdata[];
const int HH_CB = QR_Config<T>::HH_CB;
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int op_id = thread_id / BLOCK_SIZE;
if(op_id >= num_ops) return;
int rows = getOperationDim(rows_batch, op_id);
int cols = getOperationDim(cols_batch, op_id);
int ldm = getOperationDim(ldm_batch, op_id);
if(V_row_off >= rows || A1_row_off >= rows || V_col_off >= cols)
return;
int trailing_blocks = cols - V_col_off - HH_CB;
if(trailing_blocks <= 0) return;
trailing_blocks = iDivUp(trailing_blocks, HH_CB);
int local_op_id = threadIdx.x / BLOCK_SIZE;
int local_tid = threadIdx.x % BLOCK_SIZE;
// Shared memory
T* s_tau = (T*)sdata + smem_entries_per_op * local_op_id;
T* A1_smem = s_tau + HH_CB;
T* reduction_smem = A1_smem + HH_CB * HH_CB;
// Global memory
T* m_global = getOperationPtr<T>(m_batch, op_id, stride);
T* tau_global = getOperationPtr<T>(tau_batch, op_id, stride_tau);
T* V_global = m_global + V_row_off + V_col_off * ldm;
T* A1_global = m_global + A1_row_off + V_col_off * ldm;
T* A2_global = V_global;
// Store V and A2 in registers
T A2_matrix_row[HH_CB], V_matrix_row[HH_CB];
// Load tau entries into shared memory
if(local_tid < HH_CB)
s_tau[local_tid] = tau_global[local_tid];
// Load the V_rows x HH_CB V block into registers
int no_padding = (local_tid < V_rows && local_tid + V_row_off < rows);
#pragma unroll
for(int i = 0; i < HH_CB; i++)
V_matrix_row[i] = (no_padding ? V_global[local_tid + i * ldm] : 0);
int column_index = V_col_off;
for(int b = 0; b < trailing_blocks; b++)
{
A1_global += HH_CB * ldm;
A2_global += HH_CB * ldm;
column_index += HH_CB;
// Load A1 into shared memory
if(local_tid < HH_CB)
for(int i = 0; i < HH_CB; i++)
A1_smem[local_tid + i * HH_CB] = (column_index + i < cols ? A1_global[local_tid + i * ldm] : 0);
// Load the V_rows x HH_CB A2 block into registers
#pragma unroll
for(int i = 0; i < HH_CB; i++)
A2_matrix_row[i] = (no_padding && column_index + i < cols ? A2_global[local_tid + i * ldm] : 0);
__syncthreads();
// Update the blocks
#pragma unroll
for(int i = 0; i < HH_CB; i++)
dtsqrt_apply_panel<T, HH_CB, BLOCK_SIZE>(A2_matrix_row, V_matrix_row[i], s_tau[i], local_tid, reduction_smem, A1_smem, i);
// Flush blocks back to global memory
if(local_tid < HH_CB)
for(int i = 0; i < HH_CB; i++)
if(column_index + i < cols)
A1_global[local_tid + i * ldm] = A1_smem[local_tid + i * HH_CB];
if(no_padding)
{
#pragma unroll
for(int i = 0; i < HH_CB; i++)
if(column_index + i < cols)
A2_global[local_tid + i * ldm] = A2_matrix_row[i];
}
__syncthreads();
}
}
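// Copy the R factor (the upper-triangular part of each factored matrix) into r_batch, writing zeros below the diagonal.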
template<class T, class T_ptr>
__global__
void batch_qr_copy_R_kernel(T_ptr m_batch, int ldm, int stride_m, T_ptr r_batch, int ldr, int stride_r, int rows, int cols, int ops)
{
int op_id = blockIdx.z;
if(op_id >= ops) return;
int R_rows = (rows > cols ? cols : rows);
int R_cols = cols;
int row_index = blockDim.x * blockIdx.x + threadIdx.x;
int col_index = (blockDim.y * blockIdx.y + threadIdx.y) * HLIB_R_COLS_PER_THREAD;
if(row_index >= R_rows || col_index >= R_cols)
return;
T* m_global = getOperationPtr<T>(m_batch, op_id, stride_m);
T* r_global = getOperationPtr<T>(r_batch, op_id, stride_r);
m_global += row_index + col_index * ldm;
r_global += row_index + col_index * ldr;
T reg_buffer[HLIB_R_COLS_PER_THREAD];
#pragma unroll
for(int j = 0; j < HLIB_R_COLS_PER_THREAD; j++)
if(j + col_index < R_cols)
reg_buffer[j] = (row_index > j + col_index ? 0 : m_global[j * ldm]);
#pragma unroll
for(int j = 0; j < HLIB_R_COLS_PER_THREAD; j++)
if(j + col_index < R_cols)
r_global[j * ldr] = reg_buffer[j];
}
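// Zero the entries strictly above the diagonal of each factored matrix, i.e., clear the off-diagonal part of R in place.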
template<class T, class T_ptr>
__global__
void batch_qr_clear_R_kernel(T_ptr m_batch, int ldm, int stride, int rows, int cols, int ops)
{
int op_id = blockIdx.z;
if(op_id >= ops) return;
int R_rows = (rows > cols ? cols : rows);
int R_cols = cols;
int row_index = blockDim.x * blockIdx.x + threadIdx.x;
int col_index = (blockDim.y * blockIdx.y + threadIdx.y) * HLIB_R_COLS_PER_THREAD;
if(row_index >= R_rows || col_index >= R_cols)
return;
T* m_global = getOperationPtr<T>(m_batch, op_id, stride);
m_global += row_index + col_index * ldm;
#pragma unroll
for(int j = 0; j < HLIB_R_COLS_PER_THREAD; j++)
if(j + col_index < R_cols && row_index < j + col_index)
m_global[j * ldm] = 0;
}
template<class T, class T_ptr>
int batch_qr_clear_R(kblasHandle_t handle, T_ptr m_batch, int ldm, int stride, int rows, int cols, int ops)
{
int R_rows = (rows > cols ? cols : rows);
int R_cols = cols;
int max_thread_y = HLIB_R_MAX_THREAD_Y;
int thread_x = WARP_SIZE, thread_y = kmin(max_thread_y, iDivUp(R_cols, HLIB_R_COLS_PER_THREAD));
int grid_x = iDivUp(R_rows, thread_x), grid_y = iDivUp(R_cols, thread_y * HLIB_R_COLS_PER_THREAD);
dim3 dimBlock(thread_x, thread_y, 1);
dim3 dimGrid(grid_x, grid_y, ops);
hipLaunchKernelGGL(( batch_qr_clear_R_kernel<T, T_ptr>) , dim3(dimGrid), dim3(dimBlock), 0, handle->stream ,
m_batch, ldm, stride, rows, cols, ops);
check_error_ret( hipGetLastError(), KBLAS_UnknownError );
return KBLAS_Success;
}
template<class T, class T_ptr>
int batch_qr_copy_R(kblasHandle_t handle, T_ptr m_batch, int ldm, int stride_m, T_ptr r_batch, int ldr, int stride_r, int rows, int cols, int ops)
{
int R_rows = (rows > cols ? cols : rows);
int R_cols = cols;
int max_thread_y = HLIB_R_MAX_THREAD_Y;
int thread_x = WARP_SIZE, thread_y = kmin(max_thread_y, iDivUp(R_cols, HLIB_R_COLS_PER_THREAD));
int grid_x = iDivUp(R_rows, thread_x), grid_y = iDivUp(R_cols, thread_y * HLIB_R_COLS_PER_THREAD);
dim3 dimBlock(thread_x, thread_y, 1);
dim3 dimGrid(grid_x, grid_y, ops);
hipLaunchKernelGGL(( batch_qr_copy_R_kernel<T, T_ptr>), dim3(dimGrid), dim3(dimBlock), 0, handle->stream ,
m_batch, ldm, stride_m, r_batch, ldr, stride_r, rows, cols, ops);
check_error_ret( hipGetLastError(), KBLAS_UnknownError );
return KBLAS_Success;
}
template<class T, class T_ptr, class Dim_Type>
int driver_hh_panel(
kblasHandle_t handle, T_ptr m_batch, Dim_Type ldm, int stride, T_ptr tau_batch, int stride_tau,
Dim_Type rows, Dim_Type cols, int num_ops, int row_offset, int col_offset, int panel_rows,
int max_rows, int max_cols
)
{
int ops_per_block = OPS_PER_BLOCK;
const int HH_CB = QR_Config<T>::HH_CB;
if(panel_rows > 256) ops_per_block = 1;
int blocks = iDivUp(num_ops, ops_per_block);
int warps = iDivUp(panel_rows, WARP_SIZE);
dim3 dimBlock( ops_per_block * warps * WARP_SIZE, 1 );
dim3 dimGrid( blocks, 1 );
int smem_reduction = warps * WARP_SIZE * HH_CB + warps * HH_CB;
int smem_tau = HH_CB;
int smem_pivot = 1;
int smem_entries_per_op = smem_reduction + smem_tau + smem_pivot;
int smem_per_block = sizeof(T) * smem_entries_per_op * ops_per_block;
switch(warps)
{
case 1:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 32, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 2:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 64, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 3:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 96, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 4:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 128, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 5:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 160, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 6:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 192, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 7:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 224, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 8:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 256, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 9:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 288, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 10:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 320, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 11:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 352, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 12:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 384, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 13:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 416, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 14:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 448, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 15:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 480, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 16:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 512, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
#ifdef QR_SUPPORT_LARGE
case 17:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 544, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 18:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 576, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 19:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 608, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 20:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 640, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 21:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 672, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 22:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 704, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 23:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 736, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 24:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 768, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 25:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 800, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 26:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 832, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 27:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 864, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 28:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 896, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 29:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 928, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 30:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 960, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 31:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 992, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 32:hipLaunchKernelGGL(( batch_qr_panel<T, T_ptr, 1024, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
#endif
default: { printf("driver_hh_panel: Invalid row size %d\n", panel_rows); return KBLAS_UnknownError; }
}
check_error_ret( hipGetLastError(), KBLAS_UnknownError );
return KBLAS_Success;
}
template<class T, class T_ptr>
int driver_unpackQ_panel(kblasHandle_t handle, T_ptr m_batch, int ldm, int stride, T_ptr tau_batch, int stride_tau, int rows, int cols, int num_ops, int row_offset, int col_offset, int panel_rows)
{
int ops_per_block = OPS_PER_BLOCK;
const int HH_CB = QR_Config<T>::HH_CB;
if(panel_rows > 256) ops_per_block = 1;
int blocks = iDivUp(num_ops, ops_per_block);
int warps = iDivUp(panel_rows, WARP_SIZE);
dim3 dimBlock( ops_per_block * warps * WARP_SIZE, 1 );
dim3 dimGrid( blocks, 1 );
int smem_reduction = warps * WARP_SIZE * HH_CB + warps * HH_CB;
int smem_tau = HH_CB;
int smem_entries_per_op = smem_reduction + smem_tau;
int smem_per_block = sizeof(T) * smem_entries_per_op * ops_per_block;
switch(warps)
{
case 1:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 32>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 2:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 64>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 3:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 96>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 4:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 128>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 5:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 160>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 6:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 192>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 7:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 224>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 8:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 256>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 9:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 288>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 10:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 320>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 11:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 352>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 12:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 384>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 13:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 416>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 14:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 448>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 15:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 480>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 16:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 512>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
#ifdef QR_SUPPORT_LARGE
case 17:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 544>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 18:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 576>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 19:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 608>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 20:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 640>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 21:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 672>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 22:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 704>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 23:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 736>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 24:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 768>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 25:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 800>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 26:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 832>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 27:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 864>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 28:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 896>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 29:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 928>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 30:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 960>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 31:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 992>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 32:hipLaunchKernelGGL(( batch_unpackQ_panel<T, T_ptr, 1024>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
#endif
default: { printf("driver_unpackQ_panel: Invalid row size %d\n", panel_rows); return KBLAS_UnknownError; }
}
check_error_ret( hipGetLastError(), KBLAS_UnknownError );
return KBLAS_Success;
}
template<class T, class T_ptr, int APPLY_FORWARD, class Dim_Type>
int driver_apply_hh_panel(
kblasHandle_t handle, T_ptr m_batch, Dim_Type ldm, int stride, T_ptr tau_batch, int stride_tau,
Dim_Type rows, Dim_Type cols, int num_ops, int row_offset, int col_offset, int panel_rows,
int max_rows, int max_cols
)
{
int ops_per_block = OPS_PER_BLOCK;
const int HH_CB = QR_Config<T>::HH_CB;
if(panel_rows > 256) ops_per_block = 1;
if(max_cols - col_offset <= HH_CB) return KBLAS_Success;
int blocks = iDivUp(num_ops, ops_per_block);
int warps = iDivUp(panel_rows, WARP_SIZE);
dim3 dimBlock( ops_per_block * warps * WARP_SIZE, 1 );
dim3 dimGrid( blocks, 1 );
int smem_padding = warps * HH_CB;
int smem_entries_per_op = warps * WARP_SIZE * HH_CB + smem_padding + HH_CB;
int smem_per_block = sizeof(T) * smem_entries_per_op * ops_per_block;
switch(warps)
{
case 1:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 32, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 2:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 64, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 3:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 96, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 4:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 128, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 5:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 160, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 6:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 192, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 7:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 224, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 8:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 256, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 9:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 288, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 10:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 320, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 11:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 352, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 12:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 384, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 13:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 416, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 14:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 448, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 15:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 480, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 16:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 512, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
#ifdef QR_SUPPORT_LARGE
case 17:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 544, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 18:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 576, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 19:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 608, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 20:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 640, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 21:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 672, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 22:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 704, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 23:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 736, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 24:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 768, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 25:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 800, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 26:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 832, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 27:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 864, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 28:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 896, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 29:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 928, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 30:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 960, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 31:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 992, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 32:hipLaunchKernelGGL(( batch_apply_hh_panel<T, T_ptr, 1024, APPLY_FORWARD, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
#endif
default: {printf("driver_apply_hh_panel: Invalid row size %d\n", panel_rows); return KBLAS_UnknownError;}
}
check_error_ret( hipGetLastError(), KBLAS_UnknownError );
return KBLAS_Success;
}
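// Dispatch the dtsqrt panel kernel: one warp-multiple of threads per operation, sized to
// cover the A2 block rows, with shared memory for the per-warp reductions, tau, and the
// HH_CB x HH_CB A1 block.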
template<class T, class T_ptr, class Dim_Type>
int driver_dtsqrt_panel(
kblasHandle_t handle, T_ptr m_batch, Dim_Type ldm, int stride, T_ptr tau_batch, int stride_tau,
Dim_Type rows, Dim_Type cols, int num_ops, int A1_row_off, int A1_col_off, int A2_row_off, int A2_rows,
int max_rows, int max_cols
)
{
int ops_per_block = OPS_PER_BLOCK;
const int HH_CB = QR_Config<T>::HH_CB;
int blocks = iDivUp(num_ops, ops_per_block);
int warps = iDivUp(A2_rows, WARP_SIZE);
dim3 dimBlock( ops_per_block * warps * WARP_SIZE, 1 );
dim3 dimGrid( blocks, 1 );
int smem_reduction = warps * WARP_SIZE * HH_CB + warps * HH_CB;
int smem_tau = HH_CB;
int smem_A1 = HH_CB * HH_CB;
int smem_entries_per_op = smem_reduction + smem_tau + smem_A1;
int smem_per_block = sizeof(T) * smem_entries_per_op * ops_per_block;
switch(warps)
{
case 1:hipLaunchKernelGGL(( batch_dtsqrt_panel<T, T_ptr, 32, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, A1_row_off, A1_col_off, A2_row_off, A2_rows, smem_entries_per_op, num_ops); break;
case 2:hipLaunchKernelGGL(( batch_dtsqrt_panel<T, T_ptr, 64, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, A1_row_off, A1_col_off, A2_row_off, A2_rows, smem_entries_per_op, num_ops); break;
case 3:hipLaunchKernelGGL(( batch_dtsqrt_panel<T, T_ptr, 96, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, A1_row_off, A1_col_off, A2_row_off, A2_rows, smem_entries_per_op, num_ops); break;
case 4:hipLaunchKernelGGL(( batch_dtsqrt_panel<T, T_ptr, 128, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, A1_row_off, A1_col_off, A2_row_off, A2_rows, smem_entries_per_op, num_ops); break;
default: { printf("driver_dtsqrt_panel: Invalid row size %d\n", A2_rows); return KBLAS_UnknownError; }
}
check_error_ret( hipGetLastError(), KBLAS_UnknownError );
return KBLAS_Success;
}
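// Dispatch the kernel that applies the dtsqrt reflectors stored in the V block to the
// trailing columns of the corresponding A1 and A2 block rows; returns early when the
// panel has no trailing columns left to update.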
template<class T, class T_ptr, class Dim_Type>
int driver_apply_dtsqrt_panel(
kblasHandle_t handle, T_ptr m_batch, Dim_Type ldm, int stride, T_ptr tau_batch, int stride_tau,
Dim_Type rows, Dim_Type cols, int num_ops, int V_row_off, int V_col_off, int V_rows, int A1_row_off,
int max_rows, int max_cols
)
{
int ops_per_block = OPS_PER_BLOCK;
const int HH_CB = QR_Config<T>::HH_CB;
if(max_cols - V_col_off <= HH_CB) return KBLAS_Success;
int blocks = iDivUp(num_ops, ops_per_block);
int warps = iDivUp(V_rows, WARP_SIZE);
dim3 dimBlock( ops_per_block * warps * WARP_SIZE, 1 );
dim3 dimGrid( blocks, 1 );
int smem_reduction = warps * WARP_SIZE * HH_CB + warps * HH_CB;
int smem_tau = HH_CB;
int smem_A1 = HH_CB * HH_CB;
int smem_entries_per_op = smem_reduction + smem_tau + smem_A1;
int smem_per_block = sizeof(T) * smem_entries_per_op * ops_per_block;
switch(warps)
{
case 1:hipLaunchKernelGGL(( batch_apply_dtsqrt_panel<T, T_ptr, 32, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, V_row_off, V_col_off, V_rows, A1_row_off, smem_entries_per_op, num_ops); break;
case 2:hipLaunchKernelGGL(( batch_apply_dtsqrt_panel<T, T_ptr, 64, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, V_row_off, V_col_off, V_rows, A1_row_off, smem_entries_per_op, num_ops); break;
case 3:hipLaunchKernelGGL(( batch_apply_dtsqrt_panel<T, T_ptr, 96, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, V_row_off, V_col_off, V_rows, A1_row_off, smem_entries_per_op, num_ops); break;
case 4:hipLaunchKernelGGL(( batch_apply_dtsqrt_panel<T, T_ptr, 128, Dim_Type>), dim3(dimGrid), dim3(dimBlock), smem_per_block, handle->stream , m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, V_row_off, V_col_off, V_rows, A1_row_off, smem_entries_per_op, num_ops); break;
default: { printf("driver_apply_dtsqrt_panel: Invalid row size %d\n", V_rows); return KBLAS_UnknownError; }
}
check_error_ret( hipGetLastError(), KBLAS_UnknownError );
return KBLAS_Success;
}
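// Blocked Householder QR over a batch of matrices: for each HH_CB-wide column panel,
// factor the diagonal block, apply its reflectors to the trailing submatrix, then
// eliminate the row blocks below the panel with dtsqrt steps and update their trailing
// columns. block_rows limits how many rows a single panel kernel handles at once.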
template<class T, class T_ptr, class Dim_Type>
int batch_qr(kblasHandle_t handle, T_ptr m_batch, Dim_Type ldm, int stride, T_ptr tau_batch, int stride_tau, Dim_Type rows, Dim_Type cols, int num_ops, int block_rows, int max_rows, int max_cols)
{
const int HH_CB = QR_Config<T>::HH_CB;
int rows_per_block = (max_rows < block_rows ? max_rows : block_rows);
int matrix_rank = (max_rows > max_cols ? max_cols : max_rows);
for(int c = 0; c < matrix_rank; c += HH_CB)
{
int upper_panel_height = rows_per_block - c % rows_per_block;
if(c + upper_panel_height > max_rows)
upper_panel_height = max_rows - c;
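// Factor the diagonal panel, generating HH_CB Householder reflectors and a block of R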
KBLAS_QR_CHECK_RET( (driver_hh_panel<T, T_ptr, Dim_Type>(
handle, m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, num_ops,
c, c, upper_panel_height, max_rows, max_cols
)) );
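// Apply the panel reflectors to the trailing columns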
KBLAS_QR_CHECK_RET( (driver_apply_hh_panel<T, T_ptr, 1, Dim_Type>(
handle, m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, num_ops,
c, c, upper_panel_height, max_rows, max_cols
)) );
int remaining_rows = max_rows - c - upper_panel_height;
if(remaining_rows <= 0) continue;
int remaining_row_blocks = iDivUp(remaining_rows, rows_per_block);
for(int rb = 0; rb < remaining_row_blocks; rb++)
{
int A2_row_offset = c + upper_panel_height + rb * rows_per_block;
int A2_rows = (A2_row_offset + rows_per_block > max_rows ? max_rows - A2_row_offset : rows_per_block);
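// Eliminate the A2 row block below the panel against the triangular block on the diagonal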
KBLAS_QR_CHECK_RET( (driver_dtsqrt_panel<T, T_ptr, Dim_Type>(
handle, m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, num_ops,
c, c, A2_row_offset, A2_rows, max_rows, max_cols
)) );
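// Update the trailing columns of the affected A1 and A2 row blocks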
KBLAS_QR_CHECK_RET( (driver_apply_dtsqrt_panel<T, T_ptr, Dim_Type>(
handle, m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, num_ops,
A2_row_offset, c, A2_rows, c, max_rows, max_cols
)) );
}
}
return KBLAS_Success;
}
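// Form the explicit Q factor from the compact QR output: clear the stored R, then sweep
// the panels from the last column block back to the first, applying the reflectors in
// reverse order and expanding each panel into columns of Q.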
template<class T, class T_ptr>
int batch_unpack_Q(kblasHandle_t handle, T_ptr m_batch, int ldm, int stride, T_ptr tau_batch, int stride_tau, int rows, int cols, int num_ops)
{
const int HH_CB = QR_Config<T>::HH_CB;
// Zero out the entries above the diagonal (where R is stored), keeping the diagonal and the reflectors
int matrix_rank = (rows > cols ? cols : rows);
KBLAS_QR_CHECK_RET( (batch_qr_clear_R<T, T_ptr>(handle, m_batch, ldm, stride, rows, matrix_rank, num_ops)) );
int col_start = (matrix_rank % HH_CB == 0 ? matrix_rank - HH_CB : matrix_rank - matrix_rank % HH_CB);
for(int c = col_start; c >= 0; c -= HH_CB)
{
int panel_rows = rows - c;
KBLAS_QR_CHECK_RET( (driver_apply_hh_panel<T, T_ptr, 0, int>(
handle, m_batch, ldm, stride, tau_batch, stride_tau, rows, matrix_rank, num_ops,
c, c, panel_rows, rows, cols
)) );
KBLAS_QR_CHECK_RET( (driver_unpackQ_panel<T, T_ptr>(
handle, m_batch, ldm, stride, tau_batch, stride_tau, rows, matrix_rank, num_ops,
c, c, panel_rows
)) );
}
return KBLAS_Success;
}
///////////////////////////////////////////////////////////////
// Strided routines
///////////////////////////////////////////////////////////////
extern "C" int kblasDgeqrf_batch_strided(kblasHandle_t handle, int m, int n, double* A_strided, int lda, int stride_a, double* tau, int stride_tau, int num_ops)
{
if(m > QR_Config<double>::HH_MAX_ROWS || lda < m)
return KBLAS_Error_WrongInput;
else if(stride_a < m || stride_tau < kmin(m, n))
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_qr<double, double*, int>(handle, A_strided, lda, stride_a, tau, stride_tau, m, n, num_ops, m, m, n);
}
extern "C" int kblasSgeqrf_batch_strided(kblasHandle_t handle, int m, int n, float* A_strided, int lda, int stride_a, float* tau, int stride_tau, int num_ops)
{
if(m > QR_Config<float>::HH_MAX_ROWS || lda < m)
return KBLAS_Error_WrongInput;
else if(stride_a < m || stride_tau < kmin(m, n))
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_qr<float, float*, int>(handle, A_strided, lda, stride_a, tau, stride_tau, m, n, num_ops, m, m, n);
}
extern "C" int kblasDtsqrf_batch_strided(kblasHandle_t handle, int m, int n, double* A_strided, int lda, int stride_a, double* tau, int stride_tau, int num_ops)
{
if(lda < m)
return KBLAS_Error_WrongInput;
else if(stride_a < m || stride_tau < kmin(m, n))
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_qr<double, double*, int>(handle, A_strided, lda, stride_a, tau, stride_tau, m, n, num_ops, ROWS_PER_BLOCK, m, n);
}
extern "C" int kblasStsqrf_batch_strided(kblasHandle_t handle, int m, int n, float* A_strided, int lda, int stride_a, float* tau, int stride_tau, int num_ops)
{
if(lda < m)
return KBLAS_Error_WrongInput;
else if(stride_a < m || stride_tau < kmin(m, n))
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_qr<float, float*, int>(handle, A_strided, lda, stride_a, tau, stride_tau, m, n, num_ops, ROWS_PER_BLOCK, m, n);
}
extern "C" int kblasDorgqr_batch_strided(kblasHandle_t handle, int m, int n, double* A_strided, int lda, int stride_a, double* tau, int stride_tau, int num_ops)
{
if(m > QR_Config<double>::HH_MAX_ROWS || lda < m)
return KBLAS_Error_WrongInput;
else if(stride_a < m || stride_tau < kmin(m, n))
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_unpack_Q<double, double*>(handle, A_strided, lda, stride_a, tau, stride_tau, m, n, num_ops);
}
extern "C" int kblasSorgqr_batch_strided(kblasHandle_t handle, int m, int n, float* A_strided, int lda, int stride_a, float* tau, int stride_tau, int num_ops)
{
if(m > QR_Config<float>::HH_MAX_ROWS || lda < m)
return KBLAS_Error_WrongInput;
else if(stride_a < m || stride_tau < kmin(m, n))
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_unpack_Q<float, float*>(handle, A_strided, lda, stride_a, tau, stride_tau, m, n, num_ops);
}
extern "C" int kblasDcopy_upper_batch_strided(kblasHandle_t handle, int m, int n, double* A_strided, int lda, int stride_a, double* R_strided, int ldr, int stride_R, int num_ops)
{
if(lda < m || ldr < kmin(m, n))
return KBLAS_Error_WrongInput;
else if(stride_a < m || stride_R < n)
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_qr_copy_R<double, double*>(handle, A_strided, lda, stride_a, R_strided, ldr, stride_R, m, n, num_ops);
}
extern "C" int kblasScopy_upper_batch_strided(kblasHandle_t handle, int m, int n, float* A_strided, int lda, int stride_a, float* R_strided, int ldr, int stride_R, int num_ops)
{
if(lda < m || ldr < kmin(m, n))
return KBLAS_Error_WrongInput;
else if(stride_a < m || stride_R < n)
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_qr_copy_R<float, float*>(handle, A_strided, lda, stride_a, R_strided, ldr, stride_R, m, n, num_ops);
}
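/*
Usage sketch (illustrative only, not part of the library): factor a batch of m x n
matrices stored with fixed strides, extract R, then overwrite the factored matrices
with the explicit Q. Assumes a valid kblasHandle_t created elsewhere (e.g. via
kblasCreate) and device buffers d_A (lda x n per matrix), d_R (ldr x n per matrix)
and d_tau (min(m, n) entries per matrix); all names here are placeholders. Note that
the geqrf/orgqr entry points require m <= QR_Config<double>::HH_MAX_ROWS.

kblasDgeqrf_batch_strided(handle, m, n, d_A, lda, stride_a, d_tau, stride_tau, batch);
kblasDcopy_upper_batch_strided(handle, m, n, d_A, lda, stride_a, d_R, ldr, stride_r, batch);
kblasDorgqr_batch_strided(handle, m, n, d_A, lda, stride_a, d_tau, stride_tau, batch);
*/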
///////////////////////////////////////////////////////////////
// Array of pointers routines
///////////////////////////////////////////////////////////////
extern "C" int kblasDgeqrf_batch(kblasHandle_t handle, int m, int n, double** A_array, int lda, double** tau_array, int num_ops)
{
if(m > QR_Config<double>::HH_MAX_ROWS || lda < m)
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_qr<double, double**, int>(handle, A_array, lda, 0, tau_array, 0, m, n, num_ops, m, m, n);
}
extern "C" int kblasSgeqrf_batch(kblasHandle_t handle, int m, int n, float** A_array, int lda, float** tau_array, int num_ops)
{
if(m > QR_Config<float>::HH_MAX_ROWS || lda < m)
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_qr<float, float**, int>(handle, A_array, lda, 0, tau_array, 0, m, n, num_ops, m, m, n);
}
extern "C" int kblasDtsqrf_batch(kblasHandle_t handle, int m, int n, double** A_array, int lda, double** tau_array, int num_ops)
{
if(lda < m)
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_qr<double, double**, int>(handle, A_array, lda, 0, tau_array, 0, m, n, num_ops, ROWS_PER_BLOCK, m, n);
}
extern "C" int kblasStsqrf_batch(kblasHandle_t handle, int m, int n, float** A_array, int lda, float** tau_array, int num_ops)
{
if(lda < m)
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_qr<float, float**, int>(handle, A_array, lda, 0, tau_array, 0, m, n, num_ops, ROWS_PER_BLOCK, m, n);
}
extern "C" int kblasDorgqr_batch(kblasHandle_t handle, int m, int n, double** A_array, int lda, double** tau_array, int num_ops)
{
if(m > QR_Config<double>::HH_MAX_ROWS || lda < m)
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_unpack_Q<double, double**>(handle, A_array, lda, 0, tau_array, 0, m, n, num_ops);
}
extern "C" int kblasSorgqr_batch(kblasHandle_t handle, int m, int n, float** A_array, int lda, float** tau_array, int num_ops)
{
if(m > QR_Config<float>::HH_MAX_ROWS || lda < m)
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_unpack_Q<float, float**>(handle, A_array, lda, 0, tau_array, 0, m, n, num_ops);
}
extern "C" int kblasDcopy_upper_batch(kblasHandle_t handle, int m, int n, double** A_array, int lda, double** R_array, int ldr, int num_ops)
{
if(lda < m || ldr < ::min(m, n))
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_qr_copy_R<double, double**>(handle, A_array, lda, 0, R_array, ldr, 0, m, n, num_ops);
}
extern "C" int kblasScopy_upper_batch(kblasHandle_t handle, int m, int n, float** A_array, int lda, float** R_array, int ldr, int num_ops)
{
if(lda < m || ldr < ::min(m, n))
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_qr_copy_R<float, float**>(handle, A_array, lda, 0, R_array, ldr, 0, m, n, num_ops);
}
///////////////////////////////////////////////////////////////
// Array of pointers variable size routines
///////////////////////////////////////////////////////////////
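// These variants take per-operation dimension arrays (m, n, lda) plus the maximum
// dimensions across the batch, which drive the kernel launch configuration.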
extern "C" int kblasDtsqrf_vbatch(kblasHandle_t handle, int* m, int* n, int max_m, int max_n, double** A_array, int* lda, double** tau_array, int num_ops)
{
return batch_qr<double, double**, int*>(handle, A_array, lda, 0, tau_array, 0, m, n, num_ops, ROWS_PER_BLOCK, max_m, max_n);
}
extern "C" int kblasStsqrf_vbatch(kblasHandle_t handle, int* m, int* n, int max_m, int max_n, float** A_array, int* lda, float** tau_array, int num_ops)
{
return batch_qr<float, float**, int*>(handle, A_array, lda, 0, tau_array, 0, m, n, num_ops, ROWS_PER_BLOCK, max_m, max_n);
}
| f71e8a93b7ec8ad3600c55cdfcc5e855c65f51cb.cu | /**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/batch_svd/batch_qr.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Wajih Halim Boukaram
* @date 2018-11-14
**/
#include <stdio.h>
#include <cublas_v2.h>
#include <algorithm>
#include "kblas.h"
#include "kblas_struct.h"
#include "kblas_gpu_util.ch"
#include "qr_kernels.cuh"
#ifdef HLIB_PROFILING_ENABLED
#include "perf_counter.h"
#endif
#define QR_LOAD(m) __ldg(&(m))
#define KBLAS_QR_CHECK_RET(func) { if( (func) != KBLAS_Success ) return KBLAS_UnknownError; }
#define HLIB_R_COLS_PER_THREAD 8
#define HLIB_R_MAX_THREAD_Y 8
// Apply the generated householder vectors at the current panel to the trailing submatrix
template<class T, class T_ptr, int BLOCK_SIZE, int APPLY_FORWARD, class Dim_Type>
__global__
void batch_apply_hh_panel(
T_ptr m_batch, Dim_Type ldm_batch, int stride, T_ptr tau_batch, int stride_tau,
Dim_Type rows_batch, Dim_Type cols_batch, int row_offset, int col_offset,
int smem_entries, int panel_rows, int num_ops
)
{
extern __shared__ char sdata[];
const int HH_CB = QR_Config<T>::HH_CB;
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int op_id = thread_id / BLOCK_SIZE;
if(op_id >= num_ops) return;
int local_op_id = threadIdx.x / BLOCK_SIZE;
int local_tid = threadIdx.x % BLOCK_SIZE;
T* s_tau = (T*)sdata + smem_entries * local_op_id;
T* smem = s_tau + HH_CB;
int rows = getOperationDim(rows_batch, op_id);
int cols = getOperationDim(cols_batch, op_id);
int ldm = getOperationDim(ldm_batch, op_id);
if(row_offset >= rows || col_offset >= cols)
return;
int trailing_blocks = cols - col_offset - HH_CB;
if(trailing_blocks <= 0) return;
trailing_blocks = iDivUp(trailing_blocks, HH_CB);
T* m_panel = getOperationPtr<T>(m_batch, op_id, stride) + row_offset + col_offset * ldm;
T* tau_panel = getOperationPtr<T>(tau_batch, op_id, stride_tau) + col_offset;
T* trailing_panel = m_panel;
int nopadding = (local_tid < panel_rows && local_tid + row_offset < rows);
// Load in tau from global memory
if(local_tid < HH_CB && local_tid + col_offset < cols)
s_tau[local_tid] = QR_LOAD(tau_panel[local_tid]);
if(BLOCK_SIZE != WARP_SIZE) __syncthreads();
// Store the matrix panel in registers
T matrix_row[HH_CB], panel_row[HH_CB];
// Load the panel that we're applying
#pragma unroll
for(int i = 0; i < HH_CB; i++)
panel_row[i] = (nopadding ? QR_LOAD(m_panel[local_tid + i * ldm]) : 0);
int column_index = col_offset;
for(int block = 0; block < trailing_blocks; block++)
{
trailing_panel += HH_CB * ldm;
column_index += HH_CB;
// Load the trailing panel in
#pragma unroll
for(int i = 0; i < HH_CB; i++)
{
matrix_row[i] = 0;
if(nopadding && column_index + i < cols)
matrix_row[i] = QR_LOAD(trailing_panel[local_tid + i * ldm]);
}
if(APPLY_FORWARD)
{
#pragma unroll
for(int i = 0; i < HH_CB; i++)
{
T v = (local_tid > i && nopadding ? panel_row[i] : 0);
if(local_tid == i) v = 1;
qr_apply_househoulder_panel<T, HH_CB, BLOCK_SIZE>(matrix_row, v, s_tau[i], local_tid, smem);
}
}
else
{
#pragma unroll
for(int i = HH_CB - 1; i >= 0; i--)
{
T v = (local_tid > i && nopadding ? panel_row[i] : 0);
if(local_tid == i) v = 1;
qr_apply_househoulder_panel<T, HH_CB, BLOCK_SIZE>(matrix_row, v, s_tau[i], local_tid, smem);
}
}
// Flush the current panel so we can load in the next one
if(nopadding)
{
#pragma unroll
for(int i = 0; i < HH_CB; i++)
if(column_index + i < cols)
trailing_panel[local_tid + i * ldm] = matrix_row[i];
}
}
}
template<class T, class T_ptr, int BLOCK_SIZE, class Dim_Type>
__global__
void batch_qr_panel(
T_ptr __restrict__ m_batch, Dim_Type ldm_batch, int stride, T_ptr tau_batch, int stride_tau,
Dim_Type rows_batch, Dim_Type cols_batch, int row_offset, int col_offset,
int smem_entries, int panel_rows, int num_ops
)
{
extern __shared__ char sdata[];
const int HH_CB = QR_Config<T>::HH_CB;
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int op_id = thread_id / BLOCK_SIZE;
if(op_id >= num_ops) return;
int rows = getOperationDim(rows_batch, op_id);
int cols = getOperationDim(cols_batch, op_id);
int ldm = getOperationDim(ldm_batch, op_id);
if(row_offset >= rows || col_offset >= cols)
return;
int local_op_id = threadIdx.x / BLOCK_SIZE;
int local_tid = threadIdx.x % BLOCK_SIZE;
// Shared memory
T* s_tau = (T*)sdata + smem_entries * local_op_id;
T* s_pivot = s_tau + HH_CB;
T* smem_reduction = s_pivot + 1;
// Global memory
T* m_panel = getOperationPtr<T>(m_batch, op_id, stride) + row_offset + col_offset * ldm;
T* tau_panel = getOperationPtr<T>(tau_batch, op_id, stride_tau) + col_offset;
// Store the matrix panel in registers
T matrix_row[HH_CB];
// Threads with id beyond the remaining rows will be padded with zeros
int nopadding = (local_tid < panel_rows && local_tid + row_offset < rows);
// Load the current panel in
#pragma unroll
for(int i = 0; i < HH_CB; i++)
matrix_row[i] = (nopadding && col_offset + i < cols ? m_panel[local_tid + i * ldm] : 0);
// Factor the panel, generating the current block of R and reflectors
qr_householder_panel<T, HH_CB, BLOCK_SIZE>(matrix_row, s_tau, local_tid, smem_reduction, s_pivot);
// Flush the data to global memory
if(nopadding)
{
#pragma unroll
for(int i = 0; i < HH_CB; i++)
if(col_offset + i < cols)
m_panel[local_tid + i * ldm] = matrix_row[i];
}
if(local_tid < HH_CB && local_tid + col_offset < cols)
tau_panel[local_tid] = s_tau[local_tid];
}
template<class T, class T_ptr, int BLOCK_SIZE>
__global__
void batch_unpackQ_panel(
T_ptr __restrict__ m_batch, int ldm, int stride, T_ptr tau_batch, int stride_tau,
int rows, int cols, int row_offset, int col_offset,
int smem_entries, int panel_rows, int num_ops
)
{
extern __shared__ char sdata[];
const int HH_CB = QR_Config<T>::HH_CB;
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int op_id = thread_id / BLOCK_SIZE;
if(op_id >= num_ops) return;
int local_op_id = threadIdx.x / BLOCK_SIZE;
int local_tid = threadIdx.x % BLOCK_SIZE;
// Shared memory
T* s_tau = (T*)sdata + smem_entries * local_op_id;
T* smem_reduction = s_tau + HH_CB;
// Global memory
T* m_panel = getOperationPtr<T>(m_batch, op_id, stride) + row_offset + col_offset * ldm;
T* tau_panel = getOperationPtr<T>(tau_batch, op_id, stride_tau) + col_offset;
// Store the matrix panel in registers
T matrix_row[HH_CB];
// Threads with id beyond the remaining rows will be padded with zeros
int nopadding = (local_tid < panel_rows);
// Load the current panel in
#pragma unroll
for(int i = 0; i < HH_CB; i++)
matrix_row[i] = (nopadding && col_offset + i < cols ? m_panel[local_tid + i * ldm] : 0);
if(local_tid < HH_CB && local_tid + col_offset < cols) s_tau[local_tid] = tau_panel[local_tid];
if(BLOCK_SIZE != WARP_SIZE) __syncthreads();
// Factor the panel, generating the current block of R and reflectors
qr_unpackQ_panel<T, HH_CB, BLOCK_SIZE>(matrix_row, s_tau, local_tid, smem_reduction);
// Flush the data to global memory
if(nopadding)
{
#pragma unroll
for(int i = 0; i < HH_CB; i++)
if(col_offset + i < cols)
m_panel[local_tid + i * ldm] = matrix_row[i];
}
}
// Annihilate the block (A2) below the current panel (A1)
template<class T, class T_ptr, int BLOCK_SIZE, class Dim_Type>
__global__
void batch_dtsqrt_panel(
T_ptr __restrict__ m_batch, Dim_Type ldm_batch, int stride, T_ptr tau_batch, int stride_tau,
Dim_Type rows_batch, Dim_Type cols_batch, int A1_row_off, int A1_col_off, int A2_row_off, int A2_rows,
int smem_entries_per_op, int num_ops
)
{
extern __shared__ char sdata[];
const int HH_CB = QR_Config<T>::HH_CB;
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int op_id = thread_id / BLOCK_SIZE;
if(op_id >= num_ops) return;
int rows = getOperationDim(rows_batch, op_id);
int cols = getOperationDim(cols_batch, op_id);
int ldm = getOperationDim(ldm_batch, op_id);
if(A1_row_off >= rows || A2_row_off >= rows || A1_col_off >= cols)
return;
int local_op_id = threadIdx.x / BLOCK_SIZE;
int local_tid = threadIdx.x % BLOCK_SIZE;
// Shared memory
T* s_tau = (T*)sdata + smem_entries_per_op * local_op_id;
T* A1_smem = s_tau + HH_CB;
T* reduction_smem = A1_smem + HH_CB * HH_CB;
// Global memory
T* m_global = getOperationPtr<T>(m_batch, op_id, stride);
T* tau_global = getOperationPtr<T>(tau_batch, op_id, stride_tau);
T* A1_global = m_global + A1_row_off + A1_col_off * ldm;
T* A2_global = m_global + A2_row_off + A1_col_off * ldm;
// Store A2 in registers
T A2_matrix_row[HH_CB];
// Load the HH_CB x HH_CB A1 block into shared memory
if(local_tid < HH_CB && A1_row_off + local_tid < rows)
{
#pragma unroll
for(int i = 0; i < HH_CB; i++)
{
A1_smem[local_tid + i * HH_CB] = 0;
if(A1_col_off + i < cols)
A1_smem[local_tid + i * HH_CB] = A1_global[local_tid + i * ldm];
}
}
__syncthreads();
int nopadding = (local_tid < A2_rows && A2_row_off + local_tid < rows);
// Load the A2_rows x HH_CB A2 block into registers
#pragma unroll
for(int i = 0; i < HH_CB; i++)
A2_matrix_row[i] = (nopadding && A1_col_off + i < cols ? A2_global[local_tid + i * ldm] : 0);
// Eliminate the A2 block
dtsqrt_panel<T, HH_CB, BLOCK_SIZE>(A2_matrix_row, s_tau, local_tid, reduction_smem, A1_smem);
// Dump everything back to global memory
if(local_tid < HH_CB)
{
if(local_tid + A1_col_off < cols)
tau_global[local_tid] = s_tau[local_tid];
#pragma unroll
for(int i = 0; i < HH_CB; i++)
if(A1_col_off + i < cols)
A1_global[local_tid + i * ldm] = A1_smem[local_tid + i * HH_CB];
}
if(nopadding)
{
#pragma unroll
for(int i = 0; i < HH_CB; i++)
if(A1_col_off + i < cols)
A2_global[local_tid + i * ldm] = A2_matrix_row[i];
}
}
// Apply the generated householder vectors that annihilated the block at (V) to the trailing block row (A2) and the block row (A1) from the panel above it
template<class T, class T_ptr, int BLOCK_SIZE, class Dim_Type>
__global__
void batch_apply_dtsqrt_panel(
T_ptr __restrict__ m_batch, Dim_Type ldm_batch, int stride, T_ptr tau_batch, int stride_tau,
Dim_Type rows_batch, Dim_Type cols_batch, int V_row_off, int V_col_off, int V_rows, int A1_row_off,
int smem_entries_per_op, int num_ops
)
{
extern __shared__ char sdata[];
const int HH_CB = QR_Config<T>::HH_CB;
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int op_id = thread_id / BLOCK_SIZE;
if(op_id >= num_ops) return;
int rows = getOperationDim(rows_batch, op_id);
int cols = getOperationDim(cols_batch, op_id);
int ldm = getOperationDim(ldm_batch, op_id);
if(V_row_off >= rows || A1_row_off >= rows || V_col_off >= cols)
return;
int trailing_blocks = cols - V_col_off - HH_CB;
if(trailing_blocks <= 0) return;
trailing_blocks = iDivUp(trailing_blocks, HH_CB);
int local_op_id = threadIdx.x / BLOCK_SIZE;
int local_tid = threadIdx.x % BLOCK_SIZE;
// Shared memory
T* s_tau = (T*)sdata + smem_entries_per_op * local_op_id;
T* A1_smem = s_tau + HH_CB;
T* reduction_smem = A1_smem + HH_CB * HH_CB;
// Global memory
T* m_global = getOperationPtr<T>(m_batch, op_id, stride);
T* tau_global = getOperationPtr<T>(tau_batch, op_id, stride_tau);
T* V_global = m_global + V_row_off + V_col_off * ldm;
T* A1_global = m_global + A1_row_off + V_col_off * ldm;
T* A2_global = V_global;
// Store V and A2 in registers
T A2_matrix_row[HH_CB], V_matrix_row[HH_CB];
// Load tau entries into shared memory
if(local_tid < HH_CB)
s_tau[local_tid] = tau_global[local_tid];
// Load the V_rows x HH_CB V block into registers
int no_padding = (local_tid < V_rows && local_tid + V_row_off < rows);
#pragma unroll
for(int i = 0; i < HH_CB; i++)
V_matrix_row[i] = (no_padding ? V_global[local_tid + i * ldm] : 0);
int column_index = V_col_off;
for(int b = 0; b < trailing_blocks; b++)
{
A1_global += HH_CB * ldm;
A2_global += HH_CB * ldm;
column_index += HH_CB;
// Load A1 into shared memory
if(local_tid < HH_CB)
for(int i = 0; i < HH_CB; i++)
A1_smem[local_tid + i * HH_CB] = (column_index + i < cols ? A1_global[local_tid + i * ldm] : 0);
// Load the V_rows x HH_CB A2 block into registers
#pragma unroll
for(int i = 0; i < HH_CB; i++)
A2_matrix_row[i] = (no_padding && column_index + i < cols ? A2_global[local_tid + i * ldm] : 0);
__syncthreads();
// Update the blocks
#pragma unroll
for(int i = 0; i < HH_CB; i++)
dtsqrt_apply_panel<T, HH_CB, BLOCK_SIZE>(A2_matrix_row, V_matrix_row[i], s_tau[i], local_tid, reduction_smem, A1_smem, i);
// Flush blocks back to global memory
if(local_tid < HH_CB)
for(int i = 0; i < HH_CB; i++)
if(column_index + i < cols)
A1_global[local_tid + i * ldm] = A1_smem[local_tid + i * HH_CB];
if(no_padding)
{
#pragma unroll
for(int i = 0; i < HH_CB; i++)
if(column_index + i < cols)
A2_global[local_tid + i * ldm] = A2_matrix_row[i];
}
__syncthreads();
}
}
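// Copy the R factor (diagonal and above) of each factored matrix into a separate buffer,
// writing zeros below the diagonal of the destination.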
template<class T, class T_ptr>
__global__
void batch_qr_copy_R_kernel(T_ptr m_batch, int ldm, int stride_m, T_ptr r_batch, int ldr, int stride_r, int rows, int cols, int ops)
{
int op_id = blockIdx.z;
if(op_id >= ops) return;
int R_rows = (rows > cols ? cols : rows);
int R_cols = cols;
int row_index = blockDim.x * blockIdx.x + threadIdx.x;
int col_index = (blockDim.y * blockIdx.y + threadIdx.y) * HLIB_R_COLS_PER_THREAD;
if(row_index >= R_rows || col_index >= R_cols)
return;
T* m_global = getOperationPtr<T>(m_batch, op_id, stride_m);
T* r_global = getOperationPtr<T>(r_batch, op_id, stride_r);
m_global += row_index + col_index * ldm;
r_global += row_index + col_index * ldr;
T reg_buffer[HLIB_R_COLS_PER_THREAD];
#pragma unroll
for(int j = 0; j < HLIB_R_COLS_PER_THREAD; j++)
if(j + col_index < R_cols)
reg_buffer[j] = (row_index > j + col_index ? 0 : m_global[j * ldm]);
#pragma unroll
for(int j = 0; j < HLIB_R_COLS_PER_THREAD; j++)
if(j + col_index < R_cols)
r_global[j * ldr] = reg_buffer[j];
}
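// Zero the entries above the diagonal of each factored matrix in place, leaving the
// diagonal and the Householder vectors below it untouched.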
template<class T, class T_ptr>
__global__
void batch_qr_clear_R_kernel(T_ptr m_batch, int ldm, int stride, int rows, int cols, int ops)
{
int op_id = blockIdx.z;
if(op_id >= ops) return;
int R_rows = (rows > cols ? cols : rows);
int R_cols = cols;
int row_index = blockDim.x * blockIdx.x + threadIdx.x;
int col_index = (blockDim.y * blockIdx.y + threadIdx.y) * HLIB_R_COLS_PER_THREAD;
if(row_index >= R_rows || col_index >= R_cols)
return;
T* m_global = getOperationPtr<T>(m_batch, op_id, stride);
m_global += row_index + col_index * ldm;
#pragma unroll
for(int j = 0; j < HLIB_R_COLS_PER_THREAD; j++)
if(j + col_index < R_cols && row_index < j + col_index)
m_global[j * ldm] = 0;
}
template<class T, class T_ptr>
int batch_qr_clear_R(kblasHandle_t handle, T_ptr m_batch, int ldm, int stride, int rows, int cols, int ops)
{
int R_rows = (rows > cols ? cols : rows);
int R_cols = cols;
int max_thread_y = HLIB_R_MAX_THREAD_Y;
int thread_x = WARP_SIZE, thread_y = kmin(max_thread_y, iDivUp(R_cols, HLIB_R_COLS_PER_THREAD));
int grid_x = iDivUp(R_rows, thread_x), grid_y = iDivUp(R_cols, thread_y * HLIB_R_COLS_PER_THREAD);
dim3 dimBlock(thread_x, thread_y, 1);
dim3 dimGrid(grid_x, grid_y, ops);
batch_qr_clear_R_kernel<T, T_ptr> <<< dimGrid, dimBlock, 0, handle->stream >>>
(m_batch, ldm, stride, rows, cols, ops);
check_error_ret( cudaGetLastError(), KBLAS_UnknownError );
return KBLAS_Success;
}
template<class T, class T_ptr>
int batch_qr_copy_R(kblasHandle_t handle, T_ptr m_batch, int ldm, int stride_m, T_ptr r_batch, int ldr, int stride_r, int rows, int cols, int ops)
{
int R_rows = (rows > cols ? cols : rows);
int R_cols = cols;
int max_thread_y = HLIB_R_MAX_THREAD_Y;
int thread_x = WARP_SIZE, thread_y = kmin(max_thread_y, iDivUp(R_cols, HLIB_R_COLS_PER_THREAD));
int grid_x = iDivUp(R_rows, thread_x), grid_y = iDivUp(R_cols, thread_y * HLIB_R_COLS_PER_THREAD);
dim3 dimBlock(thread_x, thread_y, 1);
dim3 dimGrid(grid_x, grid_y, ops);
batch_qr_copy_R_kernel<T, T_ptr><<< dimGrid, dimBlock, 0, handle->stream >>>
(m_batch, ldm, stride_m, r_batch, ldr, stride_r, rows, cols, ops);
check_error_ret( cudaGetLastError(), KBLAS_UnknownError );
return KBLAS_Success;
}
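// Dispatch the panel factorization kernel: the panel height selects the kernel instantiation
// (one warp-multiple of threads per operation), and shared memory holds the per-warp
// reduction workspace, the tau values, and the pivot element.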
template<class T, class T_ptr, class Dim_Type>
int driver_hh_panel(
kblasHandle_t handle, T_ptr m_batch, Dim_Type ldm, int stride, T_ptr tau_batch, int stride_tau,
Dim_Type rows, Dim_Type cols, int num_ops, int row_offset, int col_offset, int panel_rows,
int max_rows, int max_cols
)
{
int ops_per_block = OPS_PER_BLOCK;
const int HH_CB = QR_Config<T>::HH_CB;
if(panel_rows > 256) ops_per_block = 1;
int blocks = iDivUp(num_ops, ops_per_block);
int warps = iDivUp(panel_rows, WARP_SIZE);
dim3 dimBlock( ops_per_block * warps * WARP_SIZE, 1 );
dim3 dimGrid( blocks, 1 );
int smem_reduction = warps * WARP_SIZE * HH_CB + warps * HH_CB;
int smem_tau = HH_CB;
int smem_pivot = 1;
int smem_entries_per_op = smem_reduction + smem_tau + smem_pivot;
int smem_per_block = sizeof(T) * smem_entries_per_op * ops_per_block;
switch(warps)
{
case 1: batch_qr_panel<T, T_ptr, 32, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 2: batch_qr_panel<T, T_ptr, 64, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 3: batch_qr_panel<T, T_ptr, 96, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 4: batch_qr_panel<T, T_ptr, 128, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 5: batch_qr_panel<T, T_ptr, 160, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 6: batch_qr_panel<T, T_ptr, 192, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 7: batch_qr_panel<T, T_ptr, 224, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 8: batch_qr_panel<T, T_ptr, 256, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 9: batch_qr_panel<T, T_ptr, 288, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 10: batch_qr_panel<T, T_ptr, 320, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 11: batch_qr_panel<T, T_ptr, 352, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 12: batch_qr_panel<T, T_ptr, 384, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 13: batch_qr_panel<T, T_ptr, 416, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 14: batch_qr_panel<T, T_ptr, 448, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 15: batch_qr_panel<T, T_ptr, 480, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 16: batch_qr_panel<T, T_ptr, 512, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
#ifdef QR_SUPPORT_LARGE
case 17: batch_qr_panel<T, T_ptr, 544, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 18: batch_qr_panel<T, T_ptr, 576, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 19: batch_qr_panel<T, T_ptr, 608, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 20: batch_qr_panel<T, T_ptr, 640, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 21: batch_qr_panel<T, T_ptr, 672, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 22: batch_qr_panel<T, T_ptr, 704, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 23: batch_qr_panel<T, T_ptr, 736, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 24: batch_qr_panel<T, T_ptr, 768, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 25: batch_qr_panel<T, T_ptr, 800, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 26: batch_qr_panel<T, T_ptr, 832, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 27: batch_qr_panel<T, T_ptr, 864, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 28: batch_qr_panel<T, T_ptr, 896, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 29: batch_qr_panel<T, T_ptr, 928, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 30: batch_qr_panel<T, T_ptr, 960, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 31: batch_qr_panel<T, T_ptr, 992, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 32: batch_qr_panel<T, T_ptr, 1024, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
#endif
default: { printf("driver_hh_panel: Invalid row size %d\n", panel_rows); return KBLAS_UnknownError; }
}
check_error_ret( cudaGetLastError(), KBLAS_UnknownError );
return KBLAS_Success;
}
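// Dispatch the kernel that expands one factored panel into the corresponding columns of Q.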
template<class T, class T_ptr>
int driver_unpackQ_panel(kblasHandle_t handle, T_ptr m_batch, int ldm, int stride, T_ptr tau_batch, int stride_tau, int rows, int cols, int num_ops, int row_offset, int col_offset, int panel_rows)
{
int ops_per_block = OPS_PER_BLOCK;
const int HH_CB = QR_Config<T>::HH_CB;
if(panel_rows > 256) ops_per_block = 1;
int blocks = iDivUp(num_ops, ops_per_block);
int warps = iDivUp(panel_rows, WARP_SIZE);
dim3 dimBlock( ops_per_block * warps * WARP_SIZE, 1 );
dim3 dimGrid( blocks, 1 );
int smem_reduction = warps * WARP_SIZE * HH_CB + warps * HH_CB;
int smem_tau = HH_CB;
int smem_entries_per_op = smem_reduction + smem_tau;
int smem_per_block = sizeof(T) * smem_entries_per_op * ops_per_block;
switch(warps)
{
case 1: batch_unpackQ_panel<T, T_ptr, 32><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 2: batch_unpackQ_panel<T, T_ptr, 64><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 3: batch_unpackQ_panel<T, T_ptr, 96><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 4: batch_unpackQ_panel<T, T_ptr, 128><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 5: batch_unpackQ_panel<T, T_ptr, 160><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 6: batch_unpackQ_panel<T, T_ptr, 192><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 7: batch_unpackQ_panel<T, T_ptr, 224><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 8: batch_unpackQ_panel<T, T_ptr, 256><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 9: batch_unpackQ_panel<T, T_ptr, 288><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 10: batch_unpackQ_panel<T, T_ptr, 320><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 11: batch_unpackQ_panel<T, T_ptr, 352><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 12: batch_unpackQ_panel<T, T_ptr, 384><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 13: batch_unpackQ_panel<T, T_ptr, 416><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 14: batch_unpackQ_panel<T, T_ptr, 448><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 15: batch_unpackQ_panel<T, T_ptr, 480><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 16: batch_unpackQ_panel<T, T_ptr, 512><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
#ifdef QR_SUPPORT_LARGE
case 17: batch_unpackQ_panel<T, T_ptr, 544><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 18: batch_unpackQ_panel<T, T_ptr, 576><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 19: batch_unpackQ_panel<T, T_ptr, 608><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 20: batch_unpackQ_panel<T, T_ptr, 640><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 21: batch_unpackQ_panel<T, T_ptr, 672><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 22: batch_unpackQ_panel<T, T_ptr, 704><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 23: batch_unpackQ_panel<T, T_ptr, 736><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 24: batch_unpackQ_panel<T, T_ptr, 768><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 25: batch_unpackQ_panel<T, T_ptr, 800><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 26: batch_unpackQ_panel<T, T_ptr, 832><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 27: batch_unpackQ_panel<T, T_ptr, 864><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 28: batch_unpackQ_panel<T, T_ptr, 896><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 29: batch_unpackQ_panel<T, T_ptr, 928><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 30: batch_unpackQ_panel<T, T_ptr, 960><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 31: batch_unpackQ_panel<T, T_ptr, 992><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 32: batch_unpackQ_panel<T, T_ptr, 1024><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
#endif
default: { printf("driver_unpackQ_panel: Invalid row size %d\n", panel_rows); return KBLAS_UnknownError; }
}
check_error_ret( cudaGetLastError(), KBLAS_UnknownError );
return KBLAS_Success;
}
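// Dispatch the kernel that applies a factored panel's reflectors to the trailing column
// blocks, in forward order during factorization or reverse order when accumulating Q;
// returns early when there are no trailing columns to update.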
template<class T, class T_ptr, int APPLY_FORWARD, class Dim_Type>
int driver_apply_hh_panel(
kblasHandle_t handle, T_ptr m_batch, Dim_Type ldm, int stride, T_ptr tau_batch, int stride_tau,
Dim_Type rows, Dim_Type cols, int num_ops, int row_offset, int col_offset, int panel_rows,
int max_rows, int max_cols
)
{
int ops_per_block = OPS_PER_BLOCK;
const int HH_CB = QR_Config<T>::HH_CB;
if(panel_rows > 256) ops_per_block = 1;
if(max_cols - col_offset <= HH_CB) return KBLAS_Success;
int blocks = iDivUp(num_ops, ops_per_block);
int warps = iDivUp(panel_rows, WARP_SIZE);
dim3 dimBlock( ops_per_block * warps * WARP_SIZE, 1 );
dim3 dimGrid( blocks, 1 );
int smem_padding = warps * HH_CB;
int smem_entries_per_op = warps * WARP_SIZE * HH_CB + smem_padding + HH_CB;
int smem_per_block = sizeof(T) * smem_entries_per_op * ops_per_block;
switch(warps)
{
case 1: batch_apply_hh_panel<T, T_ptr, 32, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 2: batch_apply_hh_panel<T, T_ptr, 64, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 3: batch_apply_hh_panel<T, T_ptr, 96, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 4: batch_apply_hh_panel<T, T_ptr, 128, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 5: batch_apply_hh_panel<T, T_ptr, 160, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 6: batch_apply_hh_panel<T, T_ptr, 192, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 7: batch_apply_hh_panel<T, T_ptr, 224, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 8: batch_apply_hh_panel<T, T_ptr, 256, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 9: batch_apply_hh_panel<T, T_ptr, 288, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 10: batch_apply_hh_panel<T, T_ptr, 320, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 11: batch_apply_hh_panel<T, T_ptr, 352, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 12: batch_apply_hh_panel<T, T_ptr, 384, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 13: batch_apply_hh_panel<T, T_ptr, 416, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 14: batch_apply_hh_panel<T, T_ptr, 448, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 15: batch_apply_hh_panel<T, T_ptr, 480, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 16: batch_apply_hh_panel<T, T_ptr, 512, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
#ifdef QR_SUPPORT_LARGE
case 17: batch_apply_hh_panel<T, T_ptr, 544, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 18: batch_apply_hh_panel<T, T_ptr, 576, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 19: batch_apply_hh_panel<T, T_ptr, 608, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 20: batch_apply_hh_panel<T, T_ptr, 640, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 21: batch_apply_hh_panel<T, T_ptr, 672, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 22: batch_apply_hh_panel<T, T_ptr, 704, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 23: batch_apply_hh_panel<T, T_ptr, 736, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 24: batch_apply_hh_panel<T, T_ptr, 768, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 25: batch_apply_hh_panel<T, T_ptr, 800, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 26: batch_apply_hh_panel<T, T_ptr, 832, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 27: batch_apply_hh_panel<T, T_ptr, 864, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 28: batch_apply_hh_panel<T, T_ptr, 896, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 29: batch_apply_hh_panel<T, T_ptr, 928, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 30: batch_apply_hh_panel<T, T_ptr, 960, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 31: batch_apply_hh_panel<T, T_ptr, 992, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
case 32: batch_apply_hh_panel<T, T_ptr, 1024, APPLY_FORWARD, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, row_offset, col_offset, smem_entries_per_op, panel_rows, num_ops); break;
#endif
default: {printf("driver_apply_hh_panel: Invalid row size %d\n", panel_rows); return KBLAS_UnknownError;}
}
check_error_ret( cudaGetLastError(), KBLAS_UnknownError );
return KBLAS_Success;
}
template<class T, class T_ptr, class Dim_Type>
int driver_dtsqrt_panel(
kblasHandle_t handle, T_ptr m_batch, Dim_Type ldm, int stride, T_ptr tau_batch, int stride_tau,
Dim_Type rows, Dim_Type cols, int num_ops, int A1_row_off, int A1_col_off, int A2_row_off, int A2_rows,
int max_rows, int max_cols
)
{
int ops_per_block = OPS_PER_BLOCK;
const int HH_CB = QR_Config<T>::HH_CB;
int blocks = iDivUp(num_ops, ops_per_block);
int warps = iDivUp(A2_rows, WARP_SIZE);
dim3 dimBlock( ops_per_block * warps * WARP_SIZE, 1 );
dim3 dimGrid( blocks, 1 );
int smem_reduction = warps * WARP_SIZE * HH_CB + warps * HH_CB;
int smem_tau = HH_CB;
int smem_A1 = HH_CB * HH_CB;
int smem_entries_per_op = smem_reduction + smem_tau + smem_A1;
int smem_per_block = sizeof(T) * smem_entries_per_op * ops_per_block;
switch(warps)
{
case 1: batch_dtsqrt_panel<T, T_ptr, 32, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, A1_row_off, A1_col_off, A2_row_off, A2_rows, smem_entries_per_op, num_ops); break;
case 2: batch_dtsqrt_panel<T, T_ptr, 64, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, A1_row_off, A1_col_off, A2_row_off, A2_rows, smem_entries_per_op, num_ops); break;
case 3: batch_dtsqrt_panel<T, T_ptr, 96, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, A1_row_off, A1_col_off, A2_row_off, A2_rows, smem_entries_per_op, num_ops); break;
case 4: batch_dtsqrt_panel<T, T_ptr, 128, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, A1_row_off, A1_col_off, A2_row_off, A2_rows, smem_entries_per_op, num_ops); break;
default: { printf("driver_dtsqrt_panel: Invalid row size %d\n", A2_rows); return KBLAS_UnknownError; }
}
check_error_ret( cudaGetLastError(), KBLAS_UnknownError );
return KBLAS_Success;
}
template<class T, class T_ptr, class Dim_Type>
int driver_apply_dtsqrt_panel(
kblasHandle_t handle, T_ptr m_batch, Dim_Type ldm, int stride, T_ptr tau_batch, int stride_tau,
Dim_Type rows, Dim_Type cols, int num_ops, int V_row_off, int V_col_off, int V_rows, int A1_row_off,
int max_rows, int max_cols
)
{
int ops_per_block = OPS_PER_BLOCK;
const int HH_CB = QR_Config<T>::HH_CB;
if(max_cols - V_col_off <= HH_CB) return KBLAS_Success;
int blocks = iDivUp(num_ops, ops_per_block);
int warps = iDivUp(V_rows, WARP_SIZE);
dim3 dimBlock( ops_per_block * warps * WARP_SIZE, 1 );
dim3 dimGrid( blocks, 1 );
int smem_reduction = warps * WARP_SIZE * HH_CB + warps * HH_CB;
int smem_tau = HH_CB;
int smem_A1 = HH_CB * HH_CB;
int smem_entries_per_op = smem_reduction + smem_tau + smem_A1;
int smem_per_block = sizeof(T) * smem_entries_per_op * ops_per_block;
switch(warps)
{
case 1: batch_apply_dtsqrt_panel<T, T_ptr, 32, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, V_row_off, V_col_off, V_rows, A1_row_off, smem_entries_per_op, num_ops); break;
case 2: batch_apply_dtsqrt_panel<T, T_ptr, 64, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, V_row_off, V_col_off, V_rows, A1_row_off, smem_entries_per_op, num_ops); break;
case 3: batch_apply_dtsqrt_panel<T, T_ptr, 96, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, V_row_off, V_col_off, V_rows, A1_row_off, smem_entries_per_op, num_ops); break;
case 4: batch_apply_dtsqrt_panel<T, T_ptr, 128, Dim_Type><<< dimGrid, dimBlock, smem_per_block, handle->stream >>>(m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, V_row_off, V_col_off, V_rows, A1_row_off, smem_entries_per_op, num_ops); break;
default: { printf("driver_apply_dtsqrt_panel: Invalid row size %d\n", V_rows); return KBLAS_UnknownError; }
}
check_error_ret( cudaGetLastError(), KBLAS_UnknownError );
return KBLAS_Success;
}
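// Blocked batched QR: for each HH_CB-wide panel, factor the diagonal block (driver_hh_panel),
// apply its reflectors to the trailing columns (driver_apply_hh_panel), then eliminate the row
// blocks below it with triangle-on-top-of-block factorizations (driver_dtsqrt_panel) followed by
// the corresponding trailing updates (driver_apply_dtsqrt_panel).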
template<class T, class T_ptr, class Dim_Type>
int batch_qr(kblasHandle_t handle, T_ptr m_batch, Dim_Type ldm, int stride, T_ptr tau_batch, int stride_tau, Dim_Type rows, Dim_Type cols, int num_ops, int block_rows, int max_rows, int max_cols)
{
const int HH_CB = QR_Config<T>::HH_CB;
int rows_per_block = (max_rows < block_rows ? max_rows : block_rows);
int matrix_rank = (max_rows > max_cols ? max_cols : max_rows);
for(int c = 0; c < matrix_rank; c += HH_CB)
{
int upper_panel_height = rows_per_block - c % rows_per_block;
if(c + upper_panel_height > max_rows)
upper_panel_height = max_rows - c;
KBLAS_QR_CHECK_RET( (driver_hh_panel<T, T_ptr, Dim_Type>(
handle, m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, num_ops,
c, c, upper_panel_height, max_rows, max_cols
)) );
KBLAS_QR_CHECK_RET( (driver_apply_hh_panel<T, T_ptr, 1, Dim_Type>(
handle, m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, num_ops,
c, c, upper_panel_height, max_rows, max_cols
)) );
int remaining_rows = max_rows - c - upper_panel_height;
if(remaining_rows <= 0) continue;
int remaining_row_blocks = iDivUp(remaining_rows, rows_per_block);
for(int rb = 0; rb < remaining_row_blocks; rb++)
{
int A2_row_offset = c + upper_panel_height + rb * rows_per_block;
int A2_rows = (A2_row_offset + rows_per_block > max_rows ? max_rows - A2_row_offset : rows_per_block);
KBLAS_QR_CHECK_RET( (driver_dtsqrt_panel<T, T_ptr, Dim_Type>(
handle, m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, num_ops,
c, c, A2_row_offset, A2_rows, max_rows, max_cols
)) );
KBLAS_QR_CHECK_RET( (driver_apply_dtsqrt_panel<T, T_ptr, Dim_Type>(
handle, m_batch, ldm, stride, tau_batch, stride_tau, rows, cols, num_ops,
A2_row_offset, c, A2_rows, c, max_rows, max_cols
)) );
}
}
return KBLAS_Success;
}
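// Form Q explicitly from the stored reflectors: clear the upper triangular factor R, then sweep the
// Householder panels from last to first, applying each one in reverse (driver_apply_hh_panel with the
// APPLY_FORWARD template flag set to 0) before expanding the panel itself into Q (driver_unpackQ_panel).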
template<class T, class T_ptr>
int batch_unpack_Q(kblasHandle_t handle, T_ptr m_batch, int ldm, int stride, T_ptr tau_batch, int stride_tau, int rows, int cols, int num_ops)
{
const int HH_CB = QR_Config<T>::HH_CB;
// Zero out the upper triangular part of the matrix
int matrix_rank = (rows > cols ? cols : rows);
KBLAS_QR_CHECK_RET( (batch_qr_clear_R<T, T_ptr>(handle, m_batch, ldm, stride, rows, matrix_rank, num_ops)) );
int col_start = (matrix_rank % HH_CB == 0 ? matrix_rank - HH_CB : matrix_rank - matrix_rank % HH_CB);
for(int c = col_start; c >= 0; c -= HH_CB)
{
int panel_rows = rows - c;
KBLAS_QR_CHECK_RET( (driver_apply_hh_panel<T, T_ptr, 0, int>(
handle, m_batch, ldm, stride, tau_batch, stride_tau, rows, matrix_rank, num_ops,
c, c, panel_rows, rows, cols
)) );
KBLAS_QR_CHECK_RET( (driver_unpackQ_panel<T, T_ptr>(
handle, m_batch, ldm, stride, tau_batch, stride_tau, rows, matrix_rank, num_ops,
c, c, panel_rows
)) );
}
return KBLAS_Success;
}
///////////////////////////////////////////////////////////////
// Strided routines
///////////////////////////////////////////////////////////////
extern "C" int kblasDgeqrf_batch_strided(kblasHandle_t handle, int m, int n, double* A_strided, int lda, int stride_a, double* tau, int stride_tau, int num_ops)
{
if(m > QR_Config<double>::HH_MAX_ROWS || lda < m)
return KBLAS_Error_WrongInput;
else if(stride_a < m || stride_tau < kmin(m, n))
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_qr<double, double*, int>(handle, A_strided, lda, stride_a, tau, stride_tau, m, n, num_ops, m, m, n);
}
extern "C" int kblasSgeqrf_batch_strided(kblasHandle_t handle, int m, int n, float* A_strided, int lda, int stride_a, float* tau, int stride_tau, int num_ops)
{
if(m > QR_Config<float>::HH_MAX_ROWS || lda < m)
return KBLAS_Error_WrongInput;
else if(stride_a < m || stride_tau < kmin(m, n))
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_qr<float, float*, int>(handle, A_strided, lda, stride_a, tau, stride_tau, m, n, num_ops, m, m, n);
}
extern "C" int kblasDtsqrf_batch_strided(kblasHandle_t handle, int m, int n, double* A_strided, int lda, int stride_a, double* tau, int stride_tau, int num_ops)
{
if(lda < m)
return KBLAS_Error_WrongInput;
else if(stride_a < m || stride_tau < kmin(m, n))
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_qr<double, double*, int>(handle, A_strided, lda, stride_a, tau, stride_tau, m, n, num_ops, ROWS_PER_BLOCK, m, n);
}
extern "C" int kblasStsqrf_batch_strided(kblasHandle_t handle, int m, int n, float* A_strided, int lda, int stride_a, float* tau, int stride_tau, int num_ops)
{
if(lda < m)
return KBLAS_Error_WrongInput;
else if(stride_a < m || stride_tau < kmin(m, n))
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_qr<float, float*, int>(handle, A_strided, lda, stride_a, tau, stride_tau, m, n, num_ops, ROWS_PER_BLOCK, m, n);
}
extern "C" int kblasDorgqr_batch_strided(kblasHandle_t handle, int m, int n, double* A_strided, int lda, int stride_a, double* tau, int stride_tau, int num_ops)
{
if(m > QR_Config<double>::HH_MAX_ROWS || lda < m)
return KBLAS_Error_WrongInput;
else if(stride_a < m || stride_tau < kmin(m, n))
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_unpack_Q<double, double*>(handle, A_strided, lda, stride_a, tau, stride_tau, m, n, num_ops);
}
extern "C" int kblasSorgqr_batch_strided(kblasHandle_t handle, int m, int n, float* A_strided, int lda, int stride_a, float* tau, int stride_tau, int num_ops)
{
if(m > QR_Config<float>::HH_MAX_ROWS || lda < m)
return KBLAS_Error_WrongInput;
else if(stride_a < m || stride_tau < kmin(m, n))
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_unpack_Q<float, float*>(handle, A_strided, lda, stride_a, tau, stride_tau, m, n, num_ops);
}
extern "C" int kblasDcopy_upper_batch_strided(kblasHandle_t handle, int m, int n, double* A_strided, int lda, int stride_a, double* R_strided, int ldr, int stride_R, int num_ops)
{
if(lda < m || ldr < kmin(m, n))
return KBLAS_Error_WrongInput;
else if(stride_a < m || stride_R < n)
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_qr_copy_R<double, double*>(handle, A_strided, lda, stride_a, R_strided, ldr, stride_R, m, n, num_ops);
}
extern "C" int kblasScopy_upper_batch_strided(kblasHandle_t handle, int m, int n, float* A_strided, int lda, int stride_a, float* R_strided, int ldr, int stride_R, int num_ops)
{
if(lda < m || ldr < kmin(m, n))
return KBLAS_Error_WrongInput;
else if(stride_a < m || stride_R < n)
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_qr_copy_R<float, float*>(handle, A_strided, lda, stride_a, R_strided, ldr, stride_R, m, n, num_ops);
}
///////////////////////////////////////////////////////////////
// Array of pointers routines
///////////////////////////////////////////////////////////////
extern "C" int kblasDgeqrf_batch(kblasHandle_t handle, int m, int n, double** A_array, int lda, double** tau_array, int num_ops)
{
if(m > QR_Config<double>::HH_MAX_ROWS || lda < m)
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_qr<double, double**, int>(handle, A_array, lda, 0, tau_array, 0, m, n, num_ops, m, m, n);
}
extern "C" int kblasSgeqrf_batch(kblasHandle_t handle, int m, int n, float** A_array, int lda, float** tau_array, int num_ops)
{
if(m > QR_Config<float>::HH_MAX_ROWS || lda < m)
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_qr<float, float**, int>(handle, A_array, lda, 0, tau_array, 0, m, n, num_ops, m, m, n);
}
extern "C" int kblasDtsqrf_batch(kblasHandle_t handle, int m, int n, double** A_array, int lda, double** tau_array, int num_ops)
{
if(lda < m)
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_qr<double, double**, int>(handle, A_array, lda, 0, tau_array, 0, m, n, num_ops, ROWS_PER_BLOCK, m, n);
}
extern "C" int kblasStsqrf_batch(kblasHandle_t handle, int m, int n, float** A_array, int lda, float** tau_array, int num_ops)
{
if(lda < m)
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_qr<float, float**, int>(handle, A_array, lda, 0, tau_array, 0, m, n, num_ops, ROWS_PER_BLOCK, m, n);
}
extern "C" int kblasDorgqr_batch(kblasHandle_t handle, int m, int n, double** A_array, int lda, double** tau_array, int num_ops)
{
if(m > QR_Config<double>::HH_MAX_ROWS || lda < m)
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_unpack_Q<double, double**>(handle, A_array, lda, 0, tau_array, 0, m, n, num_ops);
}
extern "C" int kblasSorgqr_batch(kblasHandle_t handle, int m, int n, float** A_array, int lda, float** tau_array, int num_ops)
{
if(m > QR_Config<float>::HH_MAX_ROWS || lda < m)
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_unpack_Q<float, float**>(handle, A_array, lda, 0, tau_array, 0, m, n, num_ops);
}
extern "C" int kblasDcopy_upper_batch(kblasHandle_t handle, int m, int n, double** A_array, int lda, double** R_array, int ldr, int num_ops)
{
if(lda < m || ldr < std::min(m, n))
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_qr_copy_R<double, double**>(handle, A_array, lda, 0, R_array, ldr, 0, m, n, num_ops);
}
extern "C" int kblasScopy_upper_batch(kblasHandle_t handle, int m, int n, float** A_array, int lda, float** R_array, int ldr, int num_ops)
{
if(lda < m || ldr < std::min(m, n))
return KBLAS_Error_WrongInput;
if(m == 0 || n == 0)
return KBLAS_Success;
return batch_qr_copy_R<float, float**>(handle, A_array, lda, 0, R_array, ldr, 0, m, n, num_ops);
}
///////////////////////////////////////////////////////////////
// Array of pointers variable size routines
///////////////////////////////////////////////////////////////
extern "C" int kblasDtsqrf_vbatch(kblasHandle_t handle, int* m, int* n, int max_m, int max_n, double** A_array, int* lda, double** tau_array, int num_ops)
{
return batch_qr<double, double**, int*>(handle, A_array, lda, 0, tau_array, 0, m, n, num_ops, ROWS_PER_BLOCK, max_m, max_n);
}
extern "C" int kblasStsqrf_vbatch(kblasHandle_t handle, int* m, int* n, int max_m, int max_n, float** A_array, int* lda, float** tau_array, int num_ops)
{
return batch_qr<float, float**, int*>(handle, A_array, lda, 0, tau_array, 0, m, n, num_ops, ROWS_PER_BLOCK, max_m, max_n);
}
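// Minimal host-side usage sketch for the strided batched QR entry points above. Illustrative only:
// the handle lifecycle calls (kblasCreate / kblasDestroy) and the example_* name are assumptions,
// and d_A / d_tau are assumed to already hold batchCount problems back to back on the device.
static void example_batched_geqrf_strided(double* d_A, double* d_tau, int m, int n, int batchCount)
{
    kblasHandle_t handle;
    kblasCreate(&handle);                                  // assumed handle constructor
    int lda = m;
    int stride_a = lda * n;                                // contiguous m x n matrices
    int stride_tau = (m < n ? m : n);
    int status = kblasDgeqrf_batch_strided(handle, m, n, d_A, lda, stride_a,
                                           d_tau, stride_tau, batchCount);
    if (status != KBLAS_Success)
        printf("kblasDgeqrf_batch_strided failed with error %d\n", status);
    kblasDestroy(&handle);                                 // assumed handle destructor
}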
|
acb6205bd41366e2983765a89573afb36fd6bfa2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <helper_cuda.h>
#include "histogram_common.h"
////////////////////////////////////////////////////////////////////////////////
// Shortcut shared memory atomic addition functions
////////////////////////////////////////////////////////////////////////////////
#define TAG_MASK 0xFFFFFFFFU
inline __device__ void addByte(uint *s_WarpHist, uint data, uint threadTag)
{
atomicAdd(s_WarpHist + data, 1);
}
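// Note: this variant relies on shared-memory atomics, so the per-thread tag arguments are kept only
// for interface compatibility; addByte ignores threadTag, and TAG_MASK (all ones) masks nothing out
// when the per-warp counts are merged below.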
inline __device__ void addWord(uint *s_WarpHist, uint data, uint tag)
{
addByte(s_WarpHist, (data >> 0) & 0xFFU, tag);
addByte(s_WarpHist, (data >> 8) & 0xFFU, tag);
addByte(s_WarpHist, (data >> 16) & 0xFFU, tag);
addByte(s_WarpHist, (data >> 24) & 0xFFU, tag);
}
__global__ void histogram256Kernel(uint *d_PartialHistograms, uint *d_Data, uint dataCount)
{
//Per-warp subhistogram storage
__shared__ uint s_Hist[HISTOGRAM256_THREADBLOCK_MEMORY];
uint *s_WarpHist= s_Hist + (threadIdx.x >> LOG2_WARP_SIZE) * HISTOGRAM256_BIN_COUNT;
//Clear shared memory storage for current threadblock before processing
#pragma unroll
for (uint i = 0; i < (HISTOGRAM256_THREADBLOCK_MEMORY / HISTOGRAM256_THREADBLOCK_SIZE); i++)
{
s_Hist[threadIdx.x + i * HISTOGRAM256_THREADBLOCK_SIZE] = 0;
}
//Cycle through the entire data set, update subhistograms for each warp
const uint tag = threadIdx.x << (UINT_BITS - LOG2_WARP_SIZE);
__syncthreads();
for (uint pos = UMAD(blockIdx.x, blockDim.x, threadIdx.x); pos < dataCount; pos += UMUL(blockDim.x, gridDim.x))
{
uint data = d_Data[pos];
addWord(s_WarpHist, data, tag);
}
//Merge per-warp histograms into per-block and write to global memory
__syncthreads();
for (uint bin = threadIdx.x; bin < HISTOGRAM256_BIN_COUNT; bin += HISTOGRAM256_THREADBLOCK_SIZE)
{
uint sum = 0;
for (uint i = 0; i < WARP_COUNT; i++)
{
sum += s_Hist[bin + i * HISTOGRAM256_BIN_COUNT] & TAG_MASK;
}
d_PartialHistograms[blockIdx.x * HISTOGRAM256_BIN_COUNT + bin] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Merge histogram256() output
// Run one threadblock per bin; each threadblock adds up the same bin counter
// from every partial histogram. Reads are uncoalesced, but mergeHistogram256
// takes only a fraction of total processing time
////////////////////////////////////////////////////////////////////////////////
#define MERGE_THREADBLOCK_SIZE 256
__global__ void mergeHistogram256Kernel(
uint *d_Histogram,
uint *d_PartialHistograms,
uint histogramCount
)
{
uint sum = 0;
for (uint i = threadIdx.x; i < histogramCount; i += MERGE_THREADBLOCK_SIZE)
{
sum += d_PartialHistograms[blockIdx.x + i * HISTOGRAM256_BIN_COUNT];
}
__shared__ uint data[MERGE_THREADBLOCK_SIZE];
data[threadIdx.x] = sum;
for (uint stride = MERGE_THREADBLOCK_SIZE / 2; stride > 0; stride >>= 1)
{
__syncthreads();
if (threadIdx.x < stride)
{
data[threadIdx.x] += data[threadIdx.x + stride];
}
}
if (threadIdx.x == 0)
{
d_Histogram[blockIdx.x] = data[0];
}
}
////////////////////////////////////////////////////////////////////////////////
// Host interface to GPU histogram
////////////////////////////////////////////////////////////////////////////////
//histogram256kernel() intermediate results buffer
static const uint PARTIAL_HISTOGRAM256_COUNT = 240;
static uint *d_PartialHistograms;
//Internal memory allocation
extern "C" void initHistogram256(void)
{
checkCudaErrors(hipMalloc((void **)&d_PartialHistograms, PARTIAL_HISTOGRAM256_COUNT * HISTOGRAM256_BIN_COUNT * sizeof(uint)));
}
//Internal memory deallocation
extern "C" void closeHistogram256(void)
{
checkCudaErrors(hipFree(d_PartialHistograms));
}
extern "C" void histogram256(
uint *d_Histogram,
void *d_Data,
uint byteCount
)
{
assert(byteCount % sizeof(uint) == 0);
hipLaunchKernelGGL(( histogram256Kernel), dim3(PARTIAL_HISTOGRAM256_COUNT), dim3(HISTOGRAM256_THREADBLOCK_SIZE), 0, 0,
d_PartialHistograms,
(uint *)d_Data,
byteCount / sizeof(uint)
);
getLastCudaError("histogram256Kernel() execution failed\n");
hipLaunchKernelGGL(( mergeHistogram256Kernel), dim3(HISTOGRAM256_BIN_COUNT), dim3(MERGE_THREADBLOCK_SIZE), 0, 0,
d_Histogram,
d_PartialHistograms,
PARTIAL_HISTOGRAM256_COUNT
);
getLastCudaError("mergeHistogram256Kernel() execution failed\n");
}
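// Minimal host-side usage sketch (illustrative only, not part of the original sample): upload the
// byte data, create the partial-histogram workspace, run the two kernels through histogram256(),
// and copy the 256 bin counts back. The example* name and the plain error handling are assumptions.
static void exampleRunHistogram256(const unsigned char *h_Data, uint byteCount, uint *h_Histogram)
{
    uint *d_Histogram;
    void *d_Data;
    hipMalloc((void **)&d_Histogram, HISTOGRAM256_BIN_COUNT * sizeof(uint));
    hipMalloc(&d_Data, byteCount);
    hipMemcpy(d_Data, h_Data, byteCount, hipMemcpyHostToDevice);
    initHistogram256();
    histogram256(d_Histogram, d_Data, byteCount);   // byteCount must be a multiple of sizeof(uint)
    hipMemcpy(h_Histogram, d_Histogram, HISTOGRAM256_BIN_COUNT * sizeof(uint), hipMemcpyDeviceToHost);
    closeHistogram256();
    hipFree(d_Data);
    hipFree(d_Histogram);
}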
| acb6205bd41366e2983765a89573afb36fd6bfa2.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <helper_cuda.h>
#include "histogram_common.h"
////////////////////////////////////////////////////////////////////////////////
// Shortcut shared memory atomic addition functions
////////////////////////////////////////////////////////////////////////////////
#define TAG_MASK 0xFFFFFFFFU
inline __device__ void addByte(uint *s_WarpHist, uint data, uint threadTag)
{
atomicAdd(s_WarpHist + data, 1);
}
inline __device__ void addWord(uint *s_WarpHist, uint data, uint tag)
{
addByte(s_WarpHist, (data >> 0) & 0xFFU, tag);
addByte(s_WarpHist, (data >> 8) & 0xFFU, tag);
addByte(s_WarpHist, (data >> 16) & 0xFFU, tag);
addByte(s_WarpHist, (data >> 24) & 0xFFU, tag);
}
__global__ void histogram256Kernel(uint *d_PartialHistograms, uint *d_Data, uint dataCount)
{
//Per-warp subhistogram storage
__shared__ uint s_Hist[HISTOGRAM256_THREADBLOCK_MEMORY];
uint *s_WarpHist= s_Hist + (threadIdx.x >> LOG2_WARP_SIZE) * HISTOGRAM256_BIN_COUNT;
//Clear shared memory storage for current threadblock before processing
#pragma unroll
for (uint i = 0; i < (HISTOGRAM256_THREADBLOCK_MEMORY / HISTOGRAM256_THREADBLOCK_SIZE); i++)
{
s_Hist[threadIdx.x + i * HISTOGRAM256_THREADBLOCK_SIZE] = 0;
}
//Cycle through the entire data set, update subhistograms for each warp
const uint tag = threadIdx.x << (UINT_BITS - LOG2_WARP_SIZE);
__syncthreads();
for (uint pos = UMAD(blockIdx.x, blockDim.x, threadIdx.x); pos < dataCount; pos += UMUL(blockDim.x, gridDim.x))
{
uint data = d_Data[pos];
addWord(s_WarpHist, data, tag);
}
//Merge per-warp histograms into per-block and write to global memory
__syncthreads();
for (uint bin = threadIdx.x; bin < HISTOGRAM256_BIN_COUNT; bin += HISTOGRAM256_THREADBLOCK_SIZE)
{
uint sum = 0;
for (uint i = 0; i < WARP_COUNT; i++)
{
sum += s_Hist[bin + i * HISTOGRAM256_BIN_COUNT] & TAG_MASK;
}
d_PartialHistograms[blockIdx.x * HISTOGRAM256_BIN_COUNT + bin] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Merge histogram256() output
// Run one threadblock per bin; each threadblock adds up the same bin counter
// from every partial histogram. Reads are uncoalesced, but mergeHistogram256
// takes only a fraction of total processing time
////////////////////////////////////////////////////////////////////////////////
#define MERGE_THREADBLOCK_SIZE 256
__global__ void mergeHistogram256Kernel(
uint *d_Histogram,
uint *d_PartialHistograms,
uint histogramCount
)
{
uint sum = 0;
for (uint i = threadIdx.x; i < histogramCount; i += MERGE_THREADBLOCK_SIZE)
{
sum += d_PartialHistograms[blockIdx.x + i * HISTOGRAM256_BIN_COUNT];
}
__shared__ uint data[MERGE_THREADBLOCK_SIZE];
data[threadIdx.x] = sum;
for (uint stride = MERGE_THREADBLOCK_SIZE / 2; stride > 0; stride >>= 1)
{
__syncthreads();
if (threadIdx.x < stride)
{
data[threadIdx.x] += data[threadIdx.x + stride];
}
}
if (threadIdx.x == 0)
{
d_Histogram[blockIdx.x] = data[0];
}
}
////////////////////////////////////////////////////////////////////////////////
// Host interface to GPU histogram
////////////////////////////////////////////////////////////////////////////////
//histogram256kernel() intermediate results buffer
static const uint PARTIAL_HISTOGRAM256_COUNT = 240;
static uint *d_PartialHistograms;
//Internal memory allocation
extern "C" void initHistogram256(void)
{
checkCudaErrors(cudaMalloc((void **)&d_PartialHistograms, PARTIAL_HISTOGRAM256_COUNT * HISTOGRAM256_BIN_COUNT * sizeof(uint)));
}
//Internal memory deallocation
extern "C" void closeHistogram256(void)
{
checkCudaErrors(cudaFree(d_PartialHistograms));
}
extern "C" void histogram256(
uint *d_Histogram,
void *d_Data,
uint byteCount
)
{
assert(byteCount % sizeof(uint) == 0);
histogram256Kernel<<<PARTIAL_HISTOGRAM256_COUNT, HISTOGRAM256_THREADBLOCK_SIZE>>>(
d_PartialHistograms,
(uint *)d_Data,
byteCount / sizeof(uint)
);
getLastCudaError("histogram256Kernel() execution failed\n");
mergeHistogram256Kernel<<<HISTOGRAM256_BIN_COUNT, MERGE_THREADBLOCK_SIZE>>>(
d_Histogram,
d_PartialHistograms,
PARTIAL_HISTOGRAM256_COUNT
);
getLastCudaError("mergeHistogram256Kernel() execution failed\n");
}
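// Hypothetical CPU reference (added for illustration, not part of the original sample): computes the
// same 256-bin byte histogram on the host, which is convenient for validating the GPU result.
static void histogram256CPU(uint *h_Histogram, const unsigned char *h_Data, uint byteCount)
{
    for (uint bin = 0; bin < HISTOGRAM256_BIN_COUNT; bin++)
        h_Histogram[bin] = 0;
    for (uint pos = 0; pos < byteCount; pos++)
        h_Histogram[h_Data[pos]]++;
}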
|
a6df5f0afafa2d2eaea39a91b6f1e2124d780107.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cu_head.h"
#include "cu_svd_util.h"
#include <stdio.h>
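// Splits the global search interval [lg, ug) into `threads` sub-intervals via parallel bisection,
// so that each sub-interval holds an (almost) equal share of the (n_ug - n_lg) eigenvalues; the
// resulting split points and their cumulative eigenvalue counts are written to d_up and d_ucnt.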
__global__ void separateInterval(float *d_a, float*d_b, U32 n, U32 threads, float *d_up, U32 *d_ucnt, float lg, float ug, U32 n_lg, U32 n_ug, float tao)
{
__shared__ float s_a[MAX_HUGE_THREAD];
__shared__ float s_b[MAX_HUGE_THREAD];
__shared__ unsigned int converge;
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
float t1 = fmaxf(fabsf(lg), fabsf(ug)) * tao;
float lo = lg, up = ug, mid;
unsigned int lcnt = n_lg, ucnt = n_ug, mcnt;
unsigned int point = (tid + 1) * (ucnt - lcnt) / threads + lcnt;
if (tid == 0){
converge = 0;
d_up[0] = lg;
d_ucnt[0] = n_lg;
}
__syncthreads();
while (!converge){
mid = midPoint(lo, up);
mcnt = nEigvalsHuge(d_a, d_b, n, mid, tid, threads, s_a, s_b, 0);
mcnt = fminf(fmaxf(mcnt, lcnt), ucnt);
if (tid < threads) {
if (mcnt >= point) {
up = mid;
ucnt = mcnt;
} else {
lo = mid;
lcnt = mcnt;
}
}
converge = 1;
__syncthreads();
if (tid < threads){
if(ucnt != point || fabs(up-lo) >= fmaxf(t1, MIN_INTERVAL))
converge = 0;
}
__syncthreads();
}
if (tid < threads) {
d_up[tid+1] = up;
d_ucnt[tid+1] = ucnt;
}
}
// Bisection to find eigenvals of a matrix
__global__ void bisectKernelHuge(float *d_a, float *d_b, U32 n, float *d_eig, U32 *d_pos, float *d_lo, U32 *d_lcnt, float tao)
{
__shared__ float s_lo[2*MAX_HUGE_THREAD];
__shared__ float s_up[2*MAX_HUGE_THREAD];
__shared__ unsigned int s_lcnt[2*MAX_HUGE_THREAD];
__shared__ unsigned int s_ucnt[2*MAX_HUGE_THREAD];
__shared__ unsigned int s_cmpl[2*MAX_HUGE_THREAD + 2];
__shared__ unsigned int compact2;
__shared__ unsigned int converge;
__shared__ unsigned int nth_actv;
__shared__ unsigned int nth_comp;
__shared__ unsigned int addr;
__shared__ unsigned int cnt;
unsigned int *s_cmpl_exc = s_cmpl + 1;
float lo = 0.0f, up = 0.0f, mid = 0.0f;
unsigned int lcnt = 0, ucnt = 0, mcnt = 0;
unsigned int active2 = 0;
s_cmpl[threadIdx.x] = 0;
s_lo[threadIdx.x] = 0;
s_up[threadIdx.x] = 0;
s_lcnt[threadIdx.x] = 0;
s_ucnt[threadIdx.x] = 0;
__syncthreads();
if (threadIdx.x == 0) {
s_lo[0] = d_lo[blockIdx.x];
s_up[0] = d_lo[blockIdx.x+1];
s_lcnt[0] = d_lcnt[blockIdx.x];
s_ucnt[0] = d_lcnt[blockIdx.x+1];
addr = d_lcnt[blockIdx.x] - d_lcnt[0];
cnt = s_ucnt[0] - s_lcnt[0];
compact2 = 0;
nth_actv = 1;
nth_comp = 1;
}
while (true) {
if(threadIdx.x == 0)
converge = 1;
__syncthreads();
active2 = 0;
subdivideInterval(threadIdx.x, s_lo, s_up, s_lcnt, s_ucnt, nth_actv, lo, up, lcnt, ucnt, mid, converge);
__syncthreads();
if (converge == 1) break;
__syncthreads();
mcnt = nEigvalsHuge(d_a, d_b, n, mid, threadIdx.x, nth_actv, s_lo, s_up, (lo == up));
__syncthreads();
if (threadIdx.x < nth_actv) {
if (lo != up)
storeIntervals(threadIdx.x, nth_actv, s_lo, s_up, s_lcnt, s_ucnt, lo, mid, up, lcnt, mcnt, ucnt, tao, compact2, s_cmpl_exc, active2);
else
storeConverged(s_lo, s_up, s_lcnt, s_ucnt, lo, mid, up, lcnt, mcnt, ucnt, s_cmpl_exc, compact2, nth_actv, active2);
}
__syncthreads();
if (compact2 > 0) {
createIndices(s_cmpl_exc, nth_comp);
compactIntervals(s_lo, s_up, s_lcnt, s_ucnt, mid, up, mcnt, ucnt, s_cmpl, nth_actv, active2);
}
__syncthreads();
if (threadIdx.x == 0) {
nth_actv += s_cmpl[nth_actv];
nth_comp = ceilPow2(nth_actv);
compact2 = 0;
}
__syncthreads();
}
__syncthreads();
if (threadIdx.x < cnt) {
d_eig[addr + threadIdx.x] = s_lo[threadIdx.x];
d_pos[addr + threadIdx.x] = s_lcnt[threadIdx.x];
}
}
// Host driver: split [lo, up) into per-block sub-intervals with separateInterval, then bisect each sub-interval on the GPU and scatter the eigenvalues into p_val
void huge_eigval(float *p_val, float *p_a, float *p_b, U32 n, float lo, float up, U32 n_lo, U32 n_up, float tao)
{
// Cuda Event for timing
#ifdef TIME
float time;
hipEvent_t start, stop;
cudaErrors(hipEventCreate(&start));
cudaErrors(hipEventCreate(&stop));
#endif
U32 size = n_up - n_lo;
U32 interv = (size -1) / MAX_HUGE_THREAD + 1;
// Allocate Memory on GPU
float * p_lo = new float[interv+1];
U32 * p_lcnt = new U32[interv+1];
float * p_eig = new float[size];
U32 * p_pos = new U32[size];
float * d_lo, * d_eig;
U32 * d_lcnt, * d_pos;
cudaErrors(hipMalloc((void **)&d_lo, sizeof(float)*(interv+1)));
cudaErrors(hipMalloc((void **)&d_lcnt, sizeof(U32)*(interv+1)));
cudaErrors(hipMalloc((void **)&d_eig, sizeof(float)*size));
cudaErrors(hipMalloc((void **)&d_pos, sizeof(U32)*size));
// cout << interv << " threads" << endl;
#ifdef TIME
hipEventRecord(start, 0);
#endif
for (U32 i=0; i<iters; i++){
hipLaunchKernelGGL(( separateInterval), dim3(1),dim3(MAX_HUGE_THREAD), 0, 0, p_a, p_b, n, interv, d_lo, d_lcnt, lo, up, n_lo, n_up, tao);
cudaErrors(hipGetLastError());
cudaErrors(hipDeviceSynchronize());
hipLaunchKernelGGL(( bisectKernelHuge), dim3(interv), dim3(MAX_HUGE_THREAD), 0, 0, p_a, p_b, n, d_eig, d_pos, d_lo, d_lcnt, tao);
cudaErrors(hipGetLastError());
cudaErrors(hipDeviceSynchronize());
cudaErrors(hipMemcpy(p_eig, d_eig, sizeof(float)*size, hipMemcpyDeviceToHost));
cudaErrors(hipMemcpy(p_pos, d_pos, sizeof(U32)*size, hipMemcpyDeviceToHost));
for (U32 j=0; j<size; j++){
p_val[p_pos[j]] =p_eig[j];
}
}
#ifdef TIME
hipEventRecord(stop, 0);
cudaErrors(hipEventSynchronize(stop));
cudaErrors(hipEventElapsedTime(&time, start, stop));
pthread_mutex_lock (&print);
cout << "Parall Eigenval Time : " << fixed<<setprecision(3) << time/iters << " ms" << endl;
pthread_mutex_unlock (&print);
#endif
cudaErrors(hipFree(d_lo));
cudaErrors(hipFree(d_lcnt));
cudaErrors(hipFree(d_eig));
cudaErrors(hipFree(d_pos));
delete[] p_lo;
delete[] p_lcnt;
delete[] p_eig;
delete[] p_pos;
}
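// Host-side Sturm-count sketch (illustrative only): counts the eigenvalues below x of the symmetric
// tridiagonal matrix with diagonal a[0..n-1] and off-diagonal b[], mirroring what nEigvalsHuge does
// on the device, so small results in p_val can be spot-checked against host copies of the arrays.
// The off-diagonal indexing convention (b[i] couples rows i-1 and i) is an assumption here.
static U32 sturm_count_below(const float *a, const float *b, U32 n, float x)
{
    U32 count = 0;
    float d = 1.0f;
    for (U32 i = 0; i < n; i++) {
        float off = (i == 0) ? 0.0f : b[i] * b[i];
        d = a[i] - x - off / d;          // pivots of the LDL^T factorization of (T - x*I)
        if (d == 0.0f)
            d = -1e-20f;                 // guard against a zero pivot
        if (d < 0.0f)
            count++;                     // each negative pivot is one eigenvalue below x
    }
    return count;
}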
| a6df5f0afafa2d2eaea39a91b6f1e2124d780107.cu | #include "cu_head.h"
#include "cu_svd_util.h"
#include <stdio.h>
__global__ void separateInterval(float *d_a, float*d_b, U32 n, U32 threads, float *d_up, U32 *d_ucnt, float lg, float ug, U32 n_lg, U32 n_ug, float tao)
{
__shared__ float s_a[MAX_HUGE_THREAD];
__shared__ float s_b[MAX_HUGE_THREAD];
__shared__ unsigned int converge;
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
float t1 = fmaxf(fabsf(lg), fabsf(ug)) * tao;
float lo = lg, up = ug, mid;
unsigned int lcnt = n_lg, ucnt = n_ug, mcnt;
unsigned int point = (tid + 1) * (ucnt - lcnt) / threads + lcnt;
if (tid == 0){
converge = 0;
d_up[0] = lg;
d_ucnt[0] = n_lg;
}
__syncthreads();
while (!converge){
mid = midPoint(lo, up);
mcnt = nEigvalsHuge(d_a, d_b, n, mid, tid, threads, s_a, s_b, 0);
mcnt = fminf(fmaxf(mcnt, lcnt), ucnt);
if (tid < threads) {
if (mcnt >= point) {
up = mid;
ucnt = mcnt;
} else {
lo = mid;
lcnt = mcnt;
}
}
converge = 1;
__syncthreads();
if (tid < threads){
if(ucnt != point || fabs(up-lo) >= fmaxf(t1, MIN_INTERVAL))
converge = 0;
}
__syncthreads();
}
if (tid < threads) {
d_up[tid+1] = up;
d_ucnt[tid+1] = ucnt;
}
}
// Bisection to find eigenvals of a matrix
__global__ void bisectKernelHuge(float *d_a, float *d_b, U32 n, float *d_eig, U32 *d_pos, float *d_lo, U32 *d_lcnt, float tao)
{
__shared__ float s_lo[2*MAX_HUGE_THREAD];
__shared__ float s_up[2*MAX_HUGE_THREAD];
__shared__ unsigned int s_lcnt[2*MAX_HUGE_THREAD];
__shared__ unsigned int s_ucnt[2*MAX_HUGE_THREAD];
__shared__ unsigned int s_cmpl[2*MAX_HUGE_THREAD + 2];
__shared__ unsigned int compact2;
__shared__ unsigned int converge;
__shared__ unsigned int nth_actv;
__shared__ unsigned int nth_comp;
__shared__ unsigned int addr;
__shared__ unsigned int cnt;
unsigned int *s_cmpl_exc = s_cmpl + 1;
float lo = 0.0f, up = 0.0f, mid = 0.0f;
unsigned int lcnt = 0, ucnt = 0, mcnt = 0;
unsigned int active2 = 0;
s_cmpl[threadIdx.x] = 0;
s_lo[threadIdx.x] = 0;
s_up[threadIdx.x] = 0;
s_lcnt[threadIdx.x] = 0;
s_ucnt[threadIdx.x] = 0;
__syncthreads();
if (threadIdx.x == 0) {
s_lo[0] = d_lo[blockIdx.x];
s_up[0] = d_lo[blockIdx.x+1];
s_lcnt[0] = d_lcnt[blockIdx.x];
s_ucnt[0] = d_lcnt[blockIdx.x+1];
addr = d_lcnt[blockIdx.x] - d_lcnt[0];
cnt = s_ucnt[0] - s_lcnt[0];
compact2 = 0;
nth_actv = 1;
nth_comp = 1;
}
while (true) {
if(threadIdx.x == 0)
converge = 1;
__syncthreads();
active2 = 0;
subdivideInterval(threadIdx.x, s_lo, s_up, s_lcnt, s_ucnt, nth_actv, lo, up, lcnt, ucnt, mid, converge);
__syncthreads();
if (converge == 1) break;
__syncthreads();
mcnt = nEigvalsHuge(d_a, d_b, n, mid, threadIdx.x, nth_actv, s_lo, s_up, (lo == up));
__syncthreads();
if (threadIdx.x < nth_actv) {
if (lo != up)
storeIntervals(threadIdx.x, nth_actv, s_lo, s_up, s_lcnt, s_ucnt, lo, mid, up, lcnt, mcnt, ucnt, tao, compact2, s_cmpl_exc, active2);
else
storeConverged(s_lo, s_up, s_lcnt, s_ucnt, lo, mid, up, lcnt, mcnt, ucnt, s_cmpl_exc, compact2, nth_actv, active2);
}
__syncthreads();
if (compact2 > 0) {
createIndices(s_cmpl_exc, nth_comp);
compactIntervals(s_lo, s_up, s_lcnt, s_ucnt, mid, up, mcnt, ucnt, s_cmpl, nth_actv, active2);
}
__syncthreads();
if (threadIdx.x == 0) {
nth_actv += s_cmpl[nth_actv];
nth_comp = ceilPow2(nth_actv);
compact2 = 0;
}
__syncthreads();
}
__syncthreads();
if (threadIdx.x < cnt) {
d_eig[addr + threadIdx.x] = s_lo[threadIdx.x];
d_pos[addr + threadIdx.x] = s_lcnt[threadIdx.x];
}
}
// Host driver: split [lo, up) into per-block sub-intervals with separateInterval, then bisect each sub-interval on the GPU and scatter the eigenvalues into p_val
void huge_eigval(float *p_val, float *p_a, float *p_b, U32 n, float lo, float up, U32 n_lo, U32 n_up, float tao)
{
// Cuda Event for timing
#ifdef TIME
float time;
cudaEvent_t start, stop;
cudaErrors(cudaEventCreate(&start));
cudaErrors(cudaEventCreate(&stop));
#endif
U32 size = n_up - n_lo;
U32 interv = (size -1) / MAX_HUGE_THREAD + 1;
// Allocate Memory on GPU
float * p_lo = new float[interv+1];
U32 * p_lcnt = new U32[interv+1];
float * p_eig = new float[size];
U32 * p_pos = new U32[size];
float * d_lo, * d_eig;
U32 * d_lcnt, * d_pos;
cudaErrors(cudaMalloc((void **)&d_lo, sizeof(float)*(interv+1)));
cudaErrors(cudaMalloc((void **)&d_lcnt, sizeof(U32)*(interv+1)));
cudaErrors(cudaMalloc((void **)&d_eig, sizeof(float)*size));
cudaErrors(cudaMalloc((void **)&d_pos, sizeof(U32)*size));
// cout << interv << " threads" << endl;
#ifdef TIME
cudaEventRecord(start, 0);
#endif
for (U32 i=0; i<iters; i++){
separateInterval<<<1,MAX_HUGE_THREAD>>>(p_a, p_b, n, interv, d_lo, d_lcnt, lo, up, n_lo, n_up, tao);
cudaErrors(cudaGetLastError());
cudaErrors(cudaDeviceSynchronize());
bisectKernelHuge<<<interv, MAX_HUGE_THREAD>>>(p_a, p_b, n, d_eig, d_pos, d_lo, d_lcnt, tao);
cudaErrors(cudaGetLastError());
cudaErrors(cudaDeviceSynchronize());
cudaErrors(cudaMemcpy(p_eig, d_eig, sizeof(float)*size, cudaMemcpyDeviceToHost));
cudaErrors(cudaMemcpy(p_pos, d_pos, sizeof(U32)*size, cudaMemcpyDeviceToHost));
for (U32 j=0; j<size; j++){
p_val[p_pos[j]] =p_eig[j];
}
}
#ifdef TIME
cudaEventRecord(stop, 0);
cudaErrors(cudaEventSynchronize(stop));
cudaErrors(cudaEventElapsedTime(&time, start, stop));
pthread_mutex_lock (&print);
cout << "Parall Eigenval Time : " << fixed<<setprecision(3) << time/iters << " ms" << endl;
pthread_mutex_unlock (&print);
#endif
cudaErrors(cudaFree(d_lo));
cudaErrors(cudaFree(d_lcnt));
cudaErrors(cudaFree(d_eig));
cudaErrors(cudaFree(d_pos));
delete[] p_lo;
delete[] p_lcnt;
delete[] p_eig;
delete[] p_pos;
}
|
7b0d4b280bdfdead32b19cda801665ea0f9d0909.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// cudamatrix/cu-kernels.cu
// Copyright 2009-2012 Karel Vesely
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABLITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.
// This file contains the CUDA code of the kernels, plus the ANSI-C wrappers
#include <cfloat>
#include "cu-kernels-ansi.h"
/***********************************************************************
* Generic __device__ functions
*/
template<typename Real>
__device__
static Real _sum_reduce(Real buffer[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (sum)
while(nTotalThreads > 1) {
int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint) {
// Get the shared value stored by another thread
Real temp = 0.0;
if(threadIdx.x+halfPoint < nTotalThreads) {
temp = buffer[threadIdx.x + halfPoint];
}
buffer[threadIdx.x] += temp;
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
}
// the result
return buffer[0];
}
template<typename Real>
__device__
static Real _max_reduce(Real buffer[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (max)
while(nTotalThreads > 1) {
int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint) {
// Get the shared value stored by another thread
Real temp = -1e20;
if(threadIdx.x+halfPoint < nTotalThreads) {
temp = buffer[threadIdx.x + halfPoint];
}
if (temp > buffer[threadIdx.x]) buffer[threadIdx.x] = temp;
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
}
// the result
return buffer[0];
}
template<typename Real>
__device__
static int32_cuda _max_id_reduce(Real val[], int32_cuda idx[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (get index of maximum)
while(nTotalThreads > 1) {
int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint) {
// Get the shared value stored by another thread
Real temp = -1e20;
if(threadIdx.x+halfPoint < nTotalThreads) {
temp = val[idx[threadIdx.x + halfPoint]];
}
if (temp > val[idx[threadIdx.x]]) idx[threadIdx.x]=idx[threadIdx.x + halfPoint];
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
}
// the result
return idx[0];
}
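// Usage sketch for the reducers above (illustrative; nothing in this file calls it): every thread of
// the block writes its private value into a shared buffer of blockDim.x entries, all threads call the
// reducer together, and afterwards the reduced value is valid in element 0 / the returned value.
template<typename Real>
__global__
static void _example_block_sum(const Real* in, Real* out) {
  __shared__ Real buffer[256];                 // assumes blockDim.x <= 256, as elsewhere in this file
  buffer[threadIdx.x] = in[blockIdx.x * blockDim.x + threadIdx.x];
  Real sum = _sum_reduce(buffer);              // every thread must participate in the reduction
  if (threadIdx.x == 0) out[blockIdx.x] = sum;
}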
/***********************************************************************
* CUDA kernels
* the functions are templated to have the float/double operations
*/
/*
* CuMatrix
*/
template<typename Real>
__global__
static void _set_const(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = value;
}
template<typename Real>
__global__
static void _add(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = mat[index] + value;
}
template<typename Real>
__global__
static void _scale(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = mat[index] * value;
}
template<typename Real>
__global__
static void _apply_log(Real* mat, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = log(mat[index]);
}
template<typename Real>
__global__
static void _mul_elements(Real* mat, const Real* A, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = mat[index] * A[index];
}
template<typename Real>
__global__
static void _mul_cols_vec(Real* mat, const Real* scale, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] *= scale[i];
}
template<typename Real>
__global__
static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] *= scale[j];
}
template<typename Real>
__global__
static void _div_rows_vec(Real* mat, const Real* vec_div, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( j >= d.rows ) return;
//invert divider in shared memory
__shared__ Real inv[16];
if(threadIdx.x==0) {
inv[threadIdx.y] = 1.0/vec_div[j];
}
__syncthreads();
//multiply elements
if ( i < d.cols && j < d.rows )
mat[index] *= inv[threadIdx.y];
}
template<typename Real>
__global__
static void _add_mat(Real alpha, const Real* A, Real beta, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
dst[index] = alpha*A[index] + beta*dst[index];
}
template<typename Real>
__global__
static void _add_vec_to_cols(Real alpha, const Real* col, Real beta, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
dst[index] = alpha*col[j] + beta*dst[index];
}
template<typename Real>
__global__
static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
dst[index] = alpha*row[i] + beta*dst[index];
}
template<typename Real>
__global__
static void _apply_mask(Real* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*dmat.stride;
int32_cuda index2 = i + j*dmask.stride;
if ( i < dmat.cols && j < dmat.rows )
if(mask[index2] == 0) mat[index] = 0;
}
/*
* CuVector
*/
template<typename Real>
__global__
static void _add_row_sum_mat(const Real* mat, Real* vec_sum, MatrixDim d) {
int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y; //col
int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x; //row
if(blockIdx.x > 0) return;
if(blockDim.y > 1) return;
__shared__ Real row_data[256];
//copy the input to row_data
row_data[j] = mat[i+j*d.stride];
__syncthreads();
//get the sum
Real sum = _sum_reduce(row_data);
__syncthreads();
//add to previously accumulated sum
if(threadIdx.x == 0)
vec_sum[i] += sum;
}
template<typename Real>
__global__
static void _add_col_sum_mat(const Real* mat, Real* vec_sum, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; //row
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; //col
if(blockIdx.x > 0) return;
if(blockDim.y > 1) return;
__shared__ Real row_data[256];
//copy the input to row_data
row_data[i] = mat[i+j*d.stride];
__syncthreads();
//get the sum
Real sum = _sum_reduce(row_data);
__syncthreads();
//add to previously accumulated sum
if(threadIdx.x == 0)
vec_sum[j] += sum;
}
template<typename Real>
__global__
static void _invert_elements(Real* data, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
data[index] = 1.0/data[index];
}
/*
* cu::
*/
template<typename Real>
__global__
static void _sigmoid(Real*y, const Real*x, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( i < d.cols && j < d.rows ) {
Real res = 1.0 / (1.0 + exp(-x[index]));
y[index] = res;
}
}
template<typename Real>
__global__
static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( i < d.cols && j < d.rows )
eout[index] = y[index]*(1.0-y[index]) * e[index];
}
template<typename Real>
__global__
static void _tanh(Real*y, const Real*x, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( i < d.cols && j < d.rows ) {
Real exp_2x = exp(2.0*x[index]);
Real res;
if(isinf(exp_2x)) {
res = 1.0;
} else {
res = (exp_2x - 1.0) / (exp_2x + 1.0);
}
y[index] = res;
}
}
template<typename Real>
__global__
static void _diff_tanh(Real*eout, const Real*e, const Real*y, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( i < d.cols && j < d.rows )
eout[index] = (1.0 - y[index]*y[index]) * e[index];
}
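// Note on _softmax below: each thread handles one matrix row serially; the per-row maximum is
// subtracted before exponentiation so that exp() cannot overflow, and the row is then normalized
// by its sum.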
template<typename Real>
__global__
static void _softmax(Real*y, const Real*x, MatrixDim d) {
int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x;
if(j >= d.rows) return;
//copy to output and find max...
double max = -1e20;
double sum = 0.0;
for(int32_cuda i=0; i<d.cols; i++) {
if(max < x[i+j*d.stride]) max = x[i+j*d.stride];
y[i+j*d.stride] = x[i+j*d.stride];
}
//subtract max, apply exp, sum up...
for(int32_cuda i=0; i<d.cols; i++) {
y[i+j*d.stride] = exp(y[i+j*d.stride] - max);
sum += y[i+j*d.stride];
}
//normalize by sum...
for(int32_cuda i=0; i<d.cols; i++) {
y[i+j*d.stride] /= sum;
}
}
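// Note on _splice below: output element (i, j) is read from input element
// (i % d_in.cols, j + off[i / d_in.cols]), with the source row clamped to [0, d_in.rows - 1]
// at the edges.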
template<typename Real>
__global__
static void _splice(Real* y, const Real* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d_out.stride;
if( i < d_out.cols && j < d_out.rows ) {
int32_cuda src_col = i % d_in.cols;
int32_cuda src_row = j + off[i / d_in.cols];
if(src_row < 0) src_row = 0;
if(src_row >= d_in.rows) src_row = d_in.rows-1;
y[index] = x[src_col + src_row*d_in.stride];
}
}
template<typename Real>
__global__
static void _copy(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d_out.stride;
if( i < d_out.cols && j < d_out.rows ) {
int32_cuda src_col = copy_from[i];
if(src_col >= 0 && src_col < d_in.cols) {
y[index] = x[src_col + j*d_in.stride];
} else {
y[index] = 1.0/0.0;
}
}
}
template<typename Real>
__global__
static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d_out.stride;
if( i < d_out.cols && j < d_out.rows ) {
int32_cuda src_row = copy_from[j];
y[index] = x[i + src_row*d_in.stride];
}
}
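// Note on _regularize_l1 below: each nonzero weight is shrunk toward zero by l1 (with the weight's
// sign); if the simulated SGD step combined with that shrink would flip the weight's sign, the
// weight and its gradient are clipped to zero instead.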
template<typename Real>
__global__
static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows ) {
if(wei[index]==0.0) return; //skip L1 if zero weight!
Real l1_signed = l1;
if(wei[index] < 0.0) //flip sign
l1_signed = -l1;
Real before = wei[index];
Real after = wei[index] -lr*grad[index] -l1_signed;//simulate update
if((after > 0.0) ^ (before > 0.0)) { //sign changed?
wei[index] = 0.0;
grad[index] = 0.0;
} else {
wei[index] -= l1_signed;
}
}
}
template<typename Real>
__global__
static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
if(blockIdx.x > 0) return;
if(blockDim.y != 1) return;
__shared__ Real value[256];
__shared__ int32_cuda index[256];
//copy to shared memory
value[threadIdx.x] = mat[i+j*d.stride];
index[threadIdx.x] = threadIdx.x;
__syncthreads();
//get the id of the max value
int32_cuda out_max = _max_id_reduce(value,index);
__syncthreads();
//see if it's bigger value
if(threadIdx.x == 0) {
if(vec_val[j] <= mat[out_max+j*d.stride]) {
vec_val[j] = mat[out_max+j*d.stride];
vec_id[j] = voff+out_max;
}
}
}
template<typename Real>
__global__
static void _diff_xent(const int32_cuda* vec_tgt, Real* mat_net_out, Real* vec_log_post, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
if(i>0) return;
if(j<d.rows) {
int32_cuda index = vec_tgt[j] + j*d.stride;
Real value = mat_net_out[index];
if(value < 1e-20) value = 1e-20;
vec_log_post[j] = log(value);
mat_net_out[index] -= 1.0;
}
}
template<typename Real>
__global__
static void _softmax_part(const Real* X, const int32_cuda* vec_ids, Real* Y, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows ) {
Real tmp = X[index] - X[vec_ids[j] + j*d.stride];
Y[index] = exp(tmp);
}
}
/***********************************************************************
* ANSI-C wrappers of CUDA kernels
*/
/*
* "int32"
*/
void cudaI32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) {
hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
/*
* "float"
*/
/*
* CuMatrix
*/
void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaF_add(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaF_scale(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
hipLaunchKernelGGL(( _scale), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d);
}
void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim d) {
hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,d);
}
void cudaF_mul_cols_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) {
hipLaunchKernelGGL(( _mul_cols_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) {
hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaF_div_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* vec_div, MatrixDim d) {
hipLaunchKernelGGL(( _div_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat, vec_div, d);
}
void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* A, float beta, float* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,A,beta,dst,d);
}
void cudaF_add_vec_to_cols(dim3 Gr, dim3 Bl, float alpha, const float* col, float beta, float* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_vec_to_cols), dim3(Gr),dim3(Bl), 0, 0, alpha,col,beta,dst,d);
}
void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row, float beta, float* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d);
}
// CURRENTLY UNUSED...
void cudaF_apply_mask(dim3 Gr, dim3 Bl, float* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) {
hipLaunchKernelGGL(( _apply_mask), dim3(Gr),dim3(Bl), 0, 0, mat,mask,dmat,dmask);
}
/*
* CuVector
*/
void cudaF_add_row_sum_mat(dim3 Gr, dim3 Bl, const float* mat, float* vec_sum, MatrixDim d) {
hipLaunchKernelGGL(( _add_row_sum_mat), dim3(Gr),dim3(Bl), 0, 0, mat,vec_sum,d);
}
void cudaF_add_col_sum_mat(dim3 Gr, dim3 Bl, const float* mat, float* vec_sum, MatrixDim d) {
hipLaunchKernelGGL(( _add_col_sum_mat), dim3(Gr),dim3(Bl), 0, 0, mat,vec_sum,d);
}
void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d) {
hipLaunchKernelGGL(( _invert_elements), dim3(Gr),dim3(Bl), 0, 0, data, d);
}
/*
* cu::
*/
void cudaF_sigmoid (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d) {
hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d);
}
void cudaF_diff_sigmoid (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d) {
hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d);
}
void cudaF_tanh (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d) {
hipLaunchKernelGGL(( _tanh), dim3(Gr),dim3(Bl), 0, 0, y, x, d);
}
void cudaF_diff_tanh (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d) {
hipLaunchKernelGGL(( _diff_tanh), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d);
}
void cudaF_softmax (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d) {
hipLaunchKernelGGL(( _softmax), dim3(Gr),dim3(Bl), 0, 0, y, x, d);
}
void cudaF_softmax_part(dim3 Gr, dim3 Bl, const float* X, const int32_cuda* vec_ids, float* Y, MatrixDim d) {
hipLaunchKernelGGL(( _softmax_part), dim3(Gr),dim3(Bl), 0, 0, X,vec_ids,Y,d);
}
void cudaF_splice(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _splice), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in);
}
void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1, float lr, MatrixDim d) {
hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d);
}
void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, voff, d);
}
void cudaF_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, float* mat_net_out, float* vec_log_post, MatrixDim d) {
hipLaunchKernelGGL(( _diff_xent), dim3(Gr),dim3(Bl), 0, 0, vec_tgt,mat_net_out,vec_log_post,d);
}
/*
* "double"
*/
/*
* CuMatrix
*/
void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_add(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_scale(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
hipLaunchKernelGGL(( _scale), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d);
}
void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim d) {
hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,d);
}
void cudaD_mul_cols_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) {
hipLaunchKernelGGL(( _mul_cols_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) {
hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaD_div_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* vec_div, MatrixDim d) {
hipLaunchKernelGGL(( _div_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat, vec_div, d);
}
void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* A, double beta, double* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,A,beta,dst,d);
}
void cudaD_add_vec_to_cols(dim3 Gr, dim3 Bl, double alpha, const double* col, double beta, double* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_vec_to_cols), dim3(Gr),dim3(Bl), 0, 0, alpha,col,beta,dst,d);
}
void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row, double beta, double* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d);
}
// CURRENTLY UNUSED...
void cudaD_apply_mask(dim3 Gr, dim3 Bl, double* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) {
hipLaunchKernelGGL(( _apply_mask), dim3(Gr),dim3(Bl), 0, 0, mat,mask,dmat,dmask);
}
/*
* CuVector
*/
void cudaD_add_row_sum_mat(dim3 Gr, dim3 Bl, const double* mat, double* vec_sum, MatrixDim d) {
hipLaunchKernelGGL(( _add_row_sum_mat), dim3(Gr),dim3(Bl), 0, 0, mat,vec_sum,d);
}
void cudaD_add_col_sum_mat(dim3 Gr, dim3 Bl, const double* mat, double* vec_sum, MatrixDim d) {
hipLaunchKernelGGL(( _add_col_sum_mat), dim3(Gr),dim3(Bl), 0, 0, mat,vec_sum,d);
}
void cudaD_invert_elements(dim3 Gr, dim3 Bl, double* data, MatrixDim d) {
hipLaunchKernelGGL(( _invert_elements), dim3(Gr),dim3(Bl), 0, 0, data, d);
}
/*
* cu::
*/
void cudaD_sigmoid (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d) {
hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d);
}
void cudaD_diff_sigmoid (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d) {
hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d);
}
void cudaD_tanh (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d) {
hipLaunchKernelGGL(( _tanh), dim3(Gr),dim3(Bl), 0, 0, y, x, d);
}
void cudaD_diff_tanh (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d) {
hipLaunchKernelGGL(( _diff_tanh), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d);
}
void cudaD_softmax (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d) {
hipLaunchKernelGGL(( _softmax), dim3(Gr),dim3(Bl), 0, 0, y, x, d);
}
void cudaD_softmax_part(dim3 Gr, dim3 Bl, const double* X, const int32_cuda* vec_ids, double* Y, MatrixDim d) {
hipLaunchKernelGGL(( _softmax_part), dim3(Gr),dim3(Bl), 0, 0, X,vec_ids,Y,d);
}
void cudaD_splice(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _splice), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in);
}
void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1, double lr, MatrixDim d) {
hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d);
}
void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, voff, d);
}
void cudaD_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, double* mat_net_out, double* vec_log_post, MatrixDim d) {
hipLaunchKernelGGL(( _diff_xent), dim3(Gr),dim3(Bl), 0, 0, vec_tgt,mat_net_out,vec_log_post,d);
}
| 7b0d4b280bdfdead32b19cda801665ea0f9d0909.cu | // cudamatrix/cu-kernels.cu
// Copyright 2009-2012 Karel Vesely
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABLITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.
// In this file is the CUDA code of the CUDA kernels, plus the ANSI-C wrappers
#include <cfloat>
#include "cu-kernels-ansi.h"
/***********************************************************************
* Generic __device__ functions
*/
template<typename Real>
__device__
static Real _sum_reduce(Real buffer[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (sum)
while(nTotalThreads > 1) {
int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint) {
// Get the shared value stored by another thread
Real temp = 0.0;
if(threadIdx.x+halfPoint < nTotalThreads) {
temp = buffer[threadIdx.x + halfPoint];
}
buffer[threadIdx.x] += temp;
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
}
// the result
return buffer[0];
}
template<typename Real>
__device__
static Real _max_reduce(Real buffer[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (max)
while(nTotalThreads > 1) {
int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint) {
// Get the shared value stored by another thread
Real temp = -1e20;
if(threadIdx.x+halfPoint < nTotalThreads) {
temp = buffer[threadIdx.x + halfPoint];
}
if (temp > buffer[threadIdx.x]) buffer[threadIdx.x] = temp;
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
}
// the result
return buffer[0];
}
template<typename Real>
__device__
static int32_cuda _max_id_reduce(Real val[], int32_cuda idx[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (get index of maximum)
while(nTotalThreads > 1) {
int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint) {
// Get the shared value stored by another thread
Real temp = -1e20;
if(threadIdx.x+halfPoint < nTotalThreads) {
temp = val[idx[threadIdx.x + halfPoint]];
}
if (temp > val[idx[threadIdx.x]]) idx[threadIdx.x]=idx[threadIdx.x + halfPoint];
}
__syncthreads();
nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
}
// the result
return idx[0];
}
/***********************************************************************
* CUDA kernels
* the functions are templated to have the float/double operations
*/
/*
* CuMatrix
*/
template<typename Real>
__global__
static void _set_const(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = value;
}
template<typename Real>
__global__
static void _add(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = mat[index] + value;
}
template<typename Real>
__global__
static void _scale(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = mat[index] * value;
}
template<typename Real>
__global__
static void _apply_log(Real* mat, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = log(mat[index]);
}
template<typename Real>
__global__
static void _mul_elements(Real* mat, const Real* A, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] = mat[index] * A[index];
}
template<typename Real>
__global__
static void _mul_cols_vec(Real* mat, const Real* scale, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] *= scale[i];
}
template<typename Real>
__global__
static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
mat[index] *= scale[j];
}
template<typename Real>
__global__
static void _div_rows_vec(Real* mat, const Real* vec_div, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( j >= d.rows ) return;
//invert divider in shared memory
__shared__ Real inv[16];
if(threadIdx.x==0) {
inv[threadIdx.y] = 1.0/vec_div[j];
}
__syncthreads();
//multiply elements
if ( i < d.cols && j < d.rows )
mat[index] *= inv[threadIdx.y];
}
template<typename Real>
__global__
static void _add_mat(Real alpha, const Real* A, Real beta, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
dst[index] = alpha*A[index] + beta*dst[index];
}
template<typename Real>
__global__
static void _add_vec_to_cols(Real alpha, const Real* col, Real beta, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
dst[index] = alpha*col[j] + beta*dst[index];
}
template<typename Real>
__global__
static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
dst[index] = alpha*row[i] + beta*dst[index];
}
template<typename Real>
__global__
static void _apply_mask(Real* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*dmat.stride;
int32_cuda index2 = i + j*dmask.stride;
if ( i < dmat.cols && j < dmat.rows )
if(mask[index2] == 0) mat[index] = 0;
}
/*
* CuVector
*/
template<typename Real>
__global__
static void _add_row_sum_mat(const Real* mat, Real* vec_sum, MatrixDim d) {
int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y; //col
int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x; //row
if(blockIdx.x > 0) return;
if(blockDim.y > 1) return;
__shared__ Real row_data[256];
//copy the input to row_data
row_data[j] = mat[i+j*d.stride];
__syncthreads();
//get the sum
Real sum = _sum_reduce(row_data);
__syncthreads();
//add to previously accumulated sum
if(threadIdx.x == 0)
vec_sum[i] += sum;
}
template<typename Real>
__global__
static void _add_col_sum_mat(const Real* mat, Real* vec_sum, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; //row
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; //col
if(blockIdx.x > 0) return;
if(blockDim.y > 1) return;
__shared__ Real row_data[256];
//copy the input to row_data
row_data[i] = mat[i+j*d.stride];
__syncthreads();
//get the sum
Real sum = _sum_reduce(row_data);
__syncthreads();
//add to previously accumulated sum
if(threadIdx.x == 0)
vec_sum[j] += sum;
}
template<typename Real>
__global__
static void _invert_elements(Real* data, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows )
data[index] = 1.0/data[index];
}
/*
* cu::
*/
template<typename Real>
__global__
static void _sigmoid(Real*y, const Real*x, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( i < d.cols && j < d.rows ) {
Real res = 1.0 / (1.0 + exp(-x[index]));
y[index] = res;
}
}
template<typename Real>
__global__
static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( i < d.cols && j < d.rows )
eout[index] = y[index]*(1.0-y[index]) * e[index];
}
template<typename Real>
__global__
static void _tanh(Real*y, const Real*x, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( i < d.cols && j < d.rows ) {
Real exp_2x = exp(2.0*x[index]);
Real res;
if(isinf(exp_2x)) {
res = 1.0;
} else {
res = (exp_2x - 1.0) / (exp_2x + 1.0);
}
y[index] = res;
}
}
template<typename Real>
__global__
static void _diff_tanh(Real*eout, const Real*e, const Real*y, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if( i < d.cols && j < d.rows )
eout[index] = (1.0 - y[index]*y[index]) * e[index];
}
template<typename Real>
__global__
static void _softmax(Real*y, const Real*x, MatrixDim d) {
int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x;
if(j >= d.rows) return;
//copy to output and find max...
double max = -1e20;
double sum = 0.0;
for(int32_cuda i=0; i<d.cols; i++) {
if(max < x[i+j*d.stride]) max = x[i+j*d.stride];
y[i+j*d.stride] = x[i+j*d.stride];
}
//subtract max, apply exp, sum up...
for(int32_cuda i=0; i<d.cols; i++) {
y[i+j*d.stride] = exp(y[i+j*d.stride] - max);
sum += y[i+j*d.stride];
}
//normalize by sum...
for(int32_cuda i=0; i<d.cols; i++) {
y[i+j*d.stride] /= sum;
}
}
template<typename Real>
__global__
static void _splice(Real* y, const Real* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d_out.stride;
if( i < d_out.cols && j < d_out.rows ) {
int32_cuda src_col = i % d_in.cols;
int32_cuda src_row = j + off[i / d_in.cols];
if(src_row < 0) src_row = 0;
if(src_row >= d_in.rows) src_row = d_in.rows-1;
y[index] = x[src_col + src_row*d_in.stride];
}
}
template<typename Real>
__global__
static void _copy(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d_out.stride;
if( i < d_out.cols && j < d_out.rows ) {
int32_cuda src_col = copy_from[i];
if(src_col >= 0 && src_col < d_in.cols) {
y[index] = x[src_col + j*d_in.stride];
} else {
y[index] = 1.0/0.0;
}
}
}
template<typename Real>
__global__
static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d_out.stride;
if( i < d_out.cols && j < d_out.rows ) {
int32_cuda src_row = copy_from[j];
y[index] = x[i + src_row*d_in.stride];
}
}
template<typename Real>
__global__
static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows ) {
if(wei[index]==0.0) return; //skip L1 if zero weight!
Real l1_signed = l1;
if(wei[index] < 0.0) //flip sign
l1_signed = -l1;
Real before = wei[index];
Real after = wei[index] -lr*grad[index] -l1_signed;//simulate update
if((after > 0.0) ^ (before > 0.0)) { //sign changed?
wei[index] = 0.0;
grad[index] = 0.0;
} else {
wei[index] -= l1_signed;
}
}
}
template<typename Real>
__global__
static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
if(blockIdx.x > 0) return;
if(blockDim.y != 1) return;
__shared__ Real value[256];
__shared__ int32_cuda index[256];
//copy to shared memory
value[threadIdx.x] = mat[i+j*d.stride];
index[threadIdx.x] = threadIdx.x;
__syncthreads();
//get the id of the max value
int32_cuda out_max = _max_id_reduce(value,index);
__syncthreads();
//see if it's bigger value
if(threadIdx.x == 0) {
if(vec_val[j] <= mat[out_max+j*d.stride]) {
vec_val[j] = mat[out_max+j*d.stride];
vec_id[j] = voff+out_max;
}
}
}
template<typename Real>
__global__
static void _diff_xent(const int32_cuda* vec_tgt, Real* mat_net_out, Real* vec_log_post, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
if(i>0) return;
if(j<d.rows) {
int32_cuda index = vec_tgt[j] + j*d.stride;
Real value = mat_net_out[index];
if(value < 1e-20) value = 1e-20;
vec_log_post[j] = log(value);
mat_net_out[index] -= 1.0;
}
}
template<typename Real>
__global__
static void _softmax_part(const Real* X, const int32_cuda* vec_ids, Real* Y, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j*d.stride;
if ( i < d.cols && j < d.rows ) {
Real tmp = X[index] - X[vec_ids[j] + j*d.stride];
Y[index] = exp(tmp);
}
}
/***********************************************************************
* ANSI-C wrappers of CUDA kernels
*/
/*
* "int32"
*/
void cudaI32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) {
_set_const<<<Gr,Bl>>>(mat,value,d);
}
/*
* "float"
*/
/*
* CuMatrix
*/
void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
_set_const<<<Gr,Bl>>>(mat,value,d);
}
void cudaF_add(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
_add<<<Gr,Bl>>>(mat,value,d);
}
void cudaF_scale(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
_scale<<<Gr,Bl>>>(mat,value,d);
}
void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
_apply_log<<<Gr,Bl>>>(mat,d);
}
void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim d) {
_mul_elements<<<Gr,Bl>>>(mat,A,d);
}
void cudaF_mul_cols_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) {
_mul_cols_vec<<<Gr,Bl>>>(mat,scale,d);
}
void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) {
_mul_rows_vec<<<Gr,Bl>>>(mat,scale,d);
}
void cudaF_div_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* vec_div, MatrixDim d) {
_div_rows_vec<<<Gr,Bl>>>(mat, vec_div, d);
}
void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* A, float beta, float* dst, MatrixDim d) {
_add_mat<<<Gr,Bl>>>(alpha,A,beta,dst,d);
}
void cudaF_add_vec_to_cols(dim3 Gr, dim3 Bl, float alpha, const float* col, float beta, float* dst, MatrixDim d) {
_add_vec_to_cols<<<Gr,Bl>>>(alpha,col,beta,dst,d);
}
void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row, float beta, float* dst, MatrixDim d) {
_add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d);
}
// CURRENTLY UNUSED...
void cudaF_apply_mask(dim3 Gr, dim3 Bl, float* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) {
_apply_mask<<<Gr,Bl>>>(mat,mask,dmat,dmask);
}
/*
* CuVector
*/
void cudaF_add_row_sum_mat(dim3 Gr, dim3 Bl, const float* mat, float* vec_sum, MatrixDim d) {
_add_row_sum_mat<<<Gr,Bl>>>(mat,vec_sum,d);
}
void cudaF_add_col_sum_mat(dim3 Gr, dim3 Bl, const float* mat, float* vec_sum, MatrixDim d) {
_add_col_sum_mat<<<Gr,Bl>>>(mat,vec_sum,d);
}
void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d) {
_invert_elements<<<Gr,Bl>>>(data, d);
}
/*
* cu::
*/
void cudaF_sigmoid (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d) {
_sigmoid<<<Gr,Bl>>>(y, x, d);
}
void cudaF_diff_sigmoid (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d) {
_diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d);
}
void cudaF_tanh (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d) {
_tanh<<<Gr,Bl>>>(y, x, d);
}
void cudaF_diff_tanh (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d) {
_diff_tanh<<<Gr,Bl>>>(eout, e, y, d);
}
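// NOTE: the softmax wrappers use a 1-D launch configuration (size_t grid/block); one thread processes an entire matrix row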
void cudaF_softmax (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d) {
_softmax<<<Gr,Bl>>>(y, x, d);
}
void cudaF_softmax_part(dim3 Gr, dim3 Bl, const float* X, const int32_cuda* vec_ids, float* Y, MatrixDim d) {
_softmax_part<<<Gr,Bl>>>(X,vec_ids,Y,d);
}
void cudaF_splice(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
_splice<<<Gr,Bl>>>(y,x,off,d_out,d_in);
}
void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
_copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
_randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1, float lr, MatrixDim d) {
_regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d);
}
void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
_find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, voff, d);
}
void cudaF_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, float* mat_net_out, float* vec_log_post, MatrixDim d) {
_diff_xent<<<Gr,Bl>>>(vec_tgt,mat_net_out,vec_log_post,d);
}
/*
* "double"
*/
/*
* CuMatrix
*/
void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
_set_const<<<Gr,Bl>>>(mat,value,d);
}
void cudaD_add(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
_add<<<Gr,Bl>>>(mat,value,d);
}
void cudaD_scale(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
_scale<<<Gr,Bl>>>(mat,value,d);
}
void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
_apply_log<<<Gr,Bl>>>(mat,d);
}
void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim d) {
_mul_elements<<<Gr,Bl>>>(mat,A,d);
}
void cudaD_mul_cols_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) {
_mul_cols_vec<<<Gr,Bl>>>(mat,scale,d);
}
void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) {
_mul_rows_vec<<<Gr,Bl>>>(mat,scale,d);
}
void cudaD_div_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* vec_div, MatrixDim d) {
_div_rows_vec<<<Gr,Bl>>>(mat, vec_div, d);
}
void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* A, double beta, double* dst, MatrixDim d) {
_add_mat<<<Gr,Bl>>>(alpha,A,beta,dst,d);
}
void cudaD_add_vec_to_cols(dim3 Gr, dim3 Bl, double alpha, const double* col, double beta, double* dst, MatrixDim d) {
_add_vec_to_cols<<<Gr,Bl>>>(alpha,col,beta,dst,d);
}
void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row, double beta, double* dst, MatrixDim d) {
_add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d);
}
// CURRENTLY UNUSED...
void cudaD_apply_mask(dim3 Gr, dim3 Bl, double* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) {
_apply_mask<<<Gr,Bl>>>(mat,mask,dmat,dmask);
}
/*
* CuVector
*/
void cudaD_add_row_sum_mat(dim3 Gr, dim3 Bl, const double* mat, double* vec_sum, MatrixDim d) {
_add_row_sum_mat<<<Gr,Bl>>>(mat,vec_sum,d);
}
void cudaD_add_col_sum_mat(dim3 Gr, dim3 Bl, const double* mat, double* vec_sum, MatrixDim d) {
_add_col_sum_mat<<<Gr,Bl>>>(mat,vec_sum,d);
}
void cudaD_invert_elements(dim3 Gr, dim3 Bl, double* data, MatrixDim d) {
_invert_elements<<<Gr,Bl>>>(data, d);
}
/*
* cu::
*/
void cudaD_sigmoid (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d) {
_sigmoid<<<Gr,Bl>>>(y, x, d);
}
void cudaD_diff_sigmoid (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d) {
_diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d);
}
void cudaD_tanh (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d) {
_tanh<<<Gr,Bl>>>(y, x, d);
}
void cudaD_diff_tanh (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d) {
_diff_tanh<<<Gr,Bl>>>(eout, e, y, d);
}
void cudaD_softmax (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d) {
_softmax<<<Gr,Bl>>>(y, x, d);
}
void cudaD_softmax_part(dim3 Gr, dim3 Bl, const double* X, const int32_cuda* vec_ids, double* Y, MatrixDim d) {
_softmax_part<<<Gr,Bl>>>(X,vec_ids,Y,d);
}
void cudaD_splice(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
_splice<<<Gr,Bl>>>(y,x,off,d_out,d_in);
}
void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
_copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
_randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1, double lr, MatrixDim d) {
_regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d);
}
void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) {
_find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, voff, d);
}
void cudaD_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, double* mat_net_out, double* vec_log_post, MatrixDim d) {
_diff_xent<<<Gr,Bl>>>(vec_tgt,mat_net_out,vec_log_post,d);
}
|
26e394b6defdc117bde5e07838e5a4848f57eb53.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void print_kernel() {
printf("Hello from block %d, thread %d\n", blockIdx.x, threadIdx.x);
}
int main() {
hipLaunchKernelGGL(( print_kernel), dim3(2), dim3(2), 0, 0, );
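    // block until the kernel finishes so its device-side printf output is flushed before main returns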
hipDeviceSynchronize();
}
| 26e394b6defdc117bde5e07838e5a4848f57eb53.cu | #include <stdio.h>
__global__ void print_kernel() {
printf("Hello from block %d, thread %d\n", blockIdx.x, threadIdx.x);
}
int main() {
print_kernel<<<2, 2>>>();
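    // block until the kernel finishes so its device-side printf output is flushed before main returns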
cudaDeviceSynchronize();
}
|
d9535724830b0b6a71cd45b348dd4f46c83521d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/roi_pool_op.h"
namespace caffe2 {
namespace {
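// Type-generic atomic add used by the ROI pooling backward pass to accumulate
// gradients; only the float specialization is provided here.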
template <typename T>
inline __device__ T gpu_atomic_add(const T val, T* address);
template <>
inline __device__ float gpu_atomic_add(const float val, float* address) {
return atomicAdd(address, val);
}
template <typename T>
__global__ void ROIPoolForward(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const T* bottom_rois,
T* top_data,
int* argmax_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
int roi_start_w = roundf(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = roundf(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = roundf(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = roundf(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
T maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (offset_bottom_data[bottom_index] > maxval) {
maxval = offset_bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
if (argmax_data) {
argmax_data[index] = maxidx;
}
}
}
template <typename T>
__global__ void ROIPoolBackward(
const int nthreads,
const T* top_diff,
const int* argmax_data,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
int bottom_offset = (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
T* offset_bottom_diff = bottom_diff + bottom_offset;
const int* offset_argmax_data = argmax_data + top_offset;
int argmax = offset_argmax_data[ph * pooled_width + pw];
if (argmax != -1) {
gpu_atomic_add(
static_cast<T>(offset_top_diff[ph * pooled_width + pw]),
offset_bottom_diff + argmax);
}
}
}
} // namespace
template <>
bool RoIPoolOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto* Y = Output(0); // RoI pooled data
auto* A = is_test_ ? nullptr : Output(1); // argmaxes
// Handle empty rois
if (R.size() == 0) {
Y->Resize(0, X.dim32(1), pooled_height_, pooled_width_);
// mutable_data calls are needed to allocate the tensors
Y->template mutable_data<float>();
if (!is_test_) {
A->Resize(Y->sizes());
A->template mutable_data<int>();
}
return true;
}
Y->Resize(R.dim32(0), X.dim32(1), pooled_height_, pooled_width_);
if (!is_test_) {
A->Resize(Y->sizes());
}
int output_size = Y->size();
int* argmax_data = is_test_ ? nullptr : A->template mutable_data<int>();
hipLaunchKernelGGL(( ROIPoolForward<float>)
, dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
X.data<float>(),
spatial_scale_,
X.dim32(1),
X.dim32(2),
X.dim32(3),
pooled_height_,
pooled_width_,
R.data<float>(),
Y->template mutable_data<float>(),
argmax_data);
return true;
}
template <>
bool RoIPoolGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto& A = Input(2); // argmaxes
auto& dY = Input(3); // Gradient of net w.r.t. output of "forward" op
// (aka "gradOutput")
auto* dX = Output(
0, X.sizes(), at::dtype<float>()); // Gradient of net w.r.t. input to
// "forward" op (aka "gradInput")
// Must zero-out dX before accumulating gradients
math::Set<float, CUDAContext>(
dX->size(), 0.f, dX->template mutable_data<float>(), &context_);
if (dY.size() > 0) { // Handle possibly empty gradient if there were no rois
hipLaunchKernelGGL(( ROIPoolBackward<float>)
, dim3(CAFFE_GET_BLOCKS(dY.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
dY.size(),
dY.data<float>(),
A.data<int>(),
R.dim32(0),
spatial_scale_,
X.dim32(1),
X.dim32(2),
X.dim32(3),
pooled_height_,
pooled_width_,
dX->template mutable_data<float>(),
R.data<float>());
}
return true;
}
REGISTER_CUDA_OPERATOR(RoIPool, RoIPoolOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(RoIPoolGradient, RoIPoolGradientOp<float, CUDAContext>);
} // namespace caffe2
| d9535724830b0b6a71cd45b348dd4f46c83521d2.cu | #include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/roi_pool_op.h"
namespace caffe2 {
namespace {
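// Type-generic atomic add used by the ROI pooling backward pass to accumulate
// gradients; only the float specialization is provided here.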
template <typename T>
inline __device__ T gpu_atomic_add(const T val, T* address);
template <>
inline __device__ float gpu_atomic_add(const float val, float* address) {
return atomicAdd(address, val);
}
template <typename T>
__global__ void ROIPoolForward(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const T* bottom_rois,
T* top_data,
int* argmax_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
int roi_start_w = roundf(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = roundf(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = roundf(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = roundf(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
T maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (offset_bottom_data[bottom_index] > maxval) {
maxval = offset_bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
if (argmax_data) {
argmax_data[index] = maxidx;
}
}
}
template <typename T>
__global__ void ROIPoolBackward(
const int nthreads,
const T* top_diff,
const int* argmax_data,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
int bottom_offset = (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
T* offset_bottom_diff = bottom_diff + bottom_offset;
const int* offset_argmax_data = argmax_data + top_offset;
int argmax = offset_argmax_data[ph * pooled_width + pw];
if (argmax != -1) {
gpu_atomic_add(
static_cast<T>(offset_top_diff[ph * pooled_width + pw]),
offset_bottom_diff + argmax);
}
}
}
} // namespace
template <>
bool RoIPoolOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto* Y = Output(0); // RoI pooled data
auto* A = is_test_ ? nullptr : Output(1); // argmaxes
// Handle empty rois
if (R.size() == 0) {
Y->Resize(0, X.dim32(1), pooled_height_, pooled_width_);
// mutable_data calls are needed to allocate the tensors
Y->template mutable_data<float>();
if (!is_test_) {
A->Resize(Y->sizes());
A->template mutable_data<int>();
}
return true;
}
Y->Resize(R.dim32(0), X.dim32(1), pooled_height_, pooled_width_);
if (!is_test_) {
A->Resize(Y->sizes());
}
int output_size = Y->size();
int* argmax_data = is_test_ ? nullptr : A->template mutable_data<int>();
ROIPoolForward<float>
<<<CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
X.data<float>(),
spatial_scale_,
X.dim32(1),
X.dim32(2),
X.dim32(3),
pooled_height_,
pooled_width_,
R.data<float>(),
Y->template mutable_data<float>(),
argmax_data);
return true;
}
template <>
bool RoIPoolGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto& A = Input(2); // argmaxes
auto& dY = Input(3); // Gradient of net w.r.t. output of "forward" op
// (aka "gradOutput")
auto* dX = Output(
0, X.sizes(), at::dtype<float>()); // Gradient of net w.r.t. input to
// "forward" op (aka "gradInput")
// Must zero-out dX before accumulating gradients
math::Set<float, CUDAContext>(
dX->size(), 0.f, dX->template mutable_data<float>(), &context_);
if (dY.size() > 0) { // Handle possibly empty gradient if there were no rois
ROIPoolBackward<float>
<<<CAFFE_GET_BLOCKS(dY.size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
dY.size(),
dY.data<float>(),
A.data<int>(),
R.dim32(0),
spatial_scale_,
X.dim32(1),
X.dim32(2),
X.dim32(3),
pooled_height_,
pooled_width_,
dX->template mutable_data<float>(),
R.data<float>());
}
return true;
}
REGISTER_CUDA_OPERATOR(RoIPool, RoIPoolOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(RoIPoolGradient, RoIPoolGradientOp<float, CUDAContext>);
} // namespace caffe2
|
2caff5ebdce276b4892604f6842add5d25fccae2.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "headers.h"
int main(int argc, char *argv[])
{
/* get GPU device number and name */
int dev;
hipDeviceProp_t deviceProp;
checkCUDA( hipGetDevice( &dev ) );
checkCUDA( hipGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
/* declare file pointers */
char trainingLabelFilename[] = "train-labels.txt";
char trainingSetFilename[] = "train-images.txt";
char testSetFilename[] = "t10k-images.txt";
char testLabelFilename[] = "t10k-labels.txt";
#if 0
//used for debugging
char theta1Filename[] = "Theta1.txt";
char theta2Filename[] = "Theta2.txt";
#endif
/* define constants */
int const numFeatures = FEATURE_VECTOR_SIZE;
int const numTrainingExamples = TRAINING_SET_SIZE;
int const numTestExamples = TEST_SET_SIZE;
int const numClasses = NUM_OUTPUT_CLASSES;
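  /* eps is the half-width of the symmetric interval [-eps, eps] used for random weight initialization */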
floatType_t const eps = 0.12;
/* define the arrays going to be used */
float *trainingVector, *trainingMatrix;
float *theta1, *theta2;
float *testVector, *testMatrix;
int *predictVector;
float learningRate;
int batchSize;
int iterations;
int sizeHiddenLayer;
/* read command line args if they're passed */
readCommandLineArgs( argc, argv, &learningRate, &batchSize, &iterations,
&sizeHiddenLayer );
printf("Number of training examples %d\n",numTrainingExamples);
printf("Number of features/pixels per example %d\n",numFeatures);
printf("Number of test examples %d\n",numTestExamples);
/* malloc trainingVector, which holds the labels of the training set */
trainingVector = (float *) malloc( sizeof(float) * numTrainingExamples );
if( trainingVector == NULL )
fprintf(stderr,"Houston we have a problem\n");
memset( trainingVector, 0, sizeof(float)*numTrainingExamples );
/* read trainingVector from file */
readMatrixFromFile( trainingLabelFilename, trainingVector,
numTrainingExamples, 1, 1 );
/* malloc the training matrix. each column is a different training
example of 784 pixel values
*/
trainingMatrix = (float *) malloc( sizeof(float) * numTrainingExamples *
(numFeatures+1) );
if( trainingMatrix == NULL )
fprintf(stderr,"Houston more problems\n");
memset( trainingMatrix, 0, sizeof(float)*
numTrainingExamples*(numFeatures+1) );
/* read training examples from file as a matrix
read first column of data into second column of array to leave room for
bias unit of ones
*/
// readMatrixFromFile( trainingSetFilename,
// &trainingMatrix[INDX(0,1,numTrainingExamples)],
// numTrainingExamples, numFeatures );
readMatrixFromFile( trainingSetFilename,
&trainingMatrix[1],
numFeatures, numTrainingExamples, numFeatures+1 );
/* scale the training matrix to 0 to 1, essentially a normalization
technique
*/
floatType_t scale = 1.0 / 256.0;
for( int i = 0; i < (numFeatures+1)*numTrainingExamples; i++ )
trainingMatrix[i] *= scale;
/* malloc the theta1 matrix which are the weights for first layer
*/
theta1 = (float *) malloc( sizeof(float) * sizeHiddenLayer *
(numFeatures + 1 ) );
if( theta1 == NULL )
fprintf(stderr,"Houston more problems\n");
memset( theta1, 0, sizeof(float)*sizeHiddenLayer*(numFeatures+1) );
/* init theta1 with random numbers */
for( int i = 0; i < sizeHiddenLayer*(numFeatures+1); i++ )
{
theta1[i] = double(rand()) / (double(RAND_MAX) + 1.0);
theta1[i] *= (2.0*eps);
theta1[i] -= eps;
} /* end for */
/* malloc the theta2 matrix which are weights for second layer
*/
theta2 = (float *) malloc( sizeof(float) * numClasses *
(sizeHiddenLayer + 1 ) );
if( theta2 == NULL )
fprintf(stderr,"Houston more problems\n");
memset( theta2, 0, sizeof(float)*numClasses*(sizeHiddenLayer+1) );
/* init theta2 from random numbers */
for( int i = 0; i < numClasses*(sizeHiddenLayer+1); i++ )
{
theta2[i] = double(rand()) / (double(RAND_MAX) + 1.0);
theta2[i] *= (2.0*eps);
theta2[i] -= eps;
} /* end for */
/* setup timers using CUDA events */
hipEvent_t start, stop;
checkCUDA( hipEventCreate( &start ) );
checkCUDA( hipEventCreate( &stop ) );
checkCUDA( hipEventRecord( start, 0 ) );
#if 1
/* call the training function. This accounts for the majority of the runtime */
trainNetwork( trainingMatrix, numTrainingExamples, numFeatures+1,
theta1, sizeHiddenLayer, numFeatures+1,
theta2, numClasses, sizeHiddenLayer+1,
trainingVector, learningRate, iterations, batchSize );
#endif
/* report time of training */
checkCUDA( hipEventRecord( stop, 0 ) );
checkCUDA( hipEventSynchronize( stop ) );
float elapsedTime;
checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) );
fprintf(stdout, "Total time for training is %.3e sec\n",
elapsedTime/1000.0f );
/* malloc predictVector. This vector will be populated by the predict
   function, i.e., it takes a set of pixel data, predicts which digit
   it is, and puts those values into a vector */
predictVector = (int *) malloc( sizeof(int) * numTrainingExamples );
if( predictVector == NULL )
fprintf(stderr,"Houston we have a problem\n");
memset( predictVector, 0, sizeof(int)*numTrainingExamples );
/* test prediction on the training examples */
predict( trainingMatrix, numTrainingExamples, numFeatures+1,
theta1, sizeHiddenLayer, numFeatures+1,
theta2, numClasses, sizeHiddenLayer+1,
predictVector );
/* compare the predicted values versus the actual values, of the
training set
*/
floatType_t result = 0.0;
for( int i = 0; i < numTrainingExamples; i++ )
{
if( (int) trainingVector[i] == predictVector[i] )
result += (floatType_t) 1.0;
} /* end for i */
printf("Total correct on training set is %d\n",(int)result);
printf("Prediction rate of training set is %.3f\n",
100.0 * result/(floatType_t)numTrainingExamples);
/* malloc testVector. this is a test set of labels for data
we haven't seen yet.
*/
testVector = (float *) malloc( sizeof(float) * numTestExamples );
if( testVector == NULL )
fprintf(stderr,"Houston we have a problem\n");
memset( testVector, 0, sizeof(float)*numTestExamples );
/* read testVector from file */
readMatrixFromFile( testLabelFilename, testVector,
numTestExamples, 1, 1 );
/* malloc the test matrix. each column is a different test example of data
we haven't seen before.
*/
testMatrix = (float *) malloc( sizeof(float) * numTestExamples *
(numFeatures+1) );
if( testMatrix == NULL )
fprintf(stderr,"Houston more problems\n");
memset( testMatrix, 0, sizeof(float)*
numTestExamples*(numFeatures+1) );
/* read test examples from file as a matrix
read first column of data into second column of array to leave room for
bias unit of ones
*/
readMatrixFromFile( testSetFilename,
&testMatrix[1],
numFeatures, numTestExamples, numFeatures+1 );
/* scale the test matrix to 0 to 1 */
scale = 1.0 / 256.0;
for( int i = 0; i < (numFeatures+1)*numTestExamples; i++ )
testMatrix[i] *= scale;
memset( predictVector, 0, sizeof(int)*numTestExamples );
/* test the prediction of test examples which we haven't trained on
*/
predict( testMatrix, numTestExamples, numFeatures+1,
theta1, sizeHiddenLayer, numFeatures+1,
theta2, numClasses, sizeHiddenLayer+1,
predictVector );
result = 0.0;
for( int i = 0; i < numTestExamples; i++ )
{
if( (int) testVector[i] == predictVector[i] )
result += (floatType_t) 1.0;
} /* end for i */
printf("Total correct on test set is %d\n",(int)result);
printf("Prediction rate of test set is %.3f\n",
100.0 * result/(floatType_t)numTestExamples);
free(trainingVector);
free(trainingMatrix);
free(theta1);
free(theta2);
free(predictVector);
free(testVector);
free(testMatrix);
return 0;
} /* end main */
| 2caff5ebdce276b4892604f6842add5d25fccae2.cu | /*
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "headers.h"
int main(int argc, char *argv[])
{
/* get GPU device number and name */
int dev;
cudaDeviceProp deviceProp;
checkCUDA( cudaGetDevice( &dev ) );
checkCUDA( cudaGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
/* declare file pointers */
char trainingLabelFilename[] = "train-labels.txt";
char trainingSetFilename[] = "train-images.txt";
char testSetFilename[] = "t10k-images.txt";
char testLabelFilename[] = "t10k-labels.txt";
#if 0
//used for debugging
char theta1Filename[] = "Theta1.txt";
char theta2Filename[] = "Theta2.txt";
#endif
/* define constants */
int const numFeatures = FEATURE_VECTOR_SIZE;
int const numTrainingExamples = TRAINING_SET_SIZE;
int const numTestExamples = TEST_SET_SIZE;
int const numClasses = NUM_OUTPUT_CLASSES;
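  /* eps is the half-width of the symmetric interval [-eps, eps] used for random weight initialization */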
floatType_t const eps = 0.12;
/* define the arrays going to be used */
float *trainingVector, *trainingMatrix;
float *theta1, *theta2;
float *testVector, *testMatrix;
int *predictVector;
float learningRate;
int batchSize;
int iterations;
int sizeHiddenLayer;
/* read command line args if they're passed */
readCommandLineArgs( argc, argv, &learningRate, &batchSize, &iterations,
&sizeHiddenLayer );
printf("Number of training examples %d\n",numTrainingExamples);
printf("Number of features/pixels per example %d\n",numFeatures);
printf("Number of test examples %d\n",numTestExamples);
/* malloc trainingVector, which holds the labels of the training set */
trainingVector = (float *) malloc( sizeof(float) * numTrainingExamples );
if( trainingVector == NULL )
fprintf(stderr,"Houston we have a problem\n");
memset( trainingVector, 0, sizeof(float)*numTrainingExamples );
/* read trainingVector from file */
readMatrixFromFile( trainingLabelFilename, trainingVector,
numTrainingExamples, 1, 1 );
/* malloc the training matrix. each column is a different training
example of 784 pixel values
*/
trainingMatrix = (float *) malloc( sizeof(float) * numTrainingExamples *
(numFeatures+1) );
if( trainingMatrix == NULL )
fprintf(stderr,"Houston more problems\n");
memset( trainingMatrix, 0, sizeof(float)*
numTrainingExamples*(numFeatures+1) );
/* read training examples from file as a matrix
read first column of data into second column of array to leave room for
bias unit of ones
*/
// readMatrixFromFile( trainingSetFilename,
// &trainingMatrix[INDX(0,1,numTrainingExamples)],
// numTrainingExamples, numFeatures );
readMatrixFromFile( trainingSetFilename,
&trainingMatrix[1],
numFeatures, numTrainingExamples, numFeatures+1 );
/* scale the training matrix to 0 to 1, essentially a normalization
technique
*/
floatType_t scale = 1.0 / 256.0;
for( int i = 0; i < (numFeatures+1)*numTrainingExamples; i++ )
trainingMatrix[i] *= scale;
/* malloc the theta1 matrix which are the weights for first layer
*/
theta1 = (float *) malloc( sizeof(float) * sizeHiddenLayer *
(numFeatures + 1 ) );
if( theta1 == NULL )
fprintf(stderr,"Houston more problems\n");
memset( theta1, 0, sizeof(float)*sizeHiddenLayer*(numFeatures+1) );
/* init theta1 with random numbers */
for( int i = 0; i < sizeHiddenLayer*(numFeatures+1); i++ )
{
theta1[i] = double(rand()) / (double(RAND_MAX) + 1.0);
theta1[i] *= (2.0*eps);
theta1[i] -= eps;
} /* end for */
/* malloc the theta2 matrix which are weights for second layer
*/
theta2 = (float *) malloc( sizeof(float) * numClasses *
(sizeHiddenLayer + 1 ) );
if( theta2 == NULL )
fprintf(stderr,"Houston more problems\n");
memset( theta2, 0, sizeof(float)*numClasses*(sizeHiddenLayer+1) );
/* init theta2 from random numbers */
for( int i = 0; i < numClasses*(sizeHiddenLayer+1); i++ )
{
theta2[i] = double(rand()) / (double(RAND_MAX) + 1.0);
theta2[i] *= (2.0*eps);
theta2[i] -= eps;
} /* end for */
/* setup timers using CUDA events */
cudaEvent_t start, stop;
checkCUDA( cudaEventCreate( &start ) );
checkCUDA( cudaEventCreate( &stop ) );
checkCUDA( cudaEventRecord( start, 0 ) );
#if 1
/* call the training function. This accounts for the majority of the runtime */
trainNetwork( trainingMatrix, numTrainingExamples, numFeatures+1,
theta1, sizeHiddenLayer, numFeatures+1,
theta2, numClasses, sizeHiddenLayer+1,
trainingVector, learningRate, iterations, batchSize );
#endif
/* report time of training */
checkCUDA( cudaEventRecord( stop, 0 ) );
checkCUDA( cudaEventSynchronize( stop ) );
float elapsedTime;
checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) );
fprintf(stdout, "Total time for training is %.3e sec\n",
elapsedTime/1000.0f );
/* malloc predictVector. This vector will be populated by the predict
   function, i.e., it takes a set of pixel data, predicts which digit
   it is, and puts those values into a vector */
predictVector = (int *) malloc( sizeof(int) * numTrainingExamples );
if( predictVector == NULL )
fprintf(stderr,"Houston we have a problem\n");
memset( predictVector, 0, sizeof(int)*numTrainingExamples );
/* test prediction on the training examples */
predict( trainingMatrix, numTrainingExamples, numFeatures+1,
theta1, sizeHiddenLayer, numFeatures+1,
theta2, numClasses, sizeHiddenLayer+1,
predictVector );
/* compare the predicted values versus the actual values, of the
training set
*/
floatType_t result = 0.0;
for( int i = 0; i < numTrainingExamples; i++ )
{
if( (int) trainingVector[i] == predictVector[i] )
result += (floatType_t) 1.0;
} /* end for i */
printf("Total correct on training set is %d\n",(int)result);
printf("Prediction rate of training set is %.3f\n",
100.0 * result/(floatType_t)numTrainingExamples);
/* malloc testVector. this is a test set of labels for data
we haven't seen yet.
*/
testVector = (float *) malloc( sizeof(float) * numTestExamples );
if( testVector == NULL )
fprintf(stderr,"Houston we have a problem\n");
memset( testVector, 0, sizeof(float)*numTestExamples );
/* read testVector from file */
readMatrixFromFile( testLabelFilename, testVector,
numTestExamples, 1, 1 );
/* malloc the test matrix. each column is a different test example of data
we haven't seen before.
*/
testMatrix = (float *) malloc( sizeof(float) * numTestExamples *
(numFeatures+1) );
if( testMatrix == NULL )
fprintf(stderr,"Houston more problems\n");
memset( testMatrix, 0, sizeof(float)*
numTestExamples*(numFeatures+1) );
/* read test examples from file as a matrix
read first column of data into second column of array to leave room for
bias unit of ones
*/
readMatrixFromFile( testSetFilename,
&testMatrix[1],
numFeatures, numTestExamples, numFeatures+1 );
/* scale the test matrix to 0 to 1 */
scale = 1.0 / 256.0;
for( int i = 0; i < (numFeatures+1)*numTestExamples; i++ )
testMatrix[i] *= scale;
memset( predictVector, 0, sizeof(int)*numTestExamples );
/* test the prediction of test examples which we haven't trained on
*/
predict( testMatrix, numTestExamples, numFeatures+1,
theta1, sizeHiddenLayer, numFeatures+1,
theta2, numClasses, sizeHiddenLayer+1,
predictVector );
result = 0.0;
for( int i = 0; i < numTestExamples; i++ )
{
if( (int) testVector[i] == predictVector[i] )
result += (floatType_t) 1.0;
} /* end for i */
printf("Total correct on test set is %d\n",(int)result);
printf("Prediction rate of test set is %.3f\n",
100.0 * result/(floatType_t)numTestExamples);
free(trainingVector);
free(trainingMatrix);
free(theta1);
free(theta2);
free(predictVector);
free(testVector);
free(testMatrix);
return 0;
} /* end main */
|
1c2c49fa8346ca05e43b1b6687e27f5ed6b1f22c.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file
* @brief CIS 565 Version Checker
* @details A simple CUDA hello-world-style program for Patrick Cozzi's
* CIS 565: GPU Programming, at the University of Pennsylvania.
* @authors Starter code: Yining Karl Li, Liam Boone, Harmony Li, Kai Ninomiya
* @copyright University of Pennsylvania
*/
#include <cstdio>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include "kernel.h"
void checkCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
// Kernel that writes the image to the OpenGL PBO directly.
__global__ void createVersionVisualization(uchar4* PBOpos, int width, int height, int major,
int minor) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * width);
if (x < width && y < height) {
// Each thread writes one pixel location in the texture (textel)
PBOpos[index].w = 0;
PBOpos[index].x = 0;
PBOpos[index].y = 0;
PBOpos[index].z = 0;
int ver = y < height / 2 ? major : minor;
if (ver == 0) {
PBOpos[index].x = 255;
} else if (ver == 1) {
PBOpos[index].y = 255;
} else if (ver == 2) {
PBOpos[index].z = 255;
} else if (ver == 3) {
PBOpos[index].x = 255;
PBOpos[index].y = 255;
} else if (ver == 5) {
PBOpos[index].z = 255;
PBOpos[index].y = 255;
}
}
}
// Wrapper for the __global__ call that sets up the kernel calls
void kernelVersionVis(uchar4* PBOpos, int width, int height, int major, int minor) {
// set up crucial magic
unsigned int blockSize = 16;
dim3 threadsPerBlock(blockSize, blockSize);
unsigned int blocksX = (width + blockSize - 1) / blockSize;
unsigned int blocksY = (height + blockSize - 1) / blockSize;
dim3 fullBlocksPerGrid(blocksX, blocksY);
//kernel launches
hipLaunchKernelGGL(( createVersionVisualization) , dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, PBOpos, width, height, major, minor);
// make certain the kernel has completed
hipDeviceSynchronize();
checkCUDAError("Kernel failed!");
}
| 1c2c49fa8346ca05e43b1b6687e27f5ed6b1f22c.cu | /**
* @file
* @brief CIS 565 Version Checker
* @details A simple CUDA hello-world-style program for Patrick Cozzi's
* CIS 565: GPU Programming, at the University of Pennsylvania.
* @authors Starter code: Yining Karl Li, Liam Boone, Harmony Li, Kai Ninomiya
* @copyright University of Pennsylvania
*/
#include <cstdio>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include "kernel.h"
void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
// Kernel that writes the image to the OpenGL PBO directly.
__global__ void createVersionVisualization(uchar4* PBOpos, int width, int height, int major,
int minor) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * width);
if (x < width && y < height) {
// Each thread writes one pixel location in the texture (textel)
PBOpos[index].w = 0;
PBOpos[index].x = 0;
PBOpos[index].y = 0;
PBOpos[index].z = 0;
int ver = y < height / 2 ? major : minor;
if (ver == 0) {
PBOpos[index].x = 255;
} else if (ver == 1) {
PBOpos[index].y = 255;
} else if (ver == 2) {
PBOpos[index].z = 255;
} else if (ver == 3) {
PBOpos[index].x = 255;
PBOpos[index].y = 255;
} else if (ver == 5) {
PBOpos[index].z = 255;
PBOpos[index].y = 255;
}
}
}
// Wrapper for the __global__ call that sets up the kernel calls
void kernelVersionVis(uchar4* PBOpos, int width, int height, int major, int minor) {
// set up crucial magic
unsigned int blockSize = 16;
dim3 threadsPerBlock(blockSize, blockSize);
unsigned int blocksX = (width + blockSize - 1) / blockSize;
unsigned int blocksY = (height + blockSize - 1) / blockSize;
dim3 fullBlocksPerGrid(blocksX, blocksY);
//kernel launches
createVersionVisualization <<< fullBlocksPerGrid, threadsPerBlock>>>(PBOpos, width, height, major, minor);
// make certain the kernel has completed
cudaThreadSynchronize();
checkCUDAError("Kernel failed!");
}
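// Illustrative usage sketch (added for exposition, not part of the original starter
// code): one way a host program could obtain the compute capability values passed to
// kernelVersionVis. The function name is hypothetical and the cudaMalloc'd buffer
// merely stands in for the mapped OpenGL PBO that the real application supplies.
void exampleVersionVis(int width, int height) {
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);   // query device 0
    uchar4 *devPBO = NULL;
    cudaMalloc((void**)&devPBO, width * height * sizeof(uchar4));
    kernelVersionVis(devPBO, width, height, prop.major, prop.minor);
    checkCUDAError("exampleVersionVis failed!");
    cudaFree(devPBO);
}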
|
792471f712a916029aa8ceeb9770b79611cab7fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
template <typename T> __device__ void devicegpuUbou11(T *f, T *xdg, T *udg, T *odg, T *wdg, T *uhg, T *nlg, T *tau, T *uinf, T *param, T time, int modelnumber, int ng, int nc, int ncu, int nd, int ncx, int nco, int ncw)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i<ng) {
f[0*ng+i] = 0.0;
i += blockDim.x * gridDim.x;
}
}
template <typename T> __global__ void kernelgpuUbou11(T *f, T *xdg, T *udg, T *odg, T *wdg, T *uhg, T *nlg, T *tau, T *uinf, T *param, T time, int modelnumber, int ng, int nc, int ncu, int nd, int ncx, int nco, int ncw)
{
devicegpuUbou11(f, xdg, udg, odg, wdg, uhg, nlg, tau, uinf, param, time, modelnumber, ng, nc, ncu, nd, ncx, nco, ncw);
}
template <typename T> void gpuUbou1(T *f, T *xdg, T *udg, T *odg, T *wdg, T *uhg, T *nlg, T *tau, T *uinf, T *param, T time, int modelnumber, int ib, int ng, int nc, int ncu, int nd, int ncx, int nco, int ncw)
{
int blockDim = 256;
int gridDim = (ng + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
if (ib == 1)
hipLaunchKernelGGL(( kernelgpuUbou11), dim3(gridDim), dim3(blockDim), 0, 0, f, xdg, udg, odg, wdg, uhg, nlg, tau, uinf, param, time, modelnumber, ng, nc, ncu, nd, ncx, nco, ncw);
}
template void gpuUbou1(double *, double *, double *, double *, double *, double *, double *, double *, double *, double *, double, int, int, int, int, int, int, int, int, int);
template void gpuUbou1(float *, float *, float *, float *, float *, float *, float *, float *, float *, float *, float, int, int, int, int, int, int, int, int, int);
#ifdef _ENZYME
template <typename T> __global__ void kernelGradgpuUbou11Enzyme(T *f, T *df, T *xg, T *udg, T *dudg, T *odg, T *dodg, T *wdg, T *dwdg, T *uhg, T *duhg, T *nlg, T *tau, T *uinf, T *param, T time, int modelnumber, int ib, int ng, int nc, int ncu, int nd, int ncx, int nco, int ncw)
{
__enzyme_fwddiffgpuUbou1((void*)devicegpuUbou11<T>,
enzyme_dup, f, df,
enzyme_const, xg,
enzyme_dup, udg, dudg,
enzyme_dup, odg, dodg,
enzyme_dup, wdg, dwdg,
enzyme_dup, uhg, duhg,
enzyme_const, nlg,
enzyme_const, tau,
enzyme_const, uinf,
enzyme_const, param,
enzyme_const, time,
enzyme_const, modelnumber,
enzyme_const, ng,
enzyme_const, nc,
enzyme_const, ncu,
enzyme_const, nd,
enzyme_const, ncx,
enzyme_const, nco,
enzyme_const, ncw);
}
template <typename T> void gpuUbou1Enzyme(T *f, T *df, T *xg, T *udg, T *dudg, T *odg, T *dodg, T *wdg, T *dwdg, T *uhg, T *duhg, T *nlg, T *tau, T *uinf, T *param, T time, int modelnumber, int ib, int ng, int nc, int ncu, int nd, int ncx, int nco, int ncw)
{
int blockDim = 256;
int gridDim = (ng + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
if (ib == 1)
hipLaunchKernelGGL(( kernelGradgpuUbou11Enzyme), dim3(gridDim), dim3(blockDim), 0, 0, f, df, xg, udg, dudg, odg, dodg, wdg, dwdg, uhg, duhg, nlg, tau, uinf, param, time, modelnumber, ib, ng, nc, ncu, nd, ncx, nco, ncw);
}
template void gpuUbou1Enzyme(double *, double *, double *, double *, double *, double *, double *, double *, double *, double *, double *, double *, double *, double *, double *, double, int, int, int, int, int, int, int, int, int);
#endif | 792471f712a916029aa8ceeb9770b79611cab7fc.cu | template <typename T> __device__ void devicegpuUbou11(T *f, T *xdg, T *udg, T *odg, T *wdg, T *uhg, T *nlg, T *tau, T *uinf, T *param, T time, int modelnumber, int ng, int nc, int ncu, int nd, int ncx, int nco, int ncw)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i<ng) {
f[0*ng+i] = 0.0;
i += blockDim.x * gridDim.x;
}
}
template <typename T> __global__ void kernelgpuUbou11(T *f, T *xdg, T *udg, T *odg, T *wdg, T *uhg, T *nlg, T *tau, T *uinf, T *param, T time, int modelnumber, int ng, int nc, int ncu, int nd, int ncx, int nco, int ncw)
{
devicegpuUbou11(f, xdg, udg, odg, wdg, uhg, nlg, tau, uinf, param, time, modelnumber, ng, nc, ncu, nd, ncx, nco, ncw);
}
template <typename T> void gpuUbou1(T *f, T *xdg, T *udg, T *odg, T *wdg, T *uhg, T *nlg, T *tau, T *uinf, T *param, T time, int modelnumber, int ib, int ng, int nc, int ncu, int nd, int ncx, int nco, int ncw)
{
int blockDim = 256;
int gridDim = (ng + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
if (ib == 1)
kernelgpuUbou11<<<gridDim, blockDim>>>(f, xdg, udg, odg, wdg, uhg, nlg, tau, uinf, param, time, modelnumber, ng, nc, ncu, nd, ncx, nco, ncw);
}
template void gpuUbou1(double *, double *, double *, double *, double *, double *, double *, double *, double *, double *, double, int, int, int, int, int, int, int, int, int);
template void gpuUbou1(float *, float *, float *, float *, float *, float *, float *, float *, float *, float *, float, int, int, int, int, int, int, int, int, int);
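// Worked example (added for exposition, not in the original file): with ng = 1,000,000
// and blockDim = 256, gridDim = (1000000 + 255) / 256 = 3907, which the host wrapper
// above clamps to 1024 blocks; the while-loop in devicegpuUbou11 then strides by
// blockDim.x * gridDim.x = 262,144 threads per iteration, so every point is still
// visited even though there are fewer threads than points.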
#ifdef _ENZYME
template <typename T> __global__ void kernelGradgpuUbou11Enzyme(T *f, T *df, T *xg, T *udg, T *dudg, T *odg, T *dodg, T *wdg, T *dwdg, T *uhg, T *duhg, T *nlg, T *tau, T *uinf, T *param, T time, int modelnumber, int ib, int ng, int nc, int ncu, int nd, int ncx, int nco, int ncw)
{
__enzyme_fwddiffgpuUbou1((void*)devicegpuUbou11<T>,
enzyme_dup, f, df,
enzyme_const, xg,
enzyme_dup, udg, dudg,
enzyme_dup, odg, dodg,
enzyme_dup, wdg, dwdg,
enzyme_dup, uhg, duhg,
enzyme_const, nlg,
enzyme_const, tau,
enzyme_const, uinf,
enzyme_const, param,
enzyme_const, time,
enzyme_const, modelnumber,
enzyme_const, ng,
enzyme_const, nc,
enzyme_const, ncu,
enzyme_const, nd,
enzyme_const, ncx,
enzyme_const, nco,
enzyme_const, ncw);
}
template <typename T> void gpuUbou1Enzyme(T *f, T *df, T *xg, T *udg, T *dudg, T *odg, T *dodg, T *wdg, T *dwdg, T *uhg, T *duhg, T *nlg, T *tau, T *uinf, T *param, T time, int modelnumber, int ib, int ng, int nc, int ncu, int nd, int ncx, int nco, int ncw)
{
int blockDim = 256;
int gridDim = (ng + blockDim - 1) / blockDim;
gridDim = (gridDim>1024)? 1024 : gridDim;
if (ib == 1)
kernelGradgpuUbou11Enzyme<<<gridDim, blockDim>>>(f, df, xg, udg, dudg, odg, dodg, wdg, dwdg, uhg, duhg, nlg, tau, uinf, param, time, modelnumber, ib, ng, nc, ncu, nd, ncx, nco, ncw);
}
template void gpuUbou1Enzyme(double *, double *, double *, double *, double *, double *, double *, double *, double *, double *, double *, double *, double *, double *, double *, double, int, int, int, int, int, int, int, int, int);
#endif |
488995f776c27c2094bd94d453de02caa900a286.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/random_routing_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
#define CEIL(_x_, _y_) (((_x_)-1) / (_y_) + 1)
#define PERTHREAD_EXPERTS 256
#define WARP_SIZE 32
const int CUDA_NUM_THREADS = 512;
static inline int GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
using LoDTensor = framework::LoDTensor;
using Tensor = framework::Tensor;
template <typename T>
__global__ void random_routing_kernel(int64_t* data, const int64_t length,
const size_t N, const size_t D,
const T* prob, const int64_t* topk_idx,
const T* topk_value) {
CUDA_KERNEL_LOOP(idx, length) {
size_t row = idx / D;
size_t col = idx % D;
if (col != 1) return;
if (static_cast<T>(2) * topk_value[idx] < prob[row]) {
data[idx] = static_cast<int64_t>(-1);
}
}
}
template <typename T>
class RandomRoutingOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto topk_idx = context.Input<LoDTensor>("TopK_Idx");
auto topk_value = context.Input<LoDTensor>("TopK_Value");
auto prob = context.Input<LoDTensor>("Prob");
auto out = context.Output<LoDTensor>("Out");
auto place = context.GetPlace();
const auto& dev_ctx =
context.template device_context<platform::CUDADeviceContext>();
framework::TensorCopy(*topk_idx, place, out);
size_t N = topk_idx->dims()[0];
size_t D = topk_idx->dims()[1];
int64_t num_idx = topk_idx->numel();
auto prob_data = prob->data<T>();
auto topk_value_data = topk_value->data<T>();
auto topk_idx_data = topk_idx->data<int64_t>();
auto out_data = out->data<int64_t>();
hipLaunchKernelGGL(( random_routing_kernel<
T>), dim3(GET_BLOCKS(num_idx)), dim3(CUDA_NUM_THREADS), 0, dev_ctx.stream(),
out_data, num_idx, N, D, prob_data, topk_idx_data, topk_value_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(random_routing, ops::RandomRoutingOpCUDAKernel<float>,
ops::RandomRoutingOpCUDAKernel<double>,
ops::RandomRoutingOpCUDAKernel<plat::float16>);
| 488995f776c27c2094bd94d453de02caa900a286.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/random_routing_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
#define CEIL(_x_, _y_) (((_x_)-1) / (_y_) + 1)
#define PERTHREAD_EXPERTS 256
#define WARP_SIZE 32
const int CUDA_NUM_THREADS = 512;
static inline int GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
using LoDTensor = framework::LoDTensor;
using Tensor = framework::Tensor;
template <typename T>
__global__ void random_routing_kernel(int64_t* data, const int64_t length,
const size_t N, const size_t D,
const T* prob, const int64_t* topk_idx,
const T* topk_value) {
CUDA_KERNEL_LOOP(idx, length) {
size_t row = idx / D;
size_t col = idx % D;
if (col != 1) return;
if (static_cast<T>(2) * topk_value[idx] < prob[row]) {
data[idx] = static_cast<int64_t>(-1);
}
}
}
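// Illustrative note (added for exposition): only column 1 -- the token's second-choice
// expert -- is ever dropped here. For example, if prob[row] = 0.9 and the row's
// second-choice gate value is topk_value[idx] = 0.3, then 2 * 0.3 = 0.6 < 0.9, so the
// expert index at that position is overwritten with -1 and the token keeps only its
// top-1 expert; with topk_value[idx] = 0.5 the second choice would be kept.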
template <typename T>
class RandomRoutingOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto topk_idx = context.Input<LoDTensor>("TopK_Idx");
auto topk_value = context.Input<LoDTensor>("TopK_Value");
auto prob = context.Input<LoDTensor>("Prob");
auto out = context.Output<LoDTensor>("Out");
auto place = context.GetPlace();
const auto& dev_ctx =
context.template device_context<platform::CUDADeviceContext>();
framework::TensorCopy(*topk_idx, place, out);
size_t N = topk_idx->dims()[0];
size_t D = topk_idx->dims()[1];
int64_t num_idx = topk_idx->numel();
auto prob_data = prob->data<T>();
auto topk_value_data = topk_value->data<T>();
auto topk_idx_data = topk_idx->data<int64_t>();
auto out_data = out->data<int64_t>();
random_routing_kernel<
T><<<GET_BLOCKS(num_idx), CUDA_NUM_THREADS, 0, dev_ctx.stream()>>>(
out_data, num_idx, N, D, prob_data, topk_idx_data, topk_value_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(random_routing, ops::RandomRoutingOpCUDAKernel<float>,
ops::RandomRoutingOpCUDAKernel<double>,
ops::RandomRoutingOpCUDAKernel<plat::float16>);
|
e4494a1e343a1d55d3be8ca63ac9c005ad0f2042.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "glog/logging.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_dnn.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/flags.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/batch_norm_kernel.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/funcs/batch_norm_utils.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/phi/kernels/funcs/norm_utils.cu.h"
#include "paddle/phi/kernels/funcs/norm_utils.h"
#include "paddle/phi/kernels/funcs/reduce_function.h"
#ifdef __HIPCC__
#define LAUNCH_BOUNDS(BlockDim) __launch_bounds__(BlockDim)
#else
#define LAUNCH_BOUNDS(BlockDim)
#endif
PD_DECLARE_bool(cudnn_batchnorm_spatial_persistent);
namespace phi {
template <typename T>
using CudnnDataType = phi::backends::gpu::CudnnDataType<T>;
template <typename T>
using BatchNormParamType = typename CudnnDataType<T>::BatchNormParamType;
template <typename T, int BlockDim, phi::DataLayout layout>
static __global__ LAUNCH_BOUNDS(BlockDim) void KeBNBackwardScaleBias(
const T *dy,
const T *x,
const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance,
const double epsilon,
const int N,
const int C,
const int HxW,
BatchNormParamType<T> *dscale,
BatchNormParamType<T> *dbias) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef hipcub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage ds_storage;
__shared__ typename BlockReduce::TempStorage db_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> inv_var_i = 1.0 / sqrt(variance[i] + epsilon);
BatchNormParamType<T> mean_i = mean[i];
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
ds_sum += static_cast<BatchNormParamType<T>>(dy[index]) *
(static_cast<BatchNormParamType<T>>(x[index]) - mean_i);
db_sum += static_cast<BatchNormParamType<T>>(dy[index]);
}
ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, hipcub::Sum());
db_sum = BlockReduce(db_storage).Reduce(db_sum, hipcub::Sum());
if (threadIdx.x == 0) {
dscale[i] = ds_sum * inv_var_i;
dbias[i] = db_sum;
}
__syncthreads();
}
}
template <typename T, phi::DataLayout layout>
static __global__ void KeBNBackwardData(const T *dy,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *variance,
const double epsilon,
const int C,
const int HxW,
const int num,
T *dx) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = gid; i < num; i += stride) {
const int c = layout == phi::DataLayout::kNCHW ? i / HxW % C : i % C;
BatchNormParamType<T> inv_var = 1.0 / sqrt(variance[c] + epsilon);
dx[i] = static_cast<T>(static_cast<BatchNormParamType<T>>(dy[i]) *
scale[c] * inv_var);
}
}
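// Expository note (added to this listing, not in the original source): this kernel
// covers the frozen-statistics path, where mean and variance are treated as constants,
// so the input gradient reduces to dx = dy * scale / sqrt(variance + epsilon) per
// channel -- exactly the expression computed above with inv_var.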
template <typename T>
static __global__ void KeBNRestoreData(const phi::DataLayout layout,
T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *bias,
const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance,
double epsilon,
int C,
int M,
const int num,
const T *y) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = gid; i < num; i += stride) {
const int c = layout == phi::DataLayout::kNCHW ? (i / M) % C : i % C;
auto y_i = static_cast<BatchNormParamType<T>>(y[i]);
auto x_i = (y_i - bias[c]) / scale[c] / variance[c] + mean[c];
x[i] = static_cast<T>(x_i);
}
}
template <typename T>
class InplaceHelper {
public:
void operator()(const phi::DataLayout layout,
T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *bias,
const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance,
double epsilon,
int C,
int M,
const int num,
const T *y,
int grid2,
const int block,
const gpuStream_t &stream) {
PADDLE_ENFORCE_EQ(x,
y,
phi::errors::InvalidArgument(
"X and Y should be inplaced in inplace mode"));
hipLaunchKernelGGL(( KeBNRestoreData), dim3(grid2), dim3(block), 0, stream,
layout, x, scale, bias, mean, variance, epsilon, C, M, num, y);
}
};
template <typename T, int BlockDim, phi::DataLayout layout>
static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackward(
const T *dy,
const T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *saved_mean,
const BatchNormParamType<T> *saved_inv_variance,
const int C,
const int N,
const int HxW,
const double epsilon,
T *dx,
BatchNormParamType<T> *dscale,
BatchNormParamType<T> *dbias) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef hipcub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage ds_storage;
__shared__ typename BlockReduce::TempStorage db_storage;
__shared__ typename BlockReduce::TempStorage mean_storage;
__shared__ typename BlockReduce::TempStorage variance_storeage;
__shared__ BatchNormParamType<T> inv_var_val;
__shared__ BatchNormParamType<T> mean_val;
__shared__ BatchNormParamType<T> dscale_val;
__shared__ BatchNormParamType<T> dbias_val;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0);
if (saved_mean && saved_inv_variance) {
if (threadIdx.x == 0) {
inv_var_val = saved_inv_variance[i];
mean_val = saved_mean[i];
}
} else {
BatchNormParamType<T> x_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> x_square_sum =
static_cast<BatchNormParamType<T>>(0);
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> x_i =
static_cast<BatchNormParamType<T>>(x[index]);
x_sum += x_i;
x_square_sum += x_i * x_i;
}
x_sum = BlockReduce(mean_storage).Reduce(x_sum, hipcub::Sum());
x_square_sum =
BlockReduce(variance_storeage).Reduce(x_square_sum, hipcub::Sum());
if (threadIdx.x == 0) {
mean_val = x_sum / inner_size;
inv_var_val =
1 / sqrt(x_square_sum / inner_size - mean_val * mean_val + epsilon);
}
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> dy_i =
static_cast<BatchNormParamType<T>>(dy[index]);
ds_sum +=
dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_val);
db_sum += dy_i;
}
ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, hipcub::Sum());
db_sum = BlockReduce(db_storage).Reduce(db_sum, hipcub::Sum());
if (threadIdx.x == 0) {
dscale_val = ds_sum * inv_var_val;
dbias_val = db_sum;
dscale[i] = dscale_val;
dbias[i] = dbias_val;
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
dx[index] = scale[i] * inv_var_val *
(static_cast<BatchNormParamType<T>>(dy[index]) -
dbias_val / static_cast<BatchNormParamType<T>>(inner_size) -
(static_cast<BatchNormParamType<T>>(x[index]) - mean_val) *
inv_var_val * dscale_val / inner_size);
}
}
}
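// Expository sketch (added to this listing, not part of the original source): a plain
// CPU reference for the same training-mode gradients, written for data flattened to an
// illustrative [C][m] layout with m = N*HxW. The helper name is hypothetical; it mirrors
// the formulas used in BNBackward above (dscale = sum(dy*(x-mean))*inv_std,
// dbias = sum(dy), dx = scale*inv_std*(dy - dbias/m - (x-mean)*inv_std*dscale/m)).
template <typename T>
static void BNBackwardReferenceCPU(const T *dy,
                                   const T *x,
                                   const T *scale,
                                   int C,
                                   int m,
                                   double epsilon,
                                   T *dx,
                                   T *dscale,
                                   T *dbias) {
  for (int c = 0; c < C; ++c) {
    // recompute the per-channel batch statistics, as the kernel does when no
    // saved mean / inverse variance is provided
    double sum = 0.0, sq_sum = 0.0;
    for (int j = 0; j < m; ++j) {
      double v = static_cast<double>(x[c * m + j]);
      sum += v;
      sq_sum += v * v;
    }
    double mean = sum / m;
    double inv_std = 1.0 / sqrt(sq_sum / m - mean * mean + epsilon);
    // reduce dy and dy*(x - mean) over the channel
    double ds = 0.0, db = 0.0;
    for (int j = 0; j < m; ++j) {
      double g = static_cast<double>(dy[c * m + j]);
      ds += g * (static_cast<double>(x[c * m + j]) - mean);
      db += g;
    }
    double dscale_c = ds * inv_std;
    dscale[c] = static_cast<T>(dscale_c);
    dbias[c] = static_cast<T>(db);
    // combine the reductions into the input gradient
    for (int j = 0; j < m; ++j) {
      double g = static_cast<double>(dy[c * m + j]);
      double xc = static_cast<double>(x[c * m + j]) - mean;
      dx[c * m + j] = static_cast<T>(static_cast<double>(scale[c]) * inv_std *
                                     (g - db / m - xc * inv_std * dscale_c / m));
    }
  }
}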
template <typename T, int BlockDim>
static __global__ void BNBackward2DChannelLastStage1(
const T *x,
const int C,
const int N,
const int HxW,
const double epsilon,
BatchNormParamType<T> *block_data_ptr,
BatchNormParamType<T> *compute_mean,
BatchNormParamType<T> *compute_inv_var,
int *flag_ptr) {
int outer_size = C;
int inner_size = N * HxW;
__shared__ BatchNormParamType<T> smem_sum[BlockDim];
__shared__ BatchNormParamType<T> smem_square_sum[BlockDim];
__shared__ BatchNormParamType<T> inv_var_val;
__shared__ BatchNormParamType<T> mean_val;
int outer_loop_stride = gridDim.x * blockDim.x;
int inner_loop_stride = gridDim.y * blockDim.y;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < outer_size;
i += outer_loop_stride) {
BatchNormParamType<T> x_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> x_square_sum = static_cast<BatchNormParamType<T>>(0);
for (int j = blockIdx.y * blockDim.y + threadIdx.y; j < inner_size;
j += inner_loop_stride) {
const int index = j * outer_size + i;
BatchNormParamType<T> x_i = static_cast<BatchNormParamType<T>>(x[index]);
x_sum += x_i;
x_square_sum += x_i * x_i;
}
// vertical block sum
funcs::BlockReduceByVetical<T, BatchNormParamType<T>>(x_sum,
x_square_sum,
&smem_sum[0],
&smem_square_sum[0],
&x_sum,
&x_square_sum);
if (gridDim.y > 1) {
__shared__ bool is_last_block_done;
funcs::ReduceSumPost<T, BatchNormParamType<T>>(C,
i,
&x_sum,
&x_square_sum,
&is_last_block_done,
smem_sum,
smem_square_sum,
block_data_ptr,
flag_ptr);
if (is_last_block_done) {
// final compute
if (threadIdx.y == 0) {
BatchNormParamType<T> compute_mean_val = x_sum / inner_size;
BatchNormParamType<T> variance_val =
x_square_sum / inner_size - compute_mean_val * compute_mean_val;
BatchNormParamType<T> compute_inv_var_val =
1 / sqrt(variance_val + epsilon);
compute_mean[i] = compute_mean_val;
compute_inv_var[i] = compute_inv_var_val;
}
}
}
}
}
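// Expository note (added to this listing): in this channel-last path each column of
// thread blocks first reduces its own slice of the N*HxW elements (the "vertical block
// sum" above); when the grid has more than one block along y, the per-block partial
// sums are staged in block_data_ptr and, as far as can be read from this call site, the
// last block to finish (signalled through flag_ptr inside funcs::ReduceSumPost) folds
// them into the final per-channel mean and inverse variance.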
template <typename T, int BlockDim>
static __global__ void BNBackward2DChannelLastStage2(
const T *dy,
const T *x,
const BatchNormParamType<T> *means,
const BatchNormParamType<T> *variances,
const int C,
const int N,
const int HxW,
const double epsilon,
const bool is_test,
BatchNormParamType<T> *block_data_ptr,
BatchNormParamType<T> *dscale,
BatchNormParamType<T> *dbias,
int *flag_ptr) {
int outer_size = C;
int inner_size = N * HxW;
__shared__ BatchNormParamType<T> smem_ds_sum[BlockDim];
__shared__ BatchNormParamType<T> smem_db_sum[BlockDim];
__shared__ BatchNormParamType<T> inv_var_val;
__shared__ BatchNormParamType<T> mean_val;
int outer_loop_stride = gridDim.x * blockDim.x;
int inner_loop_stride = gridDim.y * blockDim.y;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < outer_size;
i += outer_loop_stride) {
BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> mean_val = means[i];
BatchNormParamType<T> inv_var_val =
is_test ? 1.0 / sqrt(variances[i] + epsilon) : variances[i];
for (int j = blockIdx.y * blockDim.y + threadIdx.y; j < inner_size;
j += inner_loop_stride) {
const int index = j * outer_size + i;
BatchNormParamType<T> dy_i =
static_cast<BatchNormParamType<T>>(dy[index]);
ds_sum +=
dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_val);
db_sum += dy_i;
}
// vertical block sum
funcs::BlockReduceByVetical<T, BatchNormParamType<T>>(
ds_sum, db_sum, &smem_ds_sum[0], &smem_db_sum[0], &ds_sum, &db_sum);
if (gridDim.y > 1) {
__shared__ bool is_last_block_done;
funcs::ReduceSumPost<T, BatchNormParamType<T>>(C,
i,
&ds_sum,
&db_sum,
&is_last_block_done,
smem_ds_sum,
smem_db_sum,
block_data_ptr,
flag_ptr);
if (is_last_block_done) {
// final compute
if (threadIdx.y == 0) {
dscale[i] = ds_sum * inv_var_val;
dbias[i] = db_sum;
}
}
}
}
}
template <typename T, int BlockDim>
static __global__ void BNBackward2DChannelLastStage3(
const T *dy,
const T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *dscales,
const BatchNormParamType<T> *dbias,
const BatchNormParamType<T> *means,
const BatchNormParamType<T> *variances,
const int C,
const int N,
const int HxW,
const double epsilon,
T *dx) {
const int outer_size = C;
const int inner_size = N * HxW;
int outer_loop_stride = gridDim.x * blockDim.x;
int inner_loop_stride = gridDim.y * blockDim.y;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < outer_size;
i += outer_loop_stride) {
BatchNormParamType<T> mean_val = means[i];
BatchNormParamType<T> inv_var_val = variances[i];
BatchNormParamType<T> dscale_val = dscales[i];
BatchNormParamType<T> dbias_val = dbias[i];
for (int j = blockIdx.y * blockDim.y + threadIdx.y; j < inner_size;
j += inner_loop_stride) {
const int index = j * outer_size + i;
dx[index] = scale[i] * inv_var_val *
(static_cast<BatchNormParamType<T>>(dy[index]) -
dbias_val / static_cast<BatchNormParamType<T>>(inner_size) -
(static_cast<BatchNormParamType<T>>(x[index]) - mean_val) *
inv_var_val * dscale_val / inner_size);
}
}
}
template <typename T, int BlockDim, phi::DataLayout layout>
static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackwardData(
const T *dy,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *mean,
const T *x,
const BatchNormParamType<T> *variance,
const int C,
const int N,
const int HxW,
T *dx) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef hipcub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage dy_storage;
__shared__ typename BlockReduce::TempStorage dy_x_sub_mean_storage;
__shared__ BatchNormParamType<T> dy_sum_val;
__shared__ BatchNormParamType<T> dy_x_sub_mean_sum_val;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> inv_var_i = variance[i];
BatchNormParamType<T> mean_i = mean[i];
BatchNormParamType<T> dy_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> dy_x_sub_mean_sum =
static_cast<BatchNormParamType<T>>(0);
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> dy_i =
static_cast<BatchNormParamType<T>>(dy[index]);
dy_sum += dy_i;
dy_x_sub_mean_sum +=
dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_i);
}
dy_sum = BlockReduce(dy_storage).Reduce(dy_sum, hipcub::Sum());
dy_x_sub_mean_sum = BlockReduce(dy_x_sub_mean_storage)
.Reduce(dy_x_sub_mean_sum, hipcub::Sum());
if (threadIdx.x == 0) {
dy_sum_val = dy_sum;
dy_x_sub_mean_sum_val = dy_x_sub_mean_sum;
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
dx[index] =
(static_cast<BatchNormParamType<T>>(dy[index]) -
dy_sum_val / static_cast<BatchNormParamType<T>>(inner_size) -
(static_cast<BatchNormParamType<T>>(x[index]) - mean_i) *
dy_x_sub_mean_sum_val * inv_var_i * inv_var_i / inner_size) *
scale[i] * inv_var_i;
}
}
}
template <typename T, typename Context>
void BatchNormGradFunctor(const Context &ctx,
const DenseTensor &x,
const DenseTensor &scale,
const DenseTensor &bias,
const paddle::optional<DenseTensor> &mean,
const paddle::optional<DenseTensor> &variance,
const DenseTensor &saved_mean,
const DenseTensor &saved_variance,
const paddle::optional<DenseTensor> &reserve_space,
const DenseTensor &y_grad,
float momentum,
float epsilon_f,
const std::string &data_layout_str,
bool is_test,
bool use_global_stats,
bool trainable_statistics,
bool is_inplace,
DenseTensor *x_grad,
DenseTensor *scale_grad,
DenseTensor *bias_grad) {
double epsilon = static_cast<double>(epsilon_f);
const DataLayout data_layout = phi::StringToDataLayout(data_layout_str);
const auto *d_y = &y_grad;
auto *d_x = x_grad;
auto *d_scale = scale_grad;
auto *d_bias = bias_grad;
use_global_stats = is_test || use_global_stats;
const auto &x_dims = x.dims();
PADDLE_ENFORCE_EQ(
x_dims.size() >= 2 && x_dims.size() <= 5,
true,
phi::errors::InvalidArgument(
"The size of input's dimensions should be between 2 and 5."
"But received: the size of input's dimensions is [%d],"
"the dimensions of input is [%s]",
x_dims.size(),
x_dims));
PADDLE_ENFORCE_EQ((d_scale == nullptr && d_bias == nullptr) ||
(d_scale != nullptr && d_bias != nullptr),
true,
phi::errors::InvalidArgument(
"Weight and bias's stop_gradient of BatchNorm must be "
"True or False at the same time."));
int N, C, H, W, D;
phi::funcs::ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D);
// init output
if (d_x) {
ctx.template Alloc<T>(d_x);
}
if (d_scale && d_bias) {
ctx.template Alloc<BatchNormParamType<T>>(d_scale);
ctx.template Alloc<BatchNormParamType<T>>(d_bias);
}
PADDLE_ENFORCE_EQ(
scale.dims().size(),
1UL,
phi::errors::InvalidArgument(
"The size of scale's dimensions must equal to 1. But received: "
"the size of scale's dimensions is [%d], the dimensions of scale "
"is [%s].",
scale.dims().size(),
scale.dims()));
PADDLE_ENFORCE_EQ(
scale.dims()[0],
C,
phi::errors::InvalidArgument(
"The first dimension of scale must equal to Channels[%d]. But "
"received: the first dimension of scale is [%d]",
C,
scale.dims()[0]));
auto dtype = phi::backends::gpu::CudnnDataType<T>::type;
#ifdef PADDLE_WITH_HIP
auto compute_format =
data_layout == DataLayout::kNHWC ? DataLayout::kNHWC : DataLayout::kNCHW;
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// HIP do not support compute format of NHWC
// auto compute_format = DataLayout::kNCHW;
#else
const bool fast_nhwc_batch_norm = dtype == CUDNN_DATA_HALF &&
FLAGS_cudnn_batchnorm_spatial_persistent &&
(reserve_space.get_ptr() != nullptr);
auto compute_format = fast_nhwc_batch_norm && data_layout == DataLayout::kNHWC
? DataLayout::kNHWC
: DataLayout::kNCHW;
#endif
DenseTensor transformed_x(x.type());
DenseTensor transformed_d_y(d_y->type());
DenseTensor transformed_d_x;
if (data_layout == DataLayout::kNHWC && compute_format == DataLayout::kNCHW &&
x_dims.size() > 2) {
VLOG(3) << "Transform input tensor from NHWC to NCHW.";
ResizeToChannelFirst<Context, T>(ctx, &x, &transformed_x);
TransToChannelFirst<Context, T>(ctx, &x, &transformed_x);
ResizeToChannelFirst<Context, T>(ctx, d_y, &transformed_d_y);
TransToChannelFirst<Context, T>(ctx, d_y, &transformed_d_y);
if (d_x) {
ResizeToChannelFirst<Context, T>(ctx, d_x, &transformed_d_x);
}
} else {
transformed_x.ShareDataWith(x);
transformed_d_y.ShareDataWith(*d_y);
if (d_x) {
transformed_d_x.ShareDataWith(*d_x);
}
}
std::vector<int> dims;
std::vector<int> strides;
if (compute_format == DataLayout::kNCHW) {
dims = {N, C, H, W, D};
strides = {C * H * W * D, H * W * D, W * D, D, 1};
} else {
dims = {N, C, H, W, D};
strides = {H * W * C * D, 1, W * D * C, D * C, C};
}
const int num = transformed_x.numel();
#ifdef HIPCC
const int block = 256;
#else
const int block = 512;
#endif
int max_threads = ctx.GetMaxPhysicalThreadCount();
const int max_blocks = ::max(max_threads / block, 1);
int grid1 = (num + block - 1) / block;
int grid2 = ::min(C, max_blocks);
auto stream = ctx.stream();
InplaceHelper<T> inplace_functor;
if (!use_global_stats) {
if ((N * H * W * D) == 1) {
if (d_x) {
phi::Copy(ctx, *d_y, ctx.GetPlace(), false, d_x);
}
phi::funcs::SetConstant<Context, BatchNormParamType<T>> functor;
functor(ctx, d_scale, static_cast<BatchNormParamType<T>>(0));
functor(ctx, d_bias, static_cast<BatchNormParamType<T>>(0));
return;
}
// ------------------- cudnn descriptors ---------------------
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// miopenTensorDescriptor_t data_desc_;
// miopenTensorDescriptor_t bn_param_desc_;
// miopenBatchNormMode_t mode_;
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenCreateTensorDescriptor(&data_desc_));
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenCreateTensorDescriptor(&bn_param_desc_));
#else
cudnnTensorDescriptor_t data_desc_;
cudnnTensorDescriptor_t bn_param_desc_;
cudnnBatchNormMode_t mode_;
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnCreateTensorDescriptor(&data_desc_));
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_));
#endif
if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) {
LOG(ERROR) << "Provided epsilon is smaller than "
<< "CUDNN_BN_MIN_EPSILON. Setting it to "
<< "CUDNN_BN_MIN_EPSILON instead.";
}
epsilon = ::max(epsilon, CUDNN_BN_MIN_EPSILON);
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// mode_ = miopenBNSpatial;
#elif CUDNN_VERSION_MIN(7, 0, 1)
if (FLAGS_cudnn_batchnorm_spatial_persistent) {
mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
} else if (H == 1 && W == 1) {
mode_ = CUDNN_BATCHNORM_PER_ACTIVATION;
} else {
mode_ = CUDNN_BATCHNORM_SPATIAL;
}
#else
if (H == 1 && W == 1) {
mode_ = CUDNN_BATCHNORM_PER_ACTIVATION;
} else {
mode_ = CUDNN_BATCHNORM_SPATIAL;
}
#endif // CUDNN_VERSION_MIN(7, 0, 1)
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetTensorDescriptor(
// data_desc_, CudnnDataType<T>::type,
// x_dims.size() > 3 ? x_dims.size() : 4, const_cast<int *>(dims.data()),
// const_cast<int *>(strides.data())));
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenDeriveBNTensorDescriptor(bn_param_desc_,
// data_desc_, mode_));
#else
PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::cudnnSetTensorNdDescriptor(
data_desc_,
CudnnDataType<T>::type,
x_dims.size() > 3 ? x_dims.size() : 4,
dims.data(),
strides.data()));
PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::cudnnDeriveBNTensorDescriptor(
bn_param_desc_, data_desc_, mode_));
#endif
const auto *saved_mean_data =
saved_mean.template data<BatchNormParamType<T>>();
const auto *saved_var_data =
saved_variance.template data<BatchNormParamType<T>>();
if (is_inplace) {
inplace_functor(compute_format,
transformed_x.data<T>(),
scale.template data<BatchNormParamType<T>>(),
bias.template data<BatchNormParamType<T>>(),
saved_mean_data,
saved_var_data,
epsilon,
C,
H * W * D,
num,
transformed_x.data<T>(),
grid2,
block,
stream);
}
// This branch calls CUDNN APIs
if (d_x && d_scale && d_bias) {
#ifdef PADDLE_WITH_HIP
if (compute_format == DataLayout::kNCHW) {
hipLaunchKernelGGL(( BNBackward<T, block, DataLayout::kNCHW>)
, dim3(grid2), dim3(block), 0, ctx.stream(),
transformed_d_y.template data<T>(),
transformed_x.template data<T>(),
scale.template data<BatchNormParamType<T>>(),
saved_mean_data,
saved_var_data,
C,
N,
H * W * D,
epsilon,
transformed_d_x.template data<T>(),
ctx.template Alloc<BatchNormParamType<T>>(d_scale),
ctx.template Alloc<BatchNormParamType<T>>(d_bias));
} else {
hipLaunchKernelGGL(( BNBackward<T, block, DataLayout::kNHWC>)
, dim3(grid2), dim3(block), 0, ctx.stream(),
transformed_d_y.template data<T>(),
transformed_x.template data<T>(),
scale.template data<BatchNormParamType<T>>(),
saved_mean_data,
saved_var_data,
C,
N,
H * W * D,
epsilon,
transformed_d_x.template data<T>(),
ctx.template Alloc<BatchNormParamType<T>>(d_scale),
ctx.template Alloc<BatchNormParamType<T>>(d_bias));
}
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenBatchNormalizationBackward(
// dev_ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(),
// CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(),
// CudnnDataType<T>::kZero(), data_desc_,
// transformed_x.template data<T>(), data_desc_,
// transformed_d_y.template data<T>(), data_desc_,
// transformed_d_x.template mutable_data<T>(ctx.GetPlace()),
// bn_param_desc_, scale->template data<BatchNormParamType<T>>(),
// d_scale->template mutable_data<BatchNormParamType<T>>(
// ctx.GetPlace()),
// d_bias->template mutable_data<BatchNormParamType<T>>(
// ctx.GetPlace()),
// epsilon, saved_mean_data, saved_var_data));
#else
}
// cuDNN only supports small batch sizes
bool use_native_nhwc =
d_x ? (x_dims.size() == 4 && compute_format == DataLayout::kNHWC &&
H * W >= CUDNN_SPATIAL_THRESHOLD_EVAL)
: false;
const bool use_native_kernel =
((x_dims.size() == 2 && N >= CUDNN_PER_ACTIVATION_THRESHOLD) ||
(x_dims.size() == 3 && N >= CUDNN_SPATIAL_THRESHOLD_TRAIN));
if (use_native_nhwc || (d_x && d_scale && d_bias)) {
if (use_native_kernel || use_native_nhwc) {
if (x_dims.size() == 2 || use_native_nhwc) {
dim3 block;
dim3 grid;
const int block_size = 512;
// init intermediate storage
DenseTensor block_data_tensor;
DenseTensor flag_tensor;
DenseTensor compute_mean_tensor =
phi::Empty<BatchNormParamType<T>, Context>(ctx, {C});
DenseTensor compute_inv_var_tensor =
phi::Empty<BatchNormParamType<T>, Context>(ctx, {C});
BatchNormParamType<T> *block_data_ptr = nullptr;
int *flag_ptr = nullptr;
funcs::SetLaunchConfigInfoForChannelLast<T, BatchNormParamType<T>>(
ctx,
&block_data_tensor,
&flag_tensor,
&block_data_ptr,
&flag_ptr,
N,
H,
W,
D,
C,
block_size,
&block,
&grid);
// 1. reduce_sum(x) => mean, inv_var
auto *mean_ptr =
saved_mean_data == nullptr
? compute_mean_tensor.data<BatchNormParamType<T>>()
: saved_mean_data;
auto *variance_ptr =
saved_var_data == nullptr
? compute_inv_var_tensor.data<BatchNormParamType<T>>()
: saved_var_data;
if (saved_mean_data == nullptr) {
hipLaunchKernelGGL(( BNBackward2DChannelLastStage1<T, block_size>)
, dim3(grid), dim3(block), 0, ctx.stream(),
transformed_x.template data<T>(),
C,
N,
H * W * D,
epsilon,
block_data_ptr,
compute_mean_tensor.data<BatchNormParamType<T>>(),
compute_inv_var_tensor.data<BatchNormParamType<T>>(),
flag_ptr);
}
// 2. reduce_sum(x, dy, mean) => dscale, dbias
BatchNormParamType<T> *dscale = nullptr;
BatchNormParamType<T> *dbias = nullptr;
bool with_scale = false;
if (d_scale && d_bias) {
dscale = ctx.template Alloc<BatchNormParamType<T>>(d_scale);
dbias = ctx.template Alloc<BatchNormParamType<T>>(d_bias);
} else {
DenseTensor dscale_mem =
phi::Empty<BatchNormParamType<T>, Context>(ctx, {C});
DenseTensor dbias_mem =
phi::Empty<BatchNormParamType<T>, Context>(ctx, {C});
dscale = dscale_mem.data<BatchNormParamType<T>>();
dbias = dbias_mem.data<BatchNormParamType<T>>();
}
hipLaunchKernelGGL(( BNBackward2DChannelLastStage2<T, block_size>)
, dim3(grid), dim3(block), 0, ctx.stream(),
transformed_d_y.template data<T>(),
transformed_x.template data<T>(),
mean_ptr,
variance_ptr,
C,
N,
H * W * D,
epsilon,
false,
block_data_ptr,
dscale,
dbias,
flag_ptr);
// 3. elementwise_mul(scale, mean, inv_var, dy, dscale, dbias) => dx
hipLaunchKernelGGL(( BNBackward2DChannelLastStage3<T, block_size>)
, dim3(grid), dim3(block), 0, ctx.stream(),
transformed_d_y.template data<T>(),
transformed_x.template data<T>(),
scale.template data<BatchNormParamType<T>>(),
dscale,
dbias,
mean_ptr,
variance_ptr,
C,
N,
H * W * D,
epsilon,
transformed_d_x.template data<T>());
} else {
if (compute_format == DataLayout::kNCHW) {
hipLaunchKernelGGL(( BNBackward<T, block, DataLayout::kNCHW>)
, dim3(grid2), dim3(block), 0, ctx.stream(),
transformed_d_y.template data<T>(),
transformed_x.template data<T>(),
scale.template data<BatchNormParamType<T>>(),
saved_mean_data,
saved_var_data,
C,
N,
H * W * D,
epsilon,
transformed_d_x.template data<T>(),
ctx.template Alloc<BatchNormParamType<T>>(d_scale),
ctx.template Alloc<BatchNormParamType<T>>(d_bias));
} else {
hipLaunchKernelGGL(( BNBackward<T, block, DataLayout::kNHWC>)
, dim3(grid2), dim3(block), 0, ctx.stream(),
transformed_d_y.template data<T>(),
transformed_x.template data<T>(),
scale.template data<BatchNormParamType<T>>(),
saved_mean_data,
saved_var_data,
C,
N,
H * W * D,
epsilon,
transformed_d_x.template data<T>(),
ctx.template Alloc<BatchNormParamType<T>>(d_scale),
ctx.template Alloc<BatchNormParamType<T>>(d_bias));
}
}
} else {
#if CUDNN_VERSION_MIN(7, 4, 1)
size_t workspace_size = 0;
void *workspace_ptr = nullptr;
DenseTensor workspace_tensor;
auto reserve_space_size = reserve_space->memory_size();
// --------------- cudnn batchnorm workspace ---------------
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnGetBatchNormalizationBackwardExWorkspaceSize(
/*handle=*/ctx.cudnn_handle(),
/*mode=*/mode_,
/*bnOps=*/CUDNN_BATCHNORM_OPS_BN,
/*xDesc=*/data_desc_,
/*yDesc=*/data_desc_,
/*dyDesc=*/data_desc_,
/*dzDesc=*/nullptr,
/*dxDesc=*/data_desc_,
/*bnScaleBiasMeanVarDesc=*/bn_param_desc_,
/*activationDesc=*/nullptr,
/*sizeInBytes=*/&workspace_size));
workspace_tensor.Resize({static_cast<int64_t>(workspace_size)});
workspace_ptr =
static_cast<void *>(ctx.template Alloc<uint8_t>(&workspace_tensor));
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnBatchNormalizationBackwardEx(
/*handle=*/ctx.cudnn_handle(),
/*mode=*/mode_,
/*bnOps=*/CUDNN_BATCHNORM_OPS_BN,
/*alphaDataDiff=*/CudnnDataType<T>::kOne(),
/*betaDataDiff=*/CudnnDataType<T>::kZero(),
/*alphaParamDiff=*/CudnnDataType<T>::kOne(),
/*betaParamDiff=*/CudnnDataType<T>::kZero(),
/*xDesc=*/data_desc_,
/*xData=*/transformed_x.template data<T>(),
/*yDesc=*/nullptr,
/*yData=*/nullptr,
/*dyDesc=*/data_desc_,
/*dyData=*/transformed_d_y.template data<T>(),
/*dzDesc=*/nullptr,
/*dzData=*/nullptr,
/*dxDesc=*/data_desc_,
/*dxData=*/ctx.template Alloc<T>(&transformed_d_x),
/*dBnScaleBiasDesc=*/bn_param_desc_,
/*bnScaleData=*/scale.template data<BatchNormParamType<T>>(),
/*bnBiasData=*/nullptr,
/*dBnScaleData=*/
ctx.template Alloc<BatchNormParamType<T>>(d_scale),
/*dBnBiasData=*/
ctx.template Alloc<BatchNormParamType<T>>(d_bias),
/*epsilon=*/epsilon,
/*savedMean=*/saved_mean_data,
/*savedInvVariance=*/saved_var_data,
/*activationDesc=*/nullptr,
/*workspace=*/workspace_ptr,
/*workSpaceSizeInBytes=*/workspace_size,
/*reserveSpace=*/
const_cast<uint8_t *>(reserve_space->template data<uint8_t>()),
/*reserveSpaceSizeInBytes=*/reserve_space_size));
#else
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnBatchNormalizationBackward(
ctx.cudnn_handle(),
mode_,
CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(),
CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(),
data_desc_,
transformed_x.template data<T>(),
data_desc_,
transformed_d_y.template data<T>(),
data_desc_,
ctx.template Alloc<T>(&transformed_d_x),
bn_param_desc_,
scale.template data<BatchNormParamType<T>>(),
ctx.template Alloc<BatchNormParamType<T>>(d_scale),
ctx.template Alloc<BatchNormParamType<T>>(d_bias),
epsilon,
saved_mean_data,
saved_var_data));
#endif // CUDNN_VERSION_MIN(7, 4, 1)
}
#endif
if (data_layout == DataLayout::kNHWC &&
compute_format == DataLayout::kNCHW) {
VLOG(3) << "Transform batchnorm output from NCHW to NHWC";
TransToChannelLast<Context, T>(ctx, &transformed_d_x, d_x);
}
} else {
// This branch calls CUDA kernels
if (compute_format == DataLayout::kNCHW) {
if (data_layout == DataLayout::kNHWC) {
if (d_x) {
hipLaunchKernelGGL(( BNBackwardData<T, block, phi::DataLayout::kNHWC>)
, dim3(grid2), dim3(block), 0, ctx.stream(),
d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
saved_mean_data,
x.data<T>(),
saved_var_data,
C,
N,
H * W * D,
d_x->data<T>());
}
if (d_scale && d_bias) {
hipLaunchKernelGGL(( KeBNBackwardScaleBias<T, block, phi::DataLayout::kNHWC>)
, dim3(grid2), dim3(block), 0, stream,
d_y->data<T>(),
x.data<T>(),
saved_mean_data,
saved_var_data,
epsilon,
N,
C,
H * W * D,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
} else {
if (d_x) {
hipLaunchKernelGGL(( BNBackwardData<T, block, phi::DataLayout::kNCHW>)
, dim3(grid2), dim3(block), 0, ctx.stream(),
d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
saved_mean_data,
x.data<T>(),
saved_var_data,
C,
N,
H * W * D,
d_x->data<T>());
}
if (d_scale && d_bias) {
hipLaunchKernelGGL(( KeBNBackwardScaleBias<T, block, phi::DataLayout::kNCHW>)
, dim3(grid2), dim3(block), 0, stream,
d_y->data<T>(),
x.data<T>(),
saved_mean_data,
saved_var_data,
epsilon,
N,
C,
H * W * D,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
}
} else {
if (d_x) {
hipLaunchKernelGGL(( BNBackwardData<T, block, phi::DataLayout::kNHWC>)
, dim3(grid2), dim3(block), 0, ctx.stream(),
d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
saved_mean_data,
x.data<T>(),
saved_var_data,
C,
N,
H * W * D,
d_x->data<T>());
}
if (d_scale && d_bias) {
hipLaunchKernelGGL(( KeBNBackwardScaleBias<T, block, phi::DataLayout::kNHWC>)
, dim3(grid2), dim3(block), 0, stream,
d_y->data<T>(),
x.data<T>(),
saved_mean_data,
saved_var_data,
epsilon,
N,
C,
H * W * D,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
}
}
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// clean when exit.
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenDestroyTensorDescriptor(data_desc_));
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenDestroyTensorDescriptor(bn_param_desc_));
#else
// clean when exit.
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnDestroyTensorDescriptor(data_desc_));
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_));
#endif
} else {
const auto *running_mean = mean.get_ptr();
const auto *running_var = variance.get_ptr();
const auto *running_mean_data =
running_mean->template data<BatchNormParamType<T>>();
const auto *running_var_data =
running_var->template data<BatchNormParamType<T>>();
if (is_inplace) {
auto px = x;
inplace_functor(data_layout,
ctx.template Alloc<T>(&px),
scale.template data<BatchNormParamType<T>>(),
bias.template data<BatchNormParamType<T>>(),
running_mean_data,
running_var_data,
epsilon,
C,
H * W * D,
num,
x.data<T>(),
grid2,
block,
stream);
}
if (compute_format == DataLayout::kNCHW) {
if (data_layout == DataLayout::kNHWC) {
if (d_x) {
hipLaunchKernelGGL(( KeBNBackwardData<T, phi::DataLayout::kNHWC>)
, dim3(grid1), dim3(block), 0, stream, d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
running_var_data,
epsilon,
C,
H * W,
num,
d_x->data<T>());
}
if (d_scale && d_bias) {
hipLaunchKernelGGL(( KeBNBackwardScaleBias<T, block, phi::DataLayout::kNHWC>)
, dim3(grid2), dim3(block), 0, stream,
d_y->data<T>(),
x.data<T>(),
running_mean_data,
running_var_data,
epsilon,
N,
C,
H * W * D,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
} else {
if (d_x) {
hipLaunchKernelGGL(( KeBNBackwardData<T, phi::DataLayout::kNCHW>)
, dim3(grid1), dim3(block), 0, stream, d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
running_var_data,
epsilon,
C,
H * W,
num,
d_x->data<T>());
}
if (d_scale && d_bias) {
hipLaunchKernelGGL(( KeBNBackwardScaleBias<T, block, phi::DataLayout::kNCHW>)
, dim3(grid2), dim3(block), 0, stream,
d_y->data<T>(),
x.data<T>(),
running_mean_data,
running_var_data,
epsilon,
N,
C,
H * W * D,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
}
} else {
if (d_x) {
hipLaunchKernelGGL(( KeBNBackwardData<T, phi::DataLayout::kNHWC>)
, dim3(grid1), dim3(block), 0, stream, d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
running_var_data,
epsilon,
C,
H * W,
num,
d_x->data<T>());
}
if (d_scale && d_bias) {
dim3 block;
dim3 grid;
const int block_size = 512;
// init intermediate storage
DenseTensor block_data_tensor;
DenseTensor flag_tensor;
BatchNormParamType<T> *block_data_ptr = nullptr;
int *flag_ptr = nullptr;
funcs::SetLaunchConfigInfoForChannelLast<T, BatchNormParamType<T>>(
ctx,
&block_data_tensor,
&flag_tensor,
&block_data_ptr,
&flag_ptr,
N,
H,
W,
D,
C,
block_size,
&block,
&grid);
hipLaunchKernelGGL(( BNBackward2DChannelLastStage2<T, block_size>)
, dim3(grid), dim3(block), 0, ctx.stream(),
transformed_d_y.template data<T>(),
transformed_x.template data<T>(),
running_mean_data,
running_var_data,
C,
N,
H * W * D,
epsilon,
true,
block_data_ptr,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>(),
flag_ptr);
}
}
}
}
template <typename T, typename Context>
void BatchNormGradKernel(const Context &dev_ctx,
const DenseTensor &x,
const DenseTensor &scale,
const DenseTensor &bias,
const paddle::optional<DenseTensor> &mean,
const paddle::optional<DenseTensor> &variance,
const DenseTensor &saved_mean,
const DenseTensor &saved_variance,
const paddle::optional<DenseTensor> &reserve_space,
const DenseTensor &y_grad,
float momentum,
float epsilon,
const std::string &data_layout,
bool is_test,
bool use_global_stats,
bool trainable_statistics,
DenseTensor *x_grad,
DenseTensor *scale_grad,
DenseTensor *bias_grad) {
BatchNormGradFunctor<T, Context>(dev_ctx,
x,
scale,
bias,
mean,
variance,
saved_mean,
saved_variance,
reserve_space,
y_grad,
momentum,
epsilon,
data_layout,
is_test,
use_global_stats,
trainable_statistics,
false,
x_grad,
scale_grad,
bias_grad);
}
template <typename T, typename Context>
void BatchNormDoubleGradKernel(
const Context &ctx,
const DenseTensor &x,
const DenseTensor &scale,
const paddle::optional<DenseTensor> &mean,
const paddle::optional<DenseTensor> &variance,
const DenseTensor &saved_mean,
const DenseTensor &saved_variance,
const DenseTensor &y_grad,
const paddle::optional<DenseTensor> &x_grad_grad,
const paddle::optional<DenseTensor> &scale_grad_grad,
const paddle::optional<DenseTensor> &bias_grad_grad,
float momentum,
float epsilon,
const std::string &data_layout_str,
bool is_test,
bool use_global_stats,
bool trainable_statistics,
DenseTensor *x_grad,
DenseTensor *scale_grad,
DenseTensor *y_grad_grad) {
PADDLE_ENFORCE_EQ(is_test,
false,
phi::errors::InvalidArgument(
"`is_test = True` CANNOT be used in train program. If "
"you want to use global status in pre_train model, "
"please set `use_global_stats = True`"));
const DataLayout data_layout = phi::StringToDataLayout(data_layout_str);
const DenseTensor *running_mean = nullptr;
const DenseTensor *running_variance = nullptr;
if (use_global_stats) {
running_mean = mean.get_ptr();
running_variance = variance.get_ptr();
}
phi::funcs::NormDoubleGradFunctor<Context, T>(ctx,
data_layout,
&x,
&scale,
&y_grad,
&saved_mean,
&saved_variance,
running_mean,
running_variance,
epsilon,
use_global_stats,
x_grad_grad.get_ptr(),
scale_grad_grad.get_ptr(),
bias_grad_grad.get_ptr(),
x_grad,
scale_grad,
y_grad_grad);
}
} // namespace phi
#ifdef PADDLE_WITH_HIP
PD_DECLARE_BN_GRAD_FUNCTOR(float, GPU);
PD_DECLARE_BN_GRAD_FUNCTOR(phi::dtype::float16, GPU);
PD_REGISTER_KERNEL(batch_norm_grad,
GPU,
ALL_LAYOUT,
phi::BatchNormGradKernel,
float,
phi::dtype::float16) {}
#else
#if CUDNN_VERSION_MIN(8, 1, 0)
PD_DECLARE_BN_GRAD_FUNCTOR(float, GPU);
PD_DECLARE_BN_GRAD_FUNCTOR(double, GPU);
PD_DECLARE_BN_GRAD_FUNCTOR(phi::dtype::bfloat16, GPU);
PD_DECLARE_BN_GRAD_FUNCTOR(phi::dtype::float16, GPU);
PD_REGISTER_KERNEL(batch_norm_grad,
GPU,
ALL_LAYOUT,
phi::BatchNormGradKernel,
float,
double,
phi::dtype::bfloat16,
phi::dtype::float16) {
if (kernel_key.dtype() == phi::DataType::FLOAT16 ||
kernel_key.dtype() == phi::DataType::BFLOAT16) {
kernel->OutputAt(0).SetDataType(phi::DataType::FLOAT32); // x_grad
kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32); // scale_grad
kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32); // bias_grad
}
}
#else
PD_DECLARE_BN_GRAD_FUNCTOR(float, GPU);
PD_DECLARE_BN_GRAD_FUNCTOR(double, GPU);
PD_DECLARE_BN_GRAD_FUNCTOR(phi::dtype::float16, GPU);
PD_REGISTER_KERNEL(batch_norm_grad,
GPU,
ALL_LAYOUT,
phi::BatchNormGradKernel,
float,
double,
phi::dtype::float16) {
if (kernel_key.dtype() == phi::DataType::FLOAT16) {
kernel->OutputAt(0).SetDataType(phi::DataType::FLOAT32); // x_grad
kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32); // scale_grad
kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32); // bias_grad
}
}
#endif
#endif
#ifdef PADDLE_WITH_HIP
PD_REGISTER_KERNEL(batch_norm_double_grad,
GPU,
ALL_LAYOUT,
phi::BatchNormDoubleGradKernel,
float,
double) {}
#else
PD_REGISTER_KERNEL(batch_norm_double_grad,
GPU,
ALL_LAYOUT,
phi::BatchNormDoubleGradKernel,
float,
double) {}
#endif
| e4494a1e343a1d55d3be8ca63ac9c005ad0f2042.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "glog/logging.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_dnn.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/flags.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/batch_norm_kernel.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/funcs/batch_norm_utils.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/phi/kernels/funcs/norm_utils.cu.h"
#include "paddle/phi/kernels/funcs/norm_utils.h"
#include "paddle/phi/kernels/funcs/reduce_function.h"
#ifdef __HIPCC__
#define LAUNCH_BOUNDS(BlockDim) __launch_bounds__(BlockDim)
#else
#define LAUNCH_BOUNDS(BlockDim)
#endif
PD_DECLARE_bool(cudnn_batchnorm_spatial_persistent);
namespace phi {
template <typename T>
using CudnnDataType = phi::backends::gpu::CudnnDataType<T>;
template <typename T>
using BatchNormParamType = typename CudnnDataType<T>::BatchNormParamType;
template <typename T, int BlockDim, phi::DataLayout layout>
static __global__ LAUNCH_BOUNDS(BlockDim) void KeBNBackwardScaleBias(
const T *dy,
const T *x,
const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance,
const double epsilon,
const int N,
const int C,
const int HxW,
BatchNormParamType<T> *dscale,
BatchNormParamType<T> *dbias) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage ds_storage;
__shared__ typename BlockReduce::TempStorage db_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> inv_var_i = 1.0 / sqrt(variance[i] + epsilon);
BatchNormParamType<T> mean_i = mean[i];
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
ds_sum += static_cast<BatchNormParamType<T>>(dy[index]) *
(static_cast<BatchNormParamType<T>>(x[index]) - mean_i);
db_sum += static_cast<BatchNormParamType<T>>(dy[index]);
}
ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, cub::Sum());
db_sum = BlockReduce(db_storage).Reduce(db_sum, cub::Sum());
if (threadIdx.x == 0) {
dscale[i] = ds_sum * inv_var_i;
dbias[i] = db_sum;
}
__syncthreads();
}
}
template <typename T, phi::DataLayout layout>
static __global__ void KeBNBackwardData(const T *dy,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *variance,
const double epsilon,
const int C,
const int HxW,
const int num,
T *dx) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = gid; i < num; i += stride) {
const int c = layout == phi::DataLayout::kNCHW ? i / HxW % C : i % C;
BatchNormParamType<T> inv_var = 1.0 / sqrt(variance[c] + epsilon);
dx[i] = static_cast<T>(static_cast<BatchNormParamType<T>>(dy[i]) *
scale[c] * inv_var);
}
}
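// Reference sketch of the math implemented by KeBNBackwardData above (an
// illustrative note, not extra Paddle code): in the global-statistics path the
// backward pass for x reduces to
//   dx_i = dy_i * scale_c / sqrt(var_c + epsilon)
// i.e. only the per-channel scale and the running variance are needed.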
template <typename T>
static __global__ void KeBNRestoreData(const phi::DataLayout layout,
T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *bias,
const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance,
double epsilon,
int C,
int M,
const int num,
const T *y) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = gid; i < num; i += stride) {
const int c = layout == phi::DataLayout::kNCHW ? (i / M) % C : i % C;
auto y_i = static_cast<BatchNormParamType<T>>(y[i]);
auto x_i = (y_i - bias[c]) / scale[c] / variance[c] + mean[c];
x[i] = static_cast<T>(x_i);
}
}
template <typename T>
class InplaceHelper {
public:
void operator()(const phi::DataLayout layout,
T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *bias,
const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance,
double epsilon,
int C,
int M,
const int num,
const T *y,
int grid2,
const int block,
const gpuStream_t &stream) {
PADDLE_ENFORCE_EQ(x,
y,
phi::errors::InvalidArgument(
"X and Y should be inplaced in inplace mode"));
KeBNRestoreData<<<grid2, block, 0, stream>>>(
layout, x, scale, bias, mean, variance, epsilon, C, M, num, y);
}
};
template <typename T, int BlockDim, phi::DataLayout layout>
static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackward(
const T *dy,
const T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *saved_mean,
const BatchNormParamType<T> *saved_inv_variance,
const int C,
const int N,
const int HxW,
const double epsilon,
T *dx,
BatchNormParamType<T> *dscale,
BatchNormParamType<T> *dbias) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage ds_storage;
__shared__ typename BlockReduce::TempStorage db_storage;
__shared__ typename BlockReduce::TempStorage mean_storage;
__shared__ typename BlockReduce::TempStorage variance_storeage;
__shared__ BatchNormParamType<T> inv_var_val;
__shared__ BatchNormParamType<T> mean_val;
__shared__ BatchNormParamType<T> dscale_val;
__shared__ BatchNormParamType<T> dbias_val;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0);
if (saved_mean && saved_inv_variance) {
if (threadIdx.x == 0) {
inv_var_val = saved_inv_variance[i];
mean_val = saved_mean[i];
}
} else {
BatchNormParamType<T> x_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> x_square_sum =
static_cast<BatchNormParamType<T>>(0);
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> x_i =
static_cast<BatchNormParamType<T>>(x[index]);
x_sum += x_i;
x_square_sum += x_i * x_i;
}
x_sum = BlockReduce(mean_storage).Reduce(x_sum, cub::Sum());
x_square_sum =
BlockReduce(variance_storeage).Reduce(x_square_sum, cub::Sum());
if (threadIdx.x == 0) {
mean_val = x_sum / inner_size;
inv_var_val =
1 / sqrt(x_square_sum / inner_size - mean_val * mean_val + epsilon);
}
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> dy_i =
static_cast<BatchNormParamType<T>>(dy[index]);
ds_sum +=
dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_val);
db_sum += dy_i;
}
ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, cub::Sum());
db_sum = BlockReduce(db_storage).Reduce(db_sum, cub::Sum());
if (threadIdx.x == 0) {
dscale_val = ds_sum * inv_var_val;
dbias_val = db_sum;
dscale[i] = dscale_val;
dbias[i] = dbias_val;
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
dx[index] = scale[i] * inv_var_val *
(static_cast<BatchNormParamType<T>>(dy[index]) -
dbias_val / static_cast<BatchNormParamType<T>>(inner_size) -
(static_cast<BatchNormParamType<T>>(x[index]) - mean_val) *
inv_var_val * dscale_val / inner_size);
}
}
}
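// Reference sketch of the gradients computed by BNBackward above (an
// illustrative note, not extra Paddle code). With m = N * H * W elements per
// channel:
//   dscale_c = sum_i dy_i * (x_i - mean_c) * inv_var_c
//   dbias_c  = sum_i dy_i
//   dx_i     = scale_c * inv_var_c *
//              (dy_i - dbias_c / m
//                    - (x_i - mean_c) * inv_var_c * dscale_c / m)
// which matches the two block reductions followed by the element-wise loop.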
template <typename T, int BlockDim>
static __global__ void BNBackward2DChannelLastStage1(
const T *x,
const int C,
const int N,
const int HxW,
const double epsilon,
BatchNormParamType<T> *block_data_ptr,
BatchNormParamType<T> *compute_mean,
BatchNormParamType<T> *compute_inv_var,
int *flag_ptr) {
int outer_size = C;
int inner_size = N * HxW;
__shared__ BatchNormParamType<T> smem_sum[BlockDim];
__shared__ BatchNormParamType<T> smem_square_sum[BlockDim];
__shared__ BatchNormParamType<T> inv_var_val;
__shared__ BatchNormParamType<T> mean_val;
int outer_loop_stride = gridDim.x * blockDim.x;
int inner_loop_stride = gridDim.y * blockDim.y;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < outer_size;
i += outer_loop_stride) {
BatchNormParamType<T> x_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> x_square_sum = static_cast<BatchNormParamType<T>>(0);
for (int j = blockIdx.y * blockDim.y + threadIdx.y; j < inner_size;
j += inner_loop_stride) {
const int index = j * outer_size + i;
BatchNormParamType<T> x_i = static_cast<BatchNormParamType<T>>(x[index]);
x_sum += x_i;
x_square_sum += x_i * x_i;
}
// vertical block sum
funcs::BlockReduceByVetical<T, BatchNormParamType<T>>(x_sum,
x_square_sum,
&smem_sum[0],
&smem_square_sum[0],
&x_sum,
&x_square_sum);
if (gridDim.y > 1) {
__shared__ bool is_last_block_done;
funcs::ReduceSumPost<T, BatchNormParamType<T>>(C,
i,
&x_sum,
&x_square_sum,
&is_last_block_done,
smem_sum,
smem_square_sum,
block_data_ptr,
flag_ptr);
if (is_last_block_done) {
// final compute
if (threadIdx.y == 0) {
BatchNormParamType<T> compute_mean_val = x_sum / inner_size;
BatchNormParamType<T> variance_val =
x_square_sum / inner_size - compute_mean_val * compute_mean_val;
BatchNormParamType<T> compute_inv_var_val =
1 / sqrt(variance_val + epsilon);
compute_mean[i] = compute_mean_val;
compute_inv_var[i] = compute_inv_var_val;
}
}
}
}
}
template <typename T, int BlockDim>
static __global__ void BNBackward2DChannelLastStage2(
const T *dy,
const T *x,
const BatchNormParamType<T> *means,
const BatchNormParamType<T> *variances,
const int C,
const int N,
const int HxW,
const double epsilon,
const bool is_test,
BatchNormParamType<T> *block_data_ptr,
BatchNormParamType<T> *dscale,
BatchNormParamType<T> *dbias,
int *flag_ptr) {
int outer_size = C;
int inner_size = N * HxW;
__shared__ BatchNormParamType<T> smem_ds_sum[BlockDim];
__shared__ BatchNormParamType<T> smem_db_sum[BlockDim];
__shared__ BatchNormParamType<T> inv_var_val;
__shared__ BatchNormParamType<T> mean_val;
int outer_loop_stride = gridDim.x * blockDim.x;
int inner_loop_stride = gridDim.y * blockDim.y;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < outer_size;
i += outer_loop_stride) {
BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> mean_val = means[i];
BatchNormParamType<T> inv_var_val =
is_test ? 1.0 / sqrt(variances[i] + epsilon) : variances[i];
for (int j = blockIdx.y * blockDim.y + threadIdx.y; j < inner_size;
j += inner_loop_stride) {
const int index = j * outer_size + i;
BatchNormParamType<T> dy_i =
static_cast<BatchNormParamType<T>>(dy[index]);
ds_sum +=
dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_val);
db_sum += dy_i;
}
// vertical block sum
funcs::BlockReduceByVetical<T, BatchNormParamType<T>>(
ds_sum, db_sum, &smem_ds_sum[0], &smem_db_sum[0], &ds_sum, &db_sum);
if (gridDim.y > 1) {
__shared__ bool is_last_block_done;
funcs::ReduceSumPost<T, BatchNormParamType<T>>(C,
i,
&ds_sum,
&db_sum,
&is_last_block_done,
smem_ds_sum,
smem_db_sum,
block_data_ptr,
flag_ptr);
if (is_last_block_done) {
// final compute
if (threadIdx.y == 0) {
dscale[i] = ds_sum * inv_var_val;
dbias[i] = db_sum;
}
}
}
}
}
template <typename T, int BlockDim>
static __global__ void BNBackward2DChannelLastStage3(
const T *dy,
const T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *dscales,
const BatchNormParamType<T> *dbias,
const BatchNormParamType<T> *means,
const BatchNormParamType<T> *variances,
const int C,
const int N,
const int HxW,
const double epsilon,
T *dx) {
const int outer_size = C;
const int inner_size = N * HxW;
int outer_loop_stride = gridDim.x * blockDim.x;
int inner_loop_stride = gridDim.y * blockDim.y;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < outer_size;
i += outer_loop_stride) {
BatchNormParamType<T> mean_val = means[i];
BatchNormParamType<T> inv_var_val = variances[i];
BatchNormParamType<T> dscale_val = dscales[i];
BatchNormParamType<T> dbias_val = dbias[i];
for (int j = blockIdx.y * blockDim.y + threadIdx.y; j < inner_size;
j += inner_loop_stride) {
const int index = j * outer_size + i;
dx[index] = scale[i] * inv_var_val *
(static_cast<BatchNormParamType<T>>(dy[index]) -
dbias_val / static_cast<BatchNormParamType<T>>(inner_size) -
(static_cast<BatchNormParamType<T>>(x[index]) - mean_val) *
inv_var_val * dscale_val / inner_size);
}
}
}
template <typename T, int BlockDim, phi::DataLayout layout>
static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackwardData(
const T *dy,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *mean,
const T *x,
const BatchNormParamType<T> *variance,
const int C,
const int N,
const int HxW,
T *dx) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage dy_storage;
__shared__ typename BlockReduce::TempStorage dy_x_sub_mean_storage;
__shared__ BatchNormParamType<T> dy_sum_val;
__shared__ BatchNormParamType<T> dy_x_sub_mean_sum_val;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> inv_var_i = variance[i];
BatchNormParamType<T> mean_i = mean[i];
BatchNormParamType<T> dy_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> dy_x_sub_mean_sum =
static_cast<BatchNormParamType<T>>(0);
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> dy_i =
static_cast<BatchNormParamType<T>>(dy[index]);
dy_sum += dy_i;
dy_x_sub_mean_sum +=
dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_i);
}
dy_sum = BlockReduce(dy_storage).Reduce(dy_sum, cub::Sum());
dy_x_sub_mean_sum = BlockReduce(dy_x_sub_mean_storage)
.Reduce(dy_x_sub_mean_sum, cub::Sum());
if (threadIdx.x == 0) {
dy_sum_val = dy_sum;
dy_x_sub_mean_sum_val = dy_x_sub_mean_sum;
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
dx[index] =
(static_cast<BatchNormParamType<T>>(dy[index]) -
dy_sum_val / static_cast<BatchNormParamType<T>>(inner_size) -
(static_cast<BatchNormParamType<T>>(x[index]) - mean_i) *
dy_x_sub_mean_sum_val * inv_var_i * inv_var_i / inner_size) *
scale[i] * inv_var_i;
}
}
}
template <typename T, typename Context>
void BatchNormGradFunctor(const Context &ctx,
const DenseTensor &x,
const DenseTensor &scale,
const DenseTensor &bias,
const paddle::optional<DenseTensor> &mean,
const paddle::optional<DenseTensor> &variance,
const DenseTensor &saved_mean,
const DenseTensor &saved_variance,
const paddle::optional<DenseTensor> &reserve_space,
const DenseTensor &y_grad,
float momentum,
float epsilon_f,
const std::string &data_layout_str,
bool is_test,
bool use_global_stats,
bool trainable_statistics,
bool is_inplace,
DenseTensor *x_grad,
DenseTensor *scale_grad,
DenseTensor *bias_grad) {
double epsilon = static_cast<double>(epsilon_f);
const DataLayout data_layout = phi::StringToDataLayout(data_layout_str);
const auto *d_y = &y_grad;
auto *d_x = x_grad;
auto *d_scale = scale_grad;
auto *d_bias = bias_grad;
use_global_stats = is_test || use_global_stats;
const auto &x_dims = x.dims();
PADDLE_ENFORCE_EQ(
x_dims.size() >= 2 && x_dims.size() <= 5,
true,
phi::errors::InvalidArgument(
"The size of input's dimensions should be between 2 and 5."
"But received: the size of input's dimensions is [%d],"
"the dimensions of input is [%s]",
x_dims.size(),
x_dims));
PADDLE_ENFORCE_EQ((d_scale == nullptr && d_bias == nullptr) ||
(d_scale != nullptr && d_bias != nullptr),
true,
phi::errors::InvalidArgument(
"Weight and bias's stop_gradient of BatchNorm must be "
"True or False at the same time."));
int N, C, H, W, D;
phi::funcs::ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D);
// init output
if (d_x) {
ctx.template Alloc<T>(d_x);
}
if (d_scale && d_bias) {
ctx.template Alloc<BatchNormParamType<T>>(d_scale);
ctx.template Alloc<BatchNormParamType<T>>(d_bias);
}
PADDLE_ENFORCE_EQ(
scale.dims().size(),
1UL,
phi::errors::InvalidArgument(
"The size of scale's dimensions must equal to 1. But received: "
"the size of scale's dimensions is [%d], the dimensions of scale "
"is [%s].",
scale.dims().size(),
scale.dims()));
PADDLE_ENFORCE_EQ(
scale.dims()[0],
C,
phi::errors::InvalidArgument(
"The first dimension of scale must equal to Channels[%d]. But "
"received: the first dimension of scale is [%d]",
C,
scale.dims()[0]));
auto dtype = phi::backends::gpu::CudnnDataType<T>::type;
#ifdef PADDLE_WITH_HIP
auto compute_format =
data_layout == DataLayout::kNHWC ? DataLayout::kNHWC : DataLayout::kNCHW;
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// HIP do not support compute format of NHWC
// auto compute_format = DataLayout::kNCHW;
#else
const bool fast_nhwc_batch_norm = dtype == CUDNN_DATA_HALF &&
FLAGS_cudnn_batchnorm_spatial_persistent &&
(reserve_space.get_ptr() != nullptr);
auto compute_format = fast_nhwc_batch_norm && data_layout == DataLayout::kNHWC
? DataLayout::kNHWC
: DataLayout::kNCHW;
#endif
DenseTensor transformed_x(x.type());
DenseTensor transformed_d_y(d_y->type());
DenseTensor transformed_d_x;
if (data_layout == DataLayout::kNHWC && compute_format == DataLayout::kNCHW &&
x_dims.size() > 2) {
VLOG(3) << "Transform input tensor from NHWC to NCHW.";
ResizeToChannelFirst<Context, T>(ctx, &x, &transformed_x);
TransToChannelFirst<Context, T>(ctx, &x, &transformed_x);
ResizeToChannelFirst<Context, T>(ctx, d_y, &transformed_d_y);
TransToChannelFirst<Context, T>(ctx, d_y, &transformed_d_y);
if (d_x) {
ResizeToChannelFirst<Context, T>(ctx, d_x, &transformed_d_x);
}
} else {
transformed_x.ShareDataWith(x);
transformed_d_y.ShareDataWith(*d_y);
if (d_x) {
transformed_d_x.ShareDataWith(*d_x);
}
}
std::vector<int> dims;
std::vector<int> strides;
if (compute_format == DataLayout::kNCHW) {
dims = {N, C, H, W, D};
strides = {C * H * W * D, H * W * D, W * D, D, 1};
} else {
dims = {N, C, H, W, D};
strides = {H * W * C * D, 1, W * D * C, D * C, C};
}
const int num = transformed_x.numel();
#ifdef HIPCC
const int block = 256;
#else
const int block = 512;
#endif
int max_threads = ctx.GetMaxPhysicalThreadCount();
const int max_blocks = std::max(max_threads / block, 1);
int grid1 = (num + block - 1) / block;
int grid2 = std::min(C, max_blocks);
auto stream = ctx.stream();
InplaceHelper<T> inplace_functor;
if (!use_global_stats) {
if ((N * H * W * D) == 1) {
if (d_x) {
phi::Copy(ctx, *d_y, ctx.GetPlace(), false, d_x);
}
phi::funcs::SetConstant<Context, BatchNormParamType<T>> functor;
functor(ctx, d_scale, static_cast<BatchNormParamType<T>>(0));
functor(ctx, d_bias, static_cast<BatchNormParamType<T>>(0));
return;
}
// ------------------- cudnn descriptors ---------------------
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// miopenTensorDescriptor_t data_desc_;
// miopenTensorDescriptor_t bn_param_desc_;
// miopenBatchNormMode_t mode_;
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenCreateTensorDescriptor(&data_desc_));
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenCreateTensorDescriptor(&bn_param_desc_));
#else
cudnnTensorDescriptor_t data_desc_;
cudnnTensorDescriptor_t bn_param_desc_;
cudnnBatchNormMode_t mode_;
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnCreateTensorDescriptor(&data_desc_));
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_));
#endif
if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) {
LOG(ERROR) << "Provided epsilon is smaller than "
<< "CUDNN_BN_MIN_EPSILON. Setting it to "
<< "CUDNN_BN_MIN_EPSILON instead.";
}
epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON);
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// mode_ = miopenBNSpatial;
#elif CUDNN_VERSION_MIN(7, 0, 1)
if (FLAGS_cudnn_batchnorm_spatial_persistent) {
mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
} else if (H == 1 && W == 1) {
mode_ = CUDNN_BATCHNORM_PER_ACTIVATION;
} else {
mode_ = CUDNN_BATCHNORM_SPATIAL;
}
#else
if (H == 1 && W == 1) {
mode_ = CUDNN_BATCHNORM_PER_ACTIVATION;
} else {
mode_ = CUDNN_BATCHNORM_SPATIAL;
}
#endif // CUDNN_VERSION_MIN(7, 0, 1)
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetTensorDescriptor(
// data_desc_, CudnnDataType<T>::type,
// x_dims.size() > 3 ? x_dims.size() : 4, const_cast<int *>(dims.data()),
// const_cast<int *>(strides.data())));
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenDeriveBNTensorDescriptor(bn_param_desc_,
// data_desc_, mode_));
#else
PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::cudnnSetTensorNdDescriptor(
data_desc_,
CudnnDataType<T>::type,
x_dims.size() > 3 ? x_dims.size() : 4,
dims.data(),
strides.data()));
PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::cudnnDeriveBNTensorDescriptor(
bn_param_desc_, data_desc_, mode_));
#endif
const auto *saved_mean_data =
saved_mean.template data<BatchNormParamType<T>>();
const auto *saved_var_data =
saved_variance.template data<BatchNormParamType<T>>();
if (is_inplace) {
inplace_functor(compute_format,
transformed_x.data<T>(),
scale.template data<BatchNormParamType<T>>(),
bias.template data<BatchNormParamType<T>>(),
saved_mean_data,
saved_var_data,
epsilon,
C,
H * W * D,
num,
transformed_x.data<T>(),
grid2,
block,
stream);
}
// This branch calls CUDNN APIs
if (d_x && d_scale && d_bias) {
#ifdef PADDLE_WITH_HIP
if (compute_format == DataLayout::kNCHW) {
BNBackward<T, block, DataLayout::kNCHW>
<<<grid2, block, 0, ctx.stream()>>>(
transformed_d_y.template data<T>(),
transformed_x.template data<T>(),
scale.template data<BatchNormParamType<T>>(),
saved_mean_data,
saved_var_data,
C,
N,
H * W * D,
epsilon,
transformed_d_x.template data<T>(),
ctx.template Alloc<BatchNormParamType<T>>(d_scale),
ctx.template Alloc<BatchNormParamType<T>>(d_bias));
} else {
BNBackward<T, block, DataLayout::kNHWC>
<<<grid2, block, 0, ctx.stream()>>>(
transformed_d_y.template data<T>(),
transformed_x.template data<T>(),
scale.template data<BatchNormParamType<T>>(),
saved_mean_data,
saved_var_data,
C,
N,
H * W * D,
epsilon,
transformed_d_x.template data<T>(),
ctx.template Alloc<BatchNormParamType<T>>(d_scale),
ctx.template Alloc<BatchNormParamType<T>>(d_bias));
}
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenBatchNormalizationBackward(
// dev_ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(),
// CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(),
// CudnnDataType<T>::kZero(), data_desc_,
// transformed_x.template data<T>(), data_desc_,
// transformed_d_y.template data<T>(), data_desc_,
// transformed_d_x.template mutable_data<T>(ctx.GetPlace()),
// bn_param_desc_, scale->template data<BatchNormParamType<T>>(),
// d_scale->template mutable_data<BatchNormParamType<T>>(
// ctx.GetPlace()),
// d_bias->template mutable_data<BatchNormParamType<T>>(
// ctx.GetPlace()),
// epsilon, saved_mean_data, saved_var_data));
#else
}
    // CUDNN only supports small batch sizes
bool use_native_nhwc =
d_x ? (x_dims.size() == 4 && compute_format == DataLayout::kNHWC &&
H * W >= CUDNN_SPATIAL_THRESHOLD_EVAL)
: false;
const bool use_native_kernel =
((x_dims.size() == 2 && N >= CUDNN_PER_ACTIVATION_THRESHOLD) ||
(x_dims.size() == 3 && N >= CUDNN_SPATIAL_THRESHOLD_TRAIN));
if (use_native_nhwc || (d_x && d_scale && d_bias)) {
if (use_native_kernel || use_native_nhwc) {
if (x_dims.size() == 2 || use_native_nhwc) {
dim3 block;
dim3 grid;
const int block_size = 512;
// init intermediate storage
DenseTensor block_data_tensor;
DenseTensor flag_tensor;
DenseTensor compute_mean_tensor =
phi::Empty<BatchNormParamType<T>, Context>(ctx, {C});
DenseTensor compute_inv_var_tensor =
phi::Empty<BatchNormParamType<T>, Context>(ctx, {C});
BatchNormParamType<T> *block_data_ptr = nullptr;
int *flag_ptr = nullptr;
funcs::SetLaunchConfigInfoForChannelLast<T, BatchNormParamType<T>>(
ctx,
&block_data_tensor,
&flag_tensor,
&block_data_ptr,
&flag_ptr,
N,
H,
W,
D,
C,
block_size,
&block,
&grid);
// 1. reduce_sum(x) => mean, inv_var
auto *mean_ptr =
saved_mean_data == nullptr
? compute_mean_tensor.data<BatchNormParamType<T>>()
: saved_mean_data;
auto *variance_ptr =
saved_var_data == nullptr
? compute_inv_var_tensor.data<BatchNormParamType<T>>()
: saved_var_data;
if (saved_mean_data == nullptr) {
BNBackward2DChannelLastStage1<T, block_size>
<<<grid, block, 0, ctx.stream()>>>(
transformed_x.template data<T>(),
C,
N,
H * W * D,
epsilon,
block_data_ptr,
compute_mean_tensor.data<BatchNormParamType<T>>(),
compute_inv_var_tensor.data<BatchNormParamType<T>>(),
flag_ptr);
}
// 2. reduce_sum(x, dy, mean) => dscale, dbias
BatchNormParamType<T> *dscale = nullptr;
BatchNormParamType<T> *dbias = nullptr;
bool with_scale = false;
if (d_scale && d_bias) {
dscale = ctx.template Alloc<BatchNormParamType<T>>(d_scale);
dbias = ctx.template Alloc<BatchNormParamType<T>>(d_bias);
} else {
DenseTensor dscale_mem =
phi::Empty<BatchNormParamType<T>, Context>(ctx, {C});
DenseTensor dbias_mem =
phi::Empty<BatchNormParamType<T>, Context>(ctx, {C});
dscale = dscale_mem.data<BatchNormParamType<T>>();
dbias = dbias_mem.data<BatchNormParamType<T>>();
}
BNBackward2DChannelLastStage2<T, block_size>
<<<grid, block, 0, ctx.stream()>>>(
transformed_d_y.template data<T>(),
transformed_x.template data<T>(),
mean_ptr,
variance_ptr,
C,
N,
H * W * D,
epsilon,
false,
block_data_ptr,
dscale,
dbias,
flag_ptr);
// 3. elementwise_mul(scale, mean, inv_var, dy, dscale, dbias) => dx
BNBackward2DChannelLastStage3<T, block_size>
<<<grid, block, 0, ctx.stream()>>>(
transformed_d_y.template data<T>(),
transformed_x.template data<T>(),
scale.template data<BatchNormParamType<T>>(),
dscale,
dbias,
mean_ptr,
variance_ptr,
C,
N,
H * W * D,
epsilon,
transformed_d_x.template data<T>());
} else {
if (compute_format == DataLayout::kNCHW) {
BNBackward<T, block, DataLayout::kNCHW>
<<<grid2, block, 0, ctx.stream()>>>(
transformed_d_y.template data<T>(),
transformed_x.template data<T>(),
scale.template data<BatchNormParamType<T>>(),
saved_mean_data,
saved_var_data,
C,
N,
H * W * D,
epsilon,
transformed_d_x.template data<T>(),
ctx.template Alloc<BatchNormParamType<T>>(d_scale),
ctx.template Alloc<BatchNormParamType<T>>(d_bias));
} else {
BNBackward<T, block, DataLayout::kNHWC>
<<<grid2, block, 0, ctx.stream()>>>(
transformed_d_y.template data<T>(),
transformed_x.template data<T>(),
scale.template data<BatchNormParamType<T>>(),
saved_mean_data,
saved_var_data,
C,
N,
H * W * D,
epsilon,
transformed_d_x.template data<T>(),
ctx.template Alloc<BatchNormParamType<T>>(d_scale),
ctx.template Alloc<BatchNormParamType<T>>(d_bias));
}
}
} else {
#if CUDNN_VERSION_MIN(7, 4, 1)
size_t workspace_size = 0;
void *workspace_ptr = nullptr;
DenseTensor workspace_tensor;
auto reserve_space_size = reserve_space->memory_size();
// --------------- cudnn batchnorm workspace ---------------
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnGetBatchNormalizationBackwardExWorkspaceSize(
/*handle=*/ctx.cudnn_handle(),
/*mode=*/mode_,
/*bnIps=*/CUDNN_BATCHNORM_OPS_BN,
/*xDesc=*/data_desc_,
/*yDesc=*/data_desc_,
/*dyDesc=*/data_desc_,
/*dzDesc=*/nullptr,
/*dxDesc=*/data_desc_,
/*bnScaleBiasMeanVarDesc=*/bn_param_desc_,
/*activationDesc=*/nullptr,
/*sizeInBytes=*/&workspace_size));
workspace_tensor.Resize({static_cast<int64_t>(workspace_size)});
workspace_ptr =
static_cast<void *>(ctx.template Alloc<uint8_t>(&workspace_tensor));
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnBatchNormalizationBackwardEx(
/*handle=*/ctx.cudnn_handle(),
/*mode=*/mode_,
/*bnOps=*/CUDNN_BATCHNORM_OPS_BN,
/*alphaDataDiff=*/CudnnDataType<T>::kOne(),
/*betaDataDiff=*/CudnnDataType<T>::kZero(),
/*alphaParamDiff=*/CudnnDataType<T>::kOne(),
/*betaParamDiff=*/CudnnDataType<T>::kZero(),
/*xDesc=*/data_desc_,
/*xData=*/transformed_x.template data<T>(),
/*yDesc=*/nullptr,
/*yData=*/nullptr,
/*dyDesc=*/data_desc_,
/*dyData=*/transformed_d_y.template data<T>(),
/*dzDesc=*/nullptr,
/*dzData=*/nullptr,
/*dxDesc=*/data_desc_,
/*dxData=*/ctx.template Alloc<T>(&transformed_d_x),
/*dBnScaleBiasDesc=*/bn_param_desc_,
/*bnScaleData=*/scale.template data<BatchNormParamType<T>>(),
/*bnBiasData=*/nullptr,
/*dBnScaleData=*/
ctx.template Alloc<BatchNormParamType<T>>(d_scale),
/*dBnBiasData=*/
ctx.template Alloc<BatchNormParamType<T>>(d_bias),
/*epsilon=*/epsilon,
/*savedMean=*/saved_mean_data,
/*savedInvVariance=*/saved_var_data,
/*activationDesc=*/nullptr,
/*workspace=*/workspace_ptr,
/*workSpaceSizeInBytes=*/workspace_size,
/*reserveSpace=*/
const_cast<uint8_t *>(reserve_space->template data<uint8_t>()),
/*reserveSpaceSizeInBytes=*/reserve_space_size));
#else
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnBatchNormalizationBackward(
ctx.cudnn_handle(),
mode_,
CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(),
CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(),
data_desc_,
transformed_x.template data<T>(),
data_desc_,
transformed_d_y.template data<T>(),
data_desc_,
ctx.template Alloc<T>(&transformed_d_x),
bn_param_desc_,
scale.template data<BatchNormParamType<T>>(),
ctx.template Alloc<BatchNormParamType<T>>(d_scale),
ctx.template Alloc<BatchNormParamType<T>>(d_bias),
epsilon,
saved_mean_data,
saved_var_data));
#endif // CUDNN_VERSION_MIN(7, 4, 1)
}
#endif
if (data_layout == DataLayout::kNHWC &&
compute_format == DataLayout::kNCHW) {
VLOG(3) << "Transform batchnorm output from NCHW to NHWC";
TransToChannelLast<Context, T>(ctx, &transformed_d_x, d_x);
}
} else {
    // This branch calls CUDA kernels
if (compute_format == DataLayout::kNCHW) {
if (data_layout == DataLayout::kNHWC) {
if (d_x) {
BNBackwardData<T, block, phi::DataLayout::kNHWC>
<<<grid2, block, 0, ctx.stream()>>>(
d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
saved_mean_data,
x.data<T>(),
saved_var_data,
C,
N,
H * W * D,
d_x->data<T>());
}
if (d_scale && d_bias) {
KeBNBackwardScaleBias<T, block, phi::DataLayout::kNHWC>
<<<grid2, block, 0, stream>>>(
d_y->data<T>(),
x.data<T>(),
saved_mean_data,
saved_var_data,
epsilon,
N,
C,
H * W * D,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
} else {
if (d_x) {
BNBackwardData<T, block, phi::DataLayout::kNCHW>
<<<grid2, block, 0, ctx.stream()>>>(
d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
saved_mean_data,
x.data<T>(),
saved_var_data,
C,
N,
H * W * D,
d_x->data<T>());
}
if (d_scale && d_bias) {
KeBNBackwardScaleBias<T, block, phi::DataLayout::kNCHW>
<<<grid2, block, 0, stream>>>(
d_y->data<T>(),
x.data<T>(),
saved_mean_data,
saved_var_data,
epsilon,
N,
C,
H * W * D,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
}
} else {
if (d_x) {
BNBackwardData<T, block, phi::DataLayout::kNHWC>
<<<grid2, block, 0, ctx.stream()>>>(
d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
saved_mean_data,
x.data<T>(),
saved_var_data,
C,
N,
H * W * D,
d_x->data<T>());
}
if (d_scale && d_bias) {
KeBNBackwardScaleBias<T, block, phi::DataLayout::kNHWC>
<<<grid2, block, 0, stream>>>(
d_y->data<T>(),
x.data<T>(),
saved_mean_data,
saved_var_data,
epsilon,
N,
C,
H * W * D,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
}
}
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// clean when exit.
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenDestroyTensorDescriptor(data_desc_));
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenDestroyTensorDescriptor(bn_param_desc_));
#else
// clean when exit.
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnDestroyTensorDescriptor(data_desc_));
PADDLE_ENFORCE_GPU_SUCCESS(
phi::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_));
#endif
} else {
const auto *running_mean = mean.get_ptr();
const auto *running_var = variance.get_ptr();
const auto *running_mean_data =
running_mean->template data<BatchNormParamType<T>>();
const auto *running_var_data =
running_var->template data<BatchNormParamType<T>>();
if (is_inplace) {
auto px = x;
inplace_functor(data_layout,
ctx.template Alloc<T>(&px),
scale.template data<BatchNormParamType<T>>(),
bias.template data<BatchNormParamType<T>>(),
running_mean_data,
running_var_data,
epsilon,
C,
H * W * D,
num,
x.data<T>(),
grid2,
block,
stream);
}
if (compute_format == DataLayout::kNCHW) {
if (data_layout == DataLayout::kNHWC) {
if (d_x) {
KeBNBackwardData<T, phi::DataLayout::kNHWC>
<<<grid1, block, 0, stream>>>(d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
running_var_data,
epsilon,
C,
H * W,
num,
d_x->data<T>());
}
if (d_scale && d_bias) {
KeBNBackwardScaleBias<T, block, phi::DataLayout::kNHWC>
<<<grid2, block, 0, stream>>>(
d_y->data<T>(),
x.data<T>(),
running_mean_data,
running_var_data,
epsilon,
N,
C,
H * W * D,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
} else {
if (d_x) {
KeBNBackwardData<T, phi::DataLayout::kNCHW>
<<<grid1, block, 0, stream>>>(d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
running_var_data,
epsilon,
C,
H * W,
num,
d_x->data<T>());
}
if (d_scale && d_bias) {
KeBNBackwardScaleBias<T, block, phi::DataLayout::kNCHW>
<<<grid2, block, 0, stream>>>(
d_y->data<T>(),
x.data<T>(),
running_mean_data,
running_var_data,
epsilon,
N,
C,
H * W * D,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
}
} else {
if (d_x) {
KeBNBackwardData<T, phi::DataLayout::kNHWC>
<<<grid1, block, 0, stream>>>(d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
running_var_data,
epsilon,
C,
H * W,
num,
d_x->data<T>());
}
if (d_scale && d_bias) {
dim3 block;
dim3 grid;
const int block_size = 512;
// init intermediate storage
DenseTensor block_data_tensor;
DenseTensor flag_tensor;
BatchNormParamType<T> *block_data_ptr = nullptr;
int *flag_ptr = nullptr;
funcs::SetLaunchConfigInfoForChannelLast<T, BatchNormParamType<T>>(
ctx,
&block_data_tensor,
&flag_tensor,
&block_data_ptr,
&flag_ptr,
N,
H,
W,
D,
C,
block_size,
&block,
&grid);
BNBackward2DChannelLastStage2<T, block_size>
<<<grid, block, 0, ctx.stream()>>>(
transformed_d_y.template data<T>(),
transformed_x.template data<T>(),
running_mean_data,
running_var_data,
C,
N,
H * W * D,
epsilon,
true,
block_data_ptr,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>(),
flag_ptr);
}
}
}
}
template <typename T, typename Context>
void BatchNormGradKernel(const Context &dev_ctx,
const DenseTensor &x,
const DenseTensor &scale,
const DenseTensor &bias,
const paddle::optional<DenseTensor> &mean,
const paddle::optional<DenseTensor> &variance,
const DenseTensor &saved_mean,
const DenseTensor &saved_variance,
const paddle::optional<DenseTensor> &reserve_space,
const DenseTensor &y_grad,
float momentum,
float epsilon,
const std::string &data_layout,
bool is_test,
bool use_global_stats,
bool trainable_statistics,
DenseTensor *x_grad,
DenseTensor *scale_grad,
DenseTensor *bias_grad) {
BatchNormGradFunctor<T, Context>(dev_ctx,
x,
scale,
bias,
mean,
variance,
saved_mean,
saved_variance,
reserve_space,
y_grad,
momentum,
epsilon,
data_layout,
is_test,
use_global_stats,
trainable_statistics,
false,
x_grad,
scale_grad,
bias_grad);
}
template <typename T, typename Context>
void BatchNormDoubleGradKernel(
const Context &ctx,
const DenseTensor &x,
const DenseTensor &scale,
const paddle::optional<DenseTensor> &mean,
const paddle::optional<DenseTensor> &variance,
const DenseTensor &saved_mean,
const DenseTensor &saved_variance,
const DenseTensor &y_grad,
const paddle::optional<DenseTensor> &x_grad_grad,
const paddle::optional<DenseTensor> &scale_grad_grad,
const paddle::optional<DenseTensor> &bias_grad_grad,
float momentum,
float epsilon,
const std::string &data_layout_str,
bool is_test,
bool use_global_stats,
bool trainable_statistics,
DenseTensor *x_grad,
DenseTensor *scale_grad,
DenseTensor *y_grad_grad) {
PADDLE_ENFORCE_EQ(is_test,
false,
phi::errors::InvalidArgument(
"`is_test = True` CANNOT be used in train program. If "
"you want to use global status in pre_train model, "
"please set `use_global_stats = True`"));
const DataLayout data_layout = phi::StringToDataLayout(data_layout_str);
const DenseTensor *running_mean = nullptr;
const DenseTensor *running_variance = nullptr;
if (use_global_stats) {
running_mean = mean.get_ptr();
running_variance = variance.get_ptr();
}
phi::funcs::NormDoubleGradFunctor<Context, T>(ctx,
data_layout,
&x,
&scale,
&y_grad,
&saved_mean,
&saved_variance,
running_mean,
running_variance,
epsilon,
use_global_stats,
x_grad_grad.get_ptr(),
scale_grad_grad.get_ptr(),
bias_grad_grad.get_ptr(),
x_grad,
scale_grad,
y_grad_grad);
}
} // namespace phi
#ifdef PADDLE_WITH_HIP
PD_DECLARE_BN_GRAD_FUNCTOR(float, GPU);
PD_DECLARE_BN_GRAD_FUNCTOR(phi::dtype::float16, GPU);
PD_REGISTER_KERNEL(batch_norm_grad,
GPU,
ALL_LAYOUT,
phi::BatchNormGradKernel,
float,
phi::dtype::float16) {}
#else
#if CUDNN_VERSION_MIN(8, 1, 0)
PD_DECLARE_BN_GRAD_FUNCTOR(float, GPU);
PD_DECLARE_BN_GRAD_FUNCTOR(double, GPU);
PD_DECLARE_BN_GRAD_FUNCTOR(phi::dtype::bfloat16, GPU);
PD_DECLARE_BN_GRAD_FUNCTOR(phi::dtype::float16, GPU);
PD_REGISTER_KERNEL(batch_norm_grad,
GPU,
ALL_LAYOUT,
phi::BatchNormGradKernel,
float,
double,
phi::dtype::bfloat16,
phi::dtype::float16) {
if (kernel_key.dtype() == phi::DataType::FLOAT16 ||
kernel_key.dtype() == phi::DataType::BFLOAT16) {
kernel->OutputAt(0).SetDataType(phi::DataType::FLOAT32); // x_grad
kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32); // scale_grad
kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32); // bias_grad
}
}
#else
PD_DECLARE_BN_GRAD_FUNCTOR(float, GPU);
PD_DECLARE_BN_GRAD_FUNCTOR(double, GPU);
PD_DECLARE_BN_GRAD_FUNCTOR(phi::dtype::float16, GPU);
PD_REGISTER_KERNEL(batch_norm_grad,
GPU,
ALL_LAYOUT,
phi::BatchNormGradKernel,
float,
double,
phi::dtype::float16) {
if (kernel_key.dtype() == phi::DataType::FLOAT16) {
kernel->OutputAt(0).SetDataType(phi::DataType::FLOAT32); // x_grad
kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32); // scale_grad
kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32); // bias_grad
}
}
#endif
#endif
#ifdef PADDLE_WITH_HIP
PD_REGISTER_KERNEL(batch_norm_double_grad,
GPU,
ALL_LAYOUT,
phi::BatchNormDoubleGradKernel,
float,
double) {}
#else
PD_REGISTER_KERNEL(batch_norm_double_grad,
GPU,
ALL_LAYOUT,
phi::BatchNormDoubleGradKernel,
float,
double) {}
#endif
|
8b3a41cff3ff167745de5618fb41e63ef2aa8343.hip | // !!! This is a file automatically generated by hipify!!!
/*
   Sums the elements of two vectors.
   Illustrates the use of mapped memory with hipHostMalloc(), using
   the hipHostMallocMapped flag to allocate memory that is visible
   to both the host and the device. Copies between host and device
   are implicit, just as with unified memory.
   hipDeviceSynchronize() before printing the result is required;
   otherwise the result may come out wrong.
   To compile: nvcc 02-soma-vet-mapped.cu -o 02-soma-vet-mapped
   To run: ./02-soma-vet-mapped
   NOTE: the vector size and the vector contents are hard-coded.
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
__global__ void soma(int *vetorA, int *vetorB,int *vetorC,int tam)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < tam)
{
vetorC[idx]=vetorA[idx]+vetorB[idx];
}
}
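/*
   Illustrative sketch, not part of the original example: on platforms without
   unified virtual addressing, the device-side alias of a mapped host
   allocation has to be fetched explicitly with hipHostGetDevicePointer()
   before the launch. The helper name deviceAliasOf is made up for this sketch.
*/
static int *deviceAliasOf(int *hostPtr)
{
	int *devPtr = NULL;
	hipHostGetDevicePointer((void**)&devPtr, hostPtr, 0);
	return devPtr;
}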
int main(int argc,char **argv)
{
int i,*vetorA,*vetorB,*vetorC,threadsPerBlock,blocksPerGrid;
int tam = 16; //5000;
	//Set the number of threads per block
threadsPerBlock = 256;
	//Allocate the vectors on host and device (memory mapped in a unified virtual address space)
hipHostMalloc((void**)&vetorA,tam*(sizeof(int)),hipHostMallocMapped);
hipHostMalloc((void**)&vetorB,tam*(sizeof(int)),hipHostMallocMapped);
hipHostMalloc((void**)&vetorC,tam*(sizeof(int)),hipHostMallocMapped);
	//Fill the vectors on the host
for(i=0;i<tam;i++)
{
vetorA[i]=i;
vetorB[i]=0; //-i;
}
	//Set the number of blocks per grid
blocksPerGrid=(tam+threadsPerBlock-1)/threadsPerBlock;
	//Launch the kernel with blocksPerGrid blocks and threadsPerBlock threads
hipLaunchKernelGGL(( soma) , dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, vetorA,vetorB,vetorC,tam);
hipDeviceSynchronize();
	//Print the result on the host
for(i=0;i<tam;i++)
{
printf("%d ",vetorC[i]);
}
printf("\n");
	//Free the vectors on host and device
hipHostFree(vetorA);
hipHostFree(vetorB);
hipHostFree(vetorC);
} | 8b3a41cff3ff167745de5618fb41e63ef2aa8343.cu | /*
   Sums the elements of two vectors.
   Illustrates the use of mapped memory with cudaHostAlloc(), using
   the cudaHostAllocMapped flag to allocate memory that is visible
   to both the host and the device. Copies between host and device
   are implicit, just as with unified memory.
   cudaDeviceSynchronize() before printing the result is required;
   otherwise the result may come out wrong.
   To compile: nvcc 02-soma-vet-mapped.cu -o 02-soma-vet-mapped
   To run: ./02-soma-vet-mapped
   NOTE: the vector size and the vector contents are hard-coded.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
__global__ void soma(int *vetorA, int *vetorB,int *vetorC,int tam)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < tam)
{
vetorC[idx]=vetorA[idx]+vetorB[idx];
}
}
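/*
   Illustrative sketch, not part of the original example: on platforms without
   unified virtual addressing, the device-side alias of a mapped host
   allocation has to be fetched explicitly with cudaHostGetDevicePointer()
   before the launch. The helper name deviceAliasOf is made up for this sketch.
*/
static int *deviceAliasOf(int *hostPtr)
{
	int *devPtr = NULL;
	cudaHostGetDevicePointer((void**)&devPtr, hostPtr, 0);
	return devPtr;
}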
int main(int argc,char **argv)
{
int i,*vetorA,*vetorB,*vetorC,threadsPerBlock,blocksPerGrid;
int tam = 16; //5000;
	//Set the number of threads per block
threadsPerBlock = 256;
	//Allocate the vectors on host and device (memory mapped in a unified virtual address space)
cudaHostAlloc((void**)&vetorA,tam*(sizeof(int)),cudaHostAllocMapped);
cudaHostAlloc((void**)&vetorB,tam*(sizeof(int)),cudaHostAllocMapped);
cudaHostAlloc((void**)&vetorC,tam*(sizeof(int)),cudaHostAllocMapped);
	//Fill the vectors on the host
for(i=0;i<tam;i++)
{
vetorA[i]=i;
vetorB[i]=0; //-i;
}
	//Set the number of blocks per grid
blocksPerGrid=(tam+threadsPerBlock-1)/threadsPerBlock;
	//Launch the kernel with blocksPerGrid blocks and threadsPerBlock threads
soma <<<blocksPerGrid,threadsPerBlock>>> (vetorA,vetorB,vetorC,tam);
cudaDeviceSynchronize();
	//Print the result on the host
for(i=0;i<tam;i++)
{
printf("%d ",vetorC[i]);
}
printf("\n");
//Desaloca os vetores no host e no device
cudaFreeHost(vetorA);
cudaFreeHost(vetorB);
cudaFreeHost(vetorC);
} |
556d21a0d5a00a1a6aafff9cb15db442a380adc4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
//Thread block size
#define BLOCK_SIZE 3
#define WA 10
// Matrix A width
#define HA 10
// Matrix A height
#define WB 10
// Matrix B width
#define HB WA
// Matrix B height
#define WC WB
// Matrix C width
#define HC HA
// Matrix C height
//Allocates a matrix with random float entries.
void randomInit(float * data ,int size)
{
for(int i = 0; i < size; ++i)
//data[i] = rand() / (float) RAND_MAX;
data[i] = i;
}
// CUDA Kernel
__global__ void matrixMul(float* C,float* A,float* B,int hA, int wA,int wB)
{
// 2D Thread ID
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
// value stores the element that is computed by the thread
float value = 0;
if(row < hA && col < wB)
{
for(int i = 0; i < wA; ++i)
{
float elementA = A[row * wA + i];
float elementB = B[wA * i + col];
value += elementA * elementB;
}
// Write the matrix to device memory each
// thread writes one element
      C[row * wB + col] = value; // C is hA-by-wB, so rows of C have stride wB
}
}
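// Worked example of the launch geometry used in main() below (illustrative
// note): with WC = HC = 10 and BLOCK_SIZE = 3, the grid is
// ceil(10/3) x ceil(10/3) = 4 x 4 blocks of 3 x 3 threads (144 threads in
// total), and threads falling outside the 10 x 10 output range are masked
// off by the row/col guard in the kernel above.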
// Program main
int main(int argc ,char** argv)
{
// set seed for rand()
srand(2006);
// 1. allocate host memory for matrices A and B
unsigned int size_A = WA * HA;
unsigned int mem_size_A =sizeof(float) * size_A;
float* h_A = (float*) malloc(mem_size_A);
unsigned int size_B = WB * HB;
unsigned int mem_size_B =sizeof(float) * size_B;
float * h_B = (float*) malloc(mem_size_B);
// 2. initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// 3. print out A and B
printf("\n\nMatrix A\n");
for(int i = 0; i < size_A; i++)
{
printf("%6.0f ", h_A[i]);
if(((i + 1) % WA) == 0)
printf("\n");
}
printf("\n\nMatrix B\n");
for(int i = 0; i < size_B; i++)
{
printf("%6.0f ", h_B[i]);
if(((i + 1) % WB) == 0)
printf("\n");
}
// 4. allocate host memory for the result C
unsigned int size_C = WC * HC;
unsigned int mem_size_C =sizeof(float) * size_C;
float * h_C = (float *) malloc(mem_size_C);
// 8. allocate device memory
float* d_A;
float* d_B;
hipMalloc((void**) &d_A, mem_size_A);
hipMalloc((void**) &d_B, mem_size_B);
//9. copy host memory to device
hipMemcpy(d_A, h_A,mem_size_A ,hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B,mem_size_B ,hipMemcpyHostToDevice);
// 10. allocate device memory for the result
float* d_C;
hipMalloc((void**) &d_C, mem_size_C);
// 5. perform the calculation
// setup execution parameters
dim3 threads(BLOCK_SIZE , BLOCK_SIZE);
dim3 grid((int)ceil((float)WC / threads.x), (int)ceil((float)HC / threads.y));
// execute the kernel
hipLaunchKernelGGL(( matrixMul), dim3(grid) , dim3(threads) , 0, 0, d_C, d_A,d_B, HA, WA, WB);
// 11. copy result from device to host
hipMemcpy(h_C, d_C, mem_size_C ,hipMemcpyDeviceToHost);
// 6. print out the results
printf("\n\n Matrix C ( Results ) \n");
for(int i = 0;i<size_C; i ++){
printf("%6.0f ",h_C[i]);
if(((i+ 1) % WC) == 0)
printf("\n");
}
printf("\n");
// 7.clean up memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
free(h_A);
free(h_B);
free(h_C);
}
| 556d21a0d5a00a1a6aafff9cb15db442a380adc4.cu | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
//Thread block size
#define BLOCK_SIZE 3
#define WA 10
// Matrix A width
#define HA 10
// Matrix A height
#define WB 10
// Matrix B width
#define HB WA
// Matrix B height
#define WC WB
// Matrix C width
#define HC HA
// Matrix C height
//Allocates a matrix with random float entries.
void randomInit(float * data ,int size)
{
for(int i = 0; i < size; ++i)
//data[i] = rand() / (float) RAND_MAX;
data[i] = i;
}
// CUDA Kernel
__global__ void matrixMul(float* C,float* A,float* B,int hA, int wA,int wB)
{
// 2D Thread ID
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
// value stores the element that is computed by the thread
float value = 0;
if(row < hA && col < wB)
{
for(int i = 0; i < wA; ++i)
{
float elementA = A[row * wA + i];
float elementB = B[wA * i + col];
value += elementA * elementB;
}
// Write the matrix to device memory each
// thread writes one element
      C[row * wB + col] = value; // C is hA-by-wB, so rows of C have stride wB
}
}
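// Worked example of the launch geometry used in main() below (illustrative
// note): with WC = HC = 10 and BLOCK_SIZE = 3, the grid is
// ceil(10/3) x ceil(10/3) = 4 x 4 blocks of 3 x 3 threads (144 threads in
// total), and threads falling outside the 10 x 10 output range are masked
// off by the row/col guard in the kernel above.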
// Program main
int main(int argc ,char** argv)
{
// set seed for rand()
srand(2006);
// 1. allocate host memory for matrices A and B
unsigned int size_A = WA * HA;
unsigned int mem_size_A =sizeof(float) * size_A;
float* h_A = (float*) malloc(mem_size_A);
unsigned int size_B = WB * HB;
unsigned int mem_size_B =sizeof(float) * size_B;
float * h_B = (float*) malloc(mem_size_B);
// 2. initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// 3. print out A and B
printf("\n\nMatrix A\n");
for(int i = 0; i < size_A; i++)
{
printf("%6.0f ", h_A[i]);
if(((i + 1) % WA) == 0)
printf("\n");
}
printf("\n\nMatrix B\n");
for(int i = 0; i < size_B; i++)
{
printf("%6.0f ", h_B[i]);
if(((i + 1) % WB) == 0)
printf("\n");
}
// 4. allocate host memory for the result C
unsigned int size_C = WC * HC;
unsigned int mem_size_C =sizeof(float) * size_C;
float * h_C = (float *) malloc(mem_size_C);
// 8. allocate device memory
float* d_A;
float* d_B;
cudaMalloc((void**) &d_A, mem_size_A);
cudaMalloc((void**) &d_B, mem_size_B);
//9. copy host memory to device
cudaMemcpy(d_A, h_A,mem_size_A ,cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B,mem_size_B ,cudaMemcpyHostToDevice);
// 10. allocate device memory for the result
float* d_C;
cudaMalloc((void**) &d_C, mem_size_C);
// 5. perform the calculation
// setup execution parameters
dim3 threads(BLOCK_SIZE , BLOCK_SIZE);
dim3 grid((int)ceil((float)WC / threads.x), (int)ceil((float)HC / threads.y));
// execute the kernel
matrixMul<<< grid , threads >>>(d_C, d_A,d_B, HA, WA, WB);
// 11. copy result from device to host
cudaMemcpy(h_C, d_C, mem_size_C ,cudaMemcpyDeviceToHost);
// 6. print out the results
printf("\n\n Matrix C ( Results ) \n");
for(int i = 0;i<size_C; i ++){
printf("%6.0f ",h_C[i]);
if(((i+ 1) % WC) == 0)
printf("\n");
}
printf("\n");
// 7.clean up memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
}
|
5a1ef8f51e45bb5470e0a1ea10a08672cb8f6a4c.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file ex_particle_CUDA_float.cu
* @author Michael Trotter & Matt Goodrum
* @brief Particle filter implementation completely in CUDA
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include "mex.h"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "hip/driver_types.h"
#define BLOCK_X 16
#define BLOCK_Y 16
#define PI acos(-1)
/**
* @var tex_CDF The CDF texture array
*/
texture <float> tex_CDF;
/**
* @var threads_per_block The number of threads per block used on the GPU
*/
const int threads_per_block = 512;
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is NOT thread-safe
* @return a double representing a Gaussian random number
*/
double randn(){
/* Box-Muller algorithm */
double u = (double)rand();
u = u/RAND_MAX;
double v = (double)rand();
v = v/RAND_MAX;
double cosine = cos(2*PI*v);
double rt = -2*log(u);
return sqrt(rt)*cosine;
}
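/**
 * Illustrative helper, not part of the original benchmark: rescales the N(0,1)
 * draw produced by randn() above to an arbitrary mean and standard deviation.
 * The name randn_scaled is made up for this sketch.
 */
double randn_scaled(double mu, double sigma){
	return mu + sigma*randn();
}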
/**
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
* @param e Cuda error code
*/
void check_error(hipError_t e) {
if (e != hipSuccess) {
printf("\nCUDA error: %s\n", hipGetErrorString(e));
exit(1);
}
}
/**
* Device function that determines the likelihood sum based on the formula: SUM( ((IK[IND] - 100)^2 - (IK[IND] - 228)^2) / 50 )
* @param I The 3D matrix
* @param ind The current ind array
* @param numOnes The length of ind array
* @return A double representing the sum
*/
__device__ float calcLikelihoodSum(unsigned char * I, int * ind, int numOnes, int index){
float likelihoodSum = 0.0;
int x;
for(x = 0; x < numOnes; x++)
likelihoodSum += (pow((float)(I[ind[index*numOnes + x]] - 100),2) - pow((float)(I[ind[index*numOnes + x]]-228),2))/50.0;
return likelihoodSum;
}
/**
* Device function used to calculated the CDF using the previously calculated weights
* @param CDF The CDF array
* @param weights The weights array
* @param Nparticles The length of CDF + weights array
*/
__device__ void cdfCalc(float * CDF, float * weights, int Nparticles){
int x;
CDF[0] = weights[0];
for(x = 1; x < Nparticles; x++){
CDF[x] = weights[x] + CDF[x-1];
}
}
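/**
 * Worked example of cdfCalc above (illustrative note): for normalized weights
 * {0.1, 0.2, 0.3, 0.4} the running sums give CDF = {0.1, 0.3, 0.6, 1.0},
 * so the last entry of a properly normalized CDF is 1.
 */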
/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe for use on the GPU
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
__device__ float d_randu(int * seed, int index)
{
//use GCC's M, A and C value for the LCG
int M = INT_MAX;
int A = 1103515245;
int C = 12345;
int num = A*seed[index] + C;
seed[index] = num % M;
num = seed[index];
return fabs(num/((float) M));
}
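/**
 * Note on the recurrence above (illustrative sketch): each call advances the
 * seed as
 *   seed[index] <- (1103515245 * seed[index] + 12345) mod INT_MAX
 * (signed overflow aside) and returns |seed[index]| / INT_MAX, a float in [0, 1).
 */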
/**
* Generates a normally distributed random number using the Box-Muller transformation on the GPU
* @note This function is thread-safe for use on the GPU
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
__device__ float d_randn(int * seed, int index){
	//Box-Muller algorithm
float pi = 3.14159265358979323846;
float u = d_randu(seed, index);
float v = d_randu(seed, index);
float cosine = cos(2*pi*v);
float rt = -2*log(u);
return sqrt(rt)*cosine;
}
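/**
 * Note on the transform above (illustrative sketch): this is the standard
 * Box-Muller identity: given independent u, v ~ U(0,1),
 *   z = sqrt(-2*ln(u)) * cos(2*pi*v)
 * is distributed N(0,1).
 */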
/**
* @deprecated device function for calculating the weights; replaced by reduction function for weights
* @param weights The weights array
* @param likelihood The likelihood array
* @param Nparticles The length of the weights and likelihood arrays
* @return The sum of the weights
*/
__device__ float updateWeights(float * weights, float * likelihood, int Nparticles){
int x;
float sum = 0;
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x] * exp(likelihood[x]);
sum += weights[x];
}
return sum;
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses binary search before switching to sequential search
* @param CDF The CDF
* @param beginIndex The index to start searching from
* @param endIndex The index to stop searching
* @param value The value to find
* @return The index of value in the CDF; if value is never found, returns the last index
* @warning Use at your own risk; not fully tested
*/
__device__ int findIndexBin(float * CDF, int beginIndex, int endIndex, float value)
{
if(endIndex < beginIndex)
return -1;
int middleIndex;
while(endIndex > beginIndex)
{
middleIndex = beginIndex + ((endIndex-beginIndex)/2);
if(CDF[middleIndex] >= value)
{
if(middleIndex == 0)
return middleIndex;
else if(CDF[middleIndex-1] < value)
return middleIndex;
else if(CDF[middleIndex-1] == value)
{
while(CDF[middleIndex] == value && middleIndex >= 0)
middleIndex--;
middleIndex++;
return middleIndex;
}
}
if(CDF[middleIndex] > value)
endIndex = middleIndex-1;
else
beginIndex = middleIndex+1;
}
return -1;
}
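/**
 * Usage sketch (illustrative, not wired into the kernels): a resampling kernel
 * could replace its linear scan over the CDF with
 *   int idx = findIndexBin(CDF, 0, Nparticles - 1, u[i]);
 *   if(idx == -1) idx = Nparticles - 1;
 * find_index_kernel below keeps the sequential scan through tex_CDF instead.
 */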
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and updates arrayX and arrayY using those values
* @note This function uses sequential search
* @param arrayX The array containing the guesses in the x direction
* @param arrayY The array containing the guesses in the y direction
* @param CDF The CDF array
* @param u The array containing the updated values
* @param xj The temp array for arrayX
* @param yj The temp array for arrayY
* @param weights The weights array
* @param Nparticles The number of particles used
* @param x_partial_sums The array containing the partial sum of arrayX; final sum is at index 0
* @param y_partial_sums The array containing the partial sum of arrayY; final sum is at index 0
* @param k The current frame
* @param x_loc The array containing the x location of the object for frame k
* @param y_loc The array containing the y location of the object for frame k
*/
__global__ void find_index_kernel(float * arrayX, float * arrayY, float * CDF, float * u, float * xj, float * yj, float * weights, int Nparticles, float * x_partial_sums, float * y_partial_sums, int k, float * x_loc, float * y_loc){
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
x_loc[k] = x_partial_sums[0];
y_loc[k] = y_partial_sums[0];
if(i < Nparticles){
int index = -1;
int x;
for(x = 0; x < Nparticles; x++){
if(tex1Dfetch(tex_CDF, x) >= u[i]){
index = x;
break;
}
}
if(index == -1){
index = Nparticles-1;
}
xj[i] = arrayX[index];
yj[i] = arrayY[index];
weights[i] = 1/((float)(Nparticles));
}
__syncthreads();
}
/**
* Normalizes the weights using the partial sums previously calculated; sets up the partial sums for the x + y positions
* @param weights The weights array
* @param Nparticles The length of the weights array, arrayX and arrayY
* @param partial_sums The array containing the result of the partial sums in its initial index
* @param CDF The CDF array
* @param u The array for calculating the indices used for resampling
* @param seed The seed array used for random number generation
* @param x_partial_sums The array used for storing the partial sums of arrayX
* @param y_partial_sums The array used for storing the partial sums of arrayY
* @param arrayX The array storing the guesses for the x position of the particle
* @param arrayY The array storing the guesses for the y position of the particle
*/
__global__ void normalize_weights_kernel(float * weights, int Nparticles, float * partial_sums, float * CDF, float * u, int * seed, float * x_partial_sums, float * y_partial_sums, float * arrayX, float * arrayY)
{
int block_id = blockIdx.x;
int i = blockDim.x*block_id + threadIdx.x;
__shared__ float u1, sumWeights;
__shared__ float xbuffer[512];
__shared__ float ybuffer[512];
sumWeights = partial_sums[0];
if(i < Nparticles)
{
weights[i] = weights[i]/sumWeights;
}
if(i == 0)
{
cdfCalc(CDF, weights, Nparticles);
u1 = (1/((float)(Nparticles)))*d_randu(seed, i);
}
if(i < Nparticles)
{
__syncthreads();
u[i] = u1 + i/((float)(Nparticles));
xbuffer[threadIdx.x] = weights[i]*arrayX[i];
ybuffer[threadIdx.x] = weights[i]*arrayY[i];
__syncthreads();
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if(threadIdx.x < s)
{
xbuffer[threadIdx.x] += xbuffer[threadIdx.x + s];
ybuffer[threadIdx.x] += ybuffer[threadIdx.x + s];
}
__syncthreads();
}
if(threadIdx.x == 0)
{
x_partial_sums[blockIdx.x] = xbuffer[0];
y_partial_sums[blockIdx.x] = ybuffer[0];
}
}
}
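/**
 * Worked example of the resampling positions built above (illustrative note):
 * with Nparticles = 4 and u1 = 0.1, the stratified offsets are
 * u = {0.10, 0.35, 0.60, 0.85}, i.e. u1 + i/Nparticles for i = 0..3.
 */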
/**
* Calculates the ultimate sum using the partial sums array & stores it at index 0
* @param partial_sums The array containing the partial sums
* @param Nparticles The length of the array
*/
__global__ void sum_kernel(float* partial_sums, int Nparticles)
{
int block_id = blockIdx.x;
int i = blockDim.x*block_id + threadIdx.x;
if(i == 0)
{
int x;
float sum = 0;
for(x = 0; x < Nparticles/512; x++)
{
sum += partial_sums[x];
}
partial_sums[0] = sum;
}
}
/**
* Calculates the ultimate sum using the partial sums arrays & store them at index 0 of the respective arrays
* @param x_partial_sums The array containing the partial sums of arrayX
* @param y_partial_sums The array containing the partial sums of arrayY
* @param Nparticles The length of the arrays
*/
__global__ void sum_xy_kernel(float * x_partial_sums, float * y_partial_sums, int Nparticles)
{
int block_id = blockIdx.x;
int i = blockDim.x*block_id + threadIdx.x;
if(i == 0)
{
int x;
float x_sum = 0;
float y_sum = 0;
for(x = 0; x < Nparticles/512; x++)
{
x_sum += x_partial_sums[x];
y_sum += y_partial_sums[x];
}
x_partial_sums[0] = x_sum;
y_partial_sums[0] = y_sum;
}
}
/**
* Calculates the likelihoods of an object going to the positions guessed using arrayX and arrayY
* @param arrayX The array containing the guesses in the x direction
* @param arrayY The array containing the guesses in the y direction
* @param ind The translated position in the video
* @param objxy The representation of the object
* @param likelihood The likelihood array
* @param I The video data to be analyzed
* @param u The array containing the update data
* @param weights The weights array
* @param Nparticles The number of particles to be used
* @param countOnes The length of the objxy array
* @param max_size The maximum index in I
* @param k The current frame
* @param IszX The x dimension
* @param IszY The y dimension
* @param Nfr The number of frames
* @param seed The seed array
* @param partial_sums The partial sums array
*/
__global__ void likelihood_kernel(float * arrayX, float * arrayY, float * CDF, int * ind, int * objxy, float * likelihood, unsigned char * I, float * u, float * weights, int Nparticles, int countOnes, int max_size, int k, int IszX, int IszY, int Nfr, int *seed, float * partial_sums){
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
int y;
float indX, indY;
__shared__ float buffer[512];
if(i < Nparticles){
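//prediction step: drift each particle by the assumed linear motion (+1 in x, -2 in y) and add Gaussian noise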
arrayX[i] = arrayX[i] + 1.0 + 5.0*d_randn(seed, i);
arrayY[i] = arrayY[i] - 2.0 + 2.0*d_randn(seed, i);
__syncthreads();
}
if(i < Nparticles)
{
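//measurement step: map every foreground offset of the object model to a flat index into frame k (out-of-range indices clamp to 0) and average the likelihood over the object pixels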
for(y = 0; y < countOnes; y++){
indX = round(arrayX[i]) + objxy[y*2 + 1];
indY = round(arrayY[i]) + objxy[y*2];
ind[i*countOnes + y] = fabs(k*IszY*IszX + indX*IszX + indY);
if(ind[i*countOnes + y] >= max_size)
ind[i*countOnes + y] = 0;
}
likelihood[i] = calcLikelihoodSum(I, ind, countOnes, i);
likelihood[i] = likelihood[i]/countOnes;
__syncthreads();
}
if(i < Nparticles)
{
weights[i] = weights[i]*likelihood[i];
__syncthreads();
buffer[threadIdx.x] = weights[i];
__syncthreads();
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if(threadIdx.x < s)
{
buffer[threadIdx.x] += buffer[threadIdx.x + s];
}
__syncthreads();
}
if(threadIdx.x == 0)
{
partial_sums[blockIdx.x] = buffer[0];
}
__syncthreads();
}
}
/**
* Calculates the likelihood for a single frame
* @param arrayX The array containing the guesses in the x direction
* @param arrayY The array containing the guesses in the y direction
* @param CDF The CDF array
* @param ind The array containing the translated addresses for I
* @param objxy The representation of the object to be tracked
* @param likelihood The likelihood array
* @param I The image to be analyzed
* @param u The array containing the information for updating arrayX and arrayY
* @param weights The weights array
* @param Nparticles The number of particles
* @param countOnes The length of the objxy array
* @param max_size The maximum index in I
* @param IszX The x dimension of the image
* @param IszY The y dimension of the image
* @param seed The seed array
* @param partial_sums The partial sums array
*/
__global__ void likelihood_kernel1F(float * arrayX, float * arrayY, float * CDF, int * ind, int * objxy, float * likelihood, unsigned char * I, float * u, float * weights, int Nparticles, int countOnes, int max_size, int IszX, int IszY, int *seed, float * partial_sums){
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
int y;
float indX, indY;
__shared__ float buffer[512];
if(i < Nparticles){
arrayX[i] = arrayX[i] + 1.0 + 5.0*d_randn(seed, i);
arrayY[i] = arrayY[i] - 2.0 + 2.0*d_randn(seed, i);
__syncthreads();
}
if(i < Nparticles)
{
for(y = 0; y < countOnes; y++){
indX = round(arrayX[i]) + objxy[y*2 + 1];
indY = round(arrayY[i]) + objxy[y*2];
ind[i*countOnes + y] = fabs(indX*IszX + indY);
if(ind[i*countOnes + y] >= max_size)
ind[i*countOnes + y] = 0;
}
likelihood[i] = calcLikelihoodSum(I, ind, countOnes, i);
likelihood[i] = likelihood[i]/countOnes;
__syncthreads();
}
if(i < Nparticles)
{
weights[i] = weights[i]*likelihood[i];
__syncthreads();
buffer[threadIdx.x] = weights[i];
__syncthreads();
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if(threadIdx.x < s)
{
buffer[threadIdx.x] += buffer[threadIdx.x + s];
}
__syncthreads();
}
if(threadIdx.x == 0)
{
partial_sums[blockIdx.x] = buffer[0];
}
__syncthreads();
}
}
/**
* Takes in a value and returns an integer that approximates that value
* @note if the fractional part is < .5 the result is rounded down, otherwise it is rounded up
*/
float roundDouble(float value){
int newValue = (int)(value);
if(value - newValue < .5)
return newValue;
else
return newValue+1;
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void setIf(int testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
if(array3D[x * *dimY * *dimZ+y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void addNoise(int * array3D, int * dimX, int * dimY, int * dimZ){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (int)(5*randn());
}
}
}
}
/**
* Fills a (2*radius - 1) x (2*radius - 1) matrix with a disk of the given radius
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void strelDisk(int * disk, int radius)
{
int diameter = radius*2 - 1;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
double distance = sqrt(pow((double)(x-radius+1),2) + pow((double)(y-radius+1),2));
if(distance < radius)
disk[x*diameter + y] = 1;
}
}
}
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param posZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void dilate_matrix(int * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error)
{
int startX = posX - error;
while(startX < 0)
startX++;
int startY = posY - error;
while(startY < 0)
startY++;
int endX = posX + error;
while(endX > dimX)
endX--;
int endY = posY + error;
while(endY > dimY)
endY--;
int x,y;
for(x = startX; x < endX; x++){
for(y = startY; y < endY; y++){
double distance = sqrt( pow((double)(x-posX),2) + pow((double)(y-posY),2) );
if(distance < error)
matrix[x*dimY*dimZ + y*dimZ + posZ] = 1;
}
}
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
void imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix)
{
int x, y, z;
for(z = 0; z < dimZ; z++){
for(x = 0; x < dimX; x++){
for(y = 0; y < dimY; y++){
if(matrix[x*dimY*dimZ + y*dimZ + z] == 1){
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
void getneighbors(int * se, int numOnes, int * neighbors, int radius){
int x, y;
int neighY = 0;
int center = radius - 1;
int diameter = radius*2 -1;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(se[x*diameter + y]){
neighbors[neighY*2] = (int)(y - center);
neighbors[neighY*2 + 1] = (int)(x - center);
neighY++;
}
}
}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the background intensity are known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
*/
void videoSequence(int * I, int IszX, int IszY, int Nfr){
int k;
int max_size = IszX*IszY*Nfr;
/*get object centers*/
int x0 = (int)roundDouble(IszY/2.0);
int y0 = (int)roundDouble(IszX/2.0);
I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1;
/*move point*/
int xk, yk, pos;
for(k = 1; k < Nfr; k++){
xk = abs(x0 + (k-1));
yk = abs(y0 - 2*(k-1));
pos = yk * IszY * Nfr + xk *Nfr + k;
if(pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
int * newMatrix = (int *)mxCalloc(IszX*IszY*Nfr, sizeof(int));
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for(x = 0; x < IszX; x++){
for(y = 0; y < IszY; y++){
for(k = 0; k < Nfr; k++){
I[x*IszY*Nfr + y*Nfr + k] = newMatrix[x*IszY*Nfr + y*Nfr + k];
}
}
}
mxFree(newMatrix);
/*define background, add noise*/
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr);
}
/**
* The implementation of the particle filter using CUDA for many frames
* @see http://www.nvidia.com/object/cuda_home_new.html
* @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
* @param x_loc The array that will store the x locations of the desired object
* @param y_loc The array that will store the y locations of the desired object
* @param xe The starting x position of the object
* @param ye The starting y position of the object
*/
void particleFilter(unsigned char * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles, float * x_loc, float * y_loc, float xe, float ye){
int max_size = IszX*IszY*Nfr;
//original particle centroid
x_loc[0] = xe;
y_loc[0] = ye;
/*expected object locations, compared to center*/
int radius = 5;
int diameter = radius*2 -1;
int * disk = (int *)mxCalloc(diameter*diameter, sizeof(int));
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(disk[x*diameter + y] == 1)
countOnes++;
}
}
int * objxy = (int *)mxCalloc(countOnes*2, sizeof(int));
getneighbors(disk, countOnes, objxy, radius);
//initial weights are all equal (1/Nparticles)
float * weights = (float *)mxCalloc(Nparticles,sizeof(float));
for(x = 0; x < Nparticles; x++){
weights[x] = 1/((float)(Nparticles));
}
//initial likelihood to 0.0
float * likelihood = (float *)mxCalloc(Nparticles, sizeof(float));
float * arrayX = (float *)mxCalloc(Nparticles, sizeof(float));
float * arrayY = (float *)mxCalloc(Nparticles, sizeof(float));
float * xj = (float *)mxCalloc(Nparticles, sizeof(float));
float * yj = (float *)mxCalloc(Nparticles, sizeof(float));
float * CDF = (float *)mxCalloc(Nparticles, sizeof(float));
//GPU copies of arrays
float * arrayX_GPU;
float * arrayY_GPU;
float * xj_GPU;
float * yj_GPU;
float * CDF_GPU;
float * likelihood_GPU;
unsigned char * I_GPU;
float * weights_GPU;
int * objxy_GPU;
float * xloc_GPU;
float * yloc_GPU;
//int * ind = (int*)malloc(sizeof(int)*countOnes);
int * ind_GPU;
//float * u = (float *)malloc(sizeof(float)*Nparticles);
float * u_GPU;
int * seed_GPU;
float * partial_sums;
float * x_partial_sums;
float * y_partial_sums;
//CUDA memory allocation
check_error(hipMalloc((void **) &arrayX_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &arrayY_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &xj_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &yj_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &CDF_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &u_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &likelihood_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &weights_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &I_GPU, sizeof(unsigned char)*IszX*IszY*Nfr));
check_error(hipMalloc((void **) &objxy_GPU, sizeof(int)*countOnes));
check_error(hipMalloc((void **) &ind_GPU, sizeof(int)*countOnes*Nparticles));
check_error(hipMalloc((void **) &seed_GPU, sizeof(int)*Nparticles));
check_error(hipMalloc((void **) &partial_sums, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &xloc_GPU, sizeof(float)*Nfr));
check_error(hipMalloc((void **) &yloc_GPU, sizeof(float)*Nfr));
check_error(hipMalloc((void **) &x_partial_sums, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &y_partial_sums, sizeof(float)*Nparticles));
for(x = 0; x < Nparticles; x++){
arrayX[x] = xe;
arrayY[x] = ye;
}
int k;
//float * Ik = (float *)malloc(sizeof(float)*IszX*IszY);
//start send
hipMemcpy(I_GPU, I, sizeof(unsigned char)*IszX*IszY*Nfr, hipMemcpyHostToDevice);
hipMemcpy(objxy_GPU, objxy, sizeof(int)*countOnes, hipMemcpyHostToDevice);
hipMemcpy(weights_GPU, weights, sizeof(float)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(arrayX_GPU, arrayX, sizeof(float)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(arrayY_GPU, arrayY, sizeof(float)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(seed_GPU, seed, sizeof(int)*Nparticles, hipMemcpyHostToDevice);
int num_blocks = ceil((float) Nparticles/(float) threads_per_block);
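//main loop, one pass per frame: predict and weight the particles, reduce the weights, normalize and accumulate the position estimate, then resample through the CDF texture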
for(k = 1; k < Nfr; k++){
hipLaunchKernelGGL(( likelihood_kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, arrayX_GPU, arrayY_GPU, CDF_GPU, ind_GPU, objxy_GPU, likelihood_GPU, I_GPU, u_GPU, weights_GPU, Nparticles, countOnes, max_size, k, IszX, IszY, Nfr, seed_GPU, partial_sums);
hipLaunchKernelGGL(( sum_kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, partial_sums, Nparticles);
hipLaunchKernelGGL(( normalize_weights_kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, weights_GPU, Nparticles, partial_sums, CDF_GPU, u_GPU, seed_GPU, x_partial_sums, y_partial_sums, arrayX_GPU, arrayY_GPU);
hipLaunchKernelGGL(( sum_xy_kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, x_partial_sums, y_partial_sums, Nparticles);
hipBindTexture(0, tex_CDF, CDF_GPU, Nparticles);
//KERNEL FUNCTION CALL
hipLaunchKernelGGL(( find_index_kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, weights_GPU, Nparticles, x_partial_sums, y_partial_sums, k, xloc_GPU, yloc_GPU);
hipUnbindTexture(tex_CDF);
}
//CUDA freeing of memory
hipFree(xj_GPU);
hipFree(yj_GPU);
hipFree(CDF_GPU);
hipFree(u_GPU);
hipFree(likelihood_GPU);
hipFree(I_GPU);
hipFree(objxy_GPU);
hipFree(ind_GPU);
hipFree(seed_GPU);
hipFree(partial_sums);
hipFree(weights_GPU);
hipFree(arrayY_GPU);
hipFree(arrayX_GPU);
hipFree(x_partial_sums);
hipFree(y_partial_sums);
hipMemcpy(x_loc, xloc_GPU, sizeof(float)*Nfr, hipMemcpyDeviceToHost);
hipMemcpy(y_loc, yloc_GPU, sizeof(float)*Nfr, hipMemcpyDeviceToHost);
hipFree(xloc_GPU);
hipFree(yloc_GPU);
x_loc[0] = xe;
y_loc[0] = ye;
}
/**
* The implementation of the particle filter using CUDA for a single image
* @see http://www.nvidia.com/object/cuda_home_new.html
* @note This function is designed to work with a single image. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @warning Use the other particle filter function for videos; the accuracy of this function decreases significantly as it is called repeatedly while processing video
* @param I The image to be run
* @param IszX The x dimension of the image
* @param IszY The y dimension of the image
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
* @param x_loc The array that will store the x locations of the desired object
* @param y_loc The array that will store the y locations of the desired object
* @param prevX The starting x position of the object
* @param prevY The starting y position of the object
*/
void particleFilter1F(unsigned char * I, int IszX, int IszY, int * seed, int Nparticles, float * x_loc, float * y_loc, float prevX, float prevY){
int max_size = IszX*IszY;
/*expected object locations, compared to center*/
int radius = 5;
int diameter = radius*2 -1;
int * disk = (int *)mxCalloc(diameter*diameter, sizeof(int));
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(disk[x*diameter + y] == 1)
countOnes++;
}
}
int * objxy = (int *)mxCalloc(countOnes*2, sizeof(int));
getneighbors(disk, countOnes, objxy, radius);
//initial weights are all equal (1/Nparticles)
float * weights = (float *)mxCalloc(Nparticles,sizeof(float));
for(x = 0; x < Nparticles; x++){
weights[x] = 1/((float)(Nparticles));
}
//initial likelihood to 0.0
float * likelihood = (float *)mxCalloc(Nparticles, sizeof(float));
float * arrayX = (float *)mxCalloc(Nparticles, sizeof(float));
float * arrayY = (float *)mxCalloc(Nparticles, sizeof(float));
float * xj = (float *)mxCalloc(Nparticles, sizeof(float));
float * yj = (float *)mxCalloc(Nparticles, sizeof(float));
float * CDF = (float *)mxCalloc(Nparticles, sizeof(float));
//GPU copies of arrays
float * arrayX_GPU;
float * arrayY_GPU;
float * xj_GPU;
float * yj_GPU;
float * CDF_GPU;
float * likelihood_GPU;
unsigned char * I_GPU;
float * weights_GPU;
int * objxy_GPU;
float * xloc_GPU;
float * yloc_GPU;
int * ind_GPU;
float * u_GPU;
int * seed_GPU;
float * partial_sums;
float * x_partial_sums;
float * y_partial_sums;
//CUDA memory allocation
check_error(hipMalloc((void **) &arrayX_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &arrayY_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &xj_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &yj_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &CDF_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &u_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &likelihood_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &weights_GPU, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &I_GPU, sizeof(unsigned char)*IszX*IszY));
check_error(hipMalloc((void **) &objxy_GPU, sizeof(int)*countOnes));
check_error(hipMalloc((void **) &ind_GPU, sizeof(int)*countOnes*Nparticles));
check_error(hipMalloc((void **) &seed_GPU, sizeof(int)*Nparticles));
check_error(hipMalloc((void **) &partial_sums, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &xloc_GPU, sizeof(float)));
check_error(hipMalloc((void **) &yloc_GPU, sizeof(float)));
check_error(hipMalloc((void **) &x_partial_sums, sizeof(float)*Nparticles));
check_error(hipMalloc((void **) &y_partial_sums, sizeof(float)*Nparticles));
for(x = 0; x < Nparticles; x++){
arrayX[x] = prevX;
arrayY[x] = prevY;
}
//start send
hipMemcpy(I_GPU, I, sizeof(unsigned char)*IszX*IszY, hipMemcpyHostToDevice);
hipMemcpy(objxy_GPU, objxy, sizeof(int)*countOnes, hipMemcpyHostToDevice);
hipMemcpy(weights_GPU, weights, sizeof(float)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(arrayX_GPU, arrayX, sizeof(float)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(arrayY_GPU, arrayY, sizeof(float)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(seed_GPU, seed, sizeof(int)*Nparticles, hipMemcpyHostToDevice);
int num_blocks = ceil((float) Nparticles/(float) threads_per_block);
hipLaunchKernelGGL(( likelihood_kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, arrayX_GPU, arrayY_GPU, CDF_GPU, ind_GPU, objxy_GPU, likelihood_GPU, I_GPU, u_GPU, weights_GPU, Nparticles, countOnes, max_size, 0, IszX, IszY, 0, seed_GPU, partial_sums);
hipLaunchKernelGGL(( sum_kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, partial_sums, Nparticles);
hipLaunchKernelGGL(( normalize_weights_kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, weights_GPU, Nparticles, partial_sums, CDF_GPU, u_GPU, seed_GPU, x_partial_sums, y_partial_sums, arrayX_GPU, arrayY_GPU);
hipLaunchKernelGGL(( sum_xy_kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, x_partial_sums, y_partial_sums, Nparticles);
hipBindTexture(0, tex_CDF, CDF_GPU, Nparticles);
//KERNEL FUNCTION CALL
hipLaunchKernelGGL(( find_index_kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, weights_GPU, Nparticles, x_partial_sums, y_partial_sums, 0, xloc_GPU, yloc_GPU);
hipUnbindTexture(tex_CDF);
//CUDA freeing of memory
hipFree(xj_GPU);
hipFree(yj_GPU);
hipFree(CDF_GPU);
hipFree(u_GPU);
hipFree(likelihood_GPU);
hipFree(I_GPU);
hipFree(objxy_GPU);
hipFree(ind_GPU);
hipFree(seed_GPU);
hipFree(partial_sums);
hipFree(weights_GPU);
hipFree(arrayY_GPU);
hipFree(arrayX_GPU);
hipFree(x_partial_sums);
hipFree(y_partial_sums);
hipMemcpy(x_loc, xloc_GPU, sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(y_loc, yloc_GPU, sizeof(float), hipMemcpyDeviceToHost);
hipFree(xloc_GPU);
hipFree(yloc_GPU);
}
/**
* Function that allows the 2 particle filter implementations to be run
* @details The number of arguments provided to this function determines which function will be called. 7 args will call the video processing version. 6 (leaving out the number of frames) will call the image processing version.
* @param nlhs (Number on the Left Hand Side) The number of items to return (2 will be in this case; the x and y arrays)
* @param plhs (Parameters on the Left Hand Side) A pointer to the arrays containing the x and y arrays
* @param nrhs (Number on the Right Hand Side) The number of arguments to take in (7 are needed for video processing: the video as an unsigned char array, the x dimension, the y dimension, the number of frames, the number of particles, the x starting position and the y starting position;
* 6 are needed for image processing: the same arguments, leaving out the number of frames)
* @param prhs (Parameters on the Right Hand Side) A pointer to the arrays containing the parameters
*/
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){
unsigned char * I;
int IszX, IszY, Nfr, Nparticles;
srand(time(0));
if(nrhs < 6)
{
printf("ERROR: TOO FEW ARGS HAVE BEEN ENTERED\n");
printf("EXITING\n");
exit(0);
}
else if(nrhs == 7)
{
IszX = (int)(mxGetScalar(prhs[1]));
IszY = (int)(mxGetScalar(prhs[2]));
Nfr = (int)(mxGetScalar(prhs[3]));
Nparticles = (int)(mxGetScalar(prhs[4]));
unsigned char * cI = (unsigned char *)mxGetData(prhs[0]);
I = (unsigned char *)mxCalloc(IszX*IszY*Nfr, sizeof(unsigned char));
int x, y, z;
for(x = 0; x < IszX; x++){
for(y = 0; y < IszY; y++){
for(z = 0; z < Nfr; z++){
I[x*IszY*Nfr + y*Nfr + z] = (unsigned char)cI[x*IszY*Nfr + y*Nfr + z];
}
}
}
float xe = (float)mxGetScalar(prhs[5]);
float ye = (float)mxGetScalar(prhs[6]);
int * seed = (int *)mxCalloc(Nparticles, sizeof(int));
int i;
for(i = 0; i < Nparticles; i++)
seed[i] = time(0)*i;
float * posX = (float *)mxCalloc(Nfr, sizeof(float));
float * posY = (float *)mxCalloc(Nfr, sizeof(float));
particleFilter(I, IszX, IszY, Nfr, seed, Nparticles, posX, posY, xe, ye);
mxFree(I);
mxFree(seed);
plhs[0] = mxCreateDoubleMatrix(Nfr, 1, mxREAL);
plhs[1] = mxCreateDoubleMatrix(Nfr, 1, mxREAL);
double * bufferX = mxGetPr(plhs[0]);
double * bufferY = mxGetPr(plhs[1]);
for(i = 0; i < Nfr; i++)
{
bufferX[i] = (double)posX[i];
bufferY[i] = (double)posY[i];
}
mxFree(posX);
mxFree(posY);
}
else if(nrhs == 6)
{
IszX = (int)(mxGetScalar(prhs[1]));
IszY = (int)(mxGetScalar(prhs[2]));
Nparticles = (int)(mxGetScalar(prhs[3]));
double startX = (double)mxGetScalar(prhs[4]);
double startY = (double)mxGetScalar(prhs[5]);
unsigned char * cI = (unsigned char *)mxGetData(prhs[0]);
I = (unsigned char *)mxCalloc(IszX*IszY, sizeof(unsigned char));
int x, y;
for(x = 0; x < IszX; x++){
for(y = 0; y < IszY; y++){
I[x*IszX + y] = (unsigned char)cI[x*IszX + y];
}
}
int * seed = (int *)mxCalloc(Nparticles, sizeof(int));
int i;
for(i = 0; i < Nparticles; i++)
seed[i] = time(0)*i;
float posX[1];
float posY[1];
particleFilter1F(I, IszX, IszY, seed, Nparticles, posX, posY, (float)startX, (float)startY);
mxFree(I);
mxFree(seed);
plhs[0] = mxCreateDoubleMatrix(1,1,mxREAL);
plhs[1] = mxCreateDoubleMatrix(1,1,mxREAL);
double * bufferX = mxGetPr(plhs[0]);
double * bufferY = mxGetPr(plhs[1]);
bufferX[0] = posX[0];
bufferY[0] = posY[0];
}
else
{
printf("ERROR: TOO MANY ARGS\n");
printf("EXITING\n");
exit(0);
}
}
/**
* Unused
*/
int main(){
return 0;
}
| 5a1ef8f51e45bb5470e0a1ea10a08672cb8f6a4c.cu | /**
* @file ex_particle_CUDA_float.cu
* @author Michael Trotter & Matt Goodrum
* @brief Particle filter implementation completely in CUDA
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include "mex.h"
#include "cuda.h"
#include "cuda_runtime.h"
#include "driver_types.h"
#define BLOCK_X 16
#define BLOCK_Y 16
#define PI acos(-1)
/**
* @var tex_CDF The CDF texture array
*/
texture <float> tex_CDF;
/**
* @var threads_per_block The number of threads per block used on the GPU
*/
const int threads_per_block = 512;
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is NOT thread-safe
* @return a double representing a Gaussian random number
*/
double randn(){
/* Box-Muller algorithm */
double u = (double)rand();
u = u/RAND_MAX;
double v = (double)rand();
v = v/RAND_MAX;
double cosine = cos(2*PI*v);
double rt = -2*log(u);
return sqrt(rt)*cosine;
}
/**
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
* @param e Cuda error code
*/
void check_error(cudaError e) {
if (e != cudaSuccess) {
printf("\nCUDA error: %s\n", cudaGetErrorString(e));
exit(1);
}
}
/**
* Device function that determines the likelihood sum based on the formula: SUM( ((IK[IND] - 100)^2 - (IK[IND] - 228)^2) / 50 )
* @param I The 3D matrix
* @param ind The current ind array
* @param numOnes The length of ind array
* @return A double representing the sum
*/
__device__ float calcLikelihoodSum(unsigned char * I, int * ind, int numOnes, int index){
float likelihoodSum = 0.0;
int x;
for(x = 0; x < numOnes; x++)
likelihoodSum += (pow((float)(I[ind[index*numOnes + x]] - 100),2) - pow((float)(I[ind[index*numOnes + x]]-228),2))/50.0;
return likelihoodSum;
}
/**
* Device function used to calculated the CDF using the previously calculated weights
* @param CDF The CDF array
* @param weights The weights array
* @param Nparticles The length of CDF + weights array
*/
__device__ void cdfCalc(float * CDF, float * weights, int Nparticles){
int x;
CDF[0] = weights[0];
for(x = 1; x < Nparticles; x++){
CDF[x] = weights[x] + CDF[x-1];
}
}
/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe for use on the GPU
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
__device__ float d_randu(int * seed, int index)
{
//use GCC's M, A and C value for the LCG
int M = INT_MAX;
int A = 1103515245;
int C = 12345;
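//one step of the linear congruential generator using GCC's constants; the updated seed is mapped into [0, 1)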
int num = A*seed[index] + C;
seed[index] = num % M;
num = seed[index];
return fabs(num/((float) M));
}
/**
* Generates a normally distributed random number using the Box-Muller transformation on the GPU
* @note This function is thread-safe for use on the GPU
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
__device__ float d_randn(int * seed, int index){
//Box-Muller algorithm
float pi = 3.14159265358979323846;
float u = d_randu(seed, index);
float v = d_randu(seed, index);
float cosine = cos(2*pi*v);
float rt = -2*log(u);
return sqrt(rt)*cosine;
}
/**
* @deprecated device function for calculating the weights; replaced by reduction function for weights
* @param weights The weights array
* @param likelihood The likelihood array
* @param Nparticles The length of the weights and likelihood arrays
* @return The sum of the weights
*/
__device__ float updateWeights(float * weights, float * likelihood, int Nparticles){
int x;
float sum = 0;
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x] * exp(likelihood[x]);
sum += weights[x];
}
return sum;
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses binary search before switching to sequential search
* @param CDF The CDF
* @param beginIndex The index to start searching from
* @param endIndex The index to stop searching
* @param value The value to find
* @return The index of value in the CDF; if value is never found, returns -1
* @warning Use at your own risk; not fully tested
*/
__device__ int findIndexBin(float * CDF, int beginIndex, int endIndex, float value)
{
if(endIndex < beginIndex)
return -1;
int middleIndex;
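//binary search for the first CDF entry >= value; on an exact match, walk backwards so the lowest matching index is returned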
while(endIndex > beginIndex)
{
middleIndex = beginIndex + ((endIndex-beginIndex)/2);
if(CDF[middleIndex] >= value)
{
if(middleIndex == 0)
return middleIndex;
else if(CDF[middleIndex-1] < value)
return middleIndex;
else if(CDF[middleIndex-1] == value)
{
while(CDF[middleIndex] == value && middleIndex >= 0)
middleIndex--;
middleIndex++;
return middleIndex;
}
}
if(CDF[middleIndex] > value)
endIndex = middleIndex-1;
else
beginIndex = middleIndex+1;
}
return -1;
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and updates arrayX and arrayY using those values
* @note This function uses sequential search
* @param arrayX The array containing the guesses in the x direction
* @param arrayY The array containing the guesses in the y direction
* @param CDF The CDF array
* @param u The array containing the updated values
* @param xj The temp array for arrayX
* @param yj The temp array for arrayY
* @param weights The weights array
* @param Nparticles The number of particles used
* @param x_partial_sums The array containing the partial sum of arrayX; final sum is at index 0
* @param y_partial_sums The array containing the partial sum of arrayY; final sum is at index 0
* @param k The current frame
* @param x_loc The array containing the x location of the object for frame k
* @param y_loc The array containing the y location of the object for frame k
*/
__global__ void find_index_kernel(float * arrayX, float * arrayY, float * CDF, float * u, float * xj, float * yj, float * weights, int Nparticles, float * x_partial_sums, float * y_partial_sums, int k, float * x_loc, float * y_loc){
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
x_loc[k] = x_partial_sums[0];
y_loc[k] = y_partial_sums[0];
if(i < Nparticles){
int index = -1;
int x;
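//resampling step: scan the CDF (read through the texture cache) and copy the first particle whose cumulative weight reaches u[i]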
for(x = 0; x < Nparticles; x++){
if(tex1Dfetch(tex_CDF, x) >= u[i]){
index = x;
break;
}
}
if(index == -1){
index = Nparticles-1;
}
xj[i] = arrayX[index];
yj[i] = arrayY[index];
weights[i] = 1/((float)(Nparticles));
}
__syncthreads();
}
/**
* Normalizes the weights using the partial sums previously calculated; sets up the partial sums for the x + y positions
* @param weights The weights array
* @param Nparticles The length of the weights array, arrayX and arrayY
* @param partial_sums The array containing the result of the partial sums in its initial index
* @param CDF The CDF array
* @param u The array for calculating the indices used for resampling
* @param seed The seed array used for random number generation
* @param x_partial_sums The array used for storing the partial sums of arrayX
* @param y_partial_sums The array used for storing the partial sums of arrayY
* @param arrayX The array storing the guesses for the x position of the particle
* @param arrayY The array storing the guesses for the y position of the particle
*/
__global__ void normalize_weights_kernel(float * weights, int Nparticles, float * partial_sums, float * CDF, float * u, int * seed, float * x_partial_sums, float * y_partial_sums, float * arrayX, float * arrayY)
{
int block_id = blockIdx.x;
int i = blockDim.x*block_id + threadIdx.x;
__shared__ float u1, sumWeights;
__shared__ float xbuffer[512];
__shared__ float ybuffer[512];
sumWeights = partial_sums[0];
if(i < Nparticles)
{
weights[i] = weights[i]/sumWeights;
}
if(i == 0)
{
cdfCalc(CDF, weights, Nparticles);
u1 = (1/((float)(Nparticles)))*d_randu(seed, i);
}
if(i < Nparticles)
{
__syncthreads();
u[i] = u1 + i/((float)(Nparticles));
xbuffer[threadIdx.x] = weights[i]*arrayX[i];
ybuffer[threadIdx.x] = weights[i]*arrayY[i];
__syncthreads();
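//tree reduction in shared memory: each pass halves the active threads, leaving this block's partial sums of weight*x and weight*y in element 0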
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if(threadIdx.x < s)
{
xbuffer[threadIdx.x] += xbuffer[threadIdx.x + s];
ybuffer[threadIdx.x] += ybuffer[threadIdx.x + s];
}
__syncthreads();
}
if(threadIdx.x == 0)
{
x_partial_sums[blockIdx.x] = xbuffer[0];
y_partial_sums[blockIdx.x] = ybuffer[0];
}
}
}
/**
* Calculates the ultimate sum using the partial sums array & stores it at index 0
* @param partial_sums The array containing the partial sums
* @param Nparticles The length of the array
*/
__global__ void sum_kernel(float* partial_sums, int Nparticles)
{
int block_id = blockIdx.x;
int i = blockDim.x*block_id + threadIdx.x;
if(i == 0)
{
int x;
float sum = 0;
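//note: this loop assumes Nparticles is a multiple of 512 (threads_per_block); otherwise the last partial block's sum is dropped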
for(x = 0; x < Nparticles/512; x++)
{
sum += partial_sums[x];
}
partial_sums[0] = sum;
}
}
/**
* Calculates the ultimate sum using the partial sums arrays & store them at index 0 of the respective arrays
* @param x_partial_sums The array containing the partial sums of arrayX
* @param y_partial_sums The array containing the partial sums of arrayY
* @param Nparticles The length of the arrays
*/
__global__ void sum_xy_kernel(float * x_partial_sums, float * y_partial_sums, int Nparticles)
{
int block_id = blockIdx.x;
int i = blockDim.x*block_id + threadIdx.x;
if(i == 0)
{
int x;
float x_sum = 0;
float y_sum = 0;
for(x = 0; x < Nparticles/512; x++)
{
x_sum += x_partial_sums[x];
y_sum += y_partial_sums[x];
}
x_partial_sums[0] = x_sum;
y_partial_sums[0] = y_sum;
}
}
/**
* Calculates the likelihoods of an object going to the positions guessed using arrayX and arrayY
* @param arrayX The array containing the guesses in the x direction
* @param arrayY The array containing the guesses in the y direction
* @param ind The translated position in the video
* @param objxy The representation of the object
* @param likelihood The likelihood array
* @param I The video data to be analyzed
* @param u The array containing the update data
* @param weights The weights array
* @param Nparticles The number of particles to be used
* @param countOnes The length of the objxy array
* @param max_size The maximum index in I
* @param k The current frame
* @param IszX The x dimension
* @param IszY The y dimension
* @param Nfr The number of frames
* @param seed The seed array
* @param partial_sums The partial sums array
*/
__global__ void likelihood_kernel(float * arrayX, float * arrayY, float * CDF, int * ind, int * objxy, float * likelihood, unsigned char * I, float * u, float * weights, int Nparticles, int countOnes, int max_size, int k, int IszX, int IszY, int Nfr, int *seed, float * partial_sums){
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
int y;
float indX, indY;
__shared__ float buffer[512];
if(i < Nparticles){
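//prediction step: drift each particle by the assumed linear motion (+1 in x, -2 in y) and add Gaussian noise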
arrayX[i] = arrayX[i] + 1.0 + 5.0*d_randn(seed, i);
arrayY[i] = arrayY[i] - 2.0 + 2.0*d_randn(seed, i);
__syncthreads();
}
if(i < Nparticles)
{
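//measurement step: map every foreground offset of the object model to a flat index into frame k (out-of-range indices clamp to 0) and average the likelihood over the object pixels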
for(y = 0; y < countOnes; y++){
indX = round(arrayX[i]) + objxy[y*2 + 1];
indY = round(arrayY[i]) + objxy[y*2];
ind[i*countOnes + y] = fabs(k*IszY*IszX + indX*IszX + indY);
if(ind[i*countOnes + y] >= max_size)
ind[i*countOnes + y] = 0;
}
likelihood[i] = calcLikelihoodSum(I, ind, countOnes, i);
likelihood[i] = likelihood[i]/countOnes;
__syncthreads();
}
if(i < Nparticles)
{
weights[i] = weights[i]*likelihood[i];
__syncthreads();
buffer[threadIdx.x] = weights[i];
__syncthreads();
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if(threadIdx.x < s)
{
buffer[threadIdx.x] += buffer[threadIdx.x + s];
}
__syncthreads();
}
if(threadIdx.x == 0)
{
partial_sums[blockIdx.x] = buffer[0];
}
__syncthreads();
}
}
/**
* Calculates the likelihood for a single frame
* @param arrayX The array containing the guesses in the x direction
* @param arrayY The array containing the guesses in the y direction
* @param CDF The CDF array
* @param ind The array containing the translated addresses for I
* @param objxy The representation of the object to be tracked
* @param likelihood The likelihood array
* @param I The image to be analyzed
* @param u The array containing the information for updating arrayX and arrayY
* @param weights The weights array
* @param Nparticles The number of particles
* @param countOnes The length of the objxy array
* @param max_size The maximum index in I
* @param IszX The x dimension of the image
* @param IszY The y dimension of the image
* @param seed The seed array
* @param partial_sums The partial sums array
*/
__global__ void likelihood_kernel1F(float * arrayX, float * arrayY, float * CDF, int * ind, int * objxy, float * likelihood, unsigned char * I, float * u, float * weights, int Nparticles, int countOnes, int max_size, int IszX, int IszY, int *seed, float * partial_sums){
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
int y;
float indX, indY;
__shared__ float buffer[512];
if(i < Nparticles){
arrayX[i] = arrayX[i] + 1.0 + 5.0*d_randn(seed, i);
arrayY[i] = arrayY[i] - 2.0 + 2.0*d_randn(seed, i);
__syncthreads();
}
if(i < Nparticles)
{
for(y = 0; y < countOnes; y++){
indX = round(arrayX[i]) + objxy[y*2 + 1];
indY = round(arrayY[i]) + objxy[y*2];
ind[i*countOnes + y] = fabs(indX*IszX + indY);
if(ind[i*countOnes + y] >= max_size)
ind[i*countOnes + y] = 0;
}
likelihood[i] = calcLikelihoodSum(I, ind, countOnes, i);
likelihood[i] = likelihood[i]/countOnes;
__syncthreads();
}
if(i < Nparticles)
{
weights[i] = weights[i]*likelihood[i];
__syncthreads();
buffer[threadIdx.x] = weights[i];
__syncthreads();
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if(threadIdx.x < s)
{
buffer[threadIdx.x] += buffer[threadIdx.x + s];
}
__syncthreads();
}
if(threadIdx.x == 0)
{
partial_sums[blockIdx.x] = buffer[0];
}
__syncthreads();
}
}
/**
* Takes in a value and returns an integer that approximates that value
* @note if the fractional part is < .5 the result is rounded down, otherwise it is rounded up
*/
float roundDouble(float value){
int newValue = (int)(value);
if(value - newValue < .5)
return newValue;
else
return newValue+1;
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void setIf(int testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
if(array3D[x * *dimY * *dimZ+y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void addNoise(int * array3D, int * dimX, int * dimY, int * dimZ){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (int)(5*randn());
}
}
}
}
/**
* Fills a (2*radius - 1) x (2*radius - 1) matrix with a disk of the given radius
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void strelDisk(int * disk, int radius)
{
int diameter = radius*2 - 1;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
double distance = sqrt(pow((double)(x-radius+1),2) + pow((double)(y-radius+1),2));
if(distance < radius)
disk[x*diameter + y] = 1;
}
}
}
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param posZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void dilate_matrix(int * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error)
{
int startX = posX - error;
while(startX < 0)
startX++;
int startY = posY - error;
while(startY < 0)
startY++;
int endX = posX + error;
while(endX > dimX)
endX--;
int endY = posY + error;
while(endY > dimY)
endY--;
int x,y;
for(x = startX; x < endX; x++){
for(y = startY; y < endY; y++){
double distance = sqrt( pow((double)(x-posX),2) + pow((double)(y-posY),2) );
if(distance < error)
matrix[x*dimY*dimZ + y*dimZ + posZ] = 1;
}
}
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
void imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix)
{
int x, y, z;
for(z = 0; z < dimZ; z++){
for(x = 0; x < dimX; x++){
for(y = 0; y < dimY; y++){
if(matrix[x*dimY*dimZ + y*dimZ + z] == 1){
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
void getneighbors(int * se, int numOnes, int * neighbors, int radius){
int x, y;
int neighY = 0;
int center = radius - 1;
int diameter = radius*2 -1;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(se[x*diameter + y]){
neighbors[neighY*2] = (int)(y - center);
neighbors[neighY*2 + 1] = (int)(x - center);
neighY++;
}
}
}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the background intensity are known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
*/
void videoSequence(int * I, int IszX, int IszY, int Nfr){
int k;
int max_size = IszX*IszY*Nfr;
/*get object centers*/
int x0 = (int)roundDouble(IszY/2.0);
int y0 = (int)roundDouble(IszX/2.0);
I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1;
/*move point*/
int xk, yk, pos;
for(k = 1; k < Nfr; k++){
xk = abs(x0 + (k-1));
yk = abs(y0 - 2*(k-1));
pos = yk * IszY * Nfr + xk *Nfr + k;
if(pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
int * newMatrix = (int *)mxCalloc(IszX*IszY*Nfr, sizeof(int));
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for(x = 0; x < IszX; x++){
for(y = 0; y < IszY; y++){
for(k = 0; k < Nfr; k++){
I[x*IszY*Nfr + y*Nfr + k] = newMatrix[x*IszY*Nfr + y*Nfr + k];
}
}
}
mxFree(newMatrix);
/*define background, add noise*/
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr);
}
/**
* The implementation of the particle filter using CUDA for many frames
* @see http://www.nvidia.com/object/cuda_home_new.html
* @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
* @param x_loc The array that will store the x locations of the desired object
* @param y_loc The array that will store the y locations of the desired object
* @param xe The starting x position of the object
* @param ye The starting y position of the object
*/
void particleFilter(unsigned char * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles, float * x_loc, float * y_loc, float xe, float ye){
int max_size = IszX*IszY*Nfr;
//original particle centroid
x_loc[0] = xe;
y_loc[0] = ye;
/*expected object locations, compared to center*/
int radius = 5;
int diameter = radius*2 -1;
int * disk = (int *)mxCalloc(diameter*diameter, sizeof(int));
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(disk[x*diameter + y] == 1)
countOnes++;
}
}
int * objxy = (int *)mxCalloc(countOnes*2, sizeof(int));
getneighbors(disk, countOnes, objxy, radius);
//initial weights are all equal (1/Nparticles)
float * weights = (float *)mxCalloc(Nparticles,sizeof(float));
for(x = 0; x < Nparticles; x++){
weights[x] = 1/((float)(Nparticles));
}
//initial likelihood to 0.0
float * likelihood = (float *)mxCalloc(Nparticles, sizeof(float));
float * arrayX = (float *)mxCalloc(Nparticles, sizeof(float));
float * arrayY = (float *)mxCalloc(Nparticles, sizeof(float));
float * xj = (float *)mxCalloc(Nparticles, sizeof(float));
float * yj = (float *)mxCalloc(Nparticles, sizeof(float));
float * CDF = (float *)mxCalloc(Nparticles, sizeof(float));
//GPU copies of arrays
float * arrayX_GPU;
float * arrayY_GPU;
float * xj_GPU;
float * yj_GPU;
float * CDF_GPU;
float * likelihood_GPU;
unsigned char * I_GPU;
float * weights_GPU;
int * objxy_GPU;
float * xloc_GPU;
float * yloc_GPU;
//int * ind = (int*)malloc(sizeof(int)*countOnes);
int * ind_GPU;
//float * u = (float *)malloc(sizeof(float)*Nparticles);
float * u_GPU;
int * seed_GPU;
float * partial_sums;
float * x_partial_sums;
float * y_partial_sums;
//CUDA memory allocation
check_error(cudaMalloc((void **) &arrayX_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &arrayY_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &xj_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &yj_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &CDF_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &u_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &likelihood_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &weights_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &I_GPU, sizeof(unsigned char)*IszX*IszY*Nfr));
check_error(cudaMalloc((void **) &objxy_GPU, sizeof(int)*countOnes));
check_error(cudaMalloc((void **) &ind_GPU, sizeof(int)*countOnes*Nparticles));
check_error(cudaMalloc((void **) &seed_GPU, sizeof(int)*Nparticles));
check_error(cudaMalloc((void **) &partial_sums, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &xloc_GPU, sizeof(float)*Nfr));
check_error(cudaMalloc((void **) &yloc_GPU, sizeof(float)*Nfr));
check_error(cudaMalloc((void **) &x_partial_sums, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &y_partial_sums, sizeof(float)*Nparticles));
for(x = 0; x < Nparticles; x++){
arrayX[x] = xe;
arrayY[x] = ye;
}
int k;
//float * Ik = (float *)malloc(sizeof(float)*IszX*IszY);
//start send
cudaMemcpy(I_GPU, I, sizeof(unsigned char)*IszX*IszY*Nfr, cudaMemcpyHostToDevice);
cudaMemcpy(objxy_GPU, objxy, sizeof(int)*countOnes, cudaMemcpyHostToDevice);
cudaMemcpy(weights_GPU, weights, sizeof(float)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(arrayX_GPU, arrayX, sizeof(float)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(arrayY_GPU, arrayY, sizeof(float)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(seed_GPU, seed, sizeof(int)*Nparticles, cudaMemcpyHostToDevice);
int num_blocks = ceil((float) Nparticles/(float) threads_per_block);
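//main loop, one pass per frame: predict and weight the particles, reduce the weights, normalize and accumulate the position estimate, then resample through the CDF texture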
for(k = 1; k < Nfr; k++){
likelihood_kernel <<< num_blocks, threads_per_block >>> (arrayX_GPU, arrayY_GPU, CDF_GPU, ind_GPU, objxy_GPU, likelihood_GPU, I_GPU, u_GPU, weights_GPU, Nparticles, countOnes, max_size, k, IszX, IszY, Nfr, seed_GPU, partial_sums);
sum_kernel <<< num_blocks, threads_per_block >>> (partial_sums, Nparticles);
normalize_weights_kernel <<< num_blocks, threads_per_block >>> (weights_GPU, Nparticles, partial_sums, CDF_GPU, u_GPU, seed_GPU, x_partial_sums, y_partial_sums, arrayX_GPU, arrayY_GPU);
sum_xy_kernel <<< num_blocks, threads_per_block >>> (x_partial_sums, y_partial_sums, Nparticles);
cudaBindTexture(0, tex_CDF, CDF_GPU, Nparticles);
//KERNEL FUNCTION CALL
find_index_kernel <<< num_blocks, threads_per_block >>> (arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, weights_GPU, Nparticles, x_partial_sums, y_partial_sums, k, xloc_GPU, yloc_GPU);
cudaUnbindTexture(tex_CDF);
}
//CUDA freeing of memory
cudaFree(xj_GPU);
cudaFree(yj_GPU);
cudaFree(CDF_GPU);
cudaFree(u_GPU);
cudaFree(likelihood_GPU);
cudaFree(I_GPU);
cudaFree(objxy_GPU);
cudaFree(ind_GPU);
cudaFree(seed_GPU);
cudaFree(partial_sums);
cudaFree(weights_GPU);
cudaFree(arrayY_GPU);
cudaFree(arrayX_GPU);
cudaFree(x_partial_sums);
cudaFree(y_partial_sums);
cudaMemcpy(x_loc, xloc_GPU, sizeof(float)*Nfr, cudaMemcpyDeviceToHost);
cudaMemcpy(y_loc, yloc_GPU, sizeof(float)*Nfr, cudaMemcpyDeviceToHost);
cudaFree(xloc_GPU);
cudaFree(yloc_GPU);
x_loc[0] = xe;
y_loc[0] = ye;
}
/**
* The implementation of the particle filter using CUDA for a single image
* @see http://www.nvidia.com/object/cuda_home_new.html
* @note This function is designed to work with a single image. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @warning Use the other particle filter function for videos; the accuracy of this function decreases significantly as it is called repeatedly while processing video
* @param I The image to be run
* @param IszX The x dimension of the image
* @param IszY The y dimension of the image
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
* @param x_loc The array that will store the x locations of the desired object
* @param y_loc The array that will store the y locations of the desired object
* @param prevX The starting x position of the object
* @param prevY The starting y position of the object
*/
void particleFilter1F(unsigned char * I, int IszX, int IszY, int * seed, int Nparticles, float * x_loc, float * y_loc, float prevX, float prevY){
int max_size = IszX*IszY;
/*expected object locations, compared to center*/
int radius = 5;
int diameter = radius*2 -1;
int * disk = (int *)mxCalloc(diameter*diameter, sizeof(int));
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(disk[x*diameter + y] == 1)
countOnes++;
}
}
int * objxy = (int *)mxCalloc(countOnes*2, sizeof(int));
getneighbors(disk, countOnes, objxy, radius);
//initial weights are all equal (1/Nparticles)
float * weights = (float *)mxCalloc(Nparticles,sizeof(float));
for(x = 0; x < Nparticles; x++){
weights[x] = 1/((float)(Nparticles));
}
//initial likelihood to 0.0
float * likelihood = (float *)mxCalloc(Nparticles, sizeof(float));
float * arrayX = (float *)mxCalloc(Nparticles, sizeof(float));
float * arrayY = (float *)mxCalloc(Nparticles, sizeof(float));
float * xj = (float *)mxCalloc(Nparticles, sizeof(float));
float * yj = (float *)mxCalloc(Nparticles, sizeof(float));
float * CDF = (float *)mxCalloc(Nparticles, sizeof(float));
//GPU copies of arrays
float * arrayX_GPU;
float * arrayY_GPU;
float * xj_GPU;
float * yj_GPU;
float * CDF_GPU;
float * likelihood_GPU;
unsigned char * I_GPU;
float * weights_GPU;
int * objxy_GPU;
float * xloc_GPU;
float * yloc_GPU;
int * ind_GPU;
float * u_GPU;
int * seed_GPU;
float * partial_sums;
float * x_partial_sums;
float * y_partial_sums;
//CUDA memory allocation
check_error(cudaMalloc((void **) &arrayX_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &arrayY_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &xj_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &yj_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &CDF_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &u_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &likelihood_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &weights_GPU, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &I_GPU, sizeof(unsigned char)*IszX*IszY));
check_error(cudaMalloc((void **) &objxy_GPU, sizeof(int)*countOnes));
check_error(cudaMalloc((void **) &ind_GPU, sizeof(int)*countOnes*Nparticles));
check_error(cudaMalloc((void **) &seed_GPU, sizeof(int)*Nparticles));
check_error(cudaMalloc((void **) &partial_sums, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &xloc_GPU, sizeof(float)));
check_error(cudaMalloc((void **) &yloc_GPU, sizeof(float)));
check_error(cudaMalloc((void **) &x_partial_sums, sizeof(float)*Nparticles));
check_error(cudaMalloc((void **) &y_partial_sums, sizeof(float)*Nparticles));
for(x = 0; x < Nparticles; x++){
arrayX[x] = prevX;
arrayY[x] = prevY;
}
//start send
cudaMemcpy(I_GPU, I, sizeof(unsigned char)*IszX*IszY, cudaMemcpyHostToDevice);
cudaMemcpy(objxy_GPU, objxy, sizeof(int)*countOnes, cudaMemcpyHostToDevice);
cudaMemcpy(weights_GPU, weights, sizeof(float)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(arrayX_GPU, arrayX, sizeof(float)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(arrayY_GPU, arrayY, sizeof(float)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(seed_GPU, seed, sizeof(int)*Nparticles, cudaMemcpyHostToDevice);
int num_blocks = ceil((float) Nparticles/(float) threads_per_block);
likelihood_kernel <<< num_blocks, threads_per_block >>> (arrayX_GPU, arrayY_GPU, CDF_GPU, ind_GPU, objxy_GPU, likelihood_GPU, I_GPU, u_GPU, weights_GPU, Nparticles, countOnes, max_size, 0, IszX, IszY, 0, seed_GPU, partial_sums);
sum_kernel <<< num_blocks, threads_per_block >>> (partial_sums, Nparticles);
normalize_weights_kernel <<< num_blocks, threads_per_block >>> (weights_GPU, Nparticles, partial_sums, CDF_GPU, u_GPU, seed_GPU, x_partial_sums, y_partial_sums, arrayX_GPU, arrayY_GPU);
sum_xy_kernel <<< num_blocks, threads_per_block >>> (x_partial_sums, y_partial_sums, Nparticles);
cudaBindTexture(0, tex_CDF, CDF_GPU, Nparticles);
//KERNEL FUNCTION CALL
find_index_kernel <<< num_blocks, threads_per_block >>> (arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, weights_GPU, Nparticles, x_partial_sums, y_partial_sums, 0, xloc_GPU, yloc_GPU);
cudaUnbindTexture(tex_CDF);
//CUDA freeing of memory
cudaFree(xj_GPU);
cudaFree(yj_GPU);
cudaFree(CDF_GPU);
cudaFree(u_GPU);
cudaFree(likelihood_GPU);
cudaFree(I_GPU);
cudaFree(objxy_GPU);
cudaFree(ind_GPU);
cudaFree(seed_GPU);
cudaFree(partial_sums);
cudaFree(weights_GPU);
cudaFree(arrayY_GPU);
cudaFree(arrayX_GPU);
cudaFree(x_partial_sums);
cudaFree(y_partial_sums);
cudaMemcpy(x_loc, xloc_GPU, sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(y_loc, yloc_GPU, sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(xloc_GPU);
cudaFree(yloc_GPU);
}
/**
* Function that allows the 2 particle filter implementations to be run
* @details The number of arguments provided to this function determines which function will be called. 7 args will call the video processing version. 6 (leaving out the number of frames) will call the image processing version.
* @param nlhs (Number on the Left Hand Side) The number of items to return (2 will be in this case; the x and y arrays)
* @param plhs (Parameters on the Left Hand Side) A pointer to the arrays containing the x and y arrays
 * @param nrhs (Number on the Right Hand Side) The number of arguments taken in. 7 are needed for video processing (the image as an unsigned char, the x dimension, the y dimension, the number of frames, the number of particles, the x starting position, and the y starting position);
 * 6 are needed for image processing (the same as above, but leaving out the number of frames).
* @param prhs (Parameters on the Right Hand Side) A pointer to the arrays containing the parameters
*/
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){
unsigned char * I;
int IszX, IszY, Nfr, Nparticles;
srand(time(0));
if(nrhs < 6)
{
printf("ERROR: TOO FEW ARGS HAVE BEEN ENTERED\n");
printf("EXITING\n");
exit(0);
}
else if(nrhs == 7)
{
IszX = (int)(mxGetScalar(prhs[1]));
IszY = (int)(mxGetScalar(prhs[2]));
Nfr = (int)(mxGetScalar(prhs[3]));
Nparticles = (int)(mxGetScalar(prhs[4]));
unsigned char * cI = (unsigned char *)mxGetData(prhs[0]);
I = (unsigned char *)mxCalloc(IszX*IszY*Nfr, sizeof(unsigned char));
int x, y, z;
for(x = 0; x < IszX; x++){
for(y = 0; y < IszY; y++){
for(z = 0; z < Nfr; z++){
I[x*IszY*Nfr + y*Nfr + z] = (unsigned char)cI[x*IszY*Nfr + y*Nfr + z];
}
}
}
float xe = (float)mxGetScalar(prhs[5]);
float ye = (float)mxGetScalar(prhs[6]);
int * seed = (int *)mxCalloc(Nparticles, sizeof(int));
int i;
for(i = 0; i < Nparticles; i++)
seed[i] = time(0)*i;
float * posX = (float *)mxCalloc(Nfr, sizeof(float));
float * posY = (float *)mxCalloc(Nfr, sizeof(float));
particleFilter(I, IszX, IszY, Nfr, seed, Nparticles, posX, posY, xe, ye);
mxFree(I);
mxFree(seed);
plhs[0] = mxCreateDoubleMatrix(Nfr, 1, mxREAL);
plhs[1] = mxCreateDoubleMatrix(Nfr, 1, mxREAL);
double * bufferX = mxGetPr(plhs[0]);
double * bufferY = mxGetPr(plhs[1]);
for(i = 0; i < Nfr; i++)
{
bufferX[i] = (double)posX[i];
bufferY[i] = (double)posY[i];
}
mxFree(posX);
mxFree(posY);
}
else if(nrhs == 6)
{
IszX = (int)(mxGetScalar(prhs[1]));
IszY = (int)(mxGetScalar(prhs[2]));
Nparticles = (int)(mxGetScalar(prhs[3]));
double startX = (double)mxGetScalar(prhs[4]);
double startY = (double)mxGetScalar(prhs[5]);
unsigned char * cI = (unsigned char *)mxGetData(prhs[0]);
I = (unsigned char *)mxCalloc(IszX*IszY, sizeof(unsigned char));
int x, y;
for(x = 0; x < IszX; x++){
for(y = 0; y < IszY; y++){
I[x*IszX + y] = (unsigned char)cI[x*IszX + y];
}
}
int * seed = (int *)mxCalloc(Nparticles, sizeof(int));
int i;
for(i = 0; i < Nparticles; i++)
seed[i] = time(0)*i;
float posX[1];
float posY[1];
particleFilter1F(I, IszX, IszY, seed, Nparticles, posX, posY, (float)startX, (float)startY);
mxFree(I);
mxFree(seed);
plhs[0] = mxCreateDoubleMatrix(1,1,mxREAL);
plhs[1] = mxCreateDoubleMatrix(1,1,mxREAL);
double * bufferX = mxGetPr(plhs[0]);
double * bufferY = mxGetPr(plhs[1]);
bufferX[0] = posX[0];
bufferY[0] = posY[0];
}
else
{
printf("ERROR: TOO MANY ARGS\n");
printf("EXITING\n");
exit(0);
}
}
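/*
 * Illustrative usage from MATLAB. The MEX binary name "particlefilter" below is
 * only an assumption for this example; the actual name depends on how this file
 * is compiled with mex/nvmex.
 *
 *   % 7 right-hand-side args -> video mode, returns per-frame trajectories
 *   [posX, posY] = particlefilter(I, IszX, IszY, Nfr, Nparticles, xe, ye);
 *
 *   % 6 right-hand-side args -> single-frame mode, returns one (x, y) estimate
 *   [x, y] = particlefilter(I, IszX, IszY, Nparticles, startX, startY);
 */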
/**
* Unused
*/
int main(){
return 0;
}
|
ec0b0721712dc68f535b8a2602851555aeda0bdd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/hpc_helpers.hpp"
template <
typename index_t,
typename value_t,
index_t num_iters=256> __global__
void square_root_kernel(
value_t * Data,
index_t length) {
const index_t thid = blockDim.x*blockIdx.x+threadIdx.x;
for (index_t i = thid; i < length; i += blockDim.x*gridDim.x){
value_t value = Data[i];
value_t root = value;
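        // Newton-Raphson iteration for f(x) = x*x - value:
        // x_{k+1} = 0.5*(x_k + value/x_k), seeded with x_0 = value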
# pragma unroll (32)
for (index_t iters = 0; iters < num_iters && value; iters++)
root = 0.5*(root+value/root);
Data[i] = root;
}
}
int main () {
typedef float value_t;
typedef uint64_t index_t;
const index_t length = 1UL << 30;
int num_gpus;
hipGetDeviceCount(&num_gpus);
const index_t batch_size = length/num_gpus;
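    // note: this assumes num_gpus evenly divides length; any remaining
    // length % num_gpus elements would never be copied to or processed by a GPU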
value_t * data = nullptr, * Data[num_gpus];
hipHostMalloc(&data, sizeof(value_t)*length); CUERR
for (index_t gpu = 0; gpu < num_gpus; gpu++) {
hipSetDevice(gpu);
hipMalloc(&Data[gpu], sizeof(value_t)*batch_size); CUERR
}
for (index_t index = 0; index < length; index++)
data[index] = index;
TIMERSTART(overall)
for (index_t gpu = 0; gpu < num_gpus; gpu++) {
const index_t offset = gpu*batch_size;
hipSetDevice(gpu); CUERR
hipMemcpy(Data[gpu], data+offset, sizeof(value_t)*batch_size,
hipMemcpyHostToDevice); CUERR
}
for (index_t gpu = 0; gpu < num_gpus; gpu++) {
hipSetDevice(gpu); CUERR
hipLaunchKernelGGL(( square_root_kernel), dim3(1024), dim3(1024), 0, 0, Data[gpu], batch_size); CUERR
}
for (index_t gpu = 0; gpu < num_gpus; gpu++) {
const index_t offset = gpu*batch_size;
hipSetDevice(gpu); CUERR
hipMemcpy(data+offset, Data[gpu], sizeof(value_t)*batch_size,
hipMemcpyDeviceToHost); CUERR
}
TIMERSTOP(overall)
for (index_t index = 0; index < length; index += batch_size/10)
std::cout << index << " " << data[index] << std::endl;
hipHostFree(data); CUERR
for (index_t gpu = 0; gpu < num_gpus; gpu++) {
hipSetDevice(gpu);
hipFree(Data[gpu]); CUERR
}
}
| ec0b0721712dc68f535b8a2602851555aeda0bdd.cu | #include "../include/hpc_helpers.hpp"
template <
typename index_t,
typename value_t,
index_t num_iters=256> __global__
void square_root_kernel(
value_t * Data,
index_t length) {
const index_t thid = blockDim.x*blockIdx.x+threadIdx.x;
for (index_t i = thid; i < length; i += blockDim.x*gridDim.x){
value_t value = Data[i];
value_t root = value;
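        // Newton-Raphson iteration for f(x) = x*x - value:
        // x_{k+1} = 0.5*(x_k + value/x_k), seeded with x_0 = value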
# pragma unroll (32)
for (index_t iters = 0; iters < num_iters && value; iters++)
root = 0.5*(root+value/root);
Data[i] = root;
}
}
int main () {
typedef float value_t;
typedef uint64_t index_t;
const index_t length = 1UL << 30;
int num_gpus;
cudaGetDeviceCount(&num_gpus);
const index_t batch_size = length/num_gpus;
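    // note: this assumes num_gpus evenly divides length; any remaining
    // length % num_gpus elements would never be copied to or processed by a GPU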
value_t * data = nullptr, * Data[num_gpus];
cudaMallocHost(&data, sizeof(value_t)*length); CUERR
for (index_t gpu = 0; gpu < num_gpus; gpu++) {
cudaSetDevice(gpu);
cudaMalloc(&Data[gpu], sizeof(value_t)*batch_size); CUERR
}
for (index_t index = 0; index < length; index++)
data[index] = index;
TIMERSTART(overall)
for (index_t gpu = 0; gpu < num_gpus; gpu++) {
const index_t offset = gpu*batch_size;
cudaSetDevice(gpu); CUERR
cudaMemcpy(Data[gpu], data+offset, sizeof(value_t)*batch_size,
cudaMemcpyHostToDevice); CUERR
}
for (index_t gpu = 0; gpu < num_gpus; gpu++) {
cudaSetDevice(gpu); CUERR
square_root_kernel<<<1024, 1024>>>(Data[gpu], batch_size); CUERR
}
for (index_t gpu = 0; gpu < num_gpus; gpu++) {
const index_t offset = gpu*batch_size;
cudaSetDevice(gpu); CUERR
cudaMemcpy(data+offset, Data[gpu], sizeof(value_t)*batch_size,
cudaMemcpyDeviceToHost); CUERR
}
TIMERSTOP(overall)
for (index_t index = 0; index < length; index += batch_size/10)
std::cout << index << " " << data[index] << std::endl;
cudaFreeHost(data); CUERR
for (index_t gpu = 0; gpu < num_gpus; gpu++) {
cudaSetDevice(gpu);
cudaFree(Data[gpu]); CUERR
}
}
|
01627276e6768b69094d3cee0d4b97c514c4410a.hip | // !!! This is a file automatically generated by hipify!!!
#include "matmul.h"
void crossbowKernelMatMul (void *args) {
/* GEMM variables */
float *A, *B, *C;
int M, N, K;
float alpha, beta;
crossbowDataBufferP __model_variable, output;
int offset, length;
crossbowStreamP s = (crossbowStreamP) args;
__model_variable = crossbowModelVariable (s->model, s->op->kernel->id, 1, &offset, &length);
output = crossbowStreamGetCurrentOutput (s);
M = crossbowVariableSchemaCountElementsInRange (s->examples->schema, 0, 1);
K = crossbowVariableSchemaCountElementsFrom (s->examples->schema, 1);
N = K;
alpha = 1;
beta = 0;
	/* Set device buffers */
A = (float *) s->input->dev; /* Examples are always the first variable */
B = (float *) __model_variable->dev;
C = (float *) output->dev;
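	/* hipblasSgemm computes the column-major product C(MxN) = alpha*A(MxK)*B(KxN) + beta*C.
	   With alpha = 1 and beta = 0 (set above), this is a plain matrix product of the example
	   batch with the model variable, which is square here because N is set equal to K. */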
checkCublasStatus(hipblasSgemm (s->cublasHandle[s->op->branch], HIPBLAS_OP_N, HIPBLAS_OP_N, M, N, K, &alpha, A, M, B, K, &beta, C, M));
/* Store output in stream */
crossbowListAppend(s->outputs[s->op->id], output);
return;
}
| 01627276e6768b69094d3cee0d4b97c514c4410a.cu | #include "matmul.h"
void crossbowKernelMatMul (void *args) {
/* GEMM variables */
float *A, *B, *C;
int M, N, K;
float alpha, beta;
crossbowDataBufferP __model_variable, output;
int offset, length;
crossbowStreamP s = (crossbowStreamP) args;
__model_variable = crossbowModelVariable (s->model, s->op->kernel->id, 1, &offset, &length);
output = crossbowStreamGetCurrentOutput (s);
M = crossbowVariableSchemaCountElementsInRange (s->examples->schema, 0, 1);
K = crossbowVariableSchemaCountElementsFrom (s->examples->schema, 1);
N = K;
alpha = 1;
beta = 0;
	/* Set device buffers */
A = (float *) s->input->dev; /* Examples are always the first variable */
B = (float *) __model_variable->dev;
C = (float *) output->dev;
checkCublasStatus(cublasSgemm (s->cublasHandle[s->op->branch], CUBLAS_OP_N, CUBLAS_OP_N, M, N, K, &alpha, A, M, B, K, &beta, C, M));
/* Store output in stream */
crossbowListAppend(s->outputs[s->op->id], output);
return;
}
|
7124b1799081e825553f1806737d829b1da9ea92.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (C) 2016 Gernot Riegler
// Institute for Computer Graphics and Vision (ICG)
// Graz University of Technology (TU GRAZ)
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// 3. All advertising materials mentioning features or use of this software
// must display the following acknowledgement:
// This product includes software developed by the ICG, TU GRAZ.
// 4. Neither the name of the ICG, TU GRAZ nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE PROVIDER BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "utils.h"
#include "common.h"
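// icgGeneralizedNablaTForward: for every output pixel (h, w) and every direction d,
// accumulate in[d](h + dy, w + dx) - in[d](h, w), where (dx, dy) is the negated d-th
// direction vector; out-of-bounds neighbours contribute zero, and the accumulated
// sum is negated when `neg` is set.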
__global__ void icgGeneralizedNablaTForward(float* out, const float* in, int length, int height,
int width, int neg, float* dir_data, int n_dir) {
// int offset = height * width;
CUDA_KERNEL_LOOP(idx, length) {
int h = idx / width;
int w = idx % width;
float val = 0;
for(int dir_idx = 0; dir_idx < n_dir; ++dir_idx) {
int dir_x = -dir_data[dir_idx];
int dir_y = -dir_data[n_dir + dir_idx];
int in_idx = (dir_idx * height + h) * width + w;
float add1 = (w+dir_x >= 0 && h+dir_y >= 0 && w+dir_x < width && h+dir_y < height) ? in[in_idx + dir_y * width + dir_x] : 0;
float add2 = (w-dir_x >= 0 && h-dir_y >= 0 && w-dir_x < width && h-dir_y < height) ? -in[in_idx] : 0;
val = val + add1 + add2;
}
out[idx] = neg ? -val : val;
}
}
static int icgcunn_IcgGeneralizedNablaT_updateOutput(lua_State *L)
{
THCState* state = getCutorchState(L);
THCudaTensor *in = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
int neg = luaT_getfieldcheckboolean(L, 1, "neg");
THCudaTensor* dir = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "directions", "torch.CudaTensor");
THCudaTensor *out = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
long n_dim = in->nDimension;
luaL_argcheck(L, n_dim == 3 || n_dim == 4, 2, "3D or 4D(batch mode) tensor expected");
long n_dir = THCudaTensor_size(state, dir, 1);
long num, channels, height, width, out_channels;
if(n_dim == 3) {
num = 1;
channels = THCudaTensor_size(state, in, 0);
height = THCudaTensor_size(state, in, 1);
width = THCudaTensor_size(state, in, 2);
out_channels = channels / n_dir;
THCudaTensor_resize3d(state, out, out_channels, height, width);
}
else if(n_dim == 4) {
num = THCudaTensor_size(state, in, 0);
channels = THCudaTensor_size(state, in, 1);
height = THCudaTensor_size(state, in, 2);
width = THCudaTensor_size(state, in, 3);
out_channels = channels / n_dir;
THCudaTensor_resize4d(state, out, num, out_channels, height, width);
}
luaL_argcheck(L, channels % n_dir == 0, 1, "channels % n_dir != 0");
long length = height * width;
in = THCudaTensor_newContiguous(state, in);
float* out_data = THCudaTensor_data(state, out);
float* in_data = THCudaTensor_data(state, in);
float* dir_data = THCudaTensor_data(state, dir);
for(long n = 0; n < num; ++n) {
for(long c = 0; c < out_channels; ++c) {
hipLaunchKernelGGL(( icgGeneralizedNablaTForward), dim3(GET_BLOCKS(length)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
out_data, in_data, length, height, width, neg, dir_data, n_dir);
out_data = out_data + length;
in_data = in_data + n_dir * length;
}
}
THCudaTensor_free(state, in);
THCudaCheck(hipGetLastError());
return 1;
}
__global__ void icgGeneralizedNablaTBackward(float* grad_in, const float* grad_out,
int n, int height, int width, int neg, float* dir_data, int n_dir) {
// int offset = height * width;
CUDA_KERNEL_LOOP(out_idx, n) {
int h = out_idx / width;
int w = out_idx % width;
for(int dir_idx = 0; dir_idx < n_dir; ++dir_idx) {
int dir_x = -dir_data[dir_idx];
int dir_y = -dir_data[n_dir + dir_idx];
int in_idx = ((dir_idx) * height + h) * width + w;
if(w+dir_x >= 0 && h+dir_y >= 0 && w+dir_x < width && h+dir_y < height) {
// grad_in[in_idx + dir_y * width + dir_x] += neg * grad_out[out_idx];
atomicAdd(grad_in + in_idx + dir_y * width + dir_x, neg * grad_out[out_idx]);
}
if(w-dir_x >= 0 && h-dir_y >= 0 && w-dir_x < width && h-dir_y < height) {
// grad_in[in_idx] += -neg * grad_out[out_idx];
atomicAdd(grad_in + in_idx, -neg * grad_out[out_idx]);
}
}
}
}
static int icgcunn_IcgGeneralizedNablaT_updateGradInput(lua_State *L)
{
THCState* state = getCutorchState(L);
THCudaTensor* in = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor* grad_out = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor");
int neg = luaT_getfieldcheckboolean(L, 1, "neg");
THCudaTensor* dir = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "directions", "torch.CudaTensor");
THCudaTensor* out = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
THCudaTensor* grad_in = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
THAssert(THCudaTensor_checkGPU(state, 4, in, out, grad_in, grad_out));
long n_dim = in->nDimension;
long n_dir = THCudaTensor_size(state, dir, 1);
long num, channels, height, width, out_channels;
if(n_dim == 3) {
num = 1;
channels = THCudaTensor_size(state, in, 0);
height = THCudaTensor_size(state, in, 1);
width = THCudaTensor_size(state, in, 2);
out_channels = channels / n_dir;
}
else if(n_dim == 4) {
num = THCudaTensor_size(state, in, 0);
channels = THCudaTensor_size(state, in, 1);
height = THCudaTensor_size(state, in, 2);
width = THCudaTensor_size(state, in, 3);
out_channels = channels / n_dir;
}
THCudaTensor_resizeAs(state, grad_in, in);
long length = height * width;
float* grad_in_data = THCudaTensor_data(state, grad_in);
float* grad_out_data = THCudaTensor_data(state, grad_out);
float* dir_data = THCudaTensor_data(state, dir);
THCudaTensor_zero(state, grad_in);
neg = neg ? -1 : 1;
for(long n = 0; n < num; ++n) {
for(long c = 0; c < out_channels; ++c) {
hipLaunchKernelGGL(( icgGeneralizedNablaTBackward), dim3(GET_BLOCKS(length)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
grad_in_data, grad_out_data, length, height, width, neg, dir_data, n_dir);
grad_in_data = grad_in_data + n_dir * length;
grad_out_data = grad_out_data + length;
}
}
THCudaCheck(hipGetLastError());
return 1;
}
static const struct luaL_Reg icgcunn_IcgGeneralizedNablaT__ [] = {
{"IcgGeneralizedNablaT_updateOutput", icgcunn_IcgGeneralizedNablaT_updateOutput},
{"IcgGeneralizedNablaT_updateGradInput", icgcunn_IcgGeneralizedNablaT_updateGradInput},
{NULL, NULL}
};
void icgcunn_IcgGeneralizedNablaT_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, icgcunn_IcgGeneralizedNablaT__, "icgnn");
lua_pop(L,1);
}
| 7124b1799081e825553f1806737d829b1da9ea92.cu | // Copyright (C) 2016 Gernot Riegler
// Institute for Computer Graphics and Vision (ICG)
// Graz University of Technology (TU GRAZ)
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// 3. All advertising materials mentioning features or use of this software
// must display the following acknowledgement:
// This product includes software developed by the ICG, TU GRAZ.
// 4. Neither the name of the ICG, TU GRAZ nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE PROVIDER BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "utils.h"
#include "common.h"
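// icgGeneralizedNablaTForward: for every output pixel (h, w) and every direction d,
// accumulate in[d](h + dy, w + dx) - in[d](h, w), where (dx, dy) is the negated d-th
// direction vector; out-of-bounds neighbours contribute zero, and the accumulated
// sum is negated when `neg` is set.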
__global__ void icgGeneralizedNablaTForward(float* out, const float* in, int length, int height,
int width, int neg, float* dir_data, int n_dir) {
// int offset = height * width;
CUDA_KERNEL_LOOP(idx, length) {
int h = idx / width;
int w = idx % width;
float val = 0;
for(int dir_idx = 0; dir_idx < n_dir; ++dir_idx) {
int dir_x = -dir_data[dir_idx];
int dir_y = -dir_data[n_dir + dir_idx];
int in_idx = (dir_idx * height + h) * width + w;
float add1 = (w+dir_x >= 0 && h+dir_y >= 0 && w+dir_x < width && h+dir_y < height) ? in[in_idx + dir_y * width + dir_x] : 0;
float add2 = (w-dir_x >= 0 && h-dir_y >= 0 && w-dir_x < width && h-dir_y < height) ? -in[in_idx] : 0;
val = val + add1 + add2;
}
out[idx] = neg ? -val : val;
}
}
static int icgcunn_IcgGeneralizedNablaT_updateOutput(lua_State *L)
{
THCState* state = getCutorchState(L);
THCudaTensor *in = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
int neg = luaT_getfieldcheckboolean(L, 1, "neg");
THCudaTensor* dir = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "directions", "torch.CudaTensor");
THCudaTensor *out = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
long n_dim = in->nDimension;
luaL_argcheck(L, n_dim == 3 || n_dim == 4, 2, "3D or 4D(batch mode) tensor expected");
long n_dir = THCudaTensor_size(state, dir, 1);
long num, channels, height, width, out_channels;
if(n_dim == 3) {
num = 1;
channels = THCudaTensor_size(state, in, 0);
height = THCudaTensor_size(state, in, 1);
width = THCudaTensor_size(state, in, 2);
out_channels = channels / n_dir;
THCudaTensor_resize3d(state, out, out_channels, height, width);
}
else if(n_dim == 4) {
num = THCudaTensor_size(state, in, 0);
channels = THCudaTensor_size(state, in, 1);
height = THCudaTensor_size(state, in, 2);
width = THCudaTensor_size(state, in, 3);
out_channels = channels / n_dir;
THCudaTensor_resize4d(state, out, num, out_channels, height, width);
}
luaL_argcheck(L, channels % n_dir == 0, 1, "channels % n_dir != 0");
long length = height * width;
in = THCudaTensor_newContiguous(state, in);
float* out_data = THCudaTensor_data(state, out);
float* in_data = THCudaTensor_data(state, in);
float* dir_data = THCudaTensor_data(state, dir);
for(long n = 0; n < num; ++n) {
for(long c = 0; c < out_channels; ++c) {
icgGeneralizedNablaTForward<<<GET_BLOCKS(length), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
out_data, in_data, length, height, width, neg, dir_data, n_dir);
out_data = out_data + length;
in_data = in_data + n_dir * length;
}
}
THCudaTensor_free(state, in);
THCudaCheck(cudaGetLastError());
return 1;
}
__global__ void icgGeneralizedNablaTBackward(float* grad_in, const float* grad_out,
int n, int height, int width, int neg, float* dir_data, int n_dir) {
// int offset = height * width;
CUDA_KERNEL_LOOP(out_idx, n) {
int h = out_idx / width;
int w = out_idx % width;
for(int dir_idx = 0; dir_idx < n_dir; ++dir_idx) {
int dir_x = -dir_data[dir_idx];
int dir_y = -dir_data[n_dir + dir_idx];
int in_idx = ((dir_idx) * height + h) * width + w;
if(w+dir_x >= 0 && h+dir_y >= 0 && w+dir_x < width && h+dir_y < height) {
// grad_in[in_idx + dir_y * width + dir_x] += neg * grad_out[out_idx];
atomicAdd(grad_in + in_idx + dir_y * width + dir_x, neg * grad_out[out_idx]);
}
if(w-dir_x >= 0 && h-dir_y >= 0 && w-dir_x < width && h-dir_y < height) {
// grad_in[in_idx] += -neg * grad_out[out_idx];
atomicAdd(grad_in + in_idx, -neg * grad_out[out_idx]);
}
}
}
}
static int icgcunn_IcgGeneralizedNablaT_updateGradInput(lua_State *L)
{
THCState* state = getCutorchState(L);
THCudaTensor* in = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor* grad_out = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor");
int neg = luaT_getfieldcheckboolean(L, 1, "neg");
THCudaTensor* dir = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "directions", "torch.CudaTensor");
THCudaTensor* out = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
THCudaTensor* grad_in = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
THAssert(THCudaTensor_checkGPU(state, 4, in, out, grad_in, grad_out));
long n_dim = in->nDimension;
long n_dir = THCudaTensor_size(state, dir, 1);
long num, channels, height, width, out_channels;
if(n_dim == 3) {
num = 1;
channels = THCudaTensor_size(state, in, 0);
height = THCudaTensor_size(state, in, 1);
width = THCudaTensor_size(state, in, 2);
out_channels = channels / n_dir;
}
else if(n_dim == 4) {
num = THCudaTensor_size(state, in, 0);
channels = THCudaTensor_size(state, in, 1);
height = THCudaTensor_size(state, in, 2);
width = THCudaTensor_size(state, in, 3);
out_channels = channels / n_dir;
}
THCudaTensor_resizeAs(state, grad_in, in);
long length = height * width;
float* grad_in_data = THCudaTensor_data(state, grad_in);
float* grad_out_data = THCudaTensor_data(state, grad_out);
float* dir_data = THCudaTensor_data(state, dir);
THCudaTensor_zero(state, grad_in);
neg = neg ? -1 : 1;
for(long n = 0; n < num; ++n) {
for(long c = 0; c < out_channels; ++c) {
icgGeneralizedNablaTBackward<<<GET_BLOCKS(length), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
grad_in_data, grad_out_data, length, height, width, neg, dir_data, n_dir);
grad_in_data = grad_in_data + n_dir * length;
grad_out_data = grad_out_data + length;
}
}
THCudaCheck(cudaGetLastError());
return 1;
}
static const struct luaL_Reg icgcunn_IcgGeneralizedNablaT__ [] = {
{"IcgGeneralizedNablaT_updateOutput", icgcunn_IcgGeneralizedNablaT_updateOutput},
{"IcgGeneralizedNablaT_updateGradInput", icgcunn_IcgGeneralizedNablaT_updateGradInput},
{NULL, NULL}
};
void icgcunn_IcgGeneralizedNablaT_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, icgcunn_IcgGeneralizedNablaT__, "icgnn");
lua_pop(L,1);
}
|
b3fe0c90e5d8cb805d9d3e5710a26a0f094eda7d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2021 by Contributors
* \file array/cuda/cuda_filter.cc
* \brief Object for selecting items in a set, or selecting items not in a set.
*/
#include <dgl/runtime/device_api.h>
#include "../filter.h"
#include "../../runtime/cuda/cuda_hashtable.cuh"
#include "./dgl_cub.cuh"
namespace dgl {
namespace array {
using namespace dgl::runtime::cuda;
namespace {
// TODO(nv-dlasalle): Replace with getting the stream from the context
// when it's implemented.
constexpr hipStream_t cudaDefaultStream = 0;
template<typename IdType, bool include>
__global__ void _IsInKernel(
DeviceOrderedHashTable<IdType> table,
const IdType * const array,
const int64_t size,
IdType * const mark) {
const int64_t idx = threadIdx.x + blockDim.x*blockIdx.x;
if (idx < size) {
mark[idx] = table.Contains(array[idx]) ^ (!include);
}
}
template<typename IdType>
__global__ void _InsertKernel(
const IdType * const prefix,
const int64_t size,
IdType * const result) {
const int64_t idx = threadIdx.x + blockDim.x*blockIdx.x;
if (idx < size) {
if (prefix[idx] != prefix[idx+1]) {
result[prefix[idx]] = idx;
}
}
}
template<typename IdType, bool include>
IdArray _PerformFilter(
const OrderedHashTable<IdType>& table,
IdArray test) {
const auto& ctx = test->ctx;
auto device = runtime::DeviceAPI::Get(ctx);
const int64_t size = test->shape[0];
if (size == 0) {
return test;
}
hipStream_t stream = cudaDefaultStream;
// we need two arrays: 1) to act as a prefixsum
// for the number of entries that will be inserted, and
// 2) to collect the included items.
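  // worked example (illustrative): with include == true, test = {5, 9, 2, 7} and a
  // table containing {2, 5}:
  //   marks written into prefix = [1, 0, 1, 0]
  //   prefix after the exclusive sum over size+1 entries = [0, 1, 1, 2, 2]
  //   result = [0, 2], and num_unique = prefix[size] = 2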
IdType * prefix = static_cast<IdType*>(
device->AllocWorkspace(ctx, sizeof(IdType)*(size+1)));
// will resize down later
IdArray result = aten::NewIdArray(size, ctx, sizeof(IdType)*8);
  // mark each index based on its existence in the hashtable
{
const dim3 block(256);
const dim3 grid((size+block.x-1)/block.x);
hipLaunchKernelGGL(( _IsInKernel<IdType, include>), dim3(grid), dim3(block), 0, stream,
table.DeviceHandle(),
static_cast<const IdType*>(test->data),
size,
prefix);
CUDA_CALL(hipGetLastError());
}
// generate prefix-sum
{
size_t workspace_bytes;
CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(
nullptr,
workspace_bytes,
static_cast<IdType*>(nullptr),
static_cast<IdType*>(nullptr),
size+1));
void * workspace = device->AllocWorkspace(ctx, workspace_bytes);
CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(
workspace,
workspace_bytes,
prefix,
prefix,
size+1, stream));
device->FreeWorkspace(ctx, workspace);
}
  // copy the number of selected entries (the last prefix-sum value) back to the host
IdType num_unique;
device->CopyDataFromTo(prefix+size, 0,
&num_unique, 0,
sizeof(num_unique),
ctx,
DGLContext{kDLCPU, 0},
test->dtype,
stream);
  // scatter the index of each selected item into the result array
{
const dim3 block(256);
const dim3 grid((size+block.x-1)/block.x);
hipLaunchKernelGGL(( _InsertKernel), dim3(grid), dim3(block), 0, stream,
prefix,
size,
static_cast<IdType*>(result->data));
CUDA_CALL(hipGetLastError());
}
device->FreeWorkspace(ctx, prefix);
return result.CreateView({num_unique}, result->dtype);
}
template<typename IdType>
class CudaFilterSet : public Filter {
public:
explicit CudaFilterSet(IdArray array) :
table_(array->shape[0], array->ctx, cudaDefaultStream) {
table_.FillWithUnique(
static_cast<const IdType*>(array->data),
array->shape[0],
cudaDefaultStream);
}
IdArray find_included_indices(IdArray test) override {
return _PerformFilter<IdType, true>(table_, test);
}
IdArray find_excluded_indices(IdArray test) override {
return _PerformFilter<IdType, false>(table_, test);
}
private:
OrderedHashTable<IdType> table_;
};
} // namespace
template<DLDeviceType XPU, typename IdType>
FilterRef CreateSetFilter(IdArray set) {
return FilterRef(std::make_shared<CudaFilterSet<IdType>>(set));
}
template FilterRef CreateSetFilter<kDLGPU, int32_t>(IdArray set);
template FilterRef CreateSetFilter<kDLGPU, int64_t>(IdArray set);
} // namespace array
} // namespace dgl
| b3fe0c90e5d8cb805d9d3e5710a26a0f094eda7d.cu | /*!
* Copyright (c) 2021 by Contributors
* \file array/cuda/cuda_filter.cc
* \brief Object for selecting items in a set, or selecting items not in a set.
*/
#include <dgl/runtime/device_api.h>
#include "../filter.h"
#include "../../runtime/cuda/cuda_hashtable.cuh"
#include "./dgl_cub.cuh"
namespace dgl {
namespace array {
using namespace dgl::runtime::cuda;
namespace {
// TODO(nv-dlasalle): Replace with getting the stream from the context
// when it's implemented.
constexpr cudaStream_t cudaDefaultStream = 0;
template<typename IdType, bool include>
__global__ void _IsInKernel(
DeviceOrderedHashTable<IdType> table,
const IdType * const array,
const int64_t size,
IdType * const mark) {
const int64_t idx = threadIdx.x + blockDim.x*blockIdx.x;
if (idx < size) {
mark[idx] = table.Contains(array[idx]) ^ (!include);
}
}
template<typename IdType>
__global__ void _InsertKernel(
const IdType * const prefix,
const int64_t size,
IdType * const result) {
const int64_t idx = threadIdx.x + blockDim.x*blockIdx.x;
if (idx < size) {
if (prefix[idx] != prefix[idx+1]) {
result[prefix[idx]] = idx;
}
}
}
template<typename IdType, bool include>
IdArray _PerformFilter(
const OrderedHashTable<IdType>& table,
IdArray test) {
const auto& ctx = test->ctx;
auto device = runtime::DeviceAPI::Get(ctx);
const int64_t size = test->shape[0];
if (size == 0) {
return test;
}
cudaStream_t stream = cudaDefaultStream;
// we need two arrays: 1) to act as a prefixsum
// for the number of entries that will be inserted, and
// 2) to collect the included items.
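  // worked example (illustrative): with include == true, test = {5, 9, 2, 7} and a
  // table containing {2, 5}:
  //   marks written into prefix = [1, 0, 1, 0]
  //   prefix after the exclusive sum over size+1 entries = [0, 1, 1, 2, 2]
  //   result = [0, 2], and num_unique = prefix[size] = 2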
IdType * prefix = static_cast<IdType*>(
device->AllocWorkspace(ctx, sizeof(IdType)*(size+1)));
// will resize down later
IdArray result = aten::NewIdArray(size, ctx, sizeof(IdType)*8);
  // mark each index based on its existence in the hashtable
{
const dim3 block(256);
const dim3 grid((size+block.x-1)/block.x);
_IsInKernel<IdType, include><<<grid, block, 0, stream>>>(
table.DeviceHandle(),
static_cast<const IdType*>(test->data),
size,
prefix);
CUDA_CALL(cudaGetLastError());
}
// generate prefix-sum
{
size_t workspace_bytes;
CUDA_CALL(cub::DeviceScan::ExclusiveSum(
nullptr,
workspace_bytes,
static_cast<IdType*>(nullptr),
static_cast<IdType*>(nullptr),
size+1));
void * workspace = device->AllocWorkspace(ctx, workspace_bytes);
CUDA_CALL(cub::DeviceScan::ExclusiveSum(
workspace,
workspace_bytes,
prefix,
prefix,
size+1, stream));
device->FreeWorkspace(ctx, workspace);
}
  // copy the number of selected entries (the last prefix-sum value) back to the host
IdType num_unique;
device->CopyDataFromTo(prefix+size, 0,
&num_unique, 0,
sizeof(num_unique),
ctx,
DGLContext{kDLCPU, 0},
test->dtype,
stream);
  // scatter the index of each selected item into the result array
{
const dim3 block(256);
const dim3 grid((size+block.x-1)/block.x);
_InsertKernel<<<grid, block, 0, stream>>>(
prefix,
size,
static_cast<IdType*>(result->data));
CUDA_CALL(cudaGetLastError());
}
device->FreeWorkspace(ctx, prefix);
return result.CreateView({num_unique}, result->dtype);
}
template<typename IdType>
class CudaFilterSet : public Filter {
public:
explicit CudaFilterSet(IdArray array) :
table_(array->shape[0], array->ctx, cudaDefaultStream) {
table_.FillWithUnique(
static_cast<const IdType*>(array->data),
array->shape[0],
cudaDefaultStream);
}
IdArray find_included_indices(IdArray test) override {
return _PerformFilter<IdType, true>(table_, test);
}
IdArray find_excluded_indices(IdArray test) override {
return _PerformFilter<IdType, false>(table_, test);
}
private:
OrderedHashTable<IdType> table_;
};
} // namespace
template<DLDeviceType XPU, typename IdType>
FilterRef CreateSetFilter(IdArray set) {
return FilterRef(std::make_shared<CudaFilterSet<IdType>>(set));
}
template FilterRef CreateSetFilter<kDLGPU, int32_t>(IdArray set);
template FilterRef CreateSetFilter<kDLGPU, int64_t>(IdArray set);
} // namespace array
} // namespace dgl
|
ecbce6f764c3158335dfa69e452a81a29f6a92e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* The MIT License
*
* Copyright (c) 1997-2016 The University of Utah
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/* GPU DataWarehouse device & host access*/
#include <CCA/Components/Schedulers/GPUDataWarehouse.h>
#include <Core/Grid/Variables/GPUVariable.h>
#include <Core/Grid/Variables/GPUGridVariable.h>
#include <Core/Grid/Variables/GPUReductionVariable.h>
#include <Core/Grid/Variables/GPUPerPatch.h>
#include <CCA/Components/Schedulers/UnifiedScheduler.h>
#include <CCA/Components/Schedulers/GPUMemoryPool.h>
#include <sci_defs/cuda_defs.h>
#include <Core/Parallel/CrowdMonitor.hpp>
#include <Core/Parallel/Parallel.h>
#include <Core/Parallel/ProcessorGroup.h>
#include <CCA/Components/Schedulers/SchedulerCommon.h>
#include <Core/Util/DebugStream.h>
#ifndef __CUDA_ARCH__
# include <string.h>
#include <string>
using namespace std;
#endif
#include <Core/Util/GPU.h>
extern DebugStream gpu_stats;
#include <mutex>
#include <map>
using std::map;
extern std::mutex cerrLock;
namespace {
// These are for uniquely identifying the Uintah::CrowdMonitors<Tag>
// used to protect multi-threaded access to global data structures
struct allocate_tag{};
struct var_tag{};
using allocate_monitor = Uintah::CrowdMonitor<allocate_tag>;
using var_monitor = Uintah::CrowdMonitor<var_tag>;
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::get(const GPUGridVariableBase& var, char const* label, const int patchID, const int matlIndx, const int levelIndx)
{
#ifdef __CUDA_ARCH__
//device code
GPUDataWarehouse::dataItem* item = getItem(label, patchID, matlIndx, levelIndx);
if (item) {
var.setArray3(item->var_offset, item->var_size, item->var_ptr);
}
else {
printGetError("GPUDataWarehouse::get(GPUGridVariableBase& var, ...)", label, levelIndx, patchID, matlIndx);
}
#else
//host code
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
allVarPointersInfo vp = varPointers->operator[](lpml);
var.setArray3(vp.device_offset, vp.device_size, vp.device_ptr);
}
else {
printf("I'm GPUDW with name: \"%s\" at %p \n", _internalName, this);
printGetError("GPUDataWarehouse::get(GPUGridVariableBase& var, ...)", label, levelIndx, patchID, matlIndx);
}
}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE bool
GPUDataWarehouse::stagingVarExists(char const* label, int patchID, int matlIndx, int levelIndx, int3 offset, int3 size)
{
#ifdef __CUDA_ARCH__
//device code
printError("This method not defined for the device.", "stagingVarExists", label, patchID, matlIndx, levelIndx);
return false;
#else
//host code
bool retval = false;
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
if (it != varPointers->end()) {
stagingVar sv;
sv.device_offset = offset;
sv.device_size = size;
std::map<stagingVar, stagingVarInfo>::iterator staging_it = it->second.stagingVars.find(sv);
retval = (staging_it != it->second.stagingVars.end());
}
}
return retval;
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::getStagingVar(const GPUGridVariableBase& var, char const* label, int patchID, int matlIndx, int levelIndx, int3 offset, int3 size)
{
#ifdef __CUDA_ARCH__
//device code
printError("This method not defined for the device.", "getStagingVar", label, patchID, matlIndx, levelIndx);
#else
//host code
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
if (it != varPointers->end()) {
stagingVar sv;
sv.device_offset = offset;
sv.device_size = size;
std::map<stagingVar, stagingVarInfo>::iterator staging_it = it->second.stagingVars.find(sv);
if (staging_it != it->second.stagingVars.end()) {
var.setArray3(offset, size, staging_it->second.device_ptr);
}
else {
printf(
"GPUDataWarehouse::getStagingVar() - Didn't find a staging variable from the device for label %s patch %d matl %d level %d offset (%d, %d, %d) size (%d, %d, %d).",
label, patchID, matlIndx, levelIndx, offset.x, offset.y, offset.z, size.x, size.y, size.z);
exit(-1);
}
}
else {
printError("Didn't find a staging variable from the device.", "getStagingVar", label, patchID, matlIndx, levelIndx);
}
}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::getLevel(const GPUGridVariableBase& var, char const* label, int matlIndx, int levelIndx)
{
#ifdef __CUDA_ARCH__
//device code
get(var, label, -99999999, matlIndx, levelIndx);
#else
//host code
get(var, label, -99999999, matlIndx, levelIndx);
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::get(const GPUReductionVariableBase& var, char const* label, const int patchID, const int matlIndx, const int levelIndx)
{
#ifdef __CUDA_ARCH__
GPUDataWarehouse::dataItem* item = getItem(label, patchID, matlIndx, levelIndx);
if (item) {
var.setData(item->var_ptr);
}
else {
printGetError("GPUDataWarehouse::get(GPUReductionVariableBase& var, ...)", label, levelIndx, patchID, matlIndx);
}
#else
//host code
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
allVarPointersInfo vp = varPointers->operator[](lpml);
var.setData(vp.device_ptr);
}
else {
printGetError("GPUDataWarehouse::get(GPUReductionVariableBase& var, ...)", label, levelIndx, patchID, matlIndx);
}
}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::get(const GPUPerPatchBase& var, char const* label, const int patchID, const int matlIndx, const int levelIndx)
{
#ifdef __CUDA_ARCH__
//device code
GPUDataWarehouse::dataItem* item = getItem(label, patchID, matlIndx, levelIndx);
if (item) {
var.setData(item->var_ptr);
}
else {
printGetError("GPUDataWarehouse::get(GPUPerPatchBase& var, ...)", label, levelIndx, patchID, matlIndx);
}
#else
//host code
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
allVarPointersInfo vp = varPointers->operator[](lpml);
var.setData(vp.device_ptr);
}
else {
printGetError("GPUDataWarehouse::get(GPUPerPatchBase& var, ...)", label, levelIndx, patchID, matlIndx);
}
}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::getModifiable(GPUGridVariableBase& var, char const* label, int patchID, int matlIndx, int levelIndx)
{
#ifdef __CUDA_ARCH__
//device code
GPUDataWarehouse::dataItem* item = getItem(label, patchID, matlIndx, levelIndx);
if (item) {
var.setArray3(item->var_offset, item->var_size, item->var_ptr);
}
else {
printGetError("GPUDataWarehouse::getModifiable(GPUGridVariableBase& var, ...)", label, levelIndx, patchID, matlIndx);
}
#else
//host code
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
allVarPointersInfo vp = varPointers->operator[](lpml);
var.setArray3(vp.device_offset, vp.device_size, vp.device_ptr);
}
else {
printGetError("GPUDataWarehouse::get(GPUGridVariableBase& var, ...)", label, levelIndx, patchID, matlIndx);
}
}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::getModifiable(GPUReductionVariableBase& var, char const* label, int patchID, int matlIndx, int levelIndx)
{
#ifdef __CUDA_ARCH__
//device code
GPUDataWarehouse::dataItem* item = getItem(label, patchID, matlIndx, levelIndx);
if (item) {
var.setData(item->var_ptr);
}
else {
printGetError("GPUDataWarehouse::getModifiable(GPUReductionVariableBase& var, ...)", label, levelIndx, patchID, matlIndx);
}
#else
//host code
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
allVarPointersInfo vp = varPointers->operator[](lpml);
var.setData(vp.device_ptr);
}
else {
printGetError("GPUDataWarehouse::get(GPUReductionVariableBase& var, ...)", label, levelIndx, patchID, matlIndx);
}
}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::getModifiable(GPUPerPatchBase& var, char const* label, int patchID, int matlIndx, int levelIndx)
{
#ifdef __CUDA_ARCH__
//device code
GPUDataWarehouse::dataItem* item = getItem(label, patchID, matlIndx, levelIndx);
if (item) {
var.setData(item->var_ptr);
}
else {
printGetError("GPUDataWarehouse::getModifiable(GPUPerPatchBase& var, ...)", label, levelIndx, patchID, matlIndx);
}
#else
//host code
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
allVarPointersInfo vp = varPointers->operator[](lpml);
var.setData(vp.device_ptr);
}
else {
printGetError("GPUDataWarehouse::get(GPUPerPatchBase& var, ...)", label, levelIndx, patchID, matlIndx);
}
}
#endif
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::put(GPUGridVariableBase &var, size_t sizeOfDataType, char const* label, int patchID, int matlIndx, int levelIndx, bool staging,
GhostType gtype, int numGhostCells, void* host_ptr)
{
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
int3 var_offset; // offset
int3 var_size; // dimensions of GPUGridVariable
void* var_ptr; // raw pointer to the memory
var.getArray3(var_offset, var_size, var_ptr);
//See if it already exists. Also see if we need to update this into d_varDB.
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator iter = varPointers->find(lpml);
std::map<stagingVar, stagingVarInfo>::iterator staging_it;
//sanity checks
if (iter == varPointers->end()) {
printf("ERROR:\nGPUDataWarehouse::put( ) Can't use put() for a host-side GPU DW without it first existing in the internal database.\n");
exit(-1);
} else if (staging) {
stagingVar sv;
sv.device_offset = var_offset;
sv.device_size = var_size;
staging_it = iter->second.stagingVars.find(sv);
if (staging_it == iter->second.stagingVars.end()) {
printf("ERROR:\nGPUDataWarehouse::put( ) Can't use put() for a host-side GPU DW without this staging var first existing in the internal database.\n");
exit(-1);
}
}
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::put( " << label << " ) - "
<< " Attempting to put a variable in the host-side varPointers map for label " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx;
if (staging) {
gpu_stats << " staging: true";
} else {
gpu_stats << " staging: false";
}
gpu_stats << " at device address " << var_ptr
<< " with status codes ";
if (!staging) {
gpu_stats << getDisplayableStatusCodes(iter->second.atomicStatusInGpuMemory);
} else {
gpu_stats << getDisplayableStatusCodes(staging_it->second.atomicStatusInGpuMemory);
}
gpu_stats << " datatype size " << sizeOfDataType
<< " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec
<< " with description " << _internalName
<< " current varPointers size is: " << varPointers->size()
<< " low (" << var_offset.x << ", " << var_offset.y << ", " << var_offset.z << ") "
<< endl;
}
cerrLock.unlock();
}
if (staging == false) {
iter->second.varDB_index = -1;
iter->second.device_ptr = var_ptr;
iter->second.device_offset = var_offset;
iter->second.device_size = var_size;
iter->second.sizeOfDataType = sizeOfDataType;
iter->second.gtype = gtype;
iter->second.numGhostCells = numGhostCells;
iter->second.host_contiguousArrayPtr = host_ptr;
iter->second.atomicStatusInHostMemory = UNKNOWN;
//previously set, do not set here
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::put( " << label << " ) - "
<< " Put a regular non-staging variable in the host-side varPointers map for label " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " at device address " << var_ptr
<< " with datatype size " << iter->second.sizeOfDataType
<< " with status codes " << getDisplayableStatusCodes(iter->second.atomicStatusInGpuMemory)
<< " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec
<< " with description " << _internalName
<< " current varPointers size is: " << varPointers->size()
<< endl;
}
cerrLock.unlock();
}
} else { // if (staging == true)
staging_it->second.device_ptr = var_ptr;
staging_it->second.host_contiguousArrayPtr = host_ptr;
staging_it->second.varDB_index = -1;
staging_it->second.atomicStatusInHostMemory = UNKNOWN;
//Update the non-staging var's sizeOfDataType. The staging var uses this number.
//It's possible that a staging var can exist and an empty placeholder non-staging var also exist,
      //if so, then the empty placeholder non-staging var won't have the correct data type size.
//So we grab it here.
iter->second.sizeOfDataType = sizeOfDataType;
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::put( " << label << " ) - "
<< " Put a staging variable in the host-side varPointers map for label " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " with offset (" << var_offset.x << ", " << var_offset.y << ", " << var_offset.z << ")"
<< " and size (" << var_size.x << ", " << var_size.y << ", " << var_size.z << ")"
<< " at device address " << var_ptr
<< " with datatype size " << iter->second.sizeOfDataType
<< " with status codes " << getDisplayableStatusCodes(staging_it->second.atomicStatusInGpuMemory)
<< " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec
<< endl;
}
cerrLock.unlock();
}
}
} // end var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::WRITER }
}
//______________________________________________________________________
//
//This method puts an empty placeholder entry into the GPUDW database and marks it as unallocated
__host__ void
GPUDataWarehouse::putUnallocatedIfNotExists(char const* label, int patchID, int matlIndx, int levelIndx, bool staging, int3 offset, int3 size)
{
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
//if (!staging) {
    //If it's a normal non-staging variable, check whether it already exists. If it doesn't, add an "unallocated" entry.
    //If it's a staging variable, then still check if the non-staging part exists. A staging var must exist within a non-staging variable.
    //A scenario where we can get a staging variable without a non-staging variable is when receiving data from neighboring nodes.
    //For example, suppose node A has patch 0, node B has patch 1, and A's patch 0 needs ghost cells from B's patch 1. Node A will
    //receive those ghost cells, but they will be marked as belonging to patch 1. Since A doesn't have the regular non-staging var
    //for patch 1, we make an empty placeholder for patch 1 so A can have a staging var to hold the ghost cells for patch 1.
if ( it == varPointers->end()) {
allVarPointersInfo vp;
vp.varDB_index = -1;
vp.device_ptr = NULL;
vp.atomicStatusInHostMemory = UNKNOWN;
vp.atomicStatusInGpuMemory = UNALLOCATED;
vp.host_contiguousArrayPtr = NULL;
vp.sizeOfDataType = 0;
std::pair<std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator, bool> ret = varPointers->insert( std::map<labelPatchMatlLevel, allVarPointersInfo>::value_type( lpml, vp ) );
if (!ret.second) {
printf("ERROR:\nGPUDataWarehouse::putUnallocatedIfNotExists( ) Failure inserting into varPointers map.\n");
exit(-1);
}
it = ret.first;
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::putUnallocatedIfNotExists( " << label << " ) - "
<< " Put an unallocated non-staging variable in the host-side varPointers map for label " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec
<< " with description " << _internalName
<< endl;
}
cerrLock.unlock();
}
}
//} else { //staging = true
if (staging) {
std::map<stagingVar, stagingVarInfo>::iterator staging_it;
stagingVar sv;
sv.device_offset = offset;
sv.device_size = size;
staging_it = it->second.stagingVars.find(sv);
if (staging_it == it->second.stagingVars.end()){
stagingVarInfo svi;
svi.varDB_index = -1;
svi.device_ptr = NULL;
svi.host_contiguousArrayPtr = NULL;
svi.atomicStatusInHostMemory = UNKNOWN;
svi.atomicStatusInGpuMemory = UNALLOCATED;
std::pair<stagingVar, stagingVarInfo> p = make_pair( sv, svi );
it->second.stagingVars.insert( p );
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::putUnallocatedIfNotExists( " << label << " ) - "
<< " Put an unallocated staging variable in the host-side varPointers map for label " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
<< " size (" << size.x << ", " << size.y << ", " << size.z << ")"
<< " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec
<< " with description " << _internalName
<< endl;
}
cerrLock.unlock();
}
}
}
} // end var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER }
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::allocateAndPut(GPUGridVariableBase &var, char const* label, int patchID, int matlIndx, int levelIndx, bool staging, int3 low, int3 high, size_t sizeOfDataType, GhostType gtype, int numGhostCells)
{
//Allocate space on the GPU and declare a variable onto the GPU.
//This method does NOT stage everything in a big array.
//Check if it exists prior to allocating memory for it.
//If it has already been allocated, just use that.
  //If it hasn't, this is lock free and the first thread to request the allocation gets to allocate.
  //If another thread sees that allocation is in progress, it loops and waits until the allocation is complete.
bool allocationNeeded = false;
int3 size = make_int3(high.x-low.x, high.y-low.y, high.z-low.z);
int3 offset = low;
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread() << " Calling putUnallocatedIfNotExists() for " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " staging: " << std::boolalpha << staging
<< " with offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
<< " and size (" << size.x << ", " << size.y << ", " << size.z << ")"
<< " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec
<< " with description " << _internalName << endl;
}
cerrLock.unlock();
}
//This variable may not yet exist. But we want to declare we're allocating it. So ensure there is an entry.
putUnallocatedIfNotExists(label, patchID, matlIndx, levelIndx, staging, offset, size);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it;
std::map<stagingVar, stagingVarInfo>::iterator staging_it;
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
it = varPointers->find(lpml);
if (staging) {
stagingVar sv;
sv.device_offset = offset;
sv.device_size = size;
staging_it = it->second.stagingVars.find(sv);
}
}
  //Locking is not needed from here on out in this method. STL maps ensure that iterators still point to the correct values
  //even if other threads add nodes. We just can't remove values, but that shouldn't ever happen.
//This prepares the var with the offset and size. Any possible allocation will come later.
//If it needs to go into the database, that will also come later
void* addr = NULL;
var.setArray3(offset, size, addr);
//Now see if we allocate the variable or use a previous existing allocation.
if (staging == false) {
//See if someone has stated they are allocating it
allocationNeeded = testAndSetAllocating(it->second.atomicStatusInGpuMemory);
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::allocateAndPut( " << label << " ) - "
<< " allocationNeeded is " << std::boolalpha << allocationNeeded
<< " for label " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " with offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
<< " and size (" << size.x << ", " << size.y << ", " << size.z << ")"
<< endl;
}
cerrLock.unlock();
}
if (!allocationNeeded) {
//Someone else is allocating it or it has already been allocated.
//Wait until they are done.
bool allocated = false;
while (!allocated) {
allocated = checkAllocated(it->second.atomicStatusInGpuMemory);
}
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
//Sanity check to ensure we have correct size information.
it = varPointers->find(lpml);
}
if (it->second.device_offset.x == low.x
&& it->second.device_offset.y == low.y
&& it->second.device_offset.z == low.z
&& it->second.device_size.x == size.x
&& it->second.device_size.y == size.y
&& it->second.device_size.z == size.z) {
//Space for this var already exists. Use that and return.
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::allocateAndPut( " << label << " ) - "
<< " This non-staging/regular variable already exists. No need to allocate another. GPUDW has a variable for label " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " with offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
<< " and size (" << size.x << ", " << size.y << ", " << size.z << ")"
<< " on device " << d_device_id
<< " with data pointer " << it->second.device_ptr
<< " with status codes " << getDisplayableStatusCodes(it->second.atomicStatusInGpuMemory)
<< " into GPUDW at " << std::hex << this << std::dec
<< endl;
}
cerrLock.unlock();
}
//Have this var use the existing memory address.
var.setArray3(it->second.device_offset, it->second.device_size, it->second.device_ptr);
} else {
        printf("ERROR:\nGPUDataWarehouse::allocateAndPut( %s ) Variable in database but of the wrong size. This shouldn't ever happen. This needs low (%d, %d, %d) and size (%d, %d, %d), but in the database it is low (%d, %d, %d) and size (%d, %d, %d)\n",
label, low.x, low.y, low.z, size.x, size.y, size.z,
it->second.device_offset.x, it->second.device_offset.y, it->second.device_offset.z,
it->second.device_size.x, it->second.device_size.y, it->second.device_size.z);
exit(-1);
}
}
} else {
//it's a staging variable
if (staging_it != it->second.stagingVars.end()) {
////This variable exists in the database, no need to "put" it in again.
//putNeeded = false;
//See if someone has stated they are allocating it
allocationNeeded = testAndSetAllocating(staging_it->second.atomicStatusInGpuMemory);
if (!allocationNeeded) {
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::allocateAndPut( " << label << " ) - "
<< " This staging variable already exists. No need to allocate another. For label " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " with offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
<< " and size (" << size.x << ", " << size.y << ", " << size.z << ")"
<< " on device " << d_device_id
<< " with data pointer " << staging_it->second.device_ptr
<< " with status codes " << getDisplayableStatusCodes(staging_it->second.atomicStatusInGpuMemory)
<< " into GPUDW at " << std::hex << this << std::dec
<< endl;
}
cerrLock.unlock();
}
//We need the pointer. We can't move on until we get the pointer.
//Ensure that it has been allocated (just not allocating). Another thread may have been assigned to allocate it
//but not completed that action. If that's the case, wait until it's done so we can get the pointer.
bool allocated = false;
while (!allocated) {
allocated = checkAllocated(staging_it->second.atomicStatusInGpuMemory);
}
//Have this var use the existing memory address.
var.setArray3(offset, size, staging_it->second.device_ptr);
}
}
}
//Now allocate it
if (allocationNeeded) {
OnDemandDataWarehouse::uintahSetCudaDevice(d_device_id);
unsigned int memSize = var.getMemSize();
//if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::allocateAndPut(), calling GPUMemoryPool::allocateCudaSpaceFromPool"
<< " for " << label
<< " patch " << patchID
<< " material " << matlIndx
<< " level " << levelIndx
<< " staging: " << std::boolalpha << staging
<< " with offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
<< " and size (" << size.x << ", " << size.y << ", " << size.z << ")"
<< " at " << addr
<< " with status codes ";
if (!staging) {
gpu_stats << getDisplayableStatusCodes(it->second.atomicStatusInGpuMemory);
} else {
gpu_stats << getDisplayableStatusCodes(staging_it->second.atomicStatusInGpuMemory);
}
gpu_stats << " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec << endl;
}
cerrLock.unlock();
//}
addr = GPUMemoryPool::allocateCudaSpaceFromPool(d_device_id, memSize);
//Also update the var object itself
var.setArray3(offset, size, addr);
//Put all remaining information about the variable into the database.
put(var, sizeOfDataType, label, patchID, matlIndx, levelIndx, staging, gtype, numGhostCells);
//Now that the database knows of this and other threads can see the device pointer, update the status from allocating to allocated
if (!staging) {
testAndSetAllocate(it->second.atomicStatusInGpuMemory);
} else {
testAndSetAllocate(staging_it->second.atomicStatusInGpuMemory);
}
}
}
//______________________________________________________________________
//
//This method is meant to take an entry from the host side DW and copy it into
//the task datawarehouse whose job is to eventually live GPU side.
__host__ void
GPUDataWarehouse::copyItemIntoTaskDW(GPUDataWarehouse *hostSideGPUDW, char const* label,
int patchID, int matlIndx, int levelIndx, bool staging,
int3 offset, int3 size) {
if (d_device_copy == NULL) {
//sanity check
printf("ERROR:\nGPUDataWarehouse::copyItemIntoTaskDW() - This method should only be called from a task data warehouse.\n");
exit(-1);
}
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
if (d_numVarDBItems == MAX_VARDB_ITEMS) {
printf("ERROR: Out of GPUDataWarehouse space");
exit(-1);
}
}
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
stagingVar sv;
sv.device_offset = offset;
sv.device_size = size;
//Get the iterator(s) from the host side GPUDW.
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator hostSideGPUDW_iter = hostSideGPUDW->varPointers->find(lpml);
std::map<stagingVar, stagingVarInfo>::iterator hostSideGPUDW_staging_iter;
var_monitor host_var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
if (staging) {
hostSideGPUDW_staging_iter = hostSideGPUDW_iter->second.stagingVars.find(sv);
}
}
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator iter = varPointers->find(lpml);
//sanity check
if (iter != varPointers->end() && !staging) {
printf("ERROR:\nGPUDataWarehouse::copyItemIntoTaskDW() - This task datawarehouse already had an entry for %s patch %d material %d level %d\n", label, patchID, matlIndx, levelIndx);
exit(-1);
}
//If it's staging, there should already be a non-staging var in the host-side GPUDW (even if it's just a placeholder)
//Inserting into this task DW, it is a requirement that non-staging variables get inserted first
//then any staging variables can come in later. This won't handle any scenario where a staging variable is requested
//into the task DW without a non-staging variable already existing here.
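//Illustrative example (hypothetical values): a staging "phi" entry for patch 2 with offset (12, 0, 0) and
//size (1, 12, 12) hangs off the regular "phi" patch 2 entry's stagingVars map; if that regular entry hasn't
//been copied into this task DW yet, the else branch below creates an empty placeholder for it first.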
int d_varDB_index=d_numVarDBItems;
d_numVarDBItems++;
int i = d_varDB_index;
if (!staging) {
//copy the item
allVarPointersInfo vp = hostSideGPUDW_iter->second;
//Clear out any staging vars it may have had
vp.stagingVars.clear();
//Give it a d_varDB index
vp.varDB_index = d_varDB_index;
//insert it in
varPointers->insert( std::map<labelPatchMatlLevel, allVarPointersInfo>::value_type( lpml, vp ) );
strncpy(d_varDB[i].label, label, MAX_NAME_LENGTH);
//if (levelIndx == -1) {
d_varDB[i].domainID = patchID;
//} else {
// d_varDB[i].domainID = -99999999;
//}
d_varDB[i].matlIndx = matlIndx;
d_varDB[i].levelIndx = levelIndx;
d_varDB[i].sizeOfDataType = hostSideGPUDW_iter->second.sizeOfDataType;
d_varDB[i].varItem.gtype = hostSideGPUDW_iter->second.gtype;
d_varDB[i].varItem.numGhostCells = hostSideGPUDW_iter->second.numGhostCells;
d_varDB[i].varItem.staging = staging;
d_varDB[i].ghostItem.dest_varDB_index = -1; //Signify that this d_varDB item is NOT meta data to copy a ghost cell.
d_varDB[i].var_offset = hostSideGPUDW_iter->second.device_offset;
d_varDB[i].var_size = hostSideGPUDW_iter->second.device_size;
d_varDB[i].var_ptr = hostSideGPUDW_iter->second.device_ptr;
} else {
if (iter == varPointers->end()) {
//A staging item was requested but there's no regular variable for it to piggy back in.
//So create an empty placeholder regular variable.
//Start by getting a copy of what the GPU DW already had for this non-staging var
allVarPointersInfo vp = hostSideGPUDW_iter->second;
//Clear out any staging vars it may have had
vp.stagingVars.clear();
//Empty placeholders won't be placed in the d_varDB array.
vp.varDB_index = -1;
//insert it in
std::pair<std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator, bool> ret = varPointers->insert( std::map<labelPatchMatlLevel, allVarPointersInfo>::value_type( lpml, vp ) );
if (!ret.second) {
printf("ERROR:\nGPUDataWarehouse::copyItemIntoTaskDW( ) Failure inserting into varPointers map.\n");
exit(-1);
}
iter = ret.first;
}
//copy the item
stagingVarInfo svi = hostSideGPUDW_staging_iter->second;
//Give it a d_varDB index
svi.varDB_index = d_varDB_index;
//insert it in
std::map<stagingVar, stagingVarInfo>::iterator staging_iter = iter->second.stagingVars.find(sv);
if (staging_iter != iter->second.stagingVars.end()) {
printf("ERROR:\nGPUDataWarehouse::copyItemIntoTaskDW( ) This staging var already exists in this task DW\n");
}
std::pair<stagingVar, stagingVarInfo> p = make_pair( sv, svi );
iter->second.stagingVars.insert( p );
strncpy(d_varDB[i].label, label, MAX_NAME_LENGTH);
//if (levelIndx == -1) {
d_varDB[i].domainID = patchID;
//} else {
// d_varDB[i].domainID = -99999999;
//}
d_varDB[i].matlIndx = matlIndx;
d_varDB[i].levelIndx = levelIndx;
d_varDB[i].sizeOfDataType = hostSideGPUDW_iter->second.sizeOfDataType;
d_varDB[i].varItem.gtype = hostSideGPUDW_iter->second.gtype;
d_varDB[i].varItem.numGhostCells = hostSideGPUDW_iter->second.numGhostCells;
d_varDB[i].varItem.staging = staging;
d_varDB[i].ghostItem.dest_varDB_index = -1; //Signify that this d_varDB item is NOT meta data to copy a ghost cell.
d_varDB[i].var_offset = hostSideGPUDW_staging_iter->first.device_offset;
d_varDB[i].var_size = hostSideGPUDW_staging_iter->first.device_size;
d_varDB[i].var_ptr = hostSideGPUDW_staging_iter->second.device_ptr;
}
d_dirty=true;
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::copyItemIntoTaskDW( " << label << " ) - "
<< " Put into d_varDB at index " << i
<< " of max index " << maxdVarDBItems - 1
<< " label " << label
<< " patch " << d_varDB[i].domainID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " staging: " << std::boolalpha << staging
<< " datatype size " <<d_varDB[i].sizeOfDataType
<< " into address " << d_varDB[i].var_ptr
<< " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec
<< " size [" << d_varDB[i].var_size.x << ", " << d_varDB[i].var_size.y << ", " << d_varDB[i].var_size.z << "]"
<< " offset [" << d_varDB[i].var_offset.x << ", " << d_varDB[i].var_offset.y << ", " << d_varDB[i].var_offset.z << "]"
<< endl;
}
cerrLock.unlock();
}
} // end var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER }
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::putContiguous(GPUGridVariableBase &var, const char* indexID, char const* label, int patchID, int matlIndx, int levelIndx, bool staging, int3 low, int3 high, size_t sizeOfDataType, GridVariableBase* gridVar, bool stageOnHost)
{
#ifdef __CUDA_ARCH__
//Should not put from device side as all memory allocation should be done on CPU side through CUDAMalloc()
#else
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
//first check if this patch/var/matl is in the process of loading in.
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
//Space for this patch already exists. Use that and return.
if (d_debug) {
printf(
"GPUDataWarehouse::putContiguous( %s ). This gpudw database has a variable for label %s patch %d matl %d level %d staging %s on device %d. Reusing it.\n",
label, label, patchID, matlIndx, levelIndx, staging ? "true" : "false", d_device_id);
}
var.setArray3(varPointers->operator[](lpml).device_offset, varPointers->operator[](lpml).device_size,
varPointers->operator[](lpml).device_ptr);
return;
}
int3 size = make_int3(high.x - low.x, high.y - low.y, high.z - low.z);
int3 offset = low;
void* device_ptr = NULL;
var.setArray3(offset, size, device_ptr);
contiguousArrayInfo *ca;
allocate_monitor allocate_read_lock { Uintah::CrowdMonitor < allocate_tag > ::READER };
{
ca = &(contiguousArrays->operator[](indexID));
}
if ((ca->allocatedDeviceMemory == NULL || ca->sizeOfAllocatedMemory - ca->assignedOffset < var.getMemSize()) && stageOnHost) {
printf("ERROR: No room left on device to be assigned address space\n");
if (ca->allocatedDeviceMemory != NULL) {
printf(
"There was %lu bytes allocated, %lu has been assigned, and %lu more bytes were attempted to be assigned for %s patch %d matl %d level %d staging %s\n",
ca->sizeOfAllocatedMemory, ca->assignedOffset, var.getMemSize(), label, patchID, matlIndx, levelIndx,
staging ? "true" : "false");
}
exit(-1);
}
else {
//There is already pre-allocated contiguous memory chunks with room available on
//both the device and the host. Just assign pointers for both the device and host contiguous arrays.
//This prepares the var with the offset and size. The actual address will come next.
void* host_contiguousArrayPtr = NULL;
int varMemSize = var.getMemSize();
device_ptr = (void*)((uint8_t*)ca->allocatedDeviceMemory + ca->assignedOffset);
var.setArray3(offset, size, device_ptr);
host_contiguousArrayPtr = (void*)((uint8_t*)ca->allocatedHostMemory + ca->assignedOffset);
//We ran into cuda misaligned errors previously when mixing different data types. We suspect the ints at 4 bytes
//were the issue. So the engine previously computes buffer room for each variable as a multiple of UnifiedScheduler::bufferPadding.
//So the contiguous array has been sized with extra padding. (For example, if a var holds 12 ints, then it would be 48 bytes in
//size. But if UnifiedScheduler::bufferPadding = 32, then it should add 16 bytes for padding, for a total of 64 bytes).
int memSizePlusPadding = ((UnifiedScheduler::bufferPadding - varMemSize % UnifiedScheduler::bufferPadding)
% UnifiedScheduler::bufferPadding)
+ varMemSize;
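//Worked example of the padding arithmetic above (illustrative only, assuming UnifiedScheduler::bufferPadding == 32):
// varMemSize = 48 -> ((32 - 48 % 32) % 32) + 48 = 16 + 48 = 64 bytes reserved.
// varMemSize = 64 -> ((32 - 64 % 32) % 32) + 64 = 0 + 64 = 64 bytes reserved (already aligned, no padding added).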
ca->assignedOffset += memSizePlusPadding;
if (stageOnHost) {
//Some GPU grid variable data doesn't need to be copied from the host
//For example, computes vars are just uninitialized space.
//Other grid vars need to be copied. This copies the data into a contiguous
//array on the host so that copyDataHostToDevice() can copy the contiguous
//host array to the device.
//Data listed as required. Or compute data that was initialized as a copy of something else.
ca->copiedOffset += memSizePlusPadding;
memcpy(host_contiguousArrayPtr, gridVar->getBasePointer(), varMemSize);
} //else {
//printf("Setting aside space %s %d %d from host location %p host contiguous array %p\n", label, patchID, matlIndx, host_ptr, host_contiguousArrayPtr);
//}
put(var, sizeOfDataType, label, patchID, matlIndx, levelIndx, staging, None, 0, host_contiguousArrayPtr);
//printf("Allocating for %s at patch %d and matl %d size is %d host_ptr %p host_contiguousPtr %p device_ptr %p\n", label, patchID, matlIndx, varMemSize, host_ptr, host_contiguousArrayPtr, device_ptr);
}
} // end var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER }
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::allocate(const char* indexID, size_t size)
{
#ifdef __CUDA_ARCH__
//Should not put from device side as all memory allocation should be done on CPU side through CUDAMalloc()
#else
if (size == 0) {
return;
}
//This method allocates one big chunk of memory so that little allocations do not have to occur for each grid variable.
//This is needed because devices often have substantial overhead for each device malloc and device copy. By putting it into one
//chunk of memory, only one malloc and one copy to device should be needed.
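//Illustrative sketch (hypothetical numbers, not from an actual run): if three grid variables of 48 bytes each
//are staged through this contiguous path with UnifiedScheduler::bufferPadding == 32, a single allocate(indexID, 192)
//call backs all of them, and putContiguous() above hands out device offsets 0, 64, and 128 within that one
//allocation, so only one device malloc and one host-to-device copy are needed instead of three of each.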
double *d_ptr = NULL;
double *h_ptr = NULL;
OnDemandDataWarehouse::uintahSetCudaDevice(d_device_id);
printf("Allocated GPU buffer of size %lu \n", (unsigned long)size);
CUDA_RT_SAFE_CALL(hipMalloc(&d_ptr, size) );
//printf("In allocate(), cuda malloc for size %ld at %p on device %d\n", size, d_ptr, d_device_id);
if (d_debug) {
printf("In allocate(), hipMalloc for size %ld at %p on device %d\n", size, d_ptr, d_device_id);
}
//Now allocate that much also on the host. We want to do this because it's easier to pool up all the data on the host side
//and then move it over to the device side later in one shot. It also allows for one copy doing a device to host later.
//h_ptr = new double[size];
h_ptr = (double*)malloc(size);
//Registering memory seems good in theory, but bad in practice for our purposes.
//On the k20 device on beast.sci.utah.edu, this single register call was taking 0.1 seconds!
//On my home GTX580 device, it was taking 0.015 seconds, better, but still substantial enough
//we should avoid it for now. (If you want to use it, then also uncomment the hipHostUnregister call in clear()).
//hipHostRegister(h_ptr, size, hipHostRegisterPortable);
contiguousArrayInfo ca(d_ptr, h_ptr, size);
allocate_monitor var_write_lock{ Uintah::CrowdMonitor<allocate_tag>::WRITER };
{
contiguousArrays->insert( std::map<const char *, contiguousArrayInfo>::value_type( indexID, ca ) );
// for (std::map<std::string, contiguousArrayInfo>::iterator it = contiguousArrays->begin(); it != contiguousArrays->end(); ++it) {
// printf("%s\n", it->first.c_str());
// }
}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::copyHostContiguousToHost(GPUGridVariableBase& device_var, GridVariableBase* host_var, char const* label, int patchID, int matlIndx, int levelIndx) {
#ifdef __CUDA_ARCH__
//Should not be called from device side as all memory allocation should be done on CPU side through CUDAMalloc()
#else
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
//see if this datawarehouse has anything for this patchGroupID.
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
allVarPointersInfo info = varPointers->operator[](lpml);
device_var.setArray3(varPointers->operator[](lpml).device_offset, varPointers->operator[](lpml).device_size,
info.device_ptr);
// size_t size = device_var.getMemSize();
//TODO: Instead of doing a memcpy, I bet the original host grid variable could just have its pointers updated
//to work with what we were sent back. This would take some considerable work though to get all the details right
//TODO: This needs to be a memcpy async
memcpy(host_var->getBasePointer(), info.host_contiguousArrayPtr, device_var.getMemSize());
//Since we've moved it back into the host, lets mark it as being used.
//It's possible in the future there could be a scenario where we want to bring it
//back to the host but still retain it in the GPU. One scenario is
//sending data to an output .ups file but not modifying it on the host.
remove(label, patchID, matlIndx, levelIndx);
}
else {
printf("ERROR: host copyHostContiguoustoHost unknown variable on GPUDataWarehouse");
//for (std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it=varPointers->begin(); it!=varPointers->end(); ++it)
// printf("%s %d %d => %d \n", it->first.label, it->first.patchID, it->first.matlIndx, it->second.varDB_index);
exit(-1);
}
}
#endif
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::put(GPUReductionVariableBase &var, size_t sizeOfDataType, char const* label, int patchID, int matlIndx, int levelIndx, void* host_ptr)
{
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
void* var_ptr; // raw pointer to the memory
var.getData(var_ptr);
//See if it already exists. Also see if we need to update this into d_varDB.
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator iter = varPointers->find(lpml);
//sanity check
if (iter == varPointers->end()) {
printf("ERROR:\nGPUDataWarehouse::put( ) Can't use put() for a host-side GPU DW without it first existing in the internal database.\n");
exit(-1);
}
iter->second.varDB_index = -1;
iter->second.device_ptr = var_ptr;
iter->second.sizeOfDataType = sizeOfDataType;
iter->second.gtype = None;
iter->second.numGhostCells = 0;
iter->second.host_contiguousArrayPtr = host_ptr;
iter->second.atomicStatusInHostMemory = UNKNOWN;
int3 zeroValue;
zeroValue.x = 0;
zeroValue.y = 0;
zeroValue.z = 0;
iter->second.device_offset = zeroValue;
iter->second.device_size = zeroValue;
//previously set, do not set here
//iter->second.atomicStatusInGputMemory =
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::put( " << label << " ) - "
<< " Put a reduction variable in the host-side varPointers map for label " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " at device address " << var_ptr
<< " with datatype size " << iter->second.sizeOfDataType
<< " with status codes " << getDisplayableStatusCodes(iter->second.atomicStatusInGpuMemory)
<< " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec
<< " with description " << _internalName
<< " current varPointers size is: " << varPointers->size()
<< endl;
}
cerrLock.unlock();
}
}
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::put(GPUPerPatchBase& var, size_t sizeOfDataType, char const* label, int patchID, int matlIndx, int levelIndx, void* host_ptr)
{
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
void* var_ptr; // raw pointer to the memory
var.getData(var_ptr);
//See if it already exists. Also see if we need to update this into d_varDB.
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator iter = varPointers->find(lpml);
//sanity check
if (iter == varPointers->end()) {
printf("ERROR:\nGPUDataWarehouse::put( ) Can't use put() for a host-side GPU DW without it first existing in the internal database.\n");
exit(-1);
}
iter->second.varDB_index = -1;
iter->second.device_ptr = var_ptr;
iter->second.sizeOfDataType = sizeOfDataType;
iter->second.gtype = None;
iter->second.numGhostCells = 0;
iter->second.host_contiguousArrayPtr = host_ptr;
iter->second.atomicStatusInHostMemory = UNKNOWN;
int3 zeroValue;
zeroValue.x = 0;
zeroValue.y = 0;
zeroValue.z = 0;
iter->second.device_offset = zeroValue;
iter->second.device_size = zeroValue;
//previously set, do not set here
//iter->second.atomicStatusInGputMemory =
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::put( " << label << " ) - "
<< " Put a patch variable in the host-side varPointers map for label " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " at device address " << var_ptr
<< " with datatype size " << iter->second.sizeOfDataType
<< " with status codes " << getDisplayableStatusCodes(iter->second.atomicStatusInGpuMemory)
<< " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec
<< " with description " << _internalName
<< " current varPointers size is: " << varPointers->size()
<< endl;
}
cerrLock.unlock();
}
}
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::allocateAndPut(GPUReductionVariableBase& var, char const* label, int patchID, int matlIndx, int levelIndx, size_t sizeOfDataType)
{
//Allocate space on the GPU and declare a variable onto the GPU.
//This method does NOT stage everything in a big array.
//Check if it exists prior to allocating memory for it.
//If it has already been allocated, just use that.
//If it hasn't, this is lock free and the first thread to request allocating gets to allocate
//If another thread sees that allocating is in process, it loops and waits until the allocation is complete.
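//Rough sketch of that handshake (illustrative only; the real status bits live in atomicStatusInGpuMemory
//and are manipulated by testAndSetAllocating() / testAndSetAllocate() / checkAllocated()):
//  if (testAndSetAllocating(status)) { // this thread won the race
//    addr = GPUMemoryPool::allocateCudaSpaceFromPool(...); put(...); testAndSetAllocate(status);
//  } else { // another thread is (or was) allocating; spin until the pointer is published
//    while (!checkAllocated(status)) { } // then reuse the pointer already in varPointers
//  }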
bool allocationNeeded = false;
int3 size = make_int3(0,0,0);
int3 offset = make_int3(0,0,0);
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread() << " Calling putUnallocatedIfNotExists() for " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec
<< " with description " << _internalName << endl;
}
cerrLock.unlock();
}
//This variable may not yet exist. But we want to declare we're allocating it. So ensure there is an entry.
putUnallocatedIfNotExists(label, patchID, matlIndx, levelIndx, false, offset, size);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it;
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
it = varPointers->find(lpml);
}
void* addr = NULL;
//Now see if we allocate the variable or use a previous existing allocation.
//See if someone has stated they are allocating it
allocationNeeded = testAndSetAllocating(it->second.atomicStatusInGpuMemory);
if (!allocationNeeded) {
//Someone else is allocating it or it has already been allocated.
//Space for this var already exists. Use that and return.
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::allocateAndPut( " << label << " ) - "
<< " This reduction variable already exists. No need to allocate another. GPUDW has a variable for label " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " on device " << d_device_id
<< " with data pointer " << it->second.device_ptr
<< " with status codes " << getDisplayableStatusCodes(it->second.atomicStatusInGpuMemory)
<< " into GPUDW at " << std::hex << this << std::dec
<< endl;
}
cerrLock.unlock();
}
//We need the pointer. We can't move on until we get the pointer.
//Ensure that it has been allocated (just not allocating). Another thread may have been assigned to allocate it
//but not completed that action. If that's the case, wait until it's done so we can get the pointer.
bool allocated = false;
while (!allocated) {
allocated = checkAllocated(it->second.atomicStatusInGpuMemory);
}
//Have this var use the existing memory address.
var.setData(addr);
} else {
//We are the first task to request allocation. Do it.
OnDemandDataWarehouse::uintahSetCudaDevice(d_device_id);
size_t memSize = var.getMemSize();
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::allocateAndPut(), calling GPUMemoryPool::allocateCudaSpaceFromPool"
<< " for reduction variable " << label
<< " patch " << patchID
<< " material " << matlIndx
<< " level " << levelIndx
<< " size " << var.getMemSize()
<< " at " << addr
<< " with status codes " << getDisplayableStatusCodes(it->second.atomicStatusInGpuMemory)
<< " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec << endl;
}
cerrLock.unlock();
}
addr = GPUMemoryPool::allocateCudaSpaceFromPool(d_device_id, memSize);
//Also update the var object itself
var.setData(addr);
//Put all remaining information about the variable into the database.
put(var, sizeOfDataType, label, patchID, matlIndx, levelIndx);
//Now that the database knows of this and other threads can see the device pointer, update the status from allocating to allocated
testAndSetAllocate(it->second.atomicStatusInGpuMemory);
}
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::allocateAndPut(GPUPerPatchBase& var, char const* label, int patchID, int matlIndx, int levelIndx, size_t sizeOfDataType)
{
//Allocate space on the GPU and declare a variable onto the GPU.
//This method does NOT stage everything in a big array.
//Check if it exists prior to allocating memory for it.
//If it has already been allocated, just use that.
//If it hasn't, this is lock free and the first thread to request allocating gets to allocate
//If another thread sees that allocating is in process, it loops and waits until the allocation is complete.
bool allocationNeeded = false;
int3 size = make_int3(0,0,0);
int3 offset = make_int3(0,0,0);
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread() << " Calling putUnallocatedIfNotExists() for " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec
<< " with description " << _internalName << endl;
}
cerrLock.unlock();
}
//This variable may not yet exist. But we want to declare we're allocating it. So ensure there is an entry.
putUnallocatedIfNotExists(label, patchID, matlIndx, levelIndx, false, offset, size);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it;
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
it = varPointers->find(lpml);
}
void* addr = NULL;
//Now see if we allocate the variable or use a previous existing allocation.
//See if someone has stated they are allocating it
allocationNeeded = testAndSetAllocating(it->second.atomicStatusInGpuMemory);
if (!allocationNeeded) {
//Someone else is allocating it or it has already been allocated.
//Space for this var already exists. Use that and return.
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::allocateAndPut( " << label << " ) - "
<< " This patch variable already exists. No need to allocate another. GPUDW has a variable for label " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " on device " << d_device_id
<< " with data pointer " << it->second.device_ptr
<< " with status codes " << getDisplayableStatusCodes(it->second.atomicStatusInGpuMemory)
<< " into GPUDW at " << std::hex << this << std::dec
<< endl;
}
cerrLock.unlock();
}
//We need the pointer. We can't move on until we get the pointer.
//Ensure that it has been allocated (just not allocating). Another thread may have been assigned to allocate it
//but not completed that action. If that's the case, wait until it's done so we can get the pointer.
bool allocated = false;
while (!allocated) {
allocated = checkAllocated(it->second.atomicStatusInGpuMemory);
}
//Have this var use the existing memory address.
var.setData(addr);
} else {
//We are the first task to request allocation. Do it.
OnDemandDataWarehouse::uintahSetCudaDevice(d_device_id);
size_t memSize = var.getMemSize();
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::allocateAndPut(), calling GPUMemoryPool::allocateCudaSpaceFromPool"
<< " for PerPatch variable " << label
<< " patch " << patchID
<< " material " << matlIndx
<< " level " << levelIndx
<< " size " << var.getMemSize()
<< " at " << addr
<< " with status codes " << getDisplayableStatusCodes(it->second.atomicStatusInGpuMemory)
<< " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec << endl;
}
cerrLock.unlock();
}
addr = GPUMemoryPool::allocateCudaSpaceFromPool(d_device_id, memSize);
//Also update the var object itself
var.setData(addr);
//Put all remaining information about the variable into the database.
put(var, sizeOfDataType, label, patchID, matlIndx, levelIndx);
//Now that the database knows of this and other threads can see the device pointer, update the status from allocating to allocated
testAndSetAllocate(it->second.atomicStatusInGpuMemory);
}
}
//______________________________________________________________________
//
HOST_DEVICE GPUDataWarehouse::dataItem*
GPUDataWarehouse::getItem(char const* label, int patchID, int matlIndx, int levelIndx)
{
#ifdef __CUDA_ARCH__
//This upcoming __syncthreads is needed. I believe that with CUDA, function calls are inlined.
// If you don't have it this upcoming __syncthreads here's what can happen:
// * The correct index was found by one of the threads.
// * The last __syncthreads is called, all threads met up there.
// * Some threads in the block then make a second "function" call and reset index to -1
// * Meanwhile, those other threads were still in the first "function" call and hadn't
// yet processed if (index == -1). They now run that line. And see index is now -1. That's bad.
// So to prevent this scenario, we have one more __syncthreads.
__syncthreads(); //sync before get
int numThreads = blockDim.x * blockDim.y * blockDim.z;
//int blockID = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; //blockID on the grid
int threadID = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z; //threadID in the block
int i = threadID;
__syncthreads(); //sync before get
//if (d_debug && threadID == 0 && blockID == 0) {
// printf("device getting item \"%s\" from GPUDW %p", label, this);
// printf("size (%d vars)\n Available labels:", d_numVarDBItems);
//}
//Have every thread try to find the label/patchId/matlIndx is a match in
//array. This is a clever approach so that instead of doing a simple
//sequential search with one thread, we can let every thread search for it. Only the
//winning thread gets to write to shared data.
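//For example (illustrative only): with an 8x8x8 block (numThreads == 512) and d_numVarDBItems == 1200,
//thread 7 inspects d_varDB[7], d_varDB[519], and d_varDB[1031]; whichever thread finds the matching
//entry writes its position into the shared index below.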
__shared__ int index;
index = -1;
while(i<d_numVarDBItems){
int strmatch=0;
char const *s1 = label; //reset s1 and s2 back to the start
char const *s2 = &(d_varDB[i].label[0]);
//a one-line strcmp. This should keep branching down to a minimum.
while (!(strmatch = *(unsigned char *) s1 - *(unsigned char *) s2) && *s1++ && *s2++);
//only one thread will ever match this.
//And nobody on the device side should ever access "staging" variables.
if (strmatch == 0) {
if (patchID ==-99999999 //Only getLevel calls should hit this
&& d_varDB[i].matlIndx == matlIndx
&& d_varDB[i].levelIndx == levelIndx
&& d_varDB[i].varItem.staging == false /* we don't support staging/foregin vars for get() */
&& d_varDB[i].ghostItem.dest_varDB_index == -1) { /*don't let ghost cell copy data mix in with normal variables for get() */
index = i; //we found it.
}
else if(d_varDB[i].domainID == patchID
&& d_varDB[i].matlIndx == matlIndx
&& d_varDB[i].levelIndx == levelIndx
&& d_varDB[i].varItem.staging == false
&& d_varDB[i].ghostItem.dest_varDB_index == -1) {
index = i; //we found it.
//printf("I'm thread %d In DW at %p, We found it for var %s patch %d matl %d level %d. d_varDB has it at index %d var %s patch %d at its item address %p with var pointer %p\n",
// threadID, this, label, patchID, matlIndx, levelIndx, index, &(d_varDB[index].label[0]), d_varDB[index].domainID, &d_varDB[index], d_varDB[index].var_ptr);
}
}
i = i + numThreads; //Since every thread is involved in searching for the string, have this thread loop to the next possible item to check for.
}
//sync before return;
__syncthreads();
if (index == -1) {
printf("ERROR:\nGPUDataWarehouse::getItem() didn't find anything for %s patch %d matl %d with threadID %d and numthreads %d\n", label, patchID, matlIndx, threadID, numThreads);
return NULL;
}
return &d_varDB[index];
#else
//__________________________________
// cpu code
/*labelPatchMatlLevel lpm(label, patchID, matlIndx);
int i = 0;
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
if (varPointers->find(lpm) != varPointers->end()) {
i = varPointers[lpm].varDB_index;
} else {
printf("ERROR:\nGPUDataWarehouse::getItem( %s ) host get unknown variable from GPUDataWarehouse\n",label);
exit(-1);
}
}
if (d_debug){
printf("host got \"%s\" loc %p from GPUDW %p on device %u\n", label, d_varDB[i].var_ptr, d_device_copy, d_device_id);
}
//quick error check
if (strcmp(d_varDB[i].label, label) != 0 || d_varDB[i].domainID != patchID || d_varDB[i].matlIndx != matlIndx) {
printf("ERROR:\nGPUDataWarehouse::getItem( %s ), data does not match what was expected\n",label);
exit(-1);
}
*/
printError("This method should only be called device side.", "getItem()", label, patchID, matlIndx, levelIndx );
//printf("ERROR:\nGPUDataWarehouse::getItem() should only be called device side.\n",label);
return &d_varDB[0];
#endif
}
//______________________________________________________________________
//
HOST_DEVICE bool
GPUDataWarehouse::remove(char const* label, int patchID, int matlIndx, int levelIndx)
{
#ifdef __CUDA_ARCH__
printf("GPUDataWarehouse::remove() should not be called on device.\n");
return false;
#else
//It seems there are few scenarios for calling remove. I think the only time it should
//happen is removing staging variables.
//Avoid calling this unless you are absolutely sure what you are doing.
//Further, this doesn't erase any staging vars within a var.
bool retVal = false;
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
if (varPointers->find(lpml) != varPointers->end()) {
int i = varPointers->operator[](lpml).varDB_index;
d_varDB[i].label[0] = '\0'; //leave a hole in the flat array, not deleted.
varPointers->erase(lpml); //TODO: GPU Memory leak?
retVal = true;
d_dirty=true;
}
if (d_debug){
printf("GPUDataWarehouse::remove( %s ). Removed a variable for label %s patch %d matl %d level %d \n",
label, label, patchID, matlIndx, levelIndx);
}
}
return retVal;
#endif
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::init(int id, std::string internalName)
{
#ifdef __CUDA_ARCH__
printf("GPUDataWarehouse::init() should not be called on the device.\n");
#else
d_device_id = id;
//this->_internalName = new std::string(internalName);
strncpy(_internalName, internalName.c_str(), sizeof(_internalName));
objectSizeInBytes = 0;
maxdVarDBItems = 0;
//this->placementNewBuffer = placementNewBuffer;
varPointers = new std::map<labelPatchMatlLevel, allVarPointersInfo>;
contiguousArrays = new std::map<std::string, contiguousArrayInfo>;
//other data members are initialized in the constructor
d_numVarDBItems = 0;
d_numMaterials = 0;
d_debug = false;
//d_numGhostCells = 0;
d_device_copy = NULL;
d_dirty = true;
objectSizeInBytes = 0;
//resetdVarDB();
numGhostCellCopiesNeeded = 0;
#endif
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::cleanup()
{
delete varPointers;
delete contiguousArrays;
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::init_device(size_t objectSizeInBytes, unsigned int maxdVarDBItems)
{
#ifdef __CUDA_ARCH__
printf("GPUDataWarehouse::init_device() should only be called by the framework\n");
#else
this->objectSizeInBytes = objectSizeInBytes;
this->maxdVarDBItems = maxdVarDBItems;
OnDemandDataWarehouse::uintahSetCudaDevice( d_device_id );
void* temp = NULL;
//CUDA_RT_SAFE_CALL(hipMalloc(&temp, objectSizeInBytes));
temp = GPUMemoryPool::allocateCudaSpaceFromPool(d_device_id, objectSizeInBytes);
//if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::init_device() -"
<< " requested GPU space from GPUMemoryPool::allocateCudaSpaceFromPool for Task DW of size " << objectSizeInBytes
<< " bytes at " << temp
<< " on device " << d_device_id
<< " the host GPUDW is at " << this
<< endl;
}
cerrLock.unlock();
//}
d_device_copy = (GPUDataWarehouse*)temp;
//hipHostRegister(this, sizeof(GPUDataWarehouse), hipHostRegisterPortable);
d_dirty = true;
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::syncto_device(void *cuda_stream)
{
#ifdef __CUDA_ARCH__
printf("GPUDataWarehouse::remove() should only be called by the framework\n");
#else
if (!d_device_copy) {
printf("ERROR:\nGPUDataWarehouse::syncto_device()\nNo device copy\n");
exit(-1);
}
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
if (d_dirty){
OnDemandDataWarehouse::uintahSetCudaDevice( d_device_id );
//Even though this is in a writeLock state on the CPU, the nature of multiple threads,
//each with their own stream copying to a GPU, means that one stream might seemingly go out
//of order. This is ok for two reasons. 1) Nothing should ever be *removed* from a gpu data warehouse.
//2) Therefore, it doesn't matter if streams go out of order; each thread will still ensure it copies
//exactly what it needs. Other streams may write additional data to the gpu data warehouse, but cpu
//threads will only access their own data, not data copied in by other cpu threads via streams.
//This approach does NOT require CUDA pinned memory.
//unsigned int sizeToCopy = sizeof(GPUDataWarehouse);
hipStream_t* stream = (hipStream_t*)(cuda_stream);
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::syncto_device() - hipMemcpy -"
<< " sync GPUDW at " << d_device_copy
<< " with description " << _internalName
<< " to device " << d_device_id
<< " on stream " << stream
<< endl;
}
cerrLock.unlock();
}
CUDA_RT_SAFE_CALL (hipMemcpyAsync( d_device_copy, this, objectSizeInBytes, hipMemcpyHostToDevice, *stream));
//CUDA_RT_SAFE_CALL (hipMemcpy( d_device_copy, this, objectSizeInBytes, hipMemcpyHostToDevice));
//if (d_debug) {
//printf("%s sync GPUDW %p to device %d on stream %p\n", UnifiedScheduler::myRankThread().c_str(), d_device_copy, d_device_id, stream);
//}
d_dirty=false;
}
}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::clear()
{
#ifdef __CUDA_ARCH__
//no meaning in device method
#else
OnDemandDataWarehouse::uintahSetCudaDevice( d_device_id );
//delete any grid var that isn't part of a contiguous array
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator varIter;
for (varIter = varPointers->begin(); varIter != varPointers->end(); ++varIter) {
if (varIter->second.host_contiguousArrayPtr == NULL) {
//clear out all the staging vars, if any
std::map<stagingVar, stagingVarInfo>::iterator stagingIter;
for (stagingIter = varIter->second.stagingVars.begin(); stagingIter != varIter->second.stagingVars.end(); ++stagingIter) {
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::clear() -"
<< " calling GPUMemoryPool::freeCudaSpaceFromPool() for staging var for " << varIter->first.label
<< " at device ptr " << stagingIter->second.device_ptr
<< " on device " << d_device_id
<< endl;
}
cerrLock.unlock();
}
//CUDA_RT_SAFE_CALL(hipFree(stagingIter->second.device_ptr));
//stagingIter->second.device_ptr == NULL;
size_t memSize = stagingIter->first.device_size.x *
stagingIter->first.device_size.y *
stagingIter->first.device_size.z *
varIter->second.sizeOfDataType;
if (GPUMemoryPool::freeCudaSpaceFromPool(d_device_id, memSize, stagingIter->second.device_ptr) ) {
stagingIter->second.device_ptr = NULL;
} else {
//The pool did not recognize this address, so the free failed. That shouldn't ever happen.
printf("ERROR:\nGPUDataWarehouse::clear(), for a staging variable, couldn't find in the GPU memory pool the space starting at address %p\n", stagingIter->second.device_ptr);
exit(-1);
}
}
varIter->second.stagingVars.clear();
//clear out the regular vars
//See if it's a placeholder var for staging vars. This happens if the non-staging var
//had a device_ptr of NULL, and it was only in the varPointers map to only hold staging vars
if (varIter->second.device_ptr) {
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::clear() -"
<< " calling GPUMemoryPool::freeCudaSpaceFromPool() for non-staging var for " << varIter->first.label
<< " at device ptr " << varIter->second.device_ptr
<< " on device " << d_device_id
<< endl;
}
cerrLock.unlock();
}
size_t memSize = varIter->second.sizeOfDataType;
if (varIter->second.device_size.x != 0) {
memSize = memSize *
varIter->second.device_size.x *
varIter->second.device_size.y *
varIter->second.device_size.z;
}
if (GPUMemoryPool::freeCudaSpaceFromPool(d_device_id, memSize, varIter->second.device_ptr)) {
varIter->second.device_ptr = NULL;
} else {
printf("ERROR:\nGPUDataWarehouse::clear(), for a non-staging variable, couldn't find in the GPU memory pool the space starting at address %p\n", varIter->second.device_ptr);
exit(-1);
}
}
}
}
varPointers->clear();
//delete all the contiguous arrays
std::map<std::string, contiguousArrayInfo>::iterator iter;
for (iter = contiguousArrays->begin(); iter != contiguousArrays->end(); ++iter) {
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::clear() -"
<< " hipFree for contiguous array for " << iter->first.c_str()
<< " at device ptr " << iter->second.allocatedDeviceMemory
<< " and host free at host ptr " << iter->second.allocatedHostMemory
<< " on device " << d_device_id
<< endl;
}
cerrLock.unlock();
}
CUDA_RT_SAFE_CALL(hipFree(iter->second.allocatedDeviceMemory));
//hipHostUnregister(iter->second.allocatedHostMemory);
free(iter->second.allocatedHostMemory);
}
contiguousArrays->clear();
}
init(d_device_id, _internalName);
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::deleteSelfOnDevice()
{
#ifdef __CUDA_ARCH__
//no meaning in device method
#else
if ( d_device_copy ) {
OnDemandDataWarehouse::uintahSetCudaDevice( d_device_id );
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< "GPUDataWarehouse::deleteSelfOnDevice - calling GPUMemoryPool::freeCudaSpaceFromPool for Task DW at " << std::hex
<< d_device_copy << " on device " << std::dec << d_device_id << std::endl;
}
cerrLock.unlock();
}
//hipHostUnregister(this);
GPUMemoryPool::freeCudaSpaceFromPool(d_device_id, objectSizeInBytes, d_device_copy);
//CUDA_RT_SAFE_CALL(hipFree( d_device_copy ));
}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::resetdVarDB()
{
#ifdef __CUDA_ARCH__
//no meaning in device method
#else
if (d_device_copy != NULL) {
//TODO: When TaskDWs are removed, this section shouldn't be needed as there won't be concurrency problems
//This is designed to help stop tricky race scenarios. One such scenario I encountered was as follows:
//Thread A would call getItem() on the GPU, and look through d_varDB for a matching label/patch/matl tuple.
//Thread B would have previously added a new item to the d_varDB, then called syncto_device.
//Thread B would be partway through updating d_varDB on the GPU. It would increase the number of items by one
//And it would write the label. But it wouldn't yet write the patch or matl part of the tuple. By coincidence
//the old garbage data in the GPU would have exactly the patch and matl that matches thread A's query
//For a very brief window, there would be 2 tuples matching that label/patch/matl pair in d_varDB because
//thread B hasn't fully written in all of his data.
//Thread A's getItem() would run exactly in this brief window, find the wrong match, and use the wrong
//memory address, and the program would crash with an invalid address.
//The answer is to initialize d_varDB to items that should never provide an accidental match.
//This should also occur for all other arrays.
//TODO: This could be cleaned up to only reset as much as was used.
for (int i = 0; i < MAX_VARDB_ITEMS; i++) {
d_varDB[i].label[0] = '\0';
d_varDB[i].domainID = -1;
d_varDB[i].matlIndx = -1;
//d_varDB[i].staging = false;
d_varDB[i].var_ptr = NULL;
d_varDB[i].ghostItem.dest_varDB_index = -1;
}
for (int i = 0; i < MAX_LEVELDB_ITEMS; i++) {
d_levelDB[i].label[0] = '\0';
d_levelDB[i].domainID = -1;
d_levelDB[i].matlIndx = -1;
//d_varDB[i].staging = false;
d_levelDB[i].var_ptr = NULL;
}
for (int i = 0; i < MAX_MATERIALSDB_ITEMS; i++) {
d_materialDB[i].simulationType[0] = '\0';
}
}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::putMaterials( std::vector< std::string > materials)
{
#ifdef __CUDA_ARCH__
//Should not put from device side
#else
//__________________________________
//cpu code
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
//see if a thread has already supplied this datawarehouse with the material data
int numMaterials = materials.size();
if (d_numMaterials != numMaterials) {
//nobody has given us this material data yet, so lets add it in from the beginning.
if (numMaterials > MAX_MATERIALSDB_ITEMS) {
printf("ERROR: out of GPUDataWarehouse space for materials");
exit(-1);
}
for (int i = 0; i < numMaterials; i++) {
if (strcmp(materials.at(i).c_str(), "ideal_gas") == 0) {
d_materialDB[i].material = IDEAL_GAS;
} else {
printf("ERROR: This material has not yet been coded for GPU support\n.");
exit(-1);
}
}
d_numMaterials = numMaterials;
}
}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE int
GPUDataWarehouse::getNumMaterials() const
{
#ifdef __CUDA_ARCH__
return d_numMaterials;
#else
//I don't know if it makes sense to write this for the host side, when it already exists elsewhere host side.
return -1;
#endif
}
//______________________________________________________________________
//
HOST_DEVICE materialType
GPUDataWarehouse::getMaterial(int i) const
{
#ifdef __CUDA_ARCH__
if (i >= d_numMaterials) {
printf("ERROR: Attempting to access material past bounds\n");
assert(0);
}
return d_materialDB[i].material;
#else
//I don't know if it makes sense to write this for the host side, when it already exists elsewhere host side.
printf("getMaterial() is only implemented as a GPU function");
return IDEAL_GAS; //returning something to prevent a compiler error
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::copyGpuGhostCellsToGpuVars() {
#ifndef __CUDA_ARCH__
//Not for the host side
#else
//Copy all ghost cells from their source to their destination.
//The ghost cells could either be only the data that needs to be copied,
//or it could be on an edge of a bigger grid var.
//I believe the x,y,z coordinates of everything should match.
//This could probably be made more efficient by using only perhaps one block,
//copying float 4s, and doing it with instruction level parallelism.
int numThreads = blockDim.x*blockDim.y*blockDim.z;
int blockID = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; //blockID on the grid
int threadID = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z; //threadID in the block
int totalThreads = numThreads * gridDim.x * gridDim.y * gridDim.z;
int assignedCellID;
//go through every ghost cell var we need
for (int i = 0; i < d_numVarDBItems; i++) {
//if (threadID == 0) {
// if (d_varDB[i].ghostItem.dest_varDB_index != -1) {
// printf("d_varDB[%d].label is %s\n", i, d_varDB[d_varDB[i].ghostItem.dest_varDB_index].label, d_numVarDBItems);
// } else {
// printf("d_varDB[%d].label is %s\n", i, d_varDB[i].label, d_numVarDBItems);
// }
//}
//some things in d_varDB are meta data for simulation variables
//other things in d_varDB are meta data for how to copy ghost cells.
//Make sure we're only dealing with ghost cells here
if(d_varDB[i].ghostItem.dest_varDB_index != -1) {
assignedCellID = blockID * numThreads + threadID;
int destIndex = d_varDB[i].ghostItem.dest_varDB_index;
int3 ghostCellSize;
ghostCellSize.x = d_varDB[i].ghostItem.sharedHighCoordinates.x - d_varDB[i].ghostItem.sharedLowCoordinates.x;
ghostCellSize.y = d_varDB[i].ghostItem.sharedHighCoordinates.y - d_varDB[i].ghostItem.sharedLowCoordinates.y;
ghostCellSize.z = d_varDB[i].ghostItem.sharedHighCoordinates.z - d_varDB[i].ghostItem.sharedLowCoordinates.z;
//while there's still work to do (this assigned ID is still within the ghost cell)
while (assignedCellID < ghostCellSize.x * ghostCellSize.y * ghostCellSize.z ) {
int z = assignedCellID / (ghostCellSize.x * ghostCellSize.y);
int temp = assignedCellID % (ghostCellSize.x * ghostCellSize.y);
int y = temp / ghostCellSize.x;
int x = temp % ghostCellSize.x;
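//e.g. (illustrative): for ghostCellSize == (4, 4, 4) and assignedCellID == 37,
//z = 37 / 16 = 2, temp = 37 % 16 = 5, y = 5 / 4 = 1, x = 5 % 4 = 1,
//so this thread handles relative cell (1, 1, 2) of the shared region.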
assignedCellID += totalThreads;
//if we're in a valid x,y,z space for the variable. (It's unlikely every cell will perfectly map onto every available thread.)
if (x < ghostCellSize.x && y < ghostCellSize.y && z < ghostCellSize.z) {
//offset them to their true array coordinates, not relative simulation cell coordinates
//When using virtual addresses, the virtual offset is always applied to the source, but the destination is correct.
int x_source_real = x + d_varDB[i].ghostItem.sharedLowCoordinates.x - d_varDB[i].ghostItem.virtualOffset.x - d_varDB[i].var_offset.x;
int y_source_real = y + d_varDB[i].ghostItem.sharedLowCoordinates.y - d_varDB[i].ghostItem.virtualOffset.y - d_varDB[i].var_offset.y;
int z_source_real = z + d_varDB[i].ghostItem.sharedLowCoordinates.z - d_varDB[i].ghostItem.virtualOffset.z - d_varDB[i].var_offset.z;
//count over array slots.
int sourceOffset = x_source_real + d_varDB[i].var_size.x * (y_source_real + z_source_real * d_varDB[i].var_size.y);
int x_dest_real = x + d_varDB[i].ghostItem.sharedLowCoordinates.x - d_varDB[destIndex].var_offset.x;
int y_dest_real = y + d_varDB[i].ghostItem.sharedLowCoordinates.y - d_varDB[destIndex].var_offset.y;
int z_dest_real = z + d_varDB[i].ghostItem.sharedLowCoordinates.z - d_varDB[destIndex].var_offset.z;
int destOffset = x_dest_real + d_varDB[destIndex].var_size.x * (y_dest_real + z_dest_real * d_varDB[destIndex].var_size.y);
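//Both offsets above are plain row-major (x-fastest) linearizations: offset = x + size.x * (y + z * size.y).
//For example (illustrative), with var_size == (12, 12, 12) a real cell (2, 3, 4) maps to 2 + 12 * (3 + 4 * 12) = 614.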
//if (threadID == 0) {
/* printf("Going to copy, between (%d, %d, %d) from offset %d to offset %d. From starts at (%d, %d, %d) with size (%d, %d, %d) at index %d pointer %p. To starts at (%d, %d, %d) with size (%d, %d, %d).\n",
d_varDB[i].ghostItem.sharedLowCoordinates.x,
d_varDB[i].ghostItem.sharedLowCoordinates.y,
d_varDB[i].ghostItem.sharedLowCoordinates.z,
sourceOffset,
destOffset,
d_varDB[i].var_offset.x, d_varDB[i].var_offset.y, d_varDB[i].var_offset.z,
d_varDB[i].var_size.x, d_varDB[i].var_size.y, d_varDB[i].var_size.z,
i,
d_varDB[i].var_ptr,
d_varDB[destIndex].var_offset.x, d_varDB[destIndex].var_offset.y, d_varDB[destIndex].var_offset.z,
d_varDB[destIndex].var_size.x, d_varDB[destIndex].var_size.y, d_varDB[destIndex].var_size.z);
*/
//}
//copy all 8 bytes of a double in one shot
if (d_varDB[i].sizeOfDataType == sizeof(double)) {
*((double*)(d_varDB[destIndex].var_ptr) + destOffset) = *((double*)(d_varDB[i].var_ptr) + sourceOffset);
//Note: Every now and then I've seen this printf statement get confused, a line will print with the wrong variables/offset variables...
/* printf("Thread %d - %s At (%d, %d, %d), real: (%d, %d, %d), copying within region between (%d, %d, %d) and (%d, %d, %d). Source d_varDB index (%d, %d, %d) varSize (%d, %d, %d) virtualOffset(%d, %d, %d), varOffset(%d, %d, %d), sourceOffset %d actual pointer %p, value %e. Dest d_varDB index %d ptr %p destOffset %d actual pointer. %p\n",
threadID, d_varDB[destIndex].label, x, y, z, x_source_real, y_source_real, z_source_real,
d_varDB[i].ghostItem.sharedLowCoordinates.x, d_varDB[i].ghostItem.sharedLowCoordinates.y, d_varDB[i].ghostItem.sharedLowCoordinates.z,
d_varDB[i].ghostItem.sharedHighCoordinates.x, d_varDB[i].ghostItem.sharedHighCoordinates.y, d_varDB[i].ghostItem.sharedHighCoordinates.z,
x + d_varDB[i].ghostItem.sharedLowCoordinates.x - d_varDB[i].ghostItem.virtualOffset.x,
y + d_varDB[i].ghostItem.sharedLowCoordinates.y - d_varDB[i].ghostItem.virtualOffset.y,
z + d_varDB[i].ghostItem.sharedLowCoordinates.z - d_varDB[i].ghostItem.virtualOffset.z,
d_varDB[i].var_size.x, d_varDB[i].var_size.y, d_varDB[i].var_size.z,
d_varDB[i].ghostItem.virtualOffset.x, d_varDB[i].ghostItem.virtualOffset.y, d_varDB[i].ghostItem.virtualOffset.z,
d_varDB[i].var_offset.x, d_varDB[i].var_offset.y, d_varDB[i].var_offset.z,
sourceOffset, (double*)(d_varDB[i].var_ptr) + sourceOffset, *((double*)(d_varDB[i].var_ptr) + sourceOffset),
destIndex, d_varDB[destIndex].var_ptr, destOffset, (double*)(d_varDB[destIndex].var_ptr) + destOffset);
*/
}
//or copy all 4 bytes of an int in one shot.
else if (d_varDB[i].sizeOfDataType == sizeof(int)) {
*(((int*)d_varDB[destIndex].var_ptr) + destOffset) = *((int*)(d_varDB[i].var_ptr) + sourceOffset);
//Copy each byte until we've copied all for this data type.
} else {
for (int j = 0; j < d_varDB[i].sizeOfDataType; j++) {
*(((char*)d_varDB[destIndex].var_ptr) + (destOffset * d_varDB[destIndex].sizeOfDataType + j))
= *(((char*)d_varDB[i].var_ptr) + (sourceOffset * d_varDB[i].sizeOfDataType + j));
}
}
}
}
}
}
#endif
}
//______________________________________________________________________
//
__global__ void copyGpuGhostCellsToGpuVarsKernel( GPUDataWarehouse *gpudw) {
gpudw->copyGpuGhostCellsToGpuVars();
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::copyGpuGhostCellsToGpuVarsInvoker(hipStream_t* stream)
{
#ifdef __CUDA_ARCH__
//Not for the device side
#else
//see if this GPU datawarehouse has ghost cells in it.
if (numGhostCellCopiesNeeded > 0) {
//call a kernel which gets the copy process started.
OnDemandDataWarehouse::uintahSetCudaDevice(d_device_id);
const int BLOCKSIZE = 1;
int xblocks = 32;
int yblocks = 1;
int zblocks = 1;
dim3 dimBlock(32, 32, 1);
dim3 dimGrid(1, 1, 1); //Give each ghost copying kernel 32 * 32 = 1024 threads to copy
//printf("Launching copyGpuGhostCellsToGpuVarsKernel\n");
//hipDeviceSynchronize();
/*
//View a variable before and after the ghost cell copy
{
hipDeviceSynchronize();
//pull out phi01
Uintah::GPUGridVariable<double> myDeviceVar;
getModifiable( myDeviceVar, "phi1", 0, 0 );
double * uintahDeviceFieldVar = const_cast<double*>( myDeviceVar.getPointer() );
printf("Before the device pointer is %p\n", uintahDeviceFieldVar);
double * hostSideVar = new double[myDeviceVar.getMemSize()/8];
CUDA_RT_SAFE_CALL(hipMemcpy((void*)hostSideVar, (void*)uintahDeviceFieldVar, myDeviceVar.getMemSize(), hipMemcpyDeviceToHost));
printf("Contents of phi1:\n");
for (int i = 0; i < 12; i++) {
for (int j = 0; j < 12; j++) {
printf("%1.3lf ", hostSideVar[i*12+j]);
}
printf("\n");
}
delete[] hostSideVar;
}
*/
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::copyGpuGhostCellsToGpuVarsInvoker() - "
<< " Launching ghost cell copies kernel"
<< " on device " << d_device_id
<< " at GPUDW at " << std::hex << this << std::dec
<< " with description " << _internalName
<< endl;
}
cerrLock.unlock();
}
hipLaunchKernelGGL(( copyGpuGhostCellsToGpuVarsKernel), dim3(dimGrid), dim3(dimBlock), 0, *stream , this->d_device_copy);
//copyGpuGhostCellsToGpuVarsKernel<<< dimGrid, dimBlock >>>(this->d_device_copy);
//printf("Finished copyGpuGhostCellsToGpuVarsKernel\n");
//
/*
{
//pull out phi0
Uintah::GPUGridVariable<double> myDeviceVar;
getModifiable( myDeviceVar, "phi1", 0, 0 );
double * uintahDeviceFieldVar = const_cast<double*>( myDeviceVar.getPointer() );
printf("After the device pointer is %p\n", uintahDeviceFieldVar);
double * hostSideVar = new double[myDeviceVar.getMemSize()/8];
CUDA_RT_SAFE_CALL(hipMemcpy((void*)hostSideVar, (void*)uintahDeviceFieldVar, myDeviceVar.getMemSize(), hipMemcpyDeviceToHost));
printf("Contents of phi1:\n");
for (int i = 0; i < 12; i++) {
for (int j = 0; j < 12; j++) {
printf("%1.3lf ", hostSideVar[i*12+j]);
}
printf("\n");
}
delete[] hostSideVar;
}
*/
}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE bool
GPUDataWarehouse::ghostCellCopiesNeeded()
{
#ifdef __CUDA_ARCH__
//Not implemented for the device side
printError("This method not allowed on the device.", "ghostCellCopiesNeeded");
return false;
#else
//see if this GPU datawarehouse has ghost cells in it.
return (numGhostCellCopiesNeeded > 0);
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::putGhostCell(char const* label, int sourcePatchID, int destPatchID, int matlIndx, int levelIndx,
bool sourceStaging, bool destStaging,
int3 varOffset, int3 varSize,
int3 sharedLowCoordinates, int3 sharedHighCoordinates, int3 virtualOffset) {
#ifdef __CUDA_ARCH__
printf("ERROR:\nGPUDataWarehouse::putGhostCell( %s ) Not implemented for GPU\n",label);
#else
//Add information describing a ghost cell that needs to be copied internally from
//one chunk of data to the destination. This covers a GPU -> same GPU copy scenario.
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
size_t i = d_numVarDBItems;
    if (i >= maxdVarDBItems) {
printf("ERROR: GPUDataWarehouse::putGhostCell( %s ). Exceeded maximum d_varDB entries. Index is %d and max items is %d\n", i, maxdVarDBItems);
exit(-1);
}
int index = -1;
d_numVarDBItems++;
numGhostCellCopiesNeeded++;
d_varDB[i].ghostItem.sharedLowCoordinates = sharedLowCoordinates;
d_varDB[i].ghostItem.sharedHighCoordinates = sharedHighCoordinates;
d_varDB[i].ghostItem.virtualOffset = virtualOffset;
//look up the source index and the destination index for these.
//it may be an entire variable (in which case staging is false)
//or it may be a staging variable.
labelPatchMatlLevel lpml_source(label, sourcePatchID, matlIndx, levelIndx);
if (!sourceStaging) {
if (varPointers->find(lpml_source) != varPointers->end()) {
index = varPointers->operator[](lpml_source).varDB_index;
}
} else {
//Find the variable that contains the region in which our ghost cells exist.
//Usually the sharedLowCoordinates and sharedHighCoordinates correspond
//exactly to the size of the staging variable. But sometimes the ghost data is found within
//a larger staging variable.
stagingVar sv;
sv.device_offset = varOffset;
sv.device_size = varSize;
std::map<stagingVar, stagingVarInfo>::iterator staging_it = varPointers->operator[](lpml_source).stagingVars.find(sv);
if (staging_it != varPointers->operator[](lpml_source).stagingVars.end()) {
index = staging_it->second.varDB_index;
} else {
printf("ERROR: GPUDataWarehouse::putGhostCell( %s ). Number of staging vars for this var: %d, No staging variable found label %s patch %d matl %d level %d offset (%d, %d, %d) size (%d, %d, %d) on DW at %p.\n",
            label, (int)varPointers->operator[](lpml_source).stagingVars.size(), label, sourcePatchID, matlIndx, levelIndx,
sv.device_offset.x, sv.device_offset.y, sv.device_offset.z,
sv.device_size.x, sv.device_size.y, sv.device_size.z,
this);
exit(-1);
}
//Find the d_varDB entry for this specific one.
}
if (index < 0) {
printf("ERROR:\nGPUDataWarehouse::putGhostCell, label %s, source patch ID %d, matlIndx %d, levelIndex %d staging %s not found in GPU DW %p\n",
label, sourcePatchID, matlIndx, levelIndx, sourceStaging ? "true" : "false", this);
exit(-1);
}
//printf("The found index %d for var %s patch %d matl %d\n", index, label, sourcePatchID, matlIndx);
//if (d_varDB[index].varItem.validOnGPU == false) {
//Steps prior to this point should have checked for this scenario.
//This is just a failsafe.
// printf("ERROR:\nGPUDataWarehouse::putGhostCell, attempting to use: label %s, source patch ID %d, materialID %d, it exists but the data is not valid.\n", label, sourcePatchID, matlIndx);
// exit(-1);
//}
d_varDB[i].var_offset = d_varDB[index].var_offset;
d_varDB[i].var_size = d_varDB[index].var_size;
d_varDB[i].var_ptr = d_varDB[index].var_ptr;
d_varDB[i].sizeOfDataType = d_varDB[index].sizeOfDataType;
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::putGhostCell() - "
<< " Placed into d_varDB at index " << i << " of max index " << maxdVarDBItems - 1
<< " from patch " << sourcePatchID << " staging " << sourceStaging << " to patch " << destPatchID << " staging " << destStaging
<< " has shared coordinates (" << sharedLowCoordinates.x << ", " << sharedLowCoordinates.y << ", " << sharedLowCoordinates.z << "),"
<< " (" << sharedHighCoordinates.x << ", " << sharedHighCoordinates.y << ", " << sharedHighCoordinates.z << "), "
<< " from low/offset (" << d_varDB[i].var_offset.x << ", " << d_varDB[i].var_offset.y << ", " << d_varDB[i].var_offset.z << ") "
<< " size (" << d_varDB[i].var_size.x << ", " << d_varDB[i].var_size.y << ", " << d_varDB[i].var_size.z << ") "
<< " virtualOffset (" << d_varDB[i].ghostItem.virtualOffset.x << ", " << d_varDB[i].ghostItem.virtualOffset.y << ", " << d_varDB[i].ghostItem.virtualOffset.z << ") "
<< " datatype size " << d_varDB[i].sizeOfDataType
<< " on device " << d_device_id
<< " at GPUDW at " << std::hex << this<< std::dec
<< endl;
}
cerrLock.unlock();
}
//if (d_debug){
// printf("Placed into d_varDB at index %d from patch %d to patch %d has shared coordinates (%d, %d, %d), (%d, %d, %d), from low/offset (%d, %d, %d) size (%d, %d, %d) virtualOffset(%d, %d, %d)\n",
// i, sourcePatchID, destPatchID, sharedLowCoordinates.x, sharedLowCoordinates.y,
// sharedLowCoordinates.z, sharedHighCoordinates.x, sharedHighCoordinates.y, sharedHighCoordinates.z,
// d_varDB[i].var_offset.x, d_varDB[i].var_offset.y, d_varDB[i].var_offset.z,
// d_varDB[i].var_size.x, d_varDB[i].var_size.y, d_varDB[i].var_size.z,
// d_varDB[i].ghostItem.virtualOffset.x, d_varDB[i].ghostItem.virtualOffset.y, d_varDB[i].ghostItem.virtualOffset.z);
//}
//Find where we are sending the ghost cell data to
labelPatchMatlLevel lpml_dest(label, destPatchID, matlIndx, levelIndx);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml_dest);
if (it != varPointers->end()) {
if (destStaging) {
//TODO: Do the same thing as the source.
//If the destination is staging, then the shared coordinates are also the ghost coordinates.
stagingVar sv;
sv.device_offset = sharedLowCoordinates;
sv.device_size = make_int3(sharedHighCoordinates.x-sharedLowCoordinates.x,
sharedHighCoordinates.y-sharedLowCoordinates.y,
sharedHighCoordinates.z-sharedLowCoordinates.z);
std::map<stagingVar, stagingVarInfo>::iterator staging_it = it->second.stagingVars.find(sv);
if (staging_it != it->second.stagingVars.end()) {
d_varDB[i].ghostItem.dest_varDB_index = staging_it->second.varDB_index;
} else {
printf("\nERROR:\nGPUDataWarehouse::putGhostCell() didn't find a staging variable from the device for offset (%d, %d, %d) and size (%d, %d, %d).\n",
sharedLowCoordinates.x, sharedLowCoordinates.y, sharedLowCoordinates.z,
sv.device_size.x, sv.device_size.y, sv.device_size.z);
exit(-1);
}
} else {
d_varDB[i].ghostItem.dest_varDB_index = it->second.varDB_index;
}
//if (d_debug){
// int destIndex = d_varDB[i].ghostItem.dest_varDB_index;
// printf("The destination ghost cell copy is at d_varDB at index %d with size (%d, %d, %d), offset (%d, %d, %d)\n",
// destIndex,
// d_varDB[destIndex].var_size.x, d_varDB[destIndex].var_size.y, d_varDB[destIndex].var_size.z,
// d_varDB[destIndex].var_offset.x, d_varDB[destIndex].var_offset.y, d_varDB[destIndex].var_offset.z);
//}
} else {
printf("ERROR:\nGPUDataWarehouse::putGhostCell(), label: %s destination patch ID %d, matlIndx %d, levelIndex %d, staging %s not found in GPU DW variable database\n",
label, destPatchID, matlIndx, levelIndx, destStaging ? "true" : "false");
exit(-1);
}
d_dirty=true;
}
#endif
}
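//______________________________________________________________________
//
//Sketch of how the entry added above is later consumed by
//copyGpuGhostCellsToGpuVars() (illustration only; it restates, in pseudocode,
//the copy loop near the top of this file using the same d_varDB fields):
//
//  for each cell (x,y,z) in [sharedLowCoordinates, sharedHighCoordinates):
//    sourceOffset = flat index into d_varDB[i] via its var_offset/var_size
//    destOffset   = flat index into d_varDB[dest_varDB_index] after shifting
//                   the cell by ghostItem.virtualOffset
//    copy sizeOfDataType bytes (double and int fast paths, byte-wise otherwise)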
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::getSizes(int3& low, int3& high, int3& siz, GhostType& gtype, int& numGhostCells,
char const* label, int patchID, int matlIndx, int levelIndx) {
#ifdef __CUDA_ARCH__
printf("ERROR:\nGPUDataWarehouse::getSizes() Not implemented for GPU\n");
#else
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
allVarPointersInfo info = varPointers->operator[](lpml);
low = info.device_offset;
high.x = info.device_size.x + info.device_offset.x;
high.y = info.device_size.y + info.device_offset.y;
high.z = info.device_size.z + info.device_offset.z;
siz = info.device_size;
gtype = info.gtype;
numGhostCells = info.numGhostCells;
}
}
#endif
}
//______________________________________________________________________
//
//Go through all staging vars for a var. See if they are all marked as valid.
__host__ bool GPUDataWarehouse::areAllStagingVarsValid(char const* label, int patchID, int matlIndx, int levelIndx) {
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
if (it != varPointers->end()) {
for (std::map<stagingVar, stagingVarInfo>::iterator staging_it = it->second.stagingVars.begin();
staging_it != it->second.stagingVars.end();
++staging_it) {
if (!checkValid(staging_it->second.atomicStatusInGpuMemory)) {
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::areAllStagingVarsValid() -"
// Task: " << dtask->getName()
<< " Not all staging vars were ready for "
<< label << " patch " << patchID
<< " material " << matlIndx << " level " << levelIndx
<< " offset (" << staging_it->first.device_offset.x
<< ", " << staging_it->first.device_offset.y
<< ", " << staging_it->first.device_offset.z
<< ") and size (" << staging_it->first.device_size.x
<< ", " << staging_it->first.device_size.y
<< ", " << staging_it->first.device_size.z
<< ") with status codes " << getDisplayableStatusCodes(staging_it->second.atomicStatusInGpuMemory) <<endl;
}
cerrLock.unlock();
}
return false;
}
}
}
}
return true;
}
//______________________________________________________________________
//
//Simply performs an atomic fetch on the status variable.
typedef int atomicDataStatus;
__host__ atomicDataStatus
GPUDataWarehouse::getStatus(atomicDataStatus& status) {
return __sync_or_and_fetch(&(status), 0);
}
__host__ string
GPUDataWarehouse::getDisplayableStatusCodes(atomicDataStatus& status) {
atomicDataStatus varStatus = __sync_or_and_fetch(&(status), 0);
string retval = "";
if (varStatus == 0) {
retval += "Unallocated ";
} else {
if ((varStatus & ALLOCATING) == ALLOCATING) {
retval += "Allocating ";
}
if ((varStatus & ALLOCATED) == ALLOCATED) {
retval += "Allocated ";
}
if ((varStatus & COPYING_IN) == COPYING_IN) {
retval += "Copying-in ";
}
if ((varStatus & VALID) == VALID) {
retval += "Valid ";
}
if ((varStatus & AWAITING_GHOST_COPY) == AWAITING_GHOST_COPY) {
retval += "Awaiting-ghost-copy ";
}
if ((varStatus & VALID_WITH_GHOSTS) == VALID_WITH_GHOSTS) {
retval += "Valid-with-ghosts ";
}
if ((varStatus & UNKNOWN) == UNKNOWN) {
retval += "Unknown ";
}
}
return retval;
}
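//______________________________________________________________________
//
//Illustrative lifecycle of the atomicDataStatus bits decoded above (a sketch;
//the flag values themselves live in GPUDataWarehouse.h, and the transitions
//shown are the ones performed by the testAndSet*/setValid* methods below):
//
//  UNALLOCATED                                   -> "Unallocated "
//  |= ALLOCATING                                 -> "Allocating "
//  &= ~ALLOCATING, |= ALLOCATED                  -> "Allocated "
//  |= COPYING_IN                                 -> "Allocated Copying-in "
//  &= ~COPYING_IN, |= VALID                      -> "Allocated Valid "
//  |= AWAITING_GHOST_COPY                        -> "... Awaiting-ghost-copy "
//  &= ~AWAITING_GHOST_COPY, |= VALID_WITH_GHOSTS -> "... Valid-with-ghosts "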
//______________________________________________________________________
//
//returns false if something else already allocated space and we don't have to.
//returns true if we are the ones to allocate the space.
//performs operations with atomic compare and swaps
__host__ bool
GPUDataWarehouse::testAndSetAllocating(atomicDataStatus& status)
{
bool allocating = false;
while (!allocating) {
//get the value
atomicDataStatus oldVarStatus = __sync_or_and_fetch(&(status), 0);
//if it's allocated, return true
if (((oldVarStatus & ALLOCATING) == ALLOCATING) || ((oldVarStatus & ALLOCATED) == ALLOCATED)) {
      //Something else already allocated or is allocating it. So this thread won't do any allocation.
return false;
} else {
//Attempt to claim we'll allocate it. If not go back into our loop and recheck
atomicDataStatus newVarStatus = oldVarStatus | ALLOCATING;
allocating = __sync_bool_compare_and_swap(&status, oldVarStatus, newVarStatus);
}
}
return true;
}
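//______________________________________________________________________
//
//Typical host-side usage sketch of the claim pattern above (it mirrors what
//allocateAndPut() does later in this file; "info" stands for an
//allVarPointersInfo entry from the varPointers map):
//
//  if (testAndSetAllocating(info.atomicStatusInGpuMemory)) {
//    //this thread won the race: allocate the device memory, publish the
//    //pointer via put(), then flip the status bit:
//    testAndSetAllocate(info.atomicStatusInGpuMemory);
//  } else {
//    //another thread is allocating; spin until the ALLOCATED bit appears
//    while (!checkAllocated(info.atomicStatusInGpuMemory)) { /* wait */ }
//  }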
//______________________________________________________________________
//
//Sets the allocated flag on a variable's atomicDataStatus
//This is called after an allocation completes.
__host__ bool
GPUDataWarehouse::testAndSetAllocate(atomicDataStatus& status)
{
bool allocated = false;
//get the value
atomicDataStatus oldVarStatus = __sync_or_and_fetch(&(status), 0);
if ((oldVarStatus & ALLOCATING) == 0) {
//A sanity check
printf("ERROR:\nGPUDataWarehouse::testAndSetAllocate( ) Can't allocate a status if it wasn't previously marked as allocating.\n");
exit(-1);
} else if ((oldVarStatus & ALLOCATED) == ALLOCATED) {
//A sanity check
printf("ERROR:\nGPUDataWarehouse::testAndSetAllocate( ) Can't allocate a status if it's already allocated\n");
exit(-1);
}
else {
//Attempt to claim we'll allocate it. Create what we want the status to look like
//by turning off allocating and turning on allocated.
//Note: No need to turn off UNALLOCATED, it's defined as all zero bits.
//But the below is kept in just for readability's sake.
atomicDataStatus newVarStatus = oldVarStatus & ~UNALLOCATED;
newVarStatus = newVarStatus & ~ALLOCATING;
newVarStatus = newVarStatus | ALLOCATED;
//If we succeeded in our attempt to claim to allocate, this returns true.
    //If we failed, that's a real problem, and we crash the program below.
allocated = __sync_bool_compare_and_swap(&status, oldVarStatus, newVarStatus);
}
if (!allocated) {
//Another sanity check
printf("ERROR:\nGPUDataWarehouse::testAndSetAllocate( ) Something wrongly modified the atomic status while setting the allocated flag\n");
exit(-1);
}
return allocated;
}
//______________________________________________________________________
//
//Simply determines if a variable has been marked as allocated.
__host__ bool
GPUDataWarehouse::checkAllocated(atomicDataStatus& status)
{
return ((__sync_or_and_fetch(&(status), 0) & ALLOCATED) == ALLOCATED);
}
//______________________________________________________________________
//
//Simply determines if a variable has been marked as valid.
__host__ bool
GPUDataWarehouse::checkValid(atomicDataStatus& status)
{
return ((__sync_or_and_fetch(&(status), 0) & VALID) == VALID);
}
//______________________________________________________________________
//
__host__ bool
GPUDataWarehouse::isAllocatedOnGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
bool retVal = ((__sync_fetch_and_or(&(varPointers->operator[](lpml).atomicStatusInGpuMemory), 0) & ALLOCATED) == ALLOCATED);
return retVal;
}
else {
return false;
}
}
}
//______________________________________________________________________
//
__host__ bool
GPUDataWarehouse::isAllocatedOnGPU(char const* label, int patchID, int matlIndx, int levelIndx, int3 offset, int3 size)
{
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
//cout << "In isAllocatedOnGPU - For patchID " << patchID << " for the status is " << getDisplayableStatusCodes(varPointers->operator[](lpml).atomicStatusInGpuMemory) << endl;
bool retVal = ((__sync_fetch_and_or(&(varPointers->operator[](lpml).atomicStatusInGpuMemory), 0) & ALLOCATED) == ALLOCATED);
if (retVal) {
//now check the sizes
int3 device_offset = varPointers->operator[](lpml).device_offset;
int3 device_size = varPointers->operator[](lpml).device_size;
retVal = (device_offset.x == offset.x && device_offset.y == offset.y && device_offset.z == offset.z
&& device_size.x == size.x && device_size.y == size.y && device_size.z == size.z);
}
return retVal;
}
else {
return false;
}
}
}
//______________________________________________________________________
//
__host__ bool
GPUDataWarehouse::isValidOnGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
bool retVal = ((__sync_fetch_and_or(&(varPointers->operator[](lpml).atomicStatusInGpuMemory), 0) & VALID) == VALID);
return retVal;
}
else {
return false;
}
}
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::setValidOnGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
__sync_and_and_fetch(&(varPointers->operator[](lpml).atomicStatusInGpuMemory), ~COPYING_IN);
__sync_or_and_fetch(&(varPointers->operator[](lpml).atomicStatusInGpuMemory), VALID);
}
else {
printf("host setValidOnGPU unknown variable %s on GPUDataWarehouse\n", label);
exit(-1);
}
}
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::setValidOnGPUStaging(char const* label, int patchID, int matlIndx, int levelIndx, int3 offset, int3 size)
{
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
if (it != varPointers->end()) {
stagingVar sv;
sv.device_offset = offset;
sv.device_size = size;
std::map<stagingVar, stagingVarInfo>::iterator staging_it = it->second.stagingVars.find(sv);
if (staging_it != it->second.stagingVars.end()) {
__sync_and_and_fetch(&(staging_it->second.atomicStatusInGpuMemory), ~COPYING_IN);
__sync_or_and_fetch(&(staging_it->second.atomicStatusInGpuMemory), VALID);
}
else {
printf("ERROR:\nGPUDataWarehouse::setValidOnGPUStaging( ) Staging variable %s not found.\n", label);
exit(-1);
}
}
else {
printf("ERROR:\nGPUDataWarehouse::setValidOnGPUStaging( ) Variable %s not found.\n", label);
exit(-1);
}
}
}
//______________________________________________________________________
//
//We have an entry for this item in the GPU DW, and it's not unknown. Therefore
//if this returns true it means this GPU DW specifically knows something about the
//state of this variable. (The reason for the UNKNOWN check: currently, when a
//var is added to the GPUDW, we also need to state what we know about its data in
//host memory. Since we don't yet know, it is marked as UNKNOWN, meaning the
//host-side DW is possibly managing the data.)
__host__ bool GPUDataWarehouse::dwEntryExistsOnCPU(char const* label, int patchID, int matlIndx, int levelIndx) {
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
bool retVal = false;
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
if (it != varPointers->end()) {
if ((it->second.atomicStatusInHostMemory & UNKNOWN) != UNKNOWN) {
retVal = true;
}
}
return retVal;
}
}
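//______________________________________________________________________
//
//Example of the intended semantics (hypothetical label "phi", patch/matl/level 0):
//
//  putUnallocatedIfNotExists("phi", 0, 0, 0, false, make_int3(0,0,0), make_int3(0,0,0));
//  dwEntryExistsOnCPU("phi", 0, 0, 0);   //false: the entry exists, but its
//                                        //atomicStatusInHostMemory is still
//                                        //UNKNOWN, so the host DW may own the data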
//______________________________________________________________________
//
__host__ bool
GPUDataWarehouse::isValidOnCPU(char const* label, const int patchID, const int matlIndx, const int levelIndx)
{
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
bool retVal = ((__sync_fetch_and_or(&(varPointers->operator[](lpml).atomicStatusInHostMemory), 0) & VALID) == VALID);
return retVal;
}
else {
return false;
}
}
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::setValidOnCPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
__sync_and_and_fetch(&(varPointers->operator[](lpml).atomicStatusInHostMemory), ~COPYING_IN);
__sync_or_and_fetch(&(varPointers->operator[](lpml).atomicStatusInHostMemory), VALID);
}
else {
printf("host setValidOnCPU unknown variable %s on GPUDataWarehouse\n", label);
exit(-1);
}
}
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::setAwaitingGhostDataOnGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
__sync_or_and_fetch(&(varPointers->operator[](lpml).atomicStatusInGpuMemory), AWAITING_GHOST_COPY);
}
else {
printf("host setAwaitingGhostDataOnGPU unknown variable %s on GPUDataWarehouse\n", label);
exit(-1);
}
}
}
//______________________________________________________________________
//
//returns false if something else already changed a valid variable to valid awaiting ghost data
//returns true if we are the ones to manage this variable's ghost data.
__host__ bool
GPUDataWarehouse::testAndSetAwaitingGhostDataOnGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
bool allocating = false;
atomicDataStatus *status;
while (!allocating) {
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
      //get the address
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
status = &(varPointers->operator[](lpml).atomicStatusInGpuMemory);
}
else {
printf("ERROR:\nGPUDataWarehouse::testAndSetAwaitingGhostDataOnGPU( ) Variable %s not found.\n", label);
exit(-1);
return false;
}
}
atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0);
if (((oldVarStatus & AWAITING_GHOST_COPY) == AWAITING_GHOST_COPY) || ((oldVarStatus & VALID_WITH_GHOSTS) == VALID_WITH_GHOSTS)) {
//Something else already took care of it. So this task won't manage it.
return false;
} else {
//Attempt to claim we'll manage the ghost cells for this variable. If the claim fails go back into our loop and recheck
atomicDataStatus newVarStatus = oldVarStatus | AWAITING_GHOST_COPY;
allocating = __sync_bool_compare_and_swap(status, oldVarStatus, newVarStatus);
}
}
return true;
}
//______________________________________________________________________
//
//returns false if something else already claimed to copy or has copied data into the GPU.
//returns true if we are the ones to manage this variable's ghost data.
__host__ bool
GPUDataWarehouse::testAndSetCopyingIntoGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
bool copyingin = false;
atomicDataStatus *status;
while (!copyingin) {
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
//get the address
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
status = &(varPointers->operator[](lpml).atomicStatusInGpuMemory);
}
else {
printf("ERROR:\nGPUDataWarehouse::testAndSetCopyingIntoGPU( ) Variable %s not found.\n", label);
exit(-1);
return false;
}
}
atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0);
if (oldVarStatus == UNALLOCATED) {
printf("ERROR:\nGPUDataWarehouse::testAndSetCopyingIntoGPU( ) Variable %s is unallocated.\n", label);
exit(-1);
}
if (((oldVarStatus & COPYING_IN) == COPYING_IN) ||
((oldVarStatus & VALID) == VALID) ||
((oldVarStatus & VALID_WITH_GHOSTS) == VALID_WITH_GHOSTS)) {
//Something else already took care of it. So this task won't manage it.
return false;
} else {
//Attempt to claim we'll manage the ghost cells for this variable. If the claim fails go back into our loop and recheck
atomicDataStatus newVarStatus = oldVarStatus | COPYING_IN;
copyingin = __sync_bool_compare_and_swap(status, oldVarStatus, newVarStatus);
}
}
return true;
}
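//______________________________________________________________________
//
//Host-side sketch of how this claim is meant to be used (illustrative only;
//the scheduler owns the real copy logic and the stream management):
//
//  if (gpudw->testAndSetCopyingIntoGPU(label, patchID, matlIndx, levelIndx)) {
//    //this task won the claim: issue the asynchronous host->device copy on
//    //the task's stream, e.g.
//    //  hipMemcpyAsync(devPtr, hostPtr, nbytes, hipMemcpyHostToDevice, *stream);
//    //and, once that copy has completed, mark the variable usable:
//    gpudw->setValidOnGPU(label, patchID, matlIndx, levelIndx);
//  } //else: another task is already copying it in, or it is already VALID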
//______________________________________________________________________
//
//returns false if something else already claimed to copy or has copied data into the CPU.
//returns true if we are the ones to manage this variable's ghost data.
__host__ bool
GPUDataWarehouse::testAndSetCopyingIntoCPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
bool copyingin = false;
atomicDataStatus *status;
while (!copyingin) {
var_monitor var_read_lock { Uintah::CrowdMonitor<var_tag>::READER };
{
//get the address
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
status = &(varPointers->operator[](lpml).atomicStatusInHostMemory);
}
else {
printf("ERROR:\nGPUDataWarehouse::testAndSetCopyingIntoCPU( ) Variable %s not found.\n", label);
exit(-1);
return false;
}
}
atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0);
//We don't have good tracking of CPU vars at the moment.
//if (oldVarStatus == UNALLOCATED) {
// printf("ERROR:\nGPUDataWarehouse::testAndSetCopyingIntoCPU( ) Variable %s is unallocated.\n", label);
// exit(-1);
//}
if (((oldVarStatus & COPYING_IN) == COPYING_IN) ||
((oldVarStatus & VALID) == VALID) ||
((oldVarStatus & VALID_WITH_GHOSTS) == VALID_WITH_GHOSTS)) {
//Something else already took care of it. So this task won't manage it.
return false;
} else {
//Attempt to claim we'll manage the ghost cells for this variable. If the claim fails go back into our loop and recheck
atomicDataStatus newVarStatus = oldVarStatus | COPYING_IN;
copyingin = __sync_bool_compare_and_swap(status, oldVarStatus, newVarStatus);
}
}
return true;
}
//______________________________________________________________________
//
//returns false if something else already claimed to copy or has copied data into the GPU.
//returns true if we are the ones to manage this variable's ghost data.
__host__ bool
GPUDataWarehouse::testAndSetCopyingIntoGPUStaging(char const* label, int patchID, int matlIndx, int levelIndx, int3 offset, int3 size)
{
bool copyingin = false;
atomicDataStatus *status;
while (!copyingin) {
//get the address
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
if (it != varPointers->end()) {
stagingVar sv;
sv.device_offset = offset;
sv.device_size = size;
std::map<stagingVar, stagingVarInfo>::iterator staging_it = it->second.stagingVars.find(sv);
if (staging_it != it->second.stagingVars.end()) {
status = &(staging_it->second.atomicStatusInGpuMemory);
}
else {
printf("ERROR:\nGPUDataWarehouse::testAndSetCopyingIntoGPUStaging( ) Staging variable %s not found.\n", label);
exit(-1);
return false;
}
}
else {
printf("ERROR:\nGPUDataWarehouse::testAndSetCopyingIntoGPUStaging( ) Variable %s not found.\n", label);
exit(-1);
return false;
}
}
atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0);
if (oldVarStatus == UNALLOCATED) {
printf("ERROR:\nGPUDataWarehouse::testAndSetCopyingIntoGPUStaging( ) Variable %s is unallocated.\n", label);
exit(-1);
} else if ((oldVarStatus & VALID_WITH_GHOSTS) == VALID_WITH_GHOSTS) {
printf("ERROR:\nGPUDataWarehouse::testAndSetCopyingIntoGPUStaging( ) Variable %s is marked as valid with ghosts, that should never happen with staging vars.\n", label);
exit(-1);
} else if (((oldVarStatus & COPYING_IN) == COPYING_IN) ||
((oldVarStatus & VALID) == VALID)) {
//Something else already took care of it. So this task won't manage it.
return false;
} else {
//Attempt to claim we'll manage the ghost cells for this variable. If the claim fails go back into our loop and recheck
atomicDataStatus newVarStatus = oldVarStatus | COPYING_IN;
copyingin = __sync_bool_compare_and_swap(status, oldVarStatus, newVarStatus);
}
}
return true;
}
//______________________________________________________________________
//
__host__ bool
GPUDataWarehouse::isValidWithGhostsOnGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
bool retVal = ((__sync_fetch_and_or(&(varPointers->operator[](lpml).atomicStatusInGpuMemory), 0) & VALID_WITH_GHOSTS)
== VALID_WITH_GHOSTS);
return retVal;
}
else {
return false;
}
}
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::setValidWithGhostsOnGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
//make sure the valid is still turned on
__sync_or_and_fetch(&(varPointers->operator[](lpml).atomicStatusInGpuMemory), VALID);
//turn off AWAITING_GHOST_COPY
__sync_and_and_fetch(&(varPointers->operator[](lpml).atomicStatusInGpuMemory), ~AWAITING_GHOST_COPY);
//turn on VALID_WITH_GHOSTS
__sync_or_and_fetch(&(varPointers->operator[](lpml).atomicStatusInGpuMemory), VALID_WITH_GHOSTS);
}
    else {
      printf("host setValidWithGhostsOnGPU unknown variable %s on GPUDataWarehouse\n", label);
      exit(-1);
    }
}
}
//______________________________________________________________________
//
__device__ void
GPUDataWarehouse::print()
{
#ifdef __CUDA_ARCH__
__syncthreads();
if( isThread0_Blk0() ){
printf("\nVariables in GPUDataWarehouse\n");
for (int i = 0; i < d_numVarDBItems; i++) {
dataItem me = d_varDB[i];
printf(" %-15s matl: %i, patchID: %i, L-%i, size:[%i,%i,%i] pointer: %p\n", me.label, me.matlIndx,
me.domainID, me.levelIndx, me.var_size.x, me.var_size.y, me.var_size.z, me.var_ptr);
}
__syncthreads();
printThread();
printBlock();
printf("\n");
}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::printError(const char* msg, const char* methodName, char const* label, int patchID, int matlIndx, int levelIndx )
{
#ifdef __CUDA_ARCH__
__syncthreads();
if( isThread0() ){
if (label[0] == '\0') {
printf(" \nERROR GPU-side: GPUDataWarehouse::%s() - %s\n", methodName, msg );
} else {
printf(" \nERROR GPU-side: GPUDataWarehouse::%s(), label: \"%s\", patch: %i, matlIndx: %i, levelIndx: %i - %s\n", methodName, label, patchID, matlIndx, levelIndx, msg);
}
//Should this just loop through the variable database and print out only items with a
//levelIndx value greater than zero? -- Brad
//for (int i = 0; i < d_numLevelItems; i++) {
// printf(" Available levelDB labels(%i): \"%-15s\" matl: %i, L-%i \n", d_numLevelItems, d_levelDB[i].label, d_levelDB[i].matlIndx, d_levelDB[i].levelIndx);
// }
__syncthreads();
printThread();
printBlock();
// we know this is fatal and why, so just stop kernel execution
__threadfence();
asm("trap;");
}
#else
//__________________________________
// CPU code
if (label[0] == '\0') {
printf(" \nERROR host-side: GPUDataWarehouse::%s() - %s\n", methodName, msg );
} else {
printf(" \nERROR host-side: GPUDataWarehouse::%s(), label: \"%s\", patch: %i, matlIndx: %i, levelIndx: %i - %s\n", methodName, label, patchID, matlIndx, levelIndx, msg);
  }
  //for (int i = 0; i < d_numLevelItems; i++) {
  //  printf("   Available levelDB labels(%i): \"%-15s\" matl: %i, L-%i \n", d_numLevelItems, d_levelDB[i].label, d_levelDB[i].matlIndx, d_levelDB[i].levelIndx);
  //}
exit(-1);
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::printGetLevelError(const char* msg, char const* label, int levelIndx, int matlIndx)
{
#ifdef __CUDA_ARCH__
__syncthreads();
if( isThread0() ){
printf(" \nERROR: %s( \"%s\", levelIndx: %i, matl: %i) unknown variable\n", msg, label, levelIndx, matlIndx);
//Should this just loop through the variable database and print out only items with a
//levelIndx value greater than zero? -- Brad
//for (int i = 0; i < d_numLevelItems; i++) {
// printf(" Available levelDB labels(%i): \"%-15s\" matl: %i, L-%i \n", d_numLevelItems, d_levelDB[i].label, d_levelDB[i].matlIndx, d_levelDB[i].levelIndx);
// }
__syncthreads();
printThread();
printBlock();
// we know this is fatal and why, so just stop kernel execution
__threadfence();
asm("trap;");
}
#else
//__________________________________
// CPU code
printf(" \nERROR: %s( \"%s\", levelIndx: %i, matl: %i) unknown variable\n", msg, label, levelIndx, matlIndx);
//for (int i = 0; i < d_numLevelItems; i++) {
// printf(" Available levelDB labels(%i): \"%-15s\" matl: %i, L-%i \n", d_numLevelItems, d_levelDB[i].label, d_levelDB[i].matlIndx, d_levelDB[i].levelIndx);
//}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::printGetError(const char* msg, char const* label, int levelIndx, int patchID, int matlIndx)
{
#ifdef __CUDA_ARCH__
__syncthreads();
if( isThread0() ) {
printf(" \nERROR: %s( \"%s\", levelIndx: %i, patchID: %i, matl: %i) unknown variable\n",
msg, label, levelIndx, patchID, matlIndx);
for (int i = 0; i < d_numVarDBItems; i++) {
printf(" Available varDB labels(%i of %i): \"%-15s\" matl: %i, patchID: %i, level: %i\n", i, d_numVarDBItems, d_varDB[i].label, d_varDB[i].matlIndx,
d_varDB[i].domainID, d_varDB[i].levelIndx);
}
__syncthreads();
printThread();
printBlock();
printf("\n");
// we know this is fatal and why, so just stop kernel execution
__threadfence();
asm("trap;");
}
#else
//__________________________________
// CPU code
printf(" \nERROR: %s( \"%s\", levelIndx: %i, patchID: %i, matl: %i) unknown variable in DW %s\n",
msg, label, levelIndx, patchID, matlIndx, _internalName);
for (int i = 0; i < d_numVarDBItems; i++) {
printf(" Available varDB labels(%i): \"%-15s\" matl: %i, patchID: %i, level: %i\n", d_numVarDBItems, d_varDB[i].label, d_varDB[i].matlIndx,
d_varDB[i].domainID, d_varDB[i].levelIndx);
}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void*
GPUDataWarehouse::getPlacementNewBuffer()
{
#ifdef __CUDA_ARCH__
printf("GPUDataWarehouse::getPlacementNewBuffer() not for device code\n");
return NULL;
#else
return placementNewBuffer;
#endif
}
| ecbce6f764c3158335dfa69e452a81a29f6a92e5.cu | /*
* The MIT License
*
* Copyright (c) 1997-2016 The University of Utah
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/* GPU DataWarehouse device & host access*/
#include <CCA/Components/Schedulers/GPUDataWarehouse.h>
#include <Core/Grid/Variables/GPUVariable.h>
#include <Core/Grid/Variables/GPUGridVariable.h>
#include <Core/Grid/Variables/GPUReductionVariable.h>
#include <Core/Grid/Variables/GPUPerPatch.h>
#include <CCA/Components/Schedulers/UnifiedScheduler.h>
#include <CCA/Components/Schedulers/GPUMemoryPool.h>
#include <sci_defs/cuda_defs.h>
#include <Core/Parallel/CrowdMonitor.hpp>
#include <Core/Parallel/Parallel.h>
#include <Core/Parallel/ProcessorGroup.h>
#include <CCA/Components/Schedulers/SchedulerCommon.h>
#include <Core/Util/DebugStream.h>
#ifndef __CUDA_ARCH__
# include <string.h>
#include <string>
using namespace std;
#endif
#include <Core/Util/GPU.h>
extern DebugStream gpu_stats;
#include <mutex>
#include <map>
using std::map;
extern std::mutex cerrLock;
namespace {
// These are for uniquely identifying the Uintah::CrowdMonitors<Tag>
// used to protect multi-threaded access to global data structures
struct allocate_tag{};
struct var_tag{};
using allocate_monitor = Uintah::CrowdMonitor<allocate_tag>;
using var_monitor = Uintah::CrowdMonitor<var_tag>;
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::get(const GPUGridVariableBase& var, char const* label, const int patchID, const int matlIndx, const int levelIndx)
{
#ifdef __CUDA_ARCH__
//device code
GPUDataWarehouse::dataItem* item = getItem(label, patchID, matlIndx, levelIndx);
if (item) {
var.setArray3(item->var_offset, item->var_size, item->var_ptr);
}
else {
printGetError("GPUDataWarehouse::get(GPUGridVariableBase& var, ...)", label, levelIndx, patchID, matlIndx);
}
#else
//host code
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
allVarPointersInfo vp = varPointers->operator[](lpml);
var.setArray3(vp.device_offset, vp.device_size, vp.device_ptr);
}
else {
printf("I'm GPUDW with name: \"%s\" at %p \n", _internalName, this);
printGetError("GPUDataWarehouse::get(GPUGridVariableBase& var, ...)", label, levelIndx, patchID, matlIndx);
}
}
#endif
}
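//______________________________________________________________________
//
//Device-side usage sketch (illustrative; "phi" and the indices are hypothetical,
//and GPUGridVariable indexing follows Core/Grid/Variables/GPUGridVariable.h):
//
//  __global__ void exampleTask(GPUDataWarehouse* old_gpudw) {
//    GPUGridVariable<double> phi;
//    old_gpudw->get(phi, "phi", patchID, matlIndx, levelIndx);
//    double v = phi(i, j, k);   //reads through the pointer set by setArray3() above
//  }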
//______________________________________________________________________
//
HOST_DEVICE bool
GPUDataWarehouse::stagingVarExists(char const* label, int patchID, int matlIndx, int levelIndx, int3 offset, int3 size)
{
#ifdef __CUDA_ARCH__
//device code
printError("This method not defined for the device.", "stagingVarExists", label, patchID, matlIndx, levelIndx);
return false;
#else
//host code
bool retval = false;
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
if (it != varPointers->end()) {
stagingVar sv;
sv.device_offset = offset;
sv.device_size = size;
std::map<stagingVar, stagingVarInfo>::iterator staging_it = it->second.stagingVars.find(sv);
retval = (staging_it != it->second.stagingVars.end());
}
}
return retval;
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::getStagingVar(const GPUGridVariableBase& var, char const* label, int patchID, int matlIndx, int levelIndx, int3 offset, int3 size)
{
#ifdef __CUDA_ARCH__
//device code
printError("This method not defined for the device.", "getStagingVar", label, patchID, matlIndx, levelIndx);
#else
//host code
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
if (it != varPointers->end()) {
stagingVar sv;
sv.device_offset = offset;
sv.device_size = size;
std::map<stagingVar, stagingVarInfo>::iterator staging_it = it->second.stagingVars.find(sv);
if (staging_it != it->second.stagingVars.end()) {
var.setArray3(offset, size, staging_it->second.device_ptr);
}
else {
printf(
"GPUDataWarehouse::getStagingVar() - Didn't find a staging variable from the device for label %s patch %d matl %d level %d offset (%d, %d, %d) size (%d, %d, %d).",
label, patchID, matlIndx, levelIndx, offset.x, offset.y, offset.z, size.x, size.y, size.z);
exit(-1);
}
}
else {
printError("Didn't find a staging variable from the device.", "getStagingVar", label, patchID, matlIndx, levelIndx);
}
}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::getLevel(const GPUGridVariableBase& var, char const* label, int matlIndx, int levelIndx)
{
#ifdef __CUDA_ARCH__
//device code
get(var, label, -99999999, matlIndx, levelIndx);
#else
//host code
get(var, label, -99999999, matlIndx, levelIndx);
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::get(const GPUReductionVariableBase& var, char const* label, const int patchID, const int matlIndx, const int levelIndx)
{
#ifdef __CUDA_ARCH__
GPUDataWarehouse::dataItem* item = getItem(label, patchID, matlIndx, levelIndx);
if (item) {
var.setData(item->var_ptr);
}
else {
printGetError("GPUDataWarehouse::get(GPUReductionVariableBase& var, ...)", label, levelIndx, patchID, matlIndx);
}
#else
//host code
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
allVarPointersInfo vp = varPointers->operator[](lpml);
var.setData(vp.device_ptr);
}
else {
printGetError("GPUDataWarehouse::get(GPUReductionVariableBase& var, ...)", label, levelIndx, patchID, matlIndx);
}
}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::get(const GPUPerPatchBase& var, char const* label, const int patchID, const int matlIndx, const int levelIndx)
{
#ifdef __CUDA_ARCH__
//device code
GPUDataWarehouse::dataItem* item = getItem(label, patchID, matlIndx, levelIndx);
if (item) {
var.setData(item->var_ptr);
}
else {
printGetError("GPUDataWarehouse::get(GPUPerPatchBase& var, ...)", label, levelIndx, patchID, matlIndx);
}
#else
//host code
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
allVarPointersInfo vp = varPointers->operator[](lpml);
var.setData(vp.device_ptr);
}
else {
printGetError("GPUDataWarehouse::get(GPUPerPatchBase& var, ...)", label, levelIndx, patchID, matlIndx);
}
}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::getModifiable(GPUGridVariableBase& var, char const* label, int patchID, int matlIndx, int levelIndx)
{
#ifdef __CUDA_ARCH__
//device code
GPUDataWarehouse::dataItem* item = getItem(label, patchID, matlIndx, levelIndx);
if (item) {
var.setArray3(item->var_offset, item->var_size, item->var_ptr);
}
else {
printGetError("GPUDataWarehouse::getModifiable(GPUGridVariableBase& var, ...)", label, levelIndx, patchID, matlIndx);
}
#else
//host code
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
allVarPointersInfo vp = varPointers->operator[](lpml);
var.setArray3(vp.device_offset, vp.device_size, vp.device_ptr);
}
else {
printGetError("GPUDataWarehouse::get(GPUGridVariableBase& var, ...)", label, levelIndx, patchID, matlIndx);
}
}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::getModifiable(GPUReductionVariableBase& var, char const* label, int patchID, int matlIndx, int levelIndx)
{
#ifdef __CUDA_ARCH__
//device code
GPUDataWarehouse::dataItem* item = getItem(label, patchID, matlIndx, levelIndx);
if (item) {
var.setData(item->var_ptr);
}
else {
printGetError("GPUDataWarehouse::getModifiable(GPUReductionVariableBase& var, ...)", label, levelIndx, patchID, matlIndx);
}
#else
//host code
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
allVarPointersInfo vp = varPointers->operator[](lpml);
var.setData(vp.device_ptr);
}
else {
printGetError("GPUDataWarehouse::get(GPUReductionVariableBase& var, ...)", label, levelIndx, patchID, matlIndx);
}
}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::getModifiable(GPUPerPatchBase& var, char const* label, int patchID, int matlIndx, int levelIndx)
{
#ifdef __CUDA_ARCH__
//device code
GPUDataWarehouse::dataItem* item = getItem(label, patchID, matlIndx, levelIndx);
if (item) {
var.setData(item->var_ptr);
}
else {
printGetError("GPUDataWarehouse::getModifiable(GPUPerPatchBase& var, ...)", label, levelIndx, patchID, matlIndx);
}
#else
//host code
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
allVarPointersInfo vp = varPointers->operator[](lpml);
var.setData(vp.device_ptr);
}
else {
printGetError("GPUDataWarehouse::get(GPUPerPatchBase& var, ...)", label, levelIndx, patchID, matlIndx);
}
}
#endif
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::put(GPUGridVariableBase &var, size_t sizeOfDataType, char const* label, int patchID, int matlIndx, int levelIndx, bool staging,
GhostType gtype, int numGhostCells, void* host_ptr)
{
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
int3 var_offset; // offset
int3 var_size; // dimensions of GPUGridVariable
void* var_ptr; // raw pointer to the memory
var.getArray3(var_offset, var_size, var_ptr);
//See if it already exists. Also see if we need to update this into d_varDB.
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator iter = varPointers->find(lpml);
std::map<stagingVar, stagingVarInfo>::iterator staging_it;
//sanity checks
if (iter == varPointers->end()) {
printf("ERROR:\nGPUDataWarehouse::put( ) Can't use put() for a host-side GPU DW without it first existing in the internal database.\n");
exit(-1);
} else if (staging) {
stagingVar sv;
sv.device_offset = var_offset;
sv.device_size = var_size;
staging_it = iter->second.stagingVars.find(sv);
if (staging_it == iter->second.stagingVars.end()) {
printf("ERROR:\nGPUDataWarehouse::put( ) Can't use put() for a host-side GPU DW without this staging var first existing in the internal database.\n");
exit(-1);
}
}
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::put( " << label << " ) - "
<< " Attempting to put a variable in the host-side varPointers map for label " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx;
if (staging) {
gpu_stats << " staging: true";
} else {
gpu_stats << " staging: false";
}
gpu_stats << " at device address " << var_ptr
<< " with status codes ";
if (!staging) {
gpu_stats << getDisplayableStatusCodes(iter->second.atomicStatusInGpuMemory);
} else {
gpu_stats << getDisplayableStatusCodes(staging_it->second.atomicStatusInGpuMemory);
}
gpu_stats << " datatype size " << sizeOfDataType
<< " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec
<< " with description " << _internalName
<< " current varPointers size is: " << varPointers->size()
<< " low (" << var_offset.x << ", " << var_offset.y << ", " << var_offset.z << ") "
<< endl;
}
cerrLock.unlock();
}
if (staging == false) {
iter->second.varDB_index = -1;
iter->second.device_ptr = var_ptr;
iter->second.device_offset = var_offset;
iter->second.device_size = var_size;
iter->second.sizeOfDataType = sizeOfDataType;
iter->second.gtype = gtype;
iter->second.numGhostCells = numGhostCells;
iter->second.host_contiguousArrayPtr = host_ptr;
iter->second.atomicStatusInHostMemory = UNKNOWN;
//previously set, do not set here
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::put( " << label << " ) - "
<< " Put a regular non-staging variable in the host-side varPointers map for label " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " at device address " << var_ptr
<< " with datatype size " << iter->second.sizeOfDataType
<< " with status codes " << getDisplayableStatusCodes(iter->second.atomicStatusInGpuMemory)
<< " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec
<< " with description " << _internalName
<< " current varPointers size is: " << varPointers->size()
<< endl;
}
cerrLock.unlock();
}
} else { // if (staging == true)
staging_it->second.device_ptr = var_ptr;
staging_it->second.host_contiguousArrayPtr = host_ptr;
staging_it->second.varDB_index = -1;
staging_it->second.atomicStatusInHostMemory = UNKNOWN;
//Update the non-staging var's sizeOfDataType. The staging var uses this number.
      //It's possible that a staging var exists alongside an empty placeholder non-staging var;
      //if so, the empty placeholder non-staging var won't have the correct data type size.
//So we grab it here.
iter->second.sizeOfDataType = sizeOfDataType;
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::put( " << label << " ) - "
<< " Put a staging variable in the host-side varPointers map for label " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " with offset (" << var_offset.x << ", " << var_offset.y << ", " << var_offset.z << ")"
<< " and size (" << var_size.x << ", " << var_size.y << ", " << var_size.z << ")"
<< " at device address " << var_ptr
<< " with datatype size " << iter->second.sizeOfDataType
<< " with status codes " << getDisplayableStatusCodes(staging_it->second.atomicStatusInGpuMemory)
<< " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec
<< endl;
}
cerrLock.unlock();
}
}
} // end var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::WRITER }
}
//______________________________________________________________________
//
//This method puts an empty placeholder entry into the GPUDW database and marks it as unallocated
__host__ void
GPUDataWarehouse::putUnallocatedIfNotExists(char const* label, int patchID, int matlIndx, int levelIndx, bool staging, int3 offset, int3 size)
{
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
//if (!staging) {
//If it's a normal non-staging variable, check if doesn't exist. If so, add an "unallocated" entry.
//If it's a staging variable, then still check if the non-staging part exists. A staging must exist within a non-staging variable.
//A scenario where this can get a staging variable without a non-staging variable is receiving data from neighbor nodes.
//For example, suppose node A has patch 0, and node B has patch 1, and A's patch 0 needs ghost cells from B's patch 1. Node A will
//receive those ghost cells, but they will be marked as belonging to patch 1. Since A doesn't have the regular non-staging var
//for patch 1, we make an empty placeholder for patch 1 so A can have a staging var to hold the ghost cell for patch 1.
if ( it == varPointers->end()) {
allVarPointersInfo vp;
vp.varDB_index = -1;
vp.device_ptr = NULL;
vp.atomicStatusInHostMemory = UNKNOWN;
vp.atomicStatusInGpuMemory = UNALLOCATED;
vp.host_contiguousArrayPtr = NULL;
vp.sizeOfDataType = 0;
std::pair<std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator, bool> ret = varPointers->insert( std::map<labelPatchMatlLevel, allVarPointersInfo>::value_type( lpml, vp ) );
if (!ret.second) {
printf("ERROR:\nGPUDataWarehouse::putUnallocatedIfNotExists( ) Failure inserting into varPointers map.\n");
exit(-1);
}
it = ret.first;
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::putUnallocatedIfNotExists( " << label << " ) - "
<< " Put an unallocated non-staging variable in the host-side varPointers map for label " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec
<< " with description " << _internalName
<< endl;
}
cerrLock.unlock();
}
}
//} else { //staging = true
if (staging) {
std::map<stagingVar, stagingVarInfo>::iterator staging_it;
stagingVar sv;
sv.device_offset = offset;
sv.device_size = size;
staging_it = it->second.stagingVars.find(sv);
if (staging_it == it->second.stagingVars.end()){
stagingVarInfo svi;
svi.varDB_index = -1;
svi.device_ptr = NULL;
svi.host_contiguousArrayPtr = NULL;
svi.atomicStatusInHostMemory = UNKNOWN;
svi.atomicStatusInGpuMemory = UNALLOCATED;
std::pair<stagingVar, stagingVarInfo> p = make_pair( sv, svi );
it->second.stagingVars.insert( p );
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::putUnallocatedIfNotExists( " << label << " ) - "
<< " Put an unallocated staging variable in the host-side varPointers map for label " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
<< " size (" << size.x << ", " << size.y << ", " << size.z << ")"
<< " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec
<< " with description " << _internalName
<< endl;
}
cerrLock.unlock();
}
}
}
} // end var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER }
}
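//______________________________________________________________________
//
//Host-side lifecycle sketch tying the pieces together (illustrative; this is
//the sequence that allocateAndPut() below and the scheduler drive):
//
//  putUnallocatedIfNotExists(...);           //placeholder entry, status UNALLOCATED
//  if (testAndSetAllocating(status)) {       //win the allocation race
//    //allocate device memory, then record pointer/offset/size:
//    put(var, sizeOfDataType, ...);
//    testAndSetAllocate(status);             //ALLOCATING -> ALLOCATED
//  }
//  if (testAndSetCopyingIntoGPU(...)) {      //win the copy-in race
//    //async H2D copy, then setValidOnGPU()  //COPYING_IN -> VALID
//  }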
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::allocateAndPut(GPUGridVariableBase &var, char const* label, int patchID, int matlIndx, int levelIndx, bool staging, int3 low, int3 high, size_t sizeOfDataType, GhostType gtype, int numGhostCells)
{
//Allocate space on the GPU and declare a variable onto the GPU.
//This method does NOT stage everything in a big array.
//Check if it exists prior to allocating memory for it.
//If it has already been allocated, just use that.
//If it hasn't, this is lock free and the first thread to request allocating gets to allocate
  //If another thread sees that allocating is in progress, it loops and waits until the allocation is complete.
bool allocationNeeded = false;
int3 size = make_int3(high.x-low.x, high.y-low.y, high.z-low.z);
int3 offset = low;
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread() << " Calling putUnallocatedIfNotExists() for " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " staging: " << std::boolalpha << staging
<< " with offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
<< " and size (" << size.x << ", " << size.y << ", " << size.z << ")"
<< " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec
<< " with description " << _internalName << endl;
}
cerrLock.unlock();
}
//This variable may not yet exist. But we want to declare we're allocating it. So ensure there is an entry.
putUnallocatedIfNotExists(label, patchID, matlIndx, levelIndx, staging, offset, size);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it;
std::map<stagingVar, stagingVarInfo>::iterator staging_it;
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
it = varPointers->find(lpml);
if (staging) {
stagingVar sv;
sv.device_offset = offset;
sv.device_size = size;
staging_it = it->second.stagingVars.find(sv);
}
}
  //Locking is not needed from here on out in this method. STL maps ensure that iterators point to correct values
//even if other threads add nodes. We just can't remove values, but that shouldn't ever happen.
//This prepares the var with the offset and size. Any possible allocation will come later.
//If it needs to go into the database, that will also come later
void* addr = NULL;
var.setArray3(offset, size, addr);
//Now see if we allocate the variable or use a previous existing allocation.
if (staging == false) {
//See if someone has stated they are allocating it
allocationNeeded = testAndSetAllocating(it->second.atomicStatusInGpuMemory);
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::allocateAndPut( " << label << " ) - "
<< " allocationNeeded is " << std::boolalpha << allocationNeeded
<< " for label " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " with offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
<< " and size (" << size.x << ", " << size.y << ", " << size.z << ")"
<< endl;
}
cerrLock.unlock();
}
if (!allocationNeeded) {
//Someone else is allocating it or it has already been allocated.
//Wait until they are done.
bool allocated = false;
while (!allocated) {
allocated = checkAllocated(it->second.atomicStatusInGpuMemory);
}
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
//Sanity check to ensure we have correct size information.
it = varPointers->find(lpml);
}
if (it->second.device_offset.x == low.x
&& it->second.device_offset.y == low.y
&& it->second.device_offset.z == low.z
&& it->second.device_size.x == size.x
&& it->second.device_size.y == size.y
&& it->second.device_size.z == size.z) {
//Space for this var already exists. Use that and return.
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::allocateAndPut( " << label << " ) - "
<< " This non-staging/regular variable already exists. No need to allocate another. GPUDW has a variable for label " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " with offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
<< " and size (" << size.x << ", " << size.y << ", " << size.z << ")"
<< " on device " << d_device_id
<< " with data pointer " << it->second.device_ptr
<< " with status codes " << getDisplayableStatusCodes(it->second.atomicStatusInGpuMemory)
<< " into GPUDW at " << std::hex << this << std::dec
<< endl;
}
cerrLock.unlock();
}
//Have this var use the existing memory address.
var.setArray3(it->second.device_offset, it->second.device_size, it->second.device_ptr);
} else {
printf("ERROR:\nGPUDataWarehouse::allocateAndPut( %s ) Variable in database but of the wrong size. This shouldn't ever happen. this needs low (%d, %d, %d,) and size (%d, %d, %d), but in the database it is low (%d, %d, %d,) and size (%d, %d, %d)\n",
label, low.x, low.y, low.z, size.x, size.y, size.z,
it->second.device_offset.x, it->second.device_offset.y, it->second.device_offset.z,
it->second.device_size.x, it->second.device_size.y, it->second.device_size.z);
exit(-1);
}
}
} else {
//it's a staging variable
if (staging_it != it->second.stagingVars.end()) {
////This variable exists in the database, no need to "put" it in again.
//putNeeded = false;
//See if someone has stated they are allocating it
allocationNeeded = testAndSetAllocating(staging_it->second.atomicStatusInGpuMemory);
if (!allocationNeeded) {
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::allocateAndPut( " << label << " ) - "
<< " This staging variable already exists. No need to allocate another. For label " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " with offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
<< " and size (" << size.x << ", " << size.y << ", " << size.z << ")"
<< " on device " << d_device_id
<< " with data pointer " << staging_it->second.device_ptr
<< " with status codes " << getDisplayableStatusCodes(staging_it->second.atomicStatusInGpuMemory)
<< " into GPUDW at " << std::hex << this << std::dec
<< endl;
}
cerrLock.unlock();
}
//We need the pointer. We can't move on until we get the pointer.
//Ensure that it has been allocated (just not allocating). Another thread may have been assigned to allocate it
//but not completed that action. If that's the case, wait until it's done so we can get the pointer.
bool allocated = false;
while (!allocated) {
allocated = checkAllocated(staging_it->second.atomicStatusInGpuMemory);
}
//Have this var use the existing memory address.
var.setArray3(offset, size, staging_it->second.device_ptr);
}
}
}
//Now allocate it
if (allocationNeeded) {
OnDemandDataWarehouse::uintahSetCudaDevice(d_device_id);
size_t memSize = var.getMemSize();
//if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::allocateAndPut(), calling GPUMemoryPool::allocateCudaSpaceFromPool"
<< " for " << label
<< " patch " << patchID
<< " material " << matlIndx
<< " level " << levelIndx
<< " staging: " << std::boolalpha << staging
<< " with offset (" << offset.x << ", " << offset.y << ", " << offset.z << ")"
<< " and size (" << size.x << ", " << size.y << ", " << size.z << ")"
<< " at " << addr
<< " with status codes ";
if (!staging) {
gpu_stats << getDisplayableStatusCodes(it->second.atomicStatusInGpuMemory);
} else {
gpu_stats << getDisplayableStatusCodes(staging_it->second.atomicStatusInGpuMemory);
}
gpu_stats << " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec << endl;
}
cerrLock.unlock();
//}
addr = GPUMemoryPool::allocateCudaSpaceFromPool(d_device_id, memSize);
//Also update the var object itself
var.setArray3(offset, size, addr);
//Put all remaining information about the variable into the database.
put(var, sizeOfDataType, label, patchID, matlIndx, levelIndx, staging, gtype, numGhostCells);
//Now that the database knows of this and other threads can see the device pointer, update the status from allocating to allocated
if (!staging) {
testAndSetAllocate(it->second.atomicStatusInGpuMemory);
} else {
testAndSetAllocate(staging_it->second.atomicStatusInGpuMemory);
}
}
}
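//A minimal caller-side sketch of the grid-variable allocateAndPut() above (assumes a concrete
//Uintah::GPUGridVariable<double>, a host-side GPUDataWarehouse pointer named 'gpudw', and valid
//patch/matl/level indices; the label "phi1" is just an example; illustrative only, not compiled):
/*
  Uintah::GPUGridVariable<double> deviceVar;
  int3 low  = make_int3(0, 0, 0);
  int3 high = make_int3(12, 12, 12);
  //Declares (or reuses) a 12x12x12 double field. The first thread to get here performs the
  //pool allocation; any other thread spins until the device pointer is published.
  //Arguments after the indices: staging = false, ghost type = None, numGhostCells = 0.
  gpudw->allocateAndPut(deviceVar, "phi1", patchID, matlIndx, levelIndx,
                        false, low, high, sizeof(double), None, 0);
*/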
//______________________________________________________________________
//
//This method is meant to take an entry from the host side DW and copy it into
//the task datawarehouse whose job is to eventually live GPU side.
__host__ void
GPUDataWarehouse::copyItemIntoTaskDW(GPUDataWarehouse *hostSideGPUDW, char const* label,
int patchID, int matlIndx, int levelIndx, bool staging,
int3 offset, int3 size) {
if (d_device_copy == NULL) {
//sanity check
printf("ERROR:\nGPUDataWarehouse::copyItemIntoTaskDW() - This method should only be called from a task data warehouse.\n");
exit(-1);
}
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
if (d_numVarDBItems == MAX_VARDB_ITEMS) {
printf("ERROR: Out of GPUDataWarehouse space");
exit(-1);
}
}
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
stagingVar sv;
sv.device_offset = offset;
sv.device_size = size;
//Get the iterator(s) from the host side GPUDW.
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator hostSideGPUDW_iter = hostSideGPUDW->varPointers->find(lpml);
std::map<stagingVar, stagingVarInfo>::iterator hostSideGPUDW_staging_iter;
var_monitor host_var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
if (staging) {
hostSideGPUDW_staging_iter = hostSideGPUDW_iter->second.stagingVars.find(sv);
}
}
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator iter = varPointers->find(lpml);
//sanity check
if (iter != varPointers->end() && !staging) {
printf("ERROR:\nGPUDataWarehouse::copyItemIntoTaskDW() - This task datawarehouse already had an entry for %s patch %d material %d level %d\n", label, patchID, matlIndx, levelIndx);
exit(-1);
}
//If it's staging, there should already be a non-staging var in the host-side GPUDW (even if it's just a placeholder)
//Inserting into this task DW, it is a requirement that non-staging variables get inserted first
//then any staging variables can come in later. This won't handle any scenario where a staging variable is requested
//into the task DW without a non-staging variable already existing here.
int d_varDB_index=d_numVarDBItems;
d_numVarDBItems++;
int i = d_varDB_index;
if (!staging) {
//copy the item
allVarPointersInfo vp = hostSideGPUDW_iter->second;
//Clear out any staging vars it may have had
vp.stagingVars.clear();
//Give it a d_varDB index
vp.varDB_index = d_varDB_index;
//insert it in
varPointers->insert( std::map<labelPatchMatlLevel, allVarPointersInfo>::value_type( lpml, vp ) );
strncpy(d_varDB[i].label, label, MAX_NAME_LENGTH);
//if (levelIndx == -1) {
d_varDB[i].domainID = patchID;
//} else {
// d_varDB[i].domainID = -99999999;
//}
d_varDB[i].matlIndx = matlIndx;
d_varDB[i].levelIndx = levelIndx;
d_varDB[i].sizeOfDataType = hostSideGPUDW_iter->second.sizeOfDataType;
d_varDB[i].varItem.gtype = hostSideGPUDW_iter->second.gtype;
d_varDB[i].varItem.numGhostCells = hostSideGPUDW_iter->second.numGhostCells;
d_varDB[i].varItem.staging = staging;
d_varDB[i].ghostItem.dest_varDB_index = -1; //Signify that this d_varDB item is NOT meta data to copy a ghost cell.
d_varDB[i].var_offset = hostSideGPUDW_iter->second.device_offset;
d_varDB[i].var_size = hostSideGPUDW_iter->second.device_size;
d_varDB[i].var_ptr = hostSideGPUDW_iter->second.device_ptr;
} else {
if (iter == varPointers->end()) {
//A staging item was requested but there's no regular variable for it to piggy back in.
//So create an empty placeholder regular variable.
//Start by getting a copy of what the GPU DW already had for this non-staging var
allVarPointersInfo vp = hostSideGPUDW_iter->second;
//Clear out any staging vars it may have had
vp.stagingVars.clear();
//Empty placeholders won't be placed in the d_varDB array.
vp.varDB_index = -1;
//insert it in
std::pair<std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator, bool> ret = varPointers->insert( std::map<labelPatchMatlLevel, allVarPointersInfo>::value_type( lpml, vp ) );
if (!ret.second) {
printf("ERROR:\nGPUDataWarehouse::copyItemIntoTaskDW( ) Failure inserting into varPointers map.\n");
exit(-1);
}
iter = ret.first;
}
//copy the item
stagingVarInfo svi = hostSideGPUDW_staging_iter->second;
//Give it a d_varDB index
svi.varDB_index = d_varDB_index;
//insert it in
std::map<stagingVar, stagingVarInfo>::iterator staging_iter = iter->second.stagingVars.find(sv);
if (staging_iter != iter->second.stagingVars.end()) {
printf("ERROR:\nGPUDataWarehouse::copyItemIntoTaskDW( ) This staging var already exists in this task DW\n");
}
std::pair<stagingVar, stagingVarInfo> p = make_pair( sv, svi );
iter->second.stagingVars.insert( p );
strncpy(d_varDB[i].label, label, MAX_NAME_LENGTH);
//if (levelIndx == -1) {
d_varDB[i].domainID = patchID;
//} else {
// d_varDB[i].domainID = -99999999;
//}
d_varDB[i].matlIndx = matlIndx;
d_varDB[i].levelIndx = levelIndx;
d_varDB[i].sizeOfDataType = hostSideGPUDW_iter->second.sizeOfDataType;
d_varDB[i].varItem.gtype = hostSideGPUDW_iter->second.gtype;
d_varDB[i].varItem.numGhostCells = hostSideGPUDW_iter->second.numGhostCells;
d_varDB[i].varItem.staging = staging;
d_varDB[i].ghostItem.dest_varDB_index = -1; //Signify that this d_varDB item is NOT meta data to copy a ghost cell.
d_varDB[i].var_offset = hostSideGPUDW_staging_iter->first.device_offset;
d_varDB[i].var_size = hostSideGPUDW_staging_iter->first.device_size;
d_varDB[i].var_ptr = hostSideGPUDW_staging_iter->second.device_ptr;
}
d_dirty=true;
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::copyItemIntoTaskDW( " << label << " ) - "
<< " Put into d_varDB at index " << i
<< " of max index " << maxdVarDBItems - 1
<< " label " << label
<< " patch " << d_varDB[i].domainID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " staging: " << std::boolalpha << staging
<< " datatype size " <<d_varDB[i].sizeOfDataType
<< " into address " << d_varDB[i].var_ptr
<< " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec
<< " size [" << d_varDB[i].var_size.x << ", " << d_varDB[i].var_size.y << ", " << d_varDB[i].var_size.z << "]"
<< " offset [" << d_varDB[i].var_offset.x << ", " << d_varDB[i].var_offset.y << ", " << d_varDB[i].var_offset.z << "]"
<< endl;
}
cerrLock.unlock();
}
} // end var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER }
}
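//A minimal scheduler-side sketch of copyItemIntoTaskDW() above (assumes 'hostSideGPUDW' already
//holds allocated entries, 'taskDW' was built with init()/init_device(), and that varLow/varSize
//and ghostLow/ghostSize are hypothetical offsets and sizes; illustrative only, not compiled):
/*
  //The non-staging entry must be copied in first...
  taskDW->copyItemIntoTaskDW(hostSideGPUDW, "phi1", patchID, matlIndx, levelIndx,
                             false, varLow, varSize);
  //...then any staging (foreign/ghost) regions, keyed by their offset and size.
  taskDW->copyItemIntoTaskDW(hostSideGPUDW, "phi1", patchID, matlIndx, levelIndx,
                             true, ghostLow, ghostSize);
  //Later, taskDW->syncto_device(&stream) pushes the flat d_varDB array to the GPU.
*/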
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::putContiguous(GPUGridVariableBase &var, const char* indexID, char const* label, int patchID, int matlIndx, int levelIndx, bool staging, int3 low, int3 high, size_t sizeOfDataType, GridVariableBase* gridVar, bool stageOnHost)
{
#ifdef __CUDA_ARCH__
//Should not put from the device side as all memory allocation should be done on the CPU side through cudaMalloc()
#else
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
//first check if this patch/var/matl is in the process of loading in.
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
//Space for this patch already exists. Use that and return.
if (d_debug) {
printf(
"GPUDataWarehouse::putContiguous( %s ). This gpudw database has a variable for label %s patch %d matl %d level %d staging %s on device %d. Reusing it.\n",
label, label, patchID, matlIndx, levelIndx, staging ? "true" : "false", d_device_id);
}
var.setArray3(varPointers->operator[](lpml).device_offset, varPointers->operator[](lpml).device_size,
varPointers->operator[](lpml).device_ptr);
return;
}
int3 size = make_int3(high.x - low.x, high.y - low.y, high.z - low.z);
int3 offset = low;
void* device_ptr = NULL;
var.setArray3(offset, size, device_ptr);
contiguousArrayInfo *ca;
allocate_monitor allocate_read_lock { Uintah::CrowdMonitor < allocate_tag > ::READER };
{
ca = &(contiguousArrays->operator[](indexID));
}
if ((ca->allocatedDeviceMemory == NULL || ca->sizeOfAllocatedMemory - ca->assignedOffset < var.getMemSize()) && stageOnHost) {
printf("ERROR: No room left on device to be assigned address space\n");
if (ca->allocatedDeviceMemory != NULL) {
printf(
"There was %lu bytes allocated, %lu has been assigned, and %lu more bytes were attempted to be assigned for %s patch %d matl %d level %d staging %s\n",
ca->sizeOfAllocatedMemory, ca->assignedOffset, var.getMemSize(), label, patchID, matlIndx, levelIndx,
staging ? "true" : "false");
}
exit(-1);
}
else {
//There is already pre-allocated contiguous memory chunks with room available on
//both the device and the host. Just assign pointers for both the device and host contiguous arrays.
//This prepares the var with the offset and size. The actual address will come next.
void* host_contiguousArrayPtr = NULL;
int varMemSize = var.getMemSize();
device_ptr = (void*)((uint8_t*)ca->allocatedDeviceMemory + ca->assignedOffset);
var.setArray3(offset, size, device_ptr);
host_contiguousArrayPtr = (void*)((uint8_t*)ca->allocatedHostMemory + ca->assignedOffset);
//We previously ran into CUDA misaligned-address errors when mixing different data types. We suspect the 4-byte ints
//were the issue. So the engine computes buffer room for each variable as a multiple of UnifiedScheduler::bufferPadding.
//So the contiguous array has been sized with extra padding. (For example, if a var holds 12 ints, then it would be 48 bytes in
//size. But if UnifiedScheduler::bufferPadding = 32, then it should add 16 bytes for padding, for a total of 64 bytes).
int memSizePlusPadding = ((UnifiedScheduler::bufferPadding - varMemSize % UnifiedScheduler::bufferPadding)
% UnifiedScheduler::bufferPadding)
+ varMemSize;
ca->assignedOffset += memSizePlusPadding;
if (stageOnHost) {
//Some GPU grid variable data doesn't need to be copied from the host
//For example, computes vars are just uninitialized space.
//Others grid vars need to be copied. This copies the data into a contiguous
//array on the host so that copyDataHostToDevice() can copy the contiguous
//host array to the device.
//Data listed as required. Or compute data that was initialized as a copy of something else.
ca->copiedOffset += memSizePlusPadding;
memcpy(host_contiguousArrayPtr, gridVar->getBasePointer(), varMemSize);
} //else {
//printf("Setting aside space %s %d %d from host location %p host contiguous array %p\n", label, patchID, matlIndx, host_ptr, host_contiguousArrayPtr);
//}
put(var, sizeOfDataType, label, patchID, matlIndx, levelIndx, staging, None, 0, host_contiguousArrayPtr);
//printf("Allocating for %s at patch %d and matl %d size is %d host_ptr %p host_contiguousPtr %p device_ptr %p\n", label, patchID, matlIndx, varMemSize, host_ptr, host_contiguousArrayPtr, device_ptr);
}
} // end var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER }
#endif
}
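//A minimal sketch of the contiguous-array path above (assumes allocate() below was already called
//for this indexID with enough padded room for every variable of the task; "task:42" is a made-up
//indexID and 'hostGridVar' a hypothetical host-side GridVariableBase*; illustrative only, not compiled):
/*
  gpudw->allocate("task:42", totalPaddedBytes); //one device chunk plus one matching host chunk
  //Required data: staged into the host chunk so a single host-to-device copy can move it later.
  gpudw->putContiguous(deviceVar, "task:42", "phi1", patchID, matlIndx, levelIndx,
                       false, low, high, sizeof(double), hostGridVar, true);
  //Computes-only data: just carve out device space, nothing needs to be staged.
  gpudw->putContiguous(newDeviceVar, "task:42", "phi1_new", patchID, matlIndx, levelIndx,
                       false, low, high, sizeof(double), NULL, false);
*/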
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::allocate(const char* indexID, size_t size)
{
#ifdef __CUDA_ARCH__
//Should not put from the device side as all memory allocation should be done on the CPU side through cudaMalloc()
#else
if (size == 0) {
return;
}
//This method allocates one big chunk of memory so that little allocations do not have to occur for each grid variable.
//This is needed because devices often have substantial overhead for each device malloc and device copy. By putting it into one
//chunk of memory, only one malloc and one copy to device should be needed.
double *d_ptr = NULL;
double *h_ptr = NULL;
OnDemandDataWarehouse::uintahSetCudaDevice(d_device_id);
printf("Allocated GPU buffer of size %lu \n", (unsigned long)size);
CUDA_RT_SAFE_CALL(cudaMalloc(&d_ptr, size) );
//printf("In allocate(), cuda malloc for size %ld at %p on device %d\n", size, d_ptr, d_device_id);
if (d_debug) {
printf("In allocate(), cudaMalloc for size %ld at %p on device %d\n", size, d_ptr, d_device_id);
}
//Now allocate that much also on the host. We want to do this because it's easier to pool up all the data on the host side
//and then move it over to the device side later in one shot. It also allows for one copy doing a device to host later.
//h_ptr = new double[size];
h_ptr = (double*)malloc(size);
//Registering memory seems good in theory, but bad in practice for our purposes.
//On the k20 device on beast.sci.utah.edu, this single register call was taking 0.1 seconds!
//On my home GTX580 device, it was taking 0.015 seconds, better, but still substantial enough
//we should avoid it for now. (If you want to use it, then also uncomment the cudaHostUnregister call in clear()).
//cudaHostRegister(h_ptr, size, cudaHostRegisterPortable);
contiguousArrayInfo ca(d_ptr, h_ptr, size);
allocate_monitor var_write_lock{ Uintah::CrowdMonitor<allocate_tag>::WRITER };
{
contiguousArrays->insert( std::map<const char *, contiguousArrayInfo>::value_type( indexID, ca ) );
// for (std::map<std::string, contiguousArrayInfo>::iterator it = contiguousArrays->begin(); it != contiguousArrays->end(); ++it) {
// printf("%s\n", it->first.c_str());
// }
}
#endif
}
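//A worked sizing example for the chunk allocated above (assumes UnifiedScheduler::bufferPadding
//is 32 bytes, as the putContiguous() comments suggest; illustrative numbers only):
/*
  //var A: 12 doubles  =   96 bytes -> already a multiple of 32, padded size 96
  //var B: 12 ints     =   48 bytes -> pad by (32 - 48 % 32) % 32 = 16, padded size 64
  //var C: 1000 doubles = 8000 bytes -> already a multiple of 32, padded size 8000
  size_t totalPaddedBytes = 96 + 64 + 8000; //= 8160, the size handed to allocate()
*/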
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::copyHostContiguousToHost(GPUGridVariableBase& device_var, GridVariableBase* host_var, char const* label, int patchID, int matlIndx, int levelIndx) {
#ifdef __CUDA_ARCH__
//Should not be called from the device side as all memory allocation should be done on the CPU side through cudaMalloc()
#else
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
//see if this datawarehouse has anything for this patchGroupID.
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
allVarPointersInfo info = varPointers->operator[](lpml);
device_var.setArray3(varPointers->operator[](lpml).device_offset, varPointers->operator[](lpml).device_size,
info.device_ptr);
// size_t size = device_var.getMemSize();
//TODO: Instead of doing a memcpy, I bet the original host grid variable could just have its pointers updated
//to work with what we were sent back. This would take some considerable work though to get all the details right
//TODO: This needs to be a memcpy async
memcpy(host_var->getBasePointer(), info.host_contiguousArrayPtr, device_var.getMemSize());
//Since we've moved it back into the host, lets mark it as being used.
//It's possible in the future there could be a scenario where we want to bring it
//back to the host but still retain it in the GPU. One scenario is
//sending data to an output .ups file but not modifying it on the host.
remove(label, patchID, matlIndx, levelIndx);
}
else {
printf("ERROR: host copyHostContiguoustoHost unknown variable on GPUDataWarehouse");
//for (std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it=varPointers->begin(); it!=varPointers->end(); ++it)
// printf("%s %d %d => %d \n", it->first.label, it->first.patchID, it->first.matlIndx, it->second.varDB_index);
exit(-1);
}
}
#endif
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::put(GPUReductionVariableBase &var, size_t sizeOfDataType, char const* label, int patchID, int matlIndx, int levelIndx, void* host_ptr)
{
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
void* var_ptr; // raw pointer to the memory
var.getData(var_ptr);
//See if it already exists. Also see if we need to update this into d_varDB.
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator iter = varPointers->find(lpml);
//sanity check
if (iter == varPointers->end()) {
printf("ERROR:\nGPUDataWarehouse::put( ) Can't use put() for a host-side GPU DW without it first existing in the internal database.\n");
exit(-1);
}
iter->second.varDB_index = -1;
iter->second.device_ptr = var_ptr;
iter->second.sizeOfDataType = sizeOfDataType;
iter->second.gtype = None;
iter->second.numGhostCells = 0;
iter->second.host_contiguousArrayPtr = host_ptr;
iter->second.atomicStatusInHostMemory = UNKNOWN;
int3 zeroValue;
zeroValue.x = 0;
zeroValue.y = 0;
zeroValue.z = 0;
iter->second.device_offset = zeroValue;
iter->second.device_size = zeroValue;
//previously set, do not set here
//iter->second.atomicStatusInGputMemory =
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::put( " << label << " ) - "
<< " Put a reduction variable in the host-side varPointers map for label " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " at device address " << var_ptr
<< " with datatype size " << iter->second.sizeOfDataType
<< " with status codes " << getDisplayableStatusCodes(iter->second.atomicStatusInGpuMemory)
<< " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec
<< " with description " << _internalName
<< " current varPointers size is: " << varPointers->size()
<< endl;
}
cerrLock.unlock();
}
}
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::put(GPUPerPatchBase& var, size_t sizeOfDataType, char const* label, int patchID, int matlIndx, int levelIndx, void* host_ptr)
{
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
void* var_ptr; // raw pointer to the memory
var.getData(var_ptr);
//See if it already exists. Also see if we need to update this into d_varDB.
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator iter = varPointers->find(lpml);
//sanity check
if (iter == varPointers->end()) {
printf("ERROR:\nGPUDataWarehouse::put( ) Can't use put() for a host-side GPU DW without it first existing in the internal database.\n");
exit(-1);
}
iter->second.varDB_index = -1;
iter->second.device_ptr = var_ptr;
iter->second.sizeOfDataType = sizeOfDataType;
iter->second.gtype = None;
iter->second.numGhostCells = 0;
iter->second.host_contiguousArrayPtr = host_ptr;
iter->second.atomicStatusInHostMemory = UNKNOWN;
int3 zeroValue;
zeroValue.x = 0;
zeroValue.y = 0;
zeroValue.z = 0;
iter->second.device_offset = zeroValue;
iter->second.device_size = zeroValue;
//previously set, do not set here
//iter->second.atomicStatusInGputMemory =
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::put( " << label << " ) - "
<< " Put a patch variable in the host-side varPointers map for label " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " at device address " << var_ptr
<< " with datatype size " << iter->second.sizeOfDataType
<< " with status codes " << getDisplayableStatusCodes(iter->second.atomicStatusInGpuMemory)
<< " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec
<< " with description " << _internalName
<< " current varPointers size is: " << varPointers->size()
<< endl;
}
cerrLock.unlock();
}
}
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::allocateAndPut(GPUReductionVariableBase& var, char const* label, int patchID, int matlIndx, int levelIndx, size_t sizeOfDataType)
{
//Allocate space on the GPU and declare a variable onto the GPU.
//This method does NOT stage everything in a big array.
//Check if it exists prior to allocating memory for it.
//If it has already been allocated, just use that.
//If it hasn't, this is lock-free and the first thread to request the allocation gets to allocate.
//If another thread sees that allocation is in progress, it loops and waits until the allocation is complete.
bool allocationNeeded = false;
int3 size = make_int3(0,0,0);
int3 offset = make_int3(0,0,0);
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread() << " Calling putUnallocatedIfNotExists() for " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec
<< " with description " << _internalName << endl;
}
cerrLock.unlock();
}
//This variable may not yet exist. But we want to declare we're allocating it. So ensure there is an entry.
putUnallocatedIfNotExists(label, patchID, matlIndx, levelIndx, false, offset, size);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it;
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
it = varPointers->find(lpml);
}
void* addr = NULL;
//Now see if we allocate the variable or use a previous existing allocation.
//See if someone has stated they are allocating it
allocationNeeded = testAndSetAllocating(it->second.atomicStatusInGpuMemory);
if (!allocationNeeded) {
//Someone else is allocating it or it has already been allocated.
//Space for this var already exists. Use that and return.
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::allocateAndPut( " << label << " ) - "
<< " This reduction variable already exists. No need to allocate another. GPUDW has a variable for label " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " on device " << d_device_id
<< " with data pointer " << it->second.device_ptr
<< " with status codes " << getDisplayableStatusCodes(it->second.atomicStatusInGpuMemory)
<< " into GPUDW at " << std::hex << this << std::dec
<< endl;
}
cerrLock.unlock();
}
//We need the pointer. We can't move on until we get the pointer.
//Ensure that it has been allocated (just not allocating). Another thread may have been assigned to allocate it
//but not completed that action. If that's the case, wait until it's done so we can get the pointer.
bool allocated = false;
while (!allocated) {
allocated = checkAllocated(it->second.atomicStatusInGpuMemory);
}
//Have this var use the existing memory address.
var.setData(it->second.device_ptr);
} else {
//We are the first task to request allocation. Do it.
OnDemandDataWarehouse::uintahSetCudaDevice(d_device_id);
size_t memSize = var.getMemSize();
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::allocateAndPut(), calling GPUMemoryPool::allocateCudaSpaceFromPool"
<< " for reduction variable " << label
<< " patch " << patchID
<< " material " << matlIndx
<< " level " << levelIndx
<< " size " << var.getMemSize()
<< " at " << addr
<< " with status codes " << getDisplayableStatusCodes(it->second.atomicStatusInGpuMemory)
<< " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec << endl;
}
cerrLock.unlock();
}
addr = GPUMemoryPool::allocateCudaSpaceFromPool(d_device_id, memSize);
//Also update the var object itself
var.setData(addr);
//Put all remaining information about the variable into the database.
put(var, sizeOfDataType, label, patchID, matlIndx, levelIndx);
//Now that the database knows of this and other threads can see the device pointer, update the status from allocating to allocated
testAndSetAllocate(it->second.atomicStatusInGpuMemory);
}
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::allocateAndPut(GPUPerPatchBase& var, char const* label, int patchID, int matlIndx, int levelIndx, size_t sizeOfDataType)
{
//Allocate space on the GPU and declare a variable onto the GPU.
//This method does NOT stage everything in a big array.
//Check if it exists prior to allocating memory for it.
//If it has already been allocated, just use that.
//If it hasn't, this is lock-free and the first thread to request the allocation gets to allocate.
//If another thread sees that allocation is in progress, it loops and waits until the allocation is complete.
bool allocationNeeded = false;
int3 size = make_int3(0,0,0);
int3 offset = make_int3(0,0,0);
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread() << " Calling putUnallocatedIfNotExists() for " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec
<< " with description " << _internalName << endl;
}
cerrLock.unlock();
}
//This variable may not yet exist. But we want to declare we're allocating it. So ensure there is an entry.
putUnallocatedIfNotExists(label, patchID, matlIndx, levelIndx, false, offset, size);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it;
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
it = varPointers->find(lpml);
}
void* addr = NULL;
//Now see if we allocate the variable or use a previous existing allocation.
//See if someone has stated they are allocating it
allocationNeeded = testAndSetAllocating(it->second.atomicStatusInGpuMemory);
if (!allocationNeeded) {
//Someone else is allocating it or it has already been allocated.
//Space for this var already exists. Use that and return.
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::allocateAndPut( " << label << " ) - "
<< " This patch variable already exists. No need to allocate another. GPUDW has a variable for label " << label
<< " patch " << patchID
<< " matl " << matlIndx
<< " level " << levelIndx
<< " on device " << d_device_id
<< " with data pointer " << it->second.device_ptr
<< " with status codes " << getDisplayableStatusCodes(it->second.atomicStatusInGpuMemory)
<< " into GPUDW at " << std::hex << this << std::dec
<< endl;
}
cerrLock.unlock();
}
//We need the pointer. We can't move on until we get the pointer.
//Ensure that it has been allocated (just not allocating). Another thread may have been assigned to allocate it
//but not completed that action. If that's the case, wait until it's done so we can get the pointer.
bool allocated = false;
while (!allocated) {
allocated = checkAllocated(it->second.atomicStatusInGpuMemory);
}
//Have this var use the existing memory address.
var.setData(it->second.device_ptr);
} else {
//We are the first task to request allocation. Do it.
OnDemandDataWarehouse::uintahSetCudaDevice(d_device_id);
size_t memSize = var.getMemSize();
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::allocateAndPut(), calling GPUMemoryPool::allocateCudaSpaceFromPool"
<< " for PerPatch variable " << label
<< " patch " << patchID
<< " material " << matlIndx
<< " level " << levelIndx
<< " size " << var.getMemSize()
<< " at " << addr
<< " with status codes " << getDisplayableStatusCodes(it->second.atomicStatusInGpuMemory)
<< " on device " << d_device_id
<< " into GPUDW at " << std::hex << this << std::dec << endl;
}
cerrLock.unlock();
}
addr = GPUMemoryPool::allocateCudaSpaceFromPool(d_device_id, memSize);
//Also update the var object itself
var.setData(addr);
//Put all remaining information about the variable into the database.
put(var, sizeOfDataType, label, patchID, matlIndx, levelIndx);
//Now that the database knows of this and other threads can see the device pointer, update the status from allocating to allocated
testAndSetAllocate(it->second.atomicStatusInGpuMemory);
}
}
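//A minimal caller-side sketch for the reduction and per-patch variants above (assumes concrete
//GPUReductionVariable<double> and GPUPerPatch<double> types from Uintah's GPU variable hierarchy,
//and that "totalMass"/"delT" are hypothetical labels; illustrative only, not compiled):
/*
  GPUReductionVariable<double> totalMass;
  gpudw->allocateAndPut(totalMass, "totalMass", patchID, matlIndx, levelIndx, sizeof(double));
  GPUPerPatch<double> dt;
  gpudw->allocateAndPut(dt, "delT", patchID, matlIndx, levelIndx, sizeof(double));
*/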
//______________________________________________________________________
//
HOST_DEVICE GPUDataWarehouse::dataItem*
GPUDataWarehouse::getItem(char const* label, int patchID, int matlIndx, int levelIndx)
{
#ifdef __CUDA_ARCH__
//This upcoming __syncthreads is needed. I believe that with CUDA, function calls are inlined.
// If you don't have this upcoming __syncthreads, here's what can happen:
// * The correct index was found by one of the threads.
// * The last __syncthreads is called, all threads met up there.
// * Some threads in the block then make a second "function" call and reset index to -1
// * Meanwhile, those other threads were still in the first "function" call and hadn't
// yet processed if (index == -1). They now run that line. And see index is now -1. That's bad.
// So to prevent this scenario, we have one more __syncthreads.
__syncthreads(); //sync before get
int numThreads = blockDim.x * blockDim.y * blockDim.z;
//int blockID = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; //blockID on the grid
int threadID = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z; //threadID in the block
int i = threadID;
__syncthreads(); //sync before get
//if (d_debug && threadID == 0 && blockID == 0) {
// printf("device getting item \"%s\" from GPUDW %p", label, this);
// printf("size (%d vars)\n Available labels:", d_numVarDBItems);
//}
//Have every thread try to find the label/patchId/matlIndx is a match in
//array. This is a clever approach so that instead of doing a simple
//sequential search with one thread, we can let every thread search for it. Only the
//winning thread gets to write to shared data.
__shared__ int index;
index = -1;
while(i<d_numVarDBItems){
int strmatch=0;
char const *s1 = label; //reset s1 and s2 back to the start
char const *s2 = &(d_varDB[i].label[0]);
//a one-line strcmp. This should keep branching down to a minimum.
while (!(strmatch = *(unsigned char *) s1 - *(unsigned char *) s2) && *s1++ && *s2++);
//only one thread will ever match this.
//And nobody on the device side should ever access "staging" variables.
if (strmatch == 0) {
if (patchID ==-99999999 //Only getLevel calls should hit this
&& d_varDB[i].matlIndx == matlIndx
&& d_varDB[i].levelIndx == levelIndx
&& d_varDB[i].varItem.staging == false /* we don't support staging/foreign vars for get() */
&& d_varDB[i].ghostItem.dest_varDB_index == -1) { /*don't let ghost cell copy data mix in with normal variables for get() */
index = i; //we found it.
}
else if(d_varDB[i].domainID == patchID
&& d_varDB[i].matlIndx == matlIndx
&& d_varDB[i].levelIndx == levelIndx
&& d_varDB[i].varItem.staging == false
&& d_varDB[i].ghostItem.dest_varDB_index == -1) {
index = i; //we found it.
//printf("I'm thread %d In DW at %p, We found it for var %s patch %d matl %d level %d. d_varDB has it at index %d var %s patch %d at its item address %p with var pointer %p\n",
// threadID, this, label, patchID, matlIndx, levelIndx, index, &(d_varDB[index].label[0]), d_varDB[index].domainID, &d_varDB[index], d_varDB[index].var_ptr);
}
}
i = i + numThreads; //Since every thread is involved in searching for the string, have this thread loop to the next possible item to check for.
}
//sync before return;
__syncthreads();
if (index == -1) {
printf("ERROR:\nGPUDataWarehouse::getItem() didn't find anything for %s patch %d matl %d with threadID %d and numthreads %d\n", label, patchID, matlIndx, threadID, numThreads);
return NULL;
}
return &d_varDB[index];
#else
//__________________________________
// cpu code
/*labelPatchMatlLevel lpm(label, patchID, matlIndx);
int i = 0;
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
if (varPointers->find(lpm) != varPointers->end()) {
i = varPointers[lpm].varDB_index;
} else {
printf("ERROR:\nGPUDataWarehouse::getItem( %s ) host get unknown variable from GPUDataWarehouse\n",label);
exit(-1);
}
}
if (d_debug){
printf("host got \"%s\" loc %p from GPUDW %p on device %u\n", label, d_varDB[i].var_ptr, d_device_copy, d_device_id);
}
//quick error check
if (strcmp(d_varDB[i].label, label) != 0 || d_varDB[i].domainID != patchID || d_varDB[i].matlIndx != matlIndx) {
printf("ERROR:\nGPUDataWarehouse::getItem( %s ), data does not match what was expected\n",label);
exit(-1);
}
*/
printError("This method should only be called device side.", "getItem()", label, patchID, matlIndx, levelIndx );
//printf("ERROR:\nGPUDataWarehouse::getItem() should only be called device side.\n",label);
return &d_varDB[0];
#endif
}
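//A worked example of the strided search in getItem() above (illustrative numbers only):
//with 256 threads in the block, thread 0 inspects d_varDB[0], d_varDB[256], d_varDB[512], ...
//while thread 7 inspects d_varDB[7], d_varDB[263], d_varDB[519], ... Exactly one entry can match
//label/patch/matl/level (staging and ghost-copy entries are skipped), so only the thread that
//finds it writes the shared 'index'. A device-side use, roughly what accessors such as
//getModifiable() do internally (assumes 'gpudw' is the device-side copy passed to the kernel):
/*
  GPUDataWarehouse::dataItem* item = gpudw->getItem("phi1", patchID, matlIndx, levelIndx);
  if (item) {
    double* phi = (double*)item->var_ptr; //device pointer; use item->var_offset / item->var_size for indexing
  }
*/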
//______________________________________________________________________
//
HOST_DEVICE bool
GPUDataWarehouse::remove(char const* label, int patchID, int matlIndx, int levelIndx)
{
#ifdef __CUDA_ARCH__
printf("GPUDataWarehouse::remove() should not be called on device.\n");
return false;
#else
//It seems there are few scenarios for calling remove. I think the only time it should
//happen is removing staging variables.
//Avoid calling this unless you are absolutely sure what you are doing.
//Further, this doesn't erase any staging vars within a var.
bool retVal = false;
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
if (varPointers->find(lpml) != varPointers->end()) {
int i = varPointers->operator[](lpml).varDB_index;
d_varDB[i].label[0] = '\0'; //leave a hole in the flat array, not deleted.
varPointers->erase(lpml); //TODO: GPU Memory leak?
retVal = true;
d_dirty=true;
}
if (d_debug){
printf("GPUDataWarehouse::remove( %s ). Removed a variable for label %s patch %d matl %d level %d \n",
label, label, patchID, matlIndx, levelIndx);
}
}
return retVal;
#endif
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::init(int id, std::string internalName)
{
#ifdef __CUDA_ARCH__
printf("GPUDataWarehouse::init() should not be called on the device.\n");
#else
d_device_id = id;
//this->_internalName = new std::string(internalName);
strncpy(_internalName, internalName.c_str(), sizeof(_internalName));
objectSizeInBytes = 0;
maxdVarDBItems = 0;
//this->placementNewBuffer = placementNewBuffer;
varPointers = new std::map<labelPatchMatlLevel, allVarPointersInfo>;
contiguousArrays = new std::map<std::string, contiguousArrayInfo>;
//other data members are initialized in the constructor
d_numVarDBItems = 0;
d_numMaterials = 0;
d_debug = false;
//d_numGhostCells = 0;
d_device_copy = NULL;
d_dirty = true;
objectSizeInBytes = 0;
//resetdVarDB();
numGhostCellCopiesNeeded = 0;
#endif
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::cleanup()
{
delete varPointers;
delete contiguousArrays;
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::init_device(size_t objectSizeInBytes, unsigned int maxdVarDBItems)
{
#ifdef __CUDA_ARCH__
printf("GPUDataWarehouse::init_device() should only be called by the framework\n");
#else
this->objectSizeInBytes = objectSizeInBytes;
this->maxdVarDBItems = maxdVarDBItems;
OnDemandDataWarehouse::uintahSetCudaDevice( d_device_id );
void* temp = NULL;
//CUDA_RT_SAFE_CALL(cudaMalloc(&temp, objectSizeInBytes));
temp = GPUMemoryPool::allocateCudaSpaceFromPool(d_device_id, objectSizeInBytes);
//if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::init_device() -"
<< " requested GPU space from GPUMemoryPool::allocateCudaSpaceFromPool for Task DW of size " << objectSizeInBytes
<< " bytes at " << temp
<< " on device " << d_device_id
<< " the host GPUDW is at " << this
<< endl;
}
cerrLock.unlock();
//}
d_device_copy = (GPUDataWarehouse*)temp;
//cudaHostRegister(this, sizeof(GPUDataWarehouse), cudaHostRegisterPortable);
d_dirty = true;
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::syncto_device(void *cuda_stream)
{
#ifdef __CUDA_ARCH__
printf("GPUDataWarehouse::remove() should only be called by the framework\n");
#else
if (!d_device_copy) {
printf("ERROR:\nGPUDataWarehouse::syncto_device()\nNo device copy\n");
exit(-1);
}
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
if (d_dirty){
OnDemandDataWarehouse::uintahSetCudaDevice( d_device_id );
//Even though this is in a writeLock state on the CPU, the nature of multiple threads
//each with their own stream copying to a GPU means that one stream might seemingly go out
//of order. This is ok for two reasons. 1) Nothing should ever be *removed* from a gpu data warehouse
//2) Therefore, it doesn't matter if streams go out of order, each thread will still ensure it copies
//exactly what it needs. Other streams may write additional data to the gpu data warehouse, but cpu
//threads will only access their own data, not data copied in by other cpu threads via streams.
//This approach does NOT require CUDA pinned memory.
//unsigned int sizeToCopy = sizeof(GPUDataWarehouse);
cudaStream_t* stream = (cudaStream_t*)(cuda_stream);
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::syncto_device() - cudaMemcpy -"
<< " sync GPUDW at " << d_device_copy
<< " with description " << _internalName
<< " to device " << d_device_id
<< " on stream " << stream
<< endl;
}
cerrLock.unlock();
}
CUDA_RT_SAFE_CALL (cudaMemcpyAsync( d_device_copy, this, objectSizeInBytes, cudaMemcpyHostToDevice, *stream));
//CUDA_RT_SAFE_CALL (cudaMemcpy( d_device_copy, this, objectSizeInBytes, cudaMemcpyHostToDevice));
//if (d_debug) {
//printf("%s sync GPUDW %p to device %d on stream %p\n", UnifiedScheduler::myRankThread().c_str(), d_device_copy, d_device_id, stream);
//}
d_dirty=false;
}
}
#endif
}
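//A minimal host-side lifecycle sketch for a task data warehouse, tying init(), init_device() and
//syncto_device() together (assumes 'taskDW' was constructed host-side by the scheduler and that
//'stream' is a valid cudaStream_t for this task; illustrative only, not compiled):
/*
  taskDW->init(deviceID, "task GPUDW");
  taskDW->init_device(objectSizeInBytes, maxdVarDBItems); //reserves the device-side copy space
  //...copyItemIntoTaskDW() for every variable and ghost-cell region the task touches...
  taskDW->syncto_device(&stream); //one async copy of the whole object to the GPU
  //...launch the task's kernels against the device-side copy...
  taskDW->deleteSelfOnDevice();
  taskDW->cleanup();
*/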
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::clear()
{
#ifdef __CUDA_ARCH__
//no meaning in device method
#else
OnDemandDataWarehouse::uintahSetCudaDevice( d_device_id );
//delete any grid var that isn't part of a contiguous array
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator varIter;
for (varIter = varPointers->begin(); varIter != varPointers->end(); ++varIter) {
if (varIter->second.host_contiguousArrayPtr == NULL) {
//clear out all the staging vars, if any
std::map<stagingVar, stagingVarInfo>::iterator stagingIter;
for (stagingIter = varIter->second.stagingVars.begin(); stagingIter != varIter->second.stagingVars.end(); ++stagingIter) {
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::clear() -"
<< " calling GPUMemoryPool::freeCudaSpaceFromPool() for staging var for " << varIter->first.label
<< " at device ptr " << stagingIter->second.device_ptr
<< " on device " << d_device_id
<< endl;
}
cerrLock.unlock();
}
//CUDA_RT_SAFE_CALL(cudaFree(stagingIter->second.device_ptr));
//stagingIter->second.device_ptr == NULL;
size_t memSize = stagingIter->first.device_size.x *
stagingIter->first.device_size.y *
stagingIter->first.device_size.z *
varIter->second.sizeOfDataType;
if (GPUMemoryPool::freeCudaSpaceFromPool(d_device_id, memSize, stagingIter->second.device_ptr) ) {
stagingIter->second.device_ptr = NULL;
} else {
//The pool didn't recognize this address, so the space can't be returned to it.
printf("ERROR:\nGPUDataWarehouse::clear(), for a staging variable, couldn't find in the GPU memory pool the space starting at address %p\n", stagingIter->second.device_ptr);
exit(-1);
}
}
varIter->second.stagingVars.clear();
//clear out the regular vars
//See if it's a placeholder var for staging vars. This happens if the non-staging var
//had a device_ptr of NULL, and it was only in the varPointers map to only hold staging vars
if (varIter->second.device_ptr) {
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::clear() -"
<< " calling GPUMemoryPool::freeCudaSpaceFromPool() for non-staging var for " << varIter->first.label
<< " at device ptr " << varIter->second.device_ptr
<< " on device " << d_device_id
<< endl;
}
cerrLock.unlock();
}
size_t memSize = varIter->second.sizeOfDataType;
if (varIter->second.device_size.x != 0) {
memSize = memSize *
varIter->second.device_size.x *
varIter->second.device_size.y *
varIter->second.device_size.z;
}
if (GPUMemoryPool::freeCudaSpaceFromPool(d_device_id, memSize, varIter->second.device_ptr)) {
varIter->second.device_ptr = NULL;
} else {
printf("ERROR:\nGPUDataWarehouse::clear(), for a non-staging variable, couldn't find in the GPU memory pool the space starting at address %p\n", varIter->second.device_ptr);
exit(-1);
}
}
}
}
varPointers->clear();
//delete all the contiguous arrays
std::map<std::string, contiguousArrayInfo>::iterator iter;
for (iter = contiguousArrays->begin(); iter != contiguousArrays->end(); ++iter) {
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::clear() -"
<< " cudaFree for contiguous array for " << iter->first.c_str()
<< " at device ptr " << iter->second.allocatedDeviceMemory
<< " and host free at host ptr " << iter->second.allocatedHostMemory
<< " on device " << d_device_id
<< endl;
}
cerrLock.unlock();
}
CUDA_RT_SAFE_CALL(cudaFree(iter->second.allocatedDeviceMemory));
//cudaHostUnregister(iter->second.allocatedHostMemory);
free(iter->second.allocatedHostMemory);
}
contiguousArrays->clear();
}
init(d_device_id, _internalName);
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::deleteSelfOnDevice()
{
#ifdef __CUDA_ARCH__
//no meaning in device method
#else
if ( d_device_copy ) {
OnDemandDataWarehouse::uintahSetCudaDevice( d_device_id );
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< "GPUDataWarehouse::deleteSelfOnDevice - calling GPUMemoryPool::freeCudaSpaceFromPool for Task DW at " << std::hex
<< d_device_copy << " on device " << std::dec << d_device_id << std::endl;
}
cerrLock.unlock();
}
//cudaHostUnregister(this);
GPUMemoryPool::freeCudaSpaceFromPool(d_device_id, objectSizeInBytes, d_device_copy);
//CUDA_RT_SAFE_CALL(cudaFree( d_device_copy ));
}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::resetdVarDB()
{
#ifdef __CUDA_ARCH__
//no meaning in device method
#else
if (d_device_copy != NULL) {
//TODO: When TaskDWs are removed, this section shouldn't be needed as there won't be concurrency problems
//This is designed to help stop tricky race scenarios. One such scenario I encountered was as follows:
//Thread A would call getItem() on the GPU, and look through d_varDB for a matching label/patch/matl tuple
//Thread B would have previously added a new item to the d_varDB, then called syncto_device.
//Thread B would be partway through updating d_varDB on the GPU. It would increase the number of items by one
//And it would write the label. But it wouldn't yet write the patch or matl part of the tuple. By coincidence
//the old garbage data in the GPU would have exactly the patch and matl that matches thread A's query
//For a very brief window, there would be 2 tuples matching that label/patch/matl pair in d_varDB because
//thread B hasn't fully written in all of his data.
//Thread A's getItem() would run exactly in this brief window, find the wrong match, and use the wrong
//memory address, and the program would crash with an invalid address.
//The answer is to initialize d_varDB to items that should never provide an accidental match.
//This should also occur for all other arrays.
//TODO: This could be cleaned up to only reset as much as was used.
for (int i = 0; i < MAX_VARDB_ITEMS; i++) {
d_varDB[i].label[0] = '\0';
d_varDB[i].domainID = -1;
d_varDB[i].matlIndx = -1;
//d_varDB[i].staging = false;
d_varDB[i].var_ptr = NULL;
d_varDB[i].ghostItem.dest_varDB_index = -1;
}
for (int i = 0; i < MAX_LEVELDB_ITEMS; i++) {
d_levelDB[i].label[0] = '\0';
d_levelDB[i].domainID = -1;
d_levelDB[i].matlIndx = -1;
//d_varDB[i].staging = false;
d_levelDB[i].var_ptr = NULL;
}
for (int i = 0; i < MAX_MATERIALSDB_ITEMS; i++) {
d_materialDB[i].simulationType[0] = '\0';
}
}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::putMaterials( std::vector< std::string > materials)
{
#ifdef __CUDA_ARCH__
//Should not put from device side
#else
//__________________________________
//cpu code
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
//see if a thread has already supplied this datawarehouse with the material data
int numMaterials = materials.size();
if (d_numMaterials != numMaterials) {
//nobody has given us this material data yet, so lets add it in from the beginning.
if (numMaterials > MAX_MATERIALSDB_ITEMS) {
printf("ERROR: out of GPUDataWarehouse space for materials");
exit(-1);
}
for (int i = 0; i < numMaterials; i++) {
if (strcmp(materials.at(i).c_str(), "ideal_gas") == 0) {
d_materialDB[i].material = IDEAL_GAS;
} else {
printf("ERROR: This material has not yet been coded for GPU support\n.");
exit(-1);
}
}
d_numMaterials = numMaterials;
}
}
#endif
}
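//A minimal usage sketch for the material table above (illustrative only, not compiled):
/*
  std::vector<std::string> mats;
  mats.push_back("ideal_gas"); //currently the only material coded for GPU support
  gpudw->putMaterials(mats);
  //Device side, a kernel can then query getNumMaterials() and getMaterial(0) == IDEAL_GAS.
*/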
//______________________________________________________________________
//
HOST_DEVICE int
GPUDataWarehouse::getNumMaterials() const
{
#ifdef __CUDA_ARCH__
return d_numMaterials;
#else
//I don't know if it makes sense to write this for the host side, when it already exists elsewhere host side.
return -1;
#endif
}
//______________________________________________________________________
//
HOST_DEVICE materialType
GPUDataWarehouse::getMaterial(int i) const
{
#ifdef __CUDA_ARCH__
if (i >= d_numMaterials) {
printf("ERROR: Attempting to access material past bounds\n");
assert(0);
}
return d_materialDB[i].material;
#else
//I don't know if it makes sense to write this for the host side, when it already exists elsewhere host side.
printf("getMaterial() is only implemented as a GPU function");
return IDEAL_GAS; //returning something to prevent a compiler error
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::copyGpuGhostCellsToGpuVars() {
#ifndef __CUDA_ARCH__
//Not for the host side
#else
//Copy all ghost cells from their source to their destination.
//The ghost cells could either be only the data that needs to be copied,
//or it could be on an edge of a bigger grid var.
//I believe the x,y,z coordinates of everything should match.
//This could probably be made more efficient by using only perhaps one block,
//copying float 4s, and doing it with instruction level parallelism.
int numThreads = blockDim.x*blockDim.y*blockDim.z;
int blockID = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; //blockID on the grid
int threadID = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z; //threadID in the block
int totalThreads = numThreads * gridDim.x * gridDim.y * gridDim.z;
int assignedCellID;
//go through every ghost cell var we need
for (int i = 0; i < d_numVarDBItems; i++) {
//if (threadID == 0) {
// if (d_varDB[i].ghostItem.dest_varDB_index != -1) {
// printf("d_varDB[%d].label is %s\n", i, d_varDB[d_varDB[i].ghostItem.dest_varDB_index].label, d_numVarDBItems);
// } else {
// printf("d_varDB[%d].label is %s\n", i, d_varDB[i].label, d_numVarDBItems);
// }
//}
//some things in d_varDB are meta data for simulation variables
//other things in d_varDB are meta data for how to copy ghost cells.
//Make sure we're only dealing with ghost cells here
if(d_varDB[i].ghostItem.dest_varDB_index != -1) {
assignedCellID = blockID * numThreads + threadID;
int destIndex = d_varDB[i].ghostItem.dest_varDB_index;
int3 ghostCellSize;
ghostCellSize.x = d_varDB[i].ghostItem.sharedHighCoordinates.x - d_varDB[i].ghostItem.sharedLowCoordinates.x;
ghostCellSize.y = d_varDB[i].ghostItem.sharedHighCoordinates.y - d_varDB[i].ghostItem.sharedLowCoordinates.y;
ghostCellSize.z = d_varDB[i].ghostItem.sharedHighCoordinates.z - d_varDB[i].ghostItem.sharedLowCoordinates.z;
//while there's still work to do (this assigned ID is still within the ghost cell)
while (assignedCellID < ghostCellSize.x * ghostCellSize.y * ghostCellSize.z ) {
int z = assignedCellID / (ghostCellSize.x * ghostCellSize.y);
int temp = assignedCellID % (ghostCellSize.x * ghostCellSize.y);
int y = temp / ghostCellSize.x;
int x = temp % ghostCellSize.x;
assignedCellID += totalThreads;
//if we're in a valid x,y,z space for the variable. (It's unlikely every cell will perfectly map onto every available thread.)
if (x < ghostCellSize.x && y < ghostCellSize.y && z < ghostCellSize.z) {
//offset them to their true array coordinates, not relative simulation cell coordinates
//When using virtual addresses, the virtual offset is always applied to the source, but the destination is correct.
int x_source_real = x + d_varDB[i].ghostItem.sharedLowCoordinates.x - d_varDB[i].ghostItem.virtualOffset.x - d_varDB[i].var_offset.x;
int y_source_real = y + d_varDB[i].ghostItem.sharedLowCoordinates.y - d_varDB[i].ghostItem.virtualOffset.y - d_varDB[i].var_offset.y;
int z_source_real = z + d_varDB[i].ghostItem.sharedLowCoordinates.z - d_varDB[i].ghostItem.virtualOffset.z - d_varDB[i].var_offset.z;
//count over array slots.
int sourceOffset = x_source_real + d_varDB[i].var_size.x * (y_source_real + z_source_real * d_varDB[i].var_size.y);
int x_dest_real = x + d_varDB[i].ghostItem.sharedLowCoordinates.x - d_varDB[destIndex].var_offset.x;
int y_dest_real = y + d_varDB[i].ghostItem.sharedLowCoordinates.y - d_varDB[destIndex].var_offset.y;
int z_dest_real = z + d_varDB[i].ghostItem.sharedLowCoordinates.z - d_varDB[destIndex].var_offset.z;
int destOffset = x_dest_real + d_varDB[destIndex].var_size.x * (y_dest_real + z_dest_real * d_varDB[destIndex].var_size.y);
//if (threadID == 0) {
/* printf("Going to copy, between (%d, %d, %d) from offset %d to offset %d. From starts at (%d, %d, %d) with size (%d, %d, %d) at index %d pointer %p. To starts at (%d, %d, %d) with size (%d, %d, %d).\n",
d_varDB[i].ghostItem.sharedLowCoordinates.x,
d_varDB[i].ghostItem.sharedLowCoordinates.y,
d_varDB[i].ghostItem.sharedLowCoordinates.z,
sourceOffset,
destOffset,
d_varDB[i].var_offset.x, d_varDB[i].var_offset.y, d_varDB[i].var_offset.z,
d_varDB[i].var_size.x, d_varDB[i].var_size.y, d_varDB[i].var_size.z,
i,
d_varDB[i].var_ptr,
d_varDB[destIndex].var_offset.x, d_varDB[destIndex].var_offset.y, d_varDB[destIndex].var_offset.z,
d_varDB[destIndex].var_size.x, d_varDB[destIndex].var_size.y, d_varDB[destIndex].var_size.z);
*/
//}
//copy all 8 bytes of a double in one shot
if (d_varDB[i].sizeOfDataType == sizeof(double)) {
*((double*)(d_varDB[destIndex].var_ptr) + destOffset) = *((double*)(d_varDB[i].var_ptr) + sourceOffset);
//Note: Every now and then I've seen this printf statement get confused, a line will print with the wrong variables/offset variables...
/* printf("Thread %d - %s At (%d, %d, %d), real: (%d, %d, %d), copying within region between (%d, %d, %d) and (%d, %d, %d). Source d_varDB index (%d, %d, %d) varSize (%d, %d, %d) virtualOffset(%d, %d, %d), varOffset(%d, %d, %d), sourceOffset %d actual pointer %p, value %e. Dest d_varDB index %d ptr %p destOffset %d actual pointer. %p\n",
threadID, d_varDB[destIndex].label, x, y, z, x_source_real, y_source_real, z_source_real,
d_varDB[i].ghostItem.sharedLowCoordinates.x, d_varDB[i].ghostItem.sharedLowCoordinates.y, d_varDB[i].ghostItem.sharedLowCoordinates.z,
d_varDB[i].ghostItem.sharedHighCoordinates.x, d_varDB[i].ghostItem.sharedHighCoordinates.y, d_varDB[i].ghostItem.sharedHighCoordinates.z,
x + d_varDB[i].ghostItem.sharedLowCoordinates.x - d_varDB[i].ghostItem.virtualOffset.x,
y + d_varDB[i].ghostItem.sharedLowCoordinates.y - d_varDB[i].ghostItem.virtualOffset.y,
z + d_varDB[i].ghostItem.sharedLowCoordinates.z - d_varDB[i].ghostItem.virtualOffset.z,
d_varDB[i].var_size.x, d_varDB[i].var_size.y, d_varDB[i].var_size.z,
d_varDB[i].ghostItem.virtualOffset.x, d_varDB[i].ghostItem.virtualOffset.y, d_varDB[i].ghostItem.virtualOffset.z,
d_varDB[i].var_offset.x, d_varDB[i].var_offset.y, d_varDB[i].var_offset.z,
sourceOffset, (double*)(d_varDB[i].var_ptr) + sourceOffset, *((double*)(d_varDB[i].var_ptr) + sourceOffset),
destIndex, d_varDB[destIndex].var_ptr, destOffset, (double*)(d_varDB[destIndex].var_ptr) + destOffset);
*/
}
//or copy all 4 bytes of an int in one shot.
else if (d_varDB[i].sizeOfDataType == sizeof(int)) {
*(((int*)d_varDB[destIndex].var_ptr) + destOffset) = *((int*)(d_varDB[i].var_ptr) + sourceOffset);
//Copy each byte until we've copied all for this data type.
} else {
for (int j = 0; j < d_varDB[i].sizeOfDataType; j++) {
*(((char*)d_varDB[destIndex].var_ptr) + (destOffset * d_varDB[destIndex].sizeOfDataType + j))
= *(((char*)d_varDB[i].var_ptr) + (sourceOffset * d_varDB[i].sizeOfDataType + j));
}
}
}
}
}
}
#endif
}
//______________________________________________________________________
//
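//Kernel entry point: every launched thread helps copy the queued ghost cell regions between variables already resident on this GPU.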
__global__ void copyGpuGhostCellsToGpuVarsKernel( GPUDataWarehouse *gpudw) {
gpudw->copyGpuGhostCellsToGpuVars();
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::copyGpuGhostCellsToGpuVarsInvoker(cudaStream_t* stream)
{
#ifdef __CUDA_ARCH__
//Not for the device side
#else
//see if this GPU datawarehouse has ghost cells in it.
if (numGhostCellCopiesNeeded > 0) {
//call a kernel which gets the copy process started.
OnDemandDataWarehouse::uintahSetCudaDevice(d_device_id);
const int BLOCKSIZE = 1;
int xblocks = 32;
int yblocks = 1;
int zblocks = 1;
dim3 dimBlock(32, 32, 1);
dim3 dimGrid(1, 1, 1); //Give each ghost copying kernel 32 * 32 = 1024 threads to copy
//printf("Launching copyGpuGhostCellsToGpuVarsKernel\n");
//cudaDeviceSynchronize();
/*
//View a variable before and after the ghost cell copy
{
cudaDeviceSynchronize();
//pull out phi01
Uintah::GPUGridVariable<double> myDeviceVar;
getModifiable( myDeviceVar, "phi1", 0, 0 );
double * uintahDeviceFieldVar = const_cast<double*>( myDeviceVar.getPointer() );
printf("Before the device pointer is %p\n", uintahDeviceFieldVar);
double * hostSideVar = new double[myDeviceVar.getMemSize()/8];
CUDA_RT_SAFE_CALL(cudaMemcpy((void*)hostSideVar, (void*)uintahDeviceFieldVar, myDeviceVar.getMemSize(), cudaMemcpyDeviceToHost));
printf("Contents of phi1:\n");
for (int i = 0; i < 12; i++) {
for (int j = 0; j < 12; j++) {
printf("%1.3lf ", hostSideVar[i*12+j]);
}
printf("\n");
}
delete[] hostSideVar;
}
*/
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::copyGpuGhostCellsToGpuVarsInvoker() - "
<< " Launching ghost cell copies kernel"
<< " on device " << d_device_id
<< " at GPUDW at " << std::hex << this << std::dec
<< " with description " << _internalName
<< endl;
}
cerrLock.unlock();
}
copyGpuGhostCellsToGpuVarsKernel<<< dimGrid, dimBlock, 0, *stream >>>(this->d_device_copy);
//copyGpuGhostCellsToGpuVarsKernel<<< dimGrid, dimBlock >>>(this->d_device_copy);
//printf("Finished copyGpuGhostCellsToGpuVarsKernel\n");
//
/*
{
//pull out phi0
Uintah::GPUGridVariable<double> myDeviceVar;
getModifiable( myDeviceVar, "phi1", 0, 0 );
double * uintahDeviceFieldVar = const_cast<double*>( myDeviceVar.getPointer() );
printf("After the device pointer is %p\n", uintahDeviceFieldVar);
double * hostSideVar = new double[myDeviceVar.getMemSize()/8];
CUDA_RT_SAFE_CALL(cudaMemcpy((void*)hostSideVar, (void*)uintahDeviceFieldVar, myDeviceVar.getMemSize(), cudaMemcpyDeviceToHost));
printf("Contents of phi1:\n");
for (int i = 0; i < 12; i++) {
for (int j = 0; j < 12; j++) {
printf("%1.3lf ", hostSideVar[i*12+j]);
}
printf("\n");
}
delete[] hostSideVar;
}
*/
}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE bool
GPUDataWarehouse::ghostCellCopiesNeeded()
{
#ifdef __CUDA_ARCH__
//Not implemented for the device side
printError("This method not allowed on the device.", "ghostCellCopiesNeeded");
return false;
#else
//see if this GPU datawarehouse has ghost cells in it.
return (numGhostCellCopiesNeeded > 0);
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::putGhostCell(char const* label, int sourcePatchID, int destPatchID, int matlIndx, int levelIndx,
bool sourceStaging, bool destStaging,
int3 varOffset, int3 varSize,
int3 sharedLowCoordinates, int3 sharedHighCoordinates, int3 virtualOffset) {
#ifdef __CUDA_ARCH__
printf("ERROR:\nGPUDataWarehouse::putGhostCell( %s ) Not implemented for GPU\n",label);
#else
//Add information describing a ghost cell that needs to be copied internally from
//one chunk of data to the destination. This covers a GPU -> same GPU copy scenario.
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
size_t i = d_numVarDBItems;
if (i > maxdVarDBItems) {
printf("ERROR: GPUDataWarehouse::putGhostCell( %s ). Exceeded maximum d_varDB entries. Index is %d and max items is %d\n", i, maxdVarDBItems);
exit(-1);
}
int index = -1;
d_numVarDBItems++;
numGhostCellCopiesNeeded++;
d_varDB[i].ghostItem.sharedLowCoordinates = sharedLowCoordinates;
d_varDB[i].ghostItem.sharedHighCoordinates = sharedHighCoordinates;
d_varDB[i].ghostItem.virtualOffset = virtualOffset;
//look up the source index and the destination index for these.
//it may be an entire variable (in which case staging is false)
//or it may be a staging variable.
labelPatchMatlLevel lpml_source(label, sourcePatchID, matlIndx, levelIndx);
if (!sourceStaging) {
if (varPointers->find(lpml_source) != varPointers->end()) {
index = varPointers->operator[](lpml_source).varDB_index;
}
} else {
//Find the variable that contains the region in which our ghost cells exist.
//Usually the sharedLowCoordinates and sharedHighCoordinates correspond
//exactly to the size of the staging variable. But sometimes the ghost data is found within
//a larger staging variable.
stagingVar sv;
sv.device_offset = varOffset;
sv.device_size = varSize;
std::map<stagingVar, stagingVarInfo>::iterator staging_it = varPointers->operator[](lpml_source).stagingVars.find(sv);
if (staging_it != varPointers->operator[](lpml_source).stagingVars.end()) {
index = staging_it->second.varDB_index;
} else {
printf("ERROR: GPUDataWarehouse::putGhostCell( %s ). Number of staging vars for this var: %d, No staging variable found label %s patch %d matl %d level %d offset (%d, %d, %d) size (%d, %d, %d) on DW at %p.\n",
label, varPointers->operator[](lpml_source).stagingVars.size(), label, sourcePatchID, matlIndx, levelIndx,
sv.device_offset.x, sv.device_offset.y, sv.device_offset.z,
sv.device_size.x, sv.device_size.y, sv.device_size.z,
this);
exit(-1);
}
//Find the d_varDB entry for this specific one.
}
if (index < 0) {
printf("ERROR:\nGPUDataWarehouse::putGhostCell, label %s, source patch ID %d, matlIndx %d, levelIndex %d staging %s not found in GPU DW %p\n",
label, sourcePatchID, matlIndx, levelIndx, sourceStaging ? "true" : "false", this);
exit(-1);
}
//printf("The found index %d for var %s patch %d matl %d\n", index, label, sourcePatchID, matlIndx);
//if (d_varDB[index].varItem.validOnGPU == false) {
//Steps prior to this point should have checked for this scenario.
//This is just a failsafe.
// printf("ERROR:\nGPUDataWarehouse::putGhostCell, attempting to use: label %s, source patch ID %d, materialID %d, it exists but the data is not valid.\n", label, sourcePatchID, matlIndx);
// exit(-1);
//}
d_varDB[i].var_offset = d_varDB[index].var_offset;
d_varDB[i].var_size = d_varDB[index].var_size;
d_varDB[i].var_ptr = d_varDB[index].var_ptr;
d_varDB[i].sizeOfDataType = d_varDB[index].sizeOfDataType;
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread()
<< " GPUDataWarehouse::putGhostCell() - "
<< " Placed into d_varDB at index " << i << " of max index " << maxdVarDBItems - 1
<< " from patch " << sourcePatchID << " staging " << sourceStaging << " to patch " << destPatchID << " staging " << destStaging
<< " has shared coordinates (" << sharedLowCoordinates.x << ", " << sharedLowCoordinates.y << ", " << sharedLowCoordinates.z << "),"
<< " (" << sharedHighCoordinates.x << ", " << sharedHighCoordinates.y << ", " << sharedHighCoordinates.z << "), "
<< " from low/offset (" << d_varDB[i].var_offset.x << ", " << d_varDB[i].var_offset.y << ", " << d_varDB[i].var_offset.z << ") "
<< " size (" << d_varDB[i].var_size.x << ", " << d_varDB[i].var_size.y << ", " << d_varDB[i].var_size.z << ") "
<< " virtualOffset (" << d_varDB[i].ghostItem.virtualOffset.x << ", " << d_varDB[i].ghostItem.virtualOffset.y << ", " << d_varDB[i].ghostItem.virtualOffset.z << ") "
<< " datatype size " << d_varDB[i].sizeOfDataType
<< " on device " << d_device_id
<< " at GPUDW at " << std::hex << this<< std::dec
<< endl;
}
cerrLock.unlock();
}
//if (d_debug){
// printf("Placed into d_varDB at index %d from patch %d to patch %d has shared coordinates (%d, %d, %d), (%d, %d, %d), from low/offset (%d, %d, %d) size (%d, %d, %d) virtualOffset(%d, %d, %d)\n",
// i, sourcePatchID, destPatchID, sharedLowCoordinates.x, sharedLowCoordinates.y,
// sharedLowCoordinates.z, sharedHighCoordinates.x, sharedHighCoordinates.y, sharedHighCoordinates.z,
// d_varDB[i].var_offset.x, d_varDB[i].var_offset.y, d_varDB[i].var_offset.z,
// d_varDB[i].var_size.x, d_varDB[i].var_size.y, d_varDB[i].var_size.z,
// d_varDB[i].ghostItem.virtualOffset.x, d_varDB[i].ghostItem.virtualOffset.y, d_varDB[i].ghostItem.virtualOffset.z);
//}
//Find where we are sending the ghost cell data to
labelPatchMatlLevel lpml_dest(label, destPatchID, matlIndx, levelIndx);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml_dest);
if (it != varPointers->end()) {
if (destStaging) {
//TODO: Do the same thing as the source.
//If the destination is staging, then the shared coordinates are also the ghost coordinates.
stagingVar sv;
sv.device_offset = sharedLowCoordinates;
sv.device_size = make_int3(sharedHighCoordinates.x-sharedLowCoordinates.x,
sharedHighCoordinates.y-sharedLowCoordinates.y,
sharedHighCoordinates.z-sharedLowCoordinates.z);
std::map<stagingVar, stagingVarInfo>::iterator staging_it = it->second.stagingVars.find(sv);
if (staging_it != it->second.stagingVars.end()) {
d_varDB[i].ghostItem.dest_varDB_index = staging_it->second.varDB_index;
} else {
printf("\nERROR:\nGPUDataWarehouse::putGhostCell() didn't find a staging variable from the device for offset (%d, %d, %d) and size (%d, %d, %d).\n",
sharedLowCoordinates.x, sharedLowCoordinates.y, sharedLowCoordinates.z,
sv.device_size.x, sv.device_size.y, sv.device_size.z);
exit(-1);
}
} else {
d_varDB[i].ghostItem.dest_varDB_index = it->second.varDB_index;
}
//if (d_debug){
// int destIndex = d_varDB[i].ghostItem.dest_varDB_index;
// printf("The destination ghost cell copy is at d_varDB at index %d with size (%d, %d, %d), offset (%d, %d, %d)\n",
// destIndex,
// d_varDB[destIndex].var_size.x, d_varDB[destIndex].var_size.y, d_varDB[destIndex].var_size.z,
// d_varDB[destIndex].var_offset.x, d_varDB[destIndex].var_offset.y, d_varDB[destIndex].var_offset.z);
//}
} else {
printf("ERROR:\nGPUDataWarehouse::putGhostCell(), label: %s destination patch ID %d, matlIndx %d, levelIndex %d, staging %s not found in GPU DW variable database\n",
label, destPatchID, matlIndx, levelIndx, destStaging ? "true" : "false");
exit(-1);
}
d_dirty=true;
}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::getSizes(int3& low, int3& high, int3& siz, GhostType& gtype, int& numGhostCells,
char const* label, int patchID, int matlIndx, int levelIndx) {
#ifdef __CUDA_ARCH__
printf("ERROR:\nGPUDataWarehouse::getSizes() Not implemented for GPU\n");
#else
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
allVarPointersInfo info = varPointers->operator[](lpml);
low = info.device_offset;
high.x = info.device_size.x + info.device_offset.x;
high.y = info.device_size.y + info.device_offset.y;
high.z = info.device_size.z + info.device_offset.z;
siz = info.device_size;
gtype = info.gtype;
numGhostCells = info.numGhostCells;
}
}
#endif
}
//______________________________________________________________________
//
//Go through all staging vars for a var. See if they are all marked as valid.
__host__ bool GPUDataWarehouse::areAllStagingVarsValid(char const* label, int patchID, int matlIndx, int levelIndx) {
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
if (it != varPointers->end()) {
for (std::map<stagingVar, stagingVarInfo>::iterator staging_it = it->second.stagingVars.begin();
staging_it != it->second.stagingVars.end();
++staging_it) {
if (!checkValid(staging_it->second.atomicStatusInGpuMemory)) {
if (gpu_stats.active()) {
cerrLock.lock();
{
gpu_stats << UnifiedScheduler::myRankThread() << " GPUDataWarehouse::areAllStagingVarsValid() -"
// Task: " << dtask->getName()
<< " Not all staging vars were ready for "
<< label << " patch " << patchID
<< " material " << matlIndx << " level " << levelIndx
<< " offset (" << staging_it->first.device_offset.x
<< ", " << staging_it->first.device_offset.y
<< ", " << staging_it->first.device_offset.z
<< ") and size (" << staging_it->first.device_size.x
<< ", " << staging_it->first.device_size.y
<< ", " << staging_it->first.device_size.z
<< ") with status codes " << getDisplayableStatusCodes(staging_it->second.atomicStatusInGpuMemory) <<endl;
}
cerrLock.unlock();
}
return false;
}
}
}
}
return true;
}
//______________________________________________________________________
//
//Simply performs an atomic fetch on the status variable.
typedef int atomicDataStatus;
__host__ atomicDataStatus
GPUDataWarehouse::getStatus(atomicDataStatus& status) {
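//OR-ing with zero leaves the bits unchanged but forces an atomic read of the status word.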
return __sync_or_and_fetch(&(status), 0);
}
__host__ string
GPUDataWarehouse::getDisplayableStatusCodes(atomicDataStatus& status) {
atomicDataStatus varStatus = __sync_or_and_fetch(&(status), 0);
string retval = "";
if (varStatus == 0) {
retval += "Unallocated ";
} else {
if ((varStatus & ALLOCATING) == ALLOCATING) {
retval += "Allocating ";
}
if ((varStatus & ALLOCATED) == ALLOCATED) {
retval += "Allocated ";
}
if ((varStatus & COPYING_IN) == COPYING_IN) {
retval += "Copying-in ";
}
if ((varStatus & VALID) == VALID) {
retval += "Valid ";
}
if ((varStatus & AWAITING_GHOST_COPY) == AWAITING_GHOST_COPY) {
retval += "Awaiting-ghost-copy ";
}
if ((varStatus & VALID_WITH_GHOSTS) == VALID_WITH_GHOSTS) {
retval += "Valid-with-ghosts ";
}
if ((varStatus & UNKNOWN) == UNKNOWN) {
retval += "Unknown ";
}
}
return retval;
}
//______________________________________________________________________
//
//returns false if something else already allocated space and we don't have to.
//returns true if we are the ones to allocate the space.
//performs operations with atomic compare and swaps
__host__ bool
GPUDataWarehouse::testAndSetAllocating(atomicDataStatus& status)
{
bool allocating = false;
while (!allocating) {
//get the value
atomicDataStatus oldVarStatus = __sync_or_and_fetch(&(status), 0);
//if it's allocated, return true
if (((oldVarStatus & ALLOCATING) == ALLOCATING) || ((oldVarStatus & ALLOCATED) == ALLOCATED)) {
//Something else already allocated or is allocating it. So this thread won't do any allocation.
return false;
} else {
//Attempt to claim we'll allocate it. If not go back into our loop and recheck
atomicDataStatus newVarStatus = oldVarStatus | ALLOCATING;
allocating = __sync_bool_compare_and_swap(&status, oldVarStatus, newVarStatus);
}
}
return true;
}
//______________________________________________________________________
//
//Sets the allocated flag on a variable's atomicDataStatus
//This is called after an allocation completes.
__host__ bool
GPUDataWarehouse::testAndSetAllocate(atomicDataStatus& status)
{
bool allocated = false;
//get the value
atomicDataStatus oldVarStatus = __sync_or_and_fetch(&(status), 0);
if ((oldVarStatus & ALLOCATING) == 0) {
//A sanity check
printf("ERROR:\nGPUDataWarehouse::testAndSetAllocate( ) Can't allocate a status if it wasn't previously marked as allocating.\n");
exit(-1);
} else if ((oldVarStatus & ALLOCATED) == ALLOCATED) {
//A sanity check
printf("ERROR:\nGPUDataWarehouse::testAndSetAllocate( ) Can't allocate a status if it's already allocated\n");
exit(-1);
}
else {
//Attempt to claim we'll allocate it. Create what we want the status to look like
//by turning off allocating and turning on allocated.
//Note: No need to turn off UNALLOCATED, it's defined as all zero bits.
//But the below is kept in just for readability's sake.
atomicDataStatus newVarStatus = oldVarStatus & ~UNALLOCATED;
newVarStatus = newVarStatus & ~ALLOCATING;
newVarStatus = newVarStatus | ALLOCATED;
//If we succeeded in our attempt to claim the allocation, this returns true.
//If we failed, that's a real problem, and we crash the program below.
allocated = __sync_bool_compare_and_swap(&status, oldVarStatus, newVarStatus);
}
if (!allocated) {
//Another sanity check
printf("ERROR:\nGPUDataWarehouse::testAndSetAllocate( ) Something wrongly modified the atomic status while setting the allocated flag\n");
exit(-1);
}
return allocated;
}
//______________________________________________________________________
//
//Simply determines if a variable has been marked as allocated.
__host__ bool
GPUDataWarehouse::checkAllocated(atomicDataStatus& status)
{
return ((__sync_or_and_fetch(&(status), 0) & ALLOCATED) == ALLOCATED);
}
//______________________________________________________________________
//
//Simply determines if a variable has been marked as valid.
__host__ bool
GPUDataWarehouse::checkValid(atomicDataStatus& status)
{
return ((__sync_or_and_fetch(&(status), 0) & VALID) == VALID);
}
//______________________________________________________________________
//
__host__ bool
GPUDataWarehouse::isAllocatedOnGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
bool retVal = ((__sync_fetch_and_or(&(varPointers->operator[](lpml).atomicStatusInGpuMemory), 0) & ALLOCATED) == ALLOCATED);
return retVal;
}
else {
return false;
}
}
}
//______________________________________________________________________
//
__host__ bool
GPUDataWarehouse::isAllocatedOnGPU(char const* label, int patchID, int matlIndx, int levelIndx, int3 offset, int3 size)
{
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
//cout << "In isAllocatedOnGPU - For patchID " << patchID << " for the status is " << getDisplayableStatusCodes(varPointers->operator[](lpml).atomicStatusInGpuMemory) << endl;
bool retVal = ((__sync_fetch_and_or(&(varPointers->operator[](lpml).atomicStatusInGpuMemory), 0) & ALLOCATED) == ALLOCATED);
if (retVal) {
//now check the sizes
int3 device_offset = varPointers->operator[](lpml).device_offset;
int3 device_size = varPointers->operator[](lpml).device_size;
retVal = (device_offset.x == offset.x && device_offset.y == offset.y && device_offset.z == offset.z
&& device_size.x == size.x && device_size.y == size.y && device_size.z == size.z);
}
return retVal;
}
else {
return false;
}
}
}
//______________________________________________________________________
//
__host__ bool
GPUDataWarehouse::isValidOnGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
bool retVal = ((__sync_fetch_and_or(&(varPointers->operator[](lpml).atomicStatusInGpuMemory), 0) & VALID) == VALID);
return retVal;
}
else {
return false;
}
}
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::setValidOnGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
__sync_and_and_fetch(&(varPointers->operator[](lpml).atomicStatusInGpuMemory), ~COPYING_IN);
__sync_or_and_fetch(&(varPointers->operator[](lpml).atomicStatusInGpuMemory), VALID);
}
else {
printf("host setValidOnGPU unknown variable %s on GPUDataWarehouse\n", label);
exit(-1);
}
}
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::setValidOnGPUStaging(char const* label, int patchID, int matlIndx, int levelIndx, int3 offset, int3 size)
{
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
if (it != varPointers->end()) {
stagingVar sv;
sv.device_offset = offset;
sv.device_size = size;
std::map<stagingVar, stagingVarInfo>::iterator staging_it = it->second.stagingVars.find(sv);
if (staging_it != it->second.stagingVars.end()) {
__sync_and_and_fetch(&(staging_it->second.atomicStatusInGpuMemory), ~COPYING_IN);
__sync_or_and_fetch(&(staging_it->second.atomicStatusInGpuMemory), VALID);
}
else {
printf("ERROR:\nGPUDataWarehouse::setValidOnGPUStaging( ) Staging variable %s not found.\n", label);
exit(-1);
}
}
else {
printf("ERROR:\nGPUDataWarehouse::setValidOnGPUStaging( ) Variable %s not found.\n", label);
exit(-1);
}
}
}
//______________________________________________________________________
//
//Returns true if this GPU DW has an entry for this item and its host-memory status is not unknown.
//In that case this GPU DW specifically knows something about the state of this variable in host memory.
//(The reason for the unknown check: currently when a var is added to the GPU DW, we also need to state
//what we know about its data in host memory. Since it doesn't know, it marks it as unknown, meaning
//the host side DW is possibly managing the data.)
__host__ bool GPUDataWarehouse::dwEntryExistsOnCPU(char const* label, int patchID, int matlIndx, int levelIndx) {
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
bool retVal = false;
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
if (it != varPointers->end()) {
if ((it->second.atomicStatusInHostMemory & UNKNOWN) != UNKNOWN) {
retVal = true;
}
}
return retVal;
}
}
//______________________________________________________________________
//
__host__ bool
GPUDataWarehouse::isValidOnCPU(char const* label, const int patchID, const int matlIndx, const int levelIndx)
{
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
bool retVal = ((__sync_fetch_and_or(&(varPointers->operator[](lpml).atomicStatusInHostMemory), 0) & VALID) == VALID);
return retVal;
}
else {
return false;
}
}
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::setValidOnCPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
__sync_and_and_fetch(&(varPointers->operator[](lpml).atomicStatusInHostMemory), ~COPYING_IN);
__sync_or_and_fetch(&(varPointers->operator[](lpml).atomicStatusInHostMemory), VALID);
}
else {
printf("host setValidOnCPU unknown variable %s on GPUDataWarehouse\n", label);
exit(-1);
}
}
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::setAwaitingGhostDataOnGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
__sync_or_and_fetch(&(varPointers->operator[](lpml).atomicStatusInGpuMemory), AWAITING_GHOST_COPY);
}
else {
printf("host setAwaitingGhostDataOnGPU unknown variable %s on GPUDataWarehouse\n", label);
exit(-1);
}
}
}
//______________________________________________________________________
//
//returns false if something else already changed a valid variable to valid awaiting ghost data
//returns true if we are the ones to manage this variable's ghost data.
__host__ bool
GPUDataWarehouse::testAndSetAwaitingGhostDataOnGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
bool allocating = false;
atomicDataStatus *status;
while (!allocating) {
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
//get the address
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
status = &(varPointers->operator[](lpml).atomicStatusInGpuMemory);
}
else {
printf("ERROR:\nGPUDataWarehouse::testAndSetAwaitingGhostDataOnGPU( ) Variable %s not found.\n", label);
exit(-1);
return false;
}
}
atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0);
if (((oldVarStatus & AWAITING_GHOST_COPY) == AWAITING_GHOST_COPY) || ((oldVarStatus & VALID_WITH_GHOSTS) == VALID_WITH_GHOSTS)) {
//Something else already took care of it. So this task won't manage it.
return false;
} else {
//Attempt to claim we'll manage the ghost cells for this variable. If the claim fails go back into our loop and recheck
atomicDataStatus newVarStatus = oldVarStatus | AWAITING_GHOST_COPY;
allocating = __sync_bool_compare_and_swap(status, oldVarStatus, newVarStatus);
}
}
return true;
}
//______________________________________________________________________
//
//returns false if something else already claimed to copy or has copied data into the GPU.
//returns true if we are the ones to manage this variable's ghost data.
__host__ bool
GPUDataWarehouse::testAndSetCopyingIntoGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
bool copyingin = false;
atomicDataStatus *status;
while (!copyingin) {
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
//get the address
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
status = &(varPointers->operator[](lpml).atomicStatusInGpuMemory);
}
else {
printf("ERROR:\nGPUDataWarehouse::testAndSetCopyingIntoGPU( ) Variable %s not found.\n", label);
exit(-1);
return false;
}
}
atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0);
if (oldVarStatus == UNALLOCATED) {
printf("ERROR:\nGPUDataWarehouse::testAndSetCopyingIntoGPU( ) Variable %s is unallocated.\n", label);
exit(-1);
}
if (((oldVarStatus & COPYING_IN) == COPYING_IN) ||
((oldVarStatus & VALID) == VALID) ||
((oldVarStatus & VALID_WITH_GHOSTS) == VALID_WITH_GHOSTS)) {
//Something else already took care of it. So this task won't manage it.
return false;
} else {
//Attempt to claim we'll manage the copy into the GPU for this variable. If the claim fails go back into our loop and recheck
atomicDataStatus newVarStatus = oldVarStatus | COPYING_IN;
copyingin = __sync_bool_compare_and_swap(status, oldVarStatus, newVarStatus);
}
}
return true;
}
//______________________________________________________________________
//
//returns false if something else already claimed to copy or has copied data into the CPU.
//returns true if we are the ones to manage this variable's ghost data.
__host__ bool
GPUDataWarehouse::testAndSetCopyingIntoCPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
bool copyingin = false;
atomicDataStatus *status;
while (!copyingin) {
var_monitor var_read_lock { Uintah::CrowdMonitor<var_tag>::READER };
{
//get the address
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
status = &(varPointers->operator[](lpml).atomicStatusInHostMemory);
}
else {
printf("ERROR:\nGPUDataWarehouse::testAndSetCopyingIntoCPU( ) Variable %s not found.\n", label);
exit(-1);
return false;
}
}
atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0);
//We don't have good tracking of CPU vars at the moment.
//if (oldVarStatus == UNALLOCATED) {
// printf("ERROR:\nGPUDataWarehouse::testAndSetCopyingIntoCPU( ) Variable %s is unallocated.\n", label);
// exit(-1);
//}
if (((oldVarStatus & COPYING_IN) == COPYING_IN) ||
((oldVarStatus & VALID) == VALID) ||
((oldVarStatus & VALID_WITH_GHOSTS) == VALID_WITH_GHOSTS)) {
//Something else already took care of it. So this task won't manage it.
return false;
} else {
//Attempt to claim we'll manage the copy into the CPU for this variable. If the claim fails go back into our loop and recheck
atomicDataStatus newVarStatus = oldVarStatus | COPYING_IN;
copyingin = __sync_bool_compare_and_swap(status, oldVarStatus, newVarStatus);
}
}
return true;
}
//______________________________________________________________________
//
//returns false if something else already claimed to copy or has copied data into the GPU.
//returns true if we are the ones to manage this variable's ghost data.
__host__ bool
GPUDataWarehouse::testAndSetCopyingIntoGPUStaging(char const* label, int patchID, int matlIndx, int levelIndx, int3 offset, int3 size)
{
bool copyingin = false;
atomicDataStatus *status;
while (!copyingin) {
//get the address
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
std::map<labelPatchMatlLevel, allVarPointersInfo>::iterator it = varPointers->find(lpml);
if (it != varPointers->end()) {
stagingVar sv;
sv.device_offset = offset;
sv.device_size = size;
std::map<stagingVar, stagingVarInfo>::iterator staging_it = it->second.stagingVars.find(sv);
if (staging_it != it->second.stagingVars.end()) {
status = &(staging_it->second.atomicStatusInGpuMemory);
}
else {
printf("ERROR:\nGPUDataWarehouse::testAndSetCopyingIntoGPUStaging( ) Staging variable %s not found.\n", label);
exit(-1);
return false;
}
}
else {
printf("ERROR:\nGPUDataWarehouse::testAndSetCopyingIntoGPUStaging( ) Variable %s not found.\n", label);
exit(-1);
return false;
}
}
atomicDataStatus oldVarStatus = __sync_or_and_fetch(status, 0);
if (oldVarStatus == UNALLOCATED) {
printf("ERROR:\nGPUDataWarehouse::testAndSetCopyingIntoGPUStaging( ) Variable %s is unallocated.\n", label);
exit(-1);
} else if ((oldVarStatus & VALID_WITH_GHOSTS) == VALID_WITH_GHOSTS) {
printf("ERROR:\nGPUDataWarehouse::testAndSetCopyingIntoGPUStaging( ) Variable %s is marked as valid with ghosts, that should never happen with staging vars.\n", label);
exit(-1);
} else if (((oldVarStatus & COPYING_IN) == COPYING_IN) ||
((oldVarStatus & VALID) == VALID)) {
//Something else already took care of it. So this task won't manage it.
return false;
} else {
//Attempt to claim we'll manage the copy into the GPU staging var for this variable. If the claim fails go back into our loop and recheck
atomicDataStatus newVarStatus = oldVarStatus | COPYING_IN;
copyingin = __sync_bool_compare_and_swap(status, oldVarStatus, newVarStatus);
}
}
return true;
}
//______________________________________________________________________
//
__host__ bool
GPUDataWarehouse::isValidWithGhostsOnGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
var_monitor var_read_lock{ Uintah::CrowdMonitor<var_tag>::READER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
bool retVal = ((__sync_fetch_and_or(&(varPointers->operator[](lpml).atomicStatusInGpuMemory), 0) & VALID_WITH_GHOSTS)
== VALID_WITH_GHOSTS);
return retVal;
}
else {
return false;
}
}
}
//______________________________________________________________________
//
__host__ void
GPUDataWarehouse::setValidWithGhostsOnGPU(char const* label, int patchID, int matlIndx, int levelIndx)
{
var_monitor var_write_lock{ Uintah::CrowdMonitor<var_tag>::WRITER };
{
labelPatchMatlLevel lpml(label, patchID, matlIndx, levelIndx);
if (varPointers->find(lpml) != varPointers->end()) {
//make sure the valid is still turned on
__sync_or_and_fetch(&(varPointers->operator[](lpml).atomicStatusInGpuMemory), VALID);
//turn off AWAITING_GHOST_COPY
__sync_and_and_fetch(&(varPointers->operator[](lpml).atomicStatusInGpuMemory), ~AWAITING_GHOST_COPY);
//turn on VALID_WITH_GHOSTS
__sync_or_and_fetch(&(varPointers->operator[](lpml).atomicStatusInGpuMemory), VALID_WITH_GHOSTS);
}
else {
printf("host setValidWithGhostsOnGPU unknown variable %s on GPUDataWarehouse\n", label);
exit(-1);
}
}
}
//______________________________________________________________________
//
__device__ void
GPUDataWarehouse::print()
{
#ifdef __CUDA_ARCH__
__syncthreads();
if( isThread0_Blk0() ){
printf("\nVariables in GPUDataWarehouse\n");
for (int i = 0; i < d_numVarDBItems; i++) {
dataItem me = d_varDB[i];
printf(" %-15s matl: %i, patchID: %i, L-%i, size:[%i,%i,%i] pointer: %p\n", me.label, me.matlIndx,
me.domainID, me.levelIndx, me.var_size.x, me.var_size.y, me.var_size.z, me.var_ptr);
}
__syncthreads();
printThread();
printBlock();
printf("\n");
}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::printError(const char* msg, const char* methodName, char const* label, int patchID, int matlIndx, int levelIndx )
{
#ifdef __CUDA_ARCH__
__syncthreads();
if( isThread0() ){
if (label[0] == '\0') {
printf(" \nERROR GPU-side: GPUDataWarehouse::%s() - %s\n", methodName, msg );
} else {
printf(" \nERROR GPU-side: GPUDataWarehouse::%s(), label: \"%s\", patch: %i, matlIndx: %i, levelIndx: %i - %s\n", methodName, label, patchID, matlIndx, levelIndx, msg);
}
//Should this just loop through the variable database and print out only items with a
//levelIndx value greater than zero? -- Brad
//for (int i = 0; i < d_numLevelItems; i++) {
// printf(" Available levelDB labels(%i): \"%-15s\" matl: %i, L-%i \n", d_numLevelItems, d_levelDB[i].label, d_levelDB[i].matlIndx, d_levelDB[i].levelIndx);
// }
__syncthreads();
printThread();
printBlock();
// we know this is fatal and why, so just stop kernel execution
__threadfence();
asm("trap;");
}
#else
//__________________________________
// CPU code
if (label[0] == '\0') {
printf(" \nERROR host-side: GPUDataWarehouse::%s() - %s\n", methodName, msg );
} else {
printf(" \nERROR host-side: GPUDataWarehouse::%s(), label: \"%s\", patch: %i, matlIndx: %i, levelIndx: %i - %s\n", methodName, label, patchID, matlIndx, levelIndx, msg);
}
//for (int i = 0; i < d_numLevelItems; i++) {
// printf(" Available levelDB labels(%i): \"%-15s\" matl: %i, L-%i \n", d_numLevelItems, d_levelDB[i].label, d_levelDB[i].matlIndx, d_levelDB[i].levelIndx);
//}
exit(-1);
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::printGetLevelError(const char* msg, char const* label, int levelIndx, int matlIndx)
{
#ifdef __CUDA_ARCH__
__syncthreads();
if( isThread0() ){
printf(" \nERROR: %s( \"%s\", levelIndx: %i, matl: %i) unknown variable\n", msg, label, levelIndx, matlIndx);
//Should this just loop through the variable database and print out only items with a
//levelIndx value greater than zero? -- Brad
//for (int i = 0; i < d_numLevelItems; i++) {
// printf(" Available levelDB labels(%i): \"%-15s\" matl: %i, L-%i \n", d_numLevelItems, d_levelDB[i].label, d_levelDB[i].matlIndx, d_levelDB[i].levelIndx);
// }
__syncthreads();
printThread();
printBlock();
// we know this is fatal and why, so just stop kernel execution
__threadfence();
asm("trap;");
}
#else
//__________________________________
// CPU code
printf(" \nERROR: %s( \"%s\", levelIndx: %i, matl: %i) unknown variable\n", msg, label, levelIndx, matlIndx);
//for (int i = 0; i < d_numLevelItems; i++) {
// printf(" Available levelDB labels(%i): \"%-15s\" matl: %i, L-%i \n", d_numLevelItems, d_levelDB[i].label, d_levelDB[i].matlIndx, d_levelDB[i].levelIndx);
//}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void
GPUDataWarehouse::printGetError(const char* msg, char const* label, int levelIndx, int patchID, int matlIndx)
{
#ifdef __CUDA_ARCH__
__syncthreads();
if( isThread0() ) {
printf(" \nERROR: %s( \"%s\", levelIndx: %i, patchID: %i, matl: %i) unknown variable\n",
msg, label, levelIndx, patchID, matlIndx);
for (int i = 0; i < d_numVarDBItems; i++) {
printf(" Available varDB labels(%i of %i): \"%-15s\" matl: %i, patchID: %i, level: %i\n", i, d_numVarDBItems, d_varDB[i].label, d_varDB[i].matlIndx,
d_varDB[i].domainID, d_varDB[i].levelIndx);
}
__syncthreads();
printThread();
printBlock();
printf("\n");
// we know this is fatal and why, so just stop kernel execution
__threadfence();
asm("trap;");
}
#else
//__________________________________
// CPU code
printf(" \nERROR: %s( \"%s\", levelIndx: %i, patchID: %i, matl: %i) unknown variable in DW %s\n",
msg, label, levelIndx, patchID, matlIndx, _internalName);
for (int i = 0; i < d_numVarDBItems; i++) {
printf(" Available varDB labels(%i): \"%-15s\" matl: %i, patchID: %i, level: %i\n", d_numVarDBItems, d_varDB[i].label, d_varDB[i].matlIndx,
d_varDB[i].domainID, d_varDB[i].levelIndx);
}
#endif
}
//______________________________________________________________________
//
HOST_DEVICE void*
GPUDataWarehouse::getPlacementNewBuffer()
{
#ifdef __CUDA_ARCH__
printf("GPUDataWarehouse::getPlacementNewBuffer() not for device code\n");
return NULL;
#else
return placementNewBuffer;
#endif
}
|
df133cb9c40e0159e4629ee285631162be29f1e0.hip | // !!! This is a file automatically generated by hipify!!!
// includes, system
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes CUDA
#include <hip/hip_runtime.h>
// includes, project
#include <hip/device_functions.h>
#include <device_launch_parameters.h>
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples
//define parameter
#define pi 3.1415
#define MAXCUDADEVICES 1
#define threadNum 246
#define transNum_Host *paraInt_Host
#define inhibitor_Host *(paraInt_Host + 1)
#define lgLength_Host *(paraInt_Host + 2)
#define datalength_Host *(paraInt_Host + 3)
#define pickLength_Host *(paraInt_Host + 4)
#define numZ_Host *(paraInt_Host + 5)
#define numX_Host *(paraInt_Host + 6)
#define delayCoef_Host *paraFloat_Host
#define acoustVel_Host *(paraFloat_Host + 1)
#define pitch_Host *(paraFloat_Host + 2)
#define pSize_Host *(paraFloat_Host + 3)
#define sampFreq_Host *(paraFloat_Host + 4)
#define angleAperture_Host *(paraFloat_Host + 5)
#define transNum_Dev *paraInt_Dev
#define inhibitor_Dev *(paraInt_Dev + 1)
#define lgLength_Dev *(paraInt_Dev + 2)
#define datalength_Dev *(paraInt_Dev + 3)
#define pickLength_Dev *(paraInt_Dev + 4)
#define numZ_Dev *(paraInt_Dev + 5)
#define numX_Dev *(paraInt_Dev + 6)
#define delayCoef_Dev *paraFloat_Dev
#define acoustVel_Dev *(paraFloat_Dev + 1)
#define pitch_Dev *(paraFloat_Dev + 2)
#define pSize_Dev *(paraFloat_Dev + 3)
#define sampFreq_Dev *(paraFloat_Dev + 4)
#define angleAperture_Dev *(paraFloat_Dev + 5)
int *paraInt_Host, *paraInt_Dev, *krev;
float *paraFloat_Host, *paraFloat_Dev, *w_real, *w_imag, *data_Dev, *imgRecons_Dev, *dataPick, *y_real, *y_imag;
// paraInt : 0-transElement, 1-inhibitor, 2-lgLength, 3-dataLength
// paraInt_Host/_Dev : 0-transElement, 1-inhibitor, 2-lgLength, 3-dataLength, 4-pickLength, 5-numZ, 6-numX
// paraFloat : 0-delayCoef, 1-acoustVel, 2-pitch, 3-pixelSize, 4-samFreq
// paraFloat_Host/_Dev : 0-delayCoef, 1-acoustVel, 2-pitch, 3-pixelSize, 4-samFreq, 5-angleAperture
extern "C" void initcudas(int *paraInt, float *paraFloat, float *data, int MAXZ_host, int MAXX_host)
{
paraInt_Host = (int *)malloc(7 * sizeof(int));
memcpy(paraInt_Host, paraInt, 4 * sizeof(int));
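//pickLength = 2^lgLength, the FFT window length picked around each delayed sample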
*(paraInt_Host + 4) = 1;
*(paraInt_Host + 4) <<= *(paraInt + 2);
*(paraInt_Host + 5) = MAXZ_host;
*(paraInt_Host + 6) = MAXX_host;
paraFloat_Host = (float *)malloc(6 * sizeof(float));
memcpy(paraFloat_Host, paraFloat, 5 * sizeof(float));
*(paraFloat_Host + 5) = 0.5;
// imgSize_Dev
checkCudaErrors(hipSetDevice(0));
checkCudaErrors(hipMalloc((void**)&(paraInt_Dev), 7 * sizeof(int)));
checkCudaErrors(hipMalloc((void**)&(paraFloat_Dev), 6 * sizeof(float)));
checkCudaErrors(hipMemcpy(paraInt_Dev, paraInt_Host, 7 * sizeof(int), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(paraFloat_Dev, paraFloat_Host, 6 * sizeof(float), hipMemcpyHostToDevice));
printf_s("Device parameter setting...\n");
// float*_Dev malloc
checkCudaErrors(hipMalloc((void**)&(data_Dev), transNum_Host*datalength_Host*sizeof(float)));
checkCudaErrors(hipMalloc((void**)&(imgRecons_Dev), numZ_Host*numX_Host*sizeof(float)));
checkCudaErrors(hipMalloc((void**)&(dataPick), pickLength_Host*threadNum*numX_Host*sizeof(float)));
checkCudaErrors(hipMalloc((void**)&(y_real), pickLength_Host*threadNum*numX_Host*sizeof(float)));
checkCudaErrors(hipMalloc((void**)&(y_imag), pickLength_Host*threadNum*numX_Host*sizeof(float)));
checkCudaErrors(hipMalloc((void**)&(w_real), (pickLength_Host - 1)*sizeof(float)));
checkCudaErrors(hipMalloc((void**)&(w_imag), (pickLength_Host - 1)*sizeof(float)));
// int*_Dev malloc
checkCudaErrors(hipMalloc((void**)&(krev), pickLength_Host*sizeof(int)));
//precompute FFT parameters: bit-reversal permutation table and twiddle factors
int *krev_Host = (int *)malloc(pickLength_Host*sizeof(int));
for (int k = 0; k < pickLength_Host; ++k)
{
int r = k;
*(krev_Host + k) = (r & 0x1);
for (int j = 1; j < lgLength_Host; ++j)
{
*(krev_Host + k) = (*(krev_Host + k)) << 1;
r = r >> 1;
if (r & 0x1) ++(*(krev_Host + k));
}
}
checkCudaErrors(hipMemcpy(krev, krev_Host, pickLength_Host*sizeof(int), hipMemcpyHostToDevice));
free(krev_Host);
float *wreal_Host = (float *)malloc((pickLength_Host - 1)*sizeof(float)),
*wimag_Host = (float *)malloc((pickLength_Host - 1)*sizeof(float));
int m = 1;
float wm_real, wm_imag, w_realRec, w_imagRec, *wreal_now = wreal_Host, *wimag_now = wimag_Host;
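//precompute the twiddle factors w_m^j = exp(-2*pi*i*j/m) for every FFT stage, stored stage after stage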
for (int s = 1; s <= lgLength_Host; ++s)
{
m *= 2;
wm_real = cos(2 * pi * 1 / m);
wm_imag = -sin(2 * pi * 1 / m);
w_realRec = 1;
w_imagRec = 0;
for (int j = 0; j < (m / 2); ++j)
{
//w = w * wm = t * wm;
*(wreal_now + j) = w_realRec;
*(wimag_now + j) = w_imagRec;
w_realRec = *(wreal_now + j)*wm_real - *(wimag_now + j)*wm_imag;
w_imagRec = *(wreal_now + j)*wm_imag + *(wimag_now + j)*wm_real;
}
wreal_now += m / 2;
wimag_now += m / 2;
}
checkCudaErrors(hipMemcpy(w_real, wreal_Host, (pickLength_Host - 1)*sizeof(float), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(w_imag, wimag_Host, (pickLength_Host - 1)*sizeof(float), hipMemcpyHostToDevice));
free(wreal_Host);
free(wimag_Host);
// copy host data to device
checkCudaErrors(hipMemcpy(data_Dev, data, transNum_Host*datalength_Host*sizeof(float), hipMemcpyHostToDevice));
}
extern "C" void clearcudas()
{
checkCudaErrors(hipFree(paraInt_Dev));
checkCudaErrors(hipFree(paraFloat_Dev));
checkCudaErrors(hipFree(data_Dev));
checkCudaErrors(hipFree(imgRecons_Dev));
checkCudaErrors(hipFree(dataPick));
checkCudaErrors(hipFree(y_real));
checkCudaErrors(hipFree(y_imag));
checkCudaErrors(hipFree(w_real));
checkCudaErrors(hipFree(w_imag));
checkCudaErrors(hipFree(krev));
free(paraInt_Host);
free(paraFloat_Host);
}
__device__ void getEvelope(int *paraInt_Dev, float *paraFloat_Dev, int *krev, float *w_real, float *w_imag, float *x, float *y_real, float *y_imag)
{
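// computes the (squared, 1/N^2-scaled) envelope of x in place: forward FFT, analytic-signal construction
// (Hilbert transform), inverse FFT via conjugation, then magnitude squared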
// radix-2 FFT: bit-reversal reordering followed by butterfly stages
float *px = x;
for (int k = 0; k < pickLength_Dev; ++k)
{
*(y_real + *(krev + k)) = *px;
*(y_imag + *(krev + k)) = 0;
++px;
}
int m = 1;
float t_real, t_imag, u_real, u_imag, *wreal_now = w_real, *wimag_now = w_imag;
for (int s = 1; s <= lgLength_Dev; ++s)
{
m *= 2;
for (int k = 0; k < pickLength_Dev; k += m)
{
for (int j = 0; j < (m / 2); ++j)
{
//t = w * (*(y+k+j+m/2))
t_real = *(wreal_now + j)*(*(y_real + k + j + m / 2)) - *(wimag_now + j)*(*(y_imag + k + j + m / 2));
t_imag = *(wreal_now + j)*(*(y_imag + k + j + m / 2)) + *(wimag_now + j)*(*(y_real + k + j + m / 2));
u_real = *(y_real + k + j);
u_imag = *(y_imag + k + j);
*(y_real + k + j) = u_real + t_real;
*(y_imag + k + j) = u_imag + t_imag;
*(y_real + k + j + m / 2) = u_real - t_real;
*(y_imag + k + j + m / 2) = u_imag - t_imag;
}
}
wreal_now += m / 2;
wimag_now += m / 2;
}
// Hilbert transform: build the analytic signal by doubling positive frequencies and zeroing negative ones
int count = 0;
for (count = 1; count < pickLength_Dev / 2; ++count) //pickLength must be even
{
(*(y_real + count)) *= 2;
(*(y_imag + count)) *= 2;
}
for (count += 1; count < pickLength_Dev; ++count)
{
(*(y_real + count)) *= 0;
(*(y_imag + count)) *= 0;
}
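// prepare the inverse FFT: conjugate the spectrum while applying the bit-reversal permutation so the
// forward butterflies below compute the inverse transform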
for (int k = 0; k < pickLength_Dev; ++k)
{
count = *(krev + k);
if (count == k)
{
*(y_imag + k) = -(*(y_imag + k));
}
else if (k < count)
{
t_real = *(y_real + k);
t_imag = *(y_imag + k);
*(y_real + k) = *(y_real + count);
*(y_imag + k) = -(*(y_imag + count));
*(y_real + count) = t_real;
*(y_imag + count) = -t_imag;
}
}
m = 1;
wreal_now = w_real;
wimag_now = w_imag;
for (int s = 1; s <= lgLength_Dev; ++s)
{
m *= 2;
for (int k = 0; k < pickLength_Dev; k += m)
{
for (int j = 0; j < (m / 2); ++j)
{
//t = w * (*(y+k+j+m/2))
t_real = *(wreal_now + j)*(*(y_real + k + j + m / 2)) - *(wimag_now + j)*(*(y_imag + k + j + m / 2));
t_imag = *(wreal_now + j)*(*(y_imag + k + j + m / 2)) + *(wimag_now + j)*(*(y_real + k + j + m / 2));
u_real = *(y_real + k + j);
u_imag = *(y_imag + k + j);
*(y_real + k + j) = u_real + t_real;
*(y_imag + k + j) = u_imag + t_imag;
*(y_real + k + j + m / 2) = u_real - t_real;
*(y_imag + k + j + m / 2) = u_imag - t_imag;
}
}
wreal_now += m / 2;
wimag_now += m / 2;
}
int div_len = pickLength_Dev*pickLength_Dev;
for (int i = 0; i < pickLength_Dev; ++i)
{
*(x + i) = (*(y_real + i))*(*(y_real + i)) + (*(y_imag + i))*(*(y_imag + i));
*(x + i) /= div_len;
}
}
__global__ void PArecon(int *paraInt_Dev, float *paraFloat_Dev, float *data_Dev, float *imgRecons_Dev, float *dataPick, int *krev, float *w_real, float *w_imag, float *y_real, float *y_imag, int zdepth, int zstart)
{
// access thread id
const unsigned int tidx = threadIdx.x;
// access block id
const unsigned int bidx = blockIdx.x;
if (bidx < zstart)
{
return;
}
float Distan;
float Y, Z, y;
int POINTER, pointer = pickLength_Dev*((bidx % threadNum)*numX_Dev + tidx);
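//each (block mod threadNum, thread) pair owns a private pickLength-sample scratch window in dataPick/y_real/y_imag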
float *pickBeg = dataPick + pointer;
int pick_offset = pickLength_Dev / 2;
Z = bidx * pSize_Dev;
Y = tidx * pSize_Dev;
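//pixel position in physical units; the receive aperture below is limited to elements with |Y - y| <= Z * angleAperture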
int y_start = (int)((Y - Z*angleAperture_Dev) / pitch_Dev - 0.5);
if (y_start < 0)
{
y_start = 0;
}
int y_end = (int)((Y + Z*angleAperture_Dev) / pitch_Dev - 0.5);
if (y_end > transNum_Dev - 1)
{
y_end = transNum_Dev - 1;
}
for (int len = 0; len < pickLength_Dev; ++len)
{
*(pickBeg + len) = 0;
}
int lenMax;
for (int bidy = y_start; bidy <= y_end; ++bidy)
{
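//delay-and-sum: convert the element-to-pixel distance into a sample index and accumulate a pickLength-long window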
y = (bidy + 0.5) * pitch_Dev;
Distan = sqrt((Y - y)*(Y - y) + Z*Z);
POINTER = (int)((Distan / acoustVel_Dev - delayCoef_Dev)*sampFreq_Dev + 0.5) - pick_offset;
lenMax = pickLength_Dev;
if (POINTER + lenMax >= datalength_Dev){
lenMax = datalength_Dev - 1 - POINTER;
}
if (POINTER >= 0 && POINTER < datalength_Dev)
{
POINTER = POINTER + bidy*datalength_Dev;
for (int len = 0; len < lenMax; ++len)
{
*(pickBeg + len) += *(data_Dev + POINTER + len);
}
}
}
getEvelope(paraInt_Dev, paraFloat_Dev, krev, w_real, w_imag, pickBeg, y_real + pointer, y_imag + pointer);
lenMax = 0;
for (int len = 1; len < pickLength_Dev - 1; ++len)
{
if (*(pickBeg + len) > *(pickBeg + lenMax))
{
lenMax = len;
}
}
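//weight the center sample by (center/peak)^(inhibitor-1) to suppress low-coherence contributions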
if (*(pickBeg + lenMax) > 0)
{
*(imgRecons_Dev + tidx*zdepth + bidx) = *(pickBeg + pick_offset);
for (int i = 1; i < inhibitor_Dev; ++i)
{
*(imgRecons_Dev + tidx*zdepth + bidx) *= *(pickBeg + pick_offset);
*(imgRecons_Dev + tidx*zdepth + bidx) /= *(pickBeg + lenMax);
}
}
__syncthreads();
}
__host__ void parecon(int cudadeviceindex, int zdepth, int zstart, float *imgRecons)
{
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
checkCudaErrors(hipSetDevice(cudadeviceindex));
// setup execution parameters
dim3 grids(numZ_Host, 1, 1);
dim3 threads(numX_Host, 1, 1);
// execute the kernel
PArecon << < grids, threads >> >(paraInt_Dev, paraFloat_Dev, data_Dev, imgRecons_Dev, dataPick, krev, w_real, w_imag, y_real, y_imag, zdepth, zstart);
// check if kernel execution generated an error
getLastCudaError("Kernel execution failed");
// copy result from device to host
checkCudaErrors(hipMemcpy(imgRecons, imgRecons_Dev, numX_Host*zdepth*sizeof(float), hipMemcpyDeviceToHost));
// check if kernel execution generated an error
getLastCudaError("Kernel execution failed");
}
void MultiDASEnv(int *paraInt, float *paraFloat, float *data, float *imgRecons, int MAXZ_host, int MAXX_host)
{
int devID = 0;
printf_s("Initializing...\n");
initcudas(paraInt, paraFloat, data, MAXZ_host, MAXX_host);
printf_s("Reconstructing...\n");
parecon(devID, MAXZ_host, 0, imgRecons);
printf_s("Clearing...\n");
clearcudas();
}
int main()
{
using namespace std;
int *paraInt = new int[4];
paraInt[0] = 128;
paraInt[1] = 3;
paraInt[2] = 4;
paraInt[3] = 1024;
float *paraFloat = new float[5];
paraFloat[0] = 0;
paraFloat[1] = 1.54;
paraFloat[2] = 0.3;
paraFloat[3] = 0.1;
paraFloat[4] = 40;
int MAXX_host = 384,
MAXZ_host = (int)(paraInt[3] * paraFloat[1] / paraFloat[4] / paraFloat[3]);
ifstream fin("C:\\Users\\MX\\Documents\\research\\PA Reconstructions\\GPUProg\\template\\signal\\Rf_032918_113516_OBP_PA_64_15331342.txt");
float *data = (float *)malloc(paraInt[0] * paraInt[3] * sizeof(float));
for (int i = 0; i < paraInt[0] * paraInt[3]; ++i)
{
fin >> *(data + i);
}
fin.close();
// paraInt : 0-transElement, 1-inhibitor, 2-lgLength, 3-dataLength
// paraFloat : 0-delayCoef, 1-acoustVel, 2-pitch, 3-pixelSize, 4-samFreq
printf_s("PA reconstructing...\n");
float *imgRecons = (float *)malloc(MAXX_host*MAXZ_host*sizeof(float));
MultiDASEnv(paraInt, paraFloat, data, imgRecons, MAXZ_host, MAXX_host);
ofstream fout("C:\\Users\\MX\\Documents\\research\\PA Reconstructions\\GPUProg\\template\\recons\\fig_recons.txt");
for (int i = 0; i < MAXZ_host*MAXX_host; ++i)
{
fout << *(imgRecons + i);
fout << " ";
}
fout.close();
free(data);
free(imgRecons);
}
| df133cb9c40e0159e4629ee285631162be29f1e0.cu | // includes, system
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes CUDA
#include <cuda_runtime.h>
// includes, project
#include <device_functions.h>
#include <device_launch_parameters.h>
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples
//define parameter
#define pi 3.1415
#define MAXCUDADEVICES 1
#define threadNum 246
#define transNum_Host *paraInt_Host
#define inhibitor_Host *(paraInt_Host + 1)
#define lgLength_Host *(paraInt_Host + 2)
#define datalength_Host *(paraInt_Host + 3)
#define pickLength_Host *(paraInt_Host + 4)
#define numZ_Host *(paraInt_Host + 5)
#define numX_Host *(paraInt_Host + 6)
#define delayCoef_Host *paraFloat_Host
#define acoustVel_Host *(paraFloat_Host + 1)
#define pitch_Host *(paraFloat_Host + 2)
#define pSize_Host *(paraFloat_Host + 3)
#define sampFreq_Host *(paraFloat_Host + 4)
#define angleAperture_Host *(paraFloat_Host + 5)
#define transNum_Dev *paraInt_Dev
#define inhibitor_Dev *(paraInt_Dev + 1)
#define lgLength_Dev *(paraInt_Dev + 2)
#define datalength_Dev *(paraInt_Dev + 3)
#define pickLength_Dev *(paraInt_Dev + 4)
#define numZ_Dev *(paraInt_Dev + 5)
#define numX_Dev *(paraInt_Dev + 6)
#define delayCoef_Dev *paraFloat_Dev
#define acoustVel_Dev *(paraFloat_Dev + 1)
#define pitch_Dev *(paraFloat_Dev + 2)
#define pSize_Dev *(paraFloat_Dev + 3)
#define sampFreq_Dev *(paraFloat_Dev + 4)
#define angleAperture_Dev *(paraFloat_Dev + 5)
int *paraInt_Host, *paraInt_Dev, *krev;
float *paraFloat_Host, *paraFloat_Dev, *w_real, *w_imag, *data_Dev, *imgRecons_Dev, *dataPick, *y_real, *y_imag;
// paraInt : 0-transElement, 1-inhibitor, 2-lgLength, 3-dataLength
// paraInt_Host/_Dev : 0-transElement, 1-inhibitor, 2-lgLength, 3-dataLength, 4-pickLength, 5-numZ, 6-numX
// paraFloat : 0-delayCoef, 1-acoustVel, 2-pitch, 3-pixelSize, 4-samFreq
// paraFloat_Host/_Dev : 0-delayCoef, 1-acoustVel, 2-pitch, 3-pixelSize, 4-samFreq, 5-angleAperture
extern "C" void initcudas(int *paraInt, float *paraFloat, float *data, int MAXZ_host, int MAXX_host)
{
paraInt_Host = (int *)malloc(7 * sizeof(int));
memcpy(paraInt_Host, paraInt, 4 * sizeof(int));
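//pickLength = 2^lgLength, the FFT window length picked around each delayed sample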
*(paraInt_Host + 4) = 1;
*(paraInt_Host + 4) <<= *(paraInt + 2);
*(paraInt_Host + 5) = MAXZ_host;
*(paraInt_Host + 6) = MAXX_host;
paraFloat_Host = (float *)malloc(6 * sizeof(float));
memcpy(paraFloat_Host, paraFloat, 5 * sizeof(float));
*(paraFloat_Host + 5) = 0.5;
// imgSize_Dev
checkCudaErrors(cudaSetDevice(0));
checkCudaErrors(cudaMalloc((void**)&(paraInt_Dev), 7 * sizeof(int)));
checkCudaErrors(cudaMalloc((void**)&(paraFloat_Dev), 6 * sizeof(float)));
checkCudaErrors(cudaMemcpy(paraInt_Dev, paraInt_Host, 7 * sizeof(int), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(paraFloat_Dev, paraFloat_Host, 6 * sizeof(float), cudaMemcpyHostToDevice));
printf_s("Device parameter setting...\n");
// float*_Dev malloc
checkCudaErrors(cudaMalloc((void**)&(data_Dev), transNum_Host*datalength_Host*sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&(imgRecons_Dev), numZ_Host*numX_Host*sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&(dataPick), pickLength_Host*threadNum*numX_Host*sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&(y_real), pickLength_Host*threadNum*numX_Host*sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&(y_imag), pickLength_Host*threadNum*numX_Host*sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&(w_real), (pickLength_Host - 1)*sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&(w_imag), (pickLength_Host - 1)*sizeof(float)));
// int*_Dev malloc
checkCudaErrors(cudaMalloc((void**)&(krev), pickLength_Host*sizeof(int)));
//precompute FFT parameters: bit-reversal permutation table and twiddle factors
int *krev_Host = (int *)malloc(pickLength_Host*sizeof(int));
for (int k = 0; k < pickLength_Host; ++k)
{
int r = k;
*(krev_Host + k) = (r & 0x1);
for (int j = 1; j < lgLength_Host; ++j)
{
*(krev_Host + k) = (*(krev_Host + k)) << 1;
r = r >> 1;
if (r & 0x1) ++(*(krev_Host + k));
}
}
checkCudaErrors(cudaMemcpy(krev, krev_Host, pickLength_Host*sizeof(int), cudaMemcpyHostToDevice));
free(krev_Host);
float *wreal_Host = (float *)malloc((pickLength_Host - 1)*sizeof(float)),
*wimag_Host = (float *)malloc((pickLength_Host - 1)*sizeof(float));
int m = 1;
float wm_real, wm_imag, w_realRec, w_imagRec, *wreal_now = wreal_Host, *wimag_now = wimag_Host;
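//precompute the twiddle factors w_m^j = exp(-2*pi*i*j/m) for every FFT stage, stored stage after stage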
for (int s = 1; s <= lgLength_Host; ++s)
{
m *= 2;
wm_real = cos(2 * pi * 1 / m);
wm_imag = -sin(2 * pi * 1 / m);
w_realRec = 1;
w_imagRec = 0;
for (int j = 0; j < (m / 2); ++j)
{
//w = w * wm = t * wm;
*(wreal_now + j) = w_realRec;
*(wimag_now + j) = w_imagRec;
w_realRec = *(wreal_now + j)*wm_real - *(wimag_now + j)*wm_imag;
w_imagRec = *(wreal_now + j)*wm_imag + *(wimag_now + j)*wm_real;
}
wreal_now += m / 2;
wimag_now += m / 2;
}
checkCudaErrors(cudaMemcpy(w_real, wreal_Host, (pickLength_Host - 1)*sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(w_imag, wimag_Host, (pickLength_Host - 1)*sizeof(float), cudaMemcpyHostToDevice));
free(wreal_Host);
free(wimag_Host);
// copy host data to device
checkCudaErrors(cudaMemcpy(data_Dev, data, transNum_Host*datalength_Host*sizeof(float), cudaMemcpyHostToDevice));
}
extern "C" void clearcudas()
{
checkCudaErrors(cudaFree(paraInt_Dev));
checkCudaErrors(cudaFree(paraFloat_Dev));
checkCudaErrors(cudaFree(data_Dev));
checkCudaErrors(cudaFree(imgRecons_Dev));
checkCudaErrors(cudaFree(dataPick));
checkCudaErrors(cudaFree(y_real));
checkCudaErrors(cudaFree(y_imag));
checkCudaErrors(cudaFree(w_real));
checkCudaErrors(cudaFree(w_imag));
checkCudaErrors(cudaFree(krev));
free(paraInt_Host);
free(paraFloat_Host);
}
__device__ void getEvelope(int *paraInt_Dev, float *paraFloat_Dev, int *krev, float *w_real, float *w_imag, float *x, float *y_real, float *y_imag)
{
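// computes the (squared, 1/N^2-scaled) envelope of x in place: forward FFT, analytic-signal construction
// (Hilbert transform), inverse FFT via conjugation, then magnitude squared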
// radix-2 FFT: bit-reversal reordering followed by butterfly stages
float *px = x;
for (int k = 0; k < pickLength_Dev; ++k)
{
*(y_real + *(krev + k)) = *px;
*(y_imag + *(krev + k)) = 0;
++px;
}
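	// Iterative radix-2 butterflies: stage s combines blocks of length m = 2^s,
	// reading that stage's twiddle factors from the precomputed w_real/w_imag table.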
int m = 1;
float t_real, t_imag, u_real, u_imag, *wreal_now = w_real, *wimag_now = w_imag;
for (int s = 1; s <= lgLength_Dev; ++s)
{
m *= 2;
for (int k = 0; k < pickLength_Dev; k += m)
{
for (int j = 0; j < (m / 2); ++j)
{
//t = w * (*(y+k+j+m/2))
t_real = *(wreal_now + j)*(*(y_real + k + j + m / 2)) - *(wimag_now + j)*(*(y_imag + k + j + m / 2));
t_imag = *(wreal_now + j)*(*(y_imag + k + j + m / 2)) + *(wimag_now + j)*(*(y_real + k + j + m / 2));
u_real = *(y_real + k + j);
u_imag = *(y_imag + k + j);
*(y_real + k + j) = u_real + t_real;
*(y_imag + k + j) = u_imag + t_imag;
*(y_real + k + j + m / 2) = u_real - t_real;
*(y_imag + k + j + m / 2) = u_imag - t_imag;
}
}
wreal_now += m / 2;
wimag_now += m / 2;
}
	// Hilbert transform: double the positive-frequency bins and zero the negative-frequency bins (DC and Nyquist kept) to form the analytic signal
int count = 0;
for (count = 1; count < pickLength_Dev / 2; ++count) //pickLength must be even
{
(*(y_real + count)) *= 2;
(*(y_imag + count)) *= 2;
}
for (count += 1; count < pickLength_Dev; ++count)
{
(*(y_real + count)) *= 0;
(*(y_imag + count)) *= 0;
}
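	// Inverse transform of the one-sided spectrum: re-apply the bit-reversal
	// permutation while conjugating, run the forward butterflies once more, and
	// scale by 1/N^2 below; |y|^2 then gives the squared envelope of the window.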
for (int k = 0; k < pickLength_Dev; ++k)
{
count = *(krev + k);
if (count == k)
{
*(y_imag + k) = -(*(y_imag + k));
}
else if (k < count)
{
t_real = *(y_real + k);
t_imag = *(y_imag + k);
*(y_real + k) = *(y_real + count);
*(y_imag + k) = -(*(y_imag + count));
*(y_real + count) = t_real;
*(y_imag + count) = -t_imag;
}
}
m = 1;
wreal_now = w_real;
wimag_now = w_imag;
for (int s = 1; s <= lgLength_Dev; ++s)
{
m *= 2;
for (int k = 0; k < pickLength_Dev; k += m)
{
for (int j = 0; j < (m / 2); ++j)
{
//t = w * (*(y+k+j+m/2))
t_real = *(wreal_now + j)*(*(y_real + k + j + m / 2)) - *(wimag_now + j)*(*(y_imag + k + j + m / 2));
t_imag = *(wreal_now + j)*(*(y_imag + k + j + m / 2)) + *(wimag_now + j)*(*(y_real + k + j + m / 2));
u_real = *(y_real + k + j);
u_imag = *(y_imag + k + j);
*(y_real + k + j) = u_real + t_real;
*(y_imag + k + j) = u_imag + t_imag;
*(y_real + k + j + m / 2) = u_real - t_real;
*(y_imag + k + j + m / 2) = u_imag - t_imag;
}
}
wreal_now += m / 2;
wimag_now += m / 2;
}
int div_len = pickLength_Dev*pickLength_Dev;
for (int i = 0; i < pickLength_Dev; ++i)
{
*(x + i) = (*(y_real + i))*(*(y_real + i)) + (*(y_imag + i))*(*(y_imag + i));
*(x + i) /= div_len;
}
}
__global__ void PArecon(int *paraInt_Dev, float *paraFloat_Dev, float *data_Dev, float *imgRecons_Dev, float *dataPick, int *krev, float *w_real, float *w_imag, float *y_real, float *y_imag, int zdepth, int zstart)
{
// access thread id
const unsigned int tidx = threadIdx.x;
// access block id
const unsigned int bidx = blockIdx.x;
if (bidx < zstart)
{
return;
}
float Distan;
float Y, Z, y;
int POINTER, pointer = pickLength_Dev*((bidx % threadNum)*numX_Dev + tidx);
float *pickBeg = dataPick + pointer;
int pick_offset = pickLength_Dev / 2;
Z = bidx * pSize_Dev;
Y = tidx * pSize_Dev;
int y_start = (int)((Y - Z*angleAperture_Dev) / pitch_Dev - 0.5);
if (y_start < 0)
{
y_start = 0;
}
int y_end = (int)((Y + Z*angleAperture_Dev) / pitch_Dev - 0.5);
if (y_end > transNum_Dev - 1)
{
y_end = transNum_Dev - 1;
}
for (int len = 0; len < pickLength_Dev; ++len)
{
*(pickBeg + len) = 0;
}
int lenMax;
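	// Delay-and-sum: for every element inside the acceptance angle, convert the
	// element-to-pixel distance into a sample delay and accumulate a
	// pickLength_Dev-sample window centred on that delay into the thread's buffer.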
for (int bidy = y_start; bidy <= y_end; ++bidy)
{
y = (bidy + 0.5) * pitch_Dev;
Distan = sqrt((Y - y)*(Y - y) + Z*Z);
POINTER = (int)((Distan / acoustVel_Dev - delayCoef_Dev)*sampFreq_Dev + 0.5) - pick_offset;
lenMax = pickLength_Dev;
if (POINTER + lenMax >= datalength_Dev){
lenMax = datalength_Dev - 1 - POINTER;
}
if (POINTER >= 0 && POINTER < datalength_Dev)
{
POINTER = POINTER + bidy*datalength_Dev;
for (int len = 0; len < lenMax; ++len)
{
*(pickBeg + len) += *(data_Dev + POINTER + len);
}
}
}
getEvelope(paraInt_Dev, paraFloat_Dev, krev, w_real, w_imag, pickBeg, y_real + pointer, y_imag + pointer);
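	// Weight the centre sample by (centre/maximum)^(inhibitor_Dev-1); pixels whose
	// summed signal does not peak at the expected delay are thereby suppressed.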
lenMax = 0;
for (int len = 1; len < pickLength_Dev - 1; ++len)
{
if (*(pickBeg + len) > *(pickBeg + lenMax))
{
lenMax = len;
}
}
if (*(pickBeg + lenMax) > 0)
{
*(imgRecons_Dev + tidx*zdepth + bidx) = *(pickBeg + pick_offset);
for (int i = 1; i < inhibitor_Dev; ++i)
{
*(imgRecons_Dev + tidx*zdepth + bidx) *= *(pickBeg + pick_offset);
*(imgRecons_Dev + tidx*zdepth + bidx) /= *(pickBeg + lenMax);
}
}
__syncthreads();
}
__host__ void parecon(int cudadeviceindex, int zdepth, int zstart, float *imgRecons)
{
	// select the CUDA device requested by the caller
checkCudaErrors(cudaSetDevice(cudadeviceindex));
// setup execution parameters
dim3 grids(numZ_Host, 1, 1);
dim3 threads(numX_Host, 1, 1);
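	// One block per depth sample (z) and one thread per lateral pixel (x), so
	// numX_Host must not exceed the device's threads-per-block limit.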
// execute the kernel
PArecon << < grids, threads >> >(paraInt_Dev, paraFloat_Dev, data_Dev, imgRecons_Dev, dataPick, krev, w_real, w_imag, y_real, y_imag, zdepth, zstart);
	// check if kernel execution generated an error
getLastCudaError("Kernel execution failed");
// copy result from device to host
checkCudaErrors(cudaMemcpy(imgRecons, imgRecons_Dev, numX_Host*zdepth*sizeof(float), cudaMemcpyDeviceToHost));
	// check if kernel execution generated an error
getLastCudaError("Kernel execution failed");
}
void MultiDASEnv(int *paraInt, float *paraFloat, float *data, float *imgRecons, int MAXZ_host, int MAXX_host)
{
int devID = 0;
printf_s("Initializing...\n");
initcudas(paraInt, paraFloat, data, MAXZ_host, MAXX_host);
printf_s("Reconstructing...\n");
parecon(devID, MAXZ_host, 0, imgRecons);
printf_s("Clearing...\n");
clearcudas();
}
int main()
{
using namespace std;
int *paraInt = new int[4];
paraInt[0] = 128;
paraInt[1] = 3;
paraInt[2] = 4;
paraInt[3] = 1024;
float *paraFloat = new float[5];
paraFloat[0] = 0;
paraFloat[1] = 1.54;
paraFloat[2] = 0.3;
paraFloat[3] = 0.1;
paraFloat[4] = 40;
int MAXX_host = 384,
MAXZ_host = (int)(paraInt[3] * paraFloat[1] / paraFloat[4] / paraFloat[3]);
ifstream fin("C:\\Users\\MX\\Documents\\research\\PA Reconstructions\\GPUProg\\template\\signal\\Rf_032918_113516_OBP_PA_64_15331342.txt");
float *data = (float *)malloc(paraInt[0] * paraInt[3] * sizeof(float));
for (int i = 0; i < paraInt[0] * paraInt[3]; ++i)
{
fin >> *(data + i);
}
fin.close();
// paraInt : 0-transElement, 1-inhibitor, 2-lgLength, 3-dataLength
// paraFloat : 0-delayCoef, 1-acoustVel, 2-pitch, 3-pixelSize, 4-samFreq
printf_s("PA reconstructing...\n");
float *imgRecons = (float *)malloc(MAXX_host*MAXZ_host*sizeof(float));
MultiDASEnv(paraInt, paraFloat, data, imgRecons, MAXZ_host, MAXX_host);
ofstream fout("C:\\Users\\MX\\Documents\\research\\PA Reconstructions\\GPUProg\\template\\recons\\fig_recons.txt");
for (int i = 0; i < MAXZ_host*MAXX_host; ++i)
{
fout << *(imgRecons + i);
fout << " ";
}
fout.close();
free(data);
}
|
c34df5dfbda414c05600360c8b6576e32381c384.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
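  // Caffe stores matrices row-major, so C = A*B is computed as the column-major
  // product C^T = B^T * A^T by swapping the operand order below.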
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <typename Dtype>
__global__ void add_kernel1(const int n, const Dtype* a,
const Dtype* b, const Dtype lambda1, const Dtype lambda2, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index]*lambda1 + b[index]*lambda2;
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add1<float>(const int N, const float* a, const float* b, const float lambda1, const float lambda2,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel1<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b,lambda1,lambda2, y);
}
template <>
void caffe_gpu_add1<double>(const int N, const double* a, const double* b, const double lambda1, const double lambda2,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel1<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b,lambda1,lambda2, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <typename Dtype>
__global__ void my_mul_kernel(const int n, const Dtype* a,
const unsigned int* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <typename Dtype>
__global__ void dtpooling_kernel(const int n, const Dtype* col_buff,
Dtype* weight, Dtype* x, Dtype* x1, Dtype* y, Dtype* y1 , Dtype* output) {
CUDA_KERNEL_LOOP(index, n) {
    // first recover the row and column coordinates from the flat index
int width=46*46;
const int c = index/width;
    const int h = (index - c * width);// row index
    const int w = index % width; // column index
output[index]=weight[0]*x[c]+weight[1]*x1[c]+weight[2]*y[c]+weight[3]*y1[c]+col_buff[index];
// output[index]=col_buff[index];
}
}
template <typename Dtype>
__global__ void obtain_col_kernel(const int n, const Dtype* template1,
int height, int width,int col_index ,Dtype* output) {
CUDA_KERNEL_LOOP(index, n) {
    // first recover the row and column coordinates from the flat index
const int h = index/width;
    const int w = (index - h * width);// column index
if(w==col_index)
{
output[h]=template1[index];
}
}
}
template <typename Dtype>
__global__ void refinenet_kernel(const int n, Dtype* col_buff,
Dtype* weight, Dtype* x, Dtype* x1, Dtype* y, Dtype* y1 , int width, Dtype* output) {
CUDA_KERNEL_LOOP(index, n) {
    // first recover the row and column coordinates from the flat index
const int c = index/width;
    const int h = (index - c * width);// row index
    const int w = index % width; // column index
output[index]=weight[0]*x[c]+weight[1]*x1[c]+weight[2]*y[c]+weight[3]*y1[c]+col_buff[index];
}
}
template <typename Dtype>
__global__ void place_col_kernel(const int n, Dtype* col_buff,
int layer_index,int height, int width, Dtype* output) {
CUDA_KERNEL_LOOP(index, n) {
    // element `index` of col_buff goes to row `index`, column `layer_index` of the output
int current_index=index*width+layer_index;
output[current_index]=col_buff[index];
}
}
template <>
void caffe_gpu_placecol<float>(const int N, float* col_buff, int layer_index,int height, int width, float* output) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( place_col_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, col_buff, layer_index, height, width, output);
}
template <>
void caffe_gpu_placecol<double>(const int N, double* col_buff, int layer_index,int height, int width, double* output) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( place_col_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, col_buff, layer_index, height, width, output);
}
template <>
void caffe_gpu_refinenet<float>(const int N, float* col_buff, float* weight,float* x, float* x1, float* y, float* y1, int width, float* output) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( refinenet_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, col_buff, weight, x, x1, y, y1, width, output);
}
template <>
void caffe_gpu_refinenet<double>(const int N, double* col_buff, double* weight,double* x, double* x1, double* y, double* y1, int width, double* output) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( refinenet_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, col_buff, weight, x, x1, y, y1, width, output);
}
template <>
void caffe_gpu_obtain_col<float>(const int N, const float* template1, int height, int width, int col_index, float* output) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( obtain_col_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, template1, height, width, col_index, output);
}
template <>
void caffe_gpu_obtain_col<double>(const int N, const double* template1, int height, int width, int col_index, double* output) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( obtain_col_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, template1, height, width, col_index, output);
}
template <>
void caffe_gpu_dtpooling<float>(const int N, const float* col_buff, float* weight, float* x,
float* x1, float* y, float* y1, float* output) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( dtpooling_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, col_buff, weight, x, x1, y, y1,output);
}
template <>
void caffe_gpu_dtpooling<double>(const int N, const double* col_buff, double* weight, double* x,
double* x1, double* y, double* y1, double* output) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( dtpooling_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, col_buff, weight, x, x1, y, y1,output);
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul1<float>(const int N, const float* a,
const unsigned int* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( my_mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul1<double>(const int N, const double* a,
const unsigned int* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( my_mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
__global__ void popc_kernel(const int n, const float* a,
const float* b, uint8_t* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = __popc(static_cast<uint32_t>(a[index]) ^
static_cast<uint32_t>(b[index]));
}
}
__global__ void popcll_kernel(const int n, const double* a,
const double* b, uint8_t* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = __popcll(static_cast<uint64_t>(a[index]) ^
static_cast<uint64_t>(b[index]));
}
}
template <>
uint32_t caffe_gpu_hamming_distance<float>(const int n, const float* x,
const float* y) {
// TODO: Fix caffe_gpu_hamming_distance (see failing unit test
// TestHammingDistanceGPU in test_math_functions.cpp).
NOT_IMPLEMENTED;
thrust::device_vector<uint8_t> popcounts(n);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( popc_kernel), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
n, x, y, thrust::raw_pointer_cast(popcounts.data()));
return thrust::reduce(popcounts.begin(), popcounts.end(),
(uint32_t) 0, thrust::plus<uint32_t>());
}
template <>
uint32_t caffe_gpu_hamming_distance<double>(const int n, const double* x,
const double* y) {
// TODO: Fix caffe_gpu_hamming_distance (see failing unit test
// TestHammingDistanceGPU in test_math_functions.cpp).
NOT_IMPLEMENTED;
thrust::device_vector<uint8_t> popcounts(n);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( popcll_kernel), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
n, x, y, thrust::raw_pointer_cast(popcounts.data()));
return thrust::reduce(popcounts.begin(), popcounts.end(),
/* NOLINT_NEXT_LINE(build/include_what_you_use) */
(uint32_t) 0, thrust::plus<uint32_t>());
}
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
| c34df5dfbda414c05600360c8b6576e32381c384.cu | #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
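  // Caffe stores matrices row-major, so C = A*B is computed as the column-major
  // product C^T = B^T * A^T by swapping the operand order below.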
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <typename Dtype>
__global__ void add_kernel1(const int n, const Dtype* a,
const Dtype* b, const Dtype lambda1, const Dtype lambda2, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index]*lambda1 + b[index]*lambda2;
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add1<float>(const int N, const float* a, const float* b, const float lambda1, const float lambda2,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel1<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b,lambda1,lambda2, y);
}
template <>
void caffe_gpu_add1<double>(const int N, const double* a, const double* b, const double lambda1, const double lambda2,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel1<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b,lambda1,lambda2, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <typename Dtype>
__global__ void my_mul_kernel(const int n, const Dtype* a,
const unsigned int* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <typename Dtype>
__global__ void dtpooling_kernel(const int n, const Dtype* col_buff,
Dtype* weight, Dtype* x, Dtype* x1, Dtype* y, Dtype* y1 , Dtype* output) {
CUDA_KERNEL_LOOP(index, n) {
    // first recover the row and column coordinates from the flat index
int width=46*46;
const int c = index/width;
    const int h = (index - c * width);// row index
    const int w = index % width; // column index
output[index]=weight[0]*x[c]+weight[1]*x1[c]+weight[2]*y[c]+weight[3]*y1[c]+col_buff[index];
// output[index]=col_buff[index];
}
}
template <typename Dtype>
__global__ void obtain_col_kernel(const int n, const Dtype* template1,
int height, int width,int col_index ,Dtype* output) {
CUDA_KERNEL_LOOP(index, n) {
    // first recover the row and column coordinates from the flat index
const int h = index/width;
    const int w = (index - h * width);// column index
if(w==col_index)
{
output[h]=template1[index];
}
}
}
template <typename Dtype>
__global__ void refinenet_kernel(const int n, Dtype* col_buff,
Dtype* weight, Dtype* x, Dtype* x1, Dtype* y, Dtype* y1 , int width, Dtype* output) {
CUDA_KERNEL_LOOP(index, n) {
    // first recover the row and column coordinates from the flat index
const int c = index/width;
    const int h = (index - c * width);// row index
    const int w = index % width; // column index
output[index]=weight[0]*x[c]+weight[1]*x1[c]+weight[2]*y[c]+weight[3]*y1[c]+col_buff[index];
}
}
template <typename Dtype>
__global__ void place_col_kernel(const int n, Dtype* col_buff,
int layer_index,int height, int width, Dtype* output) {
CUDA_KERNEL_LOOP(index, n) {
    // element `index` of col_buff goes to row `index`, column `layer_index` of the output
int current_index=index*width+layer_index;
output[current_index]=col_buff[index];
}
}
template <>
void caffe_gpu_placecol<float>(const int N, float* col_buff, int layer_index,int height, int width, float* output) {
// NOLINT_NEXT_LINE(whitespace/operators)
place_col_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, col_buff, layer_index, height, width, output);
}
template <>
void caffe_gpu_placecol<double>(const int N, double* col_buff, int layer_index,int height, int width, double* output) {
// NOLINT_NEXT_LINE(whitespace/operators)
place_col_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, col_buff, layer_index, height, width, output);
}
template <>
void caffe_gpu_refinenet<float>(const int N, float* col_buff, float* weight,float* x, float* x1, float* y, float* y1, int width, float* output) {
// NOLINT_NEXT_LINE(whitespace/operators)
refinenet_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, col_buff, weight, x, x1, y, y1, width, output);
}
template <>
void caffe_gpu_refinenet<double>(const int N, double* col_buff, double* weight,double* x, double* x1, double* y, double* y1, int width, double* output) {
// NOLINT_NEXT_LINE(whitespace/operators)
refinenet_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, col_buff, weight, x, x1, y, y1, width, output);
}
template <>
void caffe_gpu_obtain_col<float>(const int N, const float* template1, int height, int width, int col_index, float* output) {
// NOLINT_NEXT_LINE(whitespace/operators)
obtain_col_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, template1, height, width, col_index, output);
}
template <>
void caffe_gpu_obtain_col<double>(const int N, const double* template1, int height, int width, int col_index, double* output) {
// NOLINT_NEXT_LINE(whitespace/operators)
obtain_col_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, template1, height, width, col_index, output);
}
template <>
void caffe_gpu_dtpooling<float>(const int N, const float* col_buff, float* weight, float* x,
float* x1, float* y, float* y1, float* output) {
// NOLINT_NEXT_LINE(whitespace/operators)
dtpooling_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, col_buff, weight, x, x1, y, y1,output);
}
template <>
void caffe_gpu_dtpooling<double>(const int N, const double* col_buff, double* weight, double* x,
double* x1, double* y, double* y1, double* output) {
// NOLINT_NEXT_LINE(whitespace/operators)
dtpooling_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, col_buff, weight, x, x1, y, y1,output);
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul1<float>(const int N, const float* a,
const unsigned int* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
my_mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul1<double>(const int N, const double* a,
const unsigned int* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
my_mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
__global__ void popc_kernel(const int n, const float* a,
const float* b, uint8_t* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = __popc(static_cast<uint32_t>(a[index]) ^
static_cast<uint32_t>(b[index]));
}
}
__global__ void popcll_kernel(const int n, const double* a,
const double* b, uint8_t* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = __popcll(static_cast<uint64_t>(a[index]) ^
static_cast<uint64_t>(b[index]));
}
}
template <>
uint32_t caffe_gpu_hamming_distance<float>(const int n, const float* x,
const float* y) {
// TODO: Fix caffe_gpu_hamming_distance (see failing unit test
// TestHammingDistanceGPU in test_math_functions.cpp).
NOT_IMPLEMENTED;
thrust::device_vector<uint8_t> popcounts(n);
// NOLINT_NEXT_LINE(whitespace/operators)
popc_kernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
n, x, y, thrust::raw_pointer_cast(popcounts.data()));
return thrust::reduce(popcounts.begin(), popcounts.end(),
(uint32_t) 0, thrust::plus<uint32_t>());
}
template <>
uint32_t caffe_gpu_hamming_distance<double>(const int n, const double* x,
const double* y) {
// TODO: Fix caffe_gpu_hamming_distance (see failing unit test
// TestHammingDistanceGPU in test_math_functions.cpp).
NOT_IMPLEMENTED;
thrust::device_vector<uint8_t> popcounts(n);
// NOLINT_NEXT_LINE(whitespace/operators)
popcll_kernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
n, x, y, thrust::raw_pointer_cast(popcounts.data()));
return thrust::reduce(popcounts.begin(), popcounts.end(),
/* NOLINT_NEXT_LINE(build/include_what_you_use) */
(uint32_t) 0, thrust::plus<uint32_t>());
}
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
|
8da160e3e3d3f035e115976012558d785b7f968e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2018 BlazingDB, Inc.
* Copyright 2018 Felipe Aramburu <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gtest/gtest.h"
#include <iostream>
#include <gdf/gdf.h>
#include <gdf/cffi/functions.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <hip/hip_runtime.h>
#include "helper/utils.cuh"
/*
============================================================================
Description : Compute sum of reciprocals using STL on CPU and Thrust on GPU
============================================================================
*/
TEST(Example, Equals)
{
gdf_size_type num_elements = 8;
char *data_left;
char *data_right;
char *data_out;
hipError_t cuda_error = hipMalloc((void **)&data_left, sizeof(int8_t) * num_elements);
cuda_error = hipMalloc((void **)&data_right, sizeof(int8_t) * num_elements);
cuda_error = hipMalloc((void **)&data_out, sizeof(int8_t) * num_elements);
ASSERT_EQ(cuda_error, hipSuccess);
int8_t int8_value = 2;
thrust::device_ptr<int8_t> right_ptr = thrust::device_pointer_cast((int8_t *)data_right);
thrust::fill(thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(right_ptr + num_elements), int8_value);
//for this simple test we will send in only 8 values
gdf_valid_type *valid = new gdf_valid_type;
*valid = 255;
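    // 0xFF sets all eight bits of the validity bitmask, i.e. every row is non-null.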
gdf_valid_type *valid_device;
cuda_error = hipMalloc((void **)&valid_device, 1);
hipMemcpy(valid_device, valid, sizeof(gdf_valid_type), hipMemcpyHostToDevice);
gdf_valid_type *valid_out;
cuda_error = hipMalloc((void **)&valid_out, 1);
gdf_column lhs;
gdf_error error = gdf_column_view_augmented(&lhs, (void *)data_left, valid_device, num_elements, GDF_INT8, 0);
gdf_column rhs;
error = gdf_column_view_augmented(&rhs, (void *)data_right, valid_device, num_elements, GDF_INT8, 0);
gdf_column output;
error = gdf_column_view_augmented(&output, (void *)data_out, valid_out, num_elements, GDF_INT8, 0);
ASSERT_EQ(error, GDF_SUCCESS);
std::cout << "Left" << std::endl;
print_column(&lhs);
std::cout << "Right" << std::endl;
print_column(&rhs);
error = gpu_comparison(&lhs, &rhs, &output, GDF_EQUALS); // gtest!
std::cout << "Output" << std::endl;
print_column(&output);
error = gpu_comparison_static_i8(&lhs, 3, &output, GDF_EQUALS);
ASSERT_EQ(error, GDF_SUCCESS);
std::cout << "Output static_i8" << std::endl;
print_column(&output);
hipFree(data_left);
hipFree(data_right);
hipFree(data_out);
hipFree(valid_device);
hipFree(valid_out);
delete valid;
EXPECT_EQ(1, 1);
} | 8da160e3e3d3f035e115976012558d785b7f968e.cu | /*
* Copyright 2018 BlazingDB, Inc.
* Copyright 2018 Felipe Aramburu <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gtest/gtest.h"
#include <iostream>
#include <gdf/gdf.h>
#include <gdf/cffi/functions.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <cuda_runtime.h>
#include "helper/utils.cuh"
/*
============================================================================
Description : Compute sum of reciprocals using STL on CPU and Thrust on GPU
============================================================================
*/
TEST(Example, Equals)
{
gdf_size_type num_elements = 8;
char *data_left;
char *data_right;
char *data_out;
cudaError_t cuda_error = cudaMalloc((void **)&data_left, sizeof(int8_t) * num_elements);
cuda_error = cudaMalloc((void **)&data_right, sizeof(int8_t) * num_elements);
cuda_error = cudaMalloc((void **)&data_out, sizeof(int8_t) * num_elements);
ASSERT_EQ(cuda_error, cudaSuccess);
int8_t int8_value = 2;
thrust::device_ptr<int8_t> right_ptr = thrust::device_pointer_cast((int8_t *)data_right);
thrust::fill(thrust::detail::make_normal_iterator(right_ptr), thrust::detail::make_normal_iterator(right_ptr + num_elements), int8_value);
//for this simple test we will send in only 8 values
gdf_valid_type *valid = new gdf_valid_type;
*valid = 255;
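    // 0xFF sets all eight bits of the validity bitmask, i.e. every row is non-null.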
gdf_valid_type *valid_device;
cuda_error = cudaMalloc((void **)&valid_device, 1);
cudaMemcpy(valid_device, valid, sizeof(gdf_valid_type), cudaMemcpyHostToDevice);
gdf_valid_type *valid_out;
cuda_error = cudaMalloc((void **)&valid_out, 1);
gdf_column lhs;
gdf_error error = gdf_column_view_augmented(&lhs, (void *)data_left, valid_device, num_elements, GDF_INT8, 0);
gdf_column rhs;
error = gdf_column_view_augmented(&rhs, (void *)data_right, valid_device, num_elements, GDF_INT8, 0);
gdf_column output;
error = gdf_column_view_augmented(&output, (void *)data_out, valid_out, num_elements, GDF_INT8, 0);
ASSERT_EQ(error, GDF_SUCCESS);
std::cout << "Left" << std::endl;
print_column(&lhs);
std::cout << "Right" << std::endl;
print_column(&rhs);
error = gpu_comparison(&lhs, &rhs, &output, GDF_EQUALS); // gtest!
std::cout << "Output" << std::endl;
print_column(&output);
error = gpu_comparison_static_i8(&lhs, 3, &output, GDF_EQUALS);
ASSERT_EQ(error, GDF_SUCCESS);
std::cout << "Output static_i8" << std::endl;
print_column(&output);
cudaFree(data_left);
cudaFree(data_right);
cudaFree(data_out);
cudaFree(valid_device);
cudaFree(valid_out);
delete valid;
EXPECT_EQ(1, 1);
} |
e46e575b3b14fbb76e228717663d38f428cb7ae4.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <rocblas.h>
#include <thrust/device_vector.h>
struct saxpy_functor
{
const float a_;
saxpy_functor(float a) : a_(a) {}
__host__ __device__
float operator() (const float &x, const float &y) const {
return a_ * x + y;
}
};
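// Element-wise y <- a*x + y, so thrust::transform below reproduces the BLAS
// SAXPY operation for the timing comparison against the hipBLAS call.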
int main ()
{
hipEvent_t start, finish;
hipEventCreate(&start);
hipEventCreate(&finish);
float elapsedTime;
const int N = 1 << 10;
const float XVAL = rand() % 1000000;
const float YVAL = rand() % 1000000;
const float AVAL = rand() % 1000000;
float *host_x, *host_y;
hipblasHandle_t handle;
hipblasCreate(&handle);
host_x = new float[N];
host_y = new float[N];
for (int i = 0; i < N; i++) {
host_x[i] = XVAL;
host_y[i] = YVAL;
}
float *dev_x, *dev_y;
hipMalloc((void **) &dev_x, N * sizeof(float));
hipMalloc((void **) &dev_y, N * sizeof(float));
hipblasSetVector(N, sizeof(host_x[0]), host_x, 1, dev_x, 1);
    hipblasSetVector(N, sizeof(host_y[0]), host_y, 1, dev_y, 1);
hipDeviceSynchronize();
hipEventRecord(start, nullptr);
hipblasSaxpy(handle, N, &AVAL, dev_x, 1, dev_y, 1);
hipDeviceSynchronize();
hipEventRecord(finish, nullptr);
hipEventSynchronize(finish);
hipEventElapsedTime(&elapsedTime, start, finish);
std::cout << "CUBLAS SAXPY: " << elapsedTime << "ms.\n";
hipFree(dev_x);
hipFree(dev_y);
delete [] host_x;
delete [] host_y;
thrust::device_vector<float> X(N, XVAL);
thrust::device_vector<float> Y(N, YVAL);
hipEventRecord(start, nullptr);
thrust::transform(X.begin(), X.end(), Y.begin(), Y.begin(), saxpy_functor(AVAL));
hipDeviceSynchronize();
hipEventRecord(finish, nullptr);
hipEventSynchronize(finish);
hipEventElapsedTime(&elapsedTime, start, finish);
std::cout << "Thrust SAXPY: " << elapsedTime << "ms.\n";
}
| e46e575b3b14fbb76e228717663d38f428cb7ae4.cu | #include <iostream>
#include <cublas_v2.h>
#include <thrust/device_vector.h>
struct saxpy_functor
{
const float a_;
saxpy_functor(float a) : a_(a) {}
__host__ __device__
float operator() (const float &x, const float &y) const {
return a_ * x + y;
}
};
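// Element-wise y <- a*x + y, so thrust::transform below reproduces the BLAS
// SAXPY operation for the timing comparison against the cuBLAS call.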
int main ()
{
cudaEvent_t start, finish;
cudaEventCreate(&start);
cudaEventCreate(&finish);
float elapsedTime;
const int N = 1 << 10;
const float XVAL = rand() % 1000000;
const float YVAL = rand() % 1000000;
const float AVAL = rand() % 1000000;
float *host_x, *host_y;
cublasHandle_t handle;
cublasCreate(&handle);
host_x = new float[N];
host_y = new float[N];
for (int i = 0; i < N; i++) {
host_x[i] = XVAL;
host_y[i] = YVAL;
}
float *dev_x, *dev_y;
cudaMalloc((void **) &dev_x, N * sizeof(float));
cudaMalloc((void **) &dev_y, N * sizeof(float));
cublasSetVector(N, sizeof(host_x[0]), host_x, 1, dev_x, 1);
    cublasSetVector(N, sizeof(host_y[0]), host_y, 1, dev_y, 1);
cudaDeviceSynchronize();
cudaEventRecord(start, nullptr);
cublasSaxpy(handle, N, &AVAL, dev_x, 1, dev_y, 1);
cudaDeviceSynchronize();
cudaEventRecord(finish, nullptr);
cudaEventSynchronize(finish);
cudaEventElapsedTime(&elapsedTime, start, finish);
std::cout << "CUBLAS SAXPY: " << elapsedTime << "ms.\n";
cudaFree(dev_x);
cudaFree(dev_y);
delete [] host_x;
delete [] host_y;
thrust::device_vector<float> X(N, XVAL);
thrust::device_vector<float> Y(N, YVAL);
cudaEventRecord(start, nullptr);
thrust::transform(X.begin(), X.end(), Y.begin(), Y.begin(), saxpy_functor(AVAL));
cudaDeviceSynchronize();
cudaEventRecord(finish, nullptr);
cudaEventSynchronize(finish);
cudaEventElapsedTime(&elapsedTime, start, finish);
std::cout << "Thrust SAXPY: " << elapsedTime << "ms.\n";
}
|