hip_filename (string, 5–84 chars) | hip_content (string, 79–9.69M chars) | cuda_filename (string, 4–83 chars) | cuda_content (string, 19–9.69M chars) |
---|---|---|---|
61fb217e2d32e310d9badbcd34474063c8b7ccda.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
#define TILE_WIDTH 16
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
// Compute C = A * B
__global__ void matrixMultiply(float *A, float *B, float *C, int numARows,
int numAColumns, int numBRows, int numBColumns,
int numCRows, int numCColumns) {
//@@ Insert code to implement matrix multiplication here
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < numCRows && col < numCColumns)
{
float value = 0.0;
for (int i = 0; i < numBRows; i++)
value += A[row * numAColumns + i] * B[i * numBColumns + col];
C[row * numCColumns + col] = value;
}
}
int main(int argc, char **argv) {
wbArg_t args;
float *hostA; // The A matrix
float *hostB; // The B matrix
float *hostC; // The output C matrix
float *deviceA;
float *deviceB;
float *deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set this)
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostA =
( float * )wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns);
hostB =
( float * )wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns);
//@@ Set numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
//@@ Allocate the hostC matrix
wbTime_stop(Generic, "Importing data and creating memory on host");
int sizeA = numARows * numAColumns * sizeof(float);
int sizeB = numBRows * numBColumns * sizeof(float);
int sizeC = numCRows * numCColumns * sizeof(float);
hostC = (float*) malloc(sizeC);
wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
wbCheck(hipMalloc((void**)&deviceA, sizeA));
wbCheck(hipMalloc((void**)&deviceB, sizeB));
wbCheck(hipMalloc((void**)&deviceC, sizeC));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
wbCheck(hipMemcpy(deviceA, hostA, sizeA, hipMemcpyHostToDevice));
wbCheck(hipMemcpy(deviceB, hostB, sizeB, hipMemcpyHostToDevice));
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 gridDim3((numCColumns - 1)/TILE_WIDTH + 1, (numCRows - 1)/TILE_WIDTH + 1, 1);
dim3 blockDim3(TILE_WIDTH, TILE_WIDTH, 1);
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
hipLaunchKernelGGL(( matrixMultiply) , dim3(gridDim3), dim3(blockDim3) , 0, 0, deviceA, deviceB, deviceC,
numARows, numAColumns, numBRows,
numBColumns, numCRows, numCColumns);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
wbCheck(hipMemcpy(hostC, deviceC, sizeC, hipMemcpyDeviceToHost));
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
wbCheck(hipFree(deviceA));
wbCheck(hipFree(deviceB));
wbCheck(hipFree(deviceC));
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostC, numCRows, numCColumns);
free(hostA);
free(hostB);
free(hostC);
return 0;
}
|
61fb217e2d32e310d9badbcd34474063c8b7ccda.cu
|
#include <wb.h>
#define TILE_WIDTH 16
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
// Compute C = A * B
__global__ void matrixMultiply(float *A, float *B, float *C, int numARows,
int numAColumns, int numBRows, int numBColumns,
int numCRows, int numCColumns) {
//@@ Insert code to implement matrix multiplication here
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < numCRows && col < numCColumns)
{
float value = 0.0;
for (int i = 0; i < numBRows; i++)
value += A[row * numAColumns + i] * B[i * numBColumns + col];
C[row * numCColumns + col] = value;
}
}
int main(int argc, char **argv) {
wbArg_t args;
float *hostA; // The A matrix
float *hostB; // The B matrix
float *hostC; // The output C matrix
float *deviceA;
float *deviceB;
float *deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set this)
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostA =
( float * )wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns);
hostB =
( float * )wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns);
//@@ Set numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
//@@ Allocate the hostC matrix
wbTime_stop(Generic, "Importing data and creating memory on host");
int sizeA = numARows * numAColumns * sizeof(float);
int sizeB = numBRows * numBColumns * sizeof(float);
int sizeC = numCRows * numCColumns * sizeof(float);
hostC = (float*) malloc(sizeC);
wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
wbCheck(cudaMalloc((void**)&deviceA, sizeA));
wbCheck(cudaMalloc((void**)&deviceB, sizeB));
wbCheck(cudaMalloc((void**)&deviceC, sizeC));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
wbCheck(cudaMemcpy(deviceA, hostA, sizeA, cudaMemcpyHostToDevice));
wbCheck(cudaMemcpy(deviceB, hostB, sizeB, cudaMemcpyHostToDevice));
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 gridDim3((numCColumns - 1)/TILE_WIDTH + 1, (numCRows - 1)/TILE_WIDTH + 1, 1);
dim3 blockDim3(TILE_WIDTH, TILE_WIDTH, 1);
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
matrixMultiply <<< gridDim3, blockDim3 >>> (deviceA, deviceB, deviceC,
numARows, numAColumns, numBRows,
numBColumns, numCRows, numCColumns);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
wbCheck(cudaMemcpy(hostC, deviceC, sizeC, cudaMemcpyDeviceToHost));
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
wbCheck(cudaFree(deviceA));
wbCheck(cudaFree(deviceB));
wbCheck(cudaFree(deviceC));
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostC, numCRows, numCColumns);
free(hostA);
free(hostB);
free(hostC);
return 0;
}
|
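This first pair shows the two mechanical changes hipify makes to a straightforward CUDA program: runtime identifiers are renamed one-for-one (`cudaError_t`→`hipError_t`, `cudaMalloc`→`hipMalloc`, `cudaMemcpy`→`hipMemcpy`, `cudaGetErrorString`→`hipGetErrorString`), and the triple-chevron launch is rewritten as a `hipLaunchKernelGGL` call whose extra `0, 0` arguments are the dynamic shared-memory size and the stream that the chevron form leaves implicit. Below is a minimal, hand-written sketch of that mapping (illustrative kernel and sizes, not a dataset row):

```cpp
// Hand-written sketch of the renames hipify applies in the pair above.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void scaleKernel(float *data, float factor, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= factor;                // kernel body is left unchanged by hipify
}

int main() {
  const int n = 1 << 10;
  float *d = nullptr;
  cudaMalloc((void **)&d, n * sizeof(float));  // becomes hipMalloc(...)
  cudaMemset(d, 0, n * sizeof(float));         // becomes hipMemset(...)
  dim3 block(256), grid((n + block.x - 1) / block.x);
  scaleKernel<<<grid, block>>>(d, 2.0f, n);
  // The .hip side above becomes:
  //   hipLaunchKernelGGL(scaleKernel, dim3(grid), dim3(block), 0, 0, d, 2.0f, n);
  cudaError_t err = cudaDeviceSynchronize();   // becomes hipDeviceSynchronize()
  if (err != cudaSuccess)
    fprintf(stderr, "%s\n", cudaGetErrorString(err));
  cudaFree(d);                                 // becomes hipFree(d)
  return 0;
}
```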
5af9193201bc8da4149a13d4c81ff043b77d7a3b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <chrono>
#include <random>
#include <vector>
#include <string>
#include <cstring>
#include <cctype>
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <fstream>
#include <bitset>
#include <random>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "csv.hpp"
using namespace std;
// A small data structure to do RAII for a dataset of 2-dimensional points.
struct Data {
explicit Data(int size) : size(size), bytes(size * sizeof(float)) {
hipMalloc(&x, bytes);
hipMalloc(&y, bytes);
}
Data(int size, std::vector<float>& h_x, std::vector<float>& h_y)
: size(size), bytes(size * sizeof(float)) {
hipMalloc(&x, bytes);
hipMalloc(&y, bytes);
hipMemcpy(x, h_x.data(), bytes, hipMemcpyHostToDevice);
hipMemcpy(y, h_y.data(), bytes, hipMemcpyHostToDevice);
}
~Data() {
hipFree(x);
hipFree(y);
}
void clear() {
hipMemset(x, 0, bytes);
hipMemset(y, 0, bytes);
}
float* x{nullptr};
float* y{nullptr};
int size{0};
int bytes{0};
};
__device__ float
squared_l2_distance(float x_1, float y_1, float x_2, float y_2) {
return (x_1 - x_2) * (x_1 - x_2) + (y_1 - y_2) * (y_1 - y_2);
}
// In the assignment step, each point (thread) computes its distance to each
// cluster centroid and adds its x and y values to the sum of its closest
// centroid, as well as incrementing that centroid's count of assigned points.
__global__ void assign_clusters(const thrust::device_ptr<float> data_x,
const thrust::device_ptr<float> data_y,
int data_size,
const thrust::device_ptr<float> means_x,
const thrust::device_ptr<float> means_y,
thrust::device_ptr<float> new_sums_x,
thrust::device_ptr<float> new_sums_y,
int k,
thrust::device_ptr<int> counts) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= data_size) return;
// Make global loads once.
const float x = data_x[index];
const float y = data_y[index];
float best_distance = FLT_MAX;
int best_cluster = 0;
for (int cluster = 0; cluster < k; ++cluster) {
const float distance =
squared_l2_distance(x, y, means_x[cluster], means_y[cluster]);
if (distance < best_distance) {
best_distance = distance;
best_cluster = cluster;
}
}
atomicAdd(thrust::raw_pointer_cast(new_sums_x + best_cluster), x);
atomicAdd(thrust::raw_pointer_cast(new_sums_y + best_cluster), y);
atomicAdd(thrust::raw_pointer_cast(counts + best_cluster), 1);
}
// Each thread is one cluster, which just recomputes its coordinates as the mean
// of all points assigned to it.
__global__ void compute_new_means(thrust::device_ptr<float> means_x,
thrust::device_ptr<float> means_y,
const thrust::device_ptr<float> new_sum_x,
const thrust::device_ptr<float> new_sum_y,
const thrust::device_ptr<int> counts) {
const int cluster = threadIdx.x;
const int count = max(1, counts[cluster]);
means_x[cluster] = new_sum_x[cluster] / count;
means_y[cluster] = new_sum_y[cluster] / count;
}
int main(int argc, const char* argv[]) {
std::vector<float> h_x;
std::vector<float> h_y;
// Load x and y into host vectors ... (omitted)
int N = atoi(argv[2]);
int k = 3;
int number_of_iterations = 1000;
const string csv_file = std::string(argv[1]);
vector<vector<string>> data2;
Csv objCsv(csv_file);
if (!objCsv.getCsv(data2)) {
cout << "read ERROR" << endl;
return 1;
}
// for (int row = 0; row < data2.size(); row++) {
for (int row = 0; row < 1024; row++) {
vector<string> rec = data2[row];
h_x.push_back(std::stof(rec[0]));
h_y.push_back(std::stof(rec[1]));
}
const size_t number_of_elements = h_x.size();
thrust::device_vector<float> d_x = h_x;
thrust::device_vector<float> d_y = h_y;
std::mt19937 rng(std::random_device{}());
std::shuffle(h_x.begin(), h_x.end(), rng);
std::shuffle(h_y.begin(), h_y.end(), rng);
thrust::device_vector<float> d_mean_x(h_x.begin(), h_x.begin() + k);
thrust::device_vector<float> d_mean_y(h_y.begin(), h_y.begin() + k);
thrust::device_vector<float> d_sums_x(k);
thrust::device_vector<float> d_sums_y(k);
thrust::device_vector<int> d_counts(k, 0);
const int threads = 1024;
const int blocks = (number_of_elements + threads - 1) / threads;
for (size_t iteration = 0; iteration < number_of_iterations; ++iteration) {
thrust::fill(d_sums_x.begin(), d_sums_x.end(), 0);
thrust::fill(d_sums_y.begin(), d_sums_y.end(), 0);
thrust::fill(d_counts.begin(), d_counts.end(), 0);
hipLaunchKernelGGL(( assign_clusters), dim3(blocks), dim3(threads), 0, 0, d_x.data(),
d_y.data(),
number_of_elements,
d_mean_x.data(),
d_mean_y.data(),
d_sums_x.data(),
d_sums_y.data(),
k,
d_counts.data());
hipDeviceSynchronize();
hipLaunchKernelGGL(( compute_new_means), dim3(1), dim3(k), 0, 0, d_mean_x.data(),
d_mean_y.data(),
d_sums_x.data(),
d_sums_y.data(),
d_counts.data());
hipDeviceSynchronize();
}
/*
hipMemcpy(h_clusterNo, d_clusterNo, N * sizeof(int), hipMemcpyDeviceToHost);
for(int i=0; i < N; i++)
std::cout << h_x[i] << "," << h_y[i] << "," << h_clusterNo[i] << std::endl;
*/
}
|
5af9193201bc8da4149a13d4c81ff043b77d7a3b.cu
|
#include <algorithm>
#include <cfloat>
#include <chrono>
#include <random>
#include <vector>
#include <string>
#include <cstring>
#include <cctype>
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <fstream>
#include <bitset>
#include <random>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "csv.hpp"
using namespace std;
// A small data structure to do RAII for a dataset of 2-dimensional points.
struct Data {
explicit Data(int size) : size(size), bytes(size * sizeof(float)) {
cudaMalloc(&x, bytes);
cudaMalloc(&y, bytes);
}
Data(int size, std::vector<float>& h_x, std::vector<float>& h_y)
: size(size), bytes(size * sizeof(float)) {
cudaMalloc(&x, bytes);
cudaMalloc(&y, bytes);
cudaMemcpy(x, h_x.data(), bytes, cudaMemcpyHostToDevice);
cudaMemcpy(y, h_y.data(), bytes, cudaMemcpyHostToDevice);
}
~Data() {
cudaFree(x);
cudaFree(y);
}
void clear() {
cudaMemset(x, 0, bytes);
cudaMemset(y, 0, bytes);
}
float* x{nullptr};
float* y{nullptr};
int size{0};
int bytes{0};
};
__device__ float
squared_l2_distance(float x_1, float y_1, float x_2, float y_2) {
return (x_1 - x_2) * (x_1 - x_2) + (y_1 - y_2) * (y_1 - y_2);
}
// In the assignment step, each point (thread) computes its distance to each
// cluster centroid and adds its x and y values to the sum of its closest
// centroid, as well as incrementing that centroid's count of assigned points.
__global__ void assign_clusters(const thrust::device_ptr<float> data_x,
const thrust::device_ptr<float> data_y,
int data_size,
const thrust::device_ptr<float> means_x,
const thrust::device_ptr<float> means_y,
thrust::device_ptr<float> new_sums_x,
thrust::device_ptr<float> new_sums_y,
int k,
thrust::device_ptr<int> counts) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= data_size) return;
// Make global loads once.
const float x = data_x[index];
const float y = data_y[index];
float best_distance = FLT_MAX;
int best_cluster = 0;
for (int cluster = 0; cluster < k; ++cluster) {
const float distance =
squared_l2_distance(x, y, means_x[cluster], means_y[cluster]);
if (distance < best_distance) {
best_distance = distance;
best_cluster = cluster;
}
}
atomicAdd(thrust::raw_pointer_cast(new_sums_x + best_cluster), x);
atomicAdd(thrust::raw_pointer_cast(new_sums_y + best_cluster), y);
atomicAdd(thrust::raw_pointer_cast(counts + best_cluster), 1);
}
// Each thread is one cluster, which just recomputes its coordinates as the mean
// of all points assigned to it.
__global__ void compute_new_means(thrust::device_ptr<float> means_x,
thrust::device_ptr<float> means_y,
const thrust::device_ptr<float> new_sum_x,
const thrust::device_ptr<float> new_sum_y,
const thrust::device_ptr<int> counts) {
const int cluster = threadIdx.x;
const int count = max(1, counts[cluster]);
means_x[cluster] = new_sum_x[cluster] / count;
means_y[cluster] = new_sum_y[cluster] / count;
}
int main(int argc, const char* argv[]) {
std::vector<float> h_x;
std::vector<float> h_y;
// Load x and y into host vectors ... (omitted)
int N = atoi(argv[2]);
int k = 3;
int number_of_iterations = 1000;
const string csv_file = std::string(argv[1]);
vector<vector<string>> data2;
Csv objCsv(csv_file);
if (!objCsv.getCsv(data2)) {
cout << "read ERROR" << endl;
return 1;
}
// for (int row = 0; row < data2.size(); row++) {
for (int row = 0; row < 1024; row++) {
vector<string> rec = data2[row];
h_x.push_back(std::stof(rec[0]));
h_y.push_back(std::stof(rec[1]));
}
const size_t number_of_elements = h_x.size();
thrust::device_vector<float> d_x = h_x;
thrust::device_vector<float> d_y = h_y;
std::mt19937 rng(std::random_device{}());
std::shuffle(h_x.begin(), h_x.end(), rng);
std::shuffle(h_y.begin(), h_y.end(), rng);
thrust::device_vector<float> d_mean_x(h_x.begin(), h_x.begin() + k);
thrust::device_vector<float> d_mean_y(h_y.begin(), h_y.begin() + k);
thrust::device_vector<float> d_sums_x(k);
thrust::device_vector<float> d_sums_y(k);
thrust::device_vector<int> d_counts(k, 0);
const int threads = 1024;
const int blocks = (number_of_elements + threads - 1) / threads;
for (size_t iteration = 0; iteration < number_of_iterations; ++iteration) {
thrust::fill(d_sums_x.begin(), d_sums_x.end(), 0);
thrust::fill(d_sums_y.begin(), d_sums_y.end(), 0);
thrust::fill(d_counts.begin(), d_counts.end(), 0);
assign_clusters<<<blocks, threads>>>(d_x.data(),
d_y.data(),
number_of_elements,
d_mean_x.data(),
d_mean_y.data(),
d_sums_x.data(),
d_sums_y.data(),
k,
d_counts.data());
cudaDeviceSynchronize();
compute_new_means<<<1, k>>>(d_mean_x.data(),
d_mean_y.data(),
d_sums_x.data(),
d_sums_y.data(),
d_counts.data());
cudaDeviceSynchronize();
}
/*
cudaMemcpy(h_clusterNo, d_clusterNo, N * sizeof(int), cudaMemcpyDeviceToHost);
for(int i=0; i < N; i++)
std::cout << h_x[i] << "," << h_y[i] << "," << h_clusterNo[i] << std::endl;
*/
}
|
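In this k-means pair the `thrust::` calls are identical in both files; only the raw-kernel launch lines differ. The kernels above take `thrust::device_ptr` arguments and use `thrust::raw_pointer_cast` for the atomics, covering all points with a ceil-divide grid and building per-cluster sums with `atomicAdd`. A standalone sketch of that same pattern, passing raw pointers directly (hypothetical kernel and sizes, not from the dataset):

```cpp
// Sketch: thrust::device_vector owns the memory, a ceil-divide grid covers all
// elements, and per-bin sums/counts are accumulated with atomicAdd.
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <cstdio>

__global__ void histogram_sum(const float *x, int n, float *sums, int *counts, int k) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  int bin = i % k;                              // stand-in for "closest centroid"
  atomicAdd(&sums[bin], x[i]);                  // same accumulation as assign_clusters
  atomicAdd(&counts[bin], 1);
}

int main() {
  const int n = 1 << 12, k = 3;
  thrust::device_vector<float> d_x(n, 1.0f);
  thrust::device_vector<float> d_sums(k, 0.0f);
  thrust::device_vector<int> d_counts(k, 0);
  const int threads = 256;
  const int blocks = (n + threads - 1) / threads;   // ceil-divide, as in the row above
  histogram_sum<<<blocks, threads>>>(thrust::raw_pointer_cast(d_x.data()), n,
                                     thrust::raw_pointer_cast(d_sums.data()),
                                     thrust::raw_pointer_cast(d_counts.data()), k);
  cudaDeviceSynchronize();
  printf("count[0] = %d\n", (int)d_counts[0]);      // element read copies back to host
  return 0;
}
```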
1a387f7b61adcf61ec1eadb07bbb941cf44cfe4a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "gtest/gtest.h"
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layers/im2col_layer.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/test/test_caffe_main.hpp"
namespace caffe {
// Forward declare kernel functions
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_col);
template <typename Dtype, int num_axes>
__global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_col);
template <typename Dtype>
class Im2colKernelTest : public GPUDeviceTest<Dtype> {
protected:
Im2colKernelTest()
// big so launches > 1024 threads
: blob_bottom_(new TBlob<Dtype>(5, 500, 15, 15)),
blob_kernel_shape_(new TBlob<int>()),
blob_stride_(new TBlob<int>()),
blob_pad_(new TBlob<int>()),
blob_dilation_(new TBlob<int>()),
blob_top_(new TBlob<Dtype>()),
blob_top_cpu_(new TBlob<Dtype>()) {
FillerParameter filler_param;
GaussianFiller<Dtype> filler(filler_param);
filler.Fill(this->blob_bottom_);
vector<int> dim_blob_shape(1, 2);
blob_kernel_shape_->Reshape(dim_blob_shape);
blob_stride_->Reshape(dim_blob_shape);
blob_pad_->Reshape(dim_blob_shape);
blob_dilation_->Reshape(dim_blob_shape);
height_ = blob_bottom_->height();
width_ = blob_bottom_->width();
channels_ = blob_bottom_->channels();
pad_ = 0;
stride_ = 2;
dilation_ = 3;
kernel_size_ = 3;
height_col_ = (height_ + 2 * pad_ -
(dilation_ * (kernel_size_ - 1) + 1)) / stride_ + 1;
width_col_ = (width_ + 2 * pad_ -
(dilation_ * (kernel_size_ - 1) + 1)) / stride_ + 1;
for (int i = 0; i < 2; ++i) {
blob_kernel_shape_->mutable_cpu_data()[i] = kernel_size_;
blob_stride_->mutable_cpu_data()[i] = stride_;
blob_pad_->mutable_cpu_data()[i] = pad_;
blob_dilation_->mutable_cpu_data()[i] = dilation_;
}
}
virtual ~Im2colKernelTest() {
delete blob_bottom_;
delete blob_top_;
delete blob_top_cpu_;
delete blob_kernel_shape_;
delete blob_stride_;
delete blob_pad_;
delete blob_dilation_;
}
TBlob<int>* const blob_kernel_shape_;
TBlob<int>* const blob_stride_;
TBlob<int>* const blob_pad_;
TBlob<int>* const blob_dilation_;
TBlob<Dtype>* const blob_bottom_;
TBlob<Dtype>* const blob_top_;
TBlob<Dtype>* const blob_top_cpu_;
int height_;
int width_;
int channels_;
int pad_;
int stride_;
int dilation_;
int kernel_size_;
int height_col_;
int width_col_;
};
TYPED_TEST_CASE(Im2colKernelTest, TestDtypes);
TYPED_TEST(Im2colKernelTest, Test2D) {
// Reshape the blobs to correct size for im2col output
this->blob_top_->Reshape(this->blob_bottom_->num(),
this->channels_ * this->kernel_size_ * this->kernel_size_,
this->height_col_,
this->width_col_);
this->blob_top_cpu_->Reshape(this->blob_bottom_->num(),
this->channels_ * this->kernel_size_ * this->kernel_size_,
this->height_col_,
this->width_col_);
const TypeParam* bottom_data = this->blob_bottom_->gpu_data();
TypeParam* top_data = this->blob_top_->mutable_gpu_data();
TypeParam* cpu_data = this->blob_top_cpu_->mutable_cpu_data();
// CPU Version
for (int n = 0; n < this->blob_bottom_->num(); ++n) {
im2col_cpu(this->blob_bottom_->cpu_data() + this->blob_bottom_->offset(n),
this->channels_, this->height_, this->width_,
this->kernel_size_, this->kernel_size_, this->pad_, this->pad_,
this->stride_, this->stride_, this->dilation_, this->dilation_,
cpu_data + this->blob_top_cpu_->offset(n));
}
// GPU version
int num_kernels = this->channels_ * this->height_col_ * this->width_col_;
int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels);
// Launch with different grid sizes
hipStream_t stream = Caffe::thread_stream();
for (int grid_div = 2; grid_div <= 8; grid_div++) {
for (int n = 0; n < this->blob_bottom_->num(); ++n) {
int grid_dim = default_grid_dim/grid_div;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_gpu_kernel<TypeParam>), dim3(grid_dim), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream,
num_kernels, bottom_data + this->blob_bottom_->offset(n),
this->height_, this->width_, this->kernel_size_, this->kernel_size_,
this->pad_, this->pad_, this->stride_, this->stride_,
this->dilation_, this->dilation_,
this->height_col_, this->width_col_,
top_data + this->blob_top_->offset(n));
CUDA_POST_KERNEL_CHECK;
}
// Compare results against CPU version
for (int i = 0; i < this->blob_top_->count(); ++i) {
TypeParam cpuval = cpu_data[i];
TypeParam gpuval = this->blob_top_->cpu_data()[i];
EXPECT_EQ(cpuval, gpuval);
if (cpuval != gpuval) {
break;
}
}
}
CUDA_CHECK(hipStreamSynchronize(stream));
}
TYPED_TEST(Im2colKernelTest, TestND) {
// Reshape the blobs to correct size for im2col output
this->blob_top_->Reshape(this->blob_bottom_->num(),
this->channels_ * this->kernel_size_ * this->kernel_size_,
this->height_col_,
this->width_col_);
this->blob_top_cpu_->ReshapeLike(*this->blob_top_);
const TypeParam* bottom_data_cpu = this->blob_bottom_->cpu_data();
TypeParam* top_data_cpu = this->blob_top_cpu_->mutable_cpu_data();
// CPU Version
for (int n = 0; n < this->blob_bottom_->num(); ++n) {
im2col_nd_cpu(bottom_data_cpu + this->blob_bottom_->offset(n), 2,
this->blob_bottom_->shape().data() + 1,
this->blob_top_cpu_->shape().data() + 1,
this->blob_kernel_shape_->cpu_data(),
this->blob_pad_->cpu_data(), this->blob_stride_->cpu_data(),
this->blob_dilation_->cpu_data(),
top_data_cpu + this->blob_top_cpu_->offset(n));
}
// GPU version
int num_kernels = this->channels_ * this->height_col_ * this->width_col_;
int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels);
const TypeParam* bottom_data_gpu = this->blob_bottom_->gpu_data();
// Launch with different grid sizes
hipStream_t stream = Caffe::thread_stream();
for (int grid_div = 2; grid_div <= 8; grid_div++) {
for (int n = 0; n < this->blob_bottom_->num(); ++n) {
const int grid_dim = default_grid_dim / grid_div;
TypeParam* top_data_gpu = this->blob_top_->mutable_gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<TypeParam, 2>), dim3(grid_dim), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream,
num_kernels, bottom_data_gpu + this->blob_bottom_->offset(n),
this->blob_bottom_->gpu_shape() + 1, this->blob_top_->gpu_shape() + 1,
this->blob_kernel_shape_->gpu_data(), this->blob_pad_->gpu_data(),
this->blob_stride_->gpu_data(), this->blob_dilation_->gpu_data(),
top_data_gpu + this->blob_top_->offset(n));
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
// Compare results against CPU version
for (int i = 0; i < this->blob_top_->count(); ++i) {
TypeParam cpuval = top_data_cpu[i];
TypeParam gpuval = this->blob_top_->cpu_data()[i];
EXPECT_EQ(cpuval, gpuval);
if (cpuval != gpuval) {
break;
}
}
}
}
} // namespace caffe
|
1a387f7b61adcf61ec1eadb07bbb941cf44cfe4a.cu
|
#include <vector>
#include "gtest/gtest.h"
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layers/im2col_layer.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/test/test_caffe_main.hpp"
namespace caffe {
// Forward declare kernel functions
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_col);
template <typename Dtype, int num_axes>
__global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_col);
template <typename Dtype>
class Im2colKernelTest : public GPUDeviceTest<Dtype> {
protected:
Im2colKernelTest()
// big so launches > 1024 threads
: blob_bottom_(new TBlob<Dtype>(5, 500, 15, 15)),
blob_kernel_shape_(new TBlob<int>()),
blob_stride_(new TBlob<int>()),
blob_pad_(new TBlob<int>()),
blob_dilation_(new TBlob<int>()),
blob_top_(new TBlob<Dtype>()),
blob_top_cpu_(new TBlob<Dtype>()) {
FillerParameter filler_param;
GaussianFiller<Dtype> filler(filler_param);
filler.Fill(this->blob_bottom_);
vector<int> dim_blob_shape(1, 2);
blob_kernel_shape_->Reshape(dim_blob_shape);
blob_stride_->Reshape(dim_blob_shape);
blob_pad_->Reshape(dim_blob_shape);
blob_dilation_->Reshape(dim_blob_shape);
height_ = blob_bottom_->height();
width_ = blob_bottom_->width();
channels_ = blob_bottom_->channels();
pad_ = 0;
stride_ = 2;
dilation_ = 3;
kernel_size_ = 3;
height_col_ = (height_ + 2 * pad_ -
(dilation_ * (kernel_size_ - 1) + 1)) / stride_ + 1;
width_col_ = (width_ + 2 * pad_ -
(dilation_ * (kernel_size_ - 1) + 1)) / stride_ + 1;
for (int i = 0; i < 2; ++i) {
blob_kernel_shape_->mutable_cpu_data()[i] = kernel_size_;
blob_stride_->mutable_cpu_data()[i] = stride_;
blob_pad_->mutable_cpu_data()[i] = pad_;
blob_dilation_->mutable_cpu_data()[i] = dilation_;
}
}
virtual ~Im2colKernelTest() {
delete blob_bottom_;
delete blob_top_;
delete blob_top_cpu_;
delete blob_kernel_shape_;
delete blob_stride_;
delete blob_pad_;
delete blob_dilation_;
}
TBlob<int>* const blob_kernel_shape_;
TBlob<int>* const blob_stride_;
TBlob<int>* const blob_pad_;
TBlob<int>* const blob_dilation_;
TBlob<Dtype>* const blob_bottom_;
TBlob<Dtype>* const blob_top_;
TBlob<Dtype>* const blob_top_cpu_;
int height_;
int width_;
int channels_;
int pad_;
int stride_;
int dilation_;
int kernel_size_;
int height_col_;
int width_col_;
};
TYPED_TEST_CASE(Im2colKernelTest, TestDtypes);
TYPED_TEST(Im2colKernelTest, Test2D) {
// Reshape the blobs to correct size for im2col output
this->blob_top_->Reshape(this->blob_bottom_->num(),
this->channels_ * this->kernel_size_ * this->kernel_size_,
this->height_col_,
this->width_col_);
this->blob_top_cpu_->Reshape(this->blob_bottom_->num(),
this->channels_ * this->kernel_size_ * this->kernel_size_,
this->height_col_,
this->width_col_);
const TypeParam* bottom_data = this->blob_bottom_->gpu_data();
TypeParam* top_data = this->blob_top_->mutable_gpu_data();
TypeParam* cpu_data = this->blob_top_cpu_->mutable_cpu_data();
// CPU Version
for (int n = 0; n < this->blob_bottom_->num(); ++n) {
im2col_cpu(this->blob_bottom_->cpu_data() + this->blob_bottom_->offset(n),
this->channels_, this->height_, this->width_,
this->kernel_size_, this->kernel_size_, this->pad_, this->pad_,
this->stride_, this->stride_, this->dilation_, this->dilation_,
cpu_data + this->blob_top_cpu_->offset(n));
}
// GPU version
int num_kernels = this->channels_ * this->height_col_ * this->width_col_;
int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels);
// Launch with different grid sizes
cudaStream_t stream = Caffe::thread_stream();
for (int grid_div = 2; grid_div <= 8; grid_div++) {
for (int n = 0; n < this->blob_bottom_->num(); ++n) {
int grid_dim = default_grid_dim/grid_div;
// NOLINT_NEXT_LINE(whitespace/operators)
im2col_gpu_kernel<TypeParam><<<grid_dim, CAFFE_CUDA_NUM_THREADS, 0, stream>>>(
num_kernels, bottom_data + this->blob_bottom_->offset(n),
this->height_, this->width_, this->kernel_size_, this->kernel_size_,
this->pad_, this->pad_, this->stride_, this->stride_,
this->dilation_, this->dilation_,
this->height_col_, this->width_col_,
top_data + this->blob_top_->offset(n));
CUDA_POST_KERNEL_CHECK;
}
// Compare results against CPU version
for (int i = 0; i < this->blob_top_->count(); ++i) {
TypeParam cpuval = cpu_data[i];
TypeParam gpuval = this->blob_top_->cpu_data()[i];
EXPECT_EQ(cpuval, gpuval);
if (cpuval != gpuval) {
break;
}
}
}
CUDA_CHECK(cudaStreamSynchronize(stream));
}
TYPED_TEST(Im2colKernelTest, TestND) {
// Reshape the blobs to correct size for im2col output
this->blob_top_->Reshape(this->blob_bottom_->num(),
this->channels_ * this->kernel_size_ * this->kernel_size_,
this->height_col_,
this->width_col_);
this->blob_top_cpu_->ReshapeLike(*this->blob_top_);
const TypeParam* bottom_data_cpu = this->blob_bottom_->cpu_data();
TypeParam* top_data_cpu = this->blob_top_cpu_->mutable_cpu_data();
// CPU Version
for (int n = 0; n < this->blob_bottom_->num(); ++n) {
im2col_nd_cpu(bottom_data_cpu + this->blob_bottom_->offset(n), 2,
this->blob_bottom_->shape().data() + 1,
this->blob_top_cpu_->shape().data() + 1,
this->blob_kernel_shape_->cpu_data(),
this->blob_pad_->cpu_data(), this->blob_stride_->cpu_data(),
this->blob_dilation_->cpu_data(),
top_data_cpu + this->blob_top_cpu_->offset(n));
}
// GPU version
int num_kernels = this->channels_ * this->height_col_ * this->width_col_;
int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels);
const TypeParam* bottom_data_gpu = this->blob_bottom_->gpu_data();
// Launch with different grid sizes
cudaStream_t stream = Caffe::thread_stream();
for (int grid_div = 2; grid_div <= 8; grid_div++) {
for (int n = 0; n < this->blob_bottom_->num(); ++n) {
const int grid_dim = default_grid_dim / grid_div;
TypeParam* top_data_gpu = this->blob_top_->mutable_gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
im2col_nd_gpu_kernel<TypeParam, 2><<<grid_dim, CAFFE_CUDA_NUM_THREADS, 0, stream>>>(
num_kernels, bottom_data_gpu + this->blob_bottom_->offset(n),
this->blob_bottom_->gpu_shape() + 1, this->blob_top_->gpu_shape() + 1,
this->blob_kernel_shape_->gpu_data(), this->blob_pad_->gpu_data(),
this->blob_stride_->gpu_data(), this->blob_dilation_->gpu_data(),
top_data_gpu + this->blob_top_->offset(n));
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
// Compare results against CPU version
for (int i = 0; i < this->blob_top_->count(); ++i) {
TypeParam cpuval = top_data_cpu[i];
TypeParam gpuval = this->blob_top_->cpu_data()[i];
EXPECT_EQ(cpuval, gpuval);
if (cpuval != gpuval) {
break;
}
}
}
}
} // namespace caffe
|
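Unlike the first pair, this Caffe test launches on an explicit stream obtained from `Caffe::thread_stream()` using the four-argument chevron form; hipify maps `cudaStream_t` to `hipStream_t` and carries the shared-memory and stream arguments straight into `hipLaunchKernelGGL`. A small, hand-written sketch of an explicit-stream launch (illustrative names, not Caffe's API):

```cpp
// Sketch of the <<<grid, block, shared-bytes, stream>>> launch form used above,
// followed by a stream synchronize.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void fill_kernel(int *out, int n, int value) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = value;
}

int main() {
  const int n = 1024;
  int *d_out = nullptr;
  cudaMalloc((void **)&d_out, n * sizeof(int));
  cudaStream_t stream;                                   // hipStream_t after hipify
  cudaStreamCreate(&stream);
  dim3 block(256), grid((n + block.x - 1) / block.x);
  fill_kernel<<<grid, block, 0, stream>>>(d_out, n, 7);  // 0 bytes of dynamic shared memory
  cudaError_t err = cudaStreamSynchronize(stream);       // hipStreamSynchronize after hipify
  if (err != cudaSuccess) fprintf(stderr, "%s\n", cudaGetErrorString(err));
  cudaStreamDestroy(stream);
  cudaFree(d_out);
  return 0;
}
```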
4dfe2aaa77014515c325dcddb258ca139186ce6b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef CUDA_SPARSE_VOTING_KERNEL
#define CUDA_SPARSE_VOTING_KERNEL
namespace cudavoting{
__global__ void sparse_ball_voting_kernel(float3 * field, const float3 * __restrict__ points, const float sigma, const int numPoints, int2 * logg)
{
__shared__ float3 tmpfield[BLOCK_DIM*3];
int token = threadIdx.x+blockIdx.x*blockDim.x;
tmpfield[threadIdx.x*3+0] = make_float3(1.0,0,0);
tmpfield[threadIdx.x*3+1] = make_float3(0,1.0,0);
tmpfield[threadIdx.x*3+2] = make_float3(0,0,1.0);
__syncthreads();
if (token >= numPoints) return;
float3 votee = points[token];
#pragma unroll 64
for(unsigned int voter_i = 0; voter_i<numPoints; voter_i ++)
{
float3 voter = points[voter_i];
if (token == voter_i) continue;
float3 v = votee - voter;
//float l = __powf(v.x,2) + __powf(v.y,2) + __powf(v.z,2);
float l = pow(v.x,2) + pow(v.y,2) + pow(v.z,2);
float z = __fdividef(__fsqrt_rn(l),sigma);
if(l>0 && z<3)
{
// 40ms additional for 1000 points
//logg[token].x += 1;
//atomicAdd(&(logg[voter_i].y), 1); //logg[votee_i].y += 1;
// outer product
float3 vv[3];
vv[0] = make_float3(pow(v.x,2), __fmul_rn(v.x,v.y), __fmul_rn(v.x,v.z));
vv[1] = make_float3(__fmul_rn(v.y,v.x), pow(v.y,2), __fmul_rn(v.y,v.z));
vv[2] = make_float3(__fmul_rn(v.z,v.x), __fmul_rn(v.z,v.y), pow(v.z,2));
float norm_vv = __fsqrt_rn(
pow(vv[0].x,2) + pow(vv[0].y,2) + pow(vv[0].z,2) +
pow(vv[1].x,2) + pow(vv[1].y,2) + pow(vv[1].z,2) +
pow(vv[2].x,2) + pow(vv[2].y,2) + pow(vv[2].z,2)
);
// ATTENTION: sm_11 only support integer atomicAdd
//float decay = FDIV(z*z*(z-3)*(z-3)*(z-3)*(z-3),16);
float decay = __expf(-pow(z,2));
float3 vv_use[3];
vv_use[0] = make_float3( __fmul_rn(decay, 1 - __fdividef(vv[0].x,norm_vv)), __fmul_rn(decay, 0 - __fdividef(vv[0].y,norm_vv)), __fmul_rn(decay, 0 - __fdividef(vv[0].z,norm_vv)) );
vv_use[1] = make_float3( __fmul_rn(decay, 0 - __fdividef(vv[1].x,norm_vv)), __fmul_rn(decay, 1 - __fdividef(vv[1].y,norm_vv)), __fmul_rn(decay, 0 - __fdividef(vv[1].z,norm_vv)) );
vv_use[2] = make_float3( __fmul_rn(decay, 0 - __fdividef(vv[2].x,norm_vv)), __fmul_rn(decay, 0 - __fdividef(vv[2].y,norm_vv)), __fmul_rn(decay, 1 - __fdividef(vv[2].z,norm_vv)) );
// update outputing field
tmpfield[threadIdx.x*3 + 0] += vv_use[0];
tmpfield[threadIdx.x*3 + 1] += vv_use[1];
tmpfield[threadIdx.x*3 + 2] += vv_use[2];
} // end of if
}// end of for all voters
__syncthreads();
field[token*3 + 0] = tmpfield[threadIdx.x*3 + 0];
field[token*3 + 1] = tmpfield[threadIdx.x*3 + 1];
field[token*3 + 2] = tmpfield[threadIdx.x*3 + 2];
}
}
#endif
|
4dfe2aaa77014515c325dcddb258ca139186ce6b.cu
|
#ifndef CUDA_SPARSE_VOTING_KERNEL
#define CUDA_SPARSE_VOTING_KERNEL
namespace cudavoting{
__global__ void sparse_ball_voting_kernel(float3 * field, const float3 * __restrict__ points, const float sigma, const int numPoints, int2 * logg)
{
__shared__ float3 tmpfield[BLOCK_DIM*3];
int token = threadIdx.x+blockIdx.x*blockDim.x;
tmpfield[threadIdx.x*3+0] = make_float3(1.0,0,0);
tmpfield[threadIdx.x*3+1] = make_float3(0,1.0,0);
tmpfield[threadIdx.x*3+2] = make_float3(0,0,1.0);
__syncthreads();
if (token >= numPoints) return;
float3 votee = points[token];
#pragma unroll 64
for(unsigned int voter_i = 0; voter_i<numPoints; voter_i ++)
{
float3 voter = points[voter_i];
if (token == voter_i) continue;
float3 v = votee - voter;
//float l = __powf(v.x,2) + __powf(v.y,2) + __powf(v.z,2);
float l = pow(v.x,2) + pow(v.y,2) + pow(v.z,2);
float z = __fdividef(__fsqrt_rn(l),sigma);
if(l>0 && z<3)
{
// 40ms additional for 1000 points
//logg[token].x += 1;
//atomicAdd(&(logg[voter_i].y), 1); //logg[votee_i].y += 1;
// outer product
float3 vv[3];
vv[0] = make_float3(pow(v.x,2), __fmul_rn(v.x,v.y), __fmul_rn(v.x,v.z));
vv[1] = make_float3(__fmul_rn(v.y,v.x), pow(v.y,2), __fmul_rn(v.y,v.z));
vv[2] = make_float3(__fmul_rn(v.z,v.x), __fmul_rn(v.z,v.y), pow(v.z,2));
float norm_vv = __fsqrt_rn(
pow(vv[0].x,2) + pow(vv[0].y,2) + pow(vv[0].z,2) +
pow(vv[1].x,2) + pow(vv[1].y,2) + pow(vv[1].z,2) +
pow(vv[2].x,2) + pow(vv[2].y,2) + pow(vv[2].z,2)
);
// ATTENTION: sm_11 only support integer atomicAdd
//float decay = FDIV(z*z*(z-3)*(z-3)*(z-3)*(z-3),16);
float decay = __expf(-pow(z,2));
float3 vv_use[3];
vv_use[0] = make_float3( __fmul_rn(decay, 1 - __fdividef(vv[0].x,norm_vv)), __fmul_rn(decay, 0 - __fdividef(vv[0].y,norm_vv)), __fmul_rn(decay, 0 - __fdividef(vv[0].z,norm_vv)) );
vv_use[1] = make_float3( __fmul_rn(decay, 0 - __fdividef(vv[1].x,norm_vv)), __fmul_rn(decay, 1 - __fdividef(vv[1].y,norm_vv)), __fmul_rn(decay, 0 - __fdividef(vv[1].z,norm_vv)) );
vv_use[2] = make_float3( __fmul_rn(decay, 0 - __fdividef(vv[2].x,norm_vv)), __fmul_rn(decay, 0 - __fdividef(vv[2].y,norm_vv)), __fmul_rn(decay, 1 - __fdividef(vv[2].z,norm_vv)) );
// update outputing field
tmpfield[threadIdx.x*3 + 0] += vv_use[0];
tmpfield[threadIdx.x*3 + 1] += vv_use[1];
tmpfield[threadIdx.x*3 + 2] += vv_use[2];
} // end of if
}// end of for all voters
__syncthreads();
field[token*3 + 0] = tmpfield[threadIdx.x*3 + 0];
field[token*3 + 1] = tmpfield[threadIdx.x*3 + 1];
field[token*3 + 2] = tmpfield[threadIdx.x*3 + 2];
}
}
#endif
|
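Apart from the hipify banner and the prepended `#include "hip/hip_runtime.h"`, this pair is identical: the voting kernel uses only device-side intrinsics (`__fsqrt_rn`, `__fdividef`, `__fmul_rn`, `__expf`) and no host runtime calls, so there is nothing for the translator to rename. A small, hand-written program (not a dataset row) exercising the same decay computation:

```cpp
// Standalone sketch: the decay weight exp(-z^2) with z = sqrt(l)/sigma, built
// from the same device intrinsics as the voting kernel above.
#include <cuda_runtime.h>   // hipify would prepend "hip/hip_runtime.h" instead
#include <cstdio>

__global__ void decay_kernel(const float *l, float *out, int n, float sigma) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  float z = __fdividef(__fsqrt_rn(l[i]), sigma);
  out[i] = __expf(-z * z);
}

int main() {
  const int n = 4;
  float h_l[n] = {0.f, 1.f, 4.f, 9.f}, h_out[n];
  float *d_l, *d_out;
  cudaMalloc((void **)&d_l, n * sizeof(float));
  cudaMalloc((void **)&d_out, n * sizeof(float));
  cudaMemcpy(d_l, h_l, n * sizeof(float), cudaMemcpyHostToDevice);
  decay_kernel<<<1, n>>>(d_l, d_out, n, 1.0f);
  cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) printf("%f\n", h_out[i]);
  cudaFree(d_l); cudaFree(d_out);
  return 0;
}
```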
9b295a4c3bfbd07ef0121689fa34d42a0d0033b4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "makeFlist.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int *d_trans_offset = NULL;
hipMalloc(&d_trans_offset, XSIZE*YSIZE);
unsigned int *d_transactions = NULL;
hipMalloc(&d_transactions, XSIZE*YSIZE);
unsigned int *d_flist = NULL;
hipMalloc(&d_flist, XSIZE*YSIZE);
unsigned int num_transactions = 1;
unsigned int num_items_in_transactions = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
makeFlist), dim3(gridBlock),dim3(threadBlock), 0, 0, d_trans_offset,d_transactions,d_flist,num_transactions,num_items_in_transactions);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
makeFlist), dim3(gridBlock),dim3(threadBlock), 0, 0, d_trans_offset,d_transactions,d_flist,num_transactions,num_items_in_transactions);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
makeFlist), dim3(gridBlock),dim3(threadBlock), 0, 0, d_trans_offset,d_transactions,d_flist,num_transactions,num_items_in_transactions);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
9b295a4c3bfbd07ef0121689fa34d42a0d0033b4.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "makeFlist.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int *d_trans_offset = NULL;
cudaMalloc(&d_trans_offset, XSIZE*YSIZE);
unsigned int *d_transactions = NULL;
cudaMalloc(&d_transactions, XSIZE*YSIZE);
unsigned int *d_flist = NULL;
cudaMalloc(&d_flist, XSIZE*YSIZE);
unsigned int num_transactions = 1;
unsigned int num_items_in_transactions = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
makeFlist<<<gridBlock,threadBlock>>>(d_trans_offset,d_transactions,d_flist,num_transactions,num_items_in_transactions);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
makeFlist<<<gridBlock,threadBlock>>>(d_trans_offset,d_transactions,d_flist,num_transactions,num_items_in_transactions);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
makeFlist<<<gridBlock,threadBlock>>>(d_trans_offset,d_transactions,d_flist,num_transactions,num_items_in_transactions);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
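Both versions of this benchmark harness time 1000 launches with `std::chrono::steady_clock`, but kernel launches return to the host asynchronously and the timed loop above stops the clock without a device synchronize, so the measured interval mostly reflects enqueue cost rather than kernel execution time (unless the launch queue back-pressures). A hedged sketch of the same loop with explicit synchronization around the timed region (hypothetical kernel and sizes):

```cpp
// Sketch: drain the queue before starting the clock, and synchronize again
// before stopping it, so the interval covers actual kernel execution.
#include <cuda_runtime.h>
#include <chrono>
#include <iostream>

__global__ void dummy_kernel(float *x, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] += 1.0f;
}

int main() {
  using namespace std::chrono;
  const int n = 1 << 20;
  float *d_x = nullptr;
  cudaMalloc((void **)&d_x, n * sizeof(float));
  dim3 block(256), grid((n + block.x - 1) / block.x);
  dummy_kernel<<<grid, block>>>(d_x, n);   // warm-up launch
  cudaDeviceSynchronize();                 // queue is empty when the clock starts
  auto start = steady_clock::now();
  for (int i = 0; i < 1000; ++i) dummy_kernel<<<grid, block>>>(d_x, n);
  cudaDeviceSynchronize();                 // all 1000 launches have finished
  auto end = steady_clock::now();
  auto usecs = duration_cast<duration<float, microseconds::period>>(end - start);
  std::cout << usecs.count() / 1000.0f << " us per launch\n";
  cudaFree(d_x);
  return 0;
}
```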
d0442243281cb9d5621a28e8e587d4be570c4d16.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define N 1024
__global__ void stencil(float *d_a, float *d_b) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid > 0 && tid < N - 1) {
d_b[tid] = 0.3333f * d_a[tid - 1] * d_a[tid] * d_a[tid + 1];
}
}
int main() {
float *h_a, *h_b;
float *d_a, *d_b;
int memSize = sizeof(float) * N;
//Reserve host memory
h_a = (float *) malloc(memSize);
h_b = (float *) malloc(memSize);
//Reserves device memory
hipError_t error;
error = hipMalloc((void **) &d_a, memSize);
if (error != hipSuccess) {
fprintf(stderr, "Error al reservar memoria en la GPU\n");
return -1;
}
error = hipMalloc((void **) &d_b, memSize);
if (error != hipSuccess) {
fprintf(stderr, "Error al reservar memoria en la GPU\n");
return -1;
}
//Fills the arrays
for (int i = 0; i < N; ++i) {
h_a[i] = h_b[i] = 70.0f;
}
h_a[0] = h_a[N - 1] = h_b[0] = h_b[N - 1] = 150.0f;
//Copies host memory to device
error = hipMemcpy(d_a, h_a, memSize, hipMemcpyHostToDevice);
if (error != hipSuccess) {
fprintf(stderr, "Error al transferir informacin\n");
return -1;
}
error = hipMemcpy(d_b, h_b, memSize, hipMemcpyHostToDevice);
if (error != hipSuccess) {
fprintf(stderr, "Error al transferir informacin\n");
return -1;
}
//Grid Definition
dim3 block(N / 256);
dim3 thread(256);
float *aux = NULL;
for (int i = 0; i < N; ++i) {
hipLaunchKernelGGL(( stencil), dim3(block) ,dim3(thread), 0, 0, d_a, d_b);
aux = d_a;
d_a = d_b;
d_b = aux;
}
error = hipMemcpy(h_a, d_a, memSize, hipMemcpyDeviceToHost);
if (error != hipSuccess) {
fprintf(stderr, "Error al transferir informacin\n");
return -1;
}
for (int i = 0; i < N; ++i) {
printf("%f, ", h_a[i]);
}
printf("\n");
free(h_a);
free(h_b);
hipFree(d_a);
hipFree(d_b);
return 0;
}
|
d0442243281cb9d5621a28e8e587d4be570c4d16.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define N 1024
__global__ void stencil(float *d_a, float *d_b) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid > 0 && tid < N - 1) {
d_b[tid] = 0.3333f * d_a[tid - 1] * d_a[tid] * d_a[tid + 1];
}
}
int main() {
float *h_a, *h_b;
float *d_a, *d_b;
int memSize = sizeof(float) * N;
//Reserve host memory
h_a = (float *) malloc(memSize);
h_b = (float *) malloc(memSize);
//Reserves device memory
cudaError_t error;
error = cudaMalloc((void **) &d_a, memSize);
if (error != cudaSuccess) {
fprintf(stderr, "Error al reservar memoria en la GPU\n");
return -1;
}
error = cudaMalloc((void **) &d_b, memSize);
if (error != cudaSuccess) {
fprintf(stderr, "Error al reservar memoria en la GPU\n");
return -1;
}
//Fills the arrays
for (int i = 0; i < N; ++i) {
h_a[i] = h_b[i] = 70.0f;
}
h_a[0] = h_a[N - 1] = h_b[0] = h_b[N - 1] = 150.0f;
//Copies host memory to device
error = cudaMemcpy(d_a, h_a, memSize, cudaMemcpyHostToDevice);
if (error != cudaSuccess) {
fprintf(stderr, "Error al transferir información\n");
return -1;
}
error = cudaMemcpy(d_b, h_b, memSize, cudaMemcpyHostToDevice);
if (error != cudaSuccess) {
fprintf(stderr, "Error al transferir información\n");
return -1;
}
//Grid Definition
dim3 block(N / 256);
dim3 thread(256);
float *aux = NULL;
for (int i = 0; i < N; ++i) {
stencil<<<block ,thread>>>(d_a, d_b);
aux = d_a;
d_a = d_b;
d_b = aux;
}
error = cudaMemcpy(h_a, d_a, memSize, cudaMemcpyDeviceToHost);
if (error != cudaSuccess) {
fprintf(stderr, "Error al transferir información\n");
return -1;
}
for (int i = 0; i < N; ++i) {
printf("%f, ", h_a[i]);
}
printf("\n");
free(h_a);
free(h_b);
cudaFree(d_a);
cudaFree(d_b);
return 0;
}
|
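The host loop in this stencil pair swaps `d_a` and `d_b` after every launch without synchronizing; that is safe because kernels issued to the same (default) stream execute in launch order, so each iteration only starts after the previous one finishes on the device, and the final `cudaMemcpy` is stream-ordered as well. A compact, hand-written sketch of the same ping-pong pattern (simple averaging kernel for illustration, not the formula above):

```cpp
// Sketch of pointer-swap (ping-pong) iteration: same-stream ordering removes
// the need for per-iteration synchronization.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void relax(const float *in, float *out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i > 0 && i < n - 1)
    out[i] = 0.5f * (in[i - 1] + in[i + 1]);
}

int main() {
  const int n = 1024, iters = 100;
  float h[n];
  for (int i = 0; i < n; ++i) h[i] = (i == 0 || i == n - 1) ? 1.0f : 0.0f;
  float *d_a, *d_b;
  cudaMalloc((void **)&d_a, n * sizeof(float));
  cudaMalloc((void **)&d_b, n * sizeof(float));
  cudaMemcpy(d_a, h, n * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, h, n * sizeof(float), cudaMemcpyHostToDevice);
  dim3 block(256), grid((n + block.x - 1) / block.x);
  for (int i = 0; i < iters; ++i) {
    relax<<<grid, block>>>(d_a, d_b);
    float *tmp = d_a; d_a = d_b; d_b = tmp;   // swap roles for the next iteration
  }
  cudaMemcpy(h, d_a, n * sizeof(float), cudaMemcpyDeviceToHost);  // waits for the queue
  printf("h[1] = %f\n", h[1]);
  cudaFree(d_a); cudaFree(d_b);
  return 0;
}
```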
274ca9f25b066c3f6279a8b9cb62b30e69eb06a4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*Raul P. Pelaez 2016. Brownian Euler Maruyama Integrator derived class implementation
An Integrator is intended to be a separated module that handles the update of positions given the forces
It takes care of keeping the positions updated.
The positions must be provided as a global object,
they are not created by the module.
Also takes care of writing to disk
Solves the following differential equation:
X[t+dt] = dt(K·X[t]+M·F[t]) + sqrt(2·T·dt)·dW·B
Being:
X - Positions
M - Mobility matrix
K - Shear matrix
dW- Noise vector
B - chol(M)
*/
#include "BrownianEulerMaruyama.cuh"
#include"utils/vector_overloads.h"
#include"utils/helper_gpu.cuh"
__constant__ real3 Mcons[3];
__constant__ real3 Bcons[3];
__constant__ real3 Kcons[3];
/*Performs the cholesky decomposition of a matrix, in CPU*/
Matrixf cholesky(Matrixf Din){
//Doesnt check for positive definite
//Super slow, use only in initialization
uint i, j, k; /* Indices */
real tmpsum; /* Temporary variable */
if(!Din.isSquare()){
cerr<<"Cholesky: Not a square matrix!!!"<<endl;
}
uint dim = Din.size().x;
Matrixf B(dim, dim);
Matrixf D = Din;
/* Clear B matrix */
B.fill_with(0.0);
for(j = 0; j < dim; j++) {
tmpsum = 0;
for(k = 0; k < j; k++)
tmpsum += B[j][k]*B[j][k];
B[j][j] = sqrt(D[j][j] - tmpsum);
for(i = j + 1; i < dim; i++) {
tmpsum = 0;
for(k = 0; k < j; k++)
tmpsum += B[i][k]*B[j][k];
B[i][j] = (D[i][j] - tmpsum)/B[j][j];
}
}
return B;
}
BrownianEulerMaruyama::BrownianEulerMaruyama(Matrixf Min,
Matrixf Kin):
BrownianEulerMaruyama(Min, Kin, gcnf.N, gcnf.L, gcnf.dt){}
BrownianEulerMaruyama::BrownianEulerMaruyama(Matrixf Min,
Matrixf Kin,
int N, real3 L, real dt):
Integrator(N, L, dt, 128),
M(Min), K(Kin),
noise( N + ((3*N)%2) ),
T(gcnf.T){
cerr<<"Initializing Brownian Euler Maruyama Integrator..."<<endl;
if(Min.size().x!=3 || Kin.size().x!=3 ||
!Min.isSquare() || !Kin.isSquare()){
cerr<<"ERROR!, K and D must be 3x3!!"<<endl;
exit(1);
}
/*Set GPU parameters*/
this->sqrt2Tdt = sqrt(dt)*sqrt(2.0*T);
M.upload();
K.upload();
//Is K zero?
bool K0 = true;
fori(0,9){
if(K.data[i] != real(0.0)) K0 = false;
}
if(K0){
K.freeMem();
}
B = cholesky(M);
B.upload();
hipMemcpyToSymbol(Mcons, M.d_m, 3*sizeof(real3));
hipMemcpyToSymbol(Bcons, B.d_m, 3*sizeof(real3));
if(!K0)
hipMemcpyToSymbol(Kcons, K.d_m, 3*sizeof(real3));
/*Create noise*/
hiprandCreateGenerator(&rng, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(rng, grng.next());
noise.fill_with(make_real3(0.0));
noise.upload();
//Curand fill with gaussian numbers with mean 0 and var 1, you have to ask for an even number of them
hiprandGenerateNormal(rng, (real*) noise.d_m, 3*N + ((3*N)%2), 0.0, 1.0);
cerr<<"Brownian Euler Maruyama Integrator\t\tDONE!!\n\n"<<endl;
}
BrownianEulerMaruyama::~BrownianEulerMaruyama(){}
namespace BrownianEulerMaruyama_ns{
/*Integrate the movement*/
template<bool shear>
__global__ void integrateGPU(real4 __restrict__ *pos,
const real4 __restrict__ *force,
const real3 __restrict__ *dW,
// const real3 __restrict__ *M,
// const real3 __restrict__ *B,
// const real3 __restrict__ *K,
int N, real dt, bool D2, real sqrt2Tdt){
uint i = blockIdx.x*blockDim.x+threadIdx.x;
if(i>=N) return;
/*Half step velocity*/
real3 *M = Mcons;
real3 *B = Bcons;
real3 p = make_real3(pos[i]);
real3 f = make_real3(force[i]);
real3 KR = make_real3(real(0.0));
if(shear){
real3 *K = Kcons;
KR = make_real3(dot(K[0],p), dot(K[1],p), dot(K[2],p));
}
// X[t+dt] = dt(K·X[t]+D·F[t]) + sqrt(dt)·dW·B
p.x = dt*( KR.x + dot(M[0],f)) + sqrt2Tdt*dot(dW[i],B[0]);
p.y = dt*( KR.y + dot(M[1],f)) + sqrt2Tdt*dot(dW[i],B[1]);
if(!D2)//If 3D
p.z = dt*( KR.z + dot(M[2],f)) + sqrt2Tdt*dot(dW[i],B[2]);
pos[i] += make_real4(p);
}
};
void BrownianEulerMaruyama::update(){
uint nthreads = BLOCKSIZE<N?BLOCKSIZE:N;
uint nblocks = N/nthreads + ((N%nthreads!=0)?1:0);
steps++;
/*Generate noise*/
hiprandGenerateNormal(rng, (real*) noise.d_m, 3*N + ((3*N)%2), real(0.0), real(1.0));
/*Reset force*/
hipMemset(force.d_m, 0, N*sizeof(real4));
/*Compute new forces*/
for(auto forceComp: interactors) forceComp->sumForce();
/*Update positions*/
if(K.d_m)
hipLaunchKernelGGL(( BrownianEulerMaruyama_ns::integrateGPU<true>), dim3(nblocks),dim3(nthreads), 0, 0, pos.d_m, force.d_m, noise.d_m,
// (real3*)M.d_m, (real3*)B.d_m, (real3*)K.d_m,
N, dt, gcnf.D2, sqrt2Tdt);
else
hipLaunchKernelGGL(( BrownianEulerMaruyama_ns::integrateGPU<false>), dim3(nblocks),dim3(nthreads), 0, 0, pos.d_m, force.d_m, noise.d_m,
N, dt, gcnf.D2, sqrt2Tdt);
}
real BrownianEulerMaruyama::sumEnergy(){
return 0.0;
}
|
274ca9f25b066c3f6279a8b9cb62b30e69eb06a4.cu
|
/*Raul P. Pelaez 2016. Brownian Euler Maruyama Integrator derived class implementation
An Integrator is intended to be a separated module that handles the update of positions given the forces
It takes care of keeping the positions updated.
The positions must be provided as a global object,
they are not created by the module.
Also takes care of writing to disk
Solves the following differential equation:
X[t+dt] = dt(K·X[t]+M·F[t]) + sqrt(2·T·dt)·dW·B
Being:
X - Positions
M - Mobility matrix
K - Shear matrix
dW- Noise vector
B - chol(M)
*/
#include "BrownianEulerMaruyama.cuh"
#include"utils/vector_overloads.h"
#include"utils/helper_gpu.cuh"
__constant__ real3 Mcons[3];
__constant__ real3 Bcons[3];
__constant__ real3 Kcons[3];
/*Performs the cholesky decomposition of a matrix, in CPU*/
Matrixf cholesky(Matrixf Din){
//Doesnt check for positive definite
//Super slow, use only in initialization
uint i, j, k; /* Indices */
real tmpsum; /* Temporary variable */
if(!Din.isSquare()){
cerr<<"Cholesky: Not a square matrix!!!"<<endl;
}
uint dim = Din.size().x;
Matrixf B(dim, dim);
Matrixf D = Din;
/* Clear B matrix */
B.fill_with(0.0);
for(j = 0; j < dim; j++) {
tmpsum = 0;
for(k = 0; k < j; k++)
tmpsum += B[j][k]*B[j][k];
B[j][j] = sqrt(D[j][j] - tmpsum);
for(i = j + 1; i < dim; i++) {
tmpsum = 0;
for(k = 0; k < j; k++)
tmpsum += B[i][k]*B[j][k];
B[i][j] = (D[i][j] - tmpsum)/B[j][j];
}
}
return B;
}
BrownianEulerMaruyama::BrownianEulerMaruyama(Matrixf Min,
Matrixf Kin):
BrownianEulerMaruyama(Min, Kin, gcnf.N, gcnf.L, gcnf.dt){}
BrownianEulerMaruyama::BrownianEulerMaruyama(Matrixf Min,
Matrixf Kin,
int N, real3 L, real dt):
Integrator(N, L, dt, 128),
M(Min), K(Kin),
noise( N + ((3*N)%2) ),
T(gcnf.T){
cerr<<"Initializing Brownian Euler Maruyama Integrator..."<<endl;
if(Min.size().x!=3 || Kin.size().x!=3 ||
!Min.isSquare() || !Kin.isSquare()){
cerr<<"ERROR!, K and D must be 3x3!!"<<endl;
exit(1);
}
/*Set GPU parameters*/
this->sqrt2Tdt = sqrt(dt)*sqrt(2.0*T);
M.upload();
K.upload();
//Is K zero?
bool K0 = true;
fori(0,9){
if(K.data[i] != real(0.0)) K0 = false;
}
if(K0){
K.freeMem();
}
B = cholesky(M);
B.upload();
cudaMemcpyToSymbol(Mcons, M.d_m, 3*sizeof(real3));
cudaMemcpyToSymbol(Bcons, B.d_m, 3*sizeof(real3));
if(!K0)
cudaMemcpyToSymbol(Kcons, K.d_m, 3*sizeof(real3));
/*Create noise*/
curandCreateGenerator(&rng, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(rng, grng.next());
noise.fill_with(make_real3(0.0));
noise.upload();
//Curand fill with gaussian numbers with mean 0 and var 1, you have to ask for an even number of them
curandGenerateNormal(rng, (real*) noise.d_m, 3*N + ((3*N)%2), 0.0, 1.0);
cerr<<"Brownian Euler Maruyama Integrator\t\tDONE!!\n\n"<<endl;
}
BrownianEulerMaruyama::~BrownianEulerMaruyama(){}
namespace BrownianEulerMaruyama_ns{
/*Integrate the movement*/
template<bool shear>
__global__ void integrateGPU(real4 __restrict__ *pos,
const real4 __restrict__ *force,
const real3 __restrict__ *dW,
// const real3 __restrict__ *M,
// const real3 __restrict__ *B,
// const real3 __restrict__ *K,
int N, real dt, bool D2, real sqrt2Tdt){
uint i = blockIdx.x*blockDim.x+threadIdx.x;
if(i>=N) return;
/*Half step velocity*/
real3 *M = Mcons;
real3 *B = Bcons;
real3 p = make_real3(pos[i]);
real3 f = make_real3(force[i]);
real3 KR = make_real3(real(0.0));
if(shear){
real3 *K = Kcons;
KR = make_real3(dot(K[0],p), dot(K[1],p), dot(K[2],p));
}
// X[t+dt] = dt(K·X[t]+D·F[t]) + sqrt(dt)·dW·B
p.x = dt*( KR.x + dot(M[0],f)) + sqrt2Tdt*dot(dW[i],B[0]);
p.y = dt*( KR.y + dot(M[1],f)) + sqrt2Tdt*dot(dW[i],B[1]);
if(!D2)//If 3D
p.z = dt*( KR.z + dot(M[2],f)) + sqrt2Tdt*dot(dW[i],B[2]);
pos[i] += make_real4(p);
}
};
void BrownianEulerMaruyama::update(){
uint nthreads = BLOCKSIZE<N?BLOCKSIZE:N;
uint nblocks = N/nthreads + ((N%nthreads!=0)?1:0);
steps++;
/*Generate noise*/
curandGenerateNormal(rng, (real*) noise.d_m, 3*N + ((3*N)%2), real(0.0), real(1.0));
/*Reset force*/
cudaMemset(force.d_m, 0, N*sizeof(real4));
/*Compute new forces*/
for(auto forceComp: interactors) forceComp->sumForce();
/*Update positions*/
if(K.d_m)
BrownianEulerMaruyama_ns::integrateGPU<true><<<nblocks,nthreads>>>(pos.d_m, force.d_m, noise.d_m,
// (real3*)M.d_m, (real3*)B.d_m, (real3*)K.d_m,
N, dt, gcnf.D2, sqrt2Tdt);
else
BrownianEulerMaruyama_ns::integrateGPU<false><<<nblocks,nthreads>>>(pos.d_m, force.d_m, noise.d_m,
N, dt, gcnf.D2, sqrt2Tdt);
}
real BrownianEulerMaruyama::sumEnergy(){
return 0.0;
}
|
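Restating the update rule from this integrator's header comment in standard notation (the `integrateGPU` kernel adds the increment to `pos[i]` each step; B is the Cholesky factor computed on the CPU in `cholesky()`, and dW is the unit-variance Gaussian noise drawn with curand/hiprand):

$$X_{t+\mathrm{d}t} = X_t + \mathrm{d}t\,\bigl(K X_t + M F_t\bigr) + \sqrt{2\,T\,\mathrm{d}t}\;B\,\mathrm{d}W, \qquad B\,B^{\mathsf T} = M, \quad \mathrm{d}W \sim \mathcal{N}(0, I_3).$$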
224b82806420eb6fbf90c94251012928e3d3aaeb.hip
|
// !!! This is a file automatically generated by hipify!!!
/************************************************************************************\
* *
* Copyright 2014 Advanced Micro Devices, Inc. *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright
holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD.
*
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the
applicable *
* underlying intellectual property rights related to the third party
technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited
to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections
730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further,
pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of
Industry *
* and Security or as otherwise permitted pursuant to a License Exception under
*
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export,
*
* re-export or release to a national of a country in Country Groups D:1, E:1 or
*
* E:2 any restricted technology, software, or source code you receive
hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to
*
* national security controls as identified on the Commerce Control List
(currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country
Group *
* listings, or for additional information about the EAR or your obligations
under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's
*
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <new>
#include "../../mem_alloc/mem_alloc.h"
#include "../graph_parser/parse.h"
#include "parse_oo.h"
#include "../graph_parser/util.h"
#include "kernel.h"
// Iteration count
#define ITER 20
void print_vectorf(float *vector, int num);
int main(int argc, char **argv) {
char *tmpchar;
mem_alloc shared_mem(4ULL * 1024 * 1024 * 1024);
int num_nodes;
int num_edges;
int file_format = 1;
bool directed = 0;
hipError_t err = hipSuccess;
  if (argc == 4) {
    tmpchar = argv[1];           // Graph inputfile
    file_format = atoi(argv[2]); // File format
  } else {
    fprintf(stderr, "Usage: %s <graph input file> <file format> <obj_alloc argument>\n",
            argv[0]);
    exit(1);
  }
  // Construct the object allocator only after argv has been validated
  obj_alloc my_obj_alloc(&shared_mem, atoll(argv[3]));
// Allocate the csr structure
csr_array *csr;
// Parse graph files into csr structure
if (file_format == 1) {
// Metis
csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed);
} else if (file_format == 0) {
// Dimacs9
csr = parseCOO(tmpchar, &num_nodes, &num_edges, 1);
} else if (file_format == 2) {
// Matrix market
csr = parseMM(tmpchar, &num_nodes, &num_edges, directed, 0);
} else {
printf("reserve for future");
exit(1);
}
// Allocate rank_array
float *rank_array = (float *)malloc(num_nodes * sizeof(float));
if (!rank_array) {
fprintf(stderr, "rank array not allocated successfully\n");
return -1;
}
int *row_d;
int *col_d;
int *inrow_d;
int *incol_d;
float *pagerank_d;
// Create device-side buffers for the graph
err = hipMalloc(&row_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc row_d (size:%d) => %s\n", num_nodes,
hipGetErrorString(err));
return -1;
}
err = hipMalloc(&col_d, num_edges * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc col_d (size:%d) => %s\n", num_edges,
hipGetErrorString(err));
return -1;
}
err = hipMalloc(&inrow_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc inrow_d (size:%d) => %s\n", num_nodes,
hipGetErrorString(err));
return -1;
}
err = hipMalloc(&incol_d, num_edges * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc incol_d (size:%d) => %s\n", num_edges,
hipGetErrorString(err));
return -1;
}
// Create buffers for pagerank
err = hipMalloc(&pagerank_d, num_nodes * sizeof(float));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc pagerank_d (size:%d) => %s\n", num_nodes,
hipGetErrorString(err));
return -1;
}
double timer1 = gettime();
// Copy the data to the device-side buffers
err = hipMemcpy(row_d, csr->row_array, num_nodes * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR:#endif hipMemcpy row_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(col_d, csr->col_array, num_edges * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy col_d (size:%d) => %s\n", num_nodes,
hipGetErrorString(err));
return -1;
}
err = hipMemcpy(inrow_d, csr->inrow_array, num_nodes * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR:#endif hipMemcpy inrow_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(incol_d, csr->incol_array, num_edges * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy incol_d (size:%d) => %s\n", num_nodes,
hipGetErrorString(err));
return -1;
}
// Set up work dimensions
int block_size = 256;
int num_blocks = (num_nodes + block_size - 1) / block_size;
dim3 threads(block_size, 1, 1);
dim3 grid(num_blocks, 1, 1);
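  // Example: with num_nodes = 1000 and block_size = 256, num_blocks = ceil(1000/256) = 4;
  // surplus threads in the last block are presumably bounds-checked inside the kernels.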
hipDeviceSetLimit(hipLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);
double timer3 = gettime();
VirtVertex<float, float> **vertex;
GraphChiContext *context;
vertex = (VirtVertex<float, float> **)my_obj_alloc.calloc<ChiVertex<float, float> *>(
num_nodes);
context = (GraphChiContext *)my_obj_alloc.calloc<GraphChiContext>(1);
// err = hipMalloc(&vertex, num_nodes * sizeof(ChiVertex<float, float>*));
// if (err != hipSuccess) {
// fprintf(stderr, "ERROR: hipMalloc vertex (size:%d) => %s\n",
// num_edges, hipGetErrorString(err));
// return -1;
// }
// err = hipMalloc(&context, sizeof(GraphChiContext));
// if (err != hipSuccess) {
// fprintf(stderr, "ERROR: hipMalloc context (size:%d) => %s\n",
// num_edges, hipGetErrorString(err));
// return -1;
// }
printf("Start initCtx\n");
initContext(context, num_nodes, num_edges);
// hipDeviceSynchronize();
// err = hipGetLastError();
// if (err != hipSuccess) {
// fprintf(stderr, "ERROR: initCtx failed (%s)\n",
// hipGetErrorString(err));
// return -1;
// }
printf("Start initObj\n");
part0_initObject(vertex, context, row_d, col_d, inrow_d, incol_d,
&my_obj_alloc);
hipLaunchKernelGGL(( part_kern0_initObject), dim3(grid), dim3(threads), 0, 0, vertex, context, row_d, col_d,
inrow_d, incol_d);
hipDeviceSynchronize();
part1_initObject(vertex, context, row_d, col_d, inrow_d, incol_d,
&my_obj_alloc);
hipLaunchKernelGGL(( part_kern1_initObject), dim3(grid), dim3(threads), 0, 0, vertex, context, row_d, col_d,
inrow_d, incol_d);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: initObject failed (%s)\n", hipGetErrorString(err));
return -1;
}
printf("Start initOutEdge\n");
hipLaunchKernelGGL(( kern_initOutEdge), dim3(grid), dim3(threads), 0, 0, vertex, context, row_d, col_d);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: initOutEdge failed (%s)\n",
hipGetErrorString(err));
return -1;
}
double timer5 = gettime();
printf("init time = %lf ms\n", (timer5 - timer3) * 1000);
my_obj_alloc.create_tree();
range_tree = my_obj_alloc.get_range_tree();
tree_size_g = my_obj_alloc.get_tree_size();
  // Run PageRank for a fixed number of iterations. TODO: convergence determination
double timer6 = gettime();
for (int i = 0; i < ITER; i++) {
printf("Start PageRank\n");
//PageRank<<<grid, threads>>>(vertex, context,i);
hipLaunchKernelGGL(( PageRank), dim3(grid), dim3(threads), 0, 0, vertex, context,i);
printf("Finish PageRank\n");
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipLaunch failed (%s)\n",
hipGetErrorString(err));
return -1;
}
}
hipDeviceSynchronize();
double timer4 = gettime();
printf("kernel time = %lf ms\n", (timer4 - timer6) * 1000);
printf("Start Copyback\n");
hipLaunchKernelGGL(( copyBack), dim3(grid), dim3(threads), 0, 0, vertex, context, pagerank_d);
printf("End Copyback\n");
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipLaunch failed (%s)\n", hipGetErrorString(err));
return -1;
}
// Copy the rank buffer back
err = hipMemcpy(rank_array, pagerank_d, num_nodes * sizeof(float),
hipMemcpyDeviceToHost);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy() failed (%s)\n",
hipGetErrorString(err));
return -1;
}
double timer2 = gettime();
// Report timing characteristics
printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);
#if 1
// Print rank array
print_vectorf(rank_array, num_nodes);
#endif
// Free the host-side arrays
free(rank_array);
csr->freeArrays();
free(csr);
// Free the device buffers
hipFree(row_d);
hipFree(col_d);
hipFree(inrow_d);
hipFree(incol_d);
hipFree(pagerank_d);
return 0;
}
void print_vectorf(float *vector, int num) {
FILE *fp = fopen("result_PRV.out", "w");
  if (!fp) {
    printf("ERROR: unable to open result_PRV.out\n");
    return;
  }
for (int i = 0; i < num; i++) {
fprintf(fp, "%f\n", vector[i]);
}
fclose(fp);
}
|
224b82806420eb6fbf90c94251012928e3d3aaeb.cu
|
/************************************************************************************\
* *
 * Copyright © 2014 Advanced Micro Devices, Inc.                                     *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright
holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD.
*
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the
applicable *
* underlying intellectual property rights related to the third party
technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited
to *
 * the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections
730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further,
pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of
Industry *
* and Security or as otherwise permitted pursuant to a License Exception under
*
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export,
*
* re-export or release to a national of a country in Country Groups D:1, E:1 or
*
* E:2 any restricted technology, software, or source code you receive
hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to
*
* national security controls as identified on the Commerce Control List
(currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country
Group *
* listings, or for additional information about the EAR or your obligations
under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's
*
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <new>
#include "../../mem_alloc/mem_alloc.h"
#include "../graph_parser/parse.h"
#include "parse_oo.h"
#include "../graph_parser/util.h"
#include "kernel.h"
// Iteration count
#define ITER 20
void print_vectorf(float *vector, int num);
int main(int argc, char **argv) {
char *tmpchar;
mem_alloc shared_mem(4ULL * 1024 * 1024 * 1024);
int num_nodes;
int num_edges;
int file_format = 1;
bool directed = 0;
cudaError_t err = cudaSuccess;
  if (argc == 4) {
    tmpchar = argv[1];           // Graph inputfile
    file_format = atoi(argv[2]); // File format
  } else {
    fprintf(stderr, "Usage: %s <graph input file> <file format> <obj_alloc argument>\n",
            argv[0]);
    exit(1);
  }
  // Construct the object allocator only after argv has been validated
  obj_alloc my_obj_alloc(&shared_mem, atoll(argv[3]));
// Allocate the csr structure
csr_array *csr;
// Parse graph files into csr structure
if (file_format == 1) {
// Metis
csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed);
} else if (file_format == 0) {
// Dimacs9
csr = parseCOO(tmpchar, &num_nodes, &num_edges, 1);
} else if (file_format == 2) {
// Matrix market
csr = parseMM(tmpchar, &num_nodes, &num_edges, directed, 0);
} else {
printf("reserve for future");
exit(1);
}
// Allocate rank_array
float *rank_array = (float *)malloc(num_nodes * sizeof(float));
if (!rank_array) {
fprintf(stderr, "rank array not allocated successfully\n");
return -1;
}
int *row_d;
int *col_d;
int *inrow_d;
int *incol_d;
float *pagerank_d;
// Create device-side buffers for the graph
err = cudaMalloc(&row_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc row_d (size:%d) => %s\n", num_nodes,
cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&col_d, num_edges * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc col_d (size:%d) => %s\n", num_edges,
cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&inrow_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc inrow_d (size:%d) => %s\n", num_nodes,
cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&incol_d, num_edges * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc incol_d (size:%d) => %s\n", num_edges,
cudaGetErrorString(err));
return -1;
}
// Create buffers for pagerank
err = cudaMalloc(&pagerank_d, num_nodes * sizeof(float));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc pagerank_d (size:%d) => %s\n", num_nodes,
cudaGetErrorString(err));
return -1;
}
double timer1 = gettime();
// Copy the data to the device-side buffers
err = cudaMemcpy(row_d, csr->row_array, num_nodes * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR:#endif cudaMemcpy row_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(col_d, csr->col_array, num_edges * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy col_d (size:%d) => %s\n", num_nodes,
cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(inrow_d, csr->inrow_array, num_nodes * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR:#endif cudaMemcpy inrow_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(incol_d, csr->incol_array, num_edges * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy incol_d (size:%d) => %s\n", num_nodes,
cudaGetErrorString(err));
return -1;
}
// Set up work dimensions
int block_size = 256;
int num_blocks = (num_nodes + block_size - 1) / block_size;
dim3 threads(block_size, 1, 1);
dim3 grid(num_blocks, 1, 1);
cudaDeviceSetLimit(cudaLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);
double timer3 = gettime();
VirtVertex<float, float> **vertex;
GraphChiContext *context;
vertex = (VirtVertex<float, float> **)my_obj_alloc.calloc<ChiVertex<float, float> *>(
num_nodes);
context = (GraphChiContext *)my_obj_alloc.calloc<GraphChiContext>(1);
// err = cudaMalloc(&vertex, num_nodes * sizeof(ChiVertex<float, float>*));
// if (err != cudaSuccess) {
// fprintf(stderr, "ERROR: cudaMalloc vertex (size:%d) => %s\n",
// num_edges, cudaGetErrorString(err));
// return -1;
// }
// err = cudaMalloc(&context, sizeof(GraphChiContext));
// if (err != cudaSuccess) {
// fprintf(stderr, "ERROR: cudaMalloc context (size:%d) => %s\n",
// num_edges, cudaGetErrorString(err));
// return -1;
// }
printf("Start initCtx\n");
initContext(context, num_nodes, num_edges);
// cudaDeviceSynchronize();
// err = cudaGetLastError();
// if (err != cudaSuccess) {
// fprintf(stderr, "ERROR: initCtx failed (%s)\n",
// cudaGetErrorString(err));
// return -1;
// }
printf("Start initObj\n");
part0_initObject(vertex, context, row_d, col_d, inrow_d, incol_d,
&my_obj_alloc);
part_kern0_initObject<<<grid, threads>>>(vertex, context, row_d, col_d,
inrow_d, incol_d);
cudaDeviceSynchronize();
part1_initObject(vertex, context, row_d, col_d, inrow_d, incol_d,
&my_obj_alloc);
part_kern1_initObject<<<grid, threads>>>(vertex, context, row_d, col_d,
inrow_d, incol_d);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: initObject failed (%s)\n", cudaGetErrorString(err));
return -1;
}
printf("Start initOutEdge\n");
kern_initOutEdge<<<grid, threads>>>(vertex, context, row_d, col_d);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: initOutEdge failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
double timer5 = gettime();
printf("init time = %lf ms\n", (timer5 - timer3) * 1000);
my_obj_alloc.create_tree();
range_tree = my_obj_alloc.get_range_tree();
tree_size_g = my_obj_alloc.get_tree_size();
  // Run PageRank for a fixed number of iterations. TODO: convergence determination
double timer6 = gettime();
for (int i = 0; i < ITER; i++) {
printf("Start PageRank\n");
//PageRank<<<grid, threads>>>(vertex, context,i);
PageRank<<<grid, threads>>>(vertex, context,i);
printf("Finish PageRank\n");
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaLaunch failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
}
cudaDeviceSynchronize();
double timer4 = gettime();
printf("kernel time = %lf ms\n", (timer4 - timer6) * 1000);
printf("Start Copyback\n");
copyBack<<<grid, threads>>>(vertex, context, pagerank_d);
printf("End Copyback\n");
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaLaunch failed (%s)\n", cudaGetErrorString(err));
return -1;
}
// Copy the rank buffer back
err = cudaMemcpy(rank_array, pagerank_d, num_nodes * sizeof(float),
cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy() failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
double timer2 = gettime();
// Report timing characteristics
printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);
#if 1
// Print rank array
print_vectorf(rank_array, num_nodes);
#endif
// Free the host-side arrays
free(rank_array);
csr->freeArrays();
free(csr);
// Free the device buffers
cudaFree(row_d);
cudaFree(col_d);
cudaFree(inrow_d);
cudaFree(incol_d);
cudaFree(pagerank_d);
return 0;
}
void print_vectorf(float *vector, int num) {
FILE *fp = fopen("result_PRV.out", "w");
  if (!fp) {
    printf("ERROR: unable to open result_PRV.out\n");
    return;
  }
for (int i = 0; i < num; i++) {
fprintf(fp, "%f\n", vector[i]);
}
fclose(fp);
}
|
57d5db3ea0b025048214e48b8da530f207819898.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlacpy_batched.cu normal z -> d, Tue Sep 2 12:38:15 2014
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
/* =====================================================================
Batches dlacpy of multiple arrays;
y-dimension of grid is different arrays,
x-dimension of grid is blocks for each array.
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread copies one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
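/* Worked example (illustrative only): with m = 100, n = 8, NB = 64 and batchCount = 3,
   the wrapper below launches grid = (ceil(100/64), 3) = (2, 3) with NB = 64 threads per block.
   Block (1,2) covers rows 64..127 of dAarray[2]/dBarray[2]; its thread 20 copies row 84
   across all 8 columns (stepping by ldda/lddb), while threads mapped to rows >= m do nothing. */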
__global__ void
dlacpy_batched_kernel(
int m, int n,
const double * const *dAarray, int ldda,
double **dBarray, int lddb )
{
// dA and dB iterate across row i
const double *dA = dAarray[ blockIdx.y ];
double *dB = dBarray[ blockIdx.y ];
int i = blockIdx.x*blockDim.x + threadIdx.x;
if ( i < m ) {
dA += i;
dB += i;
const double *dAend = dA + n*ldda;
while( dA < dAend ) {
*dB = *dA;
dA += ldda;
dB += lddb;
}
}
}
/* ===================================================================== */
/**
Note
--------
- UPLO Parameter is disabled
- Do we want to provide a generic function to the user with all the options?
Purpose
-------
DLACPY copies all or part of a set of two-dimensional matrices dAarray[i]
to another set of matrices dBarray[i], for i = 0, ..., batchCount-1.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of each matrix dAarray[i] to be copied to dBarray[i].
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
Otherwise: All of each matrix dAarray[i]
@param[in]
m INTEGER
The number of rows of each matrix dAarray[i]. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dAarray[i]. N >= 0.
@param[in]
dAarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a DOUBLE_PRECISION array, dimension (LDDA,N)
The m by n matrices dAarray[i].
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of each array dAarray[i]. LDDA >= max(1,M).
@param[out]
dBarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a DOUBLE_PRECISION array, dimension (LDDB,N)
The m by n matrices dBarray[i].
On exit, matrix dBarray[i] = matrix dAarray[i] in the locations
specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of each array dBarray[i]. LDDB >= max(1,M).
@param[in]
batchCount INTEGER
                The number of matrices to copy; length of dAarray and dBarray.
batchCount >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dlacpy_batched_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
const double * const *dAarray, magma_int_t ldda,
double **dBarray, magma_int_t lddb,
magma_int_t batchCount,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 )
return;
dim3 threads( NB );
dim3 grid( (m + NB - 1)/NB, batchCount );
if ( uplo == MagmaUpper ) {
fprintf(stderr, "lacpy upper is not implemented\n");
}
else if ( uplo == MagmaLower ) {
fprintf(stderr, "lacpy lower is not implemented\n");
}
else {
hipLaunchKernelGGL(( dlacpy_batched_kernel), dim3(grid), dim3(threads), 0, queue ,
m, n, dAarray, ldda, dBarray, lddb );
}
}
/**
@see magmablas_dlacpy_batched_q
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dlacpy_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
const double * const *dAarray, magma_int_t ldda,
double **dBarray, magma_int_t lddb,
magma_int_t batchCount )
{
magmablas_dlacpy_batched_q(
uplo, m, n, dAarray, ldda, dBarray, lddb, batchCount, magma_stream );
}
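/* Usage sketch (illustrative, not part of MAGMA): drives the batched copy above.
   The pointer arrays must live on the GPU; each entry points to a column-major
   m-by-n device matrix. The helper name and its arguments are hypothetical, and
   MagmaFull is assumed to be the "copy the whole matrix" uplo value in this
   MAGMA version. Error checking is omitted for brevity. */
static void
example_dlacpy_batched_usage(
    magma_int_t m, magma_int_t n, magma_int_t batchCount,
    double **h_Asrc, double **h_Bdst,   /* host arrays of device matrix pointers */
    magma_int_t ldda, magma_int_t lddb )
{
    double **d_Aptrs = NULL;
    double **d_Bptrs = NULL;
    hipMalloc( (void**)&d_Aptrs, batchCount*sizeof(double*) );
    hipMalloc( (void**)&d_Bptrs, batchCount*sizeof(double*) );
    hipMemcpy( d_Aptrs, h_Asrc, batchCount*sizeof(double*), hipMemcpyHostToDevice );
    hipMemcpy( d_Bptrs, h_Bdst, batchCount*sizeof(double*), hipMemcpyHostToDevice );
    /* Copies all of each dAarray[i] into dBarray[i] on the default stream. */
    magmablas_dlacpy_batched( MagmaFull, m, n, d_Aptrs, ldda, d_Bptrs, lddb, batchCount );
    hipFree( d_Aptrs );
    hipFree( d_Bptrs );
}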
|
57d5db3ea0b025048214e48b8da530f207819898.cu
|
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlacpy_batched.cu normal z -> d, Tue Sep 2 12:38:15 2014
@author Mark Gates
*/
#include "common_magma.h"
#define NB 64
/* =====================================================================
Batches dlacpy of multiple arrays;
y-dimension of grid is different arrays,
x-dimension of grid is blocks for each array.
Matrix is m x n, and is divided into block rows, each NB x n.
Each CUDA block has NB threads to handle one block row.
Each thread copies one row, iterating across all columns.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
__global__ void
dlacpy_batched_kernel(
int m, int n,
const double * const *dAarray, int ldda,
double **dBarray, int lddb )
{
// dA and dB iterate across row i
const double *dA = dAarray[ blockIdx.y ];
double *dB = dBarray[ blockIdx.y ];
int i = blockIdx.x*blockDim.x + threadIdx.x;
if ( i < m ) {
dA += i;
dB += i;
const double *dAend = dA + n*ldda;
while( dA < dAend ) {
*dB = *dA;
dA += ldda;
dB += lddb;
}
}
}
/* ===================================================================== */
/**
Note
--------
- UPLO Parameter is disabled
- Do we want to provide a generic function to the user with all the options?
Purpose
-------
DLACPY copies all or part of a set of two-dimensional matrices dAarray[i]
to another set of matrices dBarray[i], for i = 0, ..., batchCount-1.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of each matrix dAarray[i] to be copied to dBarray[i].
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
Otherwise: All of each matrix dAarray[i]
@param[in]
m INTEGER
The number of rows of each matrix dAarray[i]. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dAarray[i]. N >= 0.
@param[in]
dAarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a DOUBLE_PRECISION array, dimension (LDDA,N)
The m by n matrices dAarray[i].
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of each array dAarray[i]. LDDA >= max(1,M).
@param[out]
dBarray array on GPU, dimension(batchCount), of pointers to arrays,
with each array a DOUBLE_PRECISION array, dimension (LDDB,N)
The m by n matrices dBarray[i].
On exit, matrix dBarray[i] = matrix dAarray[i] in the locations
specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of each array dBarray[i]. LDDB >= max(1,M).
@param[in]
batchCount INTEGER
                The number of matrices to copy; length of dAarray and dBarray.
batchCount >= 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dlacpy_batched_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
const double * const *dAarray, magma_int_t ldda,
double **dBarray, magma_int_t lddb,
magma_int_t batchCount,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 )
return;
dim3 threads( NB );
dim3 grid( (m + NB - 1)/NB, batchCount );
if ( uplo == MagmaUpper ) {
fprintf(stderr, "lacpy upper is not implemented\n");
}
else if ( uplo == MagmaLower ) {
fprintf(stderr, "lacpy lower is not implemented\n");
}
else {
dlacpy_batched_kernel<<< grid, threads, 0, queue >>>(
m, n, dAarray, ldda, dBarray, lddb );
}
}
/**
@see magmablas_dlacpy_batched_q
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dlacpy_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
const double * const *dAarray, magma_int_t ldda,
double **dBarray, magma_int_t lddb,
magma_int_t batchCount )
{
magmablas_dlacpy_batched_q(
uplo, m, n, dAarray, ldda, dBarray, lddb, batchCount, magma_stream );
}
|
01707344f140ec1ddaf950d9e69a4fb76411e310.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/pooling_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data, int* mask, Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
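    // index linearizes the output as ((n * channels + c) * pooled_height + ph) * pooled_width + pw,
    // so the divisions/modulos above recover (n, c, ph, pw) for one pooled output element.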
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
const int hend = min(hstart + kernel_h, height);
const int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Dtype maxval = -FLT_MAX;
int maxidx = -1;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_slice[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_slice[maxidx];
}
}
}
top_data[index] = maxval;
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
template <typename Dtype>
__global__ void AvePoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
const int pool_size = (hend - hstart) * (wend - wstart);
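    // pool_size is taken before clamping to the image borders, so padded positions
    // still count in the divisor and the average effectively includes the zero padding.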
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Dtype aveval = 0;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_slice[h * width + w];
}
}
top_data[index] = aveval / pool_size;
}
}
template <typename Dtype>
__global__ void StoPoolForwardTrain(const int nthreads,
const Dtype* const bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const rand_idx, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const int hstart = ph * stride_h;
const int hend = min(hstart + kernel_h, height);
const int wstart = pw * stride_w;
const int wend = min(wstart + kernel_w, width);
Dtype cumsum = 0.;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
}
}
const float thres = rand_idx[index] * cumsum;
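    // rand_idx[index] holds a uniform draw in [0, 1); scaling it by the window sum and
    // scanning until the running sum crosses the threshold picks a location with
    // probability proportional to its activation (inverse-CDF sampling).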
// Second pass: get value, and set index.
cumsum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
if (cumsum >= thres) {
rand_idx[index] = ((n * channels + c) * height + h) * width + w;
top_data[index] = bottom_slice[h * width + w];
return;
}
}
}
}
}
template <typename Dtype>
__global__ void StoPoolForwardTest(const int nthreads,
const Dtype* const bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const int hstart = ph * stride_h;
const int hend = min(hstart + kernel_h, height);
const int wstart = pw * stride_w;
const int wend = min(wstart + kernel_w, width);
// We set cumsum to be 0 to avoid divide-by-zero problems
Dtype cumsum = 0.;
Dtype cumvalues = 0.;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];
}
}
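    // sum(x^2) / sum(x) below is the expected output of the training-time stochastic
    // pooling, since each position is sampled with probability x / sum(x)
    // (assuming non-negative activations).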
top_data[index] = (cumsum > 0.) ? cumvalues / cumsum : 0.;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
const int height_ = input_shape_[2];
const int width_ = input_shape_[3];
const int pooled_height_ = pooled_shape_[2];
const int pooled_width_ = pooled_shape_[3];
const int kernel_h_ = kernel_shape_[0];
const int kernel_w_ = kernel_shape_[1];
const int pad_h_ = pad_[0];
const int pad_w_ = pad_[1];
const int stride_h_ = stride_[0];
const int stride_w_ = stride_[1];
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->mutable_gpu_data();
} else {
mask = max_idx_.mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,
mask, top_mask);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AvePoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
if (this->phase_ == TRAIN) {
// We need to create the random index as well.
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
rand_idx_.mutable_gpu_data());
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolForwardTrain<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
rand_idx_.mutable_gpu_data(), top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolForwardTest<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, top_data);
}
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff,
const int* const mask, const Dtype* const top_mask, const int num,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, const int kernel_h,
const int kernel_w, const int stride_h, const int stride_w, const int pad_h,
const int pad_w, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart =
(h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
const int phend = min((h + pad_h) / stride_h + 1, pooled_height);
const int pwstart =
(w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
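    // [phstart, phend) x [pwstart, pwend) enumerates exactly the pooled windows whose
    // receptive field contains input position (h, w); the gradient below is routed back
    // only from windows where (h, w) was recorded as the argmax.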
Dtype gradient = 0;
const int offset = (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice = top_diff + offset;
if (mask) {
const int* const mask_slice = mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff_slice[ph * pooled_width + pw];
}
}
}
} else {
const Dtype* const top_mask_slice = top_mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff_slice[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width + pad_w;
const int h = (index / width) % height + pad_h;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void StoPoolBackward(const int nthreads,
const Dtype* const rand_idx, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
const Dtype* const rand_idx_slice =
rand_idx + (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff_slice[ph * pooled_width + pw] *
(index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw]));
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
const int height_ = input_shape_[2];
const int width_ = input_shape_[3];
const int pooled_height_ = pooled_shape_[2];
const int pooled_width_ = pooled_shape_[3];
const int kernel_h_ = kernel_shape_[0];
const int kernel_w_ = kernel_shape_[1];
const int pad_h_ = pad_[0];
const int pad_w_ = pad_[1];
const int stride_h_ = stride_[0];
const int stride_w_ = stride_[1];
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->gpu_data();
} else {
mask = max_idx_.gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, mask, top_mask, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_,
kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_,
bottom_diff);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AvePoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, rand_idx_.gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_,
bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer);
} // namespace caffe
|
01707344f140ec1ddaf950d9e69a4fb76411e310.cu
|
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/pooling_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data, int* mask, Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
const int hend = min(hstart + kernel_h, height);
const int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Dtype maxval = -FLT_MAX;
int maxidx = -1;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_slice[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_slice[maxidx];
}
}
}
top_data[index] = maxval;
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
template <typename Dtype>
__global__ void AvePoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
const int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Dtype aveval = 0;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_slice[h * width + w];
}
}
top_data[index] = aveval / pool_size;
}
}
template <typename Dtype>
__global__ void StoPoolForwardTrain(const int nthreads,
const Dtype* const bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const rand_idx, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const int hstart = ph * stride_h;
const int hend = min(hstart + kernel_h, height);
const int wstart = pw * stride_w;
const int wend = min(wstart + kernel_w, width);
Dtype cumsum = 0.;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
}
}
const float thres = rand_idx[index] * cumsum;
// Second pass: get value, and set index.
cumsum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
if (cumsum >= thres) {
rand_idx[index] = ((n * channels + c) * height + h) * width + w;
top_data[index] = bottom_slice[h * width + w];
return;
}
}
}
}
}
template <typename Dtype>
__global__ void StoPoolForwardTest(const int nthreads,
const Dtype* const bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const int hstart = ph * stride_h;
const int hend = min(hstart + kernel_h, height);
const int wstart = pw * stride_w;
const int wend = min(wstart + kernel_w, width);
// We set cumsum to be 0 to avoid divide-by-zero problems
Dtype cumsum = 0.;
Dtype cumvalues = 0.;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];
}
}
top_data[index] = (cumsum > 0.) ? cumvalues / cumsum : 0.;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
const int height_ = input_shape_[2];
const int width_ = input_shape_[3];
const int pooled_height_ = pooled_shape_[2];
const int pooled_width_ = pooled_shape_[3];
const int kernel_h_ = kernel_shape_[0];
const int kernel_w_ = kernel_shape_[1];
const int pad_h_ = pad_[0];
const int pad_w_ = pad_[1];
const int stride_h_ = stride_[0];
const int stride_w_ = stride_[1];
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->mutable_gpu_data();
} else {
mask = max_idx_.mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,
mask, top_mask);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
if (this->phase_ == TRAIN) {
// We need to create the random index as well.
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
rand_idx_.mutable_gpu_data());
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolForwardTrain<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
rand_idx_.mutable_gpu_data(), top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolForwardTest<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, top_data);
}
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff,
const int* const mask, const Dtype* const top_mask, const int num,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, const int kernel_h,
const int kernel_w, const int stride_h, const int stride_w, const int pad_h,
const int pad_w, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart =
(h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
const int phend = min((h + pad_h) / stride_h + 1, pooled_height);
const int pwstart =
(w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
Dtype gradient = 0;
const int offset = (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice = top_diff + offset;
if (mask) {
const int* const mask_slice = mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff_slice[ph * pooled_width + pw];
}
}
}
} else {
const Dtype* const top_mask_slice = top_mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff_slice[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width + pad_w;
const int h = (index / width) % height + pad_h;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void StoPoolBackward(const int nthreads,
const Dtype* const rand_idx, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
const Dtype* const rand_idx_slice =
rand_idx + (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff_slice[ph * pooled_width + pw] *
(index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw]));
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
const int height_ = input_shape_[2];
const int width_ = input_shape_[3];
const int pooled_height_ = pooled_shape_[2];
const int pooled_width_ = pooled_shape_[3];
const int kernel_h_ = kernel_shape_[0];
const int kernel_w_ = kernel_shape_[1];
const int pad_h_ = pad_[0];
const int pad_w_ = pad_[1];
const int stride_h_ = stride_[0];
const int stride_w_ = stride_[1];
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->gpu_data();
} else {
mask = max_idx_.gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, mask, top_mask, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_,
kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_,
bottom_diff);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
AvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, rand_idx_.gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_,
bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer);
} // namespace caffe
|
cde7f6935fd56a8cae4f2f1537568604dbc99c11.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "StructuredEikonal.h"
StructuredEikonal::StructuredEikonal(bool verbose)
:verbose_(verbose), isCudaMemCreated_(false),
width_(256), height_(256), depth_(256),
itersPerBlock_(10), solverType_(0) {}
StructuredEikonal::~StructuredEikonal() {}
void StructuredEikonal::writeNRRD(std::string filename) {
std::fstream out(filename.c_str(), std::ios::out | std::ios::binary);
out << "NRRD0001\n";
out << "# Complete NRRD file format specification at:\n";
out << "# http://teem.sourceforge.net/nrrd/format.html\n";
out << "type: double\n";
out << "dimension: 3\n";
out << "sizes: " << this->width_ << " " << this->height_ << " " << this->depth_ << "\n";
out << "endian: little\n";
out << "encoding: raw\n\n";
for(size_t k = 0; k < this->depth_; k++) {
for(size_t j = 0; j < this->height_; j++) {
for(size_t i = 0; i < this->width_; i++) {
double d = this->answer_[i][j][k];
out.write(reinterpret_cast<const char*>(&d),sizeof(double));
}
}
}
out.close();
}
void StructuredEikonal::setDims(size_t x, size_t y, size_t z) {
this->width_ = x;
this->height_ = y;
this->depth_ = z;
}
void StructuredEikonal::error(char* msg) {
printf("%s\n",msg);
assert(false);
exit(1); // exit with a failure status; exit(0) would report success
}
void StructuredEikonal::CheckCUDAMemory() {
size_t freeMem, totalMem;
hipMemGetInfo(&freeMem, &totalMem);
if (this->verbose_) {
std::cout << "Total Memory : " << totalMem / (1024 * 1024) << "MB" << std::endl;
std::cout << "Free Memory : " << freeMem / (1024 * 1024) << "MB" << std::endl;
std::cout << "--" << std::endl;
}
}
void StructuredEikonal::init_cuda_mem() {
assert(this->width_ > 0 && this->height_ > 0 && this->depth_ > 0);
if(this->width_ <= 0 || this->height_ <= 0 || this->depth_ <= 0){
printf("Volume dimension cannot be zero");
exit(1);
}
this->CheckCUDAMemory();
// 1. Create /initialize GPU memory
size_t nx, ny, nz;
nx = this->width_ + (BLOCK_LENGTH-this->width_%BLOCK_LENGTH)%BLOCK_LENGTH;
ny = this->height_ + (BLOCK_LENGTH-this->height_%BLOCK_LENGTH)%BLOCK_LENGTH;
nz = this->depth_ + (BLOCK_LENGTH-this->depth_%BLOCK_LENGTH)%BLOCK_LENGTH;
if (this->verbose_) {
printf("%d %d %d \n",nx,ny,nz);
}
auto volSize = nx*ny*nz;
auto blkSize = BLOCK_LENGTH*BLOCK_LENGTH*BLOCK_LENGTH;
auto nBlkX = nx / BLOCK_LENGTH;
auto nBlkY = ny / BLOCK_LENGTH;
auto nBlkZ = nz / BLOCK_LENGTH;
auto blockNum = nBlkX*nBlkY*nBlkZ;
this->memoryStruct_.xdim = static_cast<int>(nx);
this->memoryStruct_.ydim = static_cast<int>(ny);
this->memoryStruct_.zdim = static_cast<int>(nz);
this->memoryStruct_.volsize = static_cast<uint>(volSize);
this->memoryStruct_.blksize = static_cast<uint>(blkSize);
this->memoryStruct_.blklength = BLOCK_LENGTH;
this->memoryStruct_.blknum = static_cast<uint>(blockNum);
this->memoryStruct_.nIter = static_cast<int>(this->itersPerBlock_); // iter per block
if(this->isCudaMemCreated_) // delete previous memory
{
free((DOUBLE*)this->memoryStruct_.h_sol);
free((uint*)this->memoryStruct_.h_list);
free((bool*)this->memoryStruct_.h_listed);
free((bool*)this->memoryStruct_.h_listVol);
free((int*)this->memoryStruct_.blockOrder);
CUDA_SAFE_CALL( hipFree(this->memoryStruct_.d_spd) );
CUDA_SAFE_CALL( hipFree(this->memoryStruct_.d_sol) );
CUDA_SAFE_CALL( hipFree(this->memoryStruct_.t_sol) ); // temp solution for ping-pong
CUDA_SAFE_CALL( hipFree(this->memoryStruct_.d_con) ); // convergence volume
CUDA_SAFE_CALL( hipFree(this->memoryStruct_.d_list) );
CUDA_SAFE_CALL( hipFree(this->memoryStruct_.d_listVol) );
CUDA_SAFE_CALL( hipFree(this->memoryStruct_.d_mask) );
}
this->isCudaMemCreated_ = true;
this->memoryStruct_.h_sol = (DOUBLE*) malloc(volSize*sizeof(DOUBLE)); // initial solution
this->memoryStruct_.h_list = (uint*) malloc(blockNum*sizeof(uint)); // linear list contains active block indices
this->memoryStruct_.h_listed = (bool*) malloc(blockNum*sizeof(bool)); // whether block is added to the list
this->memoryStruct_.h_listVol = (bool*) malloc(blockNum*sizeof(bool)); // volume-wide list marking whether each block is active
this->memoryStruct_.blockOrder = (int*) malloc(blockNum*sizeof(int));
this->CheckCUDAMemory();
//
// create host/device memory using CUDA mem functions
//
CUDA_SAFE_CALL( hipMalloc((void**)&(this->memoryStruct_.d_spd), volSize*sizeof(double)) );
this->CheckCUDAMemory();
CUDA_SAFE_CALL( hipMalloc((void**)&(this->memoryStruct_.d_sol), volSize*sizeof(DOUBLE)) );
this->CheckCUDAMemory();
CUDA_SAFE_CALL( hipMalloc((void**)&(this->memoryStruct_.t_sol), volSize*sizeof(DOUBLE)) ); // temp solution for ping-pong
this->CheckCUDAMemory();
CUDA_SAFE_CALL( hipMalloc((void**)&(this->memoryStruct_.d_con), volSize*sizeof(bool)) ); // convergence volume
this->CheckCUDAMemory();
CUDA_SAFE_CALL( hipMalloc((void**)&(this->memoryStruct_.d_list), blockNum*sizeof(uint)) );
this->CheckCUDAMemory();
CUDA_SAFE_CALL( hipMalloc((void**)&(this->memoryStruct_.d_listVol), blockNum*sizeof(bool)) );
this->CheckCUDAMemory();
CUDA_SAFE_CALL( hipMalloc((void**)&(this->memoryStruct_.d_mask), volSize*sizeof(bool)) );
this->CheckCUDAMemory();
}
void StructuredEikonal::set_attribute_mask() {
uint volSize = this->memoryStruct_.volsize;
int nx, ny, nz, blklength;
nx = memoryStruct_.xdim;
ny = memoryStruct_.ydim;
nz = memoryStruct_.zdim;
blklength = memoryStruct_.blklength;
// create host memory
double *h_spd = new double[volSize]; // byte speed, host
bool *h_mask = new bool[volSize];
// copy input volume to host memory
// make each block to be stored contiguously in 1D memory space
uint idx = 0;
for(int zStr = 0; zStr < nz; zStr += blklength) {
for(int yStr = 0; yStr < ny; yStr += blklength) {
for(int xStr = 0; xStr < nx; xStr += blklength) {
// for each block
for(int z=zStr; z<zStr+blklength; z++) {
for(int y=yStr; y<yStr+blklength; y++) {
for(int x=xStr; x<xStr+blklength; x++) {
h_spd[idx] = this->speeds_[x][y][z];
h_mask[idx] = true;
idx++;
}
}
}
}
}
}
// initialize GPU memory with host memory
CUDA_SAFE_CALL( hipMemcpy(memoryStruct_.d_spd, h_spd, volSize*sizeof(double), hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipMemcpy(memoryStruct_.d_mask, h_mask, volSize*sizeof(bool), hipMemcpyHostToDevice) );
delete[] h_spd;
delete[] h_mask;
}
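// Editor's note: a minimal helper sketch (an assumption, not part of the original
// solver) that spells out the block-contiguous layout used above. A voxel (x,y,z)
// of the padded nx*ny*nz volume lands at the linear index returned here, and
// get_solution() further below inverts exactly this mapping. Note that
// set_attribute_mask() reads speeds_[x][y][z] over the padded extent, so it
// assumes width_/height_/depth_ are multiples of BLOCK_LENGTH.
static inline size_t blockLinearIndex(size_t x, size_t y, size_t z,
                                      size_t nx, size_t ny) {
  const size_t B = BLOCK_LENGTH;
  const size_t block = (x / B) + (y / B) * (nx / B) + (z / B) * (nx / B) * (ny / B);
  const size_t offset = (x % B) + (y % B) * B + (z % B) * B * B;
  return block * B * B * B + offset;
}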
void StructuredEikonal::initialization() {
// get / set CUDA device
int deviceCount;
hipGetDeviceCount(&deviceCount);
int device;
for(device = 0; device < deviceCount; ++device) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device);
}
this->CheckCUDAMemory();
this->init_cuda_mem();
this->set_attribute_mask();
this->CheckCUDAMemory();
}
void StructuredEikonal::map_generator() {
double pi = 3.141592653589793238462643383;
this->speeds_ = std::vector<std::vector<std::vector<double> > >(
this->width_, std::vector<std::vector<double> >(
this->height_, std::vector<double>(this->depth_,1.)));
switch(this->solverType_){
case 0 :
//Constant Speed Map
break;
case 1 :
//Sinusoid Speed Map
for (int k = 0 ; k < this->depth_ ; ++k) {
for (int j = 0 ; j < this->height_; ++j) {
for ( int i = 0 ; i < this->width_ ; ++i) {
this->speeds_[i][j][k] =
(6 + 5*(sin((i*pi)/this->width_ *2))*
sin((j*pi)/this->height_*2)*
sin((k*pi)/this->depth_*2));
}
}
}
break;
}
}
void StructuredEikonal::setSeeds(std::vector<std::array<size_t, 3> > seeds) {
this->seeds_ = seeds;
}
void StructuredEikonal::useSeeds() {
if (this->verbose_) {
std::cout << "Loading seed volume..." << std::endl;
}
uint volSize, blockNum;
int nx, ny, nz, blklength;
nx = this->memoryStruct_.xdim;
ny = this->memoryStruct_.ydim;
nz = this->memoryStruct_.zdim;
volSize = this->memoryStruct_.volsize;
blklength = this->memoryStruct_.blklength;
blockNum = this->memoryStruct_.blknum;
// copy input volume to host memory
// make each block to be stored contiguously in 1D memory space
uint idx = 0;
uint blk_idx = 0;
uint list_idx = 0;
uint nActiveBlock = 0;
for(int zStr = 0; zStr < nz; zStr += blklength) {
for(int yStr = 0; yStr < ny; yStr += blklength) {
for(int xStr = 0; xStr < nx; xStr += blklength) {
// for each block
bool isSeedBlock = false;
for(int z=zStr; z<zStr+blklength; z++) {
for(int y=yStr; y<yStr+blklength; y++) {
for(int x=xStr; x<xStr+blklength; x++) {
this->memoryStruct_.h_sol[idx] = INF;
if (this->seeds_.empty()) {
if (x == nx/2 && y == ny/2 && z == nz/2) {
this->memoryStruct_.h_sol[idx] = 0;
isSeedBlock = true;
if (this->verbose_) {
printf("%d is Selected bt source \n",idx);
}
}
} else {
for(size_t i = 0; i < this->seeds_.size(); i++) {
if (this->seeds_[i][0] == x &&
this->seeds_[i][1] == y &&
this->seeds_[i][2] == z) {
this->memoryStruct_.h_sol[idx] = 0;
isSeedBlock = true;
if (this->verbose_) {
printf("%d is Selected bt source \n",idx);
}
}
}
}
idx++;
}
}
}
///////////////////////////////////////////////
if(isSeedBlock) {
if (this->verbose_) {
printf("%d,%d,%d is Seed Block \n",zStr,yStr,xStr);
}
this->memoryStruct_.h_listVol[blk_idx] = true;
this->memoryStruct_.h_listed[blk_idx] = true;
this->memoryStruct_.h_list[list_idx] = blk_idx;
list_idx++;
nActiveBlock++;
} else {
this->memoryStruct_.h_listVol[blk_idx] = false;
this->memoryStruct_.h_listed[blk_idx] = false;
}
blk_idx++;
}
}
}
this->memoryStruct_.nActiveBlock = nActiveBlock;
// initialize GPU memory with host memory
CUDA_SAFE_CALL( hipMemcpy(this->memoryStruct_.d_sol, this->memoryStruct_.h_sol, volSize*sizeof(DOUBLE), hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipMemcpy(this->memoryStruct_.t_sol, this->memoryStruct_.h_sol, volSize*sizeof(DOUBLE), hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipMemcpy(this->memoryStruct_.d_list, this->memoryStruct_.h_list, nActiveBlock*sizeof(uint), hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipMemcpy(this->memoryStruct_.d_listVol, this->memoryStruct_.h_listVol, blockNum*sizeof(bool), hipMemcpyHostToDevice) );
// initialize GPU memory with constant value
CUDA_SAFE_CALL( hipMemset(this->memoryStruct_.d_con, 1, volSize*sizeof(bool)) );
}
void StructuredEikonal::setMapType(size_t t) {
this->solverType_ = t;
}
void StructuredEikonal::solveEikonal() {
if (this->speeds_.empty()) {
this->map_generator();
}
this->isCudaMemCreated_ = false;
this->initialization();
this->useSeeds();
runEikonalSolverSimple(this->memoryStruct_, this->verbose_);
this->get_solution();
}
std::vector< std::vector< std::vector<double> > >
StructuredEikonal::getFinalResult() {
return this->answer_;
}
void StructuredEikonal::get_solution() {
// copy solution from GPU
CUDA_SAFE_CALL( hipMemcpy(this->memoryStruct_.h_sol,
this->memoryStruct_.d_sol, this->memoryStruct_.volsize*sizeof(DOUBLE),
hipMemcpyDeviceToHost) );
//put the data where it belongs in the grand scheme of data!
this->answer_ = std::vector<std::vector<std::vector<double> > >(
this->width_, std::vector<std::vector<double> >(
this->height_, std::vector<double>(this->depth_,0)));
for(size_t blockID = 0; blockID < this->memoryStruct_.blknum; blockID++) {
size_t baseAddr = blockID * this->memoryStruct_.blksize;
size_t xgridlength = this->memoryStruct_.xdim/BLOCK_LENGTH;
size_t ygridlength = this->memoryStruct_.ydim/BLOCK_LENGTH;
// compute block index
size_t bx = blockID%xgridlength;
size_t tmpIdx = (blockID - bx)/xgridlength;
size_t by = tmpIdx%ygridlength;
size_t bz = (tmpIdx-by)/ygridlength;
//translate back to real space
for(int k = 0; k < BLOCK_LENGTH; k++) {
for(int j = 0; j < BLOCK_LENGTH; j++) {
for(int i = 0; i < BLOCK_LENGTH; i++) {
double d = this->memoryStruct_.h_sol[baseAddr +
k * BLOCK_LENGTH * BLOCK_LENGTH +
j * BLOCK_LENGTH + i];
if ((i + bx * BLOCK_LENGTH) < this->width_ &&
(j + by * BLOCK_LENGTH) < this->height_ &&
(k + bz * BLOCK_LENGTH) < this->depth_) {
this->answer_[(i + bx * BLOCK_LENGTH)][(j +
by * BLOCK_LENGTH)][k + bz * BLOCK_LENGTH] = d;
}
}
}
}
}
}
void StructuredEikonal::setItersPerBlock(size_t t) {
this->itersPerBlock_ = t;
}
|
cde7f6935fd56a8cae4f2f1537568604dbc99c11.cu
|
#include "StructuredEikonal.h"
StructuredEikonal::StructuredEikonal(bool verbose)
:verbose_(verbose), isCudaMemCreated_(false),
width_(256), height_(256), depth_(256),
itersPerBlock_(10), solverType_(0) {}
StructuredEikonal::~StructuredEikonal() {}
void StructuredEikonal::writeNRRD(std::string filename) {
std::fstream out(filename.c_str(), std::ios::out | std::ios::binary);
out << "NRRD0001\n";
out << "# Complete NRRD file format specification at:\n";
out << "# http://teem.sourceforge.net/nrrd/format.html\n";
out << "type: double\n";
out << "dimension: 3\n";
out << "sizes: " << this->width_ << " " << this->height_ << " " << this->depth_ << "\n";
out << "endian: little\n";
out << "encoding: raw\n\n";
for(size_t k = 0; k < this->depth_; k++) {
for(size_t j = 0; j < this->height_; j++) {
for(size_t i = 0; i < this->width_; i++) {
double d = this->answer_[i][j][k];
out.write(reinterpret_cast<const char*>(&d),sizeof(double));
}
}
}
out.close();
}
void StructuredEikonal::setDims(size_t x, size_t y, size_t z) {
this->width_ = x;
this->height_ = y;
this->depth_ = z;
}
void StructuredEikonal::error(char* msg) {
printf("%s\n",msg);
assert(false);
exit(1); // exit with a failure status; exit(0) would report success
}
void StructuredEikonal::CheckCUDAMemory() {
size_t freeMem, totalMem;
cudaMemGetInfo(&freeMem, &totalMem);
if (this->verbose_) {
std::cout << "Total Memory : " << totalMem / (1024 * 1024) << "MB" << std::endl;
std::cout << "Free Memory : " << freeMem / (1024 * 1024) << "MB" << std::endl;
std::cout << "--" << std::endl;
}
}
void StructuredEikonal::init_cuda_mem() {
assert(this->width_ > 0 && this->height_ > 0 && this->depth_ > 0);
if(this->width_ <= 0 || this->height_ <= 0 || this->depth_ <= 0){
printf("Volume dimension cannot be zero");
exit(1);
}
this->CheckCUDAMemory();
// 1. Create /initialize GPU memory
size_t nx, ny, nz;
nx = this->width_ + (BLOCK_LENGTH-this->width_%BLOCK_LENGTH)%BLOCK_LENGTH;
ny = this->height_ + (BLOCK_LENGTH-this->height_%BLOCK_LENGTH)%BLOCK_LENGTH;
nz = this->depth_ + (BLOCK_LENGTH-this->depth_%BLOCK_LENGTH)%BLOCK_LENGTH;
if (this->verbose_) {
printf("%d %d %d \n",nx,ny,nz);
}
auto volSize = nx*ny*nz;
auto blkSize = BLOCK_LENGTH*BLOCK_LENGTH*BLOCK_LENGTH;
auto nBlkX = nx / BLOCK_LENGTH;
auto nBlkY = ny / BLOCK_LENGTH;
auto nBlkZ = nz / BLOCK_LENGTH;
auto blockNum = nBlkX*nBlkY*nBlkZ;
this->memoryStruct_.xdim = static_cast<int>(nx);
this->memoryStruct_.ydim = static_cast<int>(ny);
this->memoryStruct_.zdim = static_cast<int>(nz);
this->memoryStruct_.volsize = static_cast<uint>(volSize);
this->memoryStruct_.blksize = static_cast<uint>(blkSize);
this->memoryStruct_.blklength = BLOCK_LENGTH;
this->memoryStruct_.blknum = static_cast<uint>(blockNum);
this->memoryStruct_.nIter = static_cast<int>(this->itersPerBlock_); // iter per block
if(this->isCudaMemCreated_) // delete previous memory
{
free((DOUBLE*)this->memoryStruct_.h_sol);
free((uint*)this->memoryStruct_.h_list);
free((bool*)this->memoryStruct_.h_listed);
free((bool*)this->memoryStruct_.h_listVol);
free((int*)this->memoryStruct_.blockOrder);
CUDA_SAFE_CALL( cudaFree(this->memoryStruct_.d_spd) );
CUDA_SAFE_CALL( cudaFree(this->memoryStruct_.d_sol) );
CUDA_SAFE_CALL( cudaFree(this->memoryStruct_.t_sol) ); // temp solution for ping-pong
CUDA_SAFE_CALL( cudaFree(this->memoryStruct_.d_con) ); // convergence volume
CUDA_SAFE_CALL( cudaFree(this->memoryStruct_.d_list) );
CUDA_SAFE_CALL( cudaFree(this->memoryStruct_.d_listVol) );
CUDA_SAFE_CALL( cudaFree(this->memoryStruct_.d_mask) );
}
this->isCudaMemCreated_ = true;
this->memoryStruct_.h_sol = (DOUBLE*) malloc(volSize*sizeof(DOUBLE)); // initial solution
this->memoryStruct_.h_list = (uint*) malloc(blockNum*sizeof(uint)); // linear list contains active block indices
this->memoryStruct_.h_listed = (bool*) malloc(blockNum*sizeof(bool)); // whether block is added to the list
this->memoryStruct_.h_listVol = (bool*) malloc(blockNum*sizeof(bool)); // volume-wide list marking whether each block is active
this->memoryStruct_.blockOrder = (int*) malloc(blockNum*sizeof(int));
this->CheckCUDAMemory();
//
// create host/device memory using CUDA mem functions
//
CUDA_SAFE_CALL( cudaMalloc((void**)&(this->memoryStruct_.d_spd), volSize*sizeof(double)) );
this->CheckCUDAMemory();
CUDA_SAFE_CALL( cudaMalloc((void**)&(this->memoryStruct_.d_sol), volSize*sizeof(DOUBLE)) );
this->CheckCUDAMemory();
CUDA_SAFE_CALL( cudaMalloc((void**)&(this->memoryStruct_.t_sol), volSize*sizeof(DOUBLE)) ); // temp solution for ping-pong
this->CheckCUDAMemory();
CUDA_SAFE_CALL( cudaMalloc((void**)&(this->memoryStruct_.d_con), volSize*sizeof(bool)) ); // convergence volume
this->CheckCUDAMemory();
CUDA_SAFE_CALL( cudaMalloc((void**)&(this->memoryStruct_.d_list), blockNum*sizeof(uint)) );
this->CheckCUDAMemory();
CUDA_SAFE_CALL( cudaMalloc((void**)&(this->memoryStruct_.d_listVol), blockNum*sizeof(bool)) );
this->CheckCUDAMemory();
CUDA_SAFE_CALL( cudaMalloc((void**)&(this->memoryStruct_.d_mask), volSize*sizeof(bool)) );
this->CheckCUDAMemory();
}
void StructuredEikonal::set_attribute_mask() {
uint volSize = this->memoryStruct_.volsize;
int nx, ny, nz, blklength;
nx = memoryStruct_.xdim;
ny = memoryStruct_.ydim;
nz = memoryStruct_.zdim;
blklength = memoryStruct_.blklength;
// create host memory
double *h_spd = new double[volSize]; // byte speed, host
bool *h_mask = new bool[volSize];
// copy input volume to host memory
// make each block to be stored contiguously in 1D memory space
uint idx = 0;
for(int zStr = 0; zStr < nz; zStr += blklength) {
for(int yStr = 0; yStr < ny; yStr += blklength) {
for(int xStr = 0; xStr < nx; xStr += blklength) {
// for each block
for(int z=zStr; z<zStr+blklength; z++) {
for(int y=yStr; y<yStr+blklength; y++) {
for(int x=xStr; x<xStr+blklength; x++) {
h_spd[idx] = this->speeds_[x][y][z];
h_mask[idx] = true;
idx++;
}
}
}
}
}
}
// initialize GPU memory with host memory
CUDA_SAFE_CALL( cudaMemcpy(memoryStruct_.d_spd, h_spd, volSize*sizeof(double), cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaMemcpy(memoryStruct_.d_mask, h_mask, volSize*sizeof(bool), cudaMemcpyHostToDevice) );
delete[] h_spd;
delete[] h_mask;
}
void StructuredEikonal::initialization() {
// get / set CUDA device
int deviceCount;
cudaGetDeviceCount(&deviceCount);
int device;
for(device = 0; device < deviceCount; ++device) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device);
}
this->CheckCUDAMemory();
this->init_cuda_mem();
this->set_attribute_mask();
this->CheckCUDAMemory();
}
void StructuredEikonal::map_generator() {
double pi = 3.141592653589793238462643383;
this->speeds_ = std::vector<std::vector<std::vector<double> > >(
this->width_, std::vector<std::vector<double> >(
this->height_, std::vector<double>(this->depth_,1.)));
switch(this->solverType_){
case 0 :
//Constant Speed Map
break;
case 1 :
//Sinusoid Speed Map
for (int k = 0 ; k < this->depth_ ; ++k) {
for (int j = 0 ; j < this->height_; ++j) {
for ( int i = 0 ; i < this->width_ ; ++i) {
this->speeds_[i][j][k] =
(6 + 5*(sin((i*pi)/this->width_ *2))*
sin((j*pi)/this->height_*2)*
sin((k*pi)/this->depth_*2));
}
}
}
break;
}
}
void StructuredEikonal::setSeeds(std::vector<std::array<size_t, 3> > seeds) {
this->seeds_ = seeds;
}
void StructuredEikonal::useSeeds() {
if (this->verbose_) {
std::cout << "Loading seed volume..." << std::endl;
}
uint volSize, blockNum;
int nx, ny, nz, blklength;
nx = this->memoryStruct_.xdim;
ny = this->memoryStruct_.ydim;
nz = this->memoryStruct_.zdim;
volSize = this->memoryStruct_.volsize;
blklength = this->memoryStruct_.blklength;
blockNum = this->memoryStruct_.blknum;
// copy input volume to host memory
// make each block to be stored contiguously in 1D memory space
uint idx = 0;
uint blk_idx = 0;
uint list_idx = 0;
uint nActiveBlock = 0;
for(int zStr = 0; zStr < nz; zStr += blklength) {
for(int yStr = 0; yStr < ny; yStr += blklength) {
for(int xStr = 0; xStr < nx; xStr += blklength) {
// for each block
bool isSeedBlock = false;
for(int z=zStr; z<zStr+blklength; z++) {
for(int y=yStr; y<yStr+blklength; y++) {
for(int x=xStr; x<xStr+blklength; x++) {
this->memoryStruct_.h_sol[idx] = INF;
if (this->seeds_.empty()) {
if (x == nx/2 && y == ny/2 && z == nz/2) {
this->memoryStruct_.h_sol[idx] = 0;
isSeedBlock = true;
if (this->verbose_) {
printf("%d is Selected bt source \n",idx);
}
}
} else {
for(size_t i = 0; i < this->seeds_.size(); i++) {
if (this->seeds_[i][0] == x &&
this->seeds_[i][1] == y &&
this->seeds_[i][2] == z) {
this->memoryStruct_.h_sol[idx] = 0;
isSeedBlock = true;
if (this->verbose_) {
printf("%d is Selected bt source \n",idx);
}
}
}
}
idx++;
}
}
}
///////////////////////////////////////////////
if(isSeedBlock) {
if (this->verbose_) {
printf("%d,%d,%d is Seed Block \n",zStr,yStr,xStr);
}
this->memoryStruct_.h_listVol[blk_idx] = true;
this->memoryStruct_.h_listed[blk_idx] = true;
this->memoryStruct_.h_list[list_idx] = blk_idx;
list_idx++;
nActiveBlock++;
} else {
this->memoryStruct_.h_listVol[blk_idx] = false;
this->memoryStruct_.h_listed[blk_idx] = false;
}
blk_idx++;
}
}
}
this->memoryStruct_.nActiveBlock = nActiveBlock;
// initialize GPU memory with host memory
CUDA_SAFE_CALL( cudaMemcpy(this->memoryStruct_.d_sol, this->memoryStruct_.h_sol, volSize*sizeof(DOUBLE), cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaMemcpy(this->memoryStruct_.t_sol, this->memoryStruct_.h_sol, volSize*sizeof(DOUBLE), cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaMemcpy(this->memoryStruct_.d_list, this->memoryStruct_.h_list, nActiveBlock*sizeof(uint), cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaMemcpy(this->memoryStruct_.d_listVol, this->memoryStruct_.h_listVol, blockNum*sizeof(bool), cudaMemcpyHostToDevice) );
// initialize GPU memory with constant value
CUDA_SAFE_CALL( cudaMemset(this->memoryStruct_.d_con, 1, volSize*sizeof(bool)) );
}
void StructuredEikonal::setMapType(size_t t) {
this->solverType_ = t;
}
void StructuredEikonal::solveEikonal() {
if (this->speeds_.empty()) {
this->map_generator();
}
this->isCudaMemCreated_ = false;
this->initialization();
this->useSeeds();
runEikonalSolverSimple(this->memoryStruct_, this->verbose_);
this->get_solution();
}
std::vector< std::vector< std::vector<double> > >
StructuredEikonal::getFinalResult() {
return this->answer_;
}
void StructuredEikonal::get_solution() {
// copy solution from GPU
CUDA_SAFE_CALL( cudaMemcpy(this->memoryStruct_.h_sol,
this->memoryStruct_.d_sol, this->memoryStruct_.volsize*sizeof(DOUBLE),
cudaMemcpyDeviceToHost) );
//put the data where it belongs in the grand scheme of data!
this->answer_ = std::vector<std::vector<std::vector<double> > >(
this->width_, std::vector<std::vector<double> >(
this->height_, std::vector<double>(this->depth_,0)));
for(size_t blockID = 0; blockID < this->memoryStruct_.blknum; blockID++) {
size_t baseAddr = blockID * this->memoryStruct_.blksize;
size_t xgridlength = this->memoryStruct_.xdim/BLOCK_LENGTH;
size_t ygridlength = this->memoryStruct_.ydim/BLOCK_LENGTH;
// compute block index
size_t bx = blockID%xgridlength;
size_t tmpIdx = (blockID - bx)/xgridlength;
size_t by = tmpIdx%ygridlength;
size_t bz = (tmpIdx-by)/ygridlength;
//translate back to real space
for(int k = 0; k < BLOCK_LENGTH; k++) {
for(int j = 0; j < BLOCK_LENGTH; j++) {
for(int i = 0; i < BLOCK_LENGTH; i++) {
double d = this->memoryStruct_.h_sol[baseAddr +
k * BLOCK_LENGTH * BLOCK_LENGTH +
j * BLOCK_LENGTH + i];
if ((i + bx * BLOCK_LENGTH) < this->width_ &&
(j + by * BLOCK_LENGTH) < this->height_ &&
(k + bz * BLOCK_LENGTH) < this->depth_) {
this->answer_[(i + bx * BLOCK_LENGTH)][(j +
by * BLOCK_LENGTH)][k + bz * BLOCK_LENGTH] = d;
}
}
}
}
}
}
void StructuredEikonal::setItersPerBlock(size_t t) {
this->itersPerBlock_ = t;
}
|
75c52d1dc204fbd17ce4bbefec55dbd117627147.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (email: [email protected]) 2018-04-24
*/
#include "../../XDevice.h"
#include "../../XTensor.h"
#include "Normalize.h"
#include "Normalize.cuh"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_ROCM
/*
normalized the data with normal distribution (kernel code). For an input x,
y = a * (x-mean)/sqrt(variance+\epsilon) + b
where a and b are the scalar and bias respectively, and \epsilon is the adjustment parameter
>> input - the input data array
>> output - the output data array
>> mean - the mean of the input
>> var - the variance of the input
>> a - the scalar
>> b - the bias
>> epsilon - a parameter
>> stride - stride that we need to move to the next item
>> strideNum - how many strides we need to go over for next block
>> blockNum - how many blocks we have
*/
template<class T>
__global__
void KernelNormalizeFloat(T * input, T * output, T * mean, T * var,
T * a, T * b, T epsilon,
int stride, int strideNum, int blockNum)
{
__shared__ T iMean[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ T iVar[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ int iBlock[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ int iOffset[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ int blockSize;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= stride * blockNum || j >= strideNum)
return;
if (threadIdx.y == 0) {
iOffset[threadIdx.x] = i % stride;
iBlock[threadIdx.x] = i / stride;
iMean[threadIdx.x] = mean[i];
iVar[threadIdx.x] = var[i];
blockSize = stride * strideNum;
}
__syncthreads();
int inBlockOffset = j * stride + iOffset[threadIdx.x];
int offset = iBlock[threadIdx.x] * blockSize + inBlockOffset;
output[offset] = (DTYPE)(a[inBlockOffset] * (input[offset] - iMean[threadIdx.x])) /
sqrt((DTYPE)(iVar[threadIdx.x] + epsilon)) + (DTYPE)b[inBlockOffset];
}
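/*
Editor's note: a minimal CPU reference sketch (an assumption added for clarity,
not part of NiuTrans) that spells out the indexing used by KernelNormalizeFloat
above: mean/var are indexed per (block, offset) pair, while a/b are indexed per
(strideIdx, offset) pair inside a block.
*/
static void ReferenceNormalizeFloat(const float * input, float * output,
                                    const float * mean, const float * var,
                                    const float * a, const float * b, float epsilon,
                                    int stride, int strideNum, int blockNum)
{
    for (int blk = 0; blk < blockNum; blk++) {
        for (int s = 0; s < strideNum; s++) {
            for (int o = 0; o < stride; o++) {
                int mvIdx = blk * stride + o;                 /* index into mean/var */
                int abIdx = s * stride + o;                   /* index into a/b      */
                int off = blk * stride * strideNum + abIdx;   /* element offset      */
                output[off] = a[abIdx] * (input[off] - mean[mvIdx]) /
                              sqrtf(var[mvIdx] + epsilon) + b[abIdx];
            }
        }
    }
}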
template<class T>
__global__
void KernelNormalizeHalf(T * input, T * output, T * mean, T * var,
T * a, T * b,
int stride, int strideNum, int blockNum)
{
__shared__ half iMean[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ half iVar[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ int iBlock[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ int iOffset[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ int blockSize;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= stride * blockNum || j >= strideNum)
return;
if (threadIdx.y == 0) {
iOffset[threadIdx.x] = i % stride;
iBlock[threadIdx.x] = i / stride;
iMean[threadIdx.x] = mean[i];
iVar[threadIdx.x] = var[i];
blockSize = stride * strideNum;
}
__syncthreads();
int inBlockOffset = j * stride + iOffset[threadIdx.x];
int offset = iBlock[threadIdx.x] * blockSize + inBlockOffset;
output[offset] = __hadd(__hdiv(__hmul(a[inBlockOffset], __hsub(input[offset], iMean[threadIdx.x])),
hsqrt(iVar[threadIdx.x])), b[inBlockOffset]);
}
/*
normalized the data with normal distribution. For an input x,
y = a * (x-mean)/sqrt(variance+\epsilon) + b
where a and b are the scalar and bias respectively, and \epsilon is the adjustment parameter
>> input - the input tensor
>> output - the output tensor
>> dim - dimension along which the mean and variance are computed
>> mean - the mean of the input
>> var - the variance of the input
>> a - the scalar
>> b - the bias
>> epsilon - a parameter
*/
void _CudaNormalize(const XTensor * input, XTensor * output, int dim,
const XTensor * mean, const XTensor * var,
const XTensor * a, const XTensor * b,
DTYPE epsilon)
{
int stride = 1;
int strideNum = input->dimSize[dim];
int blockNum = 1;
for (int i = 0; i < input->order; i++) {
if (i > dim)
stride *= input->dimSize[i];
else if (i < dim)
blockNum *= input->dimSize[i];
}
int cudaGridSize[3];
int cudaBlockSize[3];
GDevs.GetCudaThread2D(input->devID, strideNum, stride * blockNum,
MAX_INT, cudaGridSize, cudaBlockSize);
dim3 blocks(cudaGridSize[1], cudaGridSize[0]);
dim3 threads(cudaBlockSize[1], cudaBlockSize[0]);
int devIDBackup;
ProtectCudaDev(a->devID, devIDBackup);
if (input->dataType == DEFAULT_DTYPE) {
KernelNormalizeFloat <DTYPE><< <blocks, threads >> >((DTYPE*)input->data, (DTYPE*)output->data,
(DTYPE*)mean->data, (DTYPE*)var->data,
(DTYPE*)a->data, (DTYPE*)b->data, epsilon,
stride, strideNum, blockNum);
}
else if (input->dataType == X_FLOAT16) {
#ifdef HALF_PRECISION
KernelNormalizeHalf <half><< <blocks, threads>>> ((__half*)input->data, (__half*)output->data,
(__half*)mean->data, (__half*)var->data,
(__half*)a->data, (__half*)b->data,
stride, strideNum, blockNum);
#else
ShowNTErrors("Please compile with -DHALF_PRECISION");
#endif
}
BacktoCudaDev(a->devID, devIDBackup);
}
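/*
Editor's note (illustrative comment, not part of NiuTrans): for an order-3 tensor
of shape (4, 5, 6) normalized over dim = 1, the loop above gives blockNum = 4,
strideNum = 5 and stride = 6; in the kernel, index i then enumerates the
4 * 6 = 24 (block, offset) columns while j walks the 5 positions along the
normalized dimension.
*/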
/*
L1-normalize the data (kernel code). For an input x,
y = a * (x-mean)/distance + b
where a and b are the scalar and bias respectively
>> input - the input data array
>> output - the output data array
>> mean - the mean of the input
>> distance - the distance of the input
>> a - the scalar
>> b - the bias
>> stride - stride that we need to move to the next item
>> strideNum - how many strides we need to go over for next block
>> blockNum - how many blocks we have
*/
template<class T>
__global__
void KernelL1NormalizeFloat(T * input, T * output, T * mean, T * distance,
T * a, T * b,
int stride, int strideNum, int blockNum)
{
__shared__ T iMean[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ T iVar[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ int iBlock[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ int iOffset[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ int blockSize;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= stride * blockNum || j >= strideNum)
return;
if (threadIdx.y == 0) {
iOffset[threadIdx.x] = i % stride;
iBlock[threadIdx.x] = i / stride;
iMean[threadIdx.x] = mean[i];
iVar[threadIdx.x] = distance[i];
blockSize = stride * strideNum;
}
__syncthreads();
int inBlockOffset = j * stride + iOffset[threadIdx.x];
int offset = iBlock[threadIdx.x] * blockSize + inBlockOffset;
output[offset] = (DTYPE)(a[inBlockOffset] * (input[offset] - iMean[threadIdx.x]) /
iVar[threadIdx.x]) + (DTYPE)b[inBlockOffset];
}
template<class T>
__global__
void KernelL1NormalizeHalf(T * input, T * output, T * mean, T * distance,
T * a, T * b, int stride, int strideNum, int blockNum)
{
__shared__ half iMean[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ half iVar[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ int iBlock[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ int iOffset[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ int blockSize;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= stride * blockNum || j >= strideNum)
return;
if (threadIdx.y == 0) {
iOffset[threadIdx.x] = i % stride;
iBlock[threadIdx.x] = i / stride;
iMean[threadIdx.x] = mean[i];
iVar[threadIdx.x] = distance[i];
blockSize = stride * strideNum;
}
__syncthreads();
int inBlockOffset = j * stride + iOffset[threadIdx.x];
int offset = iBlock[threadIdx.x] * blockSize + inBlockOffset;
output[offset] = __hadd(__hdiv(__hmul(a[inBlockOffset], __hsub(input[offset], iMean[threadIdx.x])),
iVar[threadIdx.x]), b[inBlockOffset]);
}
/*
L1-normalize the data. For an input x,
y = a * (x-mean)/distance + b
where a and b are the scalar and bias respectively
>> input - the input tensor
>> output - the output tensor
>> dim - dimension along which the mean and distance are computed
>> mean - the mean of the input
>> distance - the distance of the input
>> a - the scalar
>> b - the bias
*/
void _CudaL1Normalize(const XTensor * input, XTensor * output, int dim,
const XTensor * mean, const XTensor * distance,
const XTensor * a, const XTensor * b)
{
int stride = 1;
int strideNum = input->dimSize[dim];
int blockNum = 1;
for (int i = 0; i < input->order; i++) {
if (i > dim)
stride *= input->dimSize[i];
else if (i < dim)
blockNum *= input->dimSize[i];
}
int cudaGridSize[3];
int cudaBlockSize[3];
GDevs.GetCudaThread2D(input->devID, strideNum, stride * blockNum,
MAX_INT, cudaGridSize, cudaBlockSize);
dim3 blocks(cudaGridSize[1], cudaGridSize[0]);
dim3 threads(cudaBlockSize[1], cudaBlockSize[0]);
int devIDBackup;
ProtectCudaDev(a->devID, devIDBackup);
if (input->dataType == DEFAULT_DTYPE) {
KernelL1NormalizeFloat <DTYPE><< <blocks, threads >> >((DTYPE*)input->data, (DTYPE*)output->data,
(DTYPE*)mean->data, (DTYPE*)distance->data,
(DTYPE*)a->data, (DTYPE*)b->data,
stride, strideNum, blockNum);
}
else if (input->dataType == X_FLOAT16) {
#ifdef HALF_PRECISION
KernelL1NormalizeHalf <half><< <blocks, threads>>> ((__half*)input->data, (__half*)output->data,
(__half*)mean->data, (__half*)distance->data,
(__half*)a->data, (__half*)b->data,
stride, strideNum, blockNum);
#else
ShowNTErrors("Please compile with -DHALF_PRECISION");
#endif
}
BacktoCudaDev(a->devID, devIDBackup);
}
#endif // USE_ROCM
} // namespace nts(NiuTrans.Tensor)
|
75c52d1dc204fbd17ce4bbefec55dbd117627147.cu
|
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (email: [email protected]) 2018-04-24
*/
#include "../../XDevice.h"
#include "../../XTensor.h"
#include "Normalize.h"
#include "Normalize.cuh"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/*
normalized the data with normal distribution (kernel code). For an input x,
y = a * (x-mean)/sqrt(variance+\epsilon) + b
where a and b are the scalar and bias respectively, and \epsilon is the adjustment parameter
>> input - the input data array
>> output - the output data array
>> mean - the mean of the input
>> var - the variance of the input
>> a - the scalar
>> b - the bias
>> epsilon - a parameter
>> stride - stride that we need to move to the next item
>> strideNum - how many strides we need to go over for next block
>> blockNum - how many blocks we have
*/
template<class T>
__global__
void KernelNormalizeFloat(T * input, T * output, T * mean, T * var,
T * a, T * b, T epsilon,
int stride, int strideNum, int blockNum)
{
__shared__ T iMean[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ T iVar[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ int iBlock[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ int iOffset[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ int blockSize;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= stride * blockNum || j >= strideNum)
return;
if (threadIdx.y == 0) {
iOffset[threadIdx.x] = i % stride;
iBlock[threadIdx.x] = i / stride;
iMean[threadIdx.x] = mean[i];
iVar[threadIdx.x] = var[i];
blockSize = stride * strideNum;
}
__syncthreads();
int inBlockOffset = j * stride + iOffset[threadIdx.x];
int offset = iBlock[threadIdx.x] * blockSize + inBlockOffset;
output[offset] = (DTYPE)(a[inBlockOffset] * (input[offset] - iMean[threadIdx.x])) /
sqrt((DTYPE)(iVar[threadIdx.x] + epsilon)) + (DTYPE)b[inBlockOffset];
}
template<class T>
__global__
void KernelNormalizeHalf(T * input, T * output, T * mean, T * var,
T * a, T * b,
int stride, int strideNum, int blockNum)
{
__shared__ half iMean[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ half iVar[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ int iBlock[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ int iOffset[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ int blockSize;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= stride * blockNum || j >= strideNum)
return;
if (threadIdx.y == 0) {
iOffset[threadIdx.x] = i % stride;
iBlock[threadIdx.x] = i / stride;
iMean[threadIdx.x] = mean[i];
iVar[threadIdx.x] = var[i];
blockSize = stride * strideNum;
}
__syncthreads();
int inBlockOffset = j * stride + iOffset[threadIdx.x];
int offset = iBlock[threadIdx.x] * blockSize + inBlockOffset;
output[offset] = __hadd(__hdiv(__hmul(a[inBlockOffset], __hsub(input[offset], iMean[threadIdx.x])),
hsqrt(iVar[threadIdx.x])), b[inBlockOffset]);
}
/*
normalized the data with normal distribution. For an input x,
y = a * (x-mean)/sqrt(variance+\epsilon) + b
where a and b are the scalar and bias respectively, and \epsilon is the adjustment parameter
>> input - the input tensor
>> output - the output tensor
>> dim - dimension along which the mean and variance are computed
>> mean - the mean of the input
>> var - the variance of the input
>> a - the scalar
>> b - the bias
>> epsilon - a parameter
*/
void _CudaNormalize(const XTensor * input, XTensor * output, int dim,
const XTensor * mean, const XTensor * var,
const XTensor * a, const XTensor * b,
DTYPE epsilon)
{
int stride = 1;
int strideNum = input->dimSize[dim];
int blockNum = 1;
for (int i = 0; i < input->order; i++) {
if (i > dim)
stride *= input->dimSize[i];
else if (i < dim)
blockNum *= input->dimSize[i];
}
int cudaGridSize[3];
int cudaBlockSize[3];
GDevs.GetCudaThread2D(input->devID, strideNum, stride * blockNum,
MAX_INT, cudaGridSize, cudaBlockSize);
dim3 blocks(cudaGridSize[1], cudaGridSize[0]);
dim3 threads(cudaBlockSize[1], cudaBlockSize[0]);
int devIDBackup;
ProtectCudaDev(a->devID, devIDBackup);
if (input->dataType == DEFAULT_DTYPE) {
KernelNormalizeFloat <DTYPE><< <blocks, threads >> >((DTYPE*)input->data, (DTYPE*)output->data,
(DTYPE*)mean->data, (DTYPE*)var->data,
(DTYPE*)a->data, (DTYPE*)b->data, epsilon,
stride, strideNum, blockNum);
}
else if (input->dataType == X_FLOAT16) {
#ifdef HALF_PRECISION
KernelNormalizeHalf <half><< <blocks, threads>>> ((__half*)input->data, (__half*)output->data,
(__half*)mean->data, (__half*)var->data,
(__half*)a->data, (__half*)b->data,
stride, strideNum, blockNum);
#else
ShowNTErrors("Please compile with -DHALF_PRECISION");
#endif
}
BacktoCudaDev(a->devID, devIDBackup);
}
/*
L1-normalize the data (kernel code). For an input x,
y = a * (x-mean)/distance + b
where a and b are the scalar and bias respectively
>> input - the input data array
>> output - the output data array
>> mean - the mean of the input
>> distance - the distance of the input
>> a - the scalar
>> b - the bias
>> stride - stride that we need to move to the next item
>> strideNum - how many strides we need to go over for next block
>> blockNum - how many blocks we have
*/
template<class T>
__global__
void KernelL1NormalizeFloat(T * input, T * output, T * mean, T * distance,
T * a, T * b,
int stride, int strideNum, int blockNum)
{
__shared__ T iMean[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ T iVar[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ int iBlock[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ int iOffset[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ int blockSize;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= stride * blockNum || j >= strideNum)
return;
if (threadIdx.y == 0) {
iOffset[threadIdx.x] = i % stride;
iBlock[threadIdx.x] = i / stride;
iMean[threadIdx.x] = mean[i];
iVar[threadIdx.x] = distance[i];
blockSize = stride * strideNum;
}
__syncthreads();
int inBlockOffset = j * stride + iOffset[threadIdx.x];
int offset = iBlock[threadIdx.x] * blockSize + inBlockOffset;
output[offset] = (DTYPE)(a[inBlockOffset] * (input[offset] - iMean[threadIdx.x]) /
iVar[threadIdx.x]) + (DTYPE)b[inBlockOffset];
}
template<class T>
__global__
void KernelL1NormalizeHalf(T * input, T * output, T * mean, T * distance,
T * a, T * b, int stride, int strideNum, int blockNum)
{
__shared__ half iMean[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ half iVar[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ int iBlock[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ int iOffset[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ int blockSize;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= stride * blockNum || j >= strideNum)
return;
if (threadIdx.y == 0) {
iOffset[threadIdx.x] = i % stride;
iBlock[threadIdx.x] = i / stride;
iMean[threadIdx.x] = mean[i];
iVar[threadIdx.x] = distance[i];
blockSize = stride * strideNum;
}
__syncthreads();
int inBlockOffset = j * stride + iOffset[threadIdx.x];
int offset = iBlock[threadIdx.x] * blockSize + inBlockOffset;
output[offset] = __hadd(__hdiv(__hmul(a[inBlockOffset], __hsub(input[offset], iMean[threadIdx.x])),
iVar[threadIdx.x]), b[inBlockOffset]);
}
/*
L1-normalize the data. For an input x,
y = a * (x-mean)/distance + b
where a and b are the scalar and bias respectively
>> input - the input tensor
>> output - the output tensor
>> dim - dimension along which the mean and distance are computed
>> mean - the mean of the input
>> distance - the distance of the input
>> a - the scalar
>> b - the bias
*/
void _CudaL1Normalize(const XTensor * input, XTensor * output, int dim,
const XTensor * mean, const XTensor * distance,
const XTensor * a, const XTensor * b)
{
int stride = 1;
int strideNum = input->dimSize[dim];
int blockNum = 1;
for (int i = 0; i < input->order; i++) {
if (i > dim)
stride *= input->dimSize[i];
else if (i < dim)
blockNum *= input->dimSize[i];
}
int cudaGridSize[3];
int cudaBlockSize[3];
GDevs.GetCudaThread2D(input->devID, strideNum, stride * blockNum,
MAX_INT, cudaGridSize, cudaBlockSize);
dim3 blocks(cudaGridSize[1], cudaGridSize[0]);
dim3 threads(cudaBlockSize[1], cudaBlockSize[0]);
int devIDBackup;
ProtectCudaDev(a->devID, devIDBackup);
if (input->dataType == DEFAULT_DTYPE) {
KernelL1NormalizeFloat <DTYPE><< <blocks, threads >> >((DTYPE*)input->data, (DTYPE*)output->data,
(DTYPE*)mean->data, (DTYPE*)distance->data,
(DTYPE*)a->data, (DTYPE*)b->data,
stride, strideNum, blockNum);
}
else if (input->dataType == X_FLOAT16) {
#ifdef HALF_PRECISION
KernelL1NormalizeHalf <half><< <blocks, threads>>> ((__half*)input->data, (__half*)output->data,
(__half*)mean->data, (__half*)distance->data,
(__half*)a->data, (__half*)b->data,
stride, strideNum, blockNum);
#else
ShowNTErrors("Please compile with -DHALF_PRECISION");
#endif
}
BacktoCudaDev(a->devID, devIDBackup);
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor)
|
abc6a06c9502dd87cf8bdd94a38db5902e0ced92.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file dnn/src/cuda/pooling/pooling2d_qint.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*/
#include "./pooling2d_qint.cuh"
#include "src/common/opr_param_defs_enumv.cuh"
#include "src/cuda/query_blocksize.cuh"
using namespace megdnn;
using namespace cuda;
using namespace pooling2d;
namespace {
__device__ __forceinline__ int pack_int8_to_int8x4(int8_t x, int8_t y, int8_t z,
int8_t w) {
int ix = static_cast<int>(x), iy = static_cast<int>(y),
iz = static_cast<int>(z), iw = static_cast<int>(w);
asm volatile("prmt.b32 %0, %0, %1, 0x1140;" : "+r"(ix) : "r"(iy));
asm volatile("prmt.b32 %0, %0, %1, 0x1140;" : "+r"(iz) : "r"(iw));
asm volatile("prmt.b32 %0, %0, %1, 0x5410;" : "+r"(ix) : "r"(iz));
return ix;
}
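// Editor's note (added for clarity): the three prmt.b32 byte permutes above are
// equivalent to the plain C expression
//   (ix & 0xff) | ((iy & 0xff) << 8) | ((iz & 0xff) << 16) | ((iw & 0xff) << 24),
// i.e. x, y, z and w are packed into bytes 0..3 of the returned int.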
template <int regs, int dtype_bits, typename OutDtype>
__device__ __forceinline__ OutDtype pack_int8(int8_t (&x)[regs]);
template <>
__device__ __forceinline__ int pack_int8<4, 8, int>(int8_t (&x)[4]) {
return pack_int8_to_int8x4(x[0], x[1], x[2], x[3]);
}
template <>
__device__ __forceinline__ int2 pack_int8<8, 8, int2>(int8_t (&x)[8]) {
int8_t x0[4]{x[0], x[1], x[2], x[3]};
int8_t x1[4]{x[4], x[5], x[6], x[7]};
return ::make_int2(pack_int8<4, 8, int>(x0), pack_int8<4, 8, int>(x1));
}
template <>
__device__ __forceinline__ int4 pack_int8<16, 8, int4>(int8_t (&x)[16]) {
int8_t x0[4]{x[0], x[1], x[2], x[3]};
int8_t x1[4]{x[4], x[5], x[6], x[7]};
int8_t x2[4]{x[8], x[9], x[10], x[11]};
int8_t x3[4]{x[12], x[13], x[14], x[15]};
return ::make_int4(pack_int8<4, 8, int>(x0), pack_int8<4, 8, int>(x1),
pack_int8<4, 8, int>(x2), pack_int8<4, 8, int>(x3));
}
__device__ __forceinline__ int8_t pack_int8_to_int4x2(int8_t x0, int8_t x1) {
return (x0 & 0xf) | (x1 << 4);
}
template <>
__device__ __forceinline__ int pack_int8<8, 4, int>(int8_t (&x)[8]) {
int8_t x0 = pack_int8_to_int4x2(x[0], x[1]);
int8_t x1 = pack_int8_to_int4x2(x[2], x[3]);
int8_t x2 = pack_int8_to_int4x2(x[4], x[5]);
int8_t x3 = pack_int8_to_int4x2(x[6], x[7]);
return pack_int8_to_int8x4(x0, x1, x2, x3);
}
template <>
__device__ __forceinline__ int4 pack_int8<32, 4, int4>(int8_t (&x)[32]) {
int8_t x0[8]{x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7]};
int8_t x1[8]{x[8], x[9], x[10], x[11], x[12], x[13], x[14], x[15]};
int8_t x2[8]{x[16], x[17], x[18], x[19], x[20], x[21], x[22], x[23]};
int8_t x3[8]{x[24], x[25], x[26], x[27], x[28], x[29], x[30], x[31]};
return ::make_int4(pack_int8<8, 4, int>(x0), pack_int8<8, 4, int>(x1),
pack_int8<8, 4, int>(x2), pack_int8<8, 4, int>(x3));
}
template <typename Dtype>
struct TypeTrait;
template <>
struct TypeTrait<int8_t> {
static constexpr int bit_width = 8;
static constexpr int mask = 0xff;
static constexpr int8_t min = -128;
static constexpr int elem_per_32bit = 32 / bit_width;
static constexpr int shift_fix_sign = 0;
static constexpr bool need_zero_pad = false;
};
template <>
struct TypeTrait<dt_qint4> {
static constexpr int bit_width = 4;
static constexpr int mask = 0xf;
static constexpr int8_t min = -8;
static constexpr int elem_per_32bit = 32 / bit_width;
static constexpr int shift_fix_sign = 4;
static constexpr bool need_zero_pad = false;
};
template <>
struct TypeTrait<dt_quint4> {
static constexpr int bit_width = 4;
static constexpr int mask = 0xf;
static constexpr int8_t min = 0;
static constexpr int elem_per_32bit = 32 / bit_width;
static constexpr int shift_fix_sign = 0;
static constexpr bool need_zero_pad = true;
};
template <typename src_type, typename _feed_type>
struct MaxPooler {
using feed_type = _feed_type;
static constexpr int bit_width = TypeTrait<src_type>::bit_width;
static constexpr int nr_results = sizeof(feed_type) * 8 / bit_width;
static constexpr int elem_per_32bit = TypeTrait<src_type>::elem_per_32bit;
static constexpr int shift_fix_sign = TypeTrait<src_type>::shift_fix_sign;
int8_t res[nr_results];
__device__ MaxPooler(int, int) {}
__device__ __forceinline__ void init() {
#pragma unroll
for (int i = 0; i < nr_results; ++i) {
res[i] = TypeTrait<src_type>::min;
}
}
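    // Editor's note (added comment): in feed() below, the mask followed by the
    // left/right shift pair by shift_fix_sign sign-extends packed signed 4-bit
    // values (dt_qint4, shift_fix_sign == 4); for int8 and unsigned 4-bit data
    // shift_fix_sign is 0, so the shift pair is a no-op.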
__device__ __forceinline__ void feed(int x, int idx = 0) {
constexpr int unroll_n = sizeof(int) * 8 / bit_width;
#pragma unroll
for (int i = 0; i < unroll_n; i++) {
int8_t temp = ((x >> (i * bit_width)) & TypeTrait<src_type>::mask)
<< shift_fix_sign;
temp = temp >> shift_fix_sign;
res[idx + i] = res[idx + i] > temp ? res[idx + i] : temp;
}
}
__device__ __forceinline__ void feed(int2 x) {
feed(x.x, 0 * elem_per_32bit);
feed(x.y, 1 * elem_per_32bit);
}
__device__ __forceinline__ void feed(int4 x) {
feed(x.x, 0 * elem_per_32bit);
feed(x.y, 1 * elem_per_32bit);
feed(x.z, 2 * elem_per_32bit);
feed(x.w, 3 * elem_per_32bit);
}
__device__ __forceinline__ feed_type get_ans() {
feed_type ans;
ans = pack_int8<nr_results, bit_width, feed_type>(res);
return ans;
}
};
template <typename src_type, typename _feed_type, typename inter_type>
struct MeanIncludeRoundedPooler {
using feed_type = _feed_type;
static constexpr int bit_width = TypeTrait<src_type>::bit_width;
static constexpr int nr_results = sizeof(feed_type) * 8 / bit_width;
static constexpr int elem_per_32bit = TypeTrait<src_type>::elem_per_32bit;
static constexpr int shift_fix_sign = TypeTrait<src_type>::shift_fix_sign;
static constexpr bool need_zero_pad = TypeTrait<src_type>::need_zero_pad;
int32_t res[nr_results];
const int count;
const float fi_count;
int real_fi_count;
const int zero_pad;
__device__ MeanIncludeRoundedPooler(int count, int zero_point)
: count{count}, fi_count{1.f / count}, zero_pad{zero_point} {}
__device__ __forceinline__ void init() {
#pragma unroll
for (int i = 0; i < nr_results; ++i) {
res[i] = 0;
}
if (need_zero_pad) {
real_fi_count = 0;
}
}
__device__ __forceinline__ void feed(int x, int idx) {
constexpr int unroll_n = sizeof(int) * 8 / bit_width;
#pragma unroll
for (int i = 0; i < unroll_n; i++) {
int8_t temp = ((x >> (i * bit_width)) & TypeTrait<src_type>::mask)
<< shift_fix_sign;
temp = temp >> shift_fix_sign;
res[idx + i] += static_cast<int32_t>(temp);
}
}
__device__ __forceinline__ void feed(int x) {
feed(x, 0);
if (need_zero_pad) {
real_fi_count++;
}
}
__device__ __forceinline__ void feed(int2 x) {
feed(x.x, 0 * elem_per_32bit);
feed(x.y, 1 * elem_per_32bit);
if (need_zero_pad) {
real_fi_count++;
}
}
__device__ __forceinline__ void feed(int4 x) {
feed(x.x, 0 * elem_per_32bit);
feed(x.y, 1 * elem_per_32bit);
feed(x.z, 2 * elem_per_32bit);
feed(x.w, 3 * elem_per_32bit);
if (need_zero_pad) {
real_fi_count++;
}
}
__device__ __forceinline__ feed_type get_ans() {
feed_type ans;
int8_t out_res[nr_results];
#pragma unroll
for (int i = 0; i < nr_results; i++) {
float f32_res = roundf(static_cast<float>(res[i]) * fi_count);
if (need_zero_pad) {
f32_res = roundf((static_cast<float>(res[i]) +
(count - real_fi_count) * zero_pad) *
fi_count);
}
int i8_res;
asm volatile("cvt.rni.s8.f32 %0, %1;"
: "=r"(i8_res)
: "f"(f32_res));
out_res[i] = i8_res;
}
ans = pack_int8<nr_results, bit_width, feed_type>(out_res);
return ans;
}
};
template <typename src_type, typename _feed_type, typename inter_type>
struct MeanExcludeRoundedPooler {
using feed_type = _feed_type;
static constexpr int bit_width = TypeTrait<src_type>::bit_width;
static constexpr int nr_results = sizeof(feed_type) * 8 / bit_width;
static constexpr int elem_per_32bit = TypeTrait<src_type>::elem_per_32bit;
static constexpr int shift_fix_sign = TypeTrait<src_type>::shift_fix_sign;
int32_t res[nr_results];
int count;
__device__ MeanExcludeRoundedPooler(int, int) {}
__device__ __forceinline__ void init() {
#pragma unroll
for (int i = 0; i < nr_results; ++i) {
res[i] = 0;
}
count = 0;
}
__device__ __forceinline__ void feed(int x, int idx) {
constexpr int unroll_n = sizeof(int) * 8 / bit_width;
#pragma unroll
for (int i = 0; i < unroll_n; i++) {
int8_t temp = ((x >> (i * bit_width)) & TypeTrait<src_type>::mask)
<< shift_fix_sign;
temp = temp >> shift_fix_sign;
res[idx + i] += static_cast<int32_t>(temp);
}
}
__device__ __forceinline__ void feed(int x) {
feed(x, 0);
count++;
}
__device__ __forceinline__ void feed(int2 x) {
feed(x.x, 0 * elem_per_32bit);
feed(x.y, 1 * elem_per_32bit);
count++;
}
__device__ __forceinline__ void feed(int4 x) {
feed(x.x, 0 * elem_per_32bit);
feed(x.y, 1 * elem_per_32bit);
feed(x.z, 2 * elem_per_32bit);
feed(x.w, 3 * elem_per_32bit);
count++;
}
__device__ __forceinline__ feed_type get_ans() {
feed_type ans;
int8_t out_res[nr_results];
#pragma unroll
for (int i = 0; i < nr_results; i++) {
float f32_res = roundf(static_cast<float>(res[i]) / count);
int i8_res;
asm volatile("cvt.rni.s8.f32 %0, %1;"
: "=r"(i8_res)
: "f"(f32_res));
out_res[i] = i8_res;
}
ans = pack_int8<nr_results, bit_width, feed_type>(out_res);
return ans;
}
};
template <typename Pooler>
__global__ void pooling2d_device_template_int8_cdiv4hwn4(
const int8_t* __restrict__ src, int8_t* __restrict__ dst, Param param) {
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int bidx = blockIdx.x;
const int bidy = blockIdx.y;
const int bidz = blockIdx.z;
using ldg_type = typename Pooler::feed_type;
static int constexpr pack_size = 4;
static int constexpr ldg_width = sizeof(ldg_type) / sizeof(int32_t);
const int batch = (bidy * blockDim.x + tidx) * ldg_width;
const int packed_ch = bidz * blockDim.y + tidy;
const int npack = param.n * pack_size;
if (batch >= param.n || packed_ch >= param.c / pack_size)
return;
const int ho = bidx / param.wo;
const int wo = bidx - param.wo * ho;
const int input_pixels = param.hi * param.wi;
const int output_pixels = param.ho * param.wo;
const int8_t* __restrict__ g_src_ptr =
src + batch * pack_size + packed_ch * input_pixels * npack;
int8_t* __restrict__ g_dst_ptr = dst + batch * pack_size +
packed_ch * output_pixels * npack +
(ho * param.wo + wo) * npack;
Pooler pooler(param.window_h * param.window_w, 0);
pooler.init();
for (int fh = 0; fh < param.window_h; fh++) {
uint32_t ih = ho * param.sh + fh - param.ph;
for (int fw = 0; fw < param.window_w; fw++) {
uint32_t iw = wo * param.sw + fw - param.pw;
if (ih < param.hi && iw < param.wi) {
const int8_t* __restrict__ cur_src_ptr =
g_src_ptr + (ih * param.wi + iw) * npack;
ldg_type sval =
__ldg(reinterpret_cast<const ldg_type*>(cur_src_ptr));
pooler.feed(sval);
}
}
}
ldg_type res = pooler.get_ans();
*(reinterpret_cast<ldg_type*>(g_dst_ptr)) = res;
}
template <typename Pooler, int pack_size, int pack_byte,
int ldg_width_assert = 4>
__global__ void pooling2d_device_template_nchwc(const int8_t* __restrict__ src,
int8_t* __restrict__ dst,
Param param, int zero_point) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
using ldg_type = typename Pooler::feed_type;
static int constexpr ldg_width = sizeof(ldg_type) / sizeof(int32_t);
static int constexpr ldg_width_bytes = sizeof(ldg_type);
static int constexpr section = pack_byte / sizeof(ldg_type);
MEGDNN_STATIC_ASSERT(
ldg_width == ldg_width_assert,
"pooling2d (NCHW64) kernel must use 128bit width ldg instruction");
const int c_packed = param.c / pack_size;
const int batch = tid / (param.ho * param.wo * c_packed * section);
const int batch_residual =
tid - batch * param.ho * param.wo * c_packed * section;
const int oc = batch_residual / (param.ho * param.wo * section);
const int oc_residual = batch_residual - oc * param.ho * param.wo * section;
const int oh = oc_residual / (param.wo * section);
const int oh_residual = (oc_residual - oh * param.wo * section);
const int ow = oh_residual / section;
const int sec = oh_residual - ow * section;
if (batch >= param.n || oc >= c_packed || oh >= param.ho || ow >= param.wo)
return;
const int in_batch_stride =
param.hi * param.wi * param.c * pack_byte / pack_size;
const int out_batch_stride =
param.ho * param.wo * param.c * pack_byte / pack_size;
const int in_channel_stride = param.hi * param.wi * pack_byte;
const int out_channel_stride = param.ho * param.wo * pack_byte;
const int8_t* __restrict__ g_src_ptr =
src + (batch * in_batch_stride + oc * in_channel_stride +
sec * ldg_width_bytes);
int8_t* __restrict__ g_dst_ptr =
dst + (batch * out_batch_stride + oc * out_channel_stride +
(oh * param.wo + ow) * pack_byte + sec * ldg_width_bytes);
Pooler pooler(param.window_h * param.window_w, zero_point);
pooler.init();
for (int fh = 0; fh < param.window_h; fh++) {
uint32_t ih = oh * param.sh + fh - param.ph;
for (int fw = 0; fw < param.window_w; fw++) {
uint32_t iw = ow * param.sw + fw - param.pw;
if (ih < param.hi && iw < param.wi) {
const int8_t* __restrict__ cur_src_ptr =
g_src_ptr + (ih * param.wi + iw) * pack_byte;
ldg_type sval =
__ldg(reinterpret_cast<const ldg_type*>(cur_src_ptr));
pooler.feed(sval);
}
}
}
ldg_type res = pooler.get_ans();
*(reinterpret_cast<ldg_type*>(g_dst_ptr)) = res;
}
template <typename Pooler, int pack_size, int pack_byte,
int ldg_width_assert = 4>
__global__ void pooling2d_device_template_nhwc(const int8_t* __restrict__ src,
int8_t* __restrict__ dst,
Param param, int zero_point) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
using ldg_type = typename Pooler::feed_type;
static int constexpr ldg_width = sizeof(ldg_type) / sizeof(int32_t);
static int constexpr ldg_width_bytes = sizeof(ldg_type);
MEGDNN_STATIC_ASSERT(
ldg_width == ldg_width_assert,
"pooling2d (NHWC) kernel must ldg_width == ldg_width_assert");
const int c_packed = param.c / pack_size;
const int batch = tid / (param.ho * param.wo * c_packed);
const int batch_residual = tid - batch * param.ho * param.wo * c_packed;
const int oh = batch_residual / (param.wo * c_packed);
const int oh_residual = batch_residual - oh * param.wo * c_packed;
const int ow = oh_residual / c_packed;
const int ow_residual = oh_residual - ow * c_packed;
const int sec = ow_residual;
if (batch >= param.n || oh >= param.ho || ow >= param.wo)
return;
const int in_batch_stride =
param.hi * param.wi * param.c * pack_byte / pack_size;
const int out_batch_stride =
param.ho * param.wo * param.c * pack_byte / pack_size;
const int w_stride = param.c * pack_byte / pack_size;
const int8_t* __restrict__ g_src_ptr =
src + (batch * in_batch_stride + sec * ldg_width_bytes);
int8_t* __restrict__ g_dst_ptr =
dst + (batch * out_batch_stride + (oh * param.wo + ow) * w_stride +
sec * ldg_width_bytes);
Pooler pooler(param.window_h * param.window_w, zero_point);
pooler.init();
for (int fh = 0; fh < param.window_h; fh++) {
uint32_t ih = oh * param.sh + fh - param.ph;
for (int fw = 0; fw < param.window_w; fw++) {
uint32_t iw = ow * param.sw + fw - param.pw;
if (ih < param.hi && iw < param.wi) {
const int8_t* __restrict__ cur_src_ptr =
g_src_ptr + (ih * param.wi + iw) * w_stride;
ldg_type sval =
__ldg(reinterpret_cast<const ldg_type*>(cur_src_ptr));
pooler.feed(sval);
}
}
}
ldg_type res = pooler.get_ans();
*(reinterpret_cast<ldg_type*>(g_dst_ptr)) = res;
}
}; // namespace
void megdnn::cuda::pooling2d::do_pooling2d_int8_cdiv4hwn4(const int8_t* d_src,
int8_t* d_dst,
const Param& param,
hipStream_t stream,
uint32_t mode) {
using Mode = megdnn::param_enumv::Pooling::Mode;
void (*kern)(const int8_t* __restrict__, int8_t* __restrict__, Param param);
uint32_t vthreads_x = 0, vthreads_y = param.c / 4;
#define dispatch_pooling_mode(_feed_type) \
switch (mode) { \
case Mode::MAX: \
kern = pooling2d_device_template_int8_cdiv4hwn4< \
MaxPooler<int8_t, _feed_type>>; \
break; \
case Mode::AVERAGE: \
kern = pooling2d_device_template_int8_cdiv4hwn4< \
MeanIncludeRoundedPooler<int8_t, _feed_type, int32_t>>; \
break; \
case Mode::AVERAGE_COUNT_EXCLUDE_PADDING: \
kern = pooling2d_device_template_int8_cdiv4hwn4< \
MeanExcludeRoundedPooler<int8_t, _feed_type, int32_t>>; \
break; \
default: \
megdnn_assert(false, "invalid pooling mode"); \
}
if (param.n % 4 == 0) {
dispatch_pooling_mode(int4);
vthreads_x = param.n / 4;
} else if (param.n % 2 == 0) {
dispatch_pooling_mode(int2);
vthreads_x = param.n / 2;
} else {
dispatch_pooling_mode(int32_t);
vthreads_x = param.n;
}
#undef dispatch_pooling_mode
constexpr uint32_t threads_x = 16;
uint32_t nr_threads = query_blocksize_for_kernel(kern);
uint32_t nr_threads_x = ::min(threads_x, vthreads_x),
nr_threads_y = ::min(nr_threads / nr_threads_x, vthreads_y);
uint32_t nr_blocks_x = param.ho * param.wo,
nr_blocks_y = DIVUP(vthreads_x, nr_threads_x),
nr_blocks_z = DIVUP(vthreads_y, nr_threads_y);
dim3 threads{nr_threads_x, nr_threads_y, 1};
dim3 blocks{nr_blocks_x, nr_blocks_y, nr_blocks_z};
hipLaunchKernelGGL(( kern), dim3(blocks), dim3(threads), 0, stream, d_src, d_dst, param);
after_kernel_launch();
}
void megdnn::cuda::pooling2d::do_pooling2d_int8_ncdiv4hw4(
const int8_t* d_src, int8_t* d_dst, const Param& param,
hipStream_t stream, uint32_t mode, bool /* uint_case */, int zero_point) {
using Mode = megdnn::param_enumv::Pooling::Mode;
void (*kern)(const int8_t* __restrict__, int8_t* __restrict__, Param param,
int zero_point);
constexpr int ldg_byte = 4;
constexpr int elem_per_byte = 1;
constexpr int pack_size = 4;
constexpr int pack_byte = pack_size / elem_per_byte;
constexpr int elem_per_thread = ldg_byte * elem_per_byte;
constexpr int ldg_assert_width = ldg_byte / sizeof(int32_t);
uint32_t vthreads =
param.n * param.c * param.ho * param.wo / elem_per_thread;
switch (mode) {
case Mode::MAX:
kern = pooling2d_device_template_nchwc<MaxPooler<int8_t, int32_t>,
pack_size, pack_byte,
ldg_assert_width>;
break;
case Mode::AVERAGE:
kern = pooling2d_device_template_nchwc<
MeanIncludeRoundedPooler<int8_t, int32_t, int32_t>,
pack_size, pack_byte, ldg_assert_width>;
break;
case Mode::AVERAGE_COUNT_EXCLUDE_PADDING:
kern = pooling2d_device_template_nchwc<
MeanExcludeRoundedPooler<int8_t, int32_t, int32_t>,
pack_size, pack_byte, ldg_assert_width>;
break;
default:
megdnn_assert(false, "invalid pooling mode");
}
uint32_t nr_threads = query_blocksize_for_kernel(kern);
nr_threads = ::min(nr_threads, vthreads);
uint32_t nr_blocks = DIVUP(vthreads, nr_threads);
hipLaunchKernelGGL(( kern), dim3(nr_blocks), dim3(nr_threads), 0, stream, d_src, d_dst, param, zero_point);
after_kernel_launch();
}
void megdnn::cuda::pooling2d::do_pooling2d_int8_ncdiv32hw32(
const int8_t* d_src, int8_t* d_dst, const Param& param,
hipStream_t stream, uint32_t mode, bool /* uint_case */, int zero_point) {
using Mode = megdnn::param_enumv::Pooling::Mode;
void (*kern)(const int8_t* __restrict__, int8_t* __restrict__, Param param,
int zero_point);
constexpr int ldg_byte = 16;
constexpr int elem_per_byte = 1;
constexpr int pack_size = 32;
constexpr int pack_byte = pack_size / elem_per_byte;
constexpr int elem_per_thread = ldg_byte * elem_per_byte;
uint32_t vthreads =
param.n * param.c * param.ho * param.wo / elem_per_thread;
switch (mode) {
case Mode::MAX:
kern = pooling2d_device_template_nchwc<MaxPooler<int8_t, int4>,
pack_size, pack_byte>;
break;
case Mode::AVERAGE:
kern = pooling2d_device_template_nchwc<
MeanIncludeRoundedPooler<int8_t, int4, int32_t>, pack_size,
pack_byte>;
break;
case Mode::AVERAGE_COUNT_EXCLUDE_PADDING:
kern = pooling2d_device_template_nchwc<
MeanExcludeRoundedPooler<int8_t, int4, int32_t>, pack_size,
pack_byte>;
break;
default:
megdnn_assert(false, "invalid pooling mode");
}
uint32_t nr_threads = query_blocksize_for_kernel(kern);
nr_threads = ::min(nr_threads, vthreads);
uint32_t nr_blocks = DIVUP(vthreads, nr_threads);
hipLaunchKernelGGL(( kern), dim3(nr_blocks), dim3(nr_threads), 0, stream, d_src, d_dst, param, zero_point);
after_kernel_launch();
}
void megdnn::cuda::pooling2d::do_pooling2d_int4_ncdiv64hw64(
const int8_t* d_src, int8_t* d_dst, const Param& param,
hipStream_t stream, uint32_t mode, bool uint_case, int zero_point) {
using Mode = megdnn::param_enumv::Pooling::Mode;
void (*kern)(const int8_t* __restrict__, int8_t* __restrict__, Param param,
int zero_point);
constexpr int ldg_byte = 16;
constexpr int elem_per_byte = 2;
constexpr int pack_size = 64;
constexpr int pack_byte = pack_size / elem_per_byte;
constexpr int elem_per_thread = ldg_byte * elem_per_byte;
uint32_t vthreads =
param.n * param.c * param.ho * param.wo / elem_per_thread;
if (uint_case) {
switch (mode) {
case Mode::MAX:
kern = pooling2d_device_template_nchwc<
MaxPooler<dt_quint4, int4>, pack_size, pack_byte>;
break;
case Mode::AVERAGE:
kern = pooling2d_device_template_nchwc<
MeanIncludeRoundedPooler<dt_quint4, int4, int32_t>,
pack_size, pack_byte>;
break;
case Mode::AVERAGE_COUNT_EXCLUDE_PADDING:
kern = pooling2d_device_template_nchwc<
MeanExcludeRoundedPooler<dt_quint4, int4, int32_t>,
pack_size, pack_byte>;
break;
default:
megdnn_assert(false, "invalid pooling mode");
}
} else {
switch (mode) {
case Mode::MAX:
kern = pooling2d_device_template_nchwc<MaxPooler<dt_qint4, int4>,
pack_size, pack_byte>;
break;
case Mode::AVERAGE:
kern = pooling2d_device_template_nchwc<
MeanIncludeRoundedPooler<dt_qint4, int4, int32_t>,
pack_size, pack_byte>;
break;
case Mode::AVERAGE_COUNT_EXCLUDE_PADDING:
kern = pooling2d_device_template_nchwc<
MeanExcludeRoundedPooler<dt_qint4, int4, int32_t>,
pack_size, pack_byte>;
break;
default:
megdnn_assert(false, "invalid pooling mode");
}
}
uint32_t nr_threads = query_blocksize_for_kernel(kern);
nr_threads = ::min(nr_threads, vthreads);
uint32_t nr_blocks = DIVUP(vthreads, nr_threads);
hipLaunchKernelGGL(( kern), dim3(nr_blocks), dim3(nr_threads), 0, stream, d_src, d_dst, param, zero_point);
after_kernel_launch();
}
void megdnn::cuda::pooling2d::do_pooling2d_int4_nhwc(
const int8_t* d_src, int8_t* d_dst, const Param& param,
hipStream_t stream, uint32_t mode, bool uint_case, int zero_point) {
using Mode = megdnn::param_enumv::Pooling::Mode;
void (*kern)(const int8_t* __restrict__, int8_t* __restrict__, Param param,
int zero_point);
megdnn_assert(param.c % 8 == 0);
constexpr int ldg_byte = 4;
constexpr int elem_per_byte = 2;
constexpr int ldg_width_assert = 1;
constexpr int pack_size = ldg_byte * elem_per_byte;
constexpr int pack_byte = pack_size / elem_per_byte;
constexpr int elem_per_thread = ldg_byte * elem_per_byte;
uint32_t vthreads =
param.n * param.c * param.ho * param.wo / elem_per_thread;
if (uint_case) {
switch (mode) {
case Mode::MAX:
kern = pooling2d_device_template_nhwc<
MaxPooler<dt_quint4, int32_t>, pack_size, pack_byte,
ldg_width_assert>;
break;
case Mode::AVERAGE:
kern = pooling2d_device_template_nhwc<
MeanIncludeRoundedPooler<dt_quint4, int32_t, int32_t>,
pack_size, pack_byte, ldg_width_assert>;
break;
case Mode::AVERAGE_COUNT_EXCLUDE_PADDING:
kern = pooling2d_device_template_nhwc<
MeanExcludeRoundedPooler<dt_quint4, int32_t, int32_t>,
pack_size, pack_byte, ldg_width_assert>;
break;
default:
megdnn_assert(false, "invalid pooling mode");
}
} else {
switch (mode) {
case Mode::MAX:
kern = pooling2d_device_template_nhwc<
MaxPooler<dt_qint4, int32_t>, pack_size, pack_byte,
ldg_width_assert>;
break;
case Mode::AVERAGE:
kern = pooling2d_device_template_nhwc<
MeanIncludeRoundedPooler<dt_qint4, int32_t, int32_t>,
pack_size, pack_byte, ldg_width_assert>;
break;
case Mode::AVERAGE_COUNT_EXCLUDE_PADDING:
kern = pooling2d_device_template_nhwc<
MeanExcludeRoundedPooler<dt_qint4, int32_t, int32_t>,
pack_size, pack_byte, ldg_width_assert>;
break;
default:
megdnn_assert(false, "invalid pooling mode");
}
}
uint32_t nr_threads = query_blocksize_for_kernel(kern);
nr_threads = ::min(nr_threads, vthreads);
uint32_t nr_blocks = DIVUP(vthreads, nr_threads);
hipLaunchKernelGGL(( kern), dim3(nr_blocks), dim3(nr_threads), 0, stream, d_src, d_dst, param, zero_point);
after_kernel_launch();
}
// vim: syntax=cuda.doxygen
|
abc6a06c9502dd87cf8bdd94a38db5902e0ced92.cu
|
/**
* \file dnn/src/cuda/pooling/pooling2d_qint.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*/
#include "./pooling2d_qint.cuh"
#include "src/common/opr_param_defs_enumv.cuh"
#include "src/cuda/query_blocksize.cuh"
using namespace megdnn;
using namespace cuda;
using namespace pooling2d;
namespace {
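// pack_int8_to_int8x4 below packs four int8 values into one 32-bit word with
// the PTX prmt (byte-permute) instruction: each selector nibble picks one byte
// out of the 8-byte pool {a, b}. The 0x1140 steps place the low byte of each
// operand side by side, and the final 0x5410 step merges the two half-results
// so the output bytes are {x, y, z, w} from lowest to highest (per the PTX ISA
// description of prmt.b32).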
__device__ __forceinline__ int pack_int8_to_int8x4(int8_t x, int8_t y, int8_t z,
int8_t w) {
int ix = static_cast<int>(x), iy = static_cast<int>(y),
iz = static_cast<int>(z), iw = static_cast<int>(w);
asm volatile("prmt.b32 %0, %0, %1, 0x1140;" : "+r"(ix) : "r"(iy));
asm volatile("prmt.b32 %0, %0, %1, 0x1140;" : "+r"(iz) : "r"(iw));
asm volatile("prmt.b32 %0, %0, %1, 0x5410;" : "+r"(ix) : "r"(iz));
return ix;
}
template <int regs, int dtype_bits, typename OutDtype>
__device__ __forceinline__ OutDtype pack_int8(int8_t (&x)[regs]);
template <>
__device__ __forceinline__ int pack_int8<4, 8, int>(int8_t (&x)[4]) {
return pack_int8_to_int8x4(x[0], x[1], x[2], x[3]);
}
template <>
__device__ __forceinline__ int2 pack_int8<8, 8, int2>(int8_t (&x)[8]) {
int8_t x0[4]{x[0], x[1], x[2], x[3]};
int8_t x1[4]{x[4], x[5], x[6], x[7]};
return ::make_int2(pack_int8<4, 8, int>(x0), pack_int8<4, 8, int>(x1));
}
template <>
__device__ __forceinline__ int4 pack_int8<16, 8, int4>(int8_t (&x)[16]) {
int8_t x0[4]{x[0], x[1], x[2], x[3]};
int8_t x1[4]{x[4], x[5], x[6], x[7]};
int8_t x2[4]{x[8], x[9], x[10], x[11]};
int8_t x3[4]{x[12], x[13], x[14], x[15]};
return ::make_int4(pack_int8<4, 8, int>(x0), pack_int8<4, 8, int>(x1),
pack_int8<4, 8, int>(x2), pack_int8<4, 8, int>(x3));
}
__device__ __forceinline__ int8_t pack_int8_to_int4x2(int8_t x0, int8_t x1) {
return (x0 & 0xf) | (x1 << 4);
}
template <>
__device__ __forceinline__ int pack_int8<8, 4, int>(int8_t (&x)[8]) {
int8_t x0 = pack_int8_to_int4x2(x[0], x[1]);
int8_t x1 = pack_int8_to_int4x2(x[2], x[3]);
int8_t x2 = pack_int8_to_int4x2(x[4], x[5]);
int8_t x3 = pack_int8_to_int4x2(x[6], x[7]);
return pack_int8_to_int8x4(x0, x1, x2, x3);
}
template <>
__device__ __forceinline__ int4 pack_int8<32, 4, int4>(int8_t (&x)[32]) {
int8_t x0[8]{x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7]};
int8_t x1[8]{x[8], x[9], x[10], x[11], x[12], x[13], x[14], x[15]};
int8_t x2[8]{x[16], x[17], x[18], x[19], x[20], x[21], x[22], x[23]};
int8_t x3[8]{x[24], x[25], x[26], x[27], x[28], x[29], x[30], x[31]};
return ::make_int4(pack_int8<8, 4, int>(x0), pack_int8<8, 4, int>(x1),
pack_int8<8, 4, int>(x2), pack_int8<8, 4, int>(x3));
}
template <typename Dtype>
struct TypeTrait;
template <>
struct TypeTrait<int8_t> {
static constexpr int bit_width = 8;
static constexpr int mask = 0xff;
static constexpr int8_t min = -128;
static constexpr int elem_per_32bit = 32 / bit_width;
static constexpr int shift_fix_sign = 0;
static constexpr bool need_zero_pad = false;
};
template <>
struct TypeTrait<dt_qint4> {
static constexpr int bit_width = 4;
static constexpr int mask = 0xf;
static constexpr int8_t min = -8;
static constexpr int elem_per_32bit = 32 / bit_width;
static constexpr int shift_fix_sign = 4;
static constexpr bool need_zero_pad = false;
};
template <>
struct TypeTrait<dt_quint4> {
static constexpr int bit_width = 4;
static constexpr int mask = 0xf;
static constexpr int8_t min = 0;
static constexpr int elem_per_32bit = 32 / bit_width;
static constexpr int shift_fix_sign = 0;
static constexpr bool need_zero_pad = true;
};
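// The poolers below consume packed 32-bit words (or int2/int4 vectors of such
// words). Each word carries elem_per_32bit lanes of bit_width bits; feed()
// isolates a lane with a shift + mask and, for signed 4-bit data, re-signs it
// via the shift_fix_sign left/right shift pair. MaxPooler keeps a running
// per-lane maximum initialized to the type's minimum value.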
template <typename src_type, typename _feed_type>
struct MaxPooler {
using feed_type = _feed_type;
static constexpr int bit_width = TypeTrait<src_type>::bit_width;
static constexpr int nr_results = sizeof(feed_type) * 8 / bit_width;
static constexpr int elem_per_32bit = TypeTrait<src_type>::elem_per_32bit;
static constexpr int shift_fix_sign = TypeTrait<src_type>::shift_fix_sign;
int8_t res[nr_results];
__device__ MaxPooler(int, int) {}
__device__ __forceinline__ void init() {
#pragma unroll
for (int i = 0; i < nr_results; ++i) {
res[i] = TypeTrait<src_type>::min;
}
}
__device__ __forceinline__ void feed(int x, int idx = 0) {
constexpr int unroll_n = sizeof(int) * 8 / bit_width;
#pragma unroll
for (int i = 0; i < unroll_n; i++) {
int8_t temp = ((x >> (i * bit_width)) & TypeTrait<src_type>::mask)
<< shift_fix_sign;
temp = temp >> shift_fix_sign;
res[idx + i] = res[idx + i] > temp ? res[idx + i] : temp;
}
}
__device__ __forceinline__ void feed(int2 x) {
feed(x.x, 0 * elem_per_32bit);
feed(x.y, 1 * elem_per_32bit);
}
__device__ __forceinline__ void feed(int4 x) {
feed(x.x, 0 * elem_per_32bit);
feed(x.y, 1 * elem_per_32bit);
feed(x.z, 2 * elem_per_32bit);
feed(x.w, 3 * elem_per_32bit);
}
__device__ __forceinline__ feed_type get_ans() {
feed_type ans;
ans = pack_int8<nr_results, bit_width, feed_type>(res);
return ans;
}
};
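// MeanIncludeRoundedPooler accumulates per-lane int32 sums and averages over
// the full window (1 / count) with round-to-nearest (cvt.rni). For unsigned
// 4-bit data (need_zero_pad) it also tracks how many in-bounds taps were fed
// and substitutes zero_point for the padded taps before averaging.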
template <typename src_type, typename _feed_type, typename inter_type>
struct MeanIncludeRoundedPooler {
using feed_type = _feed_type;
static constexpr int bit_width = TypeTrait<src_type>::bit_width;
static constexpr int nr_results = sizeof(feed_type) * 8 / bit_width;
static constexpr int elem_per_32bit = TypeTrait<src_type>::elem_per_32bit;
static constexpr int shift_fix_sign = TypeTrait<src_type>::shift_fix_sign;
static constexpr bool need_zero_pad = TypeTrait<src_type>::need_zero_pad;
int32_t res[nr_results];
const int count;
const float fi_count;
int real_fi_count;
const int zero_pad;
__device__ MeanIncludeRoundedPooler(int count, int zero_point)
: count{count}, fi_count{1.f / count}, zero_pad{zero_point} {}
__device__ __forceinline__ void init() {
#pragma unroll
for (int i = 0; i < nr_results; ++i) {
res[i] = 0;
}
if (need_zero_pad) {
real_fi_count = 0;
}
}
__device__ __forceinline__ void feed(int x, int idx) {
constexpr int unroll_n = sizeof(int) * 8 / bit_width;
#pragma unroll
for (int i = 0; i < unroll_n; i++) {
int8_t temp = ((x >> (i * bit_width)) & TypeTrait<src_type>::mask)
<< shift_fix_sign;
temp = temp >> shift_fix_sign;
res[idx + i] += static_cast<int32_t>(temp);
}
}
__device__ __forceinline__ void feed(int x) {
feed(x, 0);
if (need_zero_pad) {
real_fi_count++;
}
}
__device__ __forceinline__ void feed(int2 x) {
feed(x.x, 0 * elem_per_32bit);
feed(x.y, 1 * elem_per_32bit);
if (need_zero_pad) {
real_fi_count++;
}
}
__device__ __forceinline__ void feed(int4 x) {
feed(x.x, 0 * elem_per_32bit);
feed(x.y, 1 * elem_per_32bit);
feed(x.z, 2 * elem_per_32bit);
feed(x.w, 3 * elem_per_32bit);
if (need_zero_pad) {
real_fi_count++;
}
}
__device__ __forceinline__ feed_type get_ans() {
feed_type ans;
int8_t out_res[nr_results];
#pragma unroll
for (int i = 0; i < nr_results; i++) {
float f32_res = roundf(static_cast<float>(res[i]) * fi_count);
if (need_zero_pad) {
f32_res = roundf((static_cast<float>(res[i]) +
(count - real_fi_count) * zero_pad) *
fi_count);
}
int i8_res;
asm volatile("cvt.rni.s8.f32 %0, %1;"
: "=r"(i8_res)
: "f"(f32_res));
out_res[i] = i8_res;
}
ans = pack_int8<nr_results, bit_width, feed_type>(out_res);
return ans;
}
};
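// MeanExcludeRoundedPooler divides by `count`, the number of feed() calls,
// i.e. only the window positions that fall inside the input, so padded
// positions do not enter the denominator.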
template <typename src_type, typename _feed_type, typename inter_type>
struct MeanExcludeRoundedPooler {
using feed_type = _feed_type;
static constexpr int bit_width = TypeTrait<src_type>::bit_width;
static constexpr int nr_results = sizeof(feed_type) * 8 / bit_width;
static constexpr int elem_per_32bit = TypeTrait<src_type>::elem_per_32bit;
static constexpr int shift_fix_sign = TypeTrait<src_type>::shift_fix_sign;
int32_t res[nr_results];
int count;
__device__ MeanExcludeRoundedPooler(int, int) {}
__device__ __forceinline__ void init() {
#pragma unroll
for (int i = 0; i < nr_results; ++i) {
res[i] = 0;
}
count = 0;
}
__device__ __forceinline__ void feed(int x, int idx) {
constexpr int unroll_n = sizeof(int) * 8 / bit_width;
#pragma unroll
for (int i = 0; i < unroll_n; i++) {
int8_t temp = ((x >> (i * bit_width)) & TypeTrait<src_type>::mask)
<< shift_fix_sign;
temp = temp >> shift_fix_sign;
res[idx + i] += static_cast<int32_t>(temp);
}
}
__device__ __forceinline__ void feed(int x) {
feed(x, 0);
count++;
}
__device__ __forceinline__ void feed(int2 x) {
feed(x.x, 0 * elem_per_32bit);
feed(x.y, 1 * elem_per_32bit);
count++;
}
__device__ __forceinline__ void feed(int4 x) {
feed(x.x, 0 * elem_per_32bit);
feed(x.y, 1 * elem_per_32bit);
feed(x.z, 2 * elem_per_32bit);
feed(x.w, 3 * elem_per_32bit);
count++;
}
__device__ __forceinline__ feed_type get_ans() {
feed_type ans;
int8_t out_res[nr_results];
#pragma unroll
for (int i = 0; i < nr_results; i++) {
float f32_res = roundf(static_cast<float>(res[i]) / count);
int i8_res;
asm volatile("cvt.rni.s8.f32 %0, %1;"
: "=r"(i8_res)
: "f"(f32_res));
out_res[i] = i8_res;
}
ans = pack_int8<nr_results, bit_width, feed_type>(out_res);
return ans;
}
};
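// CHWN4 kernel: blockIdx.x enumerates output pixels (ho * wo); threads along x
// cover the batch in groups of ldg_width, threads along y/z cover the packed
// channels. The indexing below corresponds to a (C/4, H, W, N, 4) layout, so
// consecutive batches of one channel pack are contiguous and can be read with
// a single vector ldg.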
template <typename Pooler>
__global__ void pooling2d_device_template_int8_cdiv4hwn4(
const int8_t* __restrict__ src, int8_t* __restrict__ dst, Param param) {
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int bidx = blockIdx.x;
const int bidy = blockIdx.y;
const int bidz = blockIdx.z;
using ldg_type = typename Pooler::feed_type;
static int constexpr pack_size = 4;
static int constexpr ldg_width = sizeof(ldg_type) / sizeof(int32_t);
const int batch = (bidy * blockDim.x + tidx) * ldg_width;
const int packed_ch = bidz * blockDim.y + tidy;
const int npack = param.n * pack_size;
if (batch >= param.n || packed_ch >= param.c / pack_size)
return;
const int ho = bidx / param.wo;
const int wo = bidx - param.wo * ho;
const int input_pixels = param.hi * param.wi;
const int output_pixels = param.ho * param.wo;
const int8_t* __restrict__ g_src_ptr =
src + batch * pack_size + packed_ch * input_pixels * npack;
int8_t* __restrict__ g_dst_ptr = dst + batch * pack_size +
packed_ch * output_pixels * npack +
(ho * param.wo + wo) * npack;
Pooler pooler(param.window_h * param.window_w, 0);
pooler.init();
for (int fh = 0; fh < param.window_h; fh++) {
uint32_t ih = ho * param.sh + fh - param.ph;
for (int fw = 0; fw < param.window_w; fw++) {
uint32_t iw = wo * param.sw + fw - param.pw;
if (ih < param.hi && iw < param.wi) {
const int8_t* __restrict__ cur_src_ptr =
g_src_ptr + (ih * param.wi + iw) * npack;
ldg_type sval =
__ldg(reinterpret_cast<const ldg_type*>(cur_src_ptr));
pooler.feed(sval);
}
}
}
ldg_type res = pooler.get_ans();
*(reinterpret_cast<ldg_type*>(g_dst_ptr)) = res;
}
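// NCHWc kernel: the flat thread id is decomposed into (batch, packed channel,
// oh, ow, sec), where sec selects one ldg_type-sized slice of a pack_byte
// channel block. The strides below correspond to an
// (N, C/pack_size, H, W, pack_size) layout.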
template <typename Pooler, int pack_size, int pack_byte,
int ldg_width_assert = 4>
__global__ void pooling2d_device_template_nchwc(const int8_t* __restrict__ src,
int8_t* __restrict__ dst,
Param param, int zero_point) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
using ldg_type = typename Pooler::feed_type;
static int constexpr ldg_width = sizeof(ldg_type) / sizeof(int32_t);
static int constexpr ldg_width_bytes = sizeof(ldg_type);
static int constexpr section = pack_byte / sizeof(ldg_type);
MEGDNN_STATIC_ASSERT(
ldg_width == ldg_width_assert,
"pooling2d (NCHW64) kernel must use 128bit width ldg instruction");
const int c_packed = param.c / pack_size;
const int batch = tid / (param.ho * param.wo * c_packed * section);
const int batch_residual =
tid - batch * param.ho * param.wo * c_packed * section;
const int oc = batch_residual / (param.ho * param.wo * section);
const int oc_residual = batch_residual - oc * param.ho * param.wo * section;
const int oh = oc_residual / (param.wo * section);
const int oh_residual = (oc_residual - oh * param.wo * section);
const int ow = oh_residual / section;
const int sec = oh_residual - ow * section;
if (batch >= param.n || oc >= c_packed || oh >= param.ho || ow >= param.wo)
return;
const int in_batch_stride =
param.hi * param.wi * param.c * pack_byte / pack_size;
const int out_batch_stride =
param.ho * param.wo * param.c * pack_byte / pack_size;
const int in_channel_stride = param.hi * param.wi * pack_byte;
const int out_channel_stride = param.ho * param.wo * pack_byte;
const int8_t* __restrict__ g_src_ptr =
src + (batch * in_batch_stride + oc * in_channel_stride +
sec * ldg_width_bytes);
int8_t* __restrict__ g_dst_ptr =
dst + (batch * out_batch_stride + oc * out_channel_stride +
(oh * param.wo + ow) * pack_byte + sec * ldg_width_bytes);
Pooler pooler(param.window_h * param.window_w, zero_point);
pooler.init();
for (int fh = 0; fh < param.window_h; fh++) {
uint32_t ih = oh * param.sh + fh - param.ph;
for (int fw = 0; fw < param.window_w; fw++) {
uint32_t iw = ow * param.sw + fw - param.pw;
if (ih < param.hi && iw < param.wi) {
const int8_t* __restrict__ cur_src_ptr =
g_src_ptr + (ih * param.wi + iw) * pack_byte;
ldg_type sval =
__ldg(reinterpret_cast<const ldg_type*>(cur_src_ptr));
pooler.feed(sval);
}
}
}
ldg_type res = pooler.get_ans();
*(reinterpret_cast<ldg_type*>(g_dst_ptr)) = res;
}
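// NHWC kernel: each thread handles one group of pack_size consecutive channels
// (sec) of one output pixel; moving by one input pixel advances the pointer by
// w_stride = C * pack_byte / pack_size bytes.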
template <typename Pooler, int pack_size, int pack_byte,
int ldg_width_assert = 4>
__global__ void pooling2d_device_template_nhwc(const int8_t* __restrict__ src,
int8_t* __restrict__ dst,
Param param, int zero_point) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
using ldg_type = typename Pooler::feed_type;
static int constexpr ldg_width = sizeof(ldg_type) / sizeof(int32_t);
static int constexpr ldg_width_bytes = sizeof(ldg_type);
MEGDNN_STATIC_ASSERT(
ldg_width == ldg_width_assert,
"pooling2d (NHWC) kernel must ldg_width == ldg_width_assert");
const int c_packed = param.c / pack_size;
const int batch = tid / (param.ho * param.wo * c_packed);
const int batch_residual = tid - batch * param.ho * param.wo * c_packed;
const int oh = batch_residual / (param.wo * c_packed);
const int oh_residual = batch_residual - oh * param.wo * c_packed;
const int ow = oh_residual / c_packed;
const int ow_residual = oh_residual - ow * c_packed;
const int sec = ow_residual;
if (batch >= param.n || oh >= param.ho || ow >= param.wo)
return;
const int in_batch_stride =
param.hi * param.wi * param.c * pack_byte / pack_size;
const int out_batch_stride =
param.ho * param.wo * param.c * pack_byte / pack_size;
const int w_stride = param.c * pack_byte / pack_size;
const int8_t* __restrict__ g_src_ptr =
src + (batch * in_batch_stride + sec * ldg_width_bytes);
int8_t* __restrict__ g_dst_ptr =
dst + (batch * out_batch_stride + (oh * param.wo + ow) * w_stride +
sec * ldg_width_bytes);
Pooler pooler(param.window_h * param.window_w, zero_point);
pooler.init();
for (int fh = 0; fh < param.window_h; fh++) {
uint32_t ih = oh * param.sh + fh - param.ph;
for (int fw = 0; fw < param.window_w; fw++) {
uint32_t iw = ow * param.sw + fw - param.pw;
if (ih < param.hi && iw < param.wi) {
const int8_t* __restrict__ cur_src_ptr =
g_src_ptr + (ih * param.wi + iw) * w_stride;
ldg_type sval =
__ldg(reinterpret_cast<const ldg_type*>(cur_src_ptr));
pooler.feed(sval);
}
}
}
ldg_type res = pooler.get_ans();
*(reinterpret_cast<ldg_type*>(g_dst_ptr)) = res;
}
}; // namespace
void megdnn::cuda::pooling2d::do_pooling2d_int8_cdiv4hwn4(const int8_t* d_src,
int8_t* d_dst,
const Param& param,
cudaStream_t stream,
uint32_t mode) {
using Mode = megdnn::param_enumv::Pooling::Mode;
void (*kern)(const int8_t* __restrict__, int8_t* __restrict__, Param param);
uint32_t vthreads_x = 0, vthreads_y = param.c / 4;
#define dispatch_pooling_mode(_feed_type) \
switch (mode) { \
case Mode::MAX: \
kern = pooling2d_device_template_int8_cdiv4hwn4< \
MaxPooler<int8_t, _feed_type>>; \
break; \
case Mode::AVERAGE: \
kern = pooling2d_device_template_int8_cdiv4hwn4< \
MeanIncludeRoundedPooler<int8_t, _feed_type, int32_t>>; \
break; \
case Mode::AVERAGE_COUNT_EXCLUDE_PADDING: \
kern = pooling2d_device_template_int8_cdiv4hwn4< \
MeanExcludeRoundedPooler<int8_t, _feed_type, int32_t>>; \
break; \
default: \
megdnn_assert(false, "invalid pooling mode"); \
}
if (param.n % 4 == 0) {
dispatch_pooling_mode(int4);
vthreads_x = param.n / 4;
} else if (param.n % 2 == 0) {
dispatch_pooling_mode(int2);
vthreads_x = param.n / 2;
} else {
dispatch_pooling_mode(int32_t);
vthreads_x = param.n;
}
#undef dispatch_pooling_mode
constexpr uint32_t threads_x = 16;
uint32_t nr_threads = query_blocksize_for_kernel(kern);
uint32_t nr_threads_x = std::min(threads_x, vthreads_x),
nr_threads_y = std::min(nr_threads / nr_threads_x, vthreads_y);
uint32_t nr_blocks_x = param.ho * param.wo,
nr_blocks_y = DIVUP(vthreads_x, nr_threads_x),
nr_blocks_z = DIVUP(vthreads_y, nr_threads_y);
dim3 threads{nr_threads_x, nr_threads_y, 1};
dim3 blocks{nr_blocks_x, nr_blocks_y, nr_blocks_z};
kern<<<blocks, threads, 0, stream>>>(d_src, d_dst, param);
after_kernel_launch();
}
void megdnn::cuda::pooling2d::do_pooling2d_int8_ncdiv4hw4(
const int8_t* d_src, int8_t* d_dst, const Param& param,
cudaStream_t stream, uint32_t mode, bool /* uint_case */, int zero_point) {
using Mode = megdnn::param_enumv::Pooling::Mode;
void (*kern)(const int8_t* __restrict__, int8_t* __restrict__, Param param,
int zero_point);
constexpr int ldg_byte = 4;
constexpr int elem_per_byte = 1;
constexpr int pack_size = 4;
constexpr int pack_byte = pack_size / elem_per_byte;
constexpr int elem_per_thread = ldg_byte * elem_per_byte;
constexpr int ldg_assert_width = ldg_byte / sizeof(int32_t);
uint32_t vthreads =
param.n * param.c * param.ho * param.wo / elem_per_thread;
switch (mode) {
case Mode::MAX:
kern = pooling2d_device_template_nchwc<MaxPooler<int8_t, int32_t>,
pack_size, pack_byte,
ldg_assert_width>;
break;
case Mode::AVERAGE:
kern = pooling2d_device_template_nchwc<
MeanIncludeRoundedPooler<int8_t, int32_t, int32_t>,
pack_size, pack_byte, ldg_assert_width>;
break;
case Mode::AVERAGE_COUNT_EXCLUDE_PADDING:
kern = pooling2d_device_template_nchwc<
MeanExcludeRoundedPooler<int8_t, int32_t, int32_t>,
pack_size, pack_byte, ldg_assert_width>;
break;
default:
megdnn_assert(false, "invalid pooling mode");
}
uint32_t nr_threads = query_blocksize_for_kernel(kern);
nr_threads = std::min(nr_threads, vthreads);
uint32_t nr_blocks = DIVUP(vthreads, nr_threads);
kern<<<nr_blocks, nr_threads, 0, stream>>>(d_src, d_dst, param, zero_point);
after_kernel_launch();
}
void megdnn::cuda::pooling2d::do_pooling2d_int8_ncdiv32hw32(
const int8_t* d_src, int8_t* d_dst, const Param& param,
cudaStream_t stream, uint32_t mode, bool /* uint_case */, int zero_point) {
using Mode = megdnn::param_enumv::Pooling::Mode;
void (*kern)(const int8_t* __restrict__, int8_t* __restrict__, Param param,
int zero_point);
constexpr int ldg_byte = 16;
constexpr int elem_per_byte = 1;
constexpr int pack_size = 32;
constexpr int pack_byte = pack_size / elem_per_byte;
constexpr int elem_per_thread = ldg_byte * elem_per_byte;
uint32_t vthreads =
param.n * param.c * param.ho * param.wo / elem_per_thread;
switch (mode) {
case Mode::MAX:
kern = pooling2d_device_template_nchwc<MaxPooler<int8_t, int4>,
pack_size, pack_byte>;
break;
case Mode::AVERAGE:
kern = pooling2d_device_template_nchwc<
MeanIncludeRoundedPooler<int8_t, int4, int32_t>, pack_size,
pack_byte>;
break;
case Mode::AVERAGE_COUNT_EXCLUDE_PADDING:
kern = pooling2d_device_template_nchwc<
MeanExcludeRoundedPooler<int8_t, int4, int32_t>, pack_size,
pack_byte>;
break;
default:
megdnn_assert(false, "invalid pooling mode");
}
uint32_t nr_threads = query_blocksize_for_kernel(kern);
nr_threads = std::min(nr_threads, vthreads);
uint32_t nr_blocks = DIVUP(vthreads, nr_threads);
kern<<<nr_blocks, nr_threads, 0, stream>>>(d_src, d_dst, param, zero_point);
after_kernel_launch();
}
void megdnn::cuda::pooling2d::do_pooling2d_int4_ncdiv64hw64(
const int8_t* d_src, int8_t* d_dst, const Param& param,
cudaStream_t stream, uint32_t mode, bool uint_case, int zero_point) {
using Mode = megdnn::param_enumv::Pooling::Mode;
void (*kern)(const int8_t* __restrict__, int8_t* __restrict__, Param param,
int zero_point);
constexpr int ldg_byte = 16;
constexpr int elem_per_byte = 2;
constexpr int pack_size = 64;
constexpr int pack_byte = pack_size / elem_per_byte;
constexpr int elem_per_thread = ldg_byte * elem_per_byte;
uint32_t vthreads =
param.n * param.c * param.ho * param.wo / elem_per_thread;
if (uint_case) {
switch (mode) {
case Mode::MAX:
kern = pooling2d_device_template_nchwc<
MaxPooler<dt_quint4, int4>, pack_size, pack_byte>;
break;
case Mode::AVERAGE:
kern = pooling2d_device_template_nchwc<
MeanIncludeRoundedPooler<dt_quint4, int4, int32_t>,
pack_size, pack_byte>;
break;
case Mode::AVERAGE_COUNT_EXCLUDE_PADDING:
kern = pooling2d_device_template_nchwc<
MeanExcludeRoundedPooler<dt_quint4, int4, int32_t>,
pack_size, pack_byte>;
break;
default:
megdnn_assert(false, "invalid pooling mode");
}
} else {
switch (mode) {
case Mode::MAX:
kern = pooling2d_device_template_nchwc<MaxPooler<dt_qint4, int4>,
pack_size, pack_byte>;
break;
case Mode::AVERAGE:
kern = pooling2d_device_template_nchwc<
MeanIncludeRoundedPooler<dt_qint4, int4, int32_t>,
pack_size, pack_byte>;
break;
case Mode::AVERAGE_COUNT_EXCLUDE_PADDING:
kern = pooling2d_device_template_nchwc<
MeanExcludeRoundedPooler<dt_qint4, int4, int32_t>,
pack_size, pack_byte>;
break;
default:
megdnn_assert(false, "invalid pooling mode");
}
}
uint32_t nr_threads = query_blocksize_for_kernel(kern);
nr_threads = std::min(nr_threads, vthreads);
uint32_t nr_blocks = DIVUP(vthreads, nr_threads);
kern<<<nr_blocks, nr_threads, 0, stream>>>(d_src, d_dst, param, zero_point);
after_kernel_launch();
}
void megdnn::cuda::pooling2d::do_pooling2d_int4_nhwc(
const int8_t* d_src, int8_t* d_dst, const Param& param,
cudaStream_t stream, uint32_t mode, bool uint_case, int zero_point) {
using Mode = megdnn::param_enumv::Pooling::Mode;
void (*kern)(const int8_t* __restrict__, int8_t* __restrict__, Param param,
int zero_point);
megdnn_assert(param.c % 8 == 0);
constexpr int ldg_byte = 4;
constexpr int elem_per_byte = 2;
constexpr int ldg_width_assert = 1;
constexpr int pack_size = ldg_byte * elem_per_byte;
constexpr int pack_byte = pack_size / elem_per_byte;
constexpr int elem_per_thread = ldg_byte * elem_per_byte;
uint32_t vthreads =
param.n * param.c * param.ho * param.wo / elem_per_thread;
if (uint_case) {
switch (mode) {
case Mode::MAX:
kern = pooling2d_device_template_nhwc<
MaxPooler<dt_quint4, int32_t>, pack_size, pack_byte,
ldg_width_assert>;
break;
case Mode::AVERAGE:
kern = pooling2d_device_template_nhwc<
MeanIncludeRoundedPooler<dt_quint4, int32_t, int32_t>,
pack_size, pack_byte, ldg_width_assert>;
break;
case Mode::AVERAGE_COUNT_EXCLUDE_PADDING:
kern = pooling2d_device_template_nhwc<
MeanExcludeRoundedPooler<dt_quint4, int32_t, int32_t>,
pack_size, pack_byte, ldg_width_assert>;
break;
default:
megdnn_assert(false, "invalid pooling mode");
}
} else {
switch (mode) {
case Mode::MAX:
kern = pooling2d_device_template_nhwc<
MaxPooler<dt_qint4, int32_t>, pack_size, pack_byte,
ldg_width_assert>;
break;
case Mode::AVERAGE:
kern = pooling2d_device_template_nhwc<
MeanIncludeRoundedPooler<dt_qint4, int32_t, int32_t>,
pack_size, pack_byte, ldg_width_assert>;
break;
case Mode::AVERAGE_COUNT_EXCLUDE_PADDING:
kern = pooling2d_device_template_nhwc<
MeanExcludeRoundedPooler<dt_qint4, int32_t, int32_t>,
pack_size, pack_byte, ldg_width_assert>;
break;
default:
megdnn_assert(false, "invalid pooling mode");
}
}
uint32_t nr_threads = query_blocksize_for_kernel(kern);
nr_threads = std::min(nr_threads, vthreads);
uint32_t nr_blocks = DIVUP(vthreads, nr_threads);
kern<<<nr_blocks, nr_threads, 0, stream>>>(d_src, d_dst, param, zero_point);
after_kernel_launch();
}
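// Hedged usage sketch (hypothetical shapes; only the Param fields referenced by
// the kernels above are shown, the real struct may carry more):
//   Param p;
//   p.n = 8; p.c = 64; p.hi = p.wi = 56; p.ho = p.wo = 28;
//   p.window_h = p.window_w = 2; p.sh = p.sw = 2; p.ph = p.pw = 0;
//   do_pooling2d_int8_ncdiv32hw32(
//           d_src, d_dst, p, stream,
//           static_cast<uint32_t>(megdnn::param_enumv::Pooling::Mode::MAX),
//           /* uint_case */ false, /* zero_point */ 0);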
// vim: syntax=cuda.doxygen
|
578a4d5933fa5807d615cca2aeb5660049d6befc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if __linux__ && defined(__INTEL_COMPILER)
#define __sync_fetch_and_add(ptr,addend) _InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(ptr)), addend)
#endif
#include <string>
#include <cstring>
#include <cctype>
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <fstream>
#include <bitset>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include "tbb/concurrent_hash_map.h"
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"
#include "tbb/tick_count.h"
#include "tbb/task_scheduler_init.h"
#include "tbb/concurrent_vector.h"
#include "utility.h"
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "csv.hpp"
#include "timer.h"
using namespace tbb;
using namespace std;
std::vector<string> timestamp;
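// For each thread index idx, count how many entries of A equal A[idx] and
// store that count in C[idx] -- an O(N^2) duplicate count over the parsed
// timestamps. B is accepted but currently unused by the kernel.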
__global__ void sumArraysOnGPU(unsigned long long *A, long *B, long *C, const int N)
{
// extern __shared__ long *shared_data[];
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// int idx = threadIdx.x;
for (int i = 0; i < N; ++i) {
if(idx < N && A[idx]==A[i])  // bounds guard: the last block may run past N
{
C[idx]++;
}
}
__syncthreads();
}
int main( int argc, char* argv[] ) {
int counter = 0;
int N = atoi(argv[2]);
const size_t ullBytes = N * sizeof(unsigned long long);
const size_t lBytes = N * sizeof(long);
unsigned long long *h_A;
long *h_B;
h_A = (unsigned long long *)malloc(ullBytes);
h_B = (long *)malloc(lBytes);
thrust::host_vector<unsigned long long> h_key_in_all(N);
thrust::host_vector<long> h_value_in_all(N);
unsigned int t, travdirtime;
int ngpus = 4;
try {
tbb::tick_count mainStartTime = tbb::tick_count::now();
srand(2);
utility::thread_number_range threads(tbb::task_scheduler_init::default_num_threads,0);
const string csv_file = std::string(argv[1]);
vector<vector<string>> data;
unsigned long long **d_k_i = (unsigned long long **)malloc(sizeof(unsigned long long *) * ngpus);
long **d_v_i = (long **)malloc(sizeof(long *) * ngpus);
unsigned long long **h_k_i = (unsigned long long **)malloc(sizeof(unsigned long long *) * ngpus);
long **h_v_i = (long **)malloc(sizeof(long *) * ngpus);
hipStream_t *stream = (hipStream_t *)malloc(sizeof(hipStream_t) * ngpus);
for (int i = 0; i < ngpus; i++)
{
hipSetDevice(i);
hipMalloc((void **) &d_k_i[i], ullBytes);
hipMalloc((void **) &d_v_i[i], lBytes);
hipHostMalloc((void **) &h_k_i[i], ullBytes);
hipHostMalloc((void **) &h_v_i[i], lBytes);
hipStreamCreate(&stream[i]);
}
try {
Csv objCsv(csv_file);
if (!objCsv.getCsv(data)) {
cout << "read ERROR" << endl;
return 1;
}
counter = 0;
for (unsigned int row = 0; row < data.size(); row++) {
vector<string> rec = data[row];
std::string tms = rec[0];
for(size_t c = tms.find_first_of("\""); c != string::npos; c = c = tms.find_first_of("\"")){
tms.erase(c,1);
}
for(size_t c = tms.find_first_of("/"); c != string::npos; c = c = tms.find_first_of("/")){
tms.erase(c,1);
}
for(size_t c = tms.find_first_of("."); c != string::npos; c = c = tms.find_first_of(".")){
tms.erase(c,1);
}
for(size_t c = tms.find_first_of(" "); c != string::npos; c = c = tms.find_first_of(" ")){
tms.erase(c,1);
}
for(size_t c = tms.find_first_of(":"); c != string::npos; c = c = tms.find_first_of(":")){
tms.erase(c,1);
}
h_A[row] = stoull(tms);
h_B[row] = 1;
if(counter < N/2) {
h_k_i[0][counter] = stoull(tms);
h_v_i[0][counter] = 1;
}
if(counter > N/2) {
h_k_i[1][counter] = stoull(tms);
h_v_i[1][counter] = 1;
}
counter = counter + 1;
}
int iLen = 1024;
dim3 block (iLen);
dim3 grid ((N + block.x - 1) / block.x);
hipSetDevice(0);
// copy the first half of the keys/values; the third argument is a byte count
hipMemcpyAsync(d_k_i[0], h_k_i[0], (N/2) * sizeof(unsigned long long), hipMemcpyHostToDevice, stream[0]);
hipMemcpyAsync(d_v_i[0], h_v_i[0], (N/2) * sizeof(long), hipMemcpyHostToDevice, stream[0]);
hipStreamSynchronize(stream[0]);
unsigned long long *d_A;
long *d_B;
hipMalloc((unsigned long long**)&d_A, ullBytes);
hipMalloc((long**)&d_B, lBytes);
hipMemcpy(d_A, h_k_i[0], ullBytes, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_v_i[0], lBytes, hipMemcpyHostToDevice);
long *sum, *gpuRef;
hipMalloc((long**)&sum, lBytes);
hipMemset(sum, 0, lBytes);  // the kernel increments C[idx], so the counters must start at zero
hipLaunchKernelGGL(( sumArraysOnGPU), dim3(grid), dim3(block), 0, 0, d_A, d_B, sum, N);
gpuRef = (long *)malloc(lBytes);
hipMemcpy(gpuRef, sum, lBytes, hipMemcpyDeviceToHost);
for(int i = 0; i < 10; i++)
{
cout << h_k_i[0][i] << "," << gpuRef[i] <<"," << h_v_i[0][i] << endl;
}
}
catch (...) {
cout << "EXCEPTION!" << endl;
return 1;
}
return 0;
} catch(std::exception& e) {
std::cerr<<"error occurred. error text is :\"" <<e.what()<<"\"\n";
}
}
|
578a4d5933fa5807d615cca2aeb5660049d6befc.cu
|
#if __linux__ && defined(__INTEL_COMPILER)
#define __sync_fetch_and_add(ptr,addend) _InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(ptr)), addend)
#endif
#include <string>
#include <cstring>
#include <cctype>
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <fstream>
#include <bitset>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include "tbb/concurrent_hash_map.h"
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"
#include "tbb/tick_count.h"
#include "tbb/task_scheduler_init.h"
#include "tbb/concurrent_vector.h"
#include "utility.h"
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "csv.hpp"
#include "timer.h"
using namespace tbb;
using namespace std;
std::vector<string> timestamp;
__global__ void sumArraysOnGPU(unsigned long long *A, long *B, long *C, const int N)
{
// extern __shared__ long *shared_data[];
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// int idx = threadIdx.x;
for (int i = 0; i < N; ++i) {
if(idx < N && A[idx]==A[i])  // bounds guard: the last block may run past N
{
C[idx]++;
}
}
__syncthreads();
}
int main( int argc, char* argv[] ) {
int counter = 0;
int N = atoi(argv[2]);
const size_t ullBytes = N * sizeof(unsigned long long);
const size_t lBytes = N * sizeof(long);
unsigned long long *h_A;
long *h_B;
h_A = (unsigned long long *)malloc(ullBytes);
h_B = (long *)malloc(lBytes);
thrust::host_vector<unsigned long long> h_key_in_all(N);
thrust::host_vector<long> h_value_in_all(N);
unsigned int t, travdirtime;
int ngpus = 4;
try {
tbb::tick_count mainStartTime = tbb::tick_count::now();
srand(2);
utility::thread_number_range threads(tbb::task_scheduler_init::default_num_threads,0);
const string csv_file = std::string(argv[1]);
vector<vector<string>> data;
unsigned long long **d_k_i = (unsigned long long **)malloc(sizeof(unsigned long long *) * ngpus);
long **d_v_i = (long **)malloc(sizeof(long *) * ngpus);
unsigned long long **h_k_i = (unsigned long long **)malloc(sizeof(unsigned long long *) * ngpus);
long **h_v_i = (long **)malloc(sizeof(long *) * ngpus);
cudaStream_t *stream = (cudaStream_t *)malloc(sizeof(cudaStream_t) * ngpus);
for (int i = 0; i < ngpus; i++)
{
cudaSetDevice(i);
cudaMalloc((void **) &d_k_i[i], ullBytes);
cudaMalloc((void **) &d_v_i[i], lBytes);
cudaMallocHost((void **) &h_k_i[i], ullBytes);
cudaMallocHost((void **) &h_v_i[i], lBytes);
cudaStreamCreate(&stream[i]);
}
try {
Csv objCsv(csv_file);
if (!objCsv.getCsv(data)) {
cout << "read ERROR" << endl;
return 1;
}
counter = 0;
for (unsigned int row = 0; row < data.size(); row++) {
vector<string> rec = data[row];
std::string tms = rec[0];
for(size_t c = tms.find_first_of("\""); c != string::npos; c = c = tms.find_first_of("\"")){
tms.erase(c,1);
}
for(size_t c = tms.find_first_of("/"); c != string::npos; c = c = tms.find_first_of("/")){
tms.erase(c,1);
}
for(size_t c = tms.find_first_of("."); c != string::npos; c = c = tms.find_first_of(".")){
tms.erase(c,1);
}
for(size_t c = tms.find_first_of(" "); c != string::npos; c = c = tms.find_first_of(" ")){
tms.erase(c,1);
}
for(size_t c = tms.find_first_of(":"); c != string::npos; c = c = tms.find_first_of(":")){
tms.erase(c,1);
}
h_A[row] = stoull(tms);
h_B[row] = 1;
if(counter < N/2) {
h_k_i[0][counter] = stoull(tms);
h_v_i[0][counter] = 1;
}
if(counter > N/2) {
h_k_i[1][counter] = stoull(tms);
h_v_i[1][counter] = 1;
}
counter = counter + 1;
}
int iLen = 1024;
dim3 block (iLen);
dim3 grid ((N + block.x - 1) / block.x);
cudaSetDevice(0);
// copy the first half of the keys/values; the third argument is a byte count
cudaMemcpyAsync(d_k_i[0], h_k_i[0], (N/2) * sizeof(unsigned long long), cudaMemcpyHostToDevice, stream[0]);
cudaMemcpyAsync(d_v_i[0], h_v_i[0], (N/2) * sizeof(long), cudaMemcpyHostToDevice, stream[0]);
cudaStreamSynchronize(stream[0]);
unsigned long long *d_A;
long *d_B;
cudaMalloc((unsigned long long**)&d_A, ullBytes);
cudaMalloc((long**)&d_B, lBytes);
cudaMemcpy(d_A, h_k_i[0], ullBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_v_i[0], lBytes, cudaMemcpyHostToDevice);
long *sum, *gpuRef;
cudaMalloc((long**)&sum, lBytes);
cudaMemset(sum, 0, lBytes);  // the kernel increments C[idx], so the counters must start at zero
sumArraysOnGPU<<<grid, block>>>(d_A, d_B, sum, N);
gpuRef = (long *)malloc(lBytes);
cudaMemcpy(gpuRef, sum, lBytes, cudaMemcpyDeviceToHost);
for(int i = 0; i < 10; i++)
{
cout << h_k_i[0][i] << "," << gpuRef[i] <<"," << h_v_i[0][i] << endl;
}
}
catch (...) {
cout << "EXCEPTION!" << endl;
return 1;
}
return 0;
} catch(std::exception& e) {
std::cerr<<"error occurred. error text is :\"" <<e.what()<<"\"\n";
}
}
|
543dee5a606c365057b111da7e0caa6d6f068610.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "ReplaceNANs.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *buffer = NULL;
hipMalloc(&buffer, XSIZE*YSIZE);
float value = 2;
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
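// Benchmark pattern: one untimed launch plus a device sync, ten warm-up
// launches, then 1000 launches timed with steady_clock. Note that no
// synchronization precedes `end`, so the figure mostly reflects enqueue cost.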
hipFree(0);
hipLaunchKernelGGL(( ReplaceNANs), dim3(gridBlock), dim3(threadBlock), 0, 0, buffer, value, size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( ReplaceNANs), dim3(gridBlock), dim3(threadBlock), 0, 0, buffer, value, size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( ReplaceNANs), dim3(gridBlock), dim3(threadBlock), 0, 0, buffer, value, size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
543dee5a606c365057b111da7e0caa6d6f068610.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "ReplaceNANs.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *buffer = NULL;
cudaMalloc(&buffer, XSIZE*YSIZE);
float value = 2;
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
ReplaceNANs<<<gridBlock,threadBlock>>>(buffer,value,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
ReplaceNANs<<<gridBlock,threadBlock>>>(buffer,value,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
ReplaceNANs<<<gridBlock,threadBlock>>>(buffer,value,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
bac3c4f77e5c3995ee18887bc27a12ba5dca5207.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <hip/hip_runtime.h>
#include "kosaraju.cuh"
/* Fill out the adjacency list and the reverse adjacency list according to
* the routes given. Each route represents a directed edge.
*/
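// Example (hypothetical indices): a route (2, 5) sets dev_adj[2 * n_ports + 5] = 1
// and dev_radj[5 * n_ports + 2] = 1.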
__global__
void cudaAirportAdjacencyKernel(int *dev_routes,
int *dev_adj,
int *dev_radj,
int n_ports,
int n_routes) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < n_routes) {
int first = dev_routes[2 * i];
int second = dev_routes[2 * i + 1];
dev_adj[first * n_ports + second] = 1;
dev_radj[second * n_ports + first] = 1;
i += blockDim.x * gridDim.x;
}
}
/* Wrapper function to call cudaAirportAdjacencyKernel. */
void cudaCallAirportAdjacencyKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
int *dev_routes,
int *dev_adj,
int *dev_radj,
int n_ports,
int n_routes) {
hipLaunchKernelGGL(( cudaAirportAdjacencyKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0,
dev_routes, dev_adj, dev_radj, n_ports, n_routes);
}
/* Remove any vertices with in-degree and out-degree 0, just for optimization. */
__global__
void cudaTrimGraph(int *m,
int *row_sum,
bool *mark,
int n_ports) {
// For i = 0 to n_ports - 1 inclusive, achieve the following:
// row_sum[i] = sum from j = 0 to n_ports - 1 of m[i * n_ports + j] * !mark[j]
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < n_ports) {
int total = 0;
for (int j = 0; j < n_ports; j++) {
total += m[i * n_ports + j] * !(mark[j]);
}
row_sum[i] = total;
i += blockDim.x * gridDim.x;
}
}
void cudaCallTrimGraph(const unsigned int blocks,
const unsigned int threadsPerBlock,
int *adj,
int *row_sum,
bool *mark,
int n_ports) {
hipLaunchKernelGGL(( cudaTrimGraph), dim3(blocks), dim3(threadsPerBlock), 0, 0, adj, row_sum, mark, n_ports);
}
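/* Frontier-style BFS step: every frontier vertex is expanded by one hop per
 * launch; the wrapper below keeps launching it until the frontier is empty.
 */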
__global__
void cudaBFSKernel(int *adj,
bool *frontier,
bool *visited,
int n_ports) {
// Do the BFS search
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < n_ports) {
if (frontier[tid]) {
frontier[tid] = false;
visited[tid] = true;
for (int i = 0; i < n_ports; i++) {
if (adj[tid * n_ports + i] && !visited[i]) {
frontier[i] = true;
}
}
}
tid += blockDim.x * gridDim.x;
}
}
/* Zeroes dev_flag[0] if the frontier array still contains any true values. */
__global__
void cudaContainsTrueKernel(bool *frontier,
int *dev_flag,
int n_ports) {
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < n_ports) {
if (frontier[tid]) {
dev_flag[0] *= 0;
}
tid += blockDim.x * gridDim.x;
}
}
/* Wrapper function to perform BFS. */
void cudaCallBFSKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
int *adj,
bool *visited,
bool *dev_frontier,
int start_port,
int n_ports,
int *dev_flag) {
int *flag = (int *) malloc(sizeof(int));
while (true) {
for (int i = 0; i < n_ports; i++) {
hipLaunchKernelGGL(( cudaBFSKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0,
adj, dev_frontier, visited, n_ports);
}
hipLaunchKernelGGL(( cudaContainsTrueKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0,
dev_frontier, dev_flag, n_ports);
hipMemcpy(flag, dev_flag, sizeof(int), hipMemcpyDeviceToHost);
if (flag[0]) {
break;
}
}
free(flag);
}
/* Fill out an array, one value for each airport. If an index i is some
* representative node of an SCC (that is not the starting airport) and we have
* that dev_zeroes[i] = 0 at the end of this kernel, then that means that
* index represents an airport that is a representative node of an SCC that has
* no incoming edges.
*/
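// Example: if any vertex of an SCC receives an edge from a vertex whose
// representative differs (an incoming edge from another SCC), the SCC's
// representative entry in dev_zeroes is set to 1; entries left at 0 mark
// source SCCs, which are counted by cudaFindAllZeroesKernel below.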
__global__
void cudaFindDegreeZeroSCCKernel(int *adj,
int *radj,
int *reps,
int *dev_zeroes,
int start_port,
int n_ports) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
dev_zeroes[start_port] = 1;
while (i < n_ports) {
unsigned int curr_rep = reps[i];
for(int j = 0; j < n_ports; j++) {
if (radj[i * n_ports + j] == 1 && reps[j] != curr_rep) {
dev_zeroes[curr_rep] = 1;
break;
}
}
i += blockDim.x * gridDim.x;
}
}
/* Find number of representative nodes that have in-degree 0 (excluding
* starting airport). This is then the final answer to our algorithm.
*/
__global__
void cudaFindAllZeroesKernel(int *dev_reps,
int *dev_zeroes,
int *dev_total,
int n_ports){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < n_ports) {
if(dev_reps[i] == i && dev_zeroes[i] == 0) {
atomicAdd(dev_total, 1);
}
i += blockDim.x * gridDim.x;
}
}
/* Wrapper function to call cudaFindDegreeZeroSCCKernel and
* cudaFindAllZeroesKernel.
*/
void cudaCallFindDegreeZeroSCCKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
int *dev_adj,
int *dev_radj,
int *dev_reps,
int *dev_zeroes,
int *dev_total,
int start_port,
int n_ports) {
hipLaunchKernelGGL(( cudaFindDegreeZeroSCCKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0,
dev_adj, dev_radj, dev_reps, dev_zeroes, start_port, n_ports);
hipLaunchKernelGGL(( cudaFindAllZeroesKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0,
dev_reps, dev_zeroes, dev_total, n_ports);
}
|
bac3c4f77e5c3995ee18887bc27a12ba5dca5207.cu
|
#include <cstdio>
#include <cuda_runtime.h>
#include "kosaraju.cuh"
/* Fill out the adjacency list and the reverse adjacency list according to
* the routes given. Each route represents a directed edge.
*/
__global__
void cudaAirportAdjacencyKernel(int *dev_routes,
int *dev_adj,
int *dev_radj,
int n_ports,
int n_routes) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < n_routes) {
int first = dev_routes[2 * i];
int second = dev_routes[2 * i + 1];
dev_adj[first * n_ports + second] = 1;
dev_radj[second * n_ports + first] = 1;
i += blockDim.x * gridDim.x;
}
}
/* Wrapper function to call cudaAirportAdjacencyKernel. */
void cudaCallAirportAdjacencyKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
int *dev_routes,
int *dev_adj,
int *dev_radj,
int n_ports,
int n_routes) {
cudaAirportAdjacencyKernel<<<blocks, threadsPerBlock>>>
(dev_routes, dev_adj, dev_radj, n_ports, n_routes);
}
/* Remove any vertices with in-degree and out-degree 0, just for optimization. */
__global__
void cudaTrimGraph(int *m,
int *row_sum,
bool *mark,
int n_ports) {
// For i = 0 to n_ports - 1 inclusive, achieve the following:
// row_sum[i] = sum from j = 0 to n_ports - 1 of m[i * n_ports + j] * !mark[j]
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < n_ports) {
int total = 0;
for (int j = 0; j < n_ports; j++) {
total += m[i * n_ports + j] * !(mark[j]);
}
row_sum[i] = total;
i += blockDim.x * gridDim.x;
}
}
void cudaCallTrimGraph(const unsigned int blocks,
const unsigned int threadsPerBlock,
int *adj,
int *row_sum,
bool *mark,
int n_ports) {
cudaTrimGraph<<<blocks, threadsPerBlock>>>(adj, row_sum, mark, n_ports);
}
__global__
void cudaBFSKernel(int *adj,
bool *frontier,
bool *visited,
int n_ports) {
// Do the BFS search
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < n_ports) {
if (frontier[tid]) {
frontier[tid] = false;
visited[tid] = true;
for (int i = 0; i < n_ports; i++) {
if (adj[tid * n_ports + i] && !visited[i]) {
frontier[i] = true;
}
}
}
tid += blockDim.x * gridDim.x;
}
}
/* Zeroes dev_flag[0] if the frontier array still contains any true values. */
__global__
void cudaContainsTrueKernel(bool *frontier,
int *dev_flag,
int n_ports) {
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < n_ports) {
if (frontier[tid]) {
dev_flag[0] *= 0;
}
tid += blockDim.x * gridDim.x;
}
}
/* Wrapper function to perform BFS. */
void cudaCallBFSKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
int *adj,
bool *visited,
bool *dev_frontier,
int start_port,
int n_ports,
int *dev_flag) {
int *flag = (int *) malloc(sizeof(int));
while (true) {
for (int i = 0; i < n_ports; i++) {
cudaBFSKernel<<<blocks, threadsPerBlock>>>
(adj, dev_frontier, visited, n_ports);
}
cudaContainsTrueKernel<<<blocks, threadsPerBlock>>>
(dev_frontier, dev_flag, n_ports);
cudaMemcpy(flag, dev_flag, sizeof(int), cudaMemcpyDeviceToHost);
if (flag[0]) {
break;
}
}
free(flag);
}
/* Fill out an array, one value for each airport. If an index i is some
* representative node of an SCC (that is not the starting airport) and we have
* that dev_zeroes[i] = 0 at the end of this kernel, then that means that
* index represents an airport that is a representative node of an SCC that has
* no incoming edges.
*/
__global__
void cudaFindDegreeZeroSCCKernel(int *adj,
int *radj,
int *reps,
int *dev_zeroes,
int start_port,
int n_ports) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
dev_zeroes[start_port] = 1;
while (i < n_ports) {
unsigned int curr_rep = reps[i];
for(int j = 0; j < n_ports; j++) {
if (radj[i * n_ports + j] == 1 && reps[j] != curr_rep) {
dev_zeroes[curr_rep] = 1;
break;
}
}
i += blockDim.x * gridDim.x;
}
}
/* Find number of representative nodes that have in-degree 0 (excluding
* starting airport). This is then the final answer to our algorithm.
*/
__global__
void cudaFindAllZeroesKernel(int *dev_reps,
int *dev_zeroes,
int *dev_total,
int n_ports){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < n_ports) {
if(dev_reps[i] == i && dev_zeroes[i] == 0) {
atomicAdd(dev_total, 1);
}
i += blockDim.x * gridDim.x;
}
}
/* Wrapper function to call cudaFindDegreeZeroSCCKernel and
* cudaFindAllZeroesKernel.
*/
void cudaCallFindDegreeZeroSCCKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
int *dev_adj,
int *dev_radj,
int *dev_reps,
int *dev_zeroes,
int *dev_total,
int start_port,
int n_ports) {
cudaFindDegreeZeroSCCKernel<<<blocks, threadsPerBlock>>>
(dev_adj, dev_radj, dev_reps, dev_zeroes, start_port, n_ports);
cudaFindAllZeroesKernel<<<blocks, threadsPerBlock>>>
(dev_reps, dev_zeroes, dev_total, n_ports);
}
|
04b8c65beb0d7a04a7f5ff43c7d5dbaec983e484.hip
|
// !!! This is a file automatically generated by hipify!!!
//
// Created by songzeceng on 2020/11/13.
// nvcc DynamicParallelTest.cu -gencode arch=compute_61,code=sm_61 -rdc=true -o DynamicParallelTest
#include "hip/hip_runtime.h"
#include "stdio.h"
__device__ void printArray(int *data, int n) {
for (int i = 0; i < n; i++) {
printf("%d\t", data[i]);
}
}
__global__ void child_launch(int *data, int n) {
// data[threadIdx.x] = data[threadIdx.x] + 1000;
printf("Hello ");
}
__global__ void parent_launch(int *data, int n) {
// data[threadIdx.x] = threadIdx.x;
//
// __syncthreads();
//
// printArray(data, n);
//
// if (threadIdx.x == 0) {
// child_launch<<<1, 6>>>(data, n);
// }
//#endif
hipLaunchKernelGGL(( child_launch), dim3(1), dim3(1), 0, 0, data, n);
hipDeviceSynchronize();
printf(" World!\n");
int *d = (int *) malloc(10 * sizeof(int));
int *a = (int *) malloc(20 * sizeof(int));
int *a_1;
hipMalloc(&a_1, 50 * sizeof(int));
int *a_2;
hipMalloc(&a_2, 90 * sizeof(int));
free(d);
free(a);
hipFree(a_1);
hipFree(a_2);
// printArray(data, n);
//
// __syncthreads();
}
int main() {
hipSetDevice(0);
int size = 6;
int *h_data = (int *) malloc(size * sizeof(int));
int *d_data;
for (int i = 0; i < size; i++) {
h_data[i] = 0;
}
hipMalloc(&d_data, size * sizeof(int));
hipMemcpy(d_data, h_data, size * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( parent_launch), dim3(1), dim3(1), 0, 0, d_data, size);
hipDeviceSynchronize();
hipFree(d_data);
free(h_data);
return 0;
}
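// A small sketch (hypothetical, not part of the original test): the in-kernel
// malloc()/free() calls in parent_launch draw from the runtime's device heap,
// which has a modest default size. If the kernel needed larger allocations,
// the heap could be grown from the host before the launch, for example:
void exampleGrowDeviceHeap() {
    size_t heapBytes = 0;
    hipDeviceGetLimit(&heapBytes, hipLimitMallocHeapSize);        // query current limit
    hipDeviceSetLimit(hipLimitMallocHeapSize, 32 * 1024 * 1024);  // assumed 32 MiB budget
}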
|
04b8c65beb0d7a04a7f5ff43c7d5dbaec983e484.cu
|
//
// Created by songzeceng on 2020/11/13.
// Build with: nvcc DynamicParallelTest.cu -gencode arch=compute_61,code=sm_61 -rdc=true -o DynamicParallelTest
#include "cuda_runtime.h"
#include "stdio.h"
__device__ void printArray(int *data, int n) {
for (int i = 0; i < n; i++) {
printf("%d\t", data[i]);
}
}
__global__ void child_launch(int *data, int n) {
// data[threadIdx.x] = data[threadIdx.x] + 1000;
printf("Hello ");
}
__global__ void parent_launch(int *data, int n) {
// data[threadIdx.x] = threadIdx.x;
//
// __syncthreads();
//
// printArray(data, n);
//
// if (threadIdx.x == 0) {
// child_launch<<<1, 6>>>(data, n);
// }
//#endif
child_launch<<<1, 1>>>(data, n);
cudaDeviceSynchronize();
printf(" World!\n");
int *d = (int *) malloc(10 * sizeof(int));
int *a = (int *) malloc(20 * sizeof(int));
int *a_1;
cudaMalloc(&a_1, 50 * sizeof(int));
int *a_2;
cudaMalloc(&a_2, 90 * sizeof(int));
free(d);
free(a);
cudaFree(a_1);
cudaFree(a_2);
// printArray(data, n);
//
// __syncthreads();
}
int main() {
cudaSetDevice(0);
int size = 6;
int *h_data = (int *) malloc(size * sizeof(int));
int *d_data;
for (int i = 0; i < size; i++) {
h_data[i] = 0;
}
cudaMalloc(&d_data, size * sizeof(int));
cudaMemcpy(d_data, h_data, size * sizeof(int), cudaMemcpyHostToDevice);
parent_launch<<<1, 1>>>(d_data, size);
cudaDeviceSynchronize();
cudaFree(d_data);
free(h_data);
return 0;
}
|
ecb69858750bfb98a85ce349fd1e7613d1024ce3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2019 Yan Yan
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
// clang-format off
// TODO: make spconv_utils.h order agnostic
#include "../spconv_utils.h"
// clang-format on
#include <utils/spconv/spconv/mp_helper.h>
#include <utils/spconv/spconv/reordering.h>
#include <utils/spconv/tensorview/helper_launch.h>
#include <utils/spconv/tensorview/tensorview.h>
#include <chrono>
#include <limits>
#include <spconv/reordering.cuh>
#include <type_traits>
#include <utils/spconv/tensorview/helper_kernel.cuh>
#include "pytorch_cuda_helper.hpp"
namespace functor {
template <typename scalar_t, typename Index>
struct SparseGatherFunctor<tv::TorchGPU, scalar_t, Index> {
using vecload_type_t =
std::conditional_t<std::is_same<scalar_t, at::Half>::value, int2, int4>;
using kernel_block_t = mp_list_c<int, 64, 32, 16>;
void operator()(const tv::TorchGPU &d, tv::TensorView<scalar_t> buffer,
tv::TensorView<const scalar_t> features,
tv::TensorView<const Index> indices, int size) {
if (size <= 0) return;
int numPlanes = features.dim(1);
bool notFound = true;
constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(scalar_t);
mp_for_each<kernel_block_t>([=, &buffer, &features, &indices,
¬Found](auto NumTLP) {
constexpr int NumILP = NumTLP / 4;
int nHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (nHotBlock >= NumTLP) {
hipLaunchKernelGGL(( gatherVecBlockKernel<scalar_t, Index, int(NumTLP), NumILP,
vecload_type_t>)
, dim3(dim3(numPlanes / NumTLP, size / NumTLP)),
dim3(dim3(NumTLP / vecloadFactor, NumTLP / NumILP)), 0,
d.getStream(), buffer.data(), features.data(),
indices.data(), nHotBlock,
numPlanes / vecloadFactor);
TV_CHECK_CUDA_ERR();
}
if (size - nHotBlock > 0) {
hipLaunchKernelGGL(( gatherVecKernel<scalar_t, Index, int(NumTLP), NumILP,
vecload_type_t>)
, dim3(dim3(1, numPlanes / NumTLP)),
dim3(dim3(NumTLP / NumILP, NumTLP / vecloadFactor)), 0,
d.getStream(), buffer.data() + nHotBlock * numPlanes,
features.data(), indices.data() + nHotBlock,
size - nHotBlock,
numPlanes / vecloadFactor);
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
hipLaunchKernelGGL(( gatherGenericKernel<scalar_t, Index, NumTLP, NumILP>)
, dim3(tv::launch::DivUp(size, NumTLP),
tv::launch::DivUp(numPlanes, NumTLP)),
dim3(dim3(NumTLP / NumILP, NumTLP)), 0, d.getStream(),
buffer.data(), features.data(), indices.data(), size, numPlanes);
TV_CHECK_CUDA_ERR();
}
}
};
template <typename scalar_t, typename Index>
struct SparseScatterAddFunctor<tv::TorchGPU, scalar_t, Index> {
using vecload_type_t =
std::conditional_t<std::is_same<scalar_t, at::Half>::value, int2, int4>;
using kernel_block_t = mp_list_c<int, 64, 32, 16>;
void operator()(const tv::TorchGPU &d, tv::TensorView<scalar_t> outFeatures,
tv::TensorView<const scalar_t> buffer,
tv::TensorView<const Index> indices, int size, bool stable) {
if (size <= 0) return;
int numPlanes = outFeatures.dim(1);
bool notFound = true;
constexpr int vecloadFactor =
sizeof(vecload_type_t) / sizeof(scalar_t); // important for half.
mp_for_each<kernel_block_t>([=, &d, &outFeatures, &buffer, &indices,
¬Found](auto NumTLP) {
constexpr int NumILP = NumTLP / 4;
int nHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (nHotBlock >= NumTLP) {
hipLaunchKernelGGL(( scatterAddVecBlockKernel<scalar_t, Index, int(NumTLP), NumILP,
vecload_type_t>)
, dim3(dim3(numPlanes / NumTLP, size / NumTLP)),
dim3(dim3(NumTLP / vecloadFactor, NumTLP / NumILP)), 0,
d.getStream(), outFeatures.data(), buffer.data(),
indices.data(), nHotBlock,
numPlanes / vecloadFactor);
TV_CHECK_CUDA_ERR();
}
if (size - nHotBlock > 0) {
hipLaunchKernelGGL(( scatterAddGenericKernel<scalar_t, Index, int(NumTLP), NumILP>)
, dim3(dim3(1, numPlanes / NumTLP)), dim3(dim3(NumTLP / NumILP, NumTLP)),
0, d.getStream(),
outFeatures.data(), buffer.data() + nHotBlock * numPlanes,
indices.data() + nHotBlock, size - nHotBlock, numPlanes);
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
hipLaunchKernelGGL(( scatterAddGenericKernel<scalar_t, Index, NumTLP, NumILP>)
, dim3(tv::launch::DivUp(size, NumTLP),
tv::launch::DivUp(numPlanes, NumTLP)),
dim3(dim3(NumTLP / NumILP, NumTLP)), 0, d.getStream(),
outFeatures.data(), buffer.data(), indices.data(), size,
numPlanes);
TV_CHECK_CUDA_ERR();
}
}
};
} // namespace functor
#define DECLARE_GPU_SPECS_T_INDEX(scalar_t, Index) \
template struct functor::SparseGatherFunctor<tv::TorchGPU, scalar_t, Index>; \
template struct functor::SparseScatterAddFunctor<tv::TorchGPU, scalar_t, \
Index>;
#define DECLARE_GPU_SPECS(scalar_t) DECLARE_GPU_SPECS_T_INDEX(scalar_t, int);
DECLARE_GPU_SPECS(float);
DECLARE_GPU_SPECS(double);
DECLARE_GPU_SPECS(at::Half);
#undef DECLARE_GPU_SPECS
#undef DECLARE_GPU_SPECS_T_INDEX
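// A naive reference sketch (added for documentation only, not used anywhere):
// the vectorized gather kernels above implement
//   buffer[i][p] = features[indices[i]][p]   for i < size, p < numPlanes,
// and the scatter-add kernels implement the transposed accumulation
//   outFeatures[indices[i]][p] += buffer[i][p].
template <typename scalar_t, typename Index>
__global__ void naiveGatherReference(scalar_t *buffer, const scalar_t *features,
                                      const Index *indices, int size,
                                      int numPlanes) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size;
       i += blockDim.x * gridDim.x) {
    for (int p = 0; p < numPlanes; ++p) {
      buffer[i * numPlanes + p] = features[indices[i] * numPlanes + p];
    }
  }
}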
|
ecb69858750bfb98a85ce349fd1e7613d1024ce3.cu
|
// Copyright 2019 Yan Yan
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
// clang-format off
// TODO: make spconv_utils.h order agnostic
#include "../spconv_utils.h"
// clang-format on
#include <utils/spconv/spconv/mp_helper.h>
#include <utils/spconv/spconv/reordering.h>
#include <utils/spconv/tensorview/helper_launch.h>
#include <utils/spconv/tensorview/tensorview.h>
#include <chrono>
#include <limits>
#include <spconv/reordering.cuh>
#include <type_traits>
#include <utils/spconv/tensorview/helper_kernel.cuh>
#include "pytorch_cuda_helper.hpp"
namespace functor {
template <typename scalar_t, typename Index>
struct SparseGatherFunctor<tv::TorchGPU, scalar_t, Index> {
using vecload_type_t =
std::conditional_t<std::is_same<scalar_t, at::Half>::value, int2, int4>;
using kernel_block_t = mp_list_c<int, 64, 32, 16>;
void operator()(const tv::TorchGPU &d, tv::TensorView<scalar_t> buffer,
tv::TensorView<const scalar_t> features,
tv::TensorView<const Index> indices, int size) {
if (size <= 0) return;
int numPlanes = features.dim(1);
bool notFound = true;
constexpr int vecloadFactor = sizeof(vecload_type_t) / sizeof(scalar_t);
mp_for_each<kernel_block_t>([=, &buffer, &features, &indices,
¬Found](auto NumTLP) {
constexpr int NumILP = NumTLP / 4;
int nHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (nHotBlock >= NumTLP) {
gatherVecBlockKernel<scalar_t, Index, int(NumTLP), NumILP,
vecload_type_t>
<<<dim3(numPlanes / NumTLP, size / NumTLP),
dim3(NumTLP / vecloadFactor, NumTLP / NumILP), 0,
d.getStream()>>>(buffer.data(), features.data(),
indices.data(), nHotBlock,
numPlanes / vecloadFactor);
TV_CHECK_CUDA_ERR();
}
if (size - nHotBlock > 0) {
gatherVecKernel<scalar_t, Index, int(NumTLP), NumILP,
vecload_type_t>
<<<dim3(1, numPlanes / NumTLP),
dim3(NumTLP / NumILP, NumTLP / vecloadFactor), 0,
d.getStream()>>>(buffer.data() + nHotBlock * numPlanes,
features.data(), indices.data() + nHotBlock,
size - nHotBlock,
numPlanes / vecloadFactor);
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
gatherGenericKernel<scalar_t, Index, NumTLP, NumILP>
<<<dim3(tv::launch::DivUp(size, NumTLP),
tv::launch::DivUp(numPlanes, NumTLP)),
dim3(NumTLP / NumILP, NumTLP), 0, d.getStream()>>>(
buffer.data(), features.data(), indices.data(), size, numPlanes);
TV_CHECK_CUDA_ERR();
}
}
};
template <typename scalar_t, typename Index>
struct SparseScatterAddFunctor<tv::TorchGPU, scalar_t, Index> {
using vecload_type_t =
std::conditional_t<std::is_same<scalar_t, at::Half>::value, int2, int4>;
using kernel_block_t = mp_list_c<int, 64, 32, 16>;
void operator()(const tv::TorchGPU &d, tv::TensorView<scalar_t> outFeatures,
tv::TensorView<const scalar_t> buffer,
tv::TensorView<const Index> indices, int size, bool stable) {
if (size <= 0) return;
int numPlanes = outFeatures.dim(1);
bool notFound = true;
constexpr int vecloadFactor =
sizeof(vecload_type_t) / sizeof(scalar_t); // important for half.
mp_for_each<kernel_block_t>([=, &d, &outFeatures, &buffer, &indices,
¬Found](auto NumTLP) {
constexpr int NumILP = NumTLP / 4;
int nHotBlock = (size / NumTLP) * NumTLP;
if (notFound) {
if (numPlanes % NumTLP == 0) {
if (nHotBlock >= NumTLP) {
scatterAddVecBlockKernel<scalar_t, Index, int(NumTLP), NumILP,
vecload_type_t>
<<<dim3(numPlanes / NumTLP, size / NumTLP),
dim3(NumTLP / vecloadFactor, NumTLP / NumILP), 0,
d.getStream()>>>(outFeatures.data(), buffer.data(),
indices.data(), nHotBlock,
numPlanes / vecloadFactor);
TV_CHECK_CUDA_ERR();
}
if (size - nHotBlock > 0) {
scatterAddGenericKernel<scalar_t, Index, int(NumTLP), NumILP>
<<<dim3(1, numPlanes / NumTLP), dim3(NumTLP / NumILP, NumTLP),
0, d.getStream()>>>(
outFeatures.data(), buffer.data() + nHotBlock * numPlanes,
indices.data() + nHotBlock, size - nHotBlock, numPlanes);
TV_CHECK_CUDA_ERR();
}
notFound = false;
}
}
});
if (notFound) {
constexpr int NumTLP = 64;
constexpr int NumILP = NumTLP / 4;
scatterAddGenericKernel<scalar_t, Index, NumTLP, NumILP>
<<<dim3(tv::launch::DivUp(size, NumTLP),
tv::launch::DivUp(numPlanes, NumTLP)),
dim3(NumTLP / NumILP, NumTLP), 0, d.getStream()>>>(
outFeatures.data(), buffer.data(), indices.data(), size,
numPlanes);
TV_CHECK_CUDA_ERR();
}
}
};
} // namespace functor
#define DECLARE_GPU_SPECS_T_INDEX(scalar_t, Index) \
template struct functor::SparseGatherFunctor<tv::TorchGPU, scalar_t, Index>; \
template struct functor::SparseScatterAddFunctor<tv::TorchGPU, scalar_t, \
Index>;
#define DECLARE_GPU_SPECS(scalar_t) DECLARE_GPU_SPECS_T_INDEX(scalar_t, int);
DECLARE_GPU_SPECS(float);
DECLARE_GPU_SPECS(double);
DECLARE_GPU_SPECS(at::Half);
#undef DECLARE_GPU_SPECS
#undef DECLARE_GPU_SPECS_T_INDEX
|
b9430f1578cafbae2f0ea47a1eb36c7cc02a26bb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
//#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
__global__ void global_reduce_kernel(float * d_out, float * d_in)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
printf("global_reduce_kernel myId=%d,threadIdx.x=%d,blockDim.x=%d, blockIdx.x=%d\n", myId, threadIdx.x, blockDim.x, blockIdx.x);
int tid = threadIdx.x;
// do reduction in global mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
printf("myId=%d,s=%d,d_in[myId]=%f,d_in[myId + s]=%f,+=%f\n", myId, s, d_in[myId], d_in[myId + s], d_in[myId] + d_in[myId + s]);
d_in[myId] += d_in[myId + s];
}
__syncthreads(); // make sure all adds at one stage are done!
printf("myId=%d\n", myId);
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
d_out[blockIdx.x] = d_in[myId];
}
}
__global__ void shmem_reduce_kernel(float * d_out, const float * d_in)
{
// sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
extern __shared__ float sdata[];
int myId = threadIdx.x + blockDim.x * blockIdx.x;
printf("shmem_reduce_kernel myId=%d,threadIdx.x=%d,blockDim.x=%d, blockIdx.x=%d\n", myId, threadIdx.x, blockDim.x, blockIdx.x);
int tid = threadIdx.x;
// load shared mem from global mem
sdata[tid] = d_in[myId];
__syncthreads(); // make sure entire block is loaded!
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
printf("s=%d\n", s);
sdata[tid] += sdata[tid + s];
}
__syncthreads(); // make sure all adds at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
d_out[blockIdx.x] = sdata[0];
}
}
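// Note (added for clarity): the halving loops in both kernels above assume
// blockDim.x is a power of two; if an odd s is halved, the element at the odd
// end is never added and the reduction silently drops data.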
void reduce(float * d_out, float * d_intermediate, float * d_in,
int size, bool usesSharedMemory)
{
// assumes that size is not greater than maxThreadsPerBlock^2
// and that size is a multiple of maxThreadsPerBlock
const int maxThreadsPerBlock = 32;
int threads = maxThreadsPerBlock;
int blocks = size / maxThreadsPerBlock;
if (usesSharedMemory)
{
hipLaunchKernelGGL(( shmem_reduce_kernel), dim3(blocks), dim3(threads), threads * sizeof(float), 0,
d_intermediate, d_in);
hipDeviceSynchronize();
}
else
{
hipLaunchKernelGGL(( global_reduce_kernel), dim3(blocks), dim3(threads), 0, 0,
d_intermediate, d_in);
hipDeviceSynchronize();
}
// now we're down to one block left, so reduce it
threads = blocks; // launch one thread for each block in prev step
blocks = 1;
if (usesSharedMemory)
{
hipLaunchKernelGGL(( shmem_reduce_kernel), dim3(blocks), dim3(threads), threads * sizeof(float), 0,
d_out, d_intermediate);
hipDeviceSynchronize();
}
else
{
hipLaunchKernelGGL(( global_reduce_kernel), dim3(blocks), dim3(threads), 0, 0,
d_out, d_intermediate);
hipDeviceSynchronize();
}
}
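/* Worked example of the launch shapes above (added; uses the ARRAY_SIZE = 32
 * from main below): stage one runs size / 32 = 1 block of 32 threads and
 * writes one partial sum per block into d_intermediate; stage two runs a
 * single block whose thread count equals the number of stage-one blocks
 * (here 1) to fold the partials into d_out. Hence the stated assumptions
 * size <= 32 * 32 and size % 32 == 0. */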
//int main1(int argc, char **argv)
int main()
{
// Get the number of GPUs
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
printf("deviceCount=%d\n", deviceCount);
int dev = 0;
hipSetDevice(dev);
hipDeviceProp_t devProps;
if (hipGetDeviceProperties(&devProps, dev) == 0)
{
printf("Using device %d:\n", dev);
printf("%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
devProps.name, (int)devProps.totalGlobalMem,
(int)devProps.major, (int)devProps.minor,
(int)devProps.clockRate);
}
int repet = 1;
const int ARRAY_SIZE = 32; // 20
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
float sum = 0.0f;
for(int i = 0; i < ARRAY_SIZE; i++) {
// generate random float in [-1.0f, 1.0f]
h_in[i] = -1.0f + (float)rand()/((float)RAND_MAX/2.0f);
sum += h_in[i];
}
// declare GPU memory pointers
float * d_in, * d_intermediate, * d_out;
// allocate GPU memory
hipMalloc((void **) &d_in, ARRAY_BYTES);
hipMalloc((void **) &d_intermediate, ARRAY_BYTES); // overallocated
hipMalloc((void **) &d_out, sizeof(float));
// transfer the input array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
int whichKernel = 0;
//if (argc == 2) {
// whichKernel = atoi(argv[1]);
//}
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// launch the kernel
switch(whichKernel) {
case 0:
printf("Running global reduce\n");
hipEventRecord(start, 0);
for (int i = 0; i < repet; i++)
{
reduce(d_out, d_intermediate, d_in, ARRAY_SIZE, false);
}
hipEventRecord(stop, 0);
break;
case 1:
printf("Running reduce with shared mem\n");
hipEventRecord(start, 0);
for (int i = 0; i < repet; i++)
{
reduce(d_out, d_intermediate, d_in, ARRAY_SIZE, true);
}
hipEventRecord(stop, 0);
break;
default:
fprintf(stderr, "error: ran no kernel\n");
exit(EXIT_FAILURE);
}
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
elapsedTime /= (float)repet; // average over the repet timed runs
// copy back the sum from GPU
float h_out;
hipMemcpy(&h_out, d_out, sizeof(float), hipMemcpyDeviceToHost);
printf("sum=%f\n", sum);
printf("h_out=%f\n", h_out);
printf("average time elapsed: %f\n", elapsedTime);
// free GPU memory allocation
hipFree(d_in);
hipFree(d_intermediate);
hipFree(d_out);
return 0;
}
|
b9430f1578cafbae2f0ea47a1eb36c7cc02a26bb.cu
|
#include <stdio.h>
#include <stdlib.h>
//#include <cuda_runtime.h>
#include <cuda.h>
__global__ void global_reduce_kernel(float * d_out, float * d_in)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
printf("global_reduce_kernel myId=%d,threadIdx.x=%d,blockDim.x=%d, blockIdx.x=%d\n", myId, threadIdx.x, blockDim.x, blockIdx.x);
int tid = threadIdx.x;
// do reduction in global mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
printf("myId=%d,s=%d,d_in[myId]=%f,d_in[myId + s]=%f,+=%f\n", myId, s, d_in[myId], d_in[myId + s], d_in[myId] + d_in[myId + s]);
d_in[myId] += d_in[myId + s];
}
__syncthreads(); // make sure all adds at one stage are done!
printf("myId=%d\n", myId);
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
d_out[blockIdx.x] = d_in[myId];
}
}
__global__ void shmem_reduce_kernel(float * d_out, const float * d_in)
{
// sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
extern __shared__ float sdata[];
int myId = threadIdx.x + blockDim.x * blockIdx.x;
printf("shmem_reduce_kernel myId=%d,threadIdx.x=%d,blockDim.x=%d, blockIdx.x=%d\n", myId, threadIdx.x, blockDim.x, blockIdx.x);
int tid = threadIdx.x;
// load shared mem from global mem
sdata[tid] = d_in[myId];
__syncthreads(); // make sure entire block is loaded!
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
printf("s=%d\n", s);
sdata[tid] += sdata[tid + s];
}
__syncthreads(); // make sure all adds at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
d_out[blockIdx.x] = sdata[0];
}
}
void reduce(float * d_out, float * d_intermediate, float * d_in,
int size, bool usesSharedMemory)
{
// assumes that size is not greater than maxThreadsPerBlock^2
// and that size is a multiple of maxThreadsPerBlock
const int maxThreadsPerBlock = 32;
int threads = maxThreadsPerBlock;
int blocks = size / maxThreadsPerBlock;
if (usesSharedMemory)
{
shmem_reduce_kernel<<<blocks, threads, threads * sizeof(float)>>>
(d_intermediate, d_in);
cudaDeviceSynchronize();
}
else
{
global_reduce_kernel<<<blocks, threads>>>
(d_intermediate, d_in);
cudaDeviceSynchronize();
}
// now we're down to one block left, so reduce it
threads = blocks; // launch one thread for each block in prev step
blocks = 1;
if (usesSharedMemory)
{
shmem_reduce_kernel<<<blocks, threads, threads * sizeof(float)>>>
(d_out, d_intermediate);
cudaDeviceSynchronize();
}
else
{
global_reduce_kernel<<<blocks, threads>>>
(d_out, d_intermediate);
cudaDeviceSynchronize();
}
}
//int main1(int argc, char **argv)
int main()
{
// Get the number of GPUs
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
printf("deviceCount=%d\n", deviceCount);
int dev = 0;
cudaSetDevice(dev);
cudaDeviceProp devProps;
if (cudaGetDeviceProperties(&devProps, dev) == 0)
{
printf("Using device %d:\n", dev);
printf("%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
devProps.name, (int)devProps.totalGlobalMem,
(int)devProps.major, (int)devProps.minor,
(int)devProps.clockRate);
}
int repet = 1;
const int ARRAY_SIZE = 32; // 20
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
float sum = 0.0f;
for(int i = 0; i < ARRAY_SIZE; i++) {
// generate random float in [-1.0f, 1.0f]
h_in[i] = -1.0f + (float)rand()/((float)RAND_MAX/2.0f);
sum += h_in[i];
}
// declare GPU memory pointers
float * d_in, * d_intermediate, * d_out;
// allocate GPU memory
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_intermediate, ARRAY_BYTES); // overallocated
cudaMalloc((void **) &d_out, sizeof(float));
// transfer the input array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
int whichKernel = 0;
//if (argc == 2) {
// whichKernel = atoi(argv[1]);
//}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// launch the kernel
switch(whichKernel) {
case 0:
printf("Running global reduce\n");
cudaEventRecord(start, 0);
for (int i = 0; i < repet; i++)
{
reduce(d_out, d_intermediate, d_in, ARRAY_SIZE, false);
}
cudaEventRecord(stop, 0);
break;
case 1:
printf("Running reduce with shared mem\n");
cudaEventRecord(start, 0);
for (int i = 0; i < repet; i++)
{
reduce(d_out, d_intermediate, d_in, ARRAY_SIZE, true);
}
cudaEventRecord(stop, 0);
break;
default:
fprintf(stderr, "error: ran no kernel\n");
exit(EXIT_FAILURE);
}
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
elapsedTime /= (float)repet; // average over the repet timed runs
// copy back the sum from GPU
float h_out;
cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
printf("sum=%f\n", sum);
printf("h_out=%f\n", h_out);
printf("average time elapsed: %f\n", elapsedTime);
// free GPU memory allocation
cudaFree(d_in);
cudaFree(d_intermediate);
cudaFree(d_out);
return 0;
}
|
8ffbe6c8958902c56e5409ec63f77e12fed33d5d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <vector>
#include <math.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime_api.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <torch/extension.h>
#include "softmax_apex.h"
#include "gemm_blaslt.cuh"
#include "strided_batched_gemm_blaslt.cuh"
// symbol to be automatically resolved by PyTorch libs
// extern THCState *state;
namespace multihead_attn {
namespace encdec_bias {
namespace cublaslt {
std::vector<torch::Tensor> fwd_cuda(
bool is_training,
int heads,
torch::Tensor const& inputs_q,
torch::Tensor const& inputs_kv,
torch::Tensor const& input_weights_q,
torch::Tensor const& input_weights_kv,
torch::Tensor const& output_weights,
torch::Tensor const& input_biases_q,
torch::Tensor const& input_biases_kv,
torch::Tensor const& output_biases,
torch::Tensor const& pad_mask,
float dropout_prob,
torch::Tensor lt_workspace )
{
const int embed_dim = inputs_q.size(2);
const int sequences = inputs_q.size(1);
const int q_seq_len = inputs_q.size(0);
const int k_seq_len = inputs_kv.size(0);
const int batches_q = sequences * q_seq_len;
const int batches_kv = sequences * k_seq_len;
const int head_dim = embed_dim / heads;
const int output_lin_q_dim = embed_dim;
const int output_lin_kv_dim = 2 * embed_dim;
const int attn_batches = heads * sequences;
const int lead_dim_q = attn_batches * head_dim;
const int lead_dim_kv = attn_batches * 2 *head_dim;
const int batch_stride_q = head_dim;
const int batch_stride_kv = 2 * head_dim;
const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
const float alpha = 1.0;
const float beta_zero = 0.0;
// const float beta_one = 1.0;
const float scale = 1.0 / sqrt(static_cast<float>(head_dim));
// const half alpha = __float2half_rn(alpha);
// const half beta_zero = __float2half_rn(beta_zero);
// const half beta_one = __float2half_rn(beta_one);
// const half scale = __float2half_rn(scale);
// printf("Input kernel sizes: %d %d %d \n",
// inputs_kv.size(0), inputs_kv.size(1), inputs_kv.size(2));
// There is no reason to use more than one stream as every kernel is
// sequentially dependent
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
hipblasSetStream(handle, stream);
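// Overall dataflow of this forward pass (summary added for clarity):
// Q = Linear_q(inputs_q), [K;V] = Linear_kv(inputs_kv),
// S = Q K^T / sqrt(head_dim), P = dropout(softmax(S masked by pad_mask)),
// context = P V, outputs = Linear_out(context).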
// 3 Intermediate Results + Output (Note: dropout intermediates are generated by ATen library code)
auto act_options = inputs_q.options().requires_grad(false);
auto mask_options = act_options.dtype(torch::kUInt8);
torch::Tensor input_lin_q_results = torch::empty({q_seq_len, sequences, output_lin_q_dim}, act_options);
torch::Tensor input_lin_kv_results = torch::empty({k_seq_len, sequences, output_lin_kv_dim}, act_options);
torch::Tensor attn_scores = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_mask = torch::empty({attn_batches, q_seq_len, k_seq_len}, mask_options);
torch::Tensor matmul2_results = torch::empty({q_seq_len, attn_batches, head_dim}, act_options);
torch::Tensor outputs = torch::empty_like(inputs_q, act_options);
// Input Linear Results Pointers to Q, K, and V of interleaved activations
void* q_lin_results_ptr = static_cast<void*>(input_lin_q_results.data_ptr());
void* k_lin_results_ptr = static_cast<void*>(input_lin_kv_results.data_ptr());
void* v_lin_results_ptr = static_cast<void*>(static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim);
void* dropout_results_ptr = static_cast<void*>(dropout_results.data_ptr());
// Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax)
void* attn_scores_ptr = static_cast<void*>(attn_scores.data_ptr());
void* lt_workspace_ptr = static_cast<void*>(lt_workspace.data_ptr());
// TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
//
// input_lin_q_results.copy_(input_biases_q);
// // Input Linear Q Fwd
// TORCH_CUDABLAS_CHECK(hipblasGemmEx(handle,
// HIPBLAS_OP_T, // A transpose
// HIPBLAS_OP_N, // B wo/ transpose
// output_lin_q_dim, // embed_dim
// batches_q, // bsz x len_q
// embed_dim, // embed_dim
// static_cast<const void*>(&alpha),
// static_cast<const void*>(input_weights_q.data_ptr()), // weight emb_out x emb_in transposed
// HIP_R_16F,
// embed_dim, // lda so A has size [lda x m] -> [embed_dim x output_lin_q_dim]
// static_cast<const void*>(inputs_q.data_ptr()), // input Q
// HIP_R_16F,
// embed_dim, // ldb B has size [lda xn] -> [embed_dim x batches_q]
// static_cast<const void*>(&beta_one), // one
// q_lin_results_ptr, // C -> emb * B
// HIP_R_16F,
// output_lin_q_dim, // ldc C [lda x n] -> [embed_dim x batches_q]
// HIP_R_32F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
int cublas_status = 1;
cublas_status = gemm_bias_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_T,
HIPBLAS_OP_N,
output_lin_q_dim,
batches_q,
embed_dim,
&alpha, /* host pointer */
static_cast<const void*>(input_weights_q.data_ptr()),
embed_dim,
static_cast<const void*>(inputs_q.data_ptr()),
embed_dim,
&beta_zero, /* host pointer */
q_lin_results_ptr,
output_lin_q_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
true,
static_cast<const void*>(input_biases_q.data_ptr()));
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("GEMM Q forward failed with %d\n", cublas_status);
exit(0);
}
// input_lin_kv_results.copy_(input_biases_kv);
// // Input Linear KV Fwd
// TORCH_CUDABLAS_CHECK(hipblasGemmEx(handle,
// HIPBLAS_OP_T,
// HIPBLAS_OP_N,
// output_lin_kv_dim,
// batches_kv,
// embed_dim,
// static_cast<const void*>(&alpha),
// static_cast<const void*>(input_weights_kv.data_ptr()),
// HIP_R_16F,
// embed_dim,
// static_cast<const void*>(inputs_kv.data_ptr()),
// HIP_R_16F,
// embed_dim,
// static_cast<const void*>(&beta_one),
// k_lin_results_ptr,
// HIP_R_16F,
// output_lin_kv_dim,
// HIP_R_32F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
cublas_status = gemm_bias_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_T,
HIPBLAS_OP_N,
output_lin_kv_dim,
batches_kv,
embed_dim,
&alpha, /* host pointer */
static_cast<const void*>(input_weights_kv.data_ptr()),
embed_dim,
static_cast<const void*>(inputs_kv.data_ptr()),
embed_dim,
&beta_zero, /* host pointer */
k_lin_results_ptr,
output_lin_kv_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
true,
static_cast<const void*>(input_biases_kv.data_ptr()));
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("GEMM KV forward failed with %d\n", cublas_status);
exit(0);
}
// MatMul1 of Dot-Product Attention Plus scaling by 1/Sqrt(head size)
// TORCH_CUDABLAS_CHECK(hipblasGemmStridedBatchedEx(handle,
// HIPBLAS_OP_T,
// HIPBLAS_OP_N,
// k_seq_len,
// q_seq_len,
// head_dim,
// static_cast<const void*>(&scale),
// static_cast<const void*>(k_lin_results_ptr), // A:
// HIP_R_16F,
// lead_dim_kv, // lda
// batch_stride_kv, // stride A
// static_cast<const void*>(q_lin_results_ptr),
// HIP_R_16F,
// lead_dim_q,
// batch_stride_q,
// static_cast<const void*>(&beta_zero),
// static_cast<void*>(attn_scores_ptr), // C
// HIP_R_16F,
// k_seq_len,
// k_seq_len*q_seq_len,
// attn_batches,
// HIP_R_32F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_T,
HIPBLAS_OP_N,
k_seq_len,
q_seq_len,
head_dim,
&scale, /* host pointer */
static_cast<const void*>(k_lin_results_ptr),
lead_dim_kv,
batch_stride_kv,
static_cast<const void*>(q_lin_results_ptr),
lead_dim_q,
batch_stride_q,
&beta_zero, /* host pointer */
static_cast<void*>(attn_scores_ptr), // C
k_seq_len, // ldc
k_seq_len*q_seq_len, // stride c
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM QKV forward failed with %d\n", cublas_status);
exit(0);
}
// need to call padding from torch interface here.
// - infinity or - 10000?
attn_scores.view({sequences, heads, q_seq_len, k_seq_len}).masked_fill_(pad_mask,
-std::numeric_limits<float>::infinity());
bool softmax_success = false;
if (is_training && dropout_prob > 0.0f) {
// This function fuses softmax-dropout-pad (and dropout inplace)
softmax_success = dispatch_softmax_dropout<half, half, float>(
reinterpret_cast<half*>(dropout_results_ptr),
(is_training) ? reinterpret_cast<uint8_t*>(dropout_mask.data_ptr<uint8_t>()) : nullptr,
reinterpret_cast<const half*>(attn_scores_ptr),
dropout_elems,
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
1.0f-dropout_prob,
stream);
} else {
softmax_success = dispatch_softmax<half, half, float>(
reinterpret_cast<half*>(dropout_results_ptr), // this is actually softmax results, but making it consistent for the next function
reinterpret_cast<const half*>(attn_scores_ptr),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
stream); // pad batch strides
}
assert(softmax_success);
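// (Added note) The fused kernel performs the row-wise softmax over the
// k_seq_len dimension and, in training, applies what appears to be inverted
// dropout: kept entries are scaled by 1/(1 - dropout_prob) and the keep/drop
// decisions are stored in dropout_mask for the backward pass.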
// Matmul2
// matrix kv has size len_k * batch_size * (2 * heads * head_dim)
// dropout results [bsz*heads, len_q, len_k]
// matmul2_results is [len_q x attn_batches x head_dim]
// TORCH_CUDABLAS_CHECK(hipblasGemmStridedBatchedEx(handle,
// HIPBLAS_OP_N,
// HIPBLAS_OP_N,
// head_dim, // m
// q_seq_len, // n
// k_seq_len, // k
// static_cast<const void*>(&alpha),
// static_cast<const void*>(v_lin_results_ptr), // A:
// HIP_R_16F,
// lead_dim_kv, // lda
// batch_stride_kv, // stride A
// static_cast<const void*>(dropout_results.data_ptr()),
// HIP_R_16F,
// k_seq_len,
// k_seq_len*q_seq_len,
// static_cast<const void*>(&beta_zero),
// static_cast<void*>(matmul2_results.data_ptr()), // C
// HIP_R_16F,
// head_dim*attn_batches,
// head_dim,
// attn_batches,
// HIP_R_32F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
head_dim, // m
q_seq_len, // n
k_seq_len, // k
&alpha, /* host pointer */
static_cast<const void*>(v_lin_results_ptr), // A:
lead_dim_kv,
batch_stride_kv,
static_cast<const void*>(dropout_results.data_ptr()), // A:
k_seq_len,
k_seq_len*q_seq_len,
&beta_zero, /* host pointer */
static_cast<void*>(matmul2_results.data_ptr()), // C
head_dim*attn_batches,
head_dim,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM ATTNV forward failed with %d\n", cublas_status);
exit(0);
}
// outputs.copy_(output_biases);
// // Output Linear
// TORCH_CUDABLAS_CHECK(hipblasGemmEx(handle,
// HIPBLAS_OP_T,
// HIPBLAS_OP_N,
// embed_dim,
// batches_q,
// embed_dim,
// static_cast<const void*>(&alpha),
// static_cast<const void*>(output_weights.data_ptr()),
// HIP_R_16F,
// embed_dim,
// static_cast<const void*>(matmul2_results.data_ptr()),
// HIP_R_16F,
// embed_dim,
// static_cast<const void*>(&beta_one),
// static_cast<void*>(outputs.data_ptr()),
// HIP_R_16F,
// embed_dim,
// HIP_R_32F,
// //CUBLAS_GEMM_ALGO1_TENSOR_OP));
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
cublas_status = gemm_bias_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_T,
HIPBLAS_OP_N,
embed_dim,
batches_q,
embed_dim,
&alpha, /* host pointer */
static_cast<const void*>(output_weights.data_ptr()),
embed_dim,
static_cast<const void*>(matmul2_results.data_ptr()),
embed_dim,
&beta_zero, /* host pointer */
static_cast<void*>(outputs.data_ptr()),
embed_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
true,
static_cast<const void*>(output_biases.data_ptr()));
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("GEMM output forward failed with %d\n", cublas_status);
exit(0);
}
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
return {
input_lin_q_results,
input_lin_kv_results,
attn_scores,
dropout_results,
dropout_mask,
matmul2_results,
outputs
};
}
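// Note (added for clarity): fwd_compact_cuda below runs only the attention
// core (Q K^T, masked softmax/dropout, P V). The input projections are taken
// as precomputed arguments and the output projection is left to the caller.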
std::vector<torch::Tensor> fwd_compact_cuda(
bool is_training,
int heads,
torch::Tensor const& input_lin_q_results,
torch::Tensor const& input_lin_kv_results,
torch::Tensor const& pad_mask,
float dropout_prob,
torch::Tensor lt_workspace )
{
const int embed_dim = input_lin_q_results.size(2);
const int sequences = input_lin_q_results.size(1);
const int q_seq_len = input_lin_q_results.size(0);
const int k_seq_len = input_lin_kv_results.size(0);
const int batches_q = sequences * q_seq_len;
const int batches_kv = sequences * k_seq_len;
const int head_dim = embed_dim / heads;
const int output_lin_q_dim = embed_dim;
const int output_lin_kv_dim = 2 * embed_dim;
const int attn_batches = heads * sequences;
const int lead_dim_q = attn_batches * head_dim;
const int lead_dim_kv = attn_batches * 2 *head_dim;
const int batch_stride_q = head_dim;
const int batch_stride_kv = 2 * head_dim;
const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
const float alpha = 1.0;
const float beta_zero = 0.0;
const float scale = 1.0 / sqrt(static_cast<float>(head_dim));
// There is no reason to use more than one stream as every kernel is
// sequentially dependent
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
hipblasSetStream(handle, stream);
// 3 Intermediate Results + Output (Note: dropout intermediates are generated by ATen library code)
auto act_options = input_lin_q_results.options().requires_grad(false);
auto mask_options = act_options.dtype(torch::kUInt8);
torch::Tensor attn_scores = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_mask = torch::empty({attn_batches, q_seq_len, k_seq_len}, mask_options);
torch::Tensor matmul2_results = torch::empty({q_seq_len, attn_batches, head_dim}, act_options);
// Input Linear Results Pointers to Q, K, and V of interleaved activations
void* q_lin_results_ptr = static_cast<void*>(input_lin_q_results.data_ptr());
void* k_lin_results_ptr = static_cast<void*>(input_lin_kv_results.data_ptr());
void* v_lin_results_ptr = static_cast<void*>(static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim);
void* dropout_results_ptr = static_cast<void*>(dropout_results.data_ptr());
// Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax)
void* attn_scores_ptr = static_cast<void*>(attn_scores.data_ptr());
void* lt_workspace_ptr = static_cast<void*>(lt_workspace.data_ptr());
int cublas_status = 1;
// MatMul1 of Dot-Product Attention Plus scaling by 1/Sqrt(head size)
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_T,
HIPBLAS_OP_N,
k_seq_len,
q_seq_len,
head_dim,
&scale, /* host pointer */
static_cast<const void*>(k_lin_results_ptr),
lead_dim_kv,
batch_stride_kv,
static_cast<const void*>(q_lin_results_ptr),
lead_dim_q,
batch_stride_q,
&beta_zero, /* host pointer */
static_cast<void*>(attn_scores_ptr), // C
k_seq_len, // ldc
k_seq_len*q_seq_len, // stride c
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM QKV forward failed with %d\n", cublas_status);
exit(0);
}
// need to call padding from torch interface here.
// - infinity or - 10000?
attn_scores.view({sequences, heads, q_seq_len, k_seq_len}).masked_fill_(pad_mask,
-std::numeric_limits<float>::infinity());
bool softmax_success = false;
if (is_training && dropout_prob > 0.0f) {
// This function fuses softmax-dropout-pad (and dropout inplace)
softmax_success = dispatch_softmax_dropout<half, half, float>(
reinterpret_cast<half*>(dropout_results_ptr),
(is_training) ? reinterpret_cast<uint8_t*>(dropout_mask.data_ptr<uint8_t>()) : nullptr,
reinterpret_cast<const half*>(attn_scores_ptr),
dropout_elems,
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
1.0f-dropout_prob,
stream);
} else {
softmax_success = dispatch_softmax<half, half, float>(
reinterpret_cast<half*>(dropout_results_ptr), // this is actually softmax results, but making it consistent for the next function
reinterpret_cast<const half*>(attn_scores_ptr),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
stream); // pad batch strides
}
assert(softmax_success);
// Matmul2
// matrix kv has size len_k * batch_size * (2 * heads * head_dim)
// dropout results [bsz*heads, len_q, len_k]
// matmul2_results is [len_q x attn_batches x head_dim]
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
head_dim, // m
q_seq_len, // n
k_seq_len, // k
&alpha, /* host pointer */
static_cast<const void*>(v_lin_results_ptr), // A:
lead_dim_kv,
batch_stride_kv,
static_cast<const void*>(dropout_results.data_ptr()), // A:
k_seq_len,
k_seq_len*q_seq_len,
&beta_zero, /* host pointer */
static_cast<void*>(matmul2_results.data_ptr()), // C
head_dim*attn_batches,
head_dim,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM ATTNV forward failed with %d\n", cublas_status);
exit(0);
}
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
return {
attn_scores,
dropout_results,
dropout_mask,
matmul2_results,
};
}
std::vector<torch::Tensor> bwd_cuda_recompute(
int heads,
torch::Tensor const& output_grads,
torch::Tensor const& inputs_q,
torch::Tensor const& inputs_kv,
torch::Tensor const& input_weights_q,
torch::Tensor const& input_weights_kv,
torch::Tensor const& input_biases_q,
torch::Tensor const& input_biases_kv,
torch::Tensor const& output_weights,
torch::Tensor const& dropout_mask,
torch::Tensor const& pad_mask,
float dropout_prob,
torch::Tensor lt_workspace
)
{
const int embed_dim = inputs_q.size(2);
const int sequences = inputs_q.size(1);
const int q_seq_len = inputs_q.size(0);
const int k_seq_len = inputs_kv.size(0);
const int batches_q = sequences * q_seq_len;
const int batches_kv = sequences * k_seq_len;
const int head_dim = embed_dim / heads;
const int output_lin_q_dim = embed_dim;
const int output_lin_kv_dim = 2 * embed_dim;
const int attn_batches = heads * sequences;
const int lead_dim_q = attn_batches * head_dim;
const int lead_dim_kv = attn_batches * 2 *head_dim;
const int batch_stride_q = head_dim;
const int batch_stride_kv = 2 * head_dim;
const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
const float alpha = 1.0;
const float beta = 0.0;
const float beta_zero = 0.0;
const float scale = 1.0 / sqrt(static_cast<float>(head_dim));
// TODO: Streams can be used in Backprop but I haven't added more than one
// in my first attempt to create the code
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
hipblasSetStream(handle, stream);
// TODO: recompute the output
auto act_options = inputs_q.options().requires_grad(false);
torch::Tensor input_lin_q_results = torch::empty({q_seq_len, sequences, output_lin_q_dim}, act_options);
torch::Tensor input_lin_kv_results = torch::empty({k_seq_len, sequences, output_lin_kv_dim}, act_options);
torch::Tensor attn_scores = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor matmul2_results = torch::empty({q_seq_len, attn_batches, head_dim}, act_options);
// Input Linear Results Pointers to Q, K, and V of interleaved activations
void* q_lin_results_ptr = static_cast<void*>(input_lin_q_results.data_ptr());
void* k_lin_results_ptr = static_cast<void*>(input_lin_kv_results.data_ptr());
void* v_lin_results_ptr = static_cast<void*>(static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim);
void* dropout_results_ptr = static_cast<void*>(dropout_results.data_ptr());
// Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax)
void* attn_scores_ptr = static_cast<void*>(attn_scores.data_ptr());
void* lt_workspace_ptr = static_cast<void*>(lt_workspace.data_ptr());
int cublas_status = 1;
cublas_status = gemm_bias_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_T,
HIPBLAS_OP_N,
output_lin_q_dim,
batches_q,
embed_dim,
&alpha, /* host pointer */
static_cast<const void*>(input_weights_q.data_ptr()),
embed_dim,
static_cast<const void*>(inputs_q.data_ptr()),
embed_dim,
&beta_zero, /* host pointer */
q_lin_results_ptr,
output_lin_q_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
true,
static_cast<const void*>(input_biases_q.data_ptr()));
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("GEMM Q Recompute forward failed with %d\n", cublas_status);
exit(0);
}
cublas_status = gemm_bias_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_T,
HIPBLAS_OP_N,
output_lin_kv_dim,
batches_kv,
embed_dim,
&alpha, /* host pointer */
static_cast<const void*>(input_weights_kv.data_ptr()),
embed_dim,
static_cast<const void*>(inputs_kv.data_ptr()),
embed_dim,
&beta_zero, /* host pointer */
k_lin_results_ptr,
output_lin_kv_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
true,
static_cast<const void*>(input_biases_kv.data_ptr()));
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("GEMM KV forward failed with %d\n", cublas_status);
exit(0);
}
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_T,
HIPBLAS_OP_N,
k_seq_len,
q_seq_len,
head_dim,
&scale, /* host pointer */
static_cast<const void*>(k_lin_results_ptr),
lead_dim_kv,
batch_stride_kv,
static_cast<const void*>(q_lin_results_ptr),
lead_dim_q,
batch_stride_q,
&beta_zero, /* host pointer */
static_cast<void*>(attn_scores_ptr), // C
k_seq_len, // ldc
k_seq_len*q_seq_len, // stride c
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM Recompute QKV forward failed with %d\n", cublas_status);
exit(0);
}
attn_scores.view({sequences, heads, q_seq_len, k_seq_len}).masked_fill_(pad_mask,
-std::numeric_limits<float>::infinity());
bool softmax_forward_success = false;
if (dropout_prob > 0.0f) {
// This function fuses softmax-dropout-pad (and dropout inplace)
softmax_forward_success = dispatch_softmax_dropout_presampled<half, half, float>(
reinterpret_cast<half*>(dropout_results_ptr),
reinterpret_cast<const uint8_t*>(dropout_mask.data_ptr<uint8_t>()),
reinterpret_cast<const half*>(attn_scores_ptr),
dropout_elems,
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
1.0f-dropout_prob,
stream);
} else {
softmax_forward_success = dispatch_softmax<half, half, float>(
reinterpret_cast<half*>(dropout_results_ptr), // this is actually softmax results, but making it consistent for the next function
reinterpret_cast<const half*>(attn_scores_ptr),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
stream); // pad batch strides
}
assert(softmax_forward_success);
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
head_dim, // m
q_seq_len, // n
k_seq_len, // k
&alpha, /* host pointer */
static_cast<const void*>(v_lin_results_ptr), // A:
lead_dim_kv,
batch_stride_kv,
static_cast<const void*>(dropout_results.data_ptr()), // A:
k_seq_len,
k_seq_len*q_seq_len,
&beta_zero, /* host pointer */
static_cast<void*>(matmul2_results.data_ptr()), // C
head_dim*attn_batches,
head_dim,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM Recompute ATTNV forward failed with %d\n", cublas_status);
exit(0);
}
// Don't need to compute the output again :)
// Recompute finished - now compute gradients
// Output Tensor Allocations
torch::Tensor input_q_grads = torch::empty_like(inputs_q);
torch::Tensor input_kv_grads = torch::empty_like(inputs_kv);
torch::Tensor input_weight_q_grads = torch::empty_like(input_weights_q);
torch::Tensor input_weight_kv_grads = torch::empty_like(input_weights_kv);
torch::Tensor output_weight_grads = torch::empty_like(output_weights);
at::Tensor input_bias_q_grads = torch::empty({output_lin_q_dim}, inputs_q.type());
at::Tensor input_bias_kv_grads = torch::empty({output_lin_kv_dim}, inputs_kv.type());
at::Tensor output_biases_grads = torch::empty({embed_dim}, inputs_q.type());
// Intermediate Tensor Allocations
at::Tensor output_lin_grads = torch::empty_like(matmul2_results);
at::Tensor matmul2_grads = torch::empty_like(dropout_results);
at::Tensor input_lin_q_output_grads = torch::empty_like(input_lin_q_results);
at::Tensor input_lin_kv_output_grads = torch::empty_like(input_lin_kv_results);
// auto q_lin_results_ptr = static_cast<half*>(input_lin_q_results.data_ptr());
// auto k_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr());
// auto v_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim;
auto q_lin_grads_ptr = static_cast<half*>(input_lin_q_output_grads.data_ptr());
auto k_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr());
auto v_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr()) + head_dim;
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
// Output Linear Dgrad
// C = alpha * op(A) op(B) + BetaC
// op(A): mxk, op(B): kxn C: mxn
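// (Added note, reading off the call pattern) The first gemm_bias_lt below is
// the dgrad of the output projection (weights times incoming gradient); the
// gemm_bgradb_lt after it is the wgrad/bgrad (saved activations times the
// transposed incoming gradient, with the bias gradient reduced over the batch).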
cublas_status = gemm_bias_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
embed_dim,
batches_q,
embed_dim,
&alpha, /* host pointer */
static_cast<const void*>(output_weights.data_ptr()),
embed_dim,
static_cast<const void*>(output_grads.data_ptr()),
embed_dim,
&beta, /* host pointer */
static_cast<void*>(output_lin_grads.data_ptr()),
embed_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
false,
static_cast<const void*>(nullptr));
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("GEMM output lin grad backward failed with %d\n", cublas_status);
exit(0);
}
cublas_status = gemm_bgradb_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_N,
HIPBLAS_OP_T,
embed_dim,
embed_dim,
batches_q,
&alpha, /* host pointer */
static_cast<const void*>(matmul2_results.data_ptr()),
embed_dim,
static_cast<const void*>(output_grads.data_ptr()),
embed_dim,
&beta, /* host pointer */
static_cast<void*>(output_weight_grads.data_ptr()),
embed_dim,
lt_workspace_ptr,
1 << 22,
stream,
true,
static_cast<void*>(output_biases_grads.data_ptr()));
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("GEMM output backward failed with %d\n", cublas_status);
exit(0);
}
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_T,
HIPBLAS_OP_N,
k_seq_len,
q_seq_len,
head_dim,
&alpha, /* host pointer */
static_cast<const void*>(v_lin_results_ptr), // A:
lead_dim_kv,
batch_stride_kv,
static_cast<const void*>(output_lin_grads.data_ptr()),
head_dim*attn_batches,
head_dim,
&beta, /* host pointer */
static_cast<void*>(matmul2_grads.data_ptr()), // C
k_seq_len,
k_seq_len*q_seq_len,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM backward 1 failed with %d\n", cublas_status);
exit(0);
}
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_N,
HIPBLAS_OP_T,
head_dim,
k_seq_len,
q_seq_len,
&alpha, /* host pointer */
static_cast<const void*>(output_lin_grads.data_ptr()), // A:
head_dim*attn_batches,
head_dim,
static_cast<const void*>(dropout_results.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
&beta, /* host pointer */
static_cast<void*>(v_lin_grads_ptr), // C
lead_dim_kv,
batch_stride_kv,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM backward 2 failed with %d\n", cublas_status);
exit(0);
}
if ( dropout_prob > 0.0f) {
dispatch_softmax_dropout_backward_recompute<half, half, float, false>(
static_cast<half*>(matmul2_grads.data_ptr()),
static_cast<half* const>(matmul2_grads.data_ptr()),
reinterpret_cast<half const*>(attn_scores.data_ptr()), // need this to recompute softmax
//reinterpret_cast<half const*>(pad_mask.data_ptr()),
static_cast<uint8_t const*>(dropout_mask.data_ptr()),
1.0/(1.0-dropout_prob),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
stream);
} else {
dispatch_softmax_backward_norecompute<half, half, float, false>(
static_cast<half*>(matmul2_grads.data_ptr()),
static_cast<half* const>(matmul2_grads.data_ptr()),
reinterpret_cast<half const*>(dropout_results.data_ptr()),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
stream);
}
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
head_dim,
q_seq_len,
k_seq_len,
&scale, /* host pointer */
static_cast<const void*>(k_lin_results_ptr), // A:
lead_dim_kv,
batch_stride_kv,
static_cast<const void*>(matmul2_grads.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
&beta, /* host pointer */
static_cast<void*>(q_lin_grads_ptr), // C
lead_dim_q,
batch_stride_q,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM backward 3 failed with %d\n", cublas_status);
exit(0);
}
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_N,
HIPBLAS_OP_T,
head_dim,
k_seq_len,
q_seq_len,
&scale, /* host pointer */
static_cast<const void*>(q_lin_results_ptr), // A:
lead_dim_q,
batch_stride_q,
static_cast<const void*>(matmul2_grads.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
&beta, /* host pointer */
static_cast<void*>(k_lin_grads_ptr), // C
lead_dim_kv,
batch_stride_kv,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM backward 4 failed with %d\n", cublas_status);
exit(0);
}
cublas_status = gemm_bias_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
embed_dim,
batches_q,
output_lin_q_dim,
&alpha, /* host pointer */
static_cast<const void*>(input_weights_q.data_ptr()),
embed_dim,
static_cast<const void*>(q_lin_grads_ptr),
output_lin_q_dim,
&beta, /* host pointer */
static_cast<void*>(input_q_grads.data_ptr()),
embed_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
false,
static_cast<const void*>(nullptr));
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("GEMM output backward final input q failed with %d\n", cublas_status);
exit(0);
}
//
cublas_status = gemm_bgradb_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_N,
HIPBLAS_OP_T,
embed_dim,
output_lin_q_dim,
batches_q,
&alpha, /* host pointer */
static_cast<const void*>(inputs_q.data_ptr()),
embed_dim,
reinterpret_cast<const void*>(q_lin_grads_ptr),
output_lin_q_dim,
&beta, /* host pointer */
static_cast<void*>(input_weight_q_grads.data_ptr()),
embed_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
true,
static_cast<void*>(input_bias_q_grads.data_ptr()));
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("GEMM input backward 1 failed with %d\n", cublas_status);
exit(0);
}
cublas_status = gemm_bias_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
embed_dim,
batches_kv,
output_lin_kv_dim,
&alpha, /* host pointer */
static_cast<const void*>(input_weights_kv.data_ptr()),
embed_dim,
static_cast<const void*>(k_lin_grads_ptr),
output_lin_kv_dim,
&beta, /* host pointer */
static_cast<void*>(input_kv_grads.data_ptr()),
embed_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
false,
static_cast<const void*>(nullptr));
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("GEMM output backward final input kv failed with %d\n", cublas_status);
exit(0);
}
cublas_status = gemm_bgradb_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_N,
HIPBLAS_OP_T,
embed_dim,
output_lin_kv_dim,
batches_kv,
&alpha, /* host pointer */
static_cast<const void*>(inputs_kv.data_ptr()),
embed_dim,
reinterpret_cast<const void*>(k_lin_grads_ptr),
output_lin_kv_dim,
&beta, /* host pointer */
static_cast<void*>(input_weight_kv_grads.data_ptr()),
embed_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
true,
static_cast<void*>(input_bias_kv_grads.data_ptr()));
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("GEMM input backward 2 failed with %d\n", cublas_status);
exit(0);
}
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
return {
input_q_grads,
input_kv_grads,
input_weight_q_grads,
input_weight_kv_grads,
output_weight_grads,
input_bias_q_grads,
input_bias_kv_grads,
output_biases_grads
};
}
std::vector<torch::Tensor> bwd_cuda(
int heads,
torch::Tensor const& output_grads,
torch::Tensor const& matmul2_results,
torch::Tensor const& dropout_results,
torch::Tensor const& attn_scores,
// const half* pad_mask,
torch::Tensor const& input_lin_q_results,
torch::Tensor const& input_lin_kv_results,
torch::Tensor const& inputs_q,
torch::Tensor const& inputs_kv,
torch::Tensor const& input_weights_q,
torch::Tensor const& input_weights_kv,
torch::Tensor const& output_weights,
torch::Tensor const& dropout_mask,
float dropout_prob,
torch::Tensor lt_workspace
)
{
const int embed_dim = inputs_q.size(2);
const int sequences = inputs_q.size(1);
const int q_seq_len = inputs_q.size(0);
const int k_seq_len = inputs_kv.size(0);
const int batches_q = sequences * q_seq_len;
const int batches_kv = sequences * k_seq_len;
const int head_dim = embed_dim / heads;
const int output_lin_q_dim = embed_dim;
const int output_lin_kv_dim = 2 * embed_dim;
const int attn_batches = heads * sequences;
const int lead_dim_q = attn_batches * head_dim;
const int lead_dim_kv = attn_batches * 2 *head_dim;
const int batch_stride_q = head_dim;
const int batch_stride_kv = 2 * head_dim;
// const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
const float alpha = 1.0;
const float beta = 0.0;
const float scale = 1.0 / sqrt(static_cast<float>(head_dim));
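// Layout notes (inferred from the tensor shapes and GEMM arguments below): activations are
// [seq_len, bsz, embed_dim] with embed_dim contiguous, so embed_dim acts as the leading dimension
// in the column-major BLAS view. K and V come out of one packed projection per head, hence
// lead_dim_kv = attn_batches * 2 * head_dim and batch_stride_kv = 2 * head_dim.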
// TODO: Streams can be used in Backprop but I haven't added more than one
// in my first attempt to create the code
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
hipblasSetStream(handle, stream);
// Output Tensor Allocations
torch::Tensor input_q_grads = torch::empty_like(inputs_q);
torch::Tensor input_kv_grads = torch::empty_like(inputs_kv);
torch::Tensor input_weight_q_grads = torch::empty_like(input_weights_q);
torch::Tensor input_weight_kv_grads = torch::empty_like(input_weights_kv);
torch::Tensor output_weight_grads = torch::empty_like(output_weights);
at::Tensor input_bias_q_grads = torch::empty({output_lin_q_dim}, inputs_q.type());
at::Tensor input_bias_kv_grads = torch::empty({output_lin_kv_dim}, inputs_kv.type());
at::Tensor output_biases_grads = torch::empty({embed_dim}, inputs_q.type());
// Intermediate Tensor Allocations
at::Tensor output_lin_grads = torch::empty_like(matmul2_results);
at::Tensor matmul2_grads = torch::empty_like(dropout_results);
at::Tensor input_lin_q_output_grads = torch::empty_like(input_lin_q_results);
at::Tensor input_lin_kv_output_grads = torch::empty_like(input_lin_kv_results);
auto q_lin_results_ptr = static_cast<half*>(input_lin_q_results.data_ptr());
auto k_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr());
auto v_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim;
auto q_lin_grads_ptr = static_cast<half*>(input_lin_q_output_grads.data_ptr());
auto k_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr());
auto v_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr()) + head_dim;
void* lt_workspace_ptr = static_cast<void*>(lt_workspace.data_ptr());
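// All cublasLt calls below pass 1 << 22 (4 MiB) as the workspace size, so the caller-provided
// lt_workspace tensor is assumed to be at least that large.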
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
// Output Linear Dgrad
// C = alpha * op(A) * op(B) + beta * C
// op(A): m x k, op(B): k x n, C: m x n
int cublas_status = 1;
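// output_lin_grads(embed_dim x batches_q) = output_weights * output_grads; no transpose is needed
// here because the forward output projection applies the weight with OP_T (see fwd_cuda).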
cublas_status = gemm_bias_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
embed_dim,
batches_q,
embed_dim,
&alpha, /* host pointer */
static_cast<const void*>(output_weights.data_ptr()),
embed_dim,
static_cast<const void*>(output_grads.data_ptr()),
embed_dim,
&beta, /* host pointer */
static_cast<void*>(output_lin_grads.data_ptr()),
embed_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
false,
static_cast<const void*>(nullptr));
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("GEMM output lin grad backward failed with %d\n", cublas_status);
exit(0);
}
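// Output Linear Wgrad, with the output-bias gradient fused into the same gemm_bgradb_lt call:
// output_weight_grads = matmul2_results * output_grads^T, and output_biases_grads is the
// per-feature sum of output_grads over all tokens.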
cublas_status = gemm_bgradb_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_N,
HIPBLAS_OP_T,
embed_dim,
embed_dim,
batches_q,
&alpha, /* host pointer */
static_cast<const void*>(matmul2_results.data_ptr()),
embed_dim,
static_cast<const void*>(output_grads.data_ptr()),
embed_dim,
&beta, /* host pointer */
static_cast<void*>(output_weight_grads.data_ptr()),
embed_dim,
lt_workspace_ptr,
1 << 22,
stream,
true,
static_cast<void*>(output_biases_grads.data_ptr()));
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("GEMM output backward failed with %d\n", cublas_status);
exit(0);
}
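// MatMul2 Dgrad1: per head, matmul2_grads = V^T * output_lin_grads, i.e. the gradient w.r.t. the
// (dropped-out) attention probabilities, shape k_seq_len x q_seq_len per batch.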
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_T,
HIPBLAS_OP_N,
k_seq_len,
q_seq_len,
head_dim,
&alpha, /* host pointer */
static_cast<const void*>(v_lin_results_ptr), // A:
lead_dim_kv,
batch_stride_kv,
static_cast<const void*>(output_lin_grads.data_ptr()),
head_dim*attn_batches,
head_dim,
&beta, /* host pointer */
static_cast<void*>(matmul2_grads.data_ptr()), // C
k_seq_len,
k_seq_len*q_seq_len,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM backward 1 failed with %d\n", cublas_status);
exit(0);
}
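// MatMul2 Dgrad2: per head, v_lin_grads = output_lin_grads * dropout_results^T, the gradient w.r.t. V.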
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_N,
HIPBLAS_OP_T,
head_dim,
k_seq_len,
q_seq_len,
&alpha, /* host pointer */
static_cast<const void*>(output_lin_grads.data_ptr()), // A:
head_dim*attn_batches,
head_dim,
static_cast<const void*>(dropout_results.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
&beta, /* host pointer */
static_cast<void*>(v_lin_grads_ptr), // C
lead_dim_kv,
batch_stride_kv,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM backward 2 failed with %d\n", cublas_status);
exit(0);
}
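// Softmax (+ dropout) backward. With dropout active the forward stored only the dropped-out
// probabilities, so the softmax output is recomputed from attn_scores; with dropout_prob == 0
// dropout_results already equals the softmax output and no recompute is needed.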
if ( dropout_prob > 0.0f) {
dispatch_softmax_dropout_backward_recompute<half, half, float, false>(
static_cast<half*>(matmul2_grads.data_ptr()),
static_cast<half* const>(matmul2_grads.data_ptr()),
reinterpret_cast<half const*>(attn_scores.data_ptr()), // need this to recompute softmax
//reinterpret_cast<half const*>(pad_mask.data_ptr()),
static_cast<uint8_t const*>(dropout_mask.data_ptr()),
1.0/(1.0-dropout_prob),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
stream);
} else {
dispatch_softmax_backward_norecompute<half, half, float, false>(
static_cast<half*>(matmul2_grads.data_ptr()),
static_cast<half* const>(matmul2_grads.data_ptr()),
reinterpret_cast<half const*>(dropout_results.data_ptr()),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
stream);
}
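// Matmul1 Dgrad1: per head, q_lin_grads = K * matmul2_grads, scaled by 1/sqrt(head_dim).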
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
head_dim,
q_seq_len,
k_seq_len,
&scale, /* host pointer */
static_cast<const void*>(k_lin_results_ptr), // A:
lead_dim_kv,
batch_stride_kv,
static_cast<const void*>(matmul2_grads.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
&beta, /* host pointer */
static_cast<void*>(q_lin_grads_ptr), // C
lead_dim_q,
batch_stride_q,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM backward 3 failed with %d\n", cublas_status);
exit(0);
}
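// Matmul1 Dgrad2: per head, k_lin_grads = Q * matmul2_grads^T, scaled by 1/sqrt(head_dim).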
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_N,
HIPBLAS_OP_T,
head_dim,
k_seq_len,
q_seq_len,
&scale, /* host pointer */
static_cast<const void*>(q_lin_results_ptr), // A:
lead_dim_q,
batch_stride_q,
static_cast<const void*>(matmul2_grads.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
&beta, /* host pointer */
static_cast<void*>(k_lin_grads_ptr), // C
lead_dim_kv,
batch_stride_kv,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM backward 4 failed with %d\n", cublas_status);
exit(0);
}
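// Input Linear Q Dgrad: input_q_grads = input_weights_q * q_lin_grads.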
cublas_status = gemm_bias_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
embed_dim,
batches_q,
output_lin_q_dim,
&alpha, /* host pointer */
static_cast<const void*>(input_weights_q.data_ptr()),
embed_dim,
static_cast<const void*>(q_lin_grads_ptr),
output_lin_q_dim,
&beta, /* host pointer */
static_cast<void*>(input_q_grads.data_ptr()),
embed_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
false,
static_cast<const void*>(nullptr));
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("GEMM output backward final input q failed with %d\n", cublas_status);
exit(0);
}
//
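// Input Linear Q Wgrad (+ fused bias grad): input_weight_q_grads = inputs_q * q_lin_grads^T.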
cublas_status = gemm_bgradb_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_N,
HIPBLAS_OP_T,
embed_dim,
output_lin_q_dim,
batches_q,
&alpha, /* host pointer */
static_cast<const void*>(inputs_q.data_ptr()),
embed_dim,
reinterpret_cast<const void*>(q_lin_grads_ptr),
output_lin_q_dim,
&beta, /* host pointer */
static_cast<void*>(input_weight_q_grads.data_ptr()),
embed_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
true,
static_cast<void*>(input_bias_q_grads.data_ptr()));
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("GEMM input backward 1 failed with %d\n", cublas_status);
exit(0);
}
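// Input Linear KV Dgrad: input_kv_grads = input_weights_kv * kv_lin_grads (packed K and V grads).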
cublas_status = gemm_bias_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
embed_dim,
batches_kv,
output_lin_kv_dim,
&alpha, /* host pointer */
static_cast<const void*>(input_weights_kv.data_ptr()),
embed_dim,
static_cast<const void*>(k_lin_grads_ptr),
output_lin_kv_dim,
&beta, /* host pointer */
static_cast<void*>(input_kv_grads.data_ptr()),
embed_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
false,
static_cast<const void*>(nullptr));
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("GEMM output backward final input kv failed with %d\n", cublas_status);
exit(0);
}
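// Input Linear KV Wgrad (+ fused bias grad): input_weight_kv_grads = inputs_kv * kv_lin_grads^T.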
cublas_status = gemm_bgradb_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_N,
HIPBLAS_OP_T,
embed_dim,
output_lin_kv_dim,
batches_kv,
&alpha, /* host pointer */
static_cast<const void*>(inputs_kv.data_ptr()),
embed_dim,
reinterpret_cast<const void*>(k_lin_grads_ptr),
output_lin_kv_dim,
&beta, /* host pointer */
static_cast<void*>(input_weight_kv_grads.data_ptr()),
embed_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
true,
static_cast<void*>(input_bias_kv_grads.data_ptr()));
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("GEMM input backward 2 failed with %d\n", cublas_status);
exit(0);
}
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
return {
input_q_grads,
input_kv_grads,
input_weight_q_grads,
input_weight_kv_grads,
output_weight_grads,
input_bias_q_grads,
input_bias_kv_grads,
output_biases_grads
};
}
std::vector<torch::Tensor> bwd_compact_cuda(
int heads,
torch::Tensor const& output_lin_grads,
torch::Tensor const& dropout_results,
torch::Tensor const& attn_scores,
torch::Tensor const& input_lin_q_results,
torch::Tensor const& input_lin_kv_results,
torch::Tensor const& dropout_mask,
float dropout_prob,
torch::Tensor lt_workspace
)
{
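// Compact backward: given the gradient w.r.t. the attention context (output_lin_grads), compute
// only the gradients of the Q and KV projection outputs; the output projection and all weight/bias
// gradients are assumed to be handled by the caller.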
const int embed_dim = input_lin_q_results.size(2);
const int sequences = input_lin_q_results.size(1);
const int q_seq_len = input_lin_q_results.size(0);
const int k_seq_len = input_lin_kv_results.size(0);
const int batches_q = sequences * q_seq_len;
const int batches_kv = sequences * k_seq_len;
const int head_dim = embed_dim / heads;
const int output_lin_q_dim = embed_dim;
const int output_lin_kv_dim = 2 * embed_dim;
const int attn_batches = heads * sequences;
const int lead_dim_q = attn_batches * head_dim;
const int lead_dim_kv = attn_batches * 2 *head_dim;
const int batch_stride_q = head_dim;
const int batch_stride_kv = 2 * head_dim;
// const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
const float alpha = 1.0;
const float beta = 0.0;
const float scale = 1.0 / sqrt(static_cast<float>(head_dim));
// TODO: Streams can be used in Backprop but I haven't added more than one
// in my first attempt to create the code
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
hipblasSetStream(handle, stream);
// Output Tensor Allocations
// Intermediate Tensor Allocations
at::Tensor matmul2_grads = torch::empty_like(dropout_results);
at::Tensor input_lin_q_output_grads = torch::empty_like(input_lin_q_results);
at::Tensor input_lin_kv_output_grads = torch::empty_like(input_lin_kv_results);
auto q_lin_results_ptr = static_cast<half*>(input_lin_q_results.data_ptr());
auto k_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr());
auto v_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim;
auto q_lin_grads_ptr = static_cast<half*>(input_lin_q_output_grads.data_ptr());
auto k_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr());
auto v_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr()) + head_dim;
void* lt_workspace_ptr = static_cast<void*>(lt_workspace.data_ptr());
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
int cublas_status = 1;
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_T,
HIPBLAS_OP_N,
k_seq_len,
q_seq_len,
head_dim,
&alpha, /* host pointer */
static_cast<const void*>(v_lin_results_ptr), // A:
lead_dim_kv,
batch_stride_kv,
static_cast<const void*>(output_lin_grads.data_ptr()),
head_dim*attn_batches,
head_dim,
&beta, /* host pointer */
static_cast<void*>(matmul2_grads.data_ptr()), // C
k_seq_len,
k_seq_len*q_seq_len,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM backward 1 failed with %d\n", cublas_status);
exit(0);
}
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_N,
HIPBLAS_OP_T,
head_dim,
k_seq_len,
q_seq_len,
&alpha, /* host pointer */
static_cast<const void*>(output_lin_grads.data_ptr()), // A:
head_dim*attn_batches,
head_dim,
static_cast<const void*>(dropout_results.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
&beta, /* host pointer */
static_cast<void*>(v_lin_grads_ptr), // C
lead_dim_kv,
batch_stride_kv,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM backward 2 failed with %d\n", cublas_status);
exit(0);
}
if ( dropout_prob > 0.0f) {
dispatch_softmax_dropout_backward_recompute<half, half, float, false>(
static_cast<half*>(matmul2_grads.data_ptr()),
static_cast<half* const>(matmul2_grads.data_ptr()),
reinterpret_cast<half const*>(attn_scores.data_ptr()), // need this to recompute softmax
//reinterpret_cast<half const*>(pad_mask.data_ptr()),
static_cast<uint8_t const*>(dropout_mask.data_ptr()),
1.0/(1.0-dropout_prob),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
stream);
} else {
dispatch_softmax_backward_norecompute<half, half, float, false>(
static_cast<half*>(matmul2_grads.data_ptr()),
static_cast<half* const>(matmul2_grads.data_ptr()),
reinterpret_cast<half const*>(dropout_results.data_ptr()),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
stream);
}
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
head_dim,
q_seq_len,
k_seq_len,
&scale, /* host pointer */
static_cast<const void*>(k_lin_results_ptr), // A:
lead_dim_kv,
batch_stride_kv,
static_cast<const void*>(matmul2_grads.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
&beta, /* host pointer */
static_cast<void*>(q_lin_grads_ptr), // C
lead_dim_q,
batch_stride_q,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM backward 3 failed with %d\n", cublas_status);
exit(0);
}
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
HIPBLAS_OP_N,
HIPBLAS_OP_T,
head_dim,
k_seq_len,
q_seq_len,
&scale, /* host pointer */
static_cast<const void*>(q_lin_results_ptr), // A:
lead_dim_q,
batch_stride_q,
static_cast<const void*>(matmul2_grads.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
&beta, /* host pointer */
static_cast<void*>(k_lin_grads_ptr), // C
lead_dim_kv,
batch_stride_kv,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != HIPBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM backward 4 failed with %d\n", cublas_status);
exit(0);
}
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
return {
input_lin_q_output_grads,
input_lin_kv_output_grads,
};
}
std::vector<torch::Tensor> bwd_cuda_input_only(
int heads,
torch::Tensor const& output_grads,
torch::Tensor const& matmul2_results,
torch::Tensor const& dropout_results,
torch::Tensor const& attn_scores,
torch::Tensor const& input_lin_q_results,
torch::Tensor const& input_lin_kv_results,
torch::Tensor const& inputs_q,
torch::Tensor const& inputs_kv,
torch::Tensor const& input_weights_q,
torch::Tensor const& input_weights_kv,
torch::Tensor const& output_weights,
torch::Tensor const& dropout_mask,
float dropout_prob
)
{
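// Input-only backward: produces gradients w.r.t. inputs_q and inputs_kv only. The weight/bias
// gradient GEMMs are left commented out below, and plain hipblasGemmEx / hipblasGemmStridedBatchedEx
// calls are used instead of the cublasLt helpers.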
const int embed_dim = inputs_q.size(2);
const int sequences = inputs_q.size(1);
const int q_seq_len = inputs_q.size(0);
const int k_seq_len = inputs_kv.size(0);
const int batches_q = sequences * q_seq_len;
const int batches_kv = sequences * k_seq_len;
const int head_dim = embed_dim / heads;
const int output_lin_q_dim = embed_dim;
const int output_lin_kv_dim = 2 * embed_dim;
const int attn_batches = heads * sequences;
const int lead_dim_q = attn_batches * head_dim;
const int lead_dim_kv = attn_batches * 2 *head_dim;
const int batch_stride_q = head_dim;
const int batch_stride_kv = 2 * head_dim;
// const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
const float alpha = 1.0;
const float beta = 0.0;
const float scale = 1.0 / sqrt(static_cast<float>(head_dim));
// TODO: Streams can be used in Backprop but I haven't added more than one
// in my first attempt to create the code
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
hipblasSetStream(handle, stream);
// Output Tensor Allocations
torch::Tensor input_q_grads = torch::empty_like(inputs_q);
torch::Tensor input_kv_grads = torch::empty_like(inputs_kv);
// torch::Tensor input_weight_q_grads = torch::empty_like(input_weights_q);
// torch::Tensor input_weight_kv_grads = torch::empty_like(input_weights_kv);
// torch::Tensor output_weight_grads = torch::empty_like(output_weights);
// Intermediate Tensor Allocations
at::Tensor output_lin_grads = torch::empty_like(matmul2_results);
at::Tensor matmul2_grads = torch::empty_like(dropout_results);
at::Tensor input_lin_q_output_grads = torch::empty_like(input_lin_q_results);
at::Tensor input_lin_kv_output_grads = torch::empty_like(input_lin_kv_results);
auto q_lin_results_ptr = static_cast<half*>(input_lin_q_results.data_ptr());
auto k_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr());
auto v_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim;
auto q_lin_grads_ptr = static_cast<half*>(input_lin_q_output_grads.data_ptr());
auto k_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr());
auto v_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr()) + head_dim;
// char a_layout_n{'n'};
// char a_layout_t{'t'};
// char b_layout_n{'n'};
// char b_layout_t{'t'};
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
// Output Linear Dgrad
// C = alpha * op(A) * op(B) + beta * C
// op(A): m x k, op(B): k x n, C: m x n
TORCH_CUDABLAS_CHECK(hipblasGemmEx(handle, //
HIPBLAS_OP_N, // no transpose
HIPBLAS_OP_N, // no transpose
embed_dim, // m
batches_q, // n = bsz * len_q
embed_dim, // k
static_cast<const void*>(&alpha), // alpha = 1.0
static_cast<const void*>(output_weights.data_ptr()), // A mxk
HIP_R_16F, // data type
embed_dim, // leading dimension of A (embed dim) (the rows)
static_cast<const void*>(output_grads.data_ptr()), // B kxn
HIP_R_16F, // data type
embed_dim, // leading dimension of B (embed dim)
static_cast<const void*>(&beta), // beta
static_cast<void*>(output_lin_grads.data_ptr()), // C mxn
HIP_R_16F, // data type
embed_dim, // ldc
HIP_R_32F, // compute type
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Output Linear Wgrad
// TORCH_CUDABLAS_CHECK(hipblasGemmEx(handle,
// HIPBLAS_OP_N,
// HIPBLAS_OP_T,
// embed_dim,
// embed_dim,
// batches_q,
// static_cast<const void*>(&alpha),
// static_cast<const void*>(matmul2_results.data_ptr()),
// HIP_R_16F,
// embed_dim,
// static_cast<const void*>(output_grads.data_ptr()),
// HIP_R_16F,
// embed_dim,
// static_cast<const void*>(&beta),
// static_cast<void*>(output_weight_grads.data_ptr()),
// HIP_R_16F,
// embed_dim,
// HIP_R_32F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// auto output_bias_grads = output_grads.view({-1, embed_dim}).sum(0, false);
TORCH_CUDABLAS_CHECK(hipblasGemmStridedBatchedEx(handle,
HIPBLAS_OP_T,
HIPBLAS_OP_N,
k_seq_len, // m
q_seq_len, // n
head_dim, // k
static_cast<const void*>(&alpha),
static_cast<const void*>(v_lin_results_ptr), // A:
HIP_R_16F,
lead_dim_kv, // lda
batch_stride_kv, // stride A
static_cast<const void*>(output_lin_grads.data_ptr()),
HIP_R_16F,
head_dim*attn_batches,
head_dim,
static_cast<const void*>(&beta),
static_cast<void*>(matmul2_grads.data_ptr()), // C
HIP_R_16F,
k_seq_len,
k_seq_len*q_seq_len,
attn_batches,
HIP_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
TORCH_CUDABLAS_CHECK(hipblasGemmStridedBatchedEx(handle,
HIPBLAS_OP_N,
HIPBLAS_OP_T,
head_dim, // m
k_seq_len, // n
q_seq_len, // k
static_cast<const void*>(&alpha),
static_cast<const void*>(output_lin_grads.data_ptr()), // A:
HIP_R_16F,
head_dim*attn_batches, // lda
head_dim, // stride A
static_cast<const void*>(dropout_results.data_ptr()),
HIP_R_16F,
k_seq_len,
k_seq_len*q_seq_len,
static_cast<const void*>(&beta),
static_cast<void*>(v_lin_grads_ptr), // C
HIP_R_16F,
lead_dim_kv,
batch_stride_kv,
attn_batches,
HIP_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// bool softmax_success = false;
if ( dropout_prob > 0.0f) {
dispatch_softmax_dropout_backward_recompute<half, half, float, false>(
static_cast<half*>(matmul2_grads.data_ptr()),
static_cast<half* const>(matmul2_grads.data_ptr()),
reinterpret_cast<half const*>(attn_scores.data_ptr()), // need this to recompute softmax
//reinterpret_cast<half const*>(pad_mask.data_ptr()),
static_cast<uint8_t const*>(dropout_mask.data_ptr()),
1.0/(1.0-dropout_prob),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
stream);
} else {
// if dropout == 0 then we don't need to recompute (because dropout_results == softmax_results)
dispatch_softmax_backward_norecompute<half, half, float, false>(
static_cast<half*>(matmul2_grads.data_ptr()),
static_cast<half* const>(matmul2_grads.data_ptr()),
reinterpret_cast<half const*>(dropout_results.data_ptr()),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
stream);
}
TORCH_CUDABLAS_CHECK(hipblasGemmStridedBatchedEx(handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
head_dim, // m
q_seq_len, // n
k_seq_len, // k
static_cast<const void*>(&scale),
static_cast<const void*>(k_lin_results_ptr), // A:
HIP_R_16F,
lead_dim_kv, // lda
batch_stride_kv, // stride A
static_cast<const void*>(matmul2_grads.data_ptr()),
HIP_R_16F,
k_seq_len,
k_seq_len*q_seq_len,
static_cast<const void*>(&beta),
static_cast<void*>(q_lin_grads_ptr), // C
HIP_R_16F,
lead_dim_q,
batch_stride_q,
attn_batches,
HIP_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
TORCH_CUDABLAS_CHECK(hipblasGemmStridedBatchedEx(handle,
HIPBLAS_OP_N,
HIPBLAS_OP_T,
head_dim, // m
k_seq_len, // n
q_seq_len, // k
static_cast<const void*>(&scale),
static_cast<const void*>(q_lin_results_ptr), // A:
HIP_R_16F,
lead_dim_q, // lda
batch_stride_q, // stride A
static_cast<const void*>(matmul2_grads.data_ptr()),
HIP_R_16F,
k_seq_len,
k_seq_len*q_seq_len,
static_cast<const void*>(&beta),
static_cast<void*>(k_lin_grads_ptr), // C
HIP_R_16F,
lead_dim_kv,
batch_stride_kv,
attn_batches,
HIP_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Input Linear Q Dgrad
TORCH_CUDABLAS_CHECK(hipblasGemmEx(handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
embed_dim,
batches_q,
output_lin_q_dim,
static_cast<const void*>(&alpha),
static_cast<const void*>(input_weights_q.data_ptr()),
HIP_R_16F,
embed_dim,
static_cast<const void*>(q_lin_grads_ptr),
HIP_R_16F,
output_lin_q_dim,
static_cast<const void*>(&beta),
static_cast<void*>(input_q_grads.data_ptr()),
HIP_R_16F,
embed_dim,
HIP_R_32F,
//CUBLAS_GEMM_ALGO10_TENSOR_OP));
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Input Linear Q Wgrad
// TORCH_CUDABLAS_CHECK(hipblasGemmEx(handle,
// HIPBLAS_OP_N,
// HIPBLAS_OP_T,
// embed_dim,
// output_lin_q_dim,
// batches_q,
// static_cast<const void*>(&alpha),
// static_cast<const void*>(inputs_q.data_ptr()),
// HIP_R_16F,
// embed_dim,
// static_cast<const void*>(q_lin_grads_ptr),
// HIP_R_16F,
// output_lin_q_dim,
// static_cast<const void*>(&beta),
// static_cast<void*>(input_weight_q_grads.data_ptr()),
// HIP_R_16F,
// embed_dim,
// HIP_R_32F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
//
// auto input_bias_q_grads = input_lin_q_output_grads.view({-1, output_lin_q_dim}).sum(0, false);
// Input Linear KV Dgrad
TORCH_CUDABLAS_CHECK(hipblasGemmEx(handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
embed_dim,
batches_kv,
output_lin_kv_dim,
static_cast<const void*>(&alpha),
static_cast<const void*>(input_weights_kv.data_ptr()),
HIP_R_16F,
embed_dim,
static_cast<const void*>(k_lin_grads_ptr),
HIP_R_16F,
output_lin_kv_dim,
static_cast<const void*>(&beta),
static_cast<void*>(input_kv_grads.data_ptr()),
HIP_R_16F,
embed_dim,
HIP_R_32F,
//CUBLAS_GEMM_ALGO10_TENSOR_OP));
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Input Linear KV Wgrad
// TORCH_CUDABLAS_CHECK(hipblasGemmEx(handle,
// HIPBLAS_OP_N,
// HIPBLAS_OP_T,
// embed_dim,
// output_lin_kv_dim,
// batches_kv,
// static_cast<const void*>(&alpha),
// static_cast<const void*>(inputs_kv.data_ptr()),
// HIP_R_16F,
// embed_dim,
// static_cast<const void*>(k_lin_grads_ptr),
// HIP_R_16F,
// output_lin_kv_dim,
// static_cast<const void*>(&beta),
// static_cast<void*>(input_weight_kv_grads.data_ptr()),
// HIP_R_16F,
// embed_dim,
// HIP_R_32F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// auto input_bias_kv_grads = input_lin_kv_output_grads.view({-1, output_lin_kv_dim}).sum(0, false);
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
return {
input_q_grads,
input_kv_grads,
};
}
//
//
//std::vector<torch::Tensor> bwd_recompute_cuda(
// int heads,
// torch::Tensor const& output_grads,
// torch::Tensor const& inputs_q,
// torch::Tensor const& inputs_kv,
// torch::Tensor const& input_weights_q,
// torch::Tensor const& input_weights_kv,
// torch::Tensor const& output_weights,
// torch::Tensor const& dropout_mask,
// torch::Tensor const& pad_mask,
// float dropout_prob
// )
//{
// const int embed_dim = inputs_q.size(2);
// const int sequences = inputs_q.size(1);
// const int q_seq_len = inputs_q.size(0);
// const int k_seq_len = inputs_kv.size(0);
// const int batches_q = sequences * q_seq_len;
// const int batches_kv = sequences * k_seq_len;
// const int head_dim = embed_dim / heads;
// const int output_lin_q_dim = embed_dim;
// const int output_lin_kv_dim = 2 * embed_dim;
// const int attn_batches = heads * sequences;
// const int lead_dim_q = attn_batches * head_dim;
// const int lead_dim_kv = attn_batches * 2 *head_dim;
// const int batch_stride_q = head_dim;
// const int batch_stride_kv = 2 * head_dim;
// const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
// const float alpha = 1.0;
// const float beta = 0.0;
// const float scale = 1.0 / sqrt(static_cast<float>(head_dim));
//
// // TODO: Streams can be used in Backprop but I haven't added more than one
// // in my first attempt to create the code
// hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
// hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
// hipblasSetStream(handle, stream);
//
// // Tensor allocations for recomputation
// auto act_options = inputs_q.options().requires_grad(false);
//
// torch::Tensor input_lin_q_results = torch::empty({q_seq_len, sequences, output_lin_q_dim}, act_options);
// torch::Tensor input_lin_kv_results = torch::empty({k_seq_len, sequences, output_lin_kv_dim}, act_options);
// torch::Tensor attn_scores = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
// torch::Tensor softmax_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
// torch::Tensor dropout_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
// torch::Tensor matmul2_results = torch::empty({q_seq_len, attn_batches, head_dim}, act_options);
//
// void* softmax_results_ptr = static_cast<void*>(softmax_results.data_ptr());
// void* dropout_results_ptr = static_cast<void*>(dropout_results.data_ptr());
// // Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax)
// void* attn_scores_ptr = static_cast<void*>(attn_scores.data_ptr());
//
// // Output Tensor Allocations
// torch::Tensor input_q_grads = torch::empty_like(inputs_q);
// torch::Tensor input_kv_grads = torch::empty_like(inputs_kv);
// torch::Tensor input_weight_q_grads = torch::empty_like(input_weights_q);
// torch::Tensor input_weight_kv_grads = torch::empty_like(input_weights_kv);
// torch::Tensor output_weight_grads = torch::empty_like(output_weights);
// // Intermediate Tensor Allocations
// at::Tensor output_lin_grads = torch::empty_like(matmul2_results);
// at::Tensor matmul2_grads = torch::empty_like(softmax_results);
// at::Tensor input_lin_q_output_grads = torch::empty_like(input_lin_q_results);
// at::Tensor input_lin_kv_output_grads = torch::empty_like(input_lin_kv_results);
//
// auto q_lin_results_ptr = static_cast<half*>(input_lin_q_results.data_ptr());
// auto k_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr());
// auto v_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim;
//
// auto q_lin_grads_ptr = static_cast<half*>(input_lin_q_output_grads.data_ptr());
// auto k_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr());
// auto v_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr()) + head_dim;
//
// char a_layout_n{'n'};
// char a_layout_t{'t'};
// char b_layout_n{'n'};
// char b_layout_t{'t'};
//
// TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
//
// // Input Linear Q Fwd
// TORCH_CUDABLAS_CHECK(hipblasGemmEx(handle,
// HIPBLAS_OP_T, // A transpose
// HIPBLAS_OP_N, // B wo/ transpose
// output_lin_q_dim, // embed_dim
// batches_q, // bsz x len_q
// embed_dim, // embed_dim
// static_cast<const void*>(&alpha),
// static_cast<const void*>(input_weights_q.data_ptr()), // weight emb_out x emb_in transposed
// HIP_R_16F,
// embed_dim, // lda so A has size [lda x m] -> [embed_dim x output_lin_q_dim]
// static_cast<const void*>(inputs_q.data_ptr()), // input Q
// HIP_R_16F,
// embed_dim, // ldb B has size [lda xn] -> [embed_dim x batches_q]
// static_cast<const void*>(&beta), // beta
// q_lin_results_ptr, // C -> emb * B
// HIP_R_16F,
// output_lin_q_dim, // ldc C [lda x n] -> [embed_dim x batches_q]
// HIP_R_32F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
//
// // Input Linear KV Fwd
// TORCH_CUDABLAS_CHECK(hipblasGemmEx(handle,
// HIPBLAS_OP_T,
// HIPBLAS_OP_N,
// output_lin_kv_dim,
// batches_kv,
// embed_dim,
// static_cast<const void*>(&alpha),
// static_cast<const void*>(input_weights_kv.data_ptr()),
// HIP_R_16F,
// embed_dim,
// static_cast<const void*>(inputs_kv.data_ptr()),
// HIP_R_16F,
// embed_dim,
// static_cast<const void*>(&beta),
// k_lin_results_ptr,
// HIP_R_16F,
// output_lin_kv_dim,
// HIP_R_32F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
//
// // MatMul1 of Dot-Product Attention Plus scaling by 1/Sqrt(head size)
// gemm_switch_fp32accum( state,
// a_layout_t,
// b_layout_n,
// k_seq_len, // m
// q_seq_len, // n
// head_dim, // k
// scale,
// static_cast<const half*>(k_lin_results_ptr),
// lead_dim_kv, // lda
// batch_stride_kv, //strideA
// static_cast<const half*>(q_lin_results_ptr),
// lead_dim_q, // ldb
// batch_stride_q, //strideB
// beta,
// static_cast<half*>(attn_scores_ptr), // [attn_batches * len_q * len_k]
// k_seq_len, // ldc
// k_seq_len*q_seq_len, // stride c
// attn_batches); // p
//
// // need to call padding from torch interface here.
// attn_scores.view({sequences, heads, q_seq_len, k_seq_len}).masked_fill_(pad_mask,
// -std::numeric_limits<float>::infinity());
//
// attn_scores.view({sequences*heads, q_seq_len, k_seq_len});
// bool softmax_success = false;
//
// // run softmax dropout again but don't change the dropout mask
// softmax_success = dispatch_softmax_dropout_presampled<half, half, float>(
// reinterpret_cast<half*>(dropout_results_ptr),
// reinterpret_cast<half*>(softmax_results_ptr),
// reinterpret_cast<const uint8_t*>(dropout_mask.data_ptr<uint8_t>()),
// reinterpret_cast<const half*>(attn_scores_ptr),
// dropout_elems,
// k_seq_len,
// k_seq_len,
// attn_batches*q_seq_len,
// (1.0f - dropout_prob),
// stream);
//
// assert(softmax_success);
//
// // Matmul2
// // matrix kv has size len_k * batch_size * (2 * heads * head_dim)
// // dropout results [bsz*heads, len_q, len_k]
// // matmul2_results is [len_q x attn_batches x head_dim]
// gemm_switch_fp32accum( state,
// a_layout_n,
// b_layout_n,
// head_dim, // m
// q_seq_len, // n
// k_seq_len, // k
// alpha,
// static_cast<const half*>(v_lin_results_ptr), // A_i [head_dimxk_seq_len]
// lead_dim_kv, // attn_batches * 2 *head_dim
// batch_stride_kv, // stride = 2 * head_dim
// static_cast<const half*>(dropout_results.data_ptr()), // B_i [k_seq_len x q_seq_len]
// k_seq_len, // lead_dim
// k_seq_len*q_seq_len, // stride
// beta,
// static_cast<half*>(matmul2_results.data_ptr()),
// head_dim*attn_batches, // ldc
// head_dim, // stride c
// attn_batches); //p
//
// ////////////////////////////////////////// Recomputation Done /////////////////////////////////////
//
// // Output Linear Dgrad
// // C = alpha * op(A) op(B) + BetaC
// // op(A): mxk, op(B): kxn C: mxn
// TORCH_CUDABLAS_CHECK(hipblasGemmEx(handle, //
// HIPBLAS_OP_N, // no transpose
// HIPBLAS_OP_N, // no transpose
// embed_dim, // m
// batches_q, // n = bsz * len_q
// embed_dim, // k
// static_cast<const void*>(&alpha), // alpha = 1.0
// static_cast<const void*>(output_weights.data_ptr()), // A mxk
// HIP_R_16F, // data type
// embed_dim, // leading dimension of A (embed dim) (the rows)
// static_cast<const void*>(output_grads.data_ptr()), // B kxn
// HIP_R_16F, // data type
// embed_dim, // leading dimension of B (embed dim)
// static_cast<const void*>(&beta), // beta
// static_cast<void*>(output_lin_grads.data_ptr()), // C mxn
// HIP_R_16F, // data type
// embed_dim, // ldc
// HIP_R_32F, // compute type
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
//
// // Output Linear Wgrad
// TORCH_CUDABLAS_CHECK(hipblasGemmEx(handle,
// HIPBLAS_OP_N,
// HIPBLAS_OP_T,
// embed_dim,
// embed_dim,
// batches_q,
// static_cast<const void*>(&alpha),
// static_cast<const void*>(matmul2_results.data_ptr()),
// HIP_R_16F,
// embed_dim,
// static_cast<const void*>(output_grads.data_ptr()),
// HIP_R_16F,
// embed_dim,
// static_cast<const void*>(&beta),
// static_cast<void*>(output_weight_grads.data_ptr()),
// HIP_R_16F,
// embed_dim,
// HIP_R_32F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
//
// // MatMul2 Dgrad1
// gemm_switch_fp32accum( state,
// a_layout_t,
// b_layout_n,
// k_seq_len,
// q_seq_len,
// head_dim,
// alpha,
// static_cast<const half*>(v_lin_results_ptr),
// lead_dim_kv,
// batch_stride_kv, // 2 * head_dim
// static_cast<const half*>(output_lin_grads.data_ptr()),
// head_dim*attn_batches,
// head_dim,
// beta,
// static_cast<half*>(matmul2_grads.data_ptr()),
// k_seq_len,
// k_seq_len*q_seq_len,
// attn_batches);
//
// // Matmul2 Dgrad2
// gemm_switch_fp32accum( state,
// a_layout_n,
// b_layout_t,
// head_dim,
// k_seq_len,
// q_seq_len,
// alpha,
// static_cast<const half*>(output_lin_grads.data_ptr()),
// head_dim*attn_batches,
// head_dim,
// static_cast<const half*>(dropout_results.data_ptr()),
// k_seq_len,
// k_seq_len*q_seq_len,
// beta,
// v_lin_grads_ptr,
// lead_dim_kv,
// batch_stride_kv,
// attn_batches);
//
// dispatch_masked_scale_softmax_backward_recompute<half, half, float, false>(
// static_cast<half*>(matmul2_grads.data_ptr()),
// static_cast<half* const>(matmul2_grads.data_ptr()),
// reinterpret_cast<half const*>(softmax_results.data_ptr()),
// static_cast<uint8_t const*>(dropout_mask.data_ptr()),
// 1.0/(1.0-dropout_prob),
// k_seq_len,
// k_seq_len,
// attn_batches*q_seq_len,
// stream);
//
// // Matmul1 Dgrad1
// gemm_switch_fp32accum( state,
// a_layout_n,
// b_layout_n,
// head_dim,
// q_seq_len,
// k_seq_len,
// scale,
// k_lin_results_ptr,
// lead_dim_kv,
// batch_stride_kv,
// static_cast<half*>(matmul2_grads.data_ptr()),
// k_seq_len,
// k_seq_len*q_seq_len,
// beta,
// q_lin_grads_ptr,
// lead_dim_q,
// batch_stride_q,
// attn_batches);
//
// // Matmul1 Dgrad2
// gemm_switch_fp32accum( state,
// a_layout_n,
// b_layout_t,
// head_dim,
// k_seq_len,
// q_seq_len,
// scale,
// q_lin_results_ptr,
// lead_dim_q,
// batch_stride_q,
// static_cast<half*>(matmul2_grads.data_ptr()),
// k_seq_len,
// k_seq_len*q_seq_len,
// beta,
// k_lin_grads_ptr,
// lead_dim_kv,
// batch_stride_kv,
// attn_batches);
//
// // Input Linear Q Dgrad
// TORCH_CUDABLAS_CHECK(hipblasGemmEx(handle,
// HIPBLAS_OP_N,
// HIPBLAS_OP_N,
// embed_dim,
// batches_q,
// output_lin_q_dim,
// static_cast<const void*>(&alpha),
// static_cast<const void*>(input_weights_q.data_ptr()),
// HIP_R_16F,
// embed_dim,
// static_cast<const void*>(q_lin_grads_ptr),
// HIP_R_16F,
// output_lin_q_dim,
// static_cast<const void*>(&beta),
// static_cast<void*>(input_q_grads.data_ptr()),
// HIP_R_16F,
// embed_dim,
// HIP_R_32F,
// //CUBLAS_GEMM_ALGO10_TENSOR_OP));
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
//
// // Input Linear Q Wgrad
// TORCH_CUDABLAS_CHECK(hipblasGemmEx(handle,
// HIPBLAS_OP_N,
// HIPBLAS_OP_T,
// embed_dim,
// output_lin_q_dim,
// batches_q,
// static_cast<const void*>(&alpha),
// static_cast<const void*>(inputs_q.data_ptr()),
// HIP_R_16F,
// embed_dim,
// static_cast<const void*>(q_lin_grads_ptr),
// HIP_R_16F,
// output_lin_q_dim,
// static_cast<const void*>(&beta),
// static_cast<void*>(input_weight_q_grads.data_ptr()),
// HIP_R_16F,
// embed_dim,
// HIP_R_32F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
//
// // Input Linear KV Dgrad
// TORCH_CUDABLAS_CHECK(hipblasGemmEx(handle,
// HIPBLAS_OP_N,
// HIPBLAS_OP_N,
// embed_dim,
// batches_kv,
// output_lin_kv_dim,
// static_cast<const void*>(&alpha),
// static_cast<const void*>(input_weights_kv.data_ptr()),
// HIP_R_16F,
// embed_dim,
// static_cast<const void*>(k_lin_grads_ptr),
// HIP_R_16F,
// output_lin_kv_dim,
// static_cast<const void*>(&beta),
// static_cast<void*>(input_kv_grads.data_ptr()),
// HIP_R_16F,
// embed_dim,
// HIP_R_32F,
// //CUBLAS_GEMM_ALGO10_TENSOR_OP));
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
//
// // Input Linear KV Wgrad
// TORCH_CUDABLAS_CHECK(hipblasGemmEx(handle,
// HIPBLAS_OP_N,
// HIPBLAS_OP_T,
// embed_dim,
// output_lin_kv_dim,
// batches_kv,
// static_cast<const void*>(&alpha),
// static_cast<const void*>(inputs_kv.data_ptr()),
// HIP_R_16F,
// embed_dim,
// static_cast<const void*>(k_lin_grads_ptr),
// HIP_R_16F,
// output_lin_kv_dim,
// static_cast<const void*>(&beta),
// static_cast<void*>(input_weight_kv_grads.data_ptr()),
// HIP_R_16F,
// embed_dim,
// HIP_R_32F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
//
// return {
// input_q_grads,
// input_kv_grads,
// input_weight_q_grads,
// input_weight_kv_grads,
// output_weight_grads
// };
//}
} // end namespace cublas_gemmex
} // end namespace encdec
} // end namespace multihead_attn
|
8ffbe6c8958902c56e5409ec63f77e12fed33d5d.cu
|
#include <vector>
#include <math.h>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cuda_profiler_api.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <torch/extension.h>
#include "softmax_apex.h"
#include "gemm_blaslt.cuh"
#include "strided_batched_gemm_blaslt.cuh"
// symbol to be automatically resolved by PyTorch libs
// extern THCState *state;
namespace multihead_attn {
namespace encdec_bias {
namespace cublaslt {
std::vector<torch::Tensor> fwd_cuda(
bool is_training,
int heads,
torch::Tensor const& inputs_q,
torch::Tensor const& inputs_kv,
torch::Tensor const& input_weights_q,
torch::Tensor const& input_weights_kv,
torch::Tensor const& output_weights,
torch::Tensor const& input_biases_q,
torch::Tensor const& input_biases_kv,
torch::Tensor const& output_biases,
torch::Tensor const& pad_mask,
float dropout_prob,
torch::Tensor lt_workspace )
{
const int embed_dim = inputs_q.size(2);
const int sequences = inputs_q.size(1);
const int q_seq_len = inputs_q.size(0);
const int k_seq_len = inputs_kv.size(0);
const int batches_q = sequences * q_seq_len;
const int batches_kv = sequences * k_seq_len;
const int head_dim = embed_dim / heads;
const int output_lin_q_dim = embed_dim;
const int output_lin_kv_dim = 2 * embed_dim;
const int attn_batches = heads * sequences;
const int lead_dim_q = attn_batches * head_dim;
const int lead_dim_kv = attn_batches * 2 *head_dim;
const int batch_stride_q = head_dim;
const int batch_stride_kv = 2 * head_dim;
const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
const float alpha = 1.0;
const float beta_zero = 0.0;
// const float beta_one = 1.0;
const float scale = 1.0 / sqrt(static_cast<float>(head_dim));
// const half alpha = __float2half_rn(alpha);
// const half beta_zero = __float2half_rn(beta_zero);
// const half beta_one = __float2half_rn(beta_one);
// const half scale = __float2half_rn(scale);
// printf("Input kernel sizes: %d %d %d \n",
// inputs_kv.size(0), inputs_kv.size(1), inputs_kv.size(2));
// There is no reason to use more than one stream as every kernel is
// sequentially dependent
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
cublasSetStream(handle, stream);
// 3 Intermediate Results + Output (Note: dropout intermediates are generated by ATen library code)
auto act_options = inputs_q.options().requires_grad(false);
auto mask_options = act_options.dtype(torch::kUInt8);
torch::Tensor input_lin_q_results = torch::empty({q_seq_len, sequences, output_lin_q_dim}, act_options);
torch::Tensor input_lin_kv_results = torch::empty({k_seq_len, sequences, output_lin_kv_dim}, act_options);
torch::Tensor attn_scores = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_mask = torch::empty({attn_batches, q_seq_len, k_seq_len}, mask_options);
torch::Tensor matmul2_results = torch::empty({q_seq_len, attn_batches, head_dim}, act_options);
torch::Tensor outputs = torch::empty_like(inputs_q, act_options);
// Input Linear Results Pointers to Q, K, and V of the interleaved activations
void* q_lin_results_ptr = static_cast<void*>(input_lin_q_results.data_ptr());
void* k_lin_results_ptr = static_cast<void*>(input_lin_kv_results.data_ptr());
void* v_lin_results_ptr = static_cast<void*>(static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim);
void* dropout_results_ptr = static_cast<void*>(dropout_results.data_ptr());
// Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax)
void* attn_scores_ptr = static_cast<void*>(attn_scores.data_ptr());
void* lt_workspace_ptr = static_cast<void*>(lt_workspace.data_ptr());
// TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
//
// input_lin_q_results.copy_(input_biases_q);
// // Input Linear Q Fwd
// TORCH_CUDABLAS_CHECK(cublasGemmEx(handle,
// CUBLAS_OP_T, // A transpose
// CUBLAS_OP_N, // B wo/ transpose
// output_lin_q_dim, // embed_dim
// batches_q, // bsz x len_q
// embed_dim, // embed_dim
// static_cast<const void*>(&alpha),
// static_cast<const void*>(input_weights_q.data_ptr()), // weight emb_out x emb_in transposed
// CUDA_R_16F,
// embed_dim, // lda so A has size [lda x m] -> [embed_dim x output_lin_q_dim]
// static_cast<const void*>(inputs_q.data_ptr()), // input Q
// CUDA_R_16F,
// embed_dim, // ldb B has size [lda xn] -> [embed_dim x batches_q]
// static_cast<const void*>(&beta_one), // one
// q_lin_results_ptr, // C -> emb * B
// CUDA_R_16F,
// output_lin_q_dim, // ldc C [lda x n] -> [embed_dim x batches_q]
// CUDA_R_32F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
int cublas_status = 1;
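// Input Linear Q Fwd, done through gemm_bias_lt (cublasLt) so the bias add is fused into the GEMM;
// the plain cublasGemmEx path above is kept only as a reference.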
cublas_status = gemm_bias_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_T,
CUBLAS_OP_N,
output_lin_q_dim,
batches_q,
embed_dim,
&alpha, /* host pointer */
static_cast<const void*>(input_weights_q.data_ptr()),
embed_dim,
static_cast<const void*>(inputs_q.data_ptr()),
embed_dim,
&beta_zero, /* host pointer */
q_lin_results_ptr,
output_lin_q_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
true,
static_cast<const void*>(input_biases_q.data_ptr()));
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("GEMM Q forward failed with %d\n", cublas_status);
exit(0);
}
// input_lin_kv_results.copy_(input_biases_kv);
// // Input Linear KV Fwd
// TORCH_CUDABLAS_CHECK(cublasGemmEx(handle,
// CUBLAS_OP_T,
// CUBLAS_OP_N,
// output_lin_kv_dim,
// batches_kv,
// embed_dim,
// static_cast<const void*>(&alpha),
// static_cast<const void*>(input_weights_kv.data_ptr()),
// CUDA_R_16F,
// embed_dim,
// static_cast<const void*>(inputs_kv.data_ptr()),
// CUDA_R_16F,
// embed_dim,
// static_cast<const void*>(&beta_one),
// k_lin_results_ptr,
// CUDA_R_16F,
// output_lin_kv_dim,
// CUDA_R_32F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
cublas_status = gemm_bias_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_T,
CUBLAS_OP_N,
output_lin_kv_dim,
batches_kv,
embed_dim,
&alpha, /* host pointer */
static_cast<const void*>(input_weights_kv.data_ptr()),
embed_dim,
static_cast<const void*>(inputs_kv.data_ptr()),
embed_dim,
&beta_zero, /* host pointer */
k_lin_results_ptr,
output_lin_kv_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
true,
static_cast<const void*>(input_biases_kv.data_ptr()));
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("GEMM KV forward failed with %d\n", cublas_status);
exit(0);
}
// MatMul1 of Dot-Product Attention Plus scaling by 1/Sqrt(head size)
// TORCH_CUDABLAS_CHECK(cublasGemmStridedBatchedEx(handle,
// CUBLAS_OP_T,
// CUBLAS_OP_N,
// k_seq_len,
// q_seq_len,
// head_dim,
// static_cast<const void*>(&scale),
// static_cast<const void*>(k_lin_results_ptr), // A:
// CUDA_R_16F,
// lead_dim_kv, // lda
// batch_stride_kv, // stride A
// static_cast<const void*>(q_lin_results_ptr),
// CUDA_R_16F,
// lead_dim_q,
// batch_stride_q,
// static_cast<const void*>(&beta_zero),
// static_cast<void*>(attn_scores_ptr), // C
// CUDA_R_16F,
// k_seq_len,
// k_seq_len*q_seq_len,
// attn_batches,
// CUDA_R_32F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_T,
CUBLAS_OP_N,
k_seq_len,
q_seq_len,
head_dim,
&scale, /* host pointer */
static_cast<const void*>(k_lin_results_ptr),
lead_dim_kv,
batch_stride_kv,
static_cast<const void*>(q_lin_results_ptr),
lead_dim_q,
batch_stride_q,
&beta_zero, /* host pointer */
static_cast<void*>(attn_scores_ptr), // C
k_seq_len, // ldc
k_seq_len*q_seq_len, // stride c
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM QKV forward failed with %d\n", cublas_status);
exit(0);
}
// need to call padding from torch interface here.
// - infinity or - 10000?
attn_scores.view({sequences, heads, q_seq_len, k_seq_len}).masked_fill_(pad_mask,
-std::numeric_limits<float>::infinity());
bool softmax_success = false;
if (is_training && dropout_prob > 0.0f) {
// This function fuses softmax-dropout-pad (and dropout inplace)
softmax_success = dispatch_softmax_dropout<half, half, float>(
reinterpret_cast<half*>(dropout_results_ptr),
(is_training) ? reinterpret_cast<uint8_t*>(dropout_mask.data_ptr<uint8_t>()) : nullptr,
reinterpret_cast<const half*>(attn_scores_ptr),
dropout_elems,
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
1.0f-dropout_prob,
stream);
} else {
softmax_success = dispatch_softmax<half, half, float>(
reinterpret_cast<half*>(dropout_results_ptr), // this is actually softmax results, but making it consistent for the next function
reinterpret_cast<const half*>(attn_scores_ptr),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
stream); // pad batch strides
}
assert(softmax_success);
// Matmul2
// matrix kv has size len_k * batch_size * (2 * heads * head_dim)
// dropout results [bsz*heads, len_q, len_k]
// matmul2_results is [len_q x attn_batches x head_dim]
// TORCH_CUDABLAS_CHECK(cublasGemmStridedBatchedEx(handle,
// CUBLAS_OP_N,
// CUBLAS_OP_N,
// head_dim, // m
// q_seq_len, // n
// k_seq_len, // k
// static_cast<const void*>(&alpha),
// static_cast<const void*>(v_lin_results_ptr), // A:
// CUDA_R_16F,
// lead_dim_kv, // lda
// batch_stride_kv, // stride A
// static_cast<const void*>(dropout_results.data_ptr()),
// CUDA_R_16F,
// k_seq_len,
// k_seq_len*q_seq_len,
// static_cast<const void*>(&beta_zero),
// static_cast<void*>(matmul2_results.data_ptr()), // C
// CUDA_R_16F,
// head_dim*attn_batches,
// head_dim,
// attn_batches,
// CUDA_R_32F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
head_dim, // m
q_seq_len, // n
k_seq_len, // k
&alpha, /* host pointer */
static_cast<const void*>(v_lin_results_ptr), // A:
lead_dim_kv,
batch_stride_kv,
static_cast<const void*>(dropout_results.data_ptr()), // A:
k_seq_len,
k_seq_len*q_seq_len,
&beta_zero, /* host pointer */
static_cast<void*>(matmul2_results.data_ptr()), // C
head_dim*attn_batches,
head_dim,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM ATTNV forward failed with %d\n", cublas_status);
exit(0);
}
// outputs.copy_(output_biases);
// // Output Linear
// TORCH_CUDABLAS_CHECK(cublasGemmEx(handle,
// CUBLAS_OP_T,
// CUBLAS_OP_N,
// embed_dim,
// batches_q,
// embed_dim,
// static_cast<const void*>(&alpha),
// static_cast<const void*>(output_weights.data_ptr()),
// CUDA_R_16F,
// embed_dim,
// static_cast<const void*>(matmul2_results.data_ptr()),
// CUDA_R_16F,
// embed_dim,
// static_cast<const void*>(&beta_one),
// static_cast<void*>(outputs.data_ptr()),
// CUDA_R_16F,
// embed_dim,
// CUDA_R_32F,
// //CUBLAS_GEMM_ALGO1_TENSOR_OP));
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
cublas_status = gemm_bias_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_T,
CUBLAS_OP_N,
embed_dim,
batches_q,
embed_dim,
&alpha, /* host pointer */
static_cast<const void*>(output_weights.data_ptr()),
embed_dim,
static_cast<const void*>(matmul2_results.data_ptr()),
embed_dim,
&beta_zero, /* host pointer */
static_cast<void*>(outputs.data_ptr()),
embed_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
true,
static_cast<const void*>(output_biases.data_ptr()));
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("GEMM output forward failed with %d\n", cublas_status);
exit(0);
}
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
return {
input_lin_q_results,
input_lin_kv_results,
attn_scores,
dropout_results,
dropout_mask,
matmul2_results,
outputs
};
}
std::vector<torch::Tensor> fwd_compact_cuda(
bool is_training,
int heads,
torch::Tensor const& input_lin_q_results,
torch::Tensor const& input_lin_kv_results,
torch::Tensor const& pad_mask,
float dropout_prob,
torch::Tensor lt_workspace )
{
const int embed_dim = input_lin_q_results.size(2);
const int sequences = input_lin_q_results.size(1);
const int q_seq_len = input_lin_q_results.size(0);
const int k_seq_len = input_lin_kv_results.size(0);
const int batches_q = sequences * q_seq_len;
const int batches_kv = sequences * k_seq_len;
const int head_dim = embed_dim / heads;
const int output_lin_q_dim = embed_dim;
const int output_lin_kv_dim = 2 * embed_dim;
const int attn_batches = heads * sequences;
const int lead_dim_q = attn_batches * head_dim;
const int lead_dim_kv = attn_batches * 2 *head_dim;
const int batch_stride_q = head_dim;
const int batch_stride_kv = 2 * head_dim;
const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
const float alpha = 1.0;
const float beta_zero = 0.0;
const float scale = 1.0 / sqrt(static_cast<float>(head_dim));
// There is no reason to use more than one stream as every kernel is
// sequentially dependent
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
cublasSetStream(handle, stream);
// 3 Intermediate Results + Output (Note: dropout intermediates are generated by ATen library code)
auto act_options = input_lin_q_results.options().requires_grad(false);
auto mask_options = act_options.dtype(torch::kUInt8);
torch::Tensor attn_scores = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_mask = torch::empty({attn_batches, q_seq_len, k_seq_len}, mask_options);
torch::Tensor matmul2_results = torch::empty({q_seq_len, attn_batches, head_dim}, act_options);
// Input Linear Results Pointers to Q, K, and V of the interleaved activations
void* q_lin_results_ptr = static_cast<void*>(input_lin_q_results.data_ptr());
void* k_lin_results_ptr = static_cast<void*>(input_lin_kv_results.data_ptr());
void* v_lin_results_ptr = static_cast<void*>(static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim);
void* dropout_results_ptr = static_cast<void*>(dropout_results.data_ptr());
// Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax)
void* attn_scores_ptr = static_cast<void*>(attn_scores.data_ptr());
void* lt_workspace_ptr = static_cast<void*>(lt_workspace.data_ptr());
int cublas_status = 1;
// MatMul1 of Dot-Product Attention Plus scaling by 1/Sqrt(head size)
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_T,
CUBLAS_OP_N,
k_seq_len,
q_seq_len,
head_dim,
&scale, /* host pointer */
static_cast<const void*>(k_lin_results_ptr),
lead_dim_kv,
batch_stride_kv,
static_cast<const void*>(q_lin_results_ptr),
lead_dim_q,
batch_stride_q,
&beta_zero, /* host pointer */
static_cast<void*>(attn_scores_ptr), // C
k_seq_len, // ldc
k_seq_len*q_seq_len, // stride c
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM QKV forward failed with %d\n", cublas_status);
exit(0);
}
// need to call padding from torch interface here.
// - infinity or - 10000?
attn_scores.view({sequences, heads, q_seq_len, k_seq_len}).masked_fill_(pad_mask,
-std::numeric_limits<float>::infinity());
bool softmax_success = false;
if (is_training && dropout_prob > 0.0f) {
// This function fuses softmax-dropout-pad (and dropout inplace)
softmax_success = dispatch_softmax_dropout<half, half, float>(
reinterpret_cast<half*>(dropout_results_ptr),
(is_training) ? reinterpret_cast<uint8_t*>(dropout_mask.data_ptr<uint8_t>()) : nullptr,
reinterpret_cast<const half*>(attn_scores_ptr),
dropout_elems,
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
1.0f-dropout_prob,
stream);
} else {
softmax_success = dispatch_softmax<half, half, float>(
reinterpret_cast<half*>(dropout_results_ptr), // this is actually softmax results, but making it consistent for the next function
reinterpret_cast<const half*>(attn_scores_ptr),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
stream); // pad batch strides
}
assert(softmax_success);
// Matmul2
// matrix kv has size len_k * batch_size * (2 * heads * head_dim)
// dropout results [bsz*heads, len_q, len_k]
// matmul2_results is [len_q x attn_batches x head_dim]
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
head_dim, // m
q_seq_len, // n
k_seq_len, // k
&alpha, /* host pointer */
static_cast<const void*>(v_lin_results_ptr), // A:
lead_dim_kv,
batch_stride_kv,
static_cast<const void*>(dropout_results.data_ptr()), // A:
k_seq_len,
k_seq_len*q_seq_len,
&beta_zero, /* host pointer */
static_cast<void*>(matmul2_results.data_ptr()), // C
head_dim*attn_batches,
head_dim,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM ATTNV forward failed with %d\n", cublas_status);
exit(0);
}
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
return {
attn_scores,
dropout_results,
dropout_mask,
matmul2_results,
};
}
std::vector<torch::Tensor> bwd_cuda_recompute(
int heads,
torch::Tensor const& output_grads,
torch::Tensor const& inputs_q,
torch::Tensor const& inputs_kv,
torch::Tensor const& input_weights_q,
torch::Tensor const& input_weights_kv,
torch::Tensor const& input_biases_q,
torch::Tensor const& input_biases_kv,
torch::Tensor const& output_weights,
torch::Tensor const& dropout_mask,
torch::Tensor const& pad_mask,
float dropout_prob,
torch::Tensor lt_workspace
)
{
const int embed_dim = inputs_q.size(2);
const int sequences = inputs_q.size(1);
const int q_seq_len = inputs_q.size(0);
const int k_seq_len = inputs_kv.size(0);
const int batches_q = sequences * q_seq_len;
const int batches_kv = sequences * k_seq_len;
const int head_dim = embed_dim / heads;
const int output_lin_q_dim = embed_dim;
const int output_lin_kv_dim = 2 * embed_dim;
const int attn_batches = heads * sequences;
const int lead_dim_q = attn_batches * head_dim;
const int lead_dim_kv = attn_batches * 2 *head_dim;
const int batch_stride_q = head_dim;
const int batch_stride_kv = 2 * head_dim;
const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
const float alpha = 1.0;
const float beta = 0.0;
const float beta_zero = 0.0;
const float scale = 1.0 / sqrt(static_cast<float>(head_dim));
// TODO: Streams can be used in Backprop but I haven't added more than one
// in my first attempt to create the code
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
cublasSetStream(handle, stream);
// TODO: recompute the output
auto act_options = inputs_q.options().requires_grad(false);
torch::Tensor input_lin_q_results = torch::empty({q_seq_len, sequences, output_lin_q_dim}, act_options);
torch::Tensor input_lin_kv_results = torch::empty({k_seq_len, sequences, output_lin_kv_dim}, act_options);
torch::Tensor attn_scores = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor matmul2_results = torch::empty({q_seq_len, attn_batches, head_dim}, act_options);
// Input Linear Results Pointers to Q, K, and V of the intermediate activations
void* q_lin_results_ptr = static_cast<void*>(input_lin_q_results.data_ptr());
void* k_lin_results_ptr = static_cast<void*>(input_lin_kv_results.data_ptr());
void* v_lin_results_ptr = static_cast<void*>(static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim);
void* dropout_results_ptr = static_cast<void*>(dropout_results.data_ptr());
// Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax)
void* attn_scores_ptr = static_cast<void*>(attn_scores.data_ptr());
void* lt_workspace_ptr = static_cast<void*>(lt_workspace.data_ptr());
int cublas_status = 1;
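// Recompute Input Linear Q Fwd: q_lin = W_q^T * x_q + b_q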
cublas_status = gemm_bias_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_T,
CUBLAS_OP_N,
output_lin_q_dim,
batches_q,
embed_dim,
&alpha, /* host pointer */
static_cast<const void*>(input_weights_q.data_ptr()),
embed_dim,
static_cast<const void*>(inputs_q.data_ptr()),
embed_dim,
&beta_zero, /* host pointer */
q_lin_results_ptr,
output_lin_q_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
true,
static_cast<const void*>(input_biases_q.data_ptr()));
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("GEMM Q Recompute forward failed with %d\n", cublas_status);
exit(0);
}
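// Recompute Input Linear KV Fwd: packed [K; V] projections = W_kv^T * x_kv + b_kv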
cublas_status = gemm_bias_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_T,
CUBLAS_OP_N,
output_lin_kv_dim,
batches_kv,
embed_dim,
&alpha, /* host pointer */
static_cast<const void*>(input_weights_kv.data_ptr()),
embed_dim,
static_cast<const void*>(inputs_kv.data_ptr()),
embed_dim,
&beta_zero, /* host pointer */
k_lin_results_ptr,
output_lin_kv_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
true,
static_cast<const void*>(input_biases_kv.data_ptr()));
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("GEMM KV forward failed with %d\n", cublas_status);
exit(0);
}
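// Recompute MatMul1 of dot-product attention, scaled by 1/sqrt(head_dim): attn_scores = scale * K^T * Q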
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_T,
CUBLAS_OP_N,
k_seq_len,
q_seq_len,
head_dim,
&scale, /* host pointer */
static_cast<const void*>(k_lin_results_ptr),
lead_dim_kv,
batch_stride_kv,
static_cast<const void*>(q_lin_results_ptr),
lead_dim_q,
batch_stride_q,
&beta_zero, /* host pointer */
static_cast<void*>(attn_scores_ptr), // C
k_seq_len, // ldc
k_seq_len*q_seq_len, // stride c
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM Recompute QKV forward failed with %d\n", cublas_status);
exit(0);
}
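// Re-apply the padding mask before recomputing the softmax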
attn_scores.view({sequences, heads, q_seq_len, k_seq_len}).masked_fill_(pad_mask,
-std::numeric_limits<float>::infinity());
bool softmax_forward_success = false;
if (dropout_prob > 0.0f) {
// This function fuses softmax-dropout-pad (and dropout inplace)
softmax_forward_success = dispatch_softmax_dropout_presampled<half, half, float>(
reinterpret_cast<half*>(dropout_results_ptr),
reinterpret_cast<const uint8_t*>(dropout_mask.data_ptr<uint8_t>()),
reinterpret_cast<const half*>(attn_scores_ptr),
dropout_elems,
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
1.0f-dropout_prob,
stream);
} else {
softmax_forward_success = dispatch_softmax<half, half, float>(
reinterpret_cast<half*>(dropout_results_ptr), // this is actually softmax results, but making it consistent for the next function
reinterpret_cast<const half*>(attn_scores_ptr),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
stream); // pad batch strides
}
assert(softmax_forward_success);
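// Recompute Matmul2: matmul2_results = attn_probs x V -> [len_q x attn_batches x head_dim]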
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
head_dim, // m
q_seq_len, // n
k_seq_len, // k
&alpha, /* host pointer */
static_cast<const void*>(v_lin_results_ptr), // A:
lead_dim_kv,
batch_stride_kv,
static_cast<const void*>(dropout_results.data_ptr()), // B: attention probs
k_seq_len,
k_seq_len*q_seq_len,
&beta_zero, /* host pointer */
static_cast<void*>(matmul2_results.data_ptr()), // C
head_dim*attn_batches,
head_dim,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM Recompute ATTNV forward failed with %d\n", cublas_status);
exit(0);
}
// Don't need to compute the output again :)
// Recompute finished - now compute gradients
// Output Tensor Allocations
torch::Tensor input_q_grads = torch::empty_like(inputs_q);
torch::Tensor input_kv_grads = torch::empty_like(inputs_kv);
torch::Tensor input_weight_q_grads = torch::empty_like(input_weights_q);
torch::Tensor input_weight_kv_grads = torch::empty_like(input_weights_kv);
torch::Tensor output_weight_grads = torch::empty_like(output_weights);
at::Tensor input_bias_q_grads = torch::empty({output_lin_q_dim}, inputs_q.type());
at::Tensor input_bias_kv_grads = torch::empty({output_lin_kv_dim}, inputs_kv.type());
at::Tensor output_biases_grads = torch::empty({embed_dim}, inputs_q.type());
// Intermediate Tensor Allocations
at::Tensor output_lin_grads = torch::empty_like(matmul2_results);
at::Tensor matmul2_grads = torch::empty_like(dropout_results);
at::Tensor input_lin_q_output_grads = torch::empty_like(input_lin_q_results);
at::Tensor input_lin_kv_output_grads = torch::empty_like(input_lin_kv_results);
// auto q_lin_results_ptr = static_cast<half*>(input_lin_q_results.data_ptr());
// auto k_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr());
// auto v_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim;
auto q_lin_grads_ptr = static_cast<half*>(input_lin_q_output_grads.data_ptr());
auto k_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr());
auto v_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr()) + head_dim;
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
// Output Linear Dgrad
// C = alpha * op(A) * op(B) + beta * C
// op(A): mxk, op(B): kxn C: mxn
cublas_status = gemm_bias_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
embed_dim,
batches_q,
embed_dim,
&alpha, /* host pointer */
static_cast<const void*>(output_weights.data_ptr()),
embed_dim,
static_cast<const void*>(output_grads.data_ptr()),
embed_dim,
&beta, /* host pointer */
static_cast<void*>(output_lin_grads.data_ptr()),
embed_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
false,
static_cast<const void*>(nullptr));
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("GEMM output lin grad backward failed with %d\n", cublas_status);
exit(0);
}
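// Output Linear Wgrad (output bias grad fused via the bgradb epilogue)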
cublas_status = gemm_bgradb_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_N,
CUBLAS_OP_T,
embed_dim,
embed_dim,
batches_q,
&alpha, /* host pointer */
static_cast<const void*>(matmul2_results.data_ptr()),
embed_dim,
static_cast<const void*>(output_grads.data_ptr()),
embed_dim,
&beta, /* host pointer */
static_cast<void*>(output_weight_grads.data_ptr()),
embed_dim,
lt_workspace_ptr,
1 << 22,
stream,
true,
static_cast<void*>(output_biases_grads.data_ptr()));
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("GEMM output backward failed with %d\n", cublas_status);
exit(0);
}
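// MatMul2 Dgrad1: d(attn_probs) = V^T * d(matmul2_results)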
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_T,
CUBLAS_OP_N,
k_seq_len,
q_seq_len,
head_dim,
&alpha, /* host pointer */
static_cast<const void*>(v_lin_results_ptr), // A:
lead_dim_kv,
batch_stride_kv,
static_cast<const void*>(output_lin_grads.data_ptr()),
head_dim*attn_batches,
head_dim,
&beta, /* host pointer */
static_cast<void*>(matmul2_grads.data_ptr()), // C
k_seq_len,
k_seq_len*q_seq_len,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM backward 1 failed with %d\n", cublas_status);
exit(0);
}
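// MatMul2 Dgrad2: dV = d(matmul2_results) * attn_probs^T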
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_N,
CUBLAS_OP_T,
head_dim,
k_seq_len,
q_seq_len,
&alpha, /* host pointer */
static_cast<const void*>(output_lin_grads.data_ptr()), // A:
head_dim*attn_batches,
head_dim,
static_cast<const void*>(dropout_results.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
&beta, /* host pointer */
static_cast<void*>(v_lin_grads_ptr), // C
lead_dim_kv,
batch_stride_kv,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM backward 2 failed with %d\n", cublas_status);
exit(0);
}
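// Softmax (and dropout) backward; the fused kernel recomputes the softmax output from attn_scores when dropout was applied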
if ( dropout_prob > 0.0f) {
dispatch_softmax_dropout_backward_recompute<half, half, float, false>(
static_cast<half*>(matmul2_grads.data_ptr()),
static_cast<half* const>(matmul2_grads.data_ptr()),
reinterpret_cast<half const*>(attn_scores.data_ptr()), // need this to recompute softmax
//reinterpret_cast<half const*>(pad_mask.data_ptr()),
static_cast<uint8_t const*>(dropout_mask.data_ptr()),
1.0/(1.0-dropout_prob),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
stream);
} else {
dispatch_softmax_backward_norecompute<half, half, float, false>(
static_cast<half*>(matmul2_grads.data_ptr()),
static_cast<half* const>(matmul2_grads.data_ptr()),
reinterpret_cast<half const*>(dropout_results.data_ptr()),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
stream);
}
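// MatMul1 Dgrad1: dQ = scale * K * d(attn_scores)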
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
head_dim,
q_seq_len,
k_seq_len,
&scale, /* host pointer */
static_cast<const void*>(k_lin_results_ptr), // A:
lead_dim_kv,
batch_stride_kv,
static_cast<const void*>(matmul2_grads.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
&beta, /* host pointer */
static_cast<void*>(q_lin_grads_ptr), // C
lead_dim_q,
batch_stride_q,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM backward 3 failed with %d\n", cublas_status);
exit(0);
}
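// MatMul1 Dgrad2: dK = scale * Q * d(attn_scores)^T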
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_N,
CUBLAS_OP_T,
head_dim,
k_seq_len,
q_seq_len,
&scale, /* host pointer */
static_cast<const void*>(q_lin_results_ptr), // A:
lead_dim_q,
batch_stride_q,
static_cast<const void*>(matmul2_grads.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
&beta, /* host pointer */
static_cast<void*>(k_lin_grads_ptr), // C
lead_dim_kv,
batch_stride_kv,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM backward 4 failed with %d\n", cublas_status);
exit(0);
}
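// Input Linear Q Dgrad: d(x_q) = W_q * d(q_lin)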
cublas_status = gemm_bias_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
embed_dim,
batches_q,
output_lin_q_dim,
&alpha, /* host pointer */
static_cast<const void*>(input_weights_q.data_ptr()),
embed_dim,
static_cast<const void*>(q_lin_grads_ptr),
output_lin_q_dim,
&beta, /* host pointer */
static_cast<void*>(input_q_grads.data_ptr()),
embed_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
false,
static_cast<const void*>(nullptr));
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("GEMM output backward final input q failed with %d\n", cublas_status);
exit(0);
}
//
cublas_status = gemm_bgradb_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_N,
CUBLAS_OP_T,
embed_dim,
output_lin_q_dim,
batches_q,
&alpha, /* host pointer */
static_cast<const void*>(inputs_q.data_ptr()),
embed_dim,
reinterpret_cast<const void*>(q_lin_grads_ptr),
output_lin_q_dim,
&beta, /* host pointer */
static_cast<void*>(input_weight_q_grads.data_ptr()),
embed_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
true,
static_cast<void*>(input_bias_q_grads.data_ptr()));
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("GEMM input backward 1 failed with %d\n", cublas_status);
exit(0);
}
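// Input Linear KV Dgrad: d(x_kv) = W_kv * d(kv_lin)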
cublas_status = gemm_bias_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
embed_dim,
batches_kv,
output_lin_kv_dim,
&alpha, /* host pointer */
static_cast<const void*>(input_weights_kv.data_ptr()),
embed_dim,
static_cast<const void*>(k_lin_grads_ptr),
output_lin_kv_dim,
&beta, /* host pointer */
static_cast<void*>(input_kv_grads.data_ptr()),
embed_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
false,
static_cast<const void*>(nullptr));
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("GEMM output backward final input kv failed with %d\n", cublas_status);
exit(0);
}
cublas_status = gemm_bgradb_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_N,
CUBLAS_OP_T,
embed_dim,
output_lin_kv_dim,
batches_kv,
&alpha, /* host pointer */
static_cast<const void*>(inputs_kv.data_ptr()),
embed_dim,
reinterpret_cast<const void*>(k_lin_grads_ptr),
output_lin_kv_dim,
&beta, /* host pointer */
static_cast<void*>(input_weight_kv_grads.data_ptr()),
embed_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
true,
static_cast<void*>(input_bias_kv_grads.data_ptr()));
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("GEMM input backward 2 failed with %d\n", cublas_status);
exit(0);
}
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
return {
input_q_grads,
input_kv_grads,
input_weight_q_grads,
input_weight_kv_grads,
output_weight_grads,
input_bias_q_grads,
input_bias_kv_grads,
output_biases_grads
};
}
std::vector<torch::Tensor> bwd_cuda(
int heads,
torch::Tensor const& output_grads,
torch::Tensor const& matmul2_results,
torch::Tensor const& dropout_results,
torch::Tensor const& attn_scores,
// const half* pad_mask,
torch::Tensor const& input_lin_q_results,
torch::Tensor const& input_lin_kv_results,
torch::Tensor const& inputs_q,
torch::Tensor const& inputs_kv,
torch::Tensor const& input_weights_q,
torch::Tensor const& input_weights_kv,
torch::Tensor const& output_weights,
torch::Tensor const& dropout_mask,
float dropout_prob,
torch::Tensor lt_workspace
)
{
const int embed_dim = inputs_q.size(2);
const int sequences = inputs_q.size(1);
const int q_seq_len = inputs_q.size(0);
const int k_seq_len = inputs_kv.size(0);
const int batches_q = sequences * q_seq_len;
const int batches_kv = sequences * k_seq_len;
const int head_dim = embed_dim / heads;
const int output_lin_q_dim = embed_dim;
const int output_lin_kv_dim = 2 * embed_dim;
const int attn_batches = heads * sequences;
const int lead_dim_q = attn_batches * head_dim;
const int lead_dim_kv = attn_batches * 2 *head_dim;
const int batch_stride_q = head_dim;
const int batch_stride_kv = 2 * head_dim;
// const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
const float alpha = 1.0;
const float beta = 0.0;
const float scale = 1.0 / sqrt(static_cast<float>(head_dim));
// TODO: Streams can be used in Backprop but I haven't added more than one
// in my first attempt to create the code
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
cublasSetStream(handle, stream);
// Output Tensor Allocations
torch::Tensor input_q_grads = torch::empty_like(inputs_q);
torch::Tensor input_kv_grads = torch::empty_like(inputs_kv);
torch::Tensor input_weight_q_grads = torch::empty_like(input_weights_q);
torch::Tensor input_weight_kv_grads = torch::empty_like(input_weights_kv);
torch::Tensor output_weight_grads = torch::empty_like(output_weights);
at::Tensor input_bias_q_grads = torch::empty({output_lin_q_dim}, inputs_q.type());
at::Tensor input_bias_kv_grads = torch::empty({output_lin_kv_dim}, inputs_kv.type());
at::Tensor output_biases_grads = torch::empty({embed_dim}, inputs_q.type());
// Intermediate Tensor Allocations
at::Tensor output_lin_grads = torch::empty_like(matmul2_results);
at::Tensor matmul2_grads = torch::empty_like(dropout_results);
at::Tensor input_lin_q_output_grads = torch::empty_like(input_lin_q_results);
at::Tensor input_lin_kv_output_grads = torch::empty_like(input_lin_kv_results);
auto q_lin_results_ptr = static_cast<half*>(input_lin_q_results.data_ptr());
auto k_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr());
auto v_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim;
auto q_lin_grads_ptr = static_cast<half*>(input_lin_q_output_grads.data_ptr());
auto k_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr());
auto v_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr()) + head_dim;
void* lt_workspace_ptr = static_cast<void*>(lt_workspace.data_ptr());
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
// Output Linear Dgrad
// C = alpha * op(A) * op(B) + beta * C
// op(A): mxk, op(B): kxn C: mxn
int cublas_status = 1;
cublas_status = gemm_bias_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
embed_dim,
batches_q,
embed_dim,
&alpha, /* host pointer */
static_cast<const void*>(output_weights.data_ptr()),
embed_dim,
static_cast<const void*>(output_grads.data_ptr()),
embed_dim,
&beta, /* host pointer */
static_cast<void*>(output_lin_grads.data_ptr()),
embed_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
false,
static_cast<const void*>(nullptr));
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("GEMM output lin grad backward failed with %d\n", cublas_status);
exit(0);
}
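// Output Linear Wgrad (output bias grad fused via the bgradb epilogue)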
cublas_status = gemm_bgradb_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_N,
CUBLAS_OP_T,
embed_dim,
embed_dim,
batches_q,
&alpha, /* host pointer */
static_cast<const void*>(matmul2_results.data_ptr()),
embed_dim,
static_cast<const void*>(output_grads.data_ptr()),
embed_dim,
&beta, /* host pointer */
static_cast<void*>(output_weight_grads.data_ptr()),
embed_dim,
lt_workspace_ptr,
1 << 22,
stream,
true,
static_cast<void*>(output_biases_grads.data_ptr()));
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("GEMM output backward failed with %d\n", cublas_status);
exit(0);
}
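// MatMul2 Dgrad1: d(attn_probs) = V^T * d(matmul2_results)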
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_T,
CUBLAS_OP_N,
k_seq_len,
q_seq_len,
head_dim,
&alpha, /* host pointer */
static_cast<const void*>(v_lin_results_ptr), // A:
lead_dim_kv,
batch_stride_kv,
static_cast<const void*>(output_lin_grads.data_ptr()),
head_dim*attn_batches,
head_dim,
&beta, /* host pointer */
static_cast<void*>(matmul2_grads.data_ptr()), // C
k_seq_len,
k_seq_len*q_seq_len,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM backward 1 failed with %d\n", cublas_status);
exit(0);
}
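// MatMul2 Dgrad2: dV = d(matmul2_results) * attn_probs^T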
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_N,
CUBLAS_OP_T,
head_dim,
k_seq_len,
q_seq_len,
&alpha, /* host pointer */
static_cast<const void*>(output_lin_grads.data_ptr()), // A:
head_dim*attn_batches,
head_dim,
static_cast<const void*>(dropout_results.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
&beta, /* host pointer */
static_cast<void*>(v_lin_grads_ptr), // C
lead_dim_kv,
batch_stride_kv,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM backward 2 failed with %d\n", cublas_status);
exit(0);
}
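// Softmax (and dropout) backward on the attention probabilities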
if ( dropout_prob > 0.0f) {
dispatch_softmax_dropout_backward_recompute<half, half, float, false>(
static_cast<half*>(matmul2_grads.data_ptr()),
static_cast<half* const>(matmul2_grads.data_ptr()),
reinterpret_cast<half const*>(attn_scores.data_ptr()), // need this to recompute softmax
//reinterpret_cast<half const*>(pad_mask.data_ptr()),
static_cast<uint8_t const*>(dropout_mask.data_ptr()),
1.0/(1.0-dropout_prob),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
stream);
} else {
dispatch_softmax_backward_norecompute<half, half, float, false>(
static_cast<half*>(matmul2_grads.data_ptr()),
static_cast<half* const>(matmul2_grads.data_ptr()),
reinterpret_cast<half const*>(dropout_results.data_ptr()),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
stream);
}
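// MatMul1 Dgrad1: dQ = scale * K * d(attn_scores)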
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
head_dim,
q_seq_len,
k_seq_len,
&scale, /* host pointer */
static_cast<const void*>(k_lin_results_ptr), // A:
lead_dim_kv,
batch_stride_kv,
static_cast<const void*>(matmul2_grads.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
&beta, /* host pointer */
static_cast<void*>(q_lin_grads_ptr), // C
lead_dim_q,
batch_stride_q,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM backward 3 failed with %d\n", cublas_status);
exit(0);
}
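// MatMul1 Dgrad2: dK = scale * Q * d(attn_scores)^T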
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_N,
CUBLAS_OP_T,
head_dim,
k_seq_len,
q_seq_len,
&scale, /* host pointer */
static_cast<const void*>(q_lin_results_ptr), // A:
lead_dim_q,
batch_stride_q,
static_cast<const void*>(matmul2_grads.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
&beta, /* host pointer */
static_cast<void*>(k_lin_grads_ptr), // C
lead_dim_kv,
batch_stride_kv,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM backward 4 failed with %d\n", cublas_status);
exit(0);
}
cublas_status = gemm_bias_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
embed_dim,
batches_q,
output_lin_q_dim,
&alpha, /* host pointer */
static_cast<const void*>(input_weights_q.data_ptr()),
embed_dim,
static_cast<const void*>(q_lin_grads_ptr),
output_lin_q_dim,
&beta, /* host pointer */
static_cast<void*>(input_q_grads.data_ptr()),
embed_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
false,
static_cast<const void*>(nullptr));
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("GEMM output backward final input q failed with %d\n", cublas_status);
exit(0);
}
//
cublas_status = gemm_bgradb_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_N,
CUBLAS_OP_T,
embed_dim,
output_lin_q_dim,
batches_q,
&alpha, /* host pointer */
static_cast<const void*>(inputs_q.data_ptr()),
embed_dim,
reinterpret_cast<const void*>(q_lin_grads_ptr),
output_lin_q_dim,
&beta, /* host pointer */
static_cast<void*>(input_weight_q_grads.data_ptr()),
embed_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
true,
static_cast<void*>(input_bias_q_grads.data_ptr()));
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("GEMM input backward 1 failed with %d\n", cublas_status);
exit(0);
}
cublas_status = gemm_bias_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
embed_dim,
batches_kv,
output_lin_kv_dim,
&alpha, /* host pointer */
static_cast<const void*>(input_weights_kv.data_ptr()),
embed_dim,
static_cast<const void*>(k_lin_grads_ptr),
output_lin_kv_dim,
&beta, /* host pointer */
static_cast<void*>(input_kv_grads.data_ptr()),
embed_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
false,
static_cast<const void*>(nullptr));
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("GEMM output backward final input kv failed with %d\n", cublas_status);
exit(0);
}
cublas_status = gemm_bgradb_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_N,
CUBLAS_OP_T,
embed_dim,
output_lin_kv_dim,
batches_kv,
&alpha, /* host pointer */
static_cast<const void*>(inputs_kv.data_ptr()),
embed_dim,
reinterpret_cast<const void*>(k_lin_grads_ptr),
output_lin_kv_dim,
&beta, /* host pointer */
static_cast<void*>(input_weight_kv_grads.data_ptr()),
embed_dim,
lt_workspace_ptr, // TODO: get lt_workspace
1 << 22,
stream,
true,
static_cast<void*>(input_bias_kv_grads.data_ptr()));
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("GEMM input backward 2 failed with %d\n", cublas_status);
exit(0);
}
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
return {
input_q_grads,
input_kv_grads,
input_weight_q_grads,
input_weight_kv_grads,
output_weight_grads,
input_bias_q_grads,
input_bias_kv_grads,
output_biases_grads
};
}
std::vector<torch::Tensor> bwd_compact_cuda(
int heads,
torch::Tensor const& output_lin_grads,
torch::Tensor const& dropout_results,
torch::Tensor const& attn_scores,
torch::Tensor const& input_lin_q_results,
torch::Tensor const& input_lin_kv_results,
torch::Tensor const& dropout_mask,
float dropout_prob,
torch::Tensor lt_workspace
)
{
const int embed_dim = input_lin_q_results.size(2);
const int sequences = input_lin_q_results.size(1);
const int q_seq_len = input_lin_q_results.size(0);
const int k_seq_len = input_lin_kv_results.size(0);
const int batches_q = sequences * q_seq_len;
const int batches_kv = sequences * k_seq_len;
const int head_dim = embed_dim / heads;
const int output_lin_q_dim = embed_dim;
const int output_lin_kv_dim = 2 * embed_dim;
const int attn_batches = heads * sequences;
const int lead_dim_q = attn_batches * head_dim;
const int lead_dim_kv = attn_batches * 2 *head_dim;
const int batch_stride_q = head_dim;
const int batch_stride_kv = 2 * head_dim;
// const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
const float alpha = 1.0;
const float beta = 0.0;
const float scale = 1.0 / sqrt(static_cast<float>(head_dim));
// TODO: Streams can be used in Backprop but I haven't added more than one
// in my first attempt to create the code
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
cublasSetStream(handle, stream);
// Output Tensor Allocations
// Intermediate Tensor Allocations
at::Tensor matmul2_grads = torch::empty_like(dropout_results);
at::Tensor input_lin_q_output_grads = torch::empty_like(input_lin_q_results);
at::Tensor input_lin_kv_output_grads = torch::empty_like(input_lin_kv_results);
auto q_lin_results_ptr = static_cast<half*>(input_lin_q_results.data_ptr());
auto k_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr());
auto v_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim;
auto q_lin_grads_ptr = static_cast<half*>(input_lin_q_output_grads.data_ptr());
auto k_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr());
auto v_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr()) + head_dim;
void* lt_workspace_ptr = static_cast<void*>(lt_workspace.data_ptr());
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
int cublas_status = 1;
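// MatMul2 Dgrad1: d(attn_probs) = V^T * d(context), where d(context) arrives as output_lin_grads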
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_T,
CUBLAS_OP_N,
k_seq_len,
q_seq_len,
head_dim,
&alpha, /* host pointer */
static_cast<const void*>(v_lin_results_ptr), // A:
lead_dim_kv,
batch_stride_kv,
static_cast<const void*>(output_lin_grads.data_ptr()),
head_dim*attn_batches,
head_dim,
&beta, /* host pointer */
static_cast<void*>(matmul2_grads.data_ptr()), // C
k_seq_len,
k_seq_len*q_seq_len,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM backward 1 failed with %d\n", cublas_status);
exit(0);
}
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_N,
CUBLAS_OP_T,
head_dim,
k_seq_len,
q_seq_len,
&alpha, /* host pointer */
static_cast<const void*>(output_lin_grads.data_ptr()), // A:
head_dim*attn_batches,
head_dim,
static_cast<const void*>(dropout_results.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
&beta, /* host pointer */
static_cast<void*>(v_lin_grads_ptr), // C
lead_dim_kv,
batch_stride_kv,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM backward 2 failed with %d\n", cublas_status);
exit(0);
}
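// Softmax (and dropout) backward on the attention probabilities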
if ( dropout_prob > 0.0f) {
dispatch_softmax_dropout_backward_recompute<half, half, float, false>(
static_cast<half*>(matmul2_grads.data_ptr()),
static_cast<half* const>(matmul2_grads.data_ptr()),
reinterpret_cast<half const*>(attn_scores.data_ptr()), // need this to recompute softmax
//reinterpret_cast<half const*>(pad_mask.data_ptr()),
static_cast<uint8_t const*>(dropout_mask.data_ptr()),
1.0/(1.0-dropout_prob),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
stream);
} else {
dispatch_softmax_backward_norecompute<half, half, float, false>(
static_cast<half*>(matmul2_grads.data_ptr()),
static_cast<half* const>(matmul2_grads.data_ptr()),
reinterpret_cast<half const*>(dropout_results.data_ptr()),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
stream);
}
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
head_dim,
q_seq_len,
k_seq_len,
&scale, /* host pointer */
static_cast<const void*>(k_lin_results_ptr), // A:
lead_dim_kv,
batch_stride_kv,
static_cast<const void*>(matmul2_grads.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
&beta, /* host pointer */
static_cast<void*>(q_lin_grads_ptr), // C
lead_dim_q,
batch_stride_q,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM backward 3 failed with %d\n", cublas_status);
exit(0);
}
cublas_status = strided_batched_gemm_lt(
(cublasLtHandle_t)handle,
CUBLAS_OP_N,
CUBLAS_OP_T,
head_dim,
k_seq_len,
q_seq_len,
&scale, /* host pointer */
static_cast<const void*>(q_lin_results_ptr), // A:
lead_dim_q,
batch_stride_q,
static_cast<const void*>(matmul2_grads.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
&beta, /* host pointer */
static_cast<void*>(k_lin_grads_ptr), // C
lead_dim_kv,
batch_stride_kv,
attn_batches, // batch = heads * bsz
lt_workspace_ptr,
1 << 22,
stream);
if (cublas_status != CUBLAS_STATUS_SUCCESS) {
printf("Strided Batched GEMM backward 4 failed with %d\n", cublas_status);
exit(0);
}
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
return {
input_lin_q_output_grads,
input_lin_kv_output_grads,
};
}
std::vector<torch::Tensor> bwd_cuda_input_only(
int heads,
torch::Tensor const& output_grads,
torch::Tensor const& matmul2_results,
torch::Tensor const& dropout_results,
torch::Tensor const& attn_scores,
torch::Tensor const& input_lin_q_results,
torch::Tensor const& input_lin_kv_results,
torch::Tensor const& inputs_q,
torch::Tensor const& inputs_kv,
torch::Tensor const& input_weights_q,
torch::Tensor const& input_weights_kv,
torch::Tensor const& output_weights,
torch::Tensor const& dropout_mask,
float dropout_prob
)
{
const int embed_dim = inputs_q.size(2);
const int sequences = inputs_q.size(1);
const int q_seq_len = inputs_q.size(0);
const int k_seq_len = inputs_kv.size(0);
const int batches_q = sequences * q_seq_len;
const int batches_kv = sequences * k_seq_len;
const int head_dim = embed_dim / heads;
const int output_lin_q_dim = embed_dim;
const int output_lin_kv_dim = 2 * embed_dim;
const int attn_batches = heads * sequences;
const int lead_dim_q = attn_batches * head_dim;
const int lead_dim_kv = attn_batches * 2 *head_dim;
const int batch_stride_q = head_dim;
const int batch_stride_kv = 2 * head_dim;
// const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
const float alpha = 1.0;
const float beta = 0.0;
const float scale = 1.0 / sqrt(static_cast<float>(head_dim));
// TODO: Streams can be used in Backprop but I haven't added more than one
// in my first attempt to create the code
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
cublasSetStream(handle, stream);
// Output Tensor Allocations
torch::Tensor input_q_grads = torch::empty_like(inputs_q);
torch::Tensor input_kv_grads = torch::empty_like(inputs_kv);
// torch::Tensor input_weight_q_grads = torch::empty_like(input_weights_q);
// torch::Tensor input_weight_kv_grads = torch::empty_like(input_weights_kv);
// torch::Tensor output_weight_grads = torch::empty_like(output_weights);
// Intermediate Tensor Allocations
at::Tensor output_lin_grads = torch::empty_like(matmul2_results);
at::Tensor matmul2_grads = torch::empty_like(dropout_results);
at::Tensor input_lin_q_output_grads = torch::empty_like(input_lin_q_results);
at::Tensor input_lin_kv_output_grads = torch::empty_like(input_lin_kv_results);
auto q_lin_results_ptr = static_cast<half*>(input_lin_q_results.data_ptr());
auto k_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr());
auto v_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim;
auto q_lin_grads_ptr = static_cast<half*>(input_lin_q_output_grads.data_ptr());
auto k_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr());
auto v_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr()) + head_dim;
// char a_layout_n{'n'};
// char a_layout_t{'t'};
// char b_layout_n{'n'};
// char b_layout_t{'t'};
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
// Output Linear Dgrad
// C = alpha * op(A) * op(B) + beta * C
// op(A): mxk, op(B): kxn C: mxn
TORCH_CUDABLAS_CHECK(cublasGemmEx(handle, //
CUBLAS_OP_N, // no transpose
CUBLAS_OP_N, // no transpose
embed_dim, // m
batches_q, // n = bsz * len_q
embed_dim, // k
static_cast<const void*>(&alpha), // alpha = 1.0
static_cast<const void*>(output_weights.data_ptr()), // A mxk
CUDA_R_16F, // data type
embed_dim, // leading dimension of A (embed dim) (the rows)
static_cast<const void*>(output_grads.data_ptr()), // B kxn
CUDA_R_16F, // data type
embed_dim, // leading dimension of B (embed dim)
static_cast<const void*>(&beta), // beta
static_cast<void*>(output_lin_grads.data_ptr()), // C mxn
CUDA_R_16F, // data type
embed_dim, // ldc
CUDA_R_32F, // compute type
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Output Linear Wgrad
// TORCH_CUDABLAS_CHECK(cublasGemmEx(handle,
// CUBLAS_OP_N,
// CUBLAS_OP_T,
// embed_dim,
// embed_dim,
// batches_q,
// static_cast<const void*>(&alpha),
// static_cast<const void*>(matmul2_results.data_ptr()),
// CUDA_R_16F,
// embed_dim,
// static_cast<const void*>(output_grads.data_ptr()),
// CUDA_R_16F,
// embed_dim,
// static_cast<const void*>(&beta),
// static_cast<void*>(output_weight_grads.data_ptr()),
// CUDA_R_16F,
// embed_dim,
// CUDA_R_32F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// auto output_bias_grads = output_grads.view({-1, embed_dim}).sum(0, false);
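// MatMul2 Dgrad1: d(attn_probs) = V^T * d(context)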
TORCH_CUDABLAS_CHECK(cublasGemmStridedBatchedEx(handle,
CUBLAS_OP_T,
CUBLAS_OP_N,
k_seq_len, // m
q_seq_len, // n
head_dim, // k
static_cast<const void*>(&alpha),
static_cast<const void*>(v_lin_results_ptr), // A:
CUDA_R_16F,
lead_dim_kv, // lda
batch_stride_kv, // stride A
static_cast<const void*>(output_lin_grads.data_ptr()),
CUDA_R_16F,
head_dim*attn_batches,
head_dim,
static_cast<const void*>(&beta),
static_cast<void*>(matmul2_grads.data_ptr()), // C
CUDA_R_16F,
k_seq_len,
k_seq_len*q_seq_len,
attn_batches,
CUDA_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
TORCH_CUDABLAS_CHECK(cublasGemmStridedBatchedEx(handle,
CUBLAS_OP_N,
CUBLAS_OP_T,
head_dim, // m
k_seq_len, // n
q_seq_len, // k
static_cast<const void*>(&alpha),
static_cast<const void*>(output_lin_grads.data_ptr()), // A:
CUDA_R_16F,
head_dim*attn_batches, // lda
head_dim, // stride A
static_cast<const void*>(dropout_results.data_ptr()),
CUDA_R_16F,
k_seq_len,
k_seq_len*q_seq_len,
static_cast<const void*>(&beta),
static_cast<void*>(v_lin_grads_ptr), // C
CUDA_R_16F,
lead_dim_kv,
batch_stride_kv,
attn_batches,
CUDA_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// bool softmax_success = false;
if ( dropout_prob > 0.0f) {
dispatch_softmax_dropout_backward_recompute<half, half, float, false>(
static_cast<half*>(matmul2_grads.data_ptr()),
static_cast<half* const>(matmul2_grads.data_ptr()),
reinterpret_cast<half const*>(attn_scores.data_ptr()), // need this to recompute softmax
//reinterpret_cast<half const*>(pad_mask.data_ptr()),
static_cast<uint8_t const*>(dropout_mask.data_ptr()),
1.0/(1.0-dropout_prob),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
stream);
} else {
// if dropout == 0 then we don't need to recompute (because dropout_results == softmax_results)
dispatch_softmax_backward_norecompute<half, half, float, false>(
static_cast<half*>(matmul2_grads.data_ptr()),
static_cast<half* const>(matmul2_grads.data_ptr()),
reinterpret_cast<half const*>(dropout_results.data_ptr()),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
stream);
}
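// MatMul1 Dgrad1: dQ = scale * K * d(attn_scores)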
TORCH_CUDABLAS_CHECK(cublasGemmStridedBatchedEx(handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
head_dim, // m
q_seq_len, // n
k_seq_len, // k
static_cast<const void*>(&scale),
static_cast<const void*>(k_lin_results_ptr), // A:
CUDA_R_16F,
lead_dim_kv, // lda
batch_stride_kv, // stride A
static_cast<const void*>(matmul2_grads.data_ptr()),
CUDA_R_16F,
k_seq_len,
k_seq_len*q_seq_len,
static_cast<const void*>(&beta),
static_cast<void*>(q_lin_grads_ptr), // C
CUDA_R_16F,
lead_dim_q,
batch_stride_q,
attn_batches,
CUDA_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
TORCH_CUDABLAS_CHECK(cublasGemmStridedBatchedEx(handle,
CUBLAS_OP_N,
CUBLAS_OP_T,
head_dim, // m
k_seq_len, // n
q_seq_len, // k
static_cast<const void*>(&scale),
static_cast<const void*>(q_lin_results_ptr), // A:
CUDA_R_16F,
lead_dim_q, // lda
batch_stride_q, // stride A
static_cast<const void*>(matmul2_grads.data_ptr()),
CUDA_R_16F,
k_seq_len,
k_seq_len*q_seq_len,
static_cast<const void*>(&beta),
static_cast<void*>(k_lin_grads_ptr), // C
CUDA_R_16F,
lead_dim_kv,
batch_stride_kv,
attn_batches,
CUDA_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Input Linear Q Dgrad
TORCH_CUDABLAS_CHECK(cublasGemmEx(handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
embed_dim,
batches_q,
output_lin_q_dim,
static_cast<const void*>(&alpha),
static_cast<const void*>(input_weights_q.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(q_lin_grads_ptr),
CUDA_R_16F,
output_lin_q_dim,
static_cast<const void*>(&beta),
static_cast<void*>(input_q_grads.data_ptr()),
CUDA_R_16F,
embed_dim,
CUDA_R_32F,
//CUBLAS_GEMM_ALGO10_TENSOR_OP));
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Input Linear Q Wgrad
// TORCH_CUDABLAS_CHECK(cublasGemmEx(handle,
// CUBLAS_OP_N,
// CUBLAS_OP_T,
// embed_dim,
// output_lin_q_dim,
// batches_q,
// static_cast<const void*>(&alpha),
// static_cast<const void*>(inputs_q.data_ptr()),
// CUDA_R_16F,
// embed_dim,
// static_cast<const void*>(q_lin_grads_ptr),
// CUDA_R_16F,
// output_lin_q_dim,
// static_cast<const void*>(&beta),
// static_cast<void*>(input_weight_q_grads.data_ptr()),
// CUDA_R_16F,
// embed_dim,
// CUDA_R_32F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
//
// auto input_bias_q_grads = input_lin_q_output_grads.view({-1, output_lin_q_dim}).sum(0, false);
// Input Linear KV Dgrad
TORCH_CUDABLAS_CHECK(cublasGemmEx(handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
embed_dim,
batches_kv,
output_lin_kv_dim,
static_cast<const void*>(&alpha),
static_cast<const void*>(input_weights_kv.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(k_lin_grads_ptr),
CUDA_R_16F,
output_lin_kv_dim,
static_cast<const void*>(&beta),
static_cast<void*>(input_kv_grads.data_ptr()),
CUDA_R_16F,
embed_dim,
CUDA_R_32F,
//CUBLAS_GEMM_ALGO10_TENSOR_OP));
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Input Linear KV Wgrad
// TORCH_CUDABLAS_CHECK(cublasGemmEx(handle,
// CUBLAS_OP_N,
// CUBLAS_OP_T,
// embed_dim,
// output_lin_kv_dim,
// batches_kv,
// static_cast<const void*>(&alpha),
// static_cast<const void*>(inputs_kv.data_ptr()),
// CUDA_R_16F,
// embed_dim,
// static_cast<const void*>(k_lin_grads_ptr),
// CUDA_R_16F,
// output_lin_kv_dim,
// static_cast<const void*>(&beta),
// static_cast<void*>(input_weight_kv_grads.data_ptr()),
// CUDA_R_16F,
// embed_dim,
// CUDA_R_32F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// auto input_bias_kv_grads = input_lin_kv_output_grads.view({-1, output_lin_kv_dim}).sum(0, false);
TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
return {
input_q_grads,
input_kv_grads,
};
}
//
//
//std::vector<torch::Tensor> bwd_recompute_cuda(
// int heads,
// torch::Tensor const& output_grads,
// torch::Tensor const& inputs_q,
// torch::Tensor const& inputs_kv,
// torch::Tensor const& input_weights_q,
// torch::Tensor const& input_weights_kv,
// torch::Tensor const& output_weights,
// torch::Tensor const& dropout_mask,
// torch::Tensor const& pad_mask,
// float dropout_prob
// )
//{
// const int embed_dim = inputs_q.size(2);
// const int sequences = inputs_q.size(1);
// const int q_seq_len = inputs_q.size(0);
// const int k_seq_len = inputs_kv.size(0);
// const int batches_q = sequences * q_seq_len;
// const int batches_kv = sequences * k_seq_len;
// const int head_dim = embed_dim / heads;
// const int output_lin_q_dim = embed_dim;
// const int output_lin_kv_dim = 2 * embed_dim;
// const int attn_batches = heads * sequences;
// const int lead_dim_q = attn_batches * head_dim;
// const int lead_dim_kv = attn_batches * 2 *head_dim;
// const int batch_stride_q = head_dim;
// const int batch_stride_kv = 2 * head_dim;
// const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
// const float alpha = 1.0;
// const float beta = 0.0;
// const float scale = 1.0 / sqrt(static_cast<float>(head_dim));
//
// // TODO: Streams can be used in Backprop but I haven't added more than one
// // in my first attempt to create the code
// cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
// cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
// cublasSetStream(handle, stream);
//
// // Tensor allocations for recomputation
// auto act_options = inputs_q.options().requires_grad(false);
//
// torch::Tensor input_lin_q_results = torch::empty({q_seq_len, sequences, output_lin_q_dim}, act_options);
// torch::Tensor input_lin_kv_results = torch::empty({k_seq_len, sequences, output_lin_kv_dim}, act_options);
// torch::Tensor attn_scores = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
// torch::Tensor softmax_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
// torch::Tensor dropout_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
// torch::Tensor matmul2_results = torch::empty({q_seq_len, attn_batches, head_dim}, act_options);
//
// void* softmax_results_ptr = static_cast<void*>(softmax_results.data_ptr());
// void* dropout_results_ptr = static_cast<void*>(dropout_results.data_ptr());
// // Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax)
// void* attn_scores_ptr = static_cast<void*>(attn_scores.data_ptr());
//
// // Output Tensor Allocations
// torch::Tensor input_q_grads = torch::empty_like(inputs_q);
// torch::Tensor input_kv_grads = torch::empty_like(inputs_kv);
// torch::Tensor input_weight_q_grads = torch::empty_like(input_weights_q);
// torch::Tensor input_weight_kv_grads = torch::empty_like(input_weights_kv);
// torch::Tensor output_weight_grads = torch::empty_like(output_weights);
// // Intermediate Tensor Allocations
// at::Tensor output_lin_grads = torch::empty_like(matmul2_results);
// at::Tensor matmul2_grads = torch::empty_like(softmax_results);
// at::Tensor input_lin_q_output_grads = torch::empty_like(input_lin_q_results);
// at::Tensor input_lin_kv_output_grads = torch::empty_like(input_lin_kv_results);
//
// auto q_lin_results_ptr = static_cast<half*>(input_lin_q_results.data_ptr());
// auto k_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr());
// auto v_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim;
//
// auto q_lin_grads_ptr = static_cast<half*>(input_lin_q_output_grads.data_ptr());
// auto k_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr());
// auto v_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr()) + head_dim;
//
// char a_layout_n{'n'};
// char a_layout_t{'t'};
// char b_layout_n{'n'};
// char b_layout_t{'t'};
//
// TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
//
// // Input Linear Q Fwd
// TORCH_CUDABLAS_CHECK(cublasGemmEx(handle,
// CUBLAS_OP_T, // A transpose
// CUBLAS_OP_N, // B wo/ transpose
// output_lin_q_dim, // embed_dim
// batches_q, // bsz x len_q
// embed_dim, // embed_dim
// static_cast<const void*>(&alpha),
// static_cast<const void*>(input_weights_q.data_ptr()), // weight emb_out x emb_in transposed
// CUDA_R_16F,
// embed_dim, // lda so A has size [lda x m] -> [embed_dim x output_lin_q_dim]
// static_cast<const void*>(inputs_q.data_ptr()), // input Q
// CUDA_R_16F,
// embed_dim, // ldb B has size [lda xn] -> [embed_dim x batches_q]
// static_cast<const void*>(&beta), // beta
// q_lin_results_ptr, // C -> emb * B
// CUDA_R_16F,
// output_lin_q_dim, // ldc C [lda x n] -> [embed_dim x batches_q]
// CUDA_R_32F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
//
// // Input Linear KV Fwd
// TORCH_CUDABLAS_CHECK(cublasGemmEx(handle,
// CUBLAS_OP_T,
// CUBLAS_OP_N,
// output_lin_kv_dim,
// batches_kv,
// embed_dim,
// static_cast<const void*>(&alpha),
// static_cast<const void*>(input_weights_kv.data_ptr()),
// CUDA_R_16F,
// embed_dim,
// static_cast<const void*>(inputs_kv.data_ptr()),
// CUDA_R_16F,
// embed_dim,
// static_cast<const void*>(&beta),
// k_lin_results_ptr,
// CUDA_R_16F,
// output_lin_kv_dim,
// CUDA_R_32F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
//
// // MatMul1 of Dot-Product Attention Plus scaling by 1/Sqrt(head size)
// gemm_switch_fp32accum( state,
// a_layout_t,
// b_layout_n,
// k_seq_len, // m
// q_seq_len, // n
// head_dim, // k
// scale,
// static_cast<const half*>(k_lin_results_ptr),
// lead_dim_kv, // lda
// batch_stride_kv, //strideA
// static_cast<const half*>(q_lin_results_ptr),
// lead_dim_q, // ldb
// batch_stride_q, //strideB
// beta,
// static_cast<half*>(attn_scores_ptr), // [attn_batches * len_q * len_k]
// k_seq_len, // ldc
// k_seq_len*q_seq_len, // stride c
// attn_batches); // p
//
// // need to call padding from torch interface here.
// attn_scores.view({sequences, heads, q_seq_len, k_seq_len}).masked_fill_(pad_mask,
// -std::numeric_limits<float>::infinity());
//
// attn_scores.view({sequences*heads, q_seq_len, k_seq_len});
// bool softmax_success = false;
//
// // run softmax dropout again but don't change the dropout mask
// softmax_success = dispatch_softmax_dropout_presampled<half, half, float>(
// reinterpret_cast<half*>(dropout_results_ptr),
// reinterpret_cast<half*>(softmax_results_ptr),
// reinterpret_cast<const uint8_t*>(dropout_mask.data_ptr<uint8_t>()),
// reinterpret_cast<const half*>(attn_scores_ptr),
// dropout_elems,
// k_seq_len,
// k_seq_len,
// attn_batches*q_seq_len,
// (1.0f - dropout_prob),
// stream);
//
// assert(softmax_success);
//
// // Matmul2
// // matrix kv has size len_k * batch_size * (2 * heads * head_dim)
// // dropout results [bsz*heads, len_q, len_k]
// // matmul2_results is [len_q x attn_batches x head_dim]
// gemm_switch_fp32accum( state,
// a_layout_n,
// b_layout_n,
// head_dim, // m
// q_seq_len, // n
// k_seq_len, // k
// alpha,
// static_cast<const half*>(v_lin_results_ptr), // A_i [head_dimxk_seq_len]
// lead_dim_kv, // attn_batches * 2 *head_dim
// batch_stride_kv, // stride = 2 * head_dim
// static_cast<const half*>(dropout_results.data_ptr()), // B_i [k_seq_len x q_seq_len]
// k_seq_len, // lead_dim
// k_seq_len*q_seq_len, // stride
// beta,
// static_cast<half*>(matmul2_results.data_ptr()),
// head_dim*attn_batches, // ldc
// head_dim, // stride c
// attn_batches); //p
//
// ////////////////////////////////////////// Recomputation Done /////////////////////////////////////
//
// // Output Linear Dgrad
// // C = alpha * op(A) op(B) + BetaC
// // op(A): mxk, op(B): kxn C: mxn
// TORCH_CUDABLAS_CHECK(cublasGemmEx(handle, //
// CUBLAS_OP_N, // no transpose
// CUBLAS_OP_N, // no transpose
// embed_dim, // m
// batches_q, // n = bsz * len_q
// embed_dim, // k
// static_cast<const void*>(&alpha), // alpha = 1.0
// static_cast<const void*>(output_weights.data_ptr()), // A mxk
// CUDA_R_16F, // data type
// embed_dim, // leading dimension of A (embed dim) (the rows)
// static_cast<const void*>(output_grads.data_ptr()), // B kxn
// CUDA_R_16F, // data type
// embed_dim, // leading dimension of B (embed dim)
// static_cast<const void*>(&beta), // beta
// static_cast<void*>(output_lin_grads.data_ptr()), // C mxn
// CUDA_R_16F, // data type
// embed_dim, // ldc
// CUDA_R_32F, // compute type
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
//
// // Output Linear Wgrad
// TORCH_CUDABLAS_CHECK(cublasGemmEx(handle,
// CUBLAS_OP_N,
// CUBLAS_OP_T,
// embed_dim,
// embed_dim,
// batches_q,
// static_cast<const void*>(&alpha),
// static_cast<const void*>(matmul2_results.data_ptr()),
// CUDA_R_16F,
// embed_dim,
// static_cast<const void*>(output_grads.data_ptr()),
// CUDA_R_16F,
// embed_dim,
// static_cast<const void*>(&beta),
// static_cast<void*>(output_weight_grads.data_ptr()),
// CUDA_R_16F,
// embed_dim,
// CUDA_R_32F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
//
// // MatMul2 Dgrad1
// gemm_switch_fp32accum( state,
// a_layout_t,
// b_layout_n,
// k_seq_len,
// q_seq_len,
// head_dim,
// alpha,
// static_cast<const half*>(v_lin_results_ptr),
// lead_dim_kv,
// batch_stride_kv, // 2 * head_dim
// static_cast<const half*>(output_lin_grads.data_ptr()),
// head_dim*attn_batches,
// head_dim,
// beta,
// static_cast<half*>(matmul2_grads.data_ptr()),
// k_seq_len,
// k_seq_len*q_seq_len,
// attn_batches);
//
// // Matmul2 Dgrad2
// gemm_switch_fp32accum( state,
// a_layout_n,
// b_layout_t,
// head_dim,
// k_seq_len,
// q_seq_len,
// alpha,
// static_cast<const half*>(output_lin_grads.data_ptr()),
// head_dim*attn_batches,
// head_dim,
// static_cast<const half*>(dropout_results.data_ptr()),
// k_seq_len,
// k_seq_len*q_seq_len,
// beta,
// v_lin_grads_ptr,
// lead_dim_kv,
// batch_stride_kv,
// attn_batches);
//
// dispatch_masked_scale_softmax_backward_recompute<half, half, float, false>(
// static_cast<half*>(matmul2_grads.data_ptr()),
// static_cast<half* const>(matmul2_grads.data_ptr()),
// reinterpret_cast<half const*>(softmax_results.data_ptr()),
// static_cast<uint8_t const*>(dropout_mask.data_ptr()),
// 1.0/(1.0-dropout_prob),
// k_seq_len,
// k_seq_len,
// attn_batches*q_seq_len,
// stream);
//
// // Matmul1 Dgrad1
// gemm_switch_fp32accum( state,
// a_layout_n,
// b_layout_n,
// head_dim,
// q_seq_len,
// k_seq_len,
// scale,
// k_lin_results_ptr,
// lead_dim_kv,
// batch_stride_kv,
// static_cast<half*>(matmul2_grads.data_ptr()),
// k_seq_len,
// k_seq_len*q_seq_len,
// beta,
// q_lin_grads_ptr,
// lead_dim_q,
// batch_stride_q,
// attn_batches);
//
// // Matmul1 Dgrad2
// gemm_switch_fp32accum( state,
// a_layout_n,
// b_layout_t,
// head_dim,
// k_seq_len,
// q_seq_len,
// scale,
// q_lin_results_ptr,
// lead_dim_q,
// batch_stride_q,
// static_cast<half*>(matmul2_grads.data_ptr()),
// k_seq_len,
// k_seq_len*q_seq_len,
// beta,
// k_lin_grads_ptr,
// lead_dim_kv,
// batch_stride_kv,
// attn_batches);
//
// // Input Linear Q Dgrad
// TORCH_CUDABLAS_CHECK(cublasGemmEx(handle,
// CUBLAS_OP_N,
// CUBLAS_OP_N,
// embed_dim,
// batches_q,
// output_lin_q_dim,
// static_cast<const void*>(&alpha),
// static_cast<const void*>(input_weights_q.data_ptr()),
// CUDA_R_16F,
// embed_dim,
// static_cast<const void*>(q_lin_grads_ptr),
// CUDA_R_16F,
// output_lin_q_dim,
// static_cast<const void*>(&beta),
// static_cast<void*>(input_q_grads.data_ptr()),
// CUDA_R_16F,
// embed_dim,
// CUDA_R_32F,
// //CUBLAS_GEMM_ALGO10_TENSOR_OP));
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
//
// // Input Linear Q Wgrad
// TORCH_CUDABLAS_CHECK(cublasGemmEx(handle,
// CUBLAS_OP_N,
// CUBLAS_OP_T,
// embed_dim,
// output_lin_q_dim,
// batches_q,
// static_cast<const void*>(&alpha),
// static_cast<const void*>(inputs_q.data_ptr()),
// CUDA_R_16F,
// embed_dim,
// static_cast<const void*>(q_lin_grads_ptr),
// CUDA_R_16F,
// output_lin_q_dim,
// static_cast<const void*>(&beta),
// static_cast<void*>(input_weight_q_grads.data_ptr()),
// CUDA_R_16F,
// embed_dim,
// CUDA_R_32F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
//
// // Input Linear KV Dgrad
// TORCH_CUDABLAS_CHECK(cublasGemmEx(handle,
// CUBLAS_OP_N,
// CUBLAS_OP_N,
// embed_dim,
// batches_kv,
// output_lin_kv_dim,
// static_cast<const void*>(&alpha),
// static_cast<const void*>(input_weights_kv.data_ptr()),
// CUDA_R_16F,
// embed_dim,
// static_cast<const void*>(k_lin_grads_ptr),
// CUDA_R_16F,
// output_lin_kv_dim,
// static_cast<const void*>(&beta),
// static_cast<void*>(input_kv_grads.data_ptr()),
// CUDA_R_16F,
// embed_dim,
// CUDA_R_32F,
// //CUBLAS_GEMM_ALGO10_TENSOR_OP));
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
//
// // Input Linear KV Wgrad
// TORCH_CUDABLAS_CHECK(cublasGemmEx(handle,
// CUBLAS_OP_N,
// CUBLAS_OP_T,
// embed_dim,
// output_lin_kv_dim,
// batches_kv,
// static_cast<const void*>(&alpha),
// static_cast<const void*>(inputs_kv.data_ptr()),
// CUDA_R_16F,
// embed_dim,
// static_cast<const void*>(k_lin_grads_ptr),
// CUDA_R_16F,
// output_lin_kv_dim,
// static_cast<const void*>(&beta),
// static_cast<void*>(input_weight_kv_grads.data_ptr()),
// CUDA_R_16F,
// embed_dim,
// CUDA_R_32F,
// CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
//
// return {
// input_q_grads,
// input_kv_grads,
// input_weight_q_grads,
// input_weight_kv_grads,
// output_weight_grads
// };
//}
} // end namespace cublas_gemmex
} // end namespace encdec
} // end namespace multihead_attn
|
a5dfc5f589d2b579e58d7a8f895ff4153ba24a85.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "clearLabel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
bool *label = NULL;
hipMalloc(&label, XSIZE*YSIZE);
unsigned int size = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(clearLabel, dim3(gridBlock), dim3(threadBlock), 0, 0, label, size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(clearLabel, dim3(gridBlock), dim3(threadBlock), 0, 0, label, size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(clearLabel, dim3(gridBlock), dim3(threadBlock), 0, 0, label, size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
a5dfc5f589d2b579e58d7a8f895ff4153ba24a85.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "clearLabel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
bool *label = NULL;
cudaMalloc(&label, XSIZE*YSIZE);
unsigned int size = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
clearLabel<<<gridBlock,threadBlock>>>(label,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
clearLabel<<<gridBlock,threadBlock>>>(label,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
clearLabel<<<gridBlock,threadBlock>>>(label,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
896787e77c5e9e0bf84b21b4d10134d66921fdc7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/hip/HIPApplyUtils.cuh>
namespace at { namespace native {
using where_fn = void (*)(TensorIterator &, ScalarType);
DECLARE_DISPATCH(where_fn, where_kernel);
using is_infinity_op_fn = void (*)(TensorIterator &);
DECLARE_DISPATCH(is_infinity_op_fn, isposinf_stub);
DECLARE_DISPATCH(is_infinity_op_fn, isneginf_stub);
namespace {
void where_kernel_impl(TensorIterator &iter, ScalarType condition_type) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBFloat16, kBool, iter.dtype(), "where_cuda", [&] {
if (condition_type == at::ScalarType::Byte) {
gpu_kernel(
iter,
[=] GPU_LAMBDA (uint8_t cond_val, scalar_t self_val, scalar_t other_val) -> scalar_t {
return cond_val ? self_val : other_val;
});
} else {
gpu_kernel(
iter,
[=] GPU_LAMBDA (bool cond_val, scalar_t self_val, scalar_t other_val) -> scalar_t {
return cond_val ? self_val : other_val;
});
}
});
}
void isposinf_kernel_impl(TensorIterator &iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.input_dtype(), "isposinf_cuda", [&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA (scalar_t a) -> bool { return a == std::numeric_limits<scalar_t>::infinity(); }
);
});
}
void isneginf_kernel_impl(TensorIterator &iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.input_dtype(), "isneginf_cuda", [&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA (scalar_t a) -> bool { return a == -std::numeric_limits<scalar_t>::infinity(); }
);
});
}
} // anonymous namespace
REGISTER_DISPATCH(where_kernel, &where_kernel_impl);
REGISTER_DISPATCH(isposinf_stub, &isposinf_kernel_impl);
REGISTER_DISPATCH(isneginf_stub, &isneginf_kernel_impl);
template <typename scalar_t>
__global__ void assert_async_cuda_kernel(scalar_t* input) {
CUDA_KERNEL_ASSERT(input[0] != 0);
}
__global__ void assert_async_cuda_kernel(c10::complex<float>* input) {
CUDA_KERNEL_ASSERT(input[0] != c10::complex<float>(0, 0));
}
__global__ void assert_async_cuda_kernel(c10::complex<double>* input) {
CUDA_KERNEL_ASSERT(input[0] != c10::complex<double>(0, 0));
}
void assert_async_cuda(const Tensor& self) {
auto n = self.numel();
TORCH_CHECK(n != 0, "Boolean value of Tensor with no values is ambiguous");
TORCH_CHECK(n < 2, "Boolean value of Tensor with more than one value is ambiguous");
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, self.scalar_type(), "assert_async_cuda", [&] {
hipLaunchKernelGGL(( assert_async_cuda_kernel), dim3(1), dim3(1), 0, stream, self.data_ptr<scalar_t>());
});
}
}} // namespace at::native
|
896787e77c5e9e0bf84b21b4d10134d66921fdc7.cu
|
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/cuda/CUDAApplyUtils.cuh>
namespace at { namespace native {
using where_fn = void (*)(TensorIterator &, ScalarType);
DECLARE_DISPATCH(where_fn, where_kernel);
using is_infinity_op_fn = void (*)(TensorIterator &);
DECLARE_DISPATCH(is_infinity_op_fn, isposinf_stub);
DECLARE_DISPATCH(is_infinity_op_fn, isneginf_stub);
namespace {
void where_kernel_impl(TensorIterator &iter, ScalarType condition_type) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBFloat16, kBool, iter.dtype(), "where_cuda", [&] {
if (condition_type == at::ScalarType::Byte) {
gpu_kernel(
iter,
[=] GPU_LAMBDA (uint8_t cond_val, scalar_t self_val, scalar_t other_val) -> scalar_t {
return cond_val ? self_val : other_val;
});
} else {
gpu_kernel(
iter,
[=] GPU_LAMBDA (bool cond_val, scalar_t self_val, scalar_t other_val) -> scalar_t {
return cond_val ? self_val : other_val;
});
}
});
}
void isposinf_kernel_impl(TensorIterator &iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.input_dtype(), "isposinf_cuda", [&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA (scalar_t a) -> bool { return a == std::numeric_limits<scalar_t>::infinity(); }
);
});
}
void isneginf_kernel_impl(TensorIterator &iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.input_dtype(), "isneginf_cuda", [&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA (scalar_t a) -> bool { return a == -std::numeric_limits<scalar_t>::infinity(); }
);
});
}
} // anonymous namespace
REGISTER_DISPATCH(where_kernel, &where_kernel_impl);
REGISTER_DISPATCH(isposinf_stub, &isposinf_kernel_impl);
REGISTER_DISPATCH(isneginf_stub, &isneginf_kernel_impl);
template <typename scalar_t>
__global__ void assert_async_cuda_kernel(scalar_t* input) {
CUDA_KERNEL_ASSERT(input[0] != 0);
}
__global__ void assert_async_cuda_kernel(c10::complex<float>* input) {
CUDA_KERNEL_ASSERT(input[0] != c10::complex<float>(0, 0));
}
__global__ void assert_async_cuda_kernel(c10::complex<double>* input) {
CUDA_KERNEL_ASSERT(input[0] != c10::complex<double>(0, 0));
}
void assert_async_cuda(const Tensor& self) {
auto n = self.numel();
TORCH_CHECK(n != 0, "Boolean value of Tensor with no values is ambiguous");
TORCH_CHECK(n < 2, "Boolean value of Tensor with more than one value is ambiguous");
auto stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, self.scalar_type(), "assert_async_cuda", [&] {
assert_async_cuda_kernel<<<1, 1, 0, stream>>>(self.data_ptr<scalar_t>());
});
}
}} // namespace at::native
|
c1acfb0367f6b19e84b5fb8b67da4788d8385e07.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kAdd(float* a, float* b, float* dest, unsigned int numEls, float scaleA, float scaleB) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
// const unsigned int idx = blockIdx.y * height + blockIdx.x * blockDim.x + threadIdx.y*blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = scaleA * a[i] + scaleB * b[i];
}
}
|
c1acfb0367f6b19e84b5fb8b67da4788d8385e07.cu
|
#include "includes.h"
__global__ void kAdd(float* a, float* b, float* dest, unsigned int numEls, float scaleA, float scaleB) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
// const unsigned int idx = blockIdx.y * height + blockIdx.x * blockDim.x + threadIdx.y*blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = scaleA * a[i] + scaleB * b[i];
}
}
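// Illustrative host-side launch (a sketch, not part of the original file); d_a, d_b and
// d_dest are assumed device buffers of numEls floats:
//   int threads = 256;
//   int blocks = (numEls + threads - 1) / threads;
//   kAdd<<<blocks, threads>>>(d_a, d_b, d_dest, numEls, 1.0f, 1.0f);
// Because the kernel uses a grid-stride loop, any grid size covers all numEls elements.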
|
bcdda4c7074039743908d7add4d37cbc2f2a9086.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void SigmoidBackKernel(float* Z, float* dZ, int size){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < size){
float t = Z[id];
dZ[id] = dZ[id] * t * (1-t) ;
}
}
|
bcdda4c7074039743908d7add4d37cbc2f2a9086.cu
|
#include "includes.h"
__global__ void SigmoidBackKernel(float* Z, float* dZ, int size){
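// Assumes Z already holds sigmoid outputs s = sigmoid(x); the local derivative is then
// s * (1 - s), and dZ is scaled in place via the chain rule.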
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < size){
float t = Z[id];
dZ[id] = dZ[id] * t * (1-t) ;
}
}
|
a5cd9e6e16e877b590d10d3e1ea852eaf90d6cd1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= n) return;
bools[idx] = (idata[idx] == 0) ? 0 : 1;
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= n) return;
if (bools[idx] == 1) odata[indices[idx]] = idata[idx];
}
}
}
|
a5cd9e6e16e877b590d10d3e1ea852eaf90d6cd1.cu
|
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= n) return;
bools[idx] = (idata[idx] == 0) ? 0 : 1;
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= n) return;
if (bools[idx] == 1) odata[indices[idx]] = idata[idx];
}
}
}
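// Illustrative pipeline (a sketch, not part of the original file): a typical compaction
// built from these kernels runs in three steps on n elements of idata:
//   kernMapToBoolean<<<blocks, threads>>>(n, dev_bools, dev_idata);                      // 1) flag non-zeros
//   /* 2) exclusive scan of dev_bools into dev_indices (provided elsewhere) */
//   kernScatter<<<blocks, threads>>>(n, dev_odata, dev_idata, dev_bools, dev_indices);   // 3) scatter survivors
// dev_* names are assumed device buffers; the scan step is not defined in this file.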
|
bd82aef670fce9d6a4fe9a4af2ecd077f6b675a1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/embed.hpp>
#include <nbla/variable.hpp>
namespace nbla {
template <typename T, typename T1>
__global__ void kernel_embed_forward(const int num, T1 *y, const T *x,
const T1 *w, int stride0) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
const int i = idx / stride0;
const int j = idx % stride0;
y[idx] = w[x[i] * stride0 + j];
}
}
template <typename T, typename T1, typename Tw>
__global__ void kernel_embed_backward_weight(const int num, Tw *dw, const T *x,
const T1 *dy, int stride0) {
// TODO: optimize
NBLA_CUDA_KERNEL_LOOP(idx, num) {
const int i = idx / stride0;
const int j = idx % stride0;
atomicAdd(dw + x[i] * stride0 + j,
(typename CudaTypeForceFloat<T1>::type)dy[i * stride0 + j]);
}
}
template <typename T, typename T1>
void EmbedCuda<T, T1>::setup_impl(const Variables &inputs,
const Variables &outputs) {
Embed<T, T1>::setup_impl(inputs, outputs);
}
template <typename T, typename T1>
void EmbedCuda<T, T1>::forward_impl(const Variables &inputs,
const Variables &outputs) {
typedef typename CudaType<T1>::type Tc;
cuda_set_device(std::stoi(this->ctx_.device_id));
const T *x = inputs[0]->get_data_pointer<T>(this->ctx_);
const Tc *w = inputs[1]->get_data_pointer<Tc>(this->ctx_);
Tc *y = outputs[0]->cast_data_and_get_pointer<Tc>(this->ctx_, true);
Size_t stride0 = inputs[1]->size(1);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_embed_forward,
inputs[0]->size() * stride0, y, x, w, stride0);
}
template <typename T, typename T1>
void EmbedCuda<T, T1>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
typedef typename CudaType<T1>::type Tc;
// atomicAdd doesn't support half precision. Force to use float32 instead.
typedef typename CudaTypeForceFloat<T1>::type Tw;
NBLA_CHECK(!propagate_down[0], error_code::value,
"Index array can not be propagated down.");
if (!propagate_down[1]) {
return;
}
cuda_set_device(std::stoi(this->ctx_.device_id));
if (!accum[1])
inputs[1]->grad()->zero();
const T *x = inputs[0]->get_data_pointer<T>(this->ctx_);
Tw *dw = inputs[1]->cast_grad_and_get_pointer<Tw>(this->ctx_, false);
const Tc *dy = outputs[0]->get_grad_pointer<Tc>(this->ctx_);
Size_t stride0 = inputs[1]->size(1);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_embed_backward_weight,
inputs[0]->size() * stride0, dw, x, dy,
stride0);
}
}
|
bd82aef670fce9d6a4fe9a4af2ecd077f6b675a1.cu
|
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/embed.hpp>
#include <nbla/variable.hpp>
namespace nbla {
template <typename T, typename T1>
__global__ void kernel_embed_forward(const int num, T1 *y, const T *x,
const T1 *w, int stride0) {
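// Gather: output element idx copies w[x[i] * stride0 + j], i.e. row x[i] of the weight
// matrix (rows have length stride0).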
NBLA_CUDA_KERNEL_LOOP(idx, num) {
const int i = idx / stride0;
const int j = idx % stride0;
y[idx] = w[x[i] * stride0 + j];
}
}
template <typename T, typename T1, typename Tw>
__global__ void kernel_embed_backward_weight(const int num, Tw *dw, const T *x,
const T1 *dy, int stride0) {
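// Scatter-add: row i of dy is accumulated into row x[i] of dw; atomicAdd handles
// collisions when several indices point at the same embedding row.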
// TODO: optimize
NBLA_CUDA_KERNEL_LOOP(idx, num) {
const int i = idx / stride0;
const int j = idx % stride0;
atomicAdd(dw + x[i] * stride0 + j,
(typename CudaTypeForceFloat<T1>::type)dy[i * stride0 + j]);
}
}
template <typename T, typename T1>
void EmbedCuda<T, T1>::setup_impl(const Variables &inputs,
const Variables &outputs) {
Embed<T, T1>::setup_impl(inputs, outputs);
}
template <typename T, typename T1>
void EmbedCuda<T, T1>::forward_impl(const Variables &inputs,
const Variables &outputs) {
typedef typename CudaType<T1>::type Tc;
cuda_set_device(std::stoi(this->ctx_.device_id));
const T *x = inputs[0]->get_data_pointer<T>(this->ctx_);
const Tc *w = inputs[1]->get_data_pointer<Tc>(this->ctx_);
Tc *y = outputs[0]->cast_data_and_get_pointer<Tc>(this->ctx_, true);
Size_t stride0 = inputs[1]->size(1);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_embed_forward,
inputs[0]->size() * stride0, y, x, w, stride0);
}
template <typename T, typename T1>
void EmbedCuda<T, T1>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
typedef typename CudaType<T1>::type Tc;
// atomicAdd doesn't support half precision. Force to use float32 instead.
typedef typename CudaTypeForceFloat<T1>::type Tw;
NBLA_CHECK(!propagate_down[0], error_code::value,
"Index array can not be propagated down.");
if (!propagate_down[1]) {
return;
}
cuda_set_device(std::stoi(this->ctx_.device_id));
if (!accum[1])
inputs[1]->grad()->zero();
const T *x = inputs[0]->get_data_pointer<T>(this->ctx_);
Tw *dw = inputs[1]->cast_grad_and_get_pointer<Tw>(this->ctx_, false);
const Tc *dy = outputs[0]->get_grad_pointer<Tc>(this->ctx_);
Size_t stride0 = inputs[1]->size(1);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_embed_backward_weight,
inputs[0]->size() * stride0, dw, x, dy,
stride0);
}
}
|
8337e01902624e932c9f81c084537e2666a1c9a6.hip
|
// !!! This is a file automatically generated by hipify!!!
/**************************************************************************
* Copyright (c) 2017-2019 by the mfmg authors *
* All rights reserved. *
* *
* This file is part of the mfmg library. mfmg is distributed under a BSD *
* 3-clause license. For the licensing terms see the LICENSE file in the *
* top-level directory *
* *
* SPDX-License-Identifier: BSD-3-Clause *
*************************************************************************/
#define BOOST_TEST_MODULE amgx_direct_solver
#include "main.cc"
#if MFMG_WITH_AMGX
#include <mfmg/cuda/cuda_matrix_operator.cuh>
#include <mfmg/cuda/cuda_solver.cuh>
#include <mfmg/cuda/sparse_matrix_device.cuh>
#include <mfmg/cuda/utils.cuh>
#include <boost/property_tree/ptree.hpp>
#include <random>
#include <cusolverDn.h>
#include <cusolverSp.h>
BOOST_AUTO_TEST_CASE(amgx_serial)
{
int comm_size = dealii::Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
if (comm_size == 1)
{
mfmg::CudaHandle cuda_handle;
// Create the matrix on the host.
dealii::SparsityPattern sparsity_pattern;
dealii::SparseMatrix<double> matrix;
unsigned int const size = 3000;
std::vector<std::vector<unsigned int>> column_indices(size);
for (unsigned int i = 0; i < size; ++i)
{
unsigned int j_max = std::min(size, i + 2);
unsigned int j_min = (i == 0) ? 0 : i - 1;
for (unsigned int j = j_min; j < j_max; ++j)
column_indices[i].emplace_back(j);
}
sparsity_pattern.copy_from(size, size, column_indices.begin(),
column_indices.end());
matrix.reinit(sparsity_pattern);
for (unsigned int i = 0; i < size; ++i)
{
unsigned int j_max = std::min(size - 1, i + 1);
unsigned int j_min = (i == 0) ? 0 : i - 1;
matrix.set(i, j_min, -1.);
matrix.set(i, j_max, -1.);
matrix.set(i, i, 4.);
}
// Generate a random solution and then compute the rhs
dealii::Vector<double> sol_ref(size);
std::default_random_engine generator;
std::normal_distribution<> distribution(10., 2.);
for (auto &val : sol_ref)
val = distribution(generator);
dealii::Vector<double> rhs(size);
matrix.vmult(rhs, sol_ref);
// Move the matrix and the rhs to the device
auto matrix_dev = std::make_shared<mfmg::SparseMatrixDevice<double>>(
mfmg::convert_matrix(matrix));
matrix_dev->cusparse_handle = cuda_handle.cusparse_handle;
hipsparseStatus_t cusparse_error_code =
hipsparseCreateMatDescr(&matrix_dev->descr);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
hipsparseSetMatType(matrix_dev->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
hipsparseSetMatIndexBase(matrix_dev->descr, HIPSPARSE_INDEX_BASE_ZERO);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
auto partitioner =
std::make_shared<dealii::Utilities::MPI::Partitioner>(size);
dealii::LinearAlgebra::distributed::Vector<double,
dealii::MemorySpace::CUDA>
rhs_dev(partitioner);
dealii::LinearAlgebra::distributed::Vector<double,
dealii::MemorySpace::CUDA>
solution_dev(partitioner);
std::vector<double> rhs_host(size);
std::copy(rhs.begin(), rhs.end(), rhs_host.begin());
mfmg::cuda_mem_copy_to_dev(rhs_host, rhs_dev.get_values());
auto params = std::make_shared<boost::property_tree::ptree>();
params->put("solver.type", "amgx");
params->put("solver.config_file", "amgx_config_fgmres.json");
std::shared_ptr<mfmg::Operator<dealii::LinearAlgebra::distributed::Vector<
double, dealii::MemorySpace::CUDA>>>
op_dev = std::make_shared<
mfmg::CudaMatrixOperator<dealii::LinearAlgebra::distributed::Vector<
double, dealii::MemorySpace::CUDA>>>(matrix_dev);
mfmg::CudaSolver<dealii::LinearAlgebra::distributed::Vector<
double, dealii::MemorySpace::CUDA>>
direct_solver_dev(cuda_handle, op_dev, params);
direct_solver_dev.apply(rhs_dev, solution_dev);
// Move the result back to the host
int const n_local_rows = matrix_dev->n_local_rows();
std::vector<double> solution_host(n_local_rows);
mfmg::cuda_mem_copy_to_host(solution_dev.get_values(), solution_host);
// Check the result
for (unsigned int i = 0; i < n_local_rows; ++i)
BOOST_CHECK_CLOSE(solution_host[i], sol_ref[i], 1e-7);
}
}
BOOST_AUTO_TEST_CASE(amgx_parallel)
{
int n_devices = 0;
hipError_t cuda_error_code = hipGetDeviceCount(&n_devices);
mfmg::ASSERT_CUDA(cuda_error_code);
int comm_size = dealii::Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
int rank = dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
cuda_error_code = hipSetDevice(rank % n_devices);
mfmg::CudaHandle cuda_handle;
// Create the matrix on the host.
unsigned int const n_local_rows = 10000;
unsigned int const row_offset = rank * n_local_rows;
unsigned int const size = comm_size * n_local_rows;
dealii::IndexSet parallel_partitioning(size);
for (unsigned int i = 0; i < n_local_rows; ++i)
parallel_partitioning.add_index(row_offset + i);
parallel_partitioning.compress();
dealii::TrilinosWrappers::SparseMatrix sparse_matrix(parallel_partitioning);
for (unsigned int i = 0; i < n_local_rows; ++i)
{
unsigned int const row = row_offset + i;
unsigned int j_max = std::min(size - 1, row + 1);
unsigned int j_min = (row == 0) ? 0 : row - 1;
sparse_matrix.set(row, j_min, -1.);
sparse_matrix.set(row, j_max, -1.);
sparse_matrix.set(row, row, 4.);
}
sparse_matrix.compress(dealii::VectorOperation::insert);
// Generate a random solution and then compute the rhs
auto range_indexset = sparse_matrix.locally_owned_range_indices();
dealii::LinearAlgebra::distributed::Vector<double> sol_ref(range_indexset,
MPI_COMM_WORLD);
for (unsigned int i = 0; i < n_local_rows; ++i)
sol_ref.local_element(i) = row_offset + i + 1;
dealii::LinearAlgebra::distributed::Vector<double> rhs(range_indexset,
MPI_COMM_WORLD);
sparse_matrix.vmult(rhs, sol_ref);
// Move the matrix and the rhs to the device
auto matrix_dev = std::make_shared<mfmg::SparseMatrixDevice<double>>(
mfmg::convert_matrix(sparse_matrix));
matrix_dev->cusparse_handle = cuda_handle.cusparse_handle;
hipsparseStatus_t cusparse_error_code =
hipsparseCreateMatDescr(&matrix_dev->descr);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
hipsparseSetMatType(matrix_dev->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
hipsparseSetMatIndexBase(matrix_dev->descr, HIPSPARSE_INDEX_BASE_ZERO);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
auto rhs_dev = mfmg::copy_from_host(rhs);
dealii::LinearAlgebra::distributed::Vector<double, dealii::MemorySpace::CUDA>
solution_dev(sol_ref.get_partitioner());
auto params = std::make_shared<boost::property_tree::ptree>();
params->put("solver.type", "amgx");
params->put("solver.config_file", "amgx_config_fgmres.json");
std::shared_ptr<mfmg::Operator<dealii::LinearAlgebra::distributed::Vector<
double, dealii::MemorySpace::CUDA>>>
cuda_op(
new mfmg::CudaMatrixOperator<dealii::LinearAlgebra::distributed::Vector<
double, dealii::MemorySpace::CUDA>>(matrix_dev));
mfmg::CudaSolver<dealii::LinearAlgebra::distributed::Vector<
double, dealii::MemorySpace::CUDA>>
direct_solver_dev(cuda_handle, cuda_op, params);
direct_solver_dev.apply(rhs_dev, solution_dev);
// Move the result back to the host
std::vector<double> solution_host(n_local_rows);
mfmg::cuda_mem_copy_to_host(solution_dev.get_values(), solution_host);
for (unsigned int i = 0; i < n_local_rows; ++i)
BOOST_CHECK_CLOSE(solution_host[i], sol_ref.local_element(i), 1e-7);
}
#endif
|
8337e01902624e932c9f81c084537e2666a1c9a6.cu
|
/**************************************************************************
* Copyright (c) 2017-2019 by the mfmg authors *
* All rights reserved. *
* *
* This file is part of the mfmg library. mfmg is distributed under a BSD *
* 3-clause license. For the licensing terms see the LICENSE file in the *
* top-level directory *
* *
* SPDX-License-Identifier: BSD-3-Clause *
*************************************************************************/
#define BOOST_TEST_MODULE amgx_direct_solver
#include "main.cc"
#if MFMG_WITH_AMGX
#include <mfmg/cuda/cuda_matrix_operator.cuh>
#include <mfmg/cuda/cuda_solver.cuh>
#include <mfmg/cuda/sparse_matrix_device.cuh>
#include <mfmg/cuda/utils.cuh>
#include <boost/property_tree/ptree.hpp>
#include <random>
#include <cusolverDn.h>
#include <cusolverSp.h>
BOOST_AUTO_TEST_CASE(amgx_serial)
{
int comm_size = dealii::Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
if (comm_size == 1)
{
mfmg::CudaHandle cuda_handle;
// Create the matrix on the host.
dealii::SparsityPattern sparsity_pattern;
dealii::SparseMatrix<double> matrix;
unsigned int const size = 3000;
std::vector<std::vector<unsigned int>> column_indices(size);
for (unsigned int i = 0; i < size; ++i)
{
unsigned int j_max = std::min(size, i + 2);
unsigned int j_min = (i == 0) ? 0 : i - 1;
for (unsigned int j = j_min; j < j_max; ++j)
column_indices[i].emplace_back(j);
}
sparsity_pattern.copy_from(size, size, column_indices.begin(),
column_indices.end());
matrix.reinit(sparsity_pattern);
for (unsigned int i = 0; i < size; ++i)
{
unsigned int j_max = std::min(size - 1, i + 1);
unsigned int j_min = (i == 0) ? 0 : i - 1;
matrix.set(i, j_min, -1.);
matrix.set(i, j_max, -1.);
matrix.set(i, i, 4.);
}
// Generate a random solution and then compute the rhs
dealii::Vector<double> sol_ref(size);
std::default_random_engine generator;
std::normal_distribution<> distribution(10., 2.);
for (auto &val : sol_ref)
val = distribution(generator);
dealii::Vector<double> rhs(size);
matrix.vmult(rhs, sol_ref);
// Move the matrix and the rhs to the device
auto matrix_dev = std::make_shared<mfmg::SparseMatrixDevice<double>>(
mfmg::convert_matrix(matrix));
matrix_dev->cusparse_handle = cuda_handle.cusparse_handle;
cusparseStatus_t cusparse_error_code =
cusparseCreateMatDescr(&matrix_dev->descr);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
cusparseSetMatType(matrix_dev->descr, CUSPARSE_MATRIX_TYPE_GENERAL);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
cusparseSetMatIndexBase(matrix_dev->descr, CUSPARSE_INDEX_BASE_ZERO);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
auto partitioner =
std::make_shared<dealii::Utilities::MPI::Partitioner>(size);
dealii::LinearAlgebra::distributed::Vector<double,
dealii::MemorySpace::CUDA>
rhs_dev(partitioner);
dealii::LinearAlgebra::distributed::Vector<double,
dealii::MemorySpace::CUDA>
solution_dev(partitioner);
std::vector<double> rhs_host(size);
std::copy(rhs.begin(), rhs.end(), rhs_host.begin());
mfmg::cuda_mem_copy_to_dev(rhs_host, rhs_dev.get_values());
auto params = std::make_shared<boost::property_tree::ptree>();
params->put("solver.type", "amgx");
params->put("solver.config_file", "amgx_config_fgmres.json");
std::shared_ptr<mfmg::Operator<dealii::LinearAlgebra::distributed::Vector<
double, dealii::MemorySpace::CUDA>>>
op_dev = std::make_shared<
mfmg::CudaMatrixOperator<dealii::LinearAlgebra::distributed::Vector<
double, dealii::MemorySpace::CUDA>>>(matrix_dev);
mfmg::CudaSolver<dealii::LinearAlgebra::distributed::Vector<
double, dealii::MemorySpace::CUDA>>
direct_solver_dev(cuda_handle, op_dev, params);
direct_solver_dev.apply(rhs_dev, solution_dev);
// Move the result back to the host
int const n_local_rows = matrix_dev->n_local_rows();
std::vector<double> solution_host(n_local_rows);
mfmg::cuda_mem_copy_to_host(solution_dev.get_values(), solution_host);
// Check the result
for (unsigned int i = 0; i < n_local_rows; ++i)
BOOST_CHECK_CLOSE(solution_host[i], sol_ref[i], 1e-7);
}
}
BOOST_AUTO_TEST_CASE(amgx_parallel)
{
int n_devices = 0;
cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
mfmg::ASSERT_CUDA(cuda_error_code);
int comm_size = dealii::Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
int rank = dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
cuda_error_code = cudaSetDevice(rank % n_devices);
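// Assign GPUs to MPI ranks round-robin.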
mfmg::CudaHandle cuda_handle;
// Create the matrix on the host.
unsigned int const n_local_rows = 10000;
unsigned int const row_offset = rank * n_local_rows;
unsigned int const size = comm_size * n_local_rows;
dealii::IndexSet parallel_partitioning(size);
for (unsigned int i = 0; i < n_local_rows; ++i)
parallel_partitioning.add_index(row_offset + i);
parallel_partitioning.compress();
dealii::TrilinosWrappers::SparseMatrix sparse_matrix(parallel_partitioning);
for (unsigned int i = 0; i < n_local_rows; ++i)
{
unsigned int const row = row_offset + i;
unsigned int j_max = std::min(size - 1, row + 1);
unsigned int j_min = (row == 0) ? 0 : row - 1;
sparse_matrix.set(row, j_min, -1.);
sparse_matrix.set(row, j_max, -1.);
sparse_matrix.set(row, row, 4.);
}
sparse_matrix.compress(dealii::VectorOperation::insert);
// Generate a random solution and then compute the rhs
auto range_indexset = sparse_matrix.locally_owned_range_indices();
dealii::LinearAlgebra::distributed::Vector<double> sol_ref(range_indexset,
MPI_COMM_WORLD);
for (unsigned int i = 0; i < n_local_rows; ++i)
sol_ref.local_element(i) = row_offset + i + 1;
dealii::LinearAlgebra::distributed::Vector<double> rhs(range_indexset,
MPI_COMM_WORLD);
sparse_matrix.vmult(rhs, sol_ref);
// Move the matrix and the rhs to the device
auto matrix_dev = std::make_shared<mfmg::SparseMatrixDevice<double>>(
mfmg::convert_matrix(sparse_matrix));
matrix_dev->cusparse_handle = cuda_handle.cusparse_handle;
cusparseStatus_t cusparse_error_code =
cusparseCreateMatDescr(&matrix_dev->descr);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
cusparseSetMatType(matrix_dev->descr, CUSPARSE_MATRIX_TYPE_GENERAL);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
cusparse_error_code =
cusparseSetMatIndexBase(matrix_dev->descr, CUSPARSE_INDEX_BASE_ZERO);
mfmg::ASSERT_CUSPARSE(cusparse_error_code);
auto rhs_dev = mfmg::copy_from_host(rhs);
dealii::LinearAlgebra::distributed::Vector<double, dealii::MemorySpace::CUDA>
solution_dev(sol_ref.get_partitioner());
auto params = std::make_shared<boost::property_tree::ptree>();
params->put("solver.type", "amgx");
params->put("solver.config_file", "amgx_config_fgmres.json");
std::shared_ptr<mfmg::Operator<dealii::LinearAlgebra::distributed::Vector<
double, dealii::MemorySpace::CUDA>>>
cuda_op(
new mfmg::CudaMatrixOperator<dealii::LinearAlgebra::distributed::Vector<
double, dealii::MemorySpace::CUDA>>(matrix_dev));
mfmg::CudaSolver<dealii::LinearAlgebra::distributed::Vector<
double, dealii::MemorySpace::CUDA>>
direct_solver_dev(cuda_handle, cuda_op, params);
direct_solver_dev.apply(rhs_dev, solution_dev);
// Move the result back to the host
std::vector<double> solution_host(n_local_rows);
mfmg::cuda_mem_copy_to_host(solution_dev.get_values(), solution_host);
for (unsigned int i = 0; i < n_local_rows; ++i)
BOOST_CHECK_CLOSE(solution_host[i], sol_ref.local_element(i), 1e-7);
}
#endif
|
f1c15212accd96d824862a99007f78e1d9cfc7e6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "chainerx/cuda/cuda_device.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include <absl/types/optional.h>
#include <hip/hip_runtime.h>
#include <cudnn.h>
#include "chainerx/array.h"
#include "chainerx/backend_util.h"
#include "chainerx/constant.h"
#include "chainerx/cuda/cuda.h"
#include "chainerx/cuda/cuda_backend.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/cudnn.h"
#include "chainerx/cuda/kernel_regist.h"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
#include "chainerx/error.h"
#include "chainerx/kernels/connection.h"
#include "chainerx/kernels/creation.h"
#include "chainerx/kernels/misc.h"
#include "chainerx/kernels/rnn.h"
#include "chainerx/macro.h"
#include "chainerx/native/kernel_regist.h"
#include "chainerx/routines/connection.h"
#include "chainerx/routines/creation.h"
#include "chainerx/routines/manipulation.h"
#include "chainerx/shape.h"
#include "chainerx/stack_vector.h"
namespace chainerx {
namespace cuda {
namespace {
namespace {
__global__ void InitGpuDataKer(float* data, int64_t num_elements, float* value) {
int64_t tid = static_cast<int64_t>(blockIdx.x);
int64_t block_dim = static_cast<int64_t>(blockDim.x);
tid = tid * block_dim + static_cast<int64_t>(threadIdx.x);
if (tid < num_elements) {
data[tid] = value[tid];
}
}
void InitGPUData(float* data, int64_t num_elements, float* value) {
dim3 grid_dim;
dim3 block_dim;
block_dim.x = 1024;
grid_dim.x = (num_elements + block_dim.x - 1) / block_dim.x;
hipLaunchKernelGGL(( InitGpuDataKer), dim3(grid_dim), dim3(block_dim), 0, 0, data, num_elements, value);
}
void WeightsForward(
cuda_internal::DeviceInternals& device_internals,
cudnnRNNDescriptor_t rnn_desc,
const std::vector<std::vector<Array>> ws,
const std::vector<std::vector<Array>> bs,
int64_t n_layers,
int64_t num_directions,
cudnnTensorDescriptor_t x_desc,
cudnnFilterDescriptor_t w_desc,
Array& w) {
for (int64_t layer = 0; layer < n_layers; layer++) {
for (int8_t di = 0; di < num_directions; di++) {
for (size_t lin_layer_id = 0; lin_layer_id < ws[0].size(); lin_layer_id++) {
int64_t index = num_directions * layer + di;
cudnnFilterDescriptor_t lin_layer_mat_desc;
cudnnCreateFilterDescriptor(&lin_layer_mat_desc);
float* m_offset;
device_internals.cudnn_handle().Call(
cudnnGetRNNLinLayerMatrixParams,
rnn_desc,
index,
x_desc,
w_desc,
internal::GetRawOffsetData(w),
lin_layer_id,
lin_layer_mat_desc,
reinterpret_cast<void**>(&m_offset));
cudnnDataType_t data_type;
cudnnTensorFormat_t format;
int nb_dims;
int filter_dim_a[3];
cudnnGetFilterNdDescriptor(lin_layer_mat_desc, 3, &data_type, &format, &nb_dims, filter_dim_a);
Array w_temp = AsContiguous(ws[index][lin_layer_id].AsType(Dtype::kFloat32));
InitGPUData(
m_offset,
filter_dim_a[0] * filter_dim_a[1] * filter_dim_a[2],
reinterpret_cast<float*>(internal::GetRawOffsetData(w_temp)));
cudnnDestroyFilterDescriptor(lin_layer_mat_desc);
cudnnFilterDescriptor_t lin_layer_bias_desc;
cudnnCreateFilterDescriptor(&lin_layer_bias_desc);
float* b_offset;
device_internals.cudnn_handle().Call(
cudnnGetRNNLinLayerBiasParams,
rnn_desc,
index,
x_desc,
w_desc,
internal::GetRawOffsetData(w),
lin_layer_id,
lin_layer_bias_desc,
reinterpret_cast<void**>(&b_offset));
cudnnGetFilterNdDescriptor(lin_layer_bias_desc, 3, &data_type, &format, &nb_dims, filter_dim_a);
Array b_temp = AsContiguous(bs[index][lin_layer_id].AsType(Dtype::kFloat32));
InitGPUData(
b_offset,
filter_dim_a[0] * filter_dim_a[1] * filter_dim_a[2],
reinterpret_cast<float*>(internal::GetRawOffsetData(b_temp)));
cudnnDestroyFilterDescriptor(lin_layer_bias_desc);
}
}
}
}
std::vector<Array> WeightsBackward(
CudaDevice& device,
cudnnRNNDescriptor_t& rnn_desc,
cudnnTensorDescriptor_t dummy_x_desc,
cudnnFilterDescriptor_t w_desc,
Array w,
std::vector<std::vector<Array>> ws,
std::vector<std::vector<Array>> bs,
int64_t n_layers,
int64_t num_directions,
Dtype type) {
cuda_internal::DeviceInternals& device_internals = cuda_internal::GetDeviceInternals(device);
std::vector<Array> ret;
for (int64_t layer = 0; layer < n_layers; layer++) {
for (int8_t di = 0; di < num_directions; di++) {
for (size_t lin_layer_id = 0; lin_layer_id < ws[0].size(); lin_layer_id++) {
int64_t index = num_directions * layer + di;
cudnnFilterDescriptor_t lin_layer_mat_desc;
cudnnCreateFilterDescriptor(&lin_layer_mat_desc);
float* m_offset;
device_internals.cudnn_handle().Call(
cudnnGetRNNLinLayerMatrixParams,
rnn_desc,
index,
dummy_x_desc,
w_desc,
internal::GetRawOffsetData(w),
lin_layer_id,
lin_layer_mat_desc,
reinterpret_cast<void**>(&m_offset));
Array m = AsContiguous(Zeros(ws[index][lin_layer_id].shape(), type, ws[index][lin_layer_id].device()));
cudnnDataType_t data_type;
cudnnTensorFormat_t format;
int nb_dims;
int filter_dim_a[3];
cudnnGetFilterNdDescriptor(lin_layer_mat_desc, 3, &data_type, &format, &nb_dims, filter_dim_a);
InitGPUData(
reinterpret_cast<float*>(internal::GetRawOffsetData(m)),
filter_dim_a[0] * filter_dim_a[1] * filter_dim_a[2],
m_offset);
cudnnDestroyFilterDescriptor(lin_layer_mat_desc);
ret.emplace_back(m);
cudnnFilterDescriptor_t lin_layer_bias_desc;
cudnnCreateFilterDescriptor(&lin_layer_bias_desc);
float* b_offset;
device_internals.cudnn_handle().Call(
cudnnGetRNNLinLayerBiasParams,
rnn_desc,
index,
dummy_x_desc,
w_desc,
internal::GetRawOffsetData(w),
lin_layer_id,
lin_layer_bias_desc,
reinterpret_cast<void**>(&b_offset));
Array b = AsContiguous(Zeros(bs[index][lin_layer_id].shape(), type, bs[index][lin_layer_id].device()));
cudnnGetFilterNdDescriptor(lin_layer_bias_desc, 3, &data_type, &format, &nb_dims, filter_dim_a);
InitGPUData(
reinterpret_cast<float*>(internal::GetRawOffsetData(b)),
filter_dim_a[0] * filter_dim_a[1] * filter_dim_a[2],
b_offset);
cudnnDestroyFilterDescriptor(lin_layer_bias_desc);
ret.emplace_back(b);
}
}
}
return ret;
}
} // namespace
class CudaRnnKernel : public RnnKernel {
public:
std::tuple<std::vector<std::vector<Array>>, std::unique_ptr<chainerx::RnnGradState>> Call(
int64_t n_layers,
Array hx,
absl::optional<Array> cx,
const std::vector<std::vector<Array>>& ws,
const std::vector<std::vector<Array>>& bs,
const std::vector<Array>& xs,
int8_t bidirectional,
int8_t mode,
absl::optional<std::string> activation) override {
CudaDevice& device = dynamic_cast<CudaDevice&>(hx.device());
CudaSetDeviceScope scope{device.index()};
auto& backend = static_cast<CudaBackend&>(device.backend()); // NOLINT
Dtype type = hx.dtype();
cuda_internal::DeviceInternals& device_internals = cuda_internal::GetDeviceInternals(device);
const auto input_dim = xs[0].shape()[1];
const auto hidden_dim = hx.shape()[2];
const auto num_directions = bidirectional == 1 ? 2 : 1;
const auto num_layers = n_layers;
const auto rnn_direction = bidirectional == 1 ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL;
cudnnRNNMode_t rnn_mode;
if (mode == 2) {
if (*activation == "tanh") {
rnn_mode = CUDNN_RNN_TANH;
} else {
rnn_mode = CUDNN_RNN_RELU;
}
} else {
rnn_mode = mode == 0 ? CUDNN_GRU : CUDNN_LSTM;
}
const auto rnn_input = CUDNN_LINEAR_INPUT;
cudnnRNNDescriptor_t rnn_desc;
uint64_t seed = 1337ull;
cudnnDropoutDescriptor_t dropout_desc;
cudnnCreateDropoutDescriptor(&dropout_desc);
size_t state_size;
void* states;
device_internals.cudnn_handle().Call(cudnnDropoutGetStatesSize, &state_size);
hipMalloc(&states, state_size);
cudnnSetDropoutDescriptor(dropout_desc, device_internals.cudnn_handle().handle(), 0, states, state_size, seed);
cudnnCreateRNNDescriptor(&rnn_desc);
device_internals.cudnn_handle().Call(
cudnnSetRNNDescriptor,
rnn_desc,
hidden_dim,
num_layers,
dropout_desc,
rnn_input,
rnn_direction,
rnn_mode,
CUDNN_RNN_ALGO_STANDARD,
CUDNN_DATA_FLOAT);
cudnnTensorDescriptor_t *x_desc, *y_desc;
x_desc = reinterpret_cast<cudnnTensorDescriptor_t*>(malloc(xs.size() * sizeof(cudnnTensorDescriptor_t)));
y_desc = reinterpret_cast<cudnnTensorDescriptor_t*>(malloc(xs.size() * sizeof(cudnnTensorDescriptor_t)));
std::vector<cuda_internal::CudnnTensorDescriptor> xs_desc;
std::vector<cuda_internal::CudnnTensorDescriptor> ys_desc;
std::vector<Array> ys;
std::vector<Array> xs_cont;
for (size_t i = 0; i < xs.size(); i++) {
Shape xs_shape{xs[i].shape()[0], xs[i].shape()[1], 1};
Shape ys_shape{xs[i].shape()[0], num_directions * hidden_dim, 1};
xs_cont.emplace_back(AsContiguous(xs[i].AsType(Dtype::kFloat32)));
ys.emplace_back(
AsContiguous(Zeros({xs_cont[i].shape()[0], num_directions * hidden_dim}, xs_cont[i].dtype(), xs_cont[i].device())));
xs_desc.emplace_back(cuda_internal::CudnnTensorDescriptor(Reshape(xs_cont[i], xs_shape)));
ys_desc.emplace_back(cuda_internal::CudnnTensorDescriptor(Reshape(ys[i], ys_shape)));
x_desc[i] = *xs_desc[i];
y_desc[i] = *ys_desc[i];
}
Array x = Concatenate(xs_cont, 0);
Array y = Concatenate(ys, 0);
size_t weight_size;
device_internals.cudnn_handle().Call(cudnnGetRNNParamsSize, rnn_desc, x_desc[0], &weight_size, CUDNN_DATA_FLOAT);
Array w = AsContiguous(Zeros({static_cast<int64_t>(weight_size) / 4, 1, 1}, x.dtype(), x.device()));
cuda_internal::CudnnFilterDescriptor w_desc{w};
WeightsForward(device_internals, rnn_desc, ws, bs, n_layers, num_directions, x_desc[0], *w_desc, w);
size_t work_size;
size_t reserve_size;
device_internals.cudnn_handle().Call(cudnnGetRNNWorkspaceSize, rnn_desc, xs.size(), x_desc, &work_size);
Array workspace = AsContiguous(Zeros({static_cast<int64_t>(work_size)}, hx.dtype(), hx.device()));
device_internals.cudnn_handle().Call(cudnnGetRNNTrainingReserveSize, rnn_desc, xs.size(), x_desc, &reserve_size);
Array reserve = AsContiguous(Zeros({static_cast<int64_t>(reserve_size)}, hx.dtype(), hx.device()));
hx = AsContiguous(hx.AsType(Dtype::kFloat32));
Array hy = AsContiguous(Zeros(hx.shape(), hx.dtype(), hx.device()));
Array cy = AsContiguous(Zeros(hx.shape(), hx.dtype(), hx.device()));
cuda_internal::CudnnTensorDescriptor hx_desc{hx};
Array _cx;
if (cx.has_value()) {
_cx = AsContiguous((*cx).AsType(Dtype::kFloat32));
} else {
_cx = Zeros(hx.shape(), hx.dtype(), hx.device());
}
cuda_internal::CudnnTensorDescriptor cx_desc{_cx};
cuda_internal::CudnnTensorDescriptor hy_desc{hy};
cuda_internal::CudnnTensorDescriptor cy_desc{cy};
device_internals.cudnn_handle().Call(
cudnnRNNForwardTraining,
rnn_desc,
xs.size(),
x_desc,
internal::GetRawOffsetData(x),
*hx_desc,
internal::GetRawOffsetData(hx),
*cx_desc,
internal::GetRawOffsetData(_cx),
*w_desc,
internal::GetRawOffsetData(w),
y_desc,
internal::GetRawOffsetData(y),
*hy_desc,
internal::GetRawOffsetData(hy),
*cy_desc,
internal::GetRawOffsetData(cy),
internal::GetRawOffsetData(workspace),
work_size,
internal::GetRawOffsetData(reserve),
reserve_size);
std::vector<int64_t> split_indices;
for (size_t i = 0; i < xs.size() - 1; i++) {
if (i != 0) {
split_indices.emplace_back(split_indices[i - 1] + xs[i].shape()[0]);
} else {
split_indices.emplace_back(xs[i].shape()[0]);
}
}
std::unique_ptr<RnnGradState> state = std::make_unique<GenericRnnGradState>(rnn_desc, *w_desc, w, reserve, workspace);
y = y.AsType(type);
ys = Split(y, split_indices, 0);
std::vector<Array> out_states;
out_states.emplace_back(hy.AsType(type));
if (cx.has_value()) {
out_states.emplace_back(cy.AsType(type));
}
std::vector<std::vector<Array>> ret;
ret.emplace_back(out_states);
ret.emplace_back(ys);
return std::make_tuple(std::move(ret), std::move(state));
}
};
CHAINERX_CUDA_REGISTER_KERNEL(RnnKernel, CudaRnnKernel);
class CudaRnnBackwardKernel : public RnnBackwardKernel {
public:
std::vector<std::vector<Array>> Call(
int64_t n_layers,
Array hx,
absl::optional<Array> cx,
const std::vector<std::vector<Array>>& ws,
const std::vector<std::vector<Array>>& bs,
const std::vector<Array>& xs,
Array dhy,
absl::optional<Array> dcy,
std::vector<Array> ys,
std::vector<Array> dys,
int8_t bidirectional,
const std::shared_ptr<chainerx::RnnGradState>& state) override {
CudaDevice& device = dynamic_cast<CudaDevice&>(hx.device());
CudaSetDeviceScope scope{device.index()};
cuda_internal::DeviceInternals& device_internals = cuda_internal::GetDeviceInternals(device);
auto& cuda_state = dynamic_cast<GenericRnnGradState&>(*state);
Dtype type = hx.dtype();
const auto input_dim = xs[0].shape()[1];
const auto hidden_dim = hx.shape()[2];
const auto num_directions = bidirectional == 1 ? 2 : 1;
cudnnRNNDescriptor_t rnn_desc = cuda_state.rnn_desc();
std::vector<Array> dxs;
std::vector<cuda_internal::CudnnTensorDescriptor> _xs_desc, _dxs_desc, _ys_desc, _dys_desc;
cudnnTensorDescriptor_t* xs_desc = reinterpret_cast<cudnnTensorDescriptor_t*>(malloc(xs.size() * sizeof(cudnnTensorDescriptor_t)));
cudnnTensorDescriptor_t* dxs_desc = reinterpret_cast<cudnnTensorDescriptor_t*>(malloc(xs.size() * sizeof(cudnnTensorDescriptor_t)));
cudnnTensorDescriptor_t* ys_desc = reinterpret_cast<cudnnTensorDescriptor_t*>(malloc(xs.size() * sizeof(cudnnTensorDescriptor_t)));
cudnnTensorDescriptor_t* dys_desc = reinterpret_cast<cudnnTensorDescriptor_t*>(malloc(xs.size() * sizeof(cudnnTensorDescriptor_t)));
std::vector<Array> xs_cont;
for (size_t i = 0; i < xs.size(); i++) {
Shape xs_shape{xs[i].shape()[0], xs[i].shape()[1], 1};
Shape ys_shape{ys[i].shape()[0], ys[i].shape()[1], 1};
xs_cont.emplace_back(AsContiguous(xs[i].AsType(Dtype::kFloat32)));
ys[i] = AsContiguous(ys[i].AsType(Dtype::kFloat32));
dys[i] = AsContiguous(dys[i].AsType(Dtype::kFloat32));
_xs_desc.emplace_back(cuda_internal::CudnnTensorDescriptor(Reshape(xs_cont[i], xs_shape)));
xs_desc[i] = *_xs_desc[i];
dxs.emplace_back(AsContiguous(Zeros(xs_cont[i].shape(), xs_cont[i].dtype(), xs_cont[i].device())));
_dxs_desc.emplace_back(cuda_internal::CudnnTensorDescriptor(Reshape(dxs[i], xs_shape)));
dxs_desc[i] = *_dxs_desc[i];
_ys_desc.emplace_back(cuda_internal::CudnnTensorDescriptor(Reshape(ys[i], ys_shape)));
ys_desc[i] = *_ys_desc[i];
_dys_desc.emplace_back(cuda_internal::CudnnTensorDescriptor(Reshape(dys[i], ys_shape)));
dys_desc[i] = *_dys_desc[i];
}
Array dx = AsContiguous(Concatenate(dxs, 0));
Array x = AsContiguous(Concatenate(xs_cont, 0));
Array y = AsContiguous(Concatenate(ys, 0));
Array dy = AsContiguous(Concatenate(dys, 0));
cudnnFilterDescriptor_t w_desc = cuda_state.wDesc();
Array w = AsContiguous(cuda_state.w());
Array reserve = AsContiguous(cuda_state.reserve());
Array workspace = AsContiguous(cuda_state.workspace());
size_t reserve_size = reserve.shape()[0];
size_t work_size = workspace.shape()[0];
hx = AsContiguous(hx.AsType(Dtype::kFloat32));
dhy = AsContiguous(dhy.AsType(Dtype::kFloat32));
cuda_internal::CudnnTensorDescriptor hx_desc{hx};
Array _cx;
Array _dcy;
if (cx.has_value()) {
_cx = AsContiguous((*cx).AsType(Dtype::kFloat32));
_dcy = AsContiguous((*dcy).AsType(Dtype::kFloat32));
} else {
_cx = Zeros(hx.shape(), hx.dtype(), hx.device());
_dcy = Zeros(hx.shape(), hx.dtype(), hx.device());
}
cuda_internal::CudnnTensorDescriptor cx_desc{_cx};
cuda_internal::CudnnTensorDescriptor dcy_desc{_dcy};
cuda_internal::CudnnTensorDescriptor dhy_desc{dhy};
Array dhx = AsContiguous(Zeros(hx.shape(), hx.dtype(), hx.device()));
cuda_internal::CudnnTensorDescriptor dhx_desc{dhx};
Array dcx = AsContiguous(Zeros(hx.shape(), hx.dtype(), hx.device()));
cuda_internal::CudnnTensorDescriptor dcx_desc{dcx};
reserve = AsContiguous(reserve);
device_internals.cudnn_handle().Call(
cudnnRNNBackwardData,
rnn_desc,
xs.size(),
ys_desc,
internal::GetRawOffsetData(y),
dys_desc,
internal::GetRawOffsetData(dy),
*dhy_desc,
internal::GetRawOffsetData(dhy),
*dcy_desc,
internal::GetRawOffsetData(_dcy),
w_desc,
internal::GetRawOffsetData(w),
*hx_desc,
internal::GetRawOffsetData(hx),
*cx_desc,
internal::GetRawOffsetData(_cx),
dxs_desc,
internal::GetRawOffsetData(dx),
*dhx_desc,
internal::GetRawOffsetData(dhx),
*dcx_desc,
internal::GetRawOffsetData(dcx),
internal::GetRawOffsetData(workspace),
work_size,
internal::GetRawOffsetData(reserve),
reserve_size);
Array dw = AsContiguous(Zeros(w.shape(), hx.dtype(), hx.device()));
cuda_internal::CudnnFilterDescriptor dw_desc{dw};
device_internals.cudnn_handle().Call(
cudnnRNNBackwardWeights,
rnn_desc,
xs.size(),
xs_desc,
internal::GetRawOffsetData(x),
*hx_desc,
internal::GetRawOffsetData(hx),
ys_desc,
internal::GetRawOffsetData(y),
internal::GetRawOffsetData(workspace),
work_size,
*dw_desc,
internal::GetRawOffsetData(dw),
internal::GetRawOffsetData(reserve),
reserve_size);
std::vector<int64_t> split_indices;
for (size_t i = 0; i < xs.size() - 1; i++) {
if (i != 0) {
split_indices.emplace_back(split_indices[i - 1] + xs[i].shape()[0]);
} else {
split_indices.emplace_back(xs[i].shape()[0]);
}
}
dx = dx.AsType(type);
dxs = Split(dx, split_indices, 0);
std::vector<Array> dstate;
dstate.emplace_back(dhx.AsType(type));
if (cx.has_value()) {
dstate.emplace_back(dcx.AsType(type));
}
std::vector<std::vector<Array>> ret;
ret.emplace_back(dstate);
ret.emplace_back(WeightsBackward(device, rnn_desc, dxs_desc[0], *dw_desc, dw, ws, bs, n_layers, num_directions, type));
ret.emplace_back(dxs);
return ret;
}
};
CHAINERX_CUDA_REGISTER_KERNEL(RnnBackwardKernel, CudaRnnBackwardKernel);
} // namespace
} // namespace cuda
} // namespace chainerx
|
f1c15212accd96d824862a99007f78e1d9cfc7e6.cu
|
#include "chainerx/cuda/cuda_device.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include <absl/types/optional.h>
#include <cuda.h>
#include <cudnn.h>
#include "chainerx/array.h"
#include "chainerx/backend_util.h"
#include "chainerx/constant.h"
#include "chainerx/cuda/cuda.h"
#include "chainerx/cuda/cuda_backend.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/cudnn.h"
#include "chainerx/cuda/kernel_regist.h"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
#include "chainerx/error.h"
#include "chainerx/kernels/connection.h"
#include "chainerx/kernels/creation.h"
#include "chainerx/kernels/misc.h"
#include "chainerx/kernels/rnn.h"
#include "chainerx/macro.h"
#include "chainerx/native/kernel_regist.h"
#include "chainerx/routines/connection.h"
#include "chainerx/routines/creation.h"
#include "chainerx/routines/manipulation.h"
#include "chainerx/shape.h"
#include "chainerx/stack_vector.h"
namespace chainerx {
namespace cuda {
namespace {
namespace {
__global__ void InitGpuDataKer(float* data, int64_t num_elements, float* value) {
int64_t tid = static_cast<int64_t>(blockIdx.x);
int64_t block_dim = static_cast<int64_t>(blockDim.x);
tid = tid * block_dim + static_cast<int64_t>(threadIdx.x);
if (tid < num_elements) {
data[tid] = value[tid];
}
}
void InitGPUData(float* data, int64_t num_elements, float* value) {
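// Device-to-device element-wise copy helper used to fill cuDNN's packed RNN weight/bias buffers.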
dim3 grid_dim;
dim3 block_dim;
block_dim.x = 1024;
grid_dim.x = (num_elements + block_dim.x - 1) / block_dim.x;
InitGpuDataKer<<<grid_dim, block_dim>>>(data, num_elements, value);
}
void WeightsForward(
cuda_internal::DeviceInternals& device_internals,
cudnnRNNDescriptor_t rnn_desc,
const std::vector<std::vector<Array>> ws,
const std::vector<std::vector<Array>> bs,
int64_t n_layers,
int64_t num_directions,
cudnnTensorDescriptor_t x_desc,
cudnnFilterDescriptor_t w_desc,
Array& w) {
for (int64_t layer = 0; layer < n_layers; layer++) {
for (int8_t di = 0; di < num_directions; di++) {
for (size_t lin_layer_id = 0; lin_layer_id < ws[0].size(); lin_layer_id++) {
int64_t index = num_directions * layer + di;
cudnnFilterDescriptor_t lin_layer_mat_desc;
cudnnCreateFilterDescriptor(&lin_layer_mat_desc);
float* m_offset;
device_internals.cudnn_handle().Call(
cudnnGetRNNLinLayerMatrixParams,
rnn_desc,
index,
x_desc,
w_desc,
internal::GetRawOffsetData(w),
lin_layer_id,
lin_layer_mat_desc,
reinterpret_cast<void**>(&m_offset));
cudnnDataType_t data_type;
cudnnTensorFormat_t format;
int nb_dims;
int filter_dim_a[3];
cudnnGetFilterNdDescriptor(lin_layer_mat_desc, 3, &data_type, &format, &nb_dims, filter_dim_a);
Array w_temp = AsContiguous(ws[index][lin_layer_id].AsType(Dtype::kFloat32));
InitGPUData(
m_offset,
filter_dim_a[0] * filter_dim_a[1] * filter_dim_a[2],
reinterpret_cast<float*>(internal::GetRawOffsetData(w_temp)));
cudnnDestroyFilterDescriptor(lin_layer_mat_desc);
cudnnFilterDescriptor_t lin_layer_bias_desc;
cudnnCreateFilterDescriptor(&lin_layer_bias_desc);
float* b_offset;
device_internals.cudnn_handle().Call(
cudnnGetRNNLinLayerBiasParams,
rnn_desc,
index,
x_desc,
w_desc,
internal::GetRawOffsetData(w),
lin_layer_id,
lin_layer_bias_desc,
reinterpret_cast<void**>(&b_offset));
cudnnGetFilterNdDescriptor(lin_layer_bias_desc, 3, &data_type, &format, &nb_dims, filter_dim_a);
Array b_temp = AsContiguous(bs[index][lin_layer_id].AsType(Dtype::kFloat32));
InitGPUData(
b_offset,
filter_dim_a[0] * filter_dim_a[1] * filter_dim_a[2],
reinterpret_cast<float*>(internal::GetRawOffsetData(b_temp)));
cudnnDestroyFilterDescriptor(lin_layer_bias_desc);
}
}
}
}
std::vector<Array> WeightsBackward(
CudaDevice& device,
cudnnRNNDescriptor_t& rnn_desc,
cudnnTensorDescriptor_t dummy_x_desc,
cudnnFilterDescriptor_t w_desc,
Array w,
std::vector<std::vector<Array>> ws,
std::vector<std::vector<Array>> bs,
int64_t n_layers,
int64_t num_directions,
Dtype type) {
cuda_internal::DeviceInternals& device_internals = cuda_internal::GetDeviceInternals(device);
std::vector<Array> ret;
for (int64_t layer = 0; layer < n_layers; layer++) {
for (int8_t di = 0; di < num_directions; di++) {
for (size_t lin_layer_id = 0; lin_layer_id < ws[0].size(); lin_layer_id++) {
int64_t index = num_directions * layer + di;
cudnnFilterDescriptor_t lin_layer_mat_desc;
cudnnCreateFilterDescriptor(&lin_layer_mat_desc);
float* m_offset;
device_internals.cudnn_handle().Call(
cudnnGetRNNLinLayerMatrixParams,
rnn_desc,
index,
dummy_x_desc,
w_desc,
internal::GetRawOffsetData(w),
lin_layer_id,
lin_layer_mat_desc,
reinterpret_cast<void**>(&m_offset));
Array m = AsContiguous(Zeros(ws[index][lin_layer_id].shape(), type, ws[index][lin_layer_id].device()));
cudnnDataType_t data_type;
cudnnTensorFormat_t format;
int nb_dims;
int filter_dim_a[3];
cudnnGetFilterNdDescriptor(lin_layer_mat_desc, 3, &data_type, &format, &nb_dims, filter_dim_a);
InitGPUData(
reinterpret_cast<float*>(internal::GetRawOffsetData(m)),
filter_dim_a[0] * filter_dim_a[1] * filter_dim_a[2],
m_offset);
cudnnDestroyFilterDescriptor(lin_layer_mat_desc);
ret.emplace_back(m);
cudnnFilterDescriptor_t lin_layer_bias_desc;
cudnnCreateFilterDescriptor(&lin_layer_bias_desc);
float* b_offset;
device_internals.cudnn_handle().Call(
cudnnGetRNNLinLayerBiasParams,
rnn_desc,
index,
dummy_x_desc,
w_desc,
internal::GetRawOffsetData(w),
lin_layer_id,
lin_layer_bias_desc,
reinterpret_cast<void**>(&b_offset));
Array b = AsContiguous(Zeros(bs[index][lin_layer_id].shape(), type, bs[index][lin_layer_id].device()));
cudnnGetFilterNdDescriptor(lin_layer_bias_desc, 3, &data_type, &format, &nb_dims, filter_dim_a);
InitGPUData(
reinterpret_cast<float*>(internal::GetRawOffsetData(b)),
filter_dim_a[0] * filter_dim_a[1] * filter_dim_a[2],
b_offset);
cudnnDestroyFilterDescriptor(lin_layer_bias_desc);
ret.emplace_back(b);
}
}
}
return ret;
}
} // namespace
class CudaRnnKernel : public RnnKernel {
public:
std::tuple<std::vector<std::vector<Array>>, std::unique_ptr<chainerx::RnnGradState>> Call(
int64_t n_layers,
Array hx,
absl::optional<Array> cx,
const std::vector<std::vector<Array>>& ws,
const std::vector<std::vector<Array>>& bs,
const std::vector<Array>& xs,
int8_t bidirectional,
int8_t mode,
absl::optional<std::string> activation) override {
CudaDevice& device = dynamic_cast<CudaDevice&>(hx.device());
CudaSetDeviceScope scope{device.index()};
auto& backend = static_cast<CudaBackend&>(device.backend()); // NOLINT
Dtype type = hx.dtype();
cuda_internal::DeviceInternals& device_internals = cuda_internal::GetDeviceInternals(device);
const auto input_dim = xs[0].shape()[1];
const auto hidden_dim = hx.shape()[2];
const auto num_directions = bidirectional == 1 ? 2 : 1;
const auto num_layers = n_layers;
const auto rnn_direction = bidirectional == 1 ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL;
cudnnRNNMode_t rnn_mode;
if (mode == 2) {
if (*activation == "tanh") {
rnn_mode = CUDNN_RNN_TANH;
} else {
rnn_mode = CUDNN_RNN_RELU;
}
} else {
rnn_mode = mode == 0 ? CUDNN_GRU : CUDNN_LSTM;
}
const auto rnn_input = CUDNN_LINEAR_INPUT;
cudnnRNNDescriptor_t rnn_desc;
uint64_t seed = 1337ull;
cudnnDropoutDescriptor_t dropout_desc;
cudnnCreateDropoutDescriptor(&dropout_desc);
size_t state_size;
void* states;
device_internals.cudnn_handle().Call(cudnnDropoutGetStatesSize, &state_size);
cudaMalloc(&states, state_size);
cudnnSetDropoutDescriptor(dropout_desc, device_internals.cudnn_handle().handle(), 0, states, state_size, seed);
cudnnCreateRNNDescriptor(&rnn_desc);
device_internals.cudnn_handle().Call(
cudnnSetRNNDescriptor,
rnn_desc,
hidden_dim,
num_layers,
dropout_desc,
rnn_input,
rnn_direction,
rnn_mode,
CUDNN_RNN_ALGO_STANDARD,
CUDNN_DATA_FLOAT);
cudnnTensorDescriptor_t *x_desc, *y_desc;
x_desc = reinterpret_cast<cudnnTensorDescriptor_t*>(malloc(xs.size() * sizeof(cudnnTensorDescriptor_t)));
y_desc = reinterpret_cast<cudnnTensorDescriptor_t*>(malloc(xs.size() * sizeof(cudnnTensorDescriptor_t)));
std::vector<cuda_internal::CudnnTensorDescriptor> xs_desc;
std::vector<cuda_internal::CudnnTensorDescriptor> ys_desc;
std::vector<Array> ys;
std::vector<Array> xs_cont;
for (size_t i = 0; i < xs.size(); i++) {
Shape xs_shape{xs[i].shape()[0], xs[i].shape()[1], 1};
Shape ys_shape{xs[i].shape()[0], num_directions * hidden_dim, 1};
xs_cont.emplace_back(AsContiguous(xs[i].AsType(Dtype::kFloat32)));
ys.emplace_back(
AsContiguous(Zeros({xs_cont[i].shape()[0], num_directions * hidden_dim}, xs_cont[i].dtype(), xs_cont[i].device())));
xs_desc.emplace_back(cuda_internal::CudnnTensorDescriptor(Reshape(xs_cont[i], xs_shape)));
ys_desc.emplace_back(cuda_internal::CudnnTensorDescriptor(Reshape(ys[i], ys_shape)));
x_desc[i] = *xs_desc[i];
y_desc[i] = *ys_desc[i];
}
Array x = Concatenate(xs_cont, 0);
Array y = Concatenate(ys, 0);
size_t weight_size;
device_internals.cudnn_handle().Call(cudnnGetRNNParamsSize, rnn_desc, x_desc[0], &weight_size, CUDNN_DATA_FLOAT);
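// cudnnGetRNNParamsSize reports a size in bytes; dividing by 4 assumes float32 weights.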
Array w = AsContiguous(Zeros({static_cast<int64_t>(weight_size) / 4, 1, 1}, x.dtype(), x.device()));
cuda_internal::CudnnFilterDescriptor w_desc{w};
WeightsForward(device_internals, rnn_desc, ws, bs, n_layers, num_directions, x_desc[0], *w_desc, w);
size_t work_size;
size_t reserve_size;
device_internals.cudnn_handle().Call(cudnnGetRNNWorkspaceSize, rnn_desc, xs.size(), x_desc, &work_size);
Array workspace = AsContiguous(Zeros({static_cast<int64_t>(work_size)}, hx.dtype(), hx.device()));
device_internals.cudnn_handle().Call(cudnnGetRNNTrainingReserveSize, rnn_desc, xs.size(), x_desc, &reserve_size);
Array reserve = AsContiguous(Zeros({static_cast<int64_t>(reserve_size)}, hx.dtype(), hx.device()));
hx = AsContiguous(hx.AsType(Dtype::kFloat32));
Array hy = AsContiguous(Zeros(hx.shape(), hx.dtype(), hx.device()));
Array cy = AsContiguous(Zeros(hx.shape(), hx.dtype(), hx.device()));
cuda_internal::CudnnTensorDescriptor hx_desc{hx};
Array _cx;
if (cx.has_value()) {
_cx = AsContiguous((*cx).AsType(Dtype::kFloat32));
} else {
_cx = Zeros(hx.shape(), hx.dtype(), hx.device());
}
cuda_internal::CudnnTensorDescriptor cx_desc{_cx};
cuda_internal::CudnnTensorDescriptor hy_desc{hy};
cuda_internal::CudnnTensorDescriptor cy_desc{cy};
device_internals.cudnn_handle().Call(
cudnnRNNForwardTraining,
rnn_desc,
xs.size(),
x_desc,
internal::GetRawOffsetData(x),
*hx_desc,
internal::GetRawOffsetData(hx),
*cx_desc,
internal::GetRawOffsetData(_cx),
*w_desc,
internal::GetRawOffsetData(w),
y_desc,
internal::GetRawOffsetData(y),
*hy_desc,
internal::GetRawOffsetData(hy),
*cy_desc,
internal::GetRawOffsetData(cy),
internal::GetRawOffsetData(workspace),
work_size,
internal::GetRawOffsetData(reserve),
reserve_size);
std::vector<int64_t> split_indices;
for (size_t i = 0; i < xs.size() - 1; i++) {
if (i != 0) {
split_indices.emplace_back(split_indices[i - 1] + xs[i].shape()[0]);
} else {
split_indices.emplace_back(xs[i].shape()[0]);
}
}
std::unique_ptr<RnnGradState> state = std::make_unique<GenericRnnGradState>(rnn_desc, *w_desc, w, reserve, workspace);
y = y.AsType(type);
ys = Split(y, split_indices, 0);
std::vector<Array> out_states;
out_states.emplace_back(hy.AsType(type));
if (cx.has_value()) {
out_states.emplace_back(cy.AsType(type));
}
std::vector<std::vector<Array>> ret;
ret.emplace_back(out_states);
ret.emplace_back(ys);
return std::make_tuple(std::move(ret), std::move(state));
}
};
CHAINERX_CUDA_REGISTER_KERNEL(RnnKernel, CudaRnnKernel);
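// Backward pass: computes gradients w.r.t. the inputs, initial states and weights via
// cudnnRNNBackwardData / cudnnRNNBackwardWeights, reusing the reserve space and workspace
// saved in GenericRnnGradState by the forward pass.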
class CudaRnnBackwardKernel : public RnnBackwardKernel {
public:
std::vector<std::vector<Array>> Call(
int64_t n_layers,
Array hx,
absl::optional<Array> cx,
const std::vector<std::vector<Array>>& ws,
const std::vector<std::vector<Array>>& bs,
const std::vector<Array>& xs,
Array dhy,
absl::optional<Array> dcy,
std::vector<Array> ys,
std::vector<Array> dys,
int8_t bidirectional,
const std::shared_ptr<chainerx::RnnGradState>& state) override {
CudaDevice& device = dynamic_cast<CudaDevice&>(hx.device());
CudaSetDeviceScope scope{device.index()};
cuda_internal::DeviceInternals& device_internals = cuda_internal::GetDeviceInternals(device);
auto& cuda_state = dynamic_cast<GenericRnnGradState&>(*state);
Dtype type = hx.dtype();
const auto input_dim = xs[0].shape()[1];
const auto hidden_dim = hx.shape()[2];
const auto num_directions = bidirectional == 1 ? 2 : 1;
cudnnRNNDescriptor_t rnn_desc = cuda_state.rnn_desc();
std::vector<Array> dxs;
std::vector<cuda_internal::CudnnTensorDescriptor> _xs_desc, _dxs_desc, _ys_desc, _dys_desc;
cudnnTensorDescriptor_t* xs_desc = reinterpret_cast<cudnnTensorDescriptor_t*>(malloc(xs.size() * sizeof(cudnnTensorDescriptor_t)));
cudnnTensorDescriptor_t* dxs_desc = reinterpret_cast<cudnnTensorDescriptor_t*>(malloc(xs.size() * sizeof(cudnnTensorDescriptor_t)));
cudnnTensorDescriptor_t* ys_desc = reinterpret_cast<cudnnTensorDescriptor_t*>(malloc(xs.size() * sizeof(cudnnTensorDescriptor_t)));
cudnnTensorDescriptor_t* dys_desc = reinterpret_cast<cudnnTensorDescriptor_t*>(malloc(xs.size() * sizeof(cudnnTensorDescriptor_t)));
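        // As in the forward pass, build one tensor descriptor per time step for xs, dxs, ys and dys.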
std::vector<Array> xs_cont;
for (size_t i = 0; i < xs.size(); i++) {
Shape xs_shape{xs[i].shape()[0], xs[i].shape()[1], 1};
Shape ys_shape{ys[i].shape()[0], ys[i].shape()[1], 1};
xs_cont.emplace_back(AsContiguous(xs[i].AsType(Dtype::kFloat32)));
ys[i] = AsContiguous(ys[i].AsType(Dtype::kFloat32));
dys[i] = AsContiguous(dys[i].AsType(Dtype::kFloat32));
_xs_desc.emplace_back(cuda_internal::CudnnTensorDescriptor(Reshape(xs_cont[i], xs_shape)));
xs_desc[i] = *_xs_desc[i];
dxs.emplace_back(AsContiguous(Zeros(xs_cont[i].shape(), xs_cont[i].dtype(), xs_cont[i].device())));
_dxs_desc.emplace_back(cuda_internal::CudnnTensorDescriptor(Reshape(dxs[i], xs_shape)));
dxs_desc[i] = *_dxs_desc[i];
_ys_desc.emplace_back(cuda_internal::CudnnTensorDescriptor(Reshape(ys[i], ys_shape)));
ys_desc[i] = *_ys_desc[i];
_dys_desc.emplace_back(cuda_internal::CudnnTensorDescriptor(Reshape(dys[i], ys_shape)));
dys_desc[i] = *_dys_desc[i];
}
Array dx = AsContiguous(Concatenate(dxs, 0));
Array x = AsContiguous(Concatenate(xs_cont, 0));
Array y = AsContiguous(Concatenate(ys, 0));
Array dy = AsContiguous(Concatenate(dys, 0));
cudnnFilterDescriptor_t w_desc = cuda_state.wDesc();
Array w = AsContiguous(cuda_state.w());
Array reserve = AsContiguous(cuda_state.reserve());
Array workspace = AsContiguous(cuda_state.workspace());
size_t reserve_size = reserve.shape()[0];
size_t work_size = workspace.shape()[0];
hx = AsContiguous(hx.AsType(Dtype::kFloat32));
dhy = AsContiguous(dhy.AsType(Dtype::kFloat32));
cuda_internal::CudnnTensorDescriptor hx_desc{hx};
Array _cx;
Array _dcy;
if (cx.has_value()) {
_cx = AsContiguous((*cx).AsType(Dtype::kFloat32));
_dcy = AsContiguous((*dcy).AsType(Dtype::kFloat32));
} else {
_cx = Zeros(hx.shape(), hx.dtype(), hx.device());
_dcy = Zeros(hx.shape(), hx.dtype(), hx.device());
}
cuda_internal::CudnnTensorDescriptor cx_desc{_cx};
cuda_internal::CudnnTensorDescriptor dcy_desc{_dcy};
cuda_internal::CudnnTensorDescriptor dhy_desc{dhy};
Array dhx = AsContiguous(Zeros(hx.shape(), hx.dtype(), hx.device()));
cuda_internal::CudnnTensorDescriptor dhx_desc{dhx};
Array dcx = AsContiguous(Zeros(hx.shape(), hx.dtype(), hx.device()));
cuda_internal::CudnnTensorDescriptor dcx_desc{dcx};
reserve = AsContiguous(reserve);
device_internals.cudnn_handle().Call(
cudnnRNNBackwardData,
rnn_desc,
xs.size(),
ys_desc,
internal::GetRawOffsetData(y),
dys_desc,
internal::GetRawOffsetData(dy),
*dhy_desc,
internal::GetRawOffsetData(dhy),
*dcy_desc,
internal::GetRawOffsetData(_dcy),
w_desc,
internal::GetRawOffsetData(w),
*hx_desc,
internal::GetRawOffsetData(hx),
*cx_desc,
internal::GetRawOffsetData(_cx),
dxs_desc,
internal::GetRawOffsetData(dx),
*dhx_desc,
internal::GetRawOffsetData(dhx),
*dcx_desc,
internal::GetRawOffsetData(dcx),
internal::GetRawOffsetData(workspace),
work_size,
internal::GetRawOffsetData(reserve),
reserve_size);
Array dw = AsContiguous(Zeros(w.shape(), hx.dtype(), hx.device()));
cuda_internal::CudnnFilterDescriptor dw_desc{dw};
device_internals.cudnn_handle().Call(
cudnnRNNBackwardWeights,
rnn_desc,
xs.size(),
xs_desc,
internal::GetRawOffsetData(x),
*hx_desc,
internal::GetRawOffsetData(hx),
ys_desc,
internal::GetRawOffsetData(y),
internal::GetRawOffsetData(workspace),
work_size,
*dw_desc,
internal::GetRawOffsetData(dw),
internal::GetRawOffsetData(reserve),
reserve_size);
std::vector<int64_t> split_indices;
for (size_t i = 0; i < xs.size() - 1; i++) {
if (i != 0) {
split_indices.emplace_back(split_indices[i - 1] + xs[i].shape()[0]);
} else {
split_indices.emplace_back(xs[i].shape()[0]);
}
}
dx = dx.AsType(type);
dxs = Split(dx, split_indices, 0);
std::vector<Array> dstate;
dstate.emplace_back(dhx.AsType(type));
if (cx.has_value()) {
dstate.emplace_back(dcx.AsType(type));
}
std::vector<std::vector<Array>> ret;
ret.emplace_back(dstate);
ret.emplace_back(WeightsBackward(device, rnn_desc, dxs_desc[0], *dw_desc, dw, ws, bs, n_layers, num_directions, type));
ret.emplace_back(dxs);
return ret;
}
};
CHAINERX_CUDA_REGISTER_KERNEL(RnnBackwardKernel, CudaRnnBackwardKernel);
} // namespace
} // namespace cuda
} // namespace chainerx
|
cuTrasformation.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "cuTrasformation.cuh"
#include <math.h>
#include <stdio.h>
#include "../common/cuMatrix.h"
#include "../common/util.h"
#include "hip/hip_runtime.h"
#include <hiprand/hiprand_kernel.h>
#include <time.h>
#include "../common/Config.h"
#include <helper_functions.h>
#include <helper_cuda.h>
#include "../common/cuBase.h"
#define GAUSSIAN_FIELD_SIZE (21) /* strictly odd number */
#define constDistortion (1.0)
hiprandGenerator_t rand_generator_device;
const hiprandRngType_t generator_type = HIPRAND_RNG_PSEUDO_DEFAULT;
cuMatrix<float>* cuGaussianKernel;
cuMatrix<float>* cuDispH;
cuMatrix<float>* cuDispV;
float * cu_d_randonNumf;
float* cu_d_randomNum;
float* cu_h_randomNum;
float dElasticSigma = 4.0; /* higher values give a smoother, less distorted field; Simard uses 4.0 */
int getRandomNumLen(int batch, int ImgSize)
{
return batch * ImgSize * ImgSize * 2 * Config::instance()->getChannels();
}
/*
* blocks : dim3(1)
* threads: dim3(GAUSSIAN_FIELD_SIZE*GAUSSIAN_FIELD_SIZE)
*/
__global__ void g_createGaussianKernel(float* gaussian, float dElasticSigma, int ImgSize)
{
int iiMid = GAUSSIAN_FIELD_SIZE >> 1;
float floatElasticSigma = dElasticSigma * dElasticSigma;
int row = threadIdx.x % ImgSize;
int col = threadIdx.x / ImgSize;
float val1 = 1.0 / (dElasticSigma * 2.0 * 3.1415926535897932384626433832795);
float val2 = (row-iiMid)*(row-iiMid) + (col-iiMid)*(col-iiMid);
gaussian[threadIdx.x] = val1 * exp(-1.0 * val2 / (2.0 * floatElasticSigma));
}
void cuInitDistortionMemery(int batch, int ImgSize)
{
hiprandStatus_t curandstatus;
cuGaussianKernel = new cuMatrix<float>(GAUSSIAN_FIELD_SIZE, GAUSSIAN_FIELD_SIZE, 1);
if(GAUSSIAN_FIELD_SIZE * GAUSSIAN_FIELD_SIZE > MAX_THREADS)
{
char logStr[1024];
sprintf(logStr, "g_createGaussianKernel > MAX_THREADS\n");
LOG(logStr, "Result/log.txt");
exit(0);
}
hipLaunchKernelGGL(( g_createGaussianKernel), dim3(dim3(1)),dim3(dim3(GAUSSIAN_FIELD_SIZE * GAUSSIAN_FIELD_SIZE)), 0, 0,
cuGaussianKernel->getDev(),
dElasticSigma,
ImgSize);
hipDeviceSynchronize();
/*cu_d_randomNum*/
checkCudaErrors(
MemoryMonitor::instance()->gpuMalloc((void**)&cu_d_randomNum, sizeof(float) * getRandomNumLen(batch, ImgSize))
);
/*cu_d_randonNumf*/
checkCudaErrors(
MemoryMonitor::instance()->gpuMalloc((void**)&cu_d_randonNumf, sizeof(float) * getRandomNumLen(batch, ImgSize))
);
/*cu_h_randomNum*/
cu_h_randomNum = (float*)MemoryMonitor::instance()->cpuMalloc(sizeof(float) * getRandomNumLen(batch, ImgSize));
if(!cu_h_randomNum)
{
char logStr[1024];
sprintf(logStr, "malloc cu_h_randomNum fail\n");
LOG(logStr, "Result/log.txt");
exit(0);
}
/*hiprandCreateGenerator*/
curandstatus = hiprandCreateGenerator(&rand_generator_device, generator_type);
if(curandstatus != HIPRAND_STATUS_SUCCESS)
{
char logStr[1024];
sprintf(logStr, "hiprandCreateGenerator fail\n");
LOG(logStr, "Result/log.txt");
exit(0);
}
cuDispV = new cuMatrix<float>(batch, ImgSize * ImgSize, 1);
cuDispH = new cuMatrix<float>(batch, ImgSize * ImgSize, 1);
}
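/*
 * A rough usage sketch (not from the original sources): after this one-time
 * initialisation, the per-batch flow below is what the helpers in this file
 * expect. batch and ImgSize are assumed to match the values passed here, and
 * Config::instance() must already be set up.
 *
 *   cuInitDistortionMemery(batch, ImgSize);             // allocate Gaussian kernel, RNG, displacement maps
 *   cuApplyRandom(batch, seed, ImgSize);                // draw new random displacement maps
 *   cuApplyDistortion(inputs, outputs, batch, ImgSize); // warp a batch of images
 */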
__global__ void g_getRandomUniform(float* r1, float* r2, int len)
{
for(int i = 0; i < len; i += gridDim.x * blockDim.x)
{
int id = i + blockDim.x * blockIdx.x + threadIdx.x;
if(id < len)
{
r2[id] = r1[id] * 2.0f - 1.0f;
}
}
}
/*
* blocks : dim3(batch)
* threads : dim3(512)
*/
__global__ void g_generateDistortionMap(
float* _dispH,
float* _dispV,
float* rand,
float* gaussianKernel,
float dElasticScaling,
float dMaxScaling,
float dMaxRotation,
int ImgSize)
{
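	// Three passes per image: (1) elastic distortion from Gaussian-smoothed uniform noise,
	// (2) horizontal/vertical scaling, (3) rotation about the image center.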
int ImgSize2 = ImgSize * ImgSize;
float* uniformH = rand + blockIdx.x * ImgSize2;
float* uniformV = rand + blockIdx.x * ImgSize2 * 2;
float* dispH = _dispH + ImgSize2 * blockIdx.x;
float* dispV = _dispV + ImgSize2 * blockIdx.x;
if(dElasticScaling >= 0.1){
for(int is = 0; is < ImgSize2; is += blockDim.x)
{
int idx = is + threadIdx.x;
if(idx < ImgSize2)
{
int row = idx / ImgSize;
int col = idx % ImgSize;
int iiMid = GAUSSIAN_FIELD_SIZE / 2;
float fConvolvedH = 0.0;
float fConvolvedV = 0.0;
float fSampleH, fSampleV;
float elasticScale = dElasticScaling;
for(int xxx = 0; xxx < GAUSSIAN_FIELD_SIZE; ++xxx)
{
for(int yyy = 0; yyy < GAUSSIAN_FIELD_SIZE; ++yyy)
{
int xxxDisp = col - iiMid + xxx;
int yyyDisp = row - iiMid + yyy;
if(xxxDisp < 0 || xxxDisp >= ImgSize ||
yyyDisp < 0 || yyyDisp >= ImgSize)
{
fSampleH = 0.0;
fSampleV = 0.0;
}
else
{
fSampleH = uniformH[yyyDisp * ImgSize + xxxDisp];
fSampleV = uniformV[yyyDisp * ImgSize + xxxDisp];
}
fConvolvedH += fSampleH * gaussianKernel[yyy * GAUSSIAN_FIELD_SIZE + xxx] * constDistortion;
fConvolvedV += fSampleV * gaussianKernel[yyy * GAUSSIAN_FIELD_SIZE + xxx] * constDistortion;
}
}
dispH[idx] = elasticScale * fConvolvedH;
dispV[idx] = elasticScale * fConvolvedV;
}
}
}
else{
for(int is = 0; is < ImgSize2; is += blockDim.x){
int idx = is + threadIdx.x;
if(idx < ImgSize2){
dispH[idx] = 0.0;
dispV[idx] = 0.0;
}
}
}
__syncthreads();
float rand1 = rand[blockIdx.x];
float rand2 = rand[blockIdx.x + 1];
if(fabs(dMaxRotation) >= 0.01){
if(rand1 <= 0.0) rand1 = 0.0;
if(rand2 <= 0.0) rand2 = 0.0;
}
for(int is = 0; is < ImgSize2; is += blockDim.x)
{
int idx = is + threadIdx.x;
if(idx < ImgSize2)
{
int row = idx / ImgSize;
int col = idx % ImgSize;
float dSFHoriz = dMaxScaling / 100.0 * rand1;
float dSFVert = dMaxScaling / 100.0 * rand2;
int iMid = ImgSize / 2;
dispH[idx] += dSFHoriz * (col - iMid);
dispV[idx] += dSFVert * (row - iMid);
}
}
__syncthreads();
for(int is = 0; is < ImgSize2; is += blockDim.x)
{
int idx = is + threadIdx.x;
if(idx < ImgSize2)
{
int row = idx / ImgSize;
int col = idx % ImgSize;
float angle = dMaxRotation * rand[blockIdx.x];
//printf("%f\n",angle);
angle = angle * 3.1415926535897932384626433832795 / 180.0;
float cosAngle = cos(angle);
float sinAngle = sin(angle);
int iMid = ImgSize / 2;
float xx = row - iMid;
float yy = col - iMid;
dispH[idx] += yy - yy * cosAngle - xx * sinAngle;
dispV[idx] += xx - xx * cosAngle + yy * sinAngle;
}
}
}
__global__ void g_scaleAndRotate(
float* _dispH,
float* _dispV,
float scalingx,
float scalingy,
float rotation,
int ImgSize)
{
int ImgSize2 = ImgSize * ImgSize;
float* dispH = _dispH + ImgSize2 * blockIdx.x;
float* dispV = _dispV + ImgSize2 * blockIdx.x;
for(int is = 0; is < ImgSize2; is += blockDim.x)
{
int idx = is + threadIdx.x;
if(idx < ImgSize2)
{
dispH[idx] = 0.0;
dispV[idx] = 0.0;
}
}
__syncthreads();
for(int is = 0; is < ImgSize2; is += blockDim.x)
{
int idx = is + threadIdx.x;
if(idx < ImgSize2)
{
int row = idx / ImgSize;
int col = idx % ImgSize;
float dSFHoriz = scalingx / 100.0;
float dSFVert = scalingy / 100.0;
int iMid = ImgSize / 2;
dispH[idx] += dSFHoriz * (col - iMid);
dispV[idx] += dSFVert * (row - iMid);
}
}
__syncthreads();
for(int is = 0; is < ImgSize2; is += blockDim.x)
{
int idx = is + threadIdx.x;
if(idx < ImgSize2)
{
int row = idx / ImgSize;
int col = idx % ImgSize;
float angle = rotation;
angle = angle * 3.1415926535897932384626433832795 / 180.0;
float cosAngle = cos(angle);
float sinAngle = sin(angle);
int iMid = ImgSize / 2;
float xx = row - iMid;
float yy = col - iMid;
dispH[idx] += yy - yy * cosAngle - xx * sinAngle;
dispV[idx] += xx - xx * cosAngle + yy * sinAngle;
}
}
}
/*
* blocks : dim3(batch, Config::instance()->getChannels())
* threads: dim3(min(512, ImgSize * ImgSize))
*/
__global__ void g_applyDistortionMap(
float** _inputs,
float** _outputs,
float* _dispH,
float* _dispV,
int ImgSize)
{
extern __shared__ float img[];
int c = blockIdx.y;
int ImgSize2 = ImgSize * ImgSize;
float* input = _inputs[blockIdx.x] + ImgSize2 * c;
float* output= _outputs[blockIdx.x]+ ImgSize2 * c;
float* dispV = _dispV + blockIdx.x * ImgSize2;
float* dispH = _dispH + blockIdx.x * ImgSize2;
for(int is = 0; is < ImgSize2; is += blockDim.x){
int idx = is + threadIdx.x;
if(idx < ImgSize2){
img[idx] = input[idx];
}
}
__syncthreads();
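	// Backward-map every output pixel through the displacement field and
	// bilinearly interpolate from the input image cached in shared memory.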
for(int is = 0; is < ImgSize2; is += blockDim.x)
{
int idx = is + threadIdx.x;
if(idx < ImgSize2)
{
int row = idx / ImgSize;
int col = idx % ImgSize;
float sourceRow, sourceCol;
float fracRow, fracCol;
float w1, w2, w3, w4;
float sourceValue;
int sRow, sCol, sRowp1, sColp1;
bool bSkipOutOfBounds;
if(fabs(dispV[idx]) < 0.000000001 && fabs(dispH[idx]) < 0.0000000001)
{
output[idx] = input[idx];
continue;
}
sourceRow = (float)row - dispV[idx];
sourceCol = (float)col - dispH[idx];
fracRow = sourceRow - (int)sourceRow;
fracCol = sourceCol - (int)sourceCol;
w1 = ( 1.0 - fracRow ) * ( 1.0 - fracCol );
w2 = ( 1.0 - fracRow ) * fracCol;
w3 = fracRow * ( 1.0 - fracCol );
w4 = fracRow * fracCol;
bSkipOutOfBounds = false;
if ( ((int)sourceRow + 1) >= ImgSize ) bSkipOutOfBounds = true;
if ( (int)sourceRow < 0 ) bSkipOutOfBounds = true;
if ( ((int)sourceCol + 1) >= ImgSize ) bSkipOutOfBounds = true;
if ( (int)sourceCol < 0 ) bSkipOutOfBounds = true;
if ( bSkipOutOfBounds == false )
{
sRow = (int)sourceRow;
sCol = (int)sourceCol;
sRowp1 = sRow + 1;
sColp1 = sCol + 1;
while (sRowp1 >= ImgSize) sRowp1 -= ImgSize;
while (sRowp1 < 0) sRowp1 += ImgSize;
while (sColp1 >= ImgSize) sColp1 -= ImgSize;
while (sColp1 < 0) sColp1 += ImgSize;
while (sRow >= ImgSize) sRow -= ImgSize;
while (sRow < 0) sRow += ImgSize;
while (sCol >= ImgSize) sCol -= ImgSize;
while (sCol < 0) sCol += ImgSize;
sourceValue =
w1 * img[sRow * ImgSize + sCol] +
w2 * img[sRow * ImgSize + sColp1] +
w3 * img[sRowp1 * ImgSize + sCol] +
w4 * img[sRowp1 * ImgSize + sColp1];
}
else
{
sourceValue = -1.0;
}
output[idx] = sourceValue;
}
}
}
void cuApplyRandom(int batch, unsigned long long s, int ImgSize)
{
	hiprandStatus_t curandstatus;
	unsigned long long seed = s;
	curandstatus = hiprandSetPseudoRandomGeneratorSeed(rand_generator_device, seed);
	if(curandstatus != HIPRAND_STATUS_SUCCESS)
{
char logStr[1024];
sprintf(logStr, "hiprandSetPseudoRandomGeneratorSeed fail\n");
LOG(logStr, "Result/log.txt");
exit(0);
}
hiprandGenerateUniform(rand_generator_device, cu_d_randonNumf, getRandomNumLen(batch, ImgSize));
hipLaunchKernelGGL(( g_getRandomUniform), dim3(dim3(256)),dim3(dim3(256)), 0, 0, cu_d_randonNumf, cu_d_randomNum, getRandomNumLen(batch, ImgSize));
hipDeviceSynchronize();
getLastCudaError("g_getRandomUniform");
int threads = min(512, ImgSize * ImgSize);
hipLaunchKernelGGL(( g_generateDistortionMap), dim3(dim3(batch)),dim3(threads), 0, 0, cuDispH->getDev(),
cuDispV->getDev(), cu_d_randomNum, cuGaussianKernel->getDev(),
Config::instance()->getDistortion(),
Config::instance()->getScale(),
Config::instance()->getRotation(), ImgSize);
hipDeviceSynchronize();
getLastCudaError("g_generateDistortionMap");
}
void cuApplyScaleAndRotate(int batch,
int ImgSize,
float scalingx,
float scalingy,
float rotation)
{
hipLaunchKernelGGL(( g_scaleAndRotate), dim3(dim3(batch)),dim3(dim3(512)), 0, 0,
cuDispH->getDev(),
cuDispV->getDev(),
scalingx,
scalingy,
rotation,
ImgSize);
hipDeviceSynchronize();
getLastCudaError("g_generateDistortionMap");
}
void cuApplyDistortion(float**inputs, float**outputs, int batch, int ImgSize)
{
int threadidx = min(ImgSize * ImgSize, 512);
hipLaunchKernelGGL(( g_applyDistortionMap), dim3(dim3(batch, Config::instance()->getChannels())),
dim3( dim3(threadidx)), sizeof(float) * ImgSize * ImgSize, 0, inputs,
outputs,
cuDispH->getDev(),
cuDispV->getDev(),
ImgSize);
hipDeviceSynchronize();
getLastCudaError("g_applyDistortionMap");
}
/*
* blocks : dim3(batch, channels)
* threads : dim3(min(ImgSize*ImgSize, 512))
*/
__global__ void g_applyCropRandom(float**_inputs, float**_outputs, float* random, int crop, int ImgSize)
{
int c = blockIdx.y;
int outputImgSize = ImgSize;
int inputImgSize = ImgSize + crop;
int inputImgSize2 = inputImgSize * inputImgSize;
int outputImgSize2= outputImgSize* outputImgSize;
float* input = _inputs [blockIdx.x] + c * inputImgSize2;
float* output= _outputs[blockIdx.x] + c * outputImgSize2;
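	// random holds values in [-1, 1] (see g_getRandomUniform); map them to crop offsets in [0, crop].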
int sx =(int)((random[blockIdx.x] + 1.0) * 0.5 * crop);
int sy =(int)((random[blockIdx.x + 1] + 1.0) * 0.5 * crop);
if(sx > crop) sx = crop;
if(sy > crop) sy = crop;
if(sx < 0) sx = 0;
if(sy < 0) sy = 0;
// if(threadIdx.x == 0)
// sprintf(logStr, "%d %d\n", sx, sy);
for(int is = 0; is < outputImgSize2; is += blockDim.x)
{
int idx = is + threadIdx.x;
if(idx < outputImgSize2)
{
int ox = idx / outputImgSize;
int oy = idx % outputImgSize;
int ix = ox + sx;
int iy = oy + sy;
cuAssert(ix < inputImgSize && iy < inputImgSize);
output[idx] = input[ix * inputImgSize + iy];
}
}
}
/*
* blocks : dim3(batch)
* threads : dim3(min(ImgSize*ImgSize, 512))
*/
__global__ void g_applyColorNoise(float**_inputs, float* _noise, int imgSize)
{
int imgSize2 = imgSize * imgSize;
float* input = _inputs[blockIdx.x];
float* noise = _noise + blockIdx.x * 3;
for(int is = 0; is < imgSize2; is += blockDim.x)
{
int idx = is + threadIdx.x;
if(idx < imgSize2)
{
int x = idx / imgSize;
int y = idx % imgSize;
for(int c = 0; c < 3; c++){
input[c * imgSize2 + idx] = input[c * imgSize2 + idx] + noise[c];
}
}
}
}
/*
* blocks : dim3(batch, channels)
 * threads: dim3(min(ImgSize * ImgSize, 512))
*/
__global__ void g_applyCrop(float**_inputs, float**_outputs, float* random, int croplen, int ImgSize, int cropr, int cropc)
{
int c = blockIdx.y;
int outputImgSize = ImgSize;
int inputImgSize = ImgSize + croplen;
int inputImgSize2 = inputImgSize * inputImgSize;
int outputImgSize2= outputImgSize* outputImgSize;
float* input = _inputs [blockIdx.x]+ c * inputImgSize2 ;
float* output= _outputs[blockIdx.x]+ c * outputImgSize2;
int sx = cropr;
int sy = cropc;
for(int is = 0; is < outputImgSize2; is += blockDim.x)
{
int idx = is + threadIdx.x;
if(idx < outputImgSize2)
{
int ox = idx / outputImgSize;
int oy = idx % outputImgSize;
int ix = ox + sx;
int iy = oy + sy;
cuAssert(ix < inputImgSize && iy < inputImgSize);
output[idx] = input[ix * inputImgSize + iy];
}
}
}
void cuApplyCropRandom(float**inputs, float**outputs, int batch, int ImgSize)
{
dim3 block = dim3(batch, Config::instance()->getChannels());
dim3 threads = min(512, ImgSize * ImgSize);
hipLaunchKernelGGL(( g_applyCropRandom), dim3(block),dim3(threads), 0, 0, inputs, outputs, cu_d_randomNum, Config::instance()->getCrop(), ImgSize);
hipDeviceSynchronize();
getLastCudaError("g_applyCropRandom");
}
void cuApplyColorNoise(float** inputs, float* colorNoise, int batch, int ImgSize){
dim3 block = dim3(batch);
dim3 threads = min(512, ImgSize * ImgSize);
hipLaunchKernelGGL(( g_applyColorNoise), dim3(block),dim3(threads), 0, 0, inputs, colorNoise, ImgSize);
hipDeviceSynchronize();
getLastCudaError("g_applyColorNoise");
}
void cuApplyCrop(float**inputs, float**outputs, int batch, int ImgSize, int cropr, int cropc)
{
int threads = min(512, ImgSize * ImgSize);
hipLaunchKernelGGL(( g_applyCrop), dim3(dim3(batch, Config::instance()->getChannels())),
dim3( dim3(threads)), 0, 0, inputs, outputs,cu_d_randomNum, Config::instance()->getCrop(), ImgSize, cropr, cropc);
hipDeviceSynchronize();
getLastCudaError("g_applyCrop");
}
/*
 * function: horizontal reflection
* blocks : dim3(batch, Config::instance()->getChannels()),
* threads : dim3(threads)
* flag : 0. Random
* 1. Horizontal
* 2. Not Horizontal
*/
__global__ void g_applyHorizontal(float**_inputs, float**_outputs, float* rand, int ImgSize, int flag)
{
int c = blockIdx.y;
int ImgSize2 = ImgSize * ImgSize ;
float* input = _inputs[blockIdx.x] + c * ImgSize2;
float* output= _outputs[blockIdx.x]+ c * ImgSize2;
int half = ImgSize / 2;
for(int is = 0; is < half * ImgSize; is += blockDim.x)
{
int idx = is + threadIdx.x;
if(idx < half * ImgSize)
{
int ox = idx / half;
int oy = idx % half;
int ix = ox;
int iy = ImgSize - oy - 1;
if(flag == RANDOM_HORIZONTAL)
{
//if(rand[blockIdx.x] <= 0.0){
if(blockIdx.x % 2 == 0){
cuAssert(ix < ImgSize && iy < ImgSize);
swap(output[ox * ImgSize + oy], input[ix * ImgSize + iy]);
}
}
else if(flag == HORIZONTAL){
cuAssert(ix < ImgSize && iy < ImgSize);
swap(output[ox * ImgSize + oy], input[ix * ImgSize + iy]);
}
else if(flag == NOT_HORIZONTAL){
}
}
}
}
/*
* flag : 0. Random
* 1. Horizontal
* 2. Not Horizontal
*/
void cuApplyHorizontal(float **inputs, float**outputs, int batch, int ImgSize, int flag)
{
int threads = ::min(ImgSize * ImgSize / 2, 512);
hipLaunchKernelGGL(( g_applyHorizontal), dim3(dim3(batch, Config::instance()->getChannels())),
dim3( dim3(threads)), 0, 0, inputs, outputs, cu_d_randomNum, ImgSize, flag);
hipDeviceSynchronize();
getLastCudaError("g_applyHorizontal");
}
__global__ void g_applyWhiteNoise(
float** _inputs,
float ** _outputs,
float * _random,
int ImgSize,
float stdev){
int s = blockIdx.x;
int c = blockIdx.y;
int ImgSize2 = ImgSize * ImgSize;
int offset = ImgSize2 * c;
float* input = _inputs [s] + offset;
float* output= _outputs[s] + offset;
float* rand = _random + offset;
//if(_random[blockIdx.x] >= 0.9){
if(true){
for(int i = 0; i < ImgSize2; i += blockDim.x){
int idx = i + threadIdx.x;
if(idx < ImgSize2){
float val = input[idx] + stdev * rand[idx];
// if(val < -1.0) val = -1.0;
// if(val > 1.0) val = 1.0;
output[idx] = val;
}
}
}else{
for(int i = 0; i < ImgSize2; i += blockDim.x){
int idx = i + threadIdx.x;
if(idx < ImgSize2){
output[idx] = input[idx];
}
}
}
}
/*
ref: http://en.wikipedia.org/wiki/White_noise
*/
void cuApplyWhiteNoise(float **inputs, float**outputs, int batch, int ImgSize, float stdev)
{
dim3 blocks = dim3(batch, Config::instance()->getChannels());
dim3 threads = dim3(min(ImgSize * ImgSize, 512));
hipLaunchKernelGGL(( g_applyWhiteNoise), dim3(blocks), dim3(threads), 0, 0, inputs, outputs, cu_d_randomNum, ImgSize, stdev);
hipDeviceSynchronize();
getLastCudaError("g_applyWhiteNoise");
}
|
cuTrasformation.cu
|
#include "cuTrasformation.cuh"
#include <math.h>
#include <stdio.h>
#include "../common/cuMatrix.h"
#include "../common/util.h"
#include "cuda_runtime.h"
#include <curand_kernel.h>
#include <time.h>
#include "../common/Config.h"
#include <helper_functions.h>
#include <helper_cuda.h>
#include "../common/cuBase.h"
#define GAUSSIAN_FIELD_SIZE (21) /* strictly odd number */
#define constDistortion (1.0)
curandGenerator_t rand_generator_device;
const curandRngType_t generator_type = CURAND_RNG_PSEUDO_DEFAULT;
cuMatrix<float>* cuGaussianKernel;
cuMatrix<float>* cuDispH;
cuMatrix<float>* cuDispV;
float * cu_d_randonNumf;
float* cu_d_randomNum;
float* cu_h_randomNum;
float dElasticSigma = 4.0; /* higher values give a smoother, less distorted field; Simard uses 4.0 */
int getRandomNumLen(int batch, int ImgSize)
{
return batch * ImgSize * ImgSize * 2 * Config::instance()->getChannels();
}
/*
* blocks : dim3(1)
* threads: dim3(GAUSSIAN_FIELD_SIZE*GAUSSIAN_FIELD_SIZE)
*/
__global__ void g_createGaussianKernel(float* gaussian, float dElasticSigma, int ImgSize)
{
int iiMid = GAUSSIAN_FIELD_SIZE >> 1;
float floatElasticSigma = dElasticSigma * dElasticSigma;
int row = threadIdx.x % ImgSize;
int col = threadIdx.x / ImgSize;
float val1 = 1.0 / (dElasticSigma * 2.0 * 3.1415926535897932384626433832795);
float val2 = (row-iiMid)*(row-iiMid) + (col-iiMid)*(col-iiMid);
gaussian[threadIdx.x] = val1 * exp(-1.0 * val2 / (2.0 * floatElasticSigma));
}
void cuInitDistortionMemery(int batch, int ImgSize)
{
curandStatus_t curandstatus;
cuGaussianKernel = new cuMatrix<float>(GAUSSIAN_FIELD_SIZE, GAUSSIAN_FIELD_SIZE, 1);
if(GAUSSIAN_FIELD_SIZE * GAUSSIAN_FIELD_SIZE > MAX_THREADS)
{
char logStr[1024];
sprintf(logStr, "g_createGaussianKernel > MAX_THREADS\n");
LOG(logStr, "Result/log.txt");
exit(0);
}
g_createGaussianKernel<<<dim3(1),dim3(GAUSSIAN_FIELD_SIZE * GAUSSIAN_FIELD_SIZE)>>>(
cuGaussianKernel->getDev(),
dElasticSigma,
ImgSize);
cudaDeviceSynchronize();
/*cu_d_randomNum*/
checkCudaErrors(
MemoryMonitor::instance()->gpuMalloc((void**)&cu_d_randomNum, sizeof(float) * getRandomNumLen(batch, ImgSize))
);
/*cu_d_randonNumf*/
checkCudaErrors(
MemoryMonitor::instance()->gpuMalloc((void**)&cu_d_randonNumf, sizeof(float) * getRandomNumLen(batch, ImgSize))
);
/*cu_h_randomNum*/
cu_h_randomNum = (float*)MemoryMonitor::instance()->cpuMalloc(sizeof(float) * getRandomNumLen(batch, ImgSize));
if(!cu_h_randomNum)
{
char logStr[1024];
sprintf(logStr, "malloc cu_h_randomNum fail\n");
LOG(logStr, "Result/log.txt");
exit(0);
}
/*curandCreateGenerator*/
curandstatus = curandCreateGenerator(&rand_generator_device, generator_type);
if(curandstatus != CURAND_STATUS_SUCCESS)
{
char logStr[1024];
sprintf(logStr, "curandCreateGenerator fail\n");
LOG(logStr, "Result/log.txt");
exit(0);
}
cuDispV = new cuMatrix<float>(batch, ImgSize * ImgSize, 1);
cuDispH = new cuMatrix<float>(batch, ImgSize * ImgSize, 1);
}
__global__ void g_getRandomUniform(float* r1, float* r2, int len)
{
for(int i = 0; i < len; i += gridDim.x * blockDim.x)
{
int id = i + blockDim.x * blockIdx.x + threadIdx.x;
if(id < len)
{
r2[id] = r1[id] * 2.0f - 1.0f;
}
}
}
/*
* blocks : dim3(batch)
* threads : dim3(512)
*/
__global__ void g_generateDistortionMap(
float* _dispH,
float* _dispV,
float* rand,
float* gaussianKernel,
float dElasticScaling,
float dMaxScaling,
float dMaxRotation,
int ImgSize)
{
int ImgSize2 = ImgSize * ImgSize;
float* uniformH = rand + blockIdx.x * ImgSize2;
float* uniformV = rand + blockIdx.x * ImgSize2 * 2;
float* dispH = _dispH + ImgSize2 * blockIdx.x;
float* dispV = _dispV + ImgSize2 * blockIdx.x;
if(dElasticScaling >= 0.1){
for(int is = 0; is < ImgSize2; is += blockDim.x)
{
int idx = is + threadIdx.x;
if(idx < ImgSize2)
{
int row = idx / ImgSize;
int col = idx % ImgSize;
int iiMid = GAUSSIAN_FIELD_SIZE / 2;
float fConvolvedH = 0.0;
float fConvolvedV = 0.0;
float fSampleH, fSampleV;
float elasticScale = dElasticScaling;
for(int xxx = 0; xxx < GAUSSIAN_FIELD_SIZE; ++xxx)
{
for(int yyy = 0; yyy < GAUSSIAN_FIELD_SIZE; ++yyy)
{
int xxxDisp = col - iiMid + xxx;
int yyyDisp = row - iiMid + yyy;
if(xxxDisp < 0 || xxxDisp >= ImgSize ||
yyyDisp < 0 || yyyDisp >= ImgSize)
{
fSampleH = 0.0;
fSampleV = 0.0;
}
else
{
fSampleH = uniformH[yyyDisp * ImgSize + xxxDisp];
fSampleV = uniformV[yyyDisp * ImgSize + xxxDisp];
}
fConvolvedH += fSampleH * gaussianKernel[yyy * GAUSSIAN_FIELD_SIZE + xxx] * constDistortion;
fConvolvedV += fSampleV * gaussianKernel[yyy * GAUSSIAN_FIELD_SIZE + xxx] * constDistortion;
}
}
dispH[idx] = elasticScale * fConvolvedH;
dispV[idx] = elasticScale * fConvolvedV;
}
}
}
else{
for(int is = 0; is < ImgSize2; is += blockDim.x){
int idx = is + threadIdx.x;
if(idx < ImgSize2){
dispH[idx] = 0.0;
dispV[idx] = 0.0;
}
}
}
__syncthreads();
float rand1 = rand[blockIdx.x];
float rand2 = rand[blockIdx.x + 1];
if(fabs(dMaxRotation) >= 0.01){
if(rand1 <= 0.0) rand1 = 0.0;
if(rand2 <= 0.0) rand2 = 0.0;
}
for(int is = 0; is < ImgSize2; is += blockDim.x)
{
int idx = is + threadIdx.x;
if(idx < ImgSize2)
{
int row = idx / ImgSize;
int col = idx % ImgSize;
float dSFHoriz = dMaxScaling / 100.0 * rand1;
float dSFVert = dMaxScaling / 100.0 * rand2;
int iMid = ImgSize / 2;
dispH[idx] += dSFHoriz * (col - iMid);
dispV[idx] += dSFVert * (row - iMid);
}
}
__syncthreads();
for(int is = 0; is < ImgSize2; is += blockDim.x)
{
int idx = is + threadIdx.x;
if(idx < ImgSize2)
{
int row = idx / ImgSize;
int col = idx % ImgSize;
float angle = dMaxRotation * rand[blockIdx.x];
//printf("%f\n",angle);
angle = angle * 3.1415926535897932384626433832795 / 180.0;
float cosAngle = cos(angle);
float sinAngle = sin(angle);
int iMid = ImgSize / 2;
float xx = row - iMid;
float yy = col - iMid;
dispH[idx] += yy - yy * cosAngle - xx * sinAngle;
dispV[idx] += xx - xx * cosAngle + yy * sinAngle;
}
}
}
__global__ void g_scaleAndRotate(
float* _dispH,
float* _dispV,
float scalingx,
float scalingy,
float rotation,
int ImgSize)
{
int ImgSize2 = ImgSize * ImgSize;
float* dispH = _dispH + ImgSize2 * blockIdx.x;
float* dispV = _dispV + ImgSize2 * blockIdx.x;
for(int is = 0; is < ImgSize2; is += blockDim.x)
{
int idx = is + threadIdx.x;
if(idx < ImgSize2)
{
dispH[idx] = 0.0;
dispV[idx] = 0.0;
}
}
__syncthreads();
for(int is = 0; is < ImgSize2; is += blockDim.x)
{
int idx = is + threadIdx.x;
if(idx < ImgSize2)
{
int row = idx / ImgSize;
int col = idx % ImgSize;
float dSFHoriz = scalingx / 100.0;
float dSFVert = scalingy / 100.0;
int iMid = ImgSize / 2;
dispH[idx] += dSFHoriz * (col - iMid);
dispV[idx] += dSFVert * (row - iMid);
}
}
__syncthreads();
for(int is = 0; is < ImgSize2; is += blockDim.x)
{
int idx = is + threadIdx.x;
if(idx < ImgSize2)
{
int row = idx / ImgSize;
int col = idx % ImgSize;
float angle = rotation;
angle = angle * 3.1415926535897932384626433832795 / 180.0;
float cosAngle = cos(angle);
float sinAngle = sin(angle);
int iMid = ImgSize / 2;
float xx = row - iMid;
float yy = col - iMid;
dispH[idx] += yy - yy * cosAngle - xx * sinAngle;
dispV[idx] += xx - xx * cosAngle + yy * sinAngle;
}
}
}
/*
* blocks : dim3(batch, Config::instance()->getChannels())
* threads: dim3(min(512, ImgSize * ImgSize))
*/
__global__ void g_applyDistortionMap(
float** _inputs,
float** _outputs,
float* _dispH,
float* _dispV,
int ImgSize)
{
extern __shared__ float img[];
int c = blockIdx.y;
int ImgSize2 = ImgSize * ImgSize;
float* input = _inputs[blockIdx.x] + ImgSize2 * c;
float* output= _outputs[blockIdx.x]+ ImgSize2 * c;
float* dispV = _dispV + blockIdx.x * ImgSize2;
float* dispH = _dispH + blockIdx.x * ImgSize2;
for(int is = 0; is < ImgSize2; is += blockDim.x){
int idx = is + threadIdx.x;
if(idx < ImgSize2){
img[idx] = input[idx];
}
}
__syncthreads();
for(int is = 0; is < ImgSize2; is += blockDim.x)
{
int idx = is + threadIdx.x;
if(idx < ImgSize2)
{
int row = idx / ImgSize;
int col = idx % ImgSize;
float sourceRow, sourceCol;
float fracRow, fracCol;
float w1, w2, w3, w4;
float sourceValue;
int sRow, sCol, sRowp1, sColp1;
bool bSkipOutOfBounds;
if(fabs(dispV[idx]) < 0.000000001 && fabs(dispH[idx]) < 0.0000000001)
{
output[idx] = input[idx];
continue;
}
sourceRow = (float)row - dispV[idx];
sourceCol = (float)col - dispH[idx];
fracRow = sourceRow - (int)sourceRow;
fracCol = sourceCol - (int)sourceCol;
w1 = ( 1.0 - fracRow ) * ( 1.0 - fracCol );
w2 = ( 1.0 - fracRow ) * fracCol;
w3 = fracRow * ( 1.0 - fracCol );
w4 = fracRow * fracCol;
bSkipOutOfBounds = false;
if ( ((int)sourceRow + 1) >= ImgSize ) bSkipOutOfBounds = true;
if ( (int)sourceRow < 0 ) bSkipOutOfBounds = true;
if ( ((int)sourceCol + 1) >= ImgSize ) bSkipOutOfBounds = true;
if ( (int)sourceCol < 0 ) bSkipOutOfBounds = true;
if ( bSkipOutOfBounds == false )
{
sRow = (int)sourceRow;
sCol = (int)sourceCol;
sRowp1 = sRow + 1;
sColp1 = sCol + 1;
while (sRowp1 >= ImgSize) sRowp1 -= ImgSize;
while (sRowp1 < 0) sRowp1 += ImgSize;
while (sColp1 >= ImgSize) sColp1 -= ImgSize;
while (sColp1 < 0) sColp1 += ImgSize;
while (sRow >= ImgSize) sRow -= ImgSize;
while (sRow < 0) sRow += ImgSize;
while (sCol >= ImgSize) sCol -= ImgSize;
while (sCol < 0) sCol += ImgSize;
sourceValue =
w1 * img[sRow * ImgSize + sCol] +
w2 * img[sRow * ImgSize + sColp1] +
w3 * img[sRowp1 * ImgSize + sCol] +
w4 * img[sRowp1 * ImgSize + sColp1];
}
else
{
sourceValue = -1.0;
}
output[idx] = sourceValue;
}
}
}
void cuApplyRandom(int batch, unsigned long long s, int ImgSize)
{
curandStatus_t curandStatus;
unsigned long long seed = s;
curandStatus = curandSetPseudoRandomGeneratorSeed(rand_generator_device, seed);
if(curandStatus != CURAND_STATUS_SUCCESS)
{
char logStr[1024];
sprintf(logStr, "curandSetPseudoRandomGeneratorSeed fail\n");
LOG(logStr, "Result/log.txt");
exit(0);
}
curandGenerateUniform(rand_generator_device, cu_d_randonNumf, getRandomNumLen(batch, ImgSize));
g_getRandomUniform<<<dim3(256),dim3(256)>>>(cu_d_randonNumf, cu_d_randomNum, getRandomNumLen(batch, ImgSize));
cudaDeviceSynchronize();
getLastCudaError("g_getRandomUniform");
int threads = min(512, ImgSize * ImgSize);
g_generateDistortionMap<<<dim3(batch),threads>>>(cuDispH->getDev(),
cuDispV->getDev(), cu_d_randomNum, cuGaussianKernel->getDev(),
Config::instance()->getDistortion(),
Config::instance()->getScale(),
Config::instance()->getRotation(), ImgSize);
cudaDeviceSynchronize();
getLastCudaError("g_generateDistortionMap");
}
void cuApplyScaleAndRotate(int batch,
int ImgSize,
float scalingx,
float scalingy,
float rotation)
{
g_scaleAndRotate<<<dim3(batch),dim3(512)>>>(
cuDispH->getDev(),
cuDispV->getDev(),
scalingx,
scalingy,
rotation,
ImgSize);
cudaDeviceSynchronize();
getLastCudaError("g_generateDistortionMap");
}
void cuApplyDistortion(float**inputs, float**outputs, int batch, int ImgSize)
{
int threadidx = min(ImgSize * ImgSize, 512);
g_applyDistortionMap<<<dim3(batch, Config::instance()->getChannels()),
dim3(threadidx), sizeof(float) * ImgSize * ImgSize>>>(inputs,
outputs,
cuDispH->getDev(),
cuDispV->getDev(),
ImgSize);
cudaDeviceSynchronize();
getLastCudaError("g_applyDistortionMap");
}
/*
* blocks : dim3(batch, channels)
* threads : dim3(min(ImgSize*ImgSize, 512))
*/
__global__ void g_applyCropRandom(float**_inputs, float**_outputs, float* random, int crop, int ImgSize)
{
int c = blockIdx.y;
int outputImgSize = ImgSize;
int inputImgSize = ImgSize + crop;
int inputImgSize2 = inputImgSize * inputImgSize;
int outputImgSize2= outputImgSize* outputImgSize;
float* input = _inputs [blockIdx.x] + c * inputImgSize2;
float* output= _outputs[blockIdx.x] + c * outputImgSize2;
int sx =(int)((random[blockIdx.x] + 1.0) * 0.5 * crop);
int sy =(int)((random[blockIdx.x + 1] + 1.0) * 0.5 * crop);
if(sx > crop) sx = crop;
if(sy > crop) sy = crop;
if(sx < 0) sx = 0;
if(sy < 0) sy = 0;
// if(threadIdx.x == 0)
// sprintf(logStr, "%d %d\n", sx, sy);
for(int is = 0; is < outputImgSize2; is += blockDim.x)
{
int idx = is + threadIdx.x;
if(idx < outputImgSize2)
{
int ox = idx / outputImgSize;
int oy = idx % outputImgSize;
int ix = ox + sx;
int iy = oy + sy;
cuAssert(ix < inputImgSize && iy < inputImgSize);
output[idx] = input[ix * inputImgSize + iy];
}
}
}
/*
* blocks : dim3(batch)
* threads : dim3(min(ImgSize*ImgSize, 512))
*/
__global__ void g_applyColorNoise(float**_inputs, float* _noise, int imgSize)
{
int imgSize2 = imgSize * imgSize;
float* input = _inputs[blockIdx.x];
float* noise = _noise + blockIdx.x * 3;
for(int is = 0; is < imgSize2; is += blockDim.x)
{
int idx = is + threadIdx.x;
if(idx < imgSize2)
{
int x = idx / imgSize;
int y = idx % imgSize;
for(int c = 0; c < 3; c++){
input[c * imgSize2 + idx] = input[c * imgSize2 + idx] + noise[c];
}
}
}
}
/*
* blocks : dim3(batch, channels)
 * threads: dim3(min(ImgSize * ImgSize, 512))
*/
__global__ void g_applyCrop(float**_inputs, float**_outputs, float* random, int croplen, int ImgSize, int cropr, int cropc)
{
int c = blockIdx.y;
int outputImgSize = ImgSize;
int inputImgSize = ImgSize + croplen;
int inputImgSize2 = inputImgSize * inputImgSize;
int outputImgSize2= outputImgSize* outputImgSize;
float* input = _inputs [blockIdx.x]+ c * inputImgSize2 ;
float* output= _outputs[blockIdx.x]+ c * outputImgSize2;
int sx = cropr;
int sy = cropc;
for(int is = 0; is < outputImgSize2; is += blockDim.x)
{
int idx = is + threadIdx.x;
if(idx < outputImgSize2)
{
int ox = idx / outputImgSize;
int oy = idx % outputImgSize;
int ix = ox + sx;
int iy = oy + sy;
cuAssert(ix < inputImgSize && iy < inputImgSize);
output[idx] = input[ix * inputImgSize + iy];
}
}
}
void cuApplyCropRandom(float**inputs, float**outputs, int batch, int ImgSize)
{
dim3 block = dim3(batch, Config::instance()->getChannels());
dim3 threads = min(512, ImgSize * ImgSize);
g_applyCropRandom<<<block,threads>>>(inputs, outputs, cu_d_randomNum, Config::instance()->getCrop(), ImgSize);
cudaDeviceSynchronize();
getLastCudaError("g_applyCropRandom");
}
void cuApplyColorNoise(float** inputs, float* colorNoise, int batch, int ImgSize){
dim3 block = dim3(batch);
dim3 threads = min(512, ImgSize * ImgSize);
g_applyColorNoise<<<block,threads>>>(inputs, colorNoise, ImgSize);
cudaDeviceSynchronize();
getLastCudaError("g_applyColorNoise");
}
void cuApplyCrop(float**inputs, float**outputs, int batch, int ImgSize, int cropr, int cropc)
{
int threads = min(512, ImgSize * ImgSize);
g_applyCrop<<<dim3(batch, Config::instance()->getChannels()),
dim3(threads)>>>(inputs, outputs,cu_d_randomNum, Config::instance()->getCrop(), ImgSize, cropr, cropc);
cudaDeviceSynchronize();
getLastCudaError("g_applyCrop");
}
/*
 * function: horizontal reflection
* blocks : dim3(batch, Config::instance()->getChannels()),
* threads : dim3(threads)
* flag : 0. Random
* 1. Horizontal
* 2. Not Horizontal
*/
__global__ void g_applyHorizontal(float**_inputs, float**_outputs, float* rand, int ImgSize, int flag)
{
int c = blockIdx.y;
int ImgSize2 = ImgSize * ImgSize ;
float* input = _inputs[blockIdx.x] + c * ImgSize2;
float* output= _outputs[blockIdx.x]+ c * ImgSize2;
int half = ImgSize / 2;
for(int is = 0; is < half * ImgSize; is += blockDim.x)
{
int idx = is + threadIdx.x;
if(idx < half * ImgSize)
{
int ox = idx / half;
int oy = idx % half;
int ix = ox;
int iy = ImgSize - oy - 1;
if(flag == RANDOM_HORIZONTAL)
{
//if(rand[blockIdx.x] <= 0.0){
if(blockIdx.x % 2 == 0){
cuAssert(ix < ImgSize && iy < ImgSize);
swap(output[ox * ImgSize + oy], input[ix * ImgSize + iy]);
}
}
else if(flag == HORIZONTAL){
cuAssert(ix < ImgSize && iy < ImgSize);
swap(output[ox * ImgSize + oy], input[ix * ImgSize + iy]);
}
else if(flag == NOT_HORIZONTAL){
}
}
}
}
/*
* flag : 0. Random
* 1. Horizontal
* 2. Not Horizontal
*/
void cuApplyHorizontal(float **inputs, float**outputs, int batch, int ImgSize, int flag)
{
int threads = std::min(ImgSize * ImgSize / 2, 512);
g_applyHorizontal<<<dim3(batch, Config::instance()->getChannels()),
dim3(threads)>>>(inputs, outputs, cu_d_randomNum, ImgSize, flag);
cudaDeviceSynchronize();
getLastCudaError("g_applyHorizontal");
}
__global__ void g_applyWhiteNoise(
float** _inputs,
float ** _outputs,
float * _random,
int ImgSize,
float stdev){
int s = blockIdx.x;
int c = blockIdx.y;
int ImgSize2 = ImgSize * ImgSize;
int offset = ImgSize2 * c;
float* input = _inputs [s] + offset;
float* output= _outputs[s] + offset;
float* rand = _random + offset;
//if(_random[blockIdx.x] >= 0.9){
if(true){
for(int i = 0; i < ImgSize2; i += blockDim.x){
int idx = i + threadIdx.x;
if(idx < ImgSize2){
float val = input[idx] + stdev * rand[idx];
// if(val < -1.0) val = -1.0;
// if(val > 1.0) val = 1.0;
output[idx] = val;
}
}
}else{
for(int i = 0; i < ImgSize2; i += blockDim.x){
int idx = i + threadIdx.x;
if(idx < ImgSize2){
output[idx] = input[idx];
}
}
}
}
/*
ref: http://en.wikipedia.org/wiki/White_noise
*/
void cuApplyWhiteNoise(float **inputs, float**outputs, int batch, int ImgSize, float stdev)
{
dim3 blocks = dim3(batch, Config::instance()->getChannels());
dim3 threads = dim3(min(ImgSize * ImgSize, 512));
g_applyWhiteNoise<<<blocks, threads>>>(inputs, outputs, cu_d_randomNum, ImgSize, stdev);
cudaDeviceSynchronize();
getLastCudaError("g_applyWhiteNoise");
}
|
6d850a3f6bebaec63945bb83f14706e45334a69f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float* var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float* var_21,float var_22,float var_23,float var_24,float var_25,float var_26) {
for (int i=0; i < var_1; ++i) {
var_2[i] = +1.6143E-41f;
comp = var_2[i] / cosf(asinf(var_4 - (var_5 * sinhf((var_6 - var_7 / var_8)))));
float tmp_1 = -0.0f;
comp += tmp_1 / (-0.0f * (var_9 * +1.2366E25f + cosf(var_10 - -1.4355E-7f / var_11)));
if (comp == var_12 + -1.2856E7f) {
comp = +1.2538E22f + var_13 / var_14 * -1.9884E36f + +0.0f / +1.6793E35f;
}
if (comp > fabsf(-1.2656E17f)) {
comp += var_15 / fabsf((-1.5653E-35f - var_16 - ldexpf((var_17 * var_18 / var_19 + (var_20 * -1.6549E-42f)), 2)));
}
for (int i=0; i < var_3; ++i) {
var_21[i] = -1.2917E35f;
float tmp_2 = -0.0f;
comp = tmp_2 + var_21[i] * (+1.3277E-36f * (+0.0f / (var_22 * sqrtf(asinf((+0.0f / var_23 * (var_24 * var_25)))))));
comp = (+1.9593E-41f * var_26);
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float* tmp_3 = initPointer( atof(argv[3]) );
int tmp_4 = atoi(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float* tmp_22 = initPointer( atof(argv[22]) );
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27);
hipDeviceSynchronize();
return 0;
}
|
6d850a3f6bebaec63945bb83f14706e45334a69f.cu
|
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float* var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float* var_21,float var_22,float var_23,float var_24,float var_25,float var_26) {
for (int i=0; i < var_1; ++i) {
var_2[i] = +1.6143E-41f;
comp = var_2[i] / cosf(asinf(var_4 - (var_5 * sinhf((var_6 - var_7 / var_8)))));
float tmp_1 = -0.0f;
comp += tmp_1 / (-0.0f * (var_9 * +1.2366E25f + cosf(var_10 - -1.4355E-7f / var_11)));
if (comp == var_12 + -1.2856E7f) {
comp = +1.2538E22f + var_13 / var_14 * -1.9884E36f + +0.0f / +1.6793E35f;
}
if (comp > fabsf(-1.2656E17f)) {
comp += var_15 / fabsf((-1.5653E-35f - var_16 - ldexpf((var_17 * var_18 / var_19 + (var_20 * -1.6549E-42f)), 2)));
}
for (int i=0; i < var_3; ++i) {
var_21[i] = -1.2917E35f;
float tmp_2 = -0.0f;
comp = tmp_2 + var_21[i] * (+1.3277E-36f * (+0.0f / (var_22 * sqrtf(asinf((+0.0f / var_23 * (var_24 * var_25)))))));
comp = (+1.9593E-41f * var_26);
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float* tmp_3 = initPointer( atof(argv[3]) );
int tmp_4 = atoi(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float* tmp_22 = initPointer( atof(argv[22]) );
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27);
cudaDeviceSynchronize();
return 0;
}
|
1cde116bbd518a7cf9387ab03c8878c64d368778.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backref_re_hip.cuh"
#include <cudf/strings/detail/utilities.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace strings {
namespace detail {
children_pair replace_with_backrefs_medium(column_device_view const& d_strings,
reprog_device& d_prog,
string_view const& d_repl_template,
device_span<backref_type> backrefs,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using Iterator = decltype(backrefs.begin());
return make_strings_children(
backrefs_fn<Iterator, RX_STACK_MEDIUM>{
d_strings, d_prog, d_repl_template, backrefs.begin(), backrefs.end()},
d_strings.size(),
stream,
mr);
}
} // namespace detail
} // namespace strings
} // namespace cudf
|
1cde116bbd518a7cf9387ab03c8878c64d368778.cu
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backref_re.cuh"
#include <cudf/strings/detail/utilities.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace strings {
namespace detail {
children_pair replace_with_backrefs_medium(column_device_view const& d_strings,
reprog_device& d_prog,
string_view const& d_repl_template,
device_span<backref_type> backrefs,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using Iterator = decltype(backrefs.begin());
return make_strings_children(
backrefs_fn<Iterator, RX_STACK_MEDIUM>{
d_strings, d_prog, d_repl_template, backrefs.begin(), backrefs.end()},
d_strings.size(),
stream,
mr);
}
} // namespace detail
} // namespace strings
} // namespace cudf
|
b7a6211f32b47f4775f6315cc315dcd1ce827c46.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
#define CUDA_ERR_HANDLE(err) (CudaErrorHandler(err, __FILE__, __LINE__))
int CudaErrorHandler(hipError_t err, const char* file, int line)
{
if(err != hipSuccess)
{
cout<<file<<" no."<<line<<" error : "<<hipGetErrorString(err)<<endl;
return 1;
}
return 0;
}
void GenerateGaussKernel(int size, float sigma, float* kernel);
unsigned char GetPixelVal(unsigned char* img, int img_height, int img_width, int i, int j);
void Gauss(unsigned char* img, int img_width, int img_height, float* kernel, int kernel_size, unsigned char* output);
void Sobel(unsigned char* img, int img_width, int img_height, short* sobel_x, short* sobel_y, unsigned char* output);
void NoneMaxSuppress(unsigned char* sobel, int sobel_width, int sobel_height, short* sobel_x, short* sobel_y, unsigned char* output);
void DoubleThreshold(unsigned char* sobel, int sobel_width, int sobel_height, int min_val, int max_val, unsigned char* canny);
__device__ unsigned char CUDA_GetPixelVal(unsigned char* img, int img_height, int img_width, int i, int j);
__global__ void CUDA_Gauss(unsigned char* img, int img_width, int img_height, float* kernel, int kernel_size, unsigned char* output);
__global__ void CUDA_Sobel(unsigned char* img, int img_width, int img_height, short* sobel_x, short* sobel_y, unsigned char* output);
__global__ void CUDA_NoneMaxSuppress(unsigned char* sobel, int sobel_width, int sobel_height, short* sobel_x, short* sobel_y, unsigned char* output);
__global__ void CUDA_DoubleThreshold(unsigned char* sobel, int sobel_width, int sobel_height, int min_val, int max_val, unsigned char* canny);
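/* Canny pipeline (CPU or GPU path, selected by argv[1]):
   Gaussian blur -> Sobel gradients -> non-maximum suppression -> double threshold. */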
int main(int argc, char** argv)
{
int cpu_gpu = 0;
if(argc >= 2 && strcmp(argv[1], "gpu") == 0)
{
cout<<"---canny acceleration[GPU]!---"<<endl;
cpu_gpu = 1;
}
else
{
cout<<"---canny acceleration[CPU]!---"<<endl;
}
int width = 640;
int height = 480;
int gauss_kernel_size = 3;
int thread_size = width;
int block_size = (width * height + thread_size - 1) / thread_size;
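	// One thread per pixel: thread_size (= width) threads per block, block_size blocks to cover width * height pixels.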
/*****cpu memory*****/
unsigned char* gauss = new unsigned char[width * height];
float* gauss_kernel = new float[gauss_kernel_size * gauss_kernel_size];
GenerateGaussKernel(gauss_kernel_size, 1, gauss_kernel);
short* sobel_x = new short[width * height];
short* sobel_y = new short[width * height];
unsigned char* sobel = new unsigned char[width * height];
unsigned char* canny = new unsigned char[width * height];
/*****gpu memory*****/
unsigned char* cuda_gray;
hipMalloc(&cuda_gray, width * height * sizeof(unsigned char));
unsigned char* cuda_gauss;
hipMalloc(&cuda_gauss, width * height * sizeof(unsigned char));
float* cuda_gauss_kernel;
hipMalloc(&cuda_gauss_kernel, width * height * sizeof(float));
hipMemcpy(cuda_gauss_kernel, gauss_kernel, gauss_kernel_size * gauss_kernel_size * sizeof(float), hipMemcpyHostToDevice);
short* cuda_sobel_x;
hipMalloc(&cuda_sobel_x, width * height * sizeof(short));
short* cuda_sobel_y;
hipMalloc(&cuda_sobel_y, width * height * sizeof(short));
unsigned char* cuda_sobel;
hipMalloc(&cuda_sobel, width * height * sizeof(unsigned char));
unsigned char* cuda_canny;
hipMalloc(&cuda_canny, width * height * sizeof(unsigned char));
VideoCapture camera(0);
//VideoCapture camera;
//camera.open("/home/katsuto/Pictures/Wallpapers/video.MP4");
Mat img_src;
//img_src = imread("/home/katsuto/Pictures/Wallpapers/nvidia.jpg");
Mat img_gray, img_gauss, img_sobel, img_canny;
while(1)
{
if(cpu_gpu == 0)
{
camera >> img_src;
//img_src = imread("/home/katsuto/Pictures/Wallpapers/nvidia.jpg");
cvtColor(img_src, img_gray, CV_BGR2GRAY);
resize(img_gray, img_gray, Size(width, height), 0, 0);
imshow("img_gray", img_gray);
Gauss(img_gray.data, width, height, gauss_kernel, gauss_kernel_size, gauss);
//img_gauss = Mat(Size(width, height), CV_8UC1, gauss);
//imshow("img_gauss", img_gauss);
Sobel(gauss, width, height, sobel_x, sobel_y, sobel);
img_sobel = Mat(Size(width, height), CV_8UC1, sobel);
imshow("img_sobel", img_sobel);
NoneMaxSuppress(sobel, width, height, sobel_x, sobel_y, sobel);
//img_sobel = Mat(Size(width, height), CV_8UC1, sobel);
//imshow("img_suppress", img_sobel);
DoubleThreshold(sobel, width, height, 30, 130, canny);
img_canny = Mat(Size(width, height), CV_8UC1, canny);
imshow("img_canny", img_canny);
}
else
{
/*read image*/
cout<<"gpu"<<endl;
camera >> img_src;
resize(img_src, img_src, Size(width, height), 0, 0);
cvtColor(img_src, img_gray, CV_BGR2GRAY);
/*load into gpu*/
if(CUDA_ERR_HANDLE(hipMemcpy(cuda_gray, img_gray.data, width * height * sizeof(unsigned char), hipMemcpyHostToDevice)))
{
cout<<"memcpy fail1"<<endl;
continue;
}
/*gauss filter*/
hipLaunchKernelGGL(( CUDA_Gauss), dim3(block_size), dim3(thread_size), 0, 0, cuda_gray, width, height, cuda_gauss_kernel, gauss_kernel_size, cuda_gauss);
/*sobel edge detection*/
hipLaunchKernelGGL(( CUDA_Sobel), dim3(block_size), dim3(thread_size), 0, 0, cuda_gauss, width, height, cuda_sobel_x, cuda_sobel_y, cuda_sobel);
/*none max suppress*/
hipLaunchKernelGGL(( CUDA_NoneMaxSuppress), dim3(block_size), dim3(thread_size), 0, 0, cuda_sobel, width, height, cuda_sobel_x, cuda_sobel_y, cuda_sobel);
hipDeviceSynchronize();
/*double threshold*/
hipLaunchKernelGGL(( CUDA_DoubleThreshold), dim3(block_size), dim3(thread_size), 0, 0, cuda_sobel, width, height, 30, 130, cuda_canny);
if(CUDA_ERR_HANDLE(hipMemcpy(canny, cuda_canny, width * height * sizeof(unsigned char), hipMemcpyDeviceToHost)))
{
cout<<"memcpy fail2"<<endl;
continue;
}
img_canny = Mat(Size(width, height), CV_8UC1, canny);
imshow("img_canny_gpu", img_canny);
}
if(waitKey(1) == 'q')
break;
//break;
}
hipFree(cuda_gray);
hipFree(cuda_gauss);
hipFree(cuda_gauss_kernel);
hipFree(cuda_sobel_x);
hipFree(cuda_sobel_y);
hipFree(cuda_sobel);
hipFree(cuda_canny);
delete[] gauss;
gauss = nullptr;
delete[] gauss_kernel;
gauss_kernel = nullptr;
delete[] sobel_x;
sobel_x = nullptr;
delete[] canny;
canny = nullptr;
delete[] sobel_y;
sobel_y = nullptr;
delete[] sobel;
sobel = nullptr;
return 0;
}
unsigned char GetPixelVal(unsigned char* img, int img_height, int img_width, int i, int j)
{
if(i >= img_height || i < 0)
return 0;
if(j >= img_width || j < 0)
return 0;
return *(img + i * img_width + j);
}
__device__ unsigned char CUDA_GetPixelVal(unsigned char* img, int img_height, int img_width, int i, int j)
{
if(i >= img_height || i < 0)
return 0;
else if(j >= img_width || j < 0)
return 0;
return *(img + i * img_width + j);
}
__global__ void CUDA_Gauss(unsigned char* img, int img_width, int img_height, float* kernel, int kernel_size, unsigned char* output)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
int i = id / img_width;
int j = id % img_width;
if(id < img_width * img_height)
{
int new_pixel_value = 0;
int half_kernel_size = kernel_size / 2;
for(int k = 0; k < kernel_size; k++)
{
for(int m = 0; m < kernel_size; m++)
{
new_pixel_value += (*(kernel + k * kernel_size + m)) * CUDA_GetPixelVal(img, img_height, img_width, i + k - half_kernel_size, j + m - half_kernel_size);
/* no shared memory is used here, so no per-iteration __syncthreads() is needed */
}
}
*(output + i * img_width + j) = new_pixel_value;
}
}
__global__ void CUDA_Sobel(unsigned char* img, int img_width, int img_height, short* sobel_x, short* sobel_y, unsigned char* output)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
int i = id / img_width;
int j = id % img_width;
if(id >= img_width * img_height)
return;
*(sobel_x + i * img_width + j) = CUDA_GetPixelVal(img, img_height, img_width, i-1, j-1) * (1) +
CUDA_GetPixelVal(img, img_height, img_width, i-1, j ) * (2) +
CUDA_GetPixelVal(img, img_height, img_width, i-1, j+1) * (1) +
CUDA_GetPixelVal(img, img_height, img_width, i+1, j-1) * (-1) +
CUDA_GetPixelVal(img, img_height, img_width, i+1, j ) * (-2) +
CUDA_GetPixelVal(img, img_height, img_width, i+1, j+1) * (-1);
*(sobel_y + i * img_width + j) = CUDA_GetPixelVal(img, img_height, img_width, i-1, j-1) * (-1) +
CUDA_GetPixelVal(img, img_height, img_width, i-1, j+1) * (1) +
CUDA_GetPixelVal(img, img_height, img_width, i , j-1) * (-2) +
CUDA_GetPixelVal(img, img_height, img_width, i , j+1) * (2) +
CUDA_GetPixelVal(img, img_height, img_width, i+1, j-1) * (-1) +
CUDA_GetPixelVal(img, img_height, img_width, i+1, j+1) * (1);
float val = sqrt(pow(*(sobel_x + i * img_width + j), 2) + pow(*(sobel_y + i * img_width + j), 2));
if(val > 255)
*(output + i * img_width + j) = 255;
else
*(output + i * img_width + j) = val;
}
void GenerateGaussKernel(int size, float sigma, float* kernel)
{
int center = size / 2;
float sum = 0.0f;
for(int i = 0; i < size; i++)
{
for(int j = 0; j < size; j++)
{
*(kernel + i * size + j) = (float)1 / (2 * 3.1415926 * sigma * sigma) * exp(-(pow(i - center, 2) + pow(j - center, 2)) / (2 * pow(sigma, 2)));
sum += *(kernel + i * size + j);
}
}
cout<<"gauss kenel : "<<endl;
for(int i = 0; i < size; i++)
{
for(int j = 0; j < size; j++)
{
*(kernel + i * size + j) /= sum;
cout<<*(kernel + i * size + j)<<" ";
}
cout<<endl;
}
cout<<endl;
}
void Gauss(unsigned char* img, int img_width, int img_height, float* kernel, int kernel_size, unsigned char* output)
{
for(int i = 0; i < img_height; i++)
{
for(int j = 0; j < img_width; j++)
{
int new_pixel_value = 0;
int half_kernel_size = kernel_size / 2;
for(int k = 0; k < kernel_size; k++)
{
for(int m = 0; m < kernel_size; m++)
{
new_pixel_value += GetPixelVal(img, img_height, img_width, i + k - half_kernel_size, j + m - half_kernel_size) * (*(kernel + k * kernel_size + m));
}
}
*(output + i * img_width + j) = new_pixel_value;
}
}
}
void Sobel(unsigned char* img, int img_width, int img_height, short* sobel_x, short* sobel_y, unsigned char* output)
{
float sobel_filter_x[9] = {1,2,1,0,0,0,-1,-2,-1};
float sobel_filter_y[9] = {-1,0,1,-2,0,2,-1,0,1};
for(int i = 0; i < img_height; i++)
{
for(int j = 0; j < img_width; j++)
{
*(sobel_x + i * img_width + j) = GetPixelVal(img, img_height, img_width, i-1, j-1) * sobel_filter_x[0] +
GetPixelVal(img, img_height, img_width, i-1, j ) * sobel_filter_x[1] +
GetPixelVal(img, img_height, img_width, i-1, j+1) * sobel_filter_x[2] +
GetPixelVal(img, img_height, img_width, i , j-1) * sobel_filter_x[3] +
GetPixelVal(img, img_height, img_width, i , j ) * sobel_filter_x[4] +
GetPixelVal(img, img_height, img_width, i , j+1) * sobel_filter_x[5] +
GetPixelVal(img, img_height, img_width, i+1, j-1) * sobel_filter_x[6] +
GetPixelVal(img, img_height, img_width, i+1, j ) * sobel_filter_x[7] +
GetPixelVal(img, img_height, img_width, i+1, j+1) * sobel_filter_x[8];
*(sobel_y + i * img_width + j) = GetPixelVal(img, img_height, img_width, i-1, j-1) * sobel_filter_y[0] +
GetPixelVal(img, img_height, img_width, i-1, j ) * sobel_filter_y[1] +
GetPixelVal(img, img_height, img_width, i-1, j+1) * sobel_filter_y[2] +
GetPixelVal(img, img_height, img_width, i , j-1) * sobel_filter_y[3] +
GetPixelVal(img, img_height, img_width, i , j ) * sobel_filter_y[4] +
GetPixelVal(img, img_height, img_width, i , j+1) * sobel_filter_y[5] +
GetPixelVal(img, img_height, img_width, i+1, j-1) * sobel_filter_y[6] +
GetPixelVal(img, img_height, img_width, i+1, j ) * sobel_filter_y[7] +
GetPixelVal(img, img_height, img_width, i+1, j+1) * sobel_filter_y[8];
float val =sqrt(pow(*(sobel_x + i * img_width + j), 2) + pow(*(sobel_y + i * img_width + j), 2));
if(val > 255)
*(output + i * img_width + j) = 255;
else
*(output + i * img_width + j) = val;
}
}
}
__global__ void CUDA_NoneMaxSuppress(unsigned char* sobel, int sobel_width, int sobel_height, short* sobel_x, short* sobel_y, unsigned char* output)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
int i = id / sobel_width;
int j = id % sobel_width;
if(id >= sobel_width * sobel_height)
return;
if(i == 0 || j == 0)
{
*(output + i * sobel_width + j) = 0;
}
else
{
short gx = *(sobel_x + i * sobel_width + j);
short gy = *(sobel_y + i * sobel_width + j);
short g1,g2,g3,g4;
float weight = 0.0f;
if(gx == 0 || gy == 0)
{
*(output + i * sobel_width + j) = 0;
}
else
{
if(abs(gx) < abs(gy) && gx * gy >= 0)
{
weight = (float)abs(gx) / abs(gy);
g1 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i , j + 1);
g2 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j + 1);
g3 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i , j - 1);
g4 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j - 1);
}
else if(abs(gx) >= abs(gy) && gx * gy > 0)
{
weight = (float)abs(gy) / abs(gx);
g1 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j );
g2 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j + 1);
g3 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j );
g4 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j - 1);
}
else if(abs(gx) > abs(gy) && gx * gy <= 0)
{
weight = (float)abs(gy) / abs(gx);
g1 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j );
g2 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j - 1);
g3 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j );
g4 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j + 1);
}
else if(abs(gx) <= abs(gy) && gx * gy < 0)
{
weight = (float)abs(gx) / abs(gy);
g1 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i , j - 1);
g2 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j - 1);
g3 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i , j + 1);
g4 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j + 1);
}
else
{
printf("invalid gradient\n");
}
int dot1 = g1 * (1 - weight) + g2 * weight;
int dot2 = g3 * (1 - weight) + g4 * weight;
if(*(sobel + i * sobel_width + j) > dot1 && *(sobel + i * sobel_width + j) > dot2)
*(output + i * sobel_width + j) = *(sobel + i * sobel_width + j);
else
*(output + i * sobel_width + j) = 0;
}
}
}
void NoneMaxSuppress(unsigned char* sobel, int sobel_width, int sobel_height, short* sobel_x, short* sobel_y, unsigned char* output)
{
for(int i = 0; i < sobel_height; i++)
{
for(int j = 0; j < sobel_width; j++)
{
if(i == 0 || j == 0)
{
*(output + i * sobel_width + j) = 0;
}
else
{
short gx = *(sobel_x + i * sobel_width + j);
short gy = *(sobel_y + i * sobel_width + j);
short g1,g2,g3,g4;
float weight = 0.0f;
if(gx == 0 || gy == 0)
{
*(output + i * sobel_width + j) = 0;
}
else
{
if(abs(gx) < abs(gy) && gx * gy >= 0)
{
weight = (float)abs(gx) / abs(gy);
g1 = GetPixelVal(sobel, sobel_height, sobel_width, i , j + 1);
g2 = GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j + 1);
g3 = GetPixelVal(sobel, sobel_height, sobel_width, i , j - 1);
g4 = GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j - 1);
}
else if(abs(gx) >= abs(gy) && gx * gy > 0)
{
weight = (float)abs(gy) / abs(gx);
g1 = GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j );
g2 = GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j + 1);
g3 = GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j );
g4 = GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j - 1);
}
else if(abs(gx) > abs(gy) && gx * gy <= 0)
{
weight = (float)abs(gy) / abs(gx);
g1 = GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j );
g2 = GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j - 1);
g3 = GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j );
g4 = GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j + 1);
}
else if(abs(gx) <= abs(gy) && gx * gy < 0)
{
weight = (float)abs(gx) / abs(gy);
g1 = GetPixelVal(sobel, sobel_height, sobel_width, i , j - 1);
g2 = GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j - 1);
g3 = GetPixelVal(sobel, sobel_height, sobel_width, i , j + 1);
g4 = GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j + 1);
}
else
{
cout<<"none"<<endl;
}
int dot1 = g1 * (1 - weight) + g2 * weight;
int dot2 = g3 * (1 - weight) + g4 * weight;
if(*(sobel + i * sobel_width + j) > dot1 && *(sobel + i * sobel_width + j) > dot2)
*(output + i * sobel_width + j) = *(sobel + i * sobel_width + j);
else
*(output + i * sobel_width + j) = 0;
}
}
}
}
}
__global__ void CUDA_DoubleThreshold(unsigned char* sobel, int sobel_width, int sobel_height, int min_val, int max_val, unsigned char* canny)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
int i = id / sobel_width;
int j = id % sobel_width;
if(id >= sobel_width * sobel_height)
return;
if(i == 0 || i == sobel_height - 1 || j == 0 || j == sobel_width - 1)
{
*(canny + i * sobel_width + j) = 255;
return;
}
if(*(sobel + i * sobel_width + j) > max_val)
{
*(canny + i * sobel_width + j) = 255;
}
else if(*(sobel + i * sobel_width + j) > min_val && *(sobel + i * sobel_width + j) < max_val)
{
if(CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j - 1) > max_val ||
CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j ) > max_val ||
CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j + 1) > max_val ||
CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i , j - 1) > max_val ||
CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i , j + 1) > max_val ||
CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j - 1) > max_val ||
CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j ) > max_val ||
CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j + 1) > max_val)
{
*(canny + i * sobel_width + j) = 255;
}
else
{
*(canny + i * sobel_width + j) = 0;
}
}
else
{
*(canny + i * sobel_width + j) = 0;
}
}
void DoubleThreshold(unsigned char* sobel, int sobel_width, int sobel_height, int min_val, int max_val, unsigned char* canny)
{
for(int i = 1; i < sobel_height - 1; i++)
{
for(int j = 1; j < sobel_width - 1; j++)
{
if(*(sobel + i * sobel_width + j) > max_val)
{
*(canny + i * sobel_width + j) = 255;
}
else if(*(sobel + i * sobel_width + j) > min_val && *(sobel + i * sobel_width + j) < max_val)
{
if(GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j - 1) > max_val ||
GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j ) > max_val ||
GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j + 1) > max_val ||
GetPixelVal(sobel, sobel_height, sobel_width, i , j - 1) > max_val ||
GetPixelVal(sobel, sobel_height, sobel_width, i , j + 1) > max_val ||
GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j - 1) > max_val ||
GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j ) > max_val ||
GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j + 1) > max_val)
{
*(canny + i * sobel_width + j) = 255;
}
else
{
*(canny + i * sobel_width + j) = 0;
}
}
else
{
*(canny + i * sobel_width + j) = 0;
}
}
}
}
|
b7a6211f32b47f4775f6315cc315dcd1ce827c46.cu
|
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
#define CUDA_ERR_HANDLE(err) (CudaErrorHandler(err, __FILE__, __LINE__))
int CudaErrorHandler(cudaError_t err, const char* file, int line)
{
if(err != cudaSuccess)
{
cout<<file<<" no."<<line<<" error : "<<cudaGetErrorString(err)<<endl;
return 1;
}
return 0;
}
void GenerateGaussKernel(int size, float sigma, float* kernel);
unsigned char GetPixelVal(unsigned char* img, int img_height, int img_width, int i, int j);
void Gauss(unsigned char* img, int img_width, int img_height, float* kernel, int kernel_size, unsigned char* output);
void Sobel(unsigned char* img, int img_width, int img_height, short* sobel_x, short* sobel_y, unsigned char* output);
void NoneMaxSuppress(unsigned char* sobel, int sobel_width, int sobel_height, short* sobel_x, short* sobel_y, unsigned char* output);
void DoubleThreshold(unsigned char* sobel, int sobel_width, int sobel_height, int min_val, int max_val, unsigned char* canny);
__device__ unsigned char CUDA_GetPixelVal(unsigned char* img, int img_height, int img_width, int i, int j);
__global__ void CUDA_Gauss(unsigned char* img, int img_width, int img_height, float* kernel, int kernel_size, unsigned char* output);
__global__ void CUDA_Sobel(unsigned char* img, int img_width, int img_height, short* sobel_x, short* sobel_y, unsigned char* output);
__global__ void CUDA_NoneMaxSuppress(unsigned char* sobel, int sobel_width, int sobel_height, short* sobel_x, short* sobel_y, unsigned char* output);
__global__ void CUDA_DoubleThreshold(unsigned char* sobel, int sobel_width, int sobel_height, int min_val, int max_val, unsigned char* canny);
int main(int argc, char** argv)
{
int cpu_gpu = 0;
if(argc >= 2 && strcmp(argv[1], "gpu") == 0)
{
cout<<"---canny acceleration[GPU]!---"<<endl;
cpu_gpu = 1;
}
else
{
cout<<"---canny acceleration[CPU]!---"<<endl;
}
int width = 640;
int height = 480;
int gauss_kernel_size = 3;
int thread_size = width;
int block_size = (width * height + thread_size - 1) / thread_size;
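// With the defaults above this is one thread per pixel: thread_size = 640 threads per
// block and block_size = (640*480 + 639)/640 = 480 blocks, so every kernel below can
// recover its pixel from id = blockIdx.x * blockDim.x + threadIdx.x.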
/*****cpu memory*****/
unsigned char* gauss = new unsigned char[width * height];
float* gauss_kernel = new float[gauss_kernel_size * gauss_kernel_size];
GenerateGaussKernel(gauss_kernel_size, 1, gauss_kernel);
short* sobel_x = new short[width * height];
short* sobel_y = new short[width * height];
unsigned char* sobel = new unsigned char[width * height];
unsigned char* canny = new unsigned char[width * height];
/*****gpu memory*****/
unsigned char* cuda_gray;
cudaMalloc(&cuda_gray, width * height * sizeof(unsigned char));
unsigned char* cuda_gauss;
cudaMalloc(&cuda_gauss, width * height * sizeof(unsigned char));
float* cuda_gauss_kernel;
cudaMalloc(&cuda_gauss_kernel, width * height * sizeof(float));
cudaMemcpy(cuda_gauss_kernel, gauss_kernel, gauss_kernel_size * gauss_kernel_size * sizeof(float), cudaMemcpyHostToDevice);
short* cuda_sobel_x;
cudaMalloc(&cuda_sobel_x, width * height * sizeof(short));
short* cuda_sobel_y;
cudaMalloc(&cuda_sobel_y, width * height * sizeof(short));
unsigned char* cuda_sobel;
cudaMalloc(&cuda_sobel, width * height * sizeof(unsigned char));
unsigned char* cuda_canny;
cudaMalloc(&cuda_canny, width * height * sizeof(unsigned char));
VideoCapture camera(0);
//VideoCapture camera;
//camera.open("/home/katsuto/Pictures/Wallpapers/video.MP4");
Mat img_src;
//img_src = imread("/home/katsuto/Pictures/Wallpapers/nvidia.jpg");
Mat img_gray, img_gauss, img_sobel, img_canny;
while(1)
{
if(cpu_gpu == 0)
{
camera >> img_src;
//img_src = imread("/home/katsuto/Pictures/Wallpapers/nvidia.jpg");
cvtColor(img_src, img_gray, CV_BGR2GRAY);
resize(img_gray, img_gray, Size(width, height), 0, 0);
imshow("img_gray", img_gray);
Gauss(img_gray.data, width, height, gauss_kernel, gauss_kernel_size, gauss);
//img_gauss = Mat(Size(width, height), CV_8UC1, gauss);
//imshow("img_gauss", img_gauss);
Sobel(gauss, width, height, sobel_x, sobel_y, sobel);
img_sobel = Mat(Size(width, height), CV_8UC1, sobel);
imshow("img_sobel", img_sobel);
NoneMaxSuppress(sobel, width, height, sobel_x, sobel_y, sobel);
//img_sobel = Mat(Size(width, height), CV_8UC1, sobel);
//imshow("img_suppress", img_sobel);
DoubleThreshold(sobel, width, height, 30, 130, canny);
img_canny = Mat(Size(width, height), CV_8UC1, canny);
imshow("img_canny", img_canny);
}
else
{
/*read image*/
cout<<"gpu"<<endl;
camera >> img_src;
resize(img_src, img_src, Size(width, height), 0, 0);
cvtColor(img_src, img_gray, CV_BGR2GRAY);
/*load into gpu*/
if(CUDA_ERR_HANDLE(cudaMemcpy(cuda_gray, img_gray.data, width * height * sizeof(unsigned char), cudaMemcpyHostToDevice)))
{
cout<<"memcpy fail1"<<endl;
continue;
}
/*gauss filter*/
CUDA_Gauss<<<block_size, thread_size>>>(cuda_gray, width, height, cuda_gauss_kernel, gauss_kernel_size, cuda_gauss);
/*sobel edge detection*/
CUDA_Sobel<<<block_size, thread_size>>>(cuda_gauss, width, height, cuda_sobel_x, cuda_sobel_y, cuda_sobel);
/*none max suppress*/
CUDA_NoneMaxSuppress<<<block_size, thread_size>>>(cuda_sobel, width, height, cuda_sobel_x, cuda_sobel_y, cuda_sobel);
cudaDeviceSynchronize();
/*double threshold*/
CUDA_DoubleThreshold<<<block_size, thread_size>>>(cuda_sobel, width, height, 30, 130, cuda_canny);
if(CUDA_ERR_HANDLE(cudaMemcpy(canny, cuda_canny, width * height * sizeof(unsigned char), cudaMemcpyDeviceToHost)))
{
cout<<"memcpy fail2"<<endl;
continue;
}
img_canny = Mat(Size(width, height), CV_8UC1, canny);
imshow("img_canny_gpu", img_canny);
}
if(waitKey(1) == 'q')
break;
//break;
}
cudaFree(cuda_gray);
cudaFree(cuda_gauss);
cudaFree(cuda_gauss_kernel);
cudaFree(cuda_sobel_x);
cudaFree(cuda_sobel_y);
cudaFree(cuda_sobel);
cudaFree(cuda_canny);
delete[] gauss;
gauss = nullptr;
delete[] gauss_kernel;
gauss_kernel = nullptr;
delete[] sobel_x;
sobel_x = nullptr;
delete[] canny;
canny = nullptr;
delete[] sobel_y;
sobel_y = nullptr;
delete[] sobel;
sobel = nullptr;
return 0;
}
unsigned char GetPixelVal(unsigned char* img, int img_height, int img_width, int i, int j)
{
if(i >= img_height || i < 0)
return 0;
if(j >= img_width || j < 0)
return 0;
return *(img + i * img_width + j);
}
__device__ unsigned char CUDA_GetPixelVal(unsigned char* img, int img_height, int img_width, int i, int j)
{
if(i >= img_height || i < 0)
return 0;
else if(j >= img_width || j < 0)
return 0;
return *(img + i * img_width + j);
}
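// CUDA_Gauss: one thread per output pixel. The flat thread id is split into a row i and
// column j, and the pixel is convolved with the normalized kernel_size x kernel_size
// Gaussian kernel; taps that fall outside the image read as 0 through CUDA_GetPixelVal.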
__global__ void CUDA_Gauss(unsigned char* img, int img_width, int img_height, float* kernel, int kernel_size, unsigned char* output)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
int i = id / img_width;
int j = id % img_width;
if(id < img_width * img_height)
{
int new_pixel_value = 0;
int half_kernel_size = kernel_size / 2;
for(int k = 0; k < kernel_size; k++)
{
for(int m = 0; m < kernel_size; m++)
{
new_pixel_value += (*(kernel + k * kernel_size + m)) * CUDA_GetPixelVal(img, img_height, img_width, i + k - half_kernel_size, j + m - half_kernel_size);
/* no shared memory is used here, so no per-iteration __syncthreads() is needed */
}
}
*(output + i * img_width + j) = new_pixel_value;
}
}
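// CUDA_Sobel: computes the horizontal and vertical Sobel responses per pixel (the all-zero
// middle row/column of the 3x3 kernels is simply skipped), keeps them in sobel_x / sobel_y
// for the suppression step, and writes the gradient magnitude sqrt(gx*gx + gy*gy),
// clamped to [0,255], into output.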
__global__ void CUDA_Sobel(unsigned char* img, int img_width, int img_height, short* sobel_x, short* sobel_y, unsigned char* output)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
int i = id / img_width;
int j = id % img_width;
if(id >= img_width * img_height)
return;
*(sobel_x + i * img_width + j) = CUDA_GetPixelVal(img, img_height, img_width, i-1, j-1) * (1) +
CUDA_GetPixelVal(img, img_height, img_width, i-1, j ) * (2) +
CUDA_GetPixelVal(img, img_height, img_width, i-1, j+1) * (1) +
CUDA_GetPixelVal(img, img_height, img_width, i+1, j-1) * (-1) +
CUDA_GetPixelVal(img, img_height, img_width, i+1, j ) * (-2) +
CUDA_GetPixelVal(img, img_height, img_width, i+1, j+1) * (-1);
*(sobel_y + i * img_width + j) = CUDA_GetPixelVal(img, img_height, img_width, i-1, j-1) * (-1) +
CUDA_GetPixelVal(img, img_height, img_width, i-1, j+1) * (1) +
CUDA_GetPixelVal(img, img_height, img_width, i , j-1) * (-2) +
CUDA_GetPixelVal(img, img_height, img_width, i , j+1) * (2) +
CUDA_GetPixelVal(img, img_height, img_width, i+1, j-1) * (-1) +
CUDA_GetPixelVal(img, img_height, img_width, i+1, j+1) * (1);
float val = sqrt(pow(*(sobel_x + i * img_width + j), 2) + pow(*(sobel_y + i * img_width + j), 2));
if(val > 255)
*(output + i * img_width + j) = 255;
else
*(output + i * img_width + j) = val;
}
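// GenerateGaussKernel: fills kernel with G(i,j) = exp(-((i-c)^2 + (j-c)^2) / (2*sigma^2)) / (2*pi*sigma^2),
// where c = size/2, then divides every entry by the sum so the weights add up to 1
// (the values printed below are the normalized kernel).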
void GenerateGaussKernel(int size, float sigma, float* kernel)
{
int center = size / 2;
float sum = 0.0f;
for(int i = 0; i < size; i++)
{
for(int j = 0; j < size; j++)
{
*(kernel + i * size + j) = (float)1 / (2 * 3.1415926 * sigma * sigma) * exp(-(pow(i - center, 2) + pow(j - center, 2)) / (2 * pow(sigma, 2)));
sum += *(kernel + i * size + j);
}
}
cout<<"gauss kenel : "<<endl;
for(int i = 0; i < size; i++)
{
for(int j = 0; j < size; j++)
{
*(kernel + i * size + j) /= sum;
cout<<*(kernel + i * size + j)<<" ";
}
cout<<endl;
}
cout<<endl;
}
void Gauss(unsigned char* img, int img_width, int img_height, float* kernel, int kernel_size, unsigned char* output)
{
for(int i = 0; i < img_height; i++)
{
for(int j = 0; j < img_width; j++)
{
int new_pixel_value = 0;
int half_kernel_size = kernel_size / 2;
for(int k = 0; k < kernel_size; k++)
{
for(int m = 0; m < kernel_size; m++)
{
new_pixel_value += GetPixelVal(img, img_height, img_width, i + k - half_kernel_size, j + m - half_kernel_size) * (*(kernel + k * kernel_size + m));
}
}
*(output + i * img_width + j) = new_pixel_value;
}
}
}
void Sobel(unsigned char* img, int img_width, int img_height, short* sobel_x, short* sobel_y, unsigned char* output)
{
float sobel_filter_x[9] = {1,2,1,0,0,0,-1,-2,-1};
float sobel_filter_y[9] = {-1,0,1,-2,0,2,-1,0,1};
for(int i = 0; i < img_height; i++)
{
for(int j = 0; j < img_width; j++)
{
*(sobel_x + i * img_width + j) = GetPixelVal(img, img_height, img_width, i-1, j-1) * sobel_filter_x[0] +
GetPixelVal(img, img_height, img_width, i-1, j ) * sobel_filter_x[1] +
GetPixelVal(img, img_height, img_width, i-1, j+1) * sobel_filter_x[2] +
GetPixelVal(img, img_height, img_width, i , j-1) * sobel_filter_x[3] +
GetPixelVal(img, img_height, img_width, i , j ) * sobel_filter_x[4] +
GetPixelVal(img, img_height, img_width, i , j+1) * sobel_filter_x[5] +
GetPixelVal(img, img_height, img_width, i+1, j-1) * sobel_filter_x[6] +
GetPixelVal(img, img_height, img_width, i+1, j ) * sobel_filter_x[7] +
GetPixelVal(img, img_height, img_width, i+1, j+1) * sobel_filter_x[8];
*(sobel_y + i * img_width + j) = GetPixelVal(img, img_height, img_width, i-1, j-1) * sobel_filter_y[0] +
GetPixelVal(img, img_height, img_width, i-1, j ) * sobel_filter_y[1] +
GetPixelVal(img, img_height, img_width, i-1, j+1) * sobel_filter_y[2] +
GetPixelVal(img, img_height, img_width, i , j-1) * sobel_filter_y[3] +
GetPixelVal(img, img_height, img_width, i , j ) * sobel_filter_y[4] +
GetPixelVal(img, img_height, img_width, i , j+1) * sobel_filter_y[5] +
GetPixelVal(img, img_height, img_width, i+1, j-1) * sobel_filter_y[6] +
GetPixelVal(img, img_height, img_width, i+1, j ) * sobel_filter_y[7] +
GetPixelVal(img, img_height, img_width, i+1, j+1) * sobel_filter_y[8];
float val =sqrt(pow(*(sobel_x + i * img_width + j), 2) + pow(*(sobel_y + i * img_width + j), 2));
if(val > 255)
*(output + i * img_width + j) = 255;
else
*(output + i * img_width + j) = val;
}
}
}
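// Non-maximum suppression: the gradient direction is taken from (gx, gy), the two magnitudes
// on either side along that direction are linearly interpolated from the neighbours g1..g4
// using `weight`, and the pixel survives only if its own magnitude exceeds both interpolated
// values; pixels on the first row/column (i == 0 || j == 0) are zeroed.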
__global__ void CUDA_NoneMaxSuppress(unsigned char* sobel, int sobel_width, int sobel_height, short* sobel_x, short* sobel_y, unsigned char* output)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
int i = id / sobel_width;
int j = id % sobel_width;
if(id >= sobel_width * sobel_height)
return;
if(i == 0 || j == 0)
{
*(output + i * sobel_width + j) = 0;
}
else
{
short gx = *(sobel_x + i * sobel_width + j);
short gy = *(sobel_y + i * sobel_width + j);
short g1,g2,g3,g4;
float weight = 0.0f;
if(gx == 0 || gy == 0)
{
*(output + i * sobel_width + j) = 0;
}
else
{
if(abs(gx) < abs(gy) && gx * gy >= 0)
{
weight = (float)abs(gx) / abs(gy);
g1 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i , j + 1);
g2 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j + 1);
g3 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i , j - 1);
g4 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j - 1);
}
else if(abs(gx) >= abs(gy) && gx * gy > 0)
{
weight = (float)abs(gy) / abs(gx);
g1 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j );
g2 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j + 1);
g3 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j );
g4 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j - 1);
}
else if(abs(gx) > abs(gy) && gx * gy <= 0)
{
weight = (float)abs(gy) / abs(gx);
g1 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j );
g2 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j - 1);
g3 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j );
g4 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j + 1);
}
else if(abs(gx) <= abs(gy) && gx * gy < 0)
{
weight = (float)abs(gx) / abs(gy);
g1 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i , j - 1);
g2 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j - 1);
g3 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i , j + 1);
g4 = CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j + 1);
}
else
{
printf("invalid gradient\n");
}
int dot1 = g1 * (1 - weight) + g2 * weight;
int dot2 = g3 * (1 - weight) + g4 * weight;
if(*(sobel + i * sobel_width + j) > dot1 && *(sobel + i * sobel_width + j) > dot2)
*(output + i * sobel_width + j) = *(sobel + i * sobel_width + j);
else
*(output + i * sobel_width + j) = 0;
}
}
}
void NoneMaxSuppress(unsigned char* sobel, int sobel_width, int sobel_height, short* sobel_x, short* sobel_y, unsigned char* output)
{
for(int i = 0; i < sobel_height; i++)
{
for(int j = 0; j < sobel_width; j++)
{
if(i == 0 || j == 0)
{
*(output + i * sobel_width + j) = 0;
}
else
{
short gx = *(sobel_x + i * sobel_width + j);
short gy = *(sobel_y + i * sobel_width + j);
short g1,g2,g3,g4;
float weight = 0.0f;
if(gx == 0 || gy == 0)
{
*(output + i * sobel_width + j) = 0;
}
else
{
if(abs(gx) < abs(gy) && gx * gy >= 0)
{
weight = (float)abs(gx) / abs(gy);
g1 = GetPixelVal(sobel, sobel_height, sobel_width, i , j + 1);
g2 = GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j + 1);
g3 = GetPixelVal(sobel, sobel_height, sobel_width, i , j - 1);
g4 = GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j - 1);
}
else if(abs(gx) >= abs(gy) && gx * gy > 0)
{
weight = (float)abs(gy) / abs(gx);
g1 = GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j );
g2 = GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j + 1);
g3 = GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j );
g4 = GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j - 1);
}
else if(abs(gx) > abs(gy) && gx * gy <= 0)
{
weight = (float)abs(gy) / abs(gx);
g1 = GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j );
g2 = GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j - 1);
g3 = GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j );
g4 = GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j + 1);
}
else if(abs(gx) <= abs(gy) && gx * gy < 0)
{
weight = (float)abs(gx) / abs(gy);
g1 = GetPixelVal(sobel, sobel_height, sobel_width, i , j - 1);
g2 = GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j - 1);
g3 = GetPixelVal(sobel, sobel_height, sobel_width, i , j + 1);
g4 = GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j + 1);
}
else
{
cout<<"none"<<endl;
}
int dot1 = g1 * (1 - weight) + g2 * weight;
int dot2 = g3 * (1 - weight) + g4 * weight;
if(*(sobel + i * sobel_width + j) > dot1 && *(sobel + i * sobel_width + j) > dot2)
*(output + i * sobel_width + j) = *(sobel + i * sobel_width + j);
else
*(output + i * sobel_width + j) = 0;
}
}
}
}
}
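// Double thresholding (hysteresis): a magnitude above max_val is a strong edge (255); values
// between min_val and max_val are kept only if at least one 8-neighbour is strong; everything
// else is suppressed to 0. The GPU kernel additionally writes 255 along the one-pixel image
// border, which the CPU loop below simply never visits.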
__global__ void CUDA_DoubleThreshold(unsigned char* sobel, int sobel_width, int sobel_height, int min_val, int max_val, unsigned char* canny)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
int i = id / sobel_width;
int j = id % sobel_width;
if(id >= sobel_width * sobel_height)
return;
if(i == 0 || i == sobel_height - 1 || j == 0 || j == sobel_width - 1)
{
*(canny + i * sobel_width + j) = 255;
return;
}
if(*(sobel + i * sobel_width + j) > max_val)
{
*(canny + i * sobel_width + j) = 255;
}
else if(*(sobel + i * sobel_width + j) > min_val && *(sobel + i * sobel_width + j) < max_val)
{
if(CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j - 1) > max_val ||
CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j ) > max_val ||
CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j + 1) > max_val ||
CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i , j - 1) > max_val ||
CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i , j + 1) > max_val ||
CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j - 1) > max_val ||
CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j ) > max_val ||
CUDA_GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j + 1) > max_val)
{
*(canny + i * sobel_width + j) = 255;
}
else
{
*(canny + i * sobel_width + j) = 0;
}
}
else
{
*(canny + i * sobel_width + j) = 0;
}
}
void DoubleThreshold(unsigned char* sobel, int sobel_width, int sobel_height, int min_val, int max_val, unsigned char* canny)
{
for(int i = 1; i < sobel_height - 1; i++)
{
for(int j = 1; j < sobel_width - 1; j++)
{
if(*(sobel + i * sobel_width + j) > max_val)
{
*(canny + i * sobel_width + j) = 255;
}
else if(*(sobel + i * sobel_width + j) > min_val && *(sobel + i * sobel_width + j) < max_val)
{
if(GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j - 1) > max_val ||
GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j ) > max_val ||
GetPixelVal(sobel, sobel_height, sobel_width, i - 1, j + 1) > max_val ||
GetPixelVal(sobel, sobel_height, sobel_width, i , j - 1) > max_val ||
GetPixelVal(sobel, sobel_height, sobel_width, i , j + 1) > max_val ||
GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j - 1) > max_val ||
GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j ) > max_val ||
GetPixelVal(sobel, sobel_height, sobel_width, i + 1, j + 1) > max_val)
{
*(canny + i * sobel_width + j) = 255;
}
else
{
*(canny + i * sobel_width + j) = 0;
}
}
else
{
*(canny + i * sobel_width + j) = 0;
}
}
}
}
|
7a55e6d0fca65e37b5d772f3ef1bcdd4b12d7525.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "reservoir_sampling.cuh"
int const threadsPerBlock = 512;
__global__ void generate_samples(
int64_t *samples,
int k,
hiprandStateMtgp32_t *state
){
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
samples[thread_id] = hiprand(state) % (thread_id + k + 1);
}
template <typename scalar_t>
__global__ void generate_keys(
scalar_t *keys,
scalar_t *weights,
hiprandStateMtgp32_t *state
){
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
float u = hiprand_uniform(state);
keys[thread_id] = (scalar_t) __powf(u, (float) 1/weights[thread_id]);
}
__global__ void generate_reservoir(
int64_t *indices,
int64_t *samples,
int nb_iterations,
int k
){
for(int i = 0; i < nb_iterations; i++){
int64_t z = samples[i];
if (z < k) {
thrust::swap(indices[z], indices[i + k]);
}
}
}
at::Tensor reservoir_sampling_cuda(
at::Tensor& x,
at::Tensor &weights,
int k
){
if (!x.is_contiguous()){
x = x.contiguous();
}
int n = x.numel();
auto options = x.options().dtype(torch::kLong);
dim3 threads(threadsPerBlock);
THCState *state = at::globalContext().getTHCState();
THCRandom_seed(state);
THCGenerator *generator = THCRandom_getGenerator(state);
if (weights.numel() == 0){
at::Tensor indices_n = torch::arange({n}, options);
int split, begin, end;
if(2 * k < n){
split = n - k;
begin = n - k;
end = n;
} else {
split = k;
begin = 0;
end = k;
}
int nb_iterations = ::min(k, n - k);
dim3 blocks((nb_iterations + threadsPerBlock - 1)/threadsPerBlock);
at::Tensor samples = torch::arange({nb_iterations}, options);
hipLaunchKernelGGL(( generate_samples), dim3(blocks), dim3(threads), 0, 0,
samples.data<int64_t>(),
split,
generator->state.gen_states
);
hipLaunchKernelGGL(( generate_reservoir), dim3(1), dim3(1), 0, 0,
indices_n.data<int64_t>(),
samples.data<int64_t>(),
nb_iterations,
split
);
return x.index_select(
0,
indices_n.index_select(
0,
torch::arange(begin, end, options)
)
);
} else {
at::Tensor keys = torch::empty({n}, weights.options());
dim3 all_blocks((n + threadsPerBlock - 1)/threadsPerBlock);
AT_DISPATCH_FLOATING_TYPES(weights.type(), "generate keys", [&] {
hipLaunchKernelGGL(( generate_keys<scalar_t>), dim3(all_blocks), dim3(threads), 0, 0,
keys.data<scalar_t>(),
weights.data<scalar_t>(),
generator->state.gen_states
);
});
return x.index_select(0, std::get<1>(keys.topk(k)));
}
}
|
7a55e6d0fca65e37b5d772f3ef1bcdd4b12d7525.cu
|
#include "reservoir_sampling.cuh"
int const threadsPerBlock = 512;
__global__ void generate_samples(
int64_t *samples,
int k,
curandStateMtgp32 *state
){
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
samples[thread_id] = curand(state) % (thread_id + k + 1);
}
template <typename scalar_t>
__global__ void generate_keys(
scalar_t *keys,
scalar_t *weights,
curandStateMtgp32 *state
){
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
float u = curand_uniform(state);
keys[thread_id] = (scalar_t) __powf(u, (float) 1/weights[thread_id]);
}
__global__ void generate_reservoir(
int64_t *indices,
int64_t *samples,
int nb_iterations,
int k
){
for(int i = 0; i < nb_iterations; i++){
int64_t z = samples[i];
if (z < k) {
thrust::swap(indices[z], indices[i + k]);
}
}
}
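// reservoir_sampling_cuda: with no weights, indices 0..n-1 are permuted by a Fisher-Yates
// style pass driven by the pre-generated `samples` (only min(k, n-k) swaps are needed, taken
// from whichever end of the index range is shorter) and the selected indices are gathered
// from x with index_select. With weights, each element gets the key u^(1/w)
// (Efraimidis-Spirakis weighted sampling) and the top-k keys are selected.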
at::Tensor reservoir_sampling_cuda(
at::Tensor& x,
at::Tensor &weights,
int k
){
if (!x.is_contiguous()){
x = x.contiguous();
}
int n = x.numel();
auto options = x.options().dtype(torch::kLong);
dim3 threads(threadsPerBlock);
THCState *state = at::globalContext().getTHCState();
THCRandom_seed(state);
THCGenerator *generator = THCRandom_getGenerator(state);
if (weights.numel() == 0){
at::Tensor indices_n = torch::arange({n}, options);
int split, begin, end;
if(2 * k < n){
split = n - k;
begin = n - k;
end = n;
} else {
split = k;
begin = 0;
end = k;
}
int nb_iterations = std::min(k, n - k);
dim3 blocks((nb_iterations + threadsPerBlock - 1)/threadsPerBlock);
at::Tensor samples = torch::arange({nb_iterations}, options);
generate_samples<<<blocks, threads>>>(
samples.data<int64_t>(),
split,
generator->state.gen_states
);
generate_reservoir<<<1, 1>>>(
indices_n.data<int64_t>(),
samples.data<int64_t>(),
nb_iterations,
split
);
return x.index_select(
0,
indices_n.index_select(
0,
torch::arange(begin, end, options)
)
);
} else {
at::Tensor keys = torch::empty({n}, weights.options());
dim3 all_blocks((n + threadsPerBlock - 1)/threadsPerBlock);
AT_DISPATCH_FLOATING_TYPES(weights.type(), "generate keys", [&] {
generate_keys<scalar_t><<<all_blocks, threads>>>(
keys.data<scalar_t>(),
weights.data<scalar_t>(),
generator->state.gen_states
);
});
return x.index_select(0, std::get<1>(keys.topk(k)));
}
}
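// A hypothetical call site (not part of this file), assuming reservoir_sampling_cuda is
// exposed through a torch C++ extension; an empty `weights` tensor selects the unweighted
// branch above:
//
//   at::Tensor x = torch::randn({1000}, torch::kCUDA);
//   at::Tensor w = torch::empty({0}, x.options());          // no weights -> uniform sampling
//   at::Tensor sample = reservoir_sampling_cuda(x, w, 10);  // 10 elements without replacement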
|
7fb3ebf3dbee321b25f8a19f449585b063d0b7dc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cudaconv2.cuh>
/*
* Block size: 16x16.
* blockIdx.x determines case in batches of 16*imgsPerThread.
* blockIdx.y determines 4x4 image region in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines pixel.
*
* hidActs: (numFilters, numModules, numImages)
* filters: (numColors, filterPixels, numFilters) if conv
* (numModules, numColors, filterPixels, numFilters) otherwise
* targets: (numColors, imgPixels, numImages)
*
* Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases.
*
* Number of filters must be divisible by 16.
* Number of images must be divisible by 16*imgsPerThread if checkCaseBounds is false.
* 16 * imgsPerThread must be divisible by 32.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*/
template <int imgsPerThread, int numColors, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_color(const float* hidActs, const float* filters, float* targets,
const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSize, const int paddingStart, const int moduleStride,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[numColors*16][16 + 1];
__shared__ float shHidActs[16][16*imgsPerThread];
const int blockCaseIdx = blockIdx.x * 16*imgsPerThread;
const int numRegionsX = DIVUP(imgSize, 4);
const int blockRegionIdx = blockIdx.y;
const int blockRegionIdxX = blockRegionIdx % numRegionsX;
const int blockRegionIdxY = blockRegionIdx / numRegionsX;
const int blockRegionLeft = blockRegionIdxX * 4;
const int blockRegionTop = blockRegionIdxY * 4;
const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
const int pxY = blockRegionTop + pxYInRegion;
const int pxX = blockRegionLeft + pxXInRegion;
const int pxIdx = pxY * imgSize + pxX;
const bool isPxInImg = pxY < imgSize && pxX < imgSize;
const uint numModules = numModulesX * numModulesX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSize * imgSize;
const int tidx = threadIdx.y * 16 + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
hidActs += blockCaseIdx + loadY * numImages * numModulesX * numModulesX + loadX;
filters += threadIdx.x;
targets += pxIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[numColors][imgsPerThread];
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockRegionTop - paddingStart < filterSize ? 0
: 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesX, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
const int startX = blockRegionLeft - paddingStart < filterSize ? 0
: 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);
float* shilterLoad = &shFilters[threadIdx.y][threadIdx.x];
float* shHidActLoad = &shHidActs[loadY][loadX];
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInModuleY = pxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInModuleX = pxX - moduleLeft;
const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;
for (int f = 0; f < numFilters; f += 16) { // multiply with 16 filters at a time
// Now the threads split up into half-warps, and each half-warp decides if it's interested.
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * 16; i += 32) {
if (!checkCaseBounds || blockCaseIdx + i + loadX < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = 0;
}
}
}
if (isPxInImg && isPxInModule) {
// This half-warp is interested, so it's going to load the weights from this module to its pixel.
// Not fully coalesced read :(
// But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
: &filters[(moduleIdx * numColors * filterPixels + pxIdxInModule) * numFilters + f];
#pragma unroll
for (int c = 0; c < numColors; c++) {
shilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
if (isPxInImg && isPxInModule) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
}
}
}
}
__syncthreads();
}
}
}
// Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
if (isPxInImg) {
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i];
}
}
}
}
}
}
/*
* Block size: 16x16.
* blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/colorsPerThread
* blockIdx.y determines 4x4 image region in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines pixel.
*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgPixels, numImages)
*
* Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases.
*
* numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false.
* 16 * imgsPerThread must be divisible by 32.
* numImageColors/numGroups must be divisible by colorsPerThread.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are 4-16 color channels.
*/
template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_mediumcolor(const float* hidActs, const float* filters, float* targets,
const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSize, const int paddingStart, const int moduleStride,
const int numImgColors, const int numGroups,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread*16][16 + 1];
__shared__ float shHidActs[16][16*imgsPerThread];
const int numImgBlocks = DIVUP(numImages,16*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int numRegionsX = DIVUP(imgSize, 4);
const int blockRegionIdx = blockIdx.y;
const int blockRegionIdxX = blockRegionIdx % numRegionsX;
const int blockRegionIdxY = blockRegionIdx / numRegionsX;
const int blockRegionLeft = blockRegionIdxX * 4;
const int blockRegionTop = blockRegionIdxY * 4;
const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
const int pxY = blockRegionTop + pxYInRegion;
const int pxX = blockRegionLeft + pxXInRegion;
const int pxIdx = pxY * imgSize + pxX;
const bool isPxInImg = pxY < imgSize && pxX < imgSize;
// const uint numModules = numModulesX * numModulesX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSize * imgSize;
const int tidx = threadIdx.y * 16 + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * numModulesX * numModulesX + loadX;
filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x;
targets += imgColorIdx * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockRegionTop - paddingStart < filterSize ? 0
: 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesX, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
const int startX = blockRegionLeft - paddingStart < filterSize ? 0
: 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
float* shHidActLoad = &shHidActs[loadY][loadX];
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInModuleY = pxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInModuleX = pxX - moduleLeft;
const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;
for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
// Now the threads split up into half-warps, and each half-warp decides if it's interested.
const float* hLoad = &hidActs[(moduleIdx + f * numModulesX * numModulesX) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * 16; i += 32) {
if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModulesX * numModulesX * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = 0;
}
}
}
if (isPxInImg && isPxInModule) {
// This half-warp is interested, so it's going to load the weights from this module to its pixel.
// Not fully coalesced read :(
// But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
if (isPxInImg && isPxInModule) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
}
}
}
}
__syncthreads();
}
}
}
// Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
if (isPxInImg) {
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i];
}
}
}
}
}
}
/*
* Block size: B_YxB_X.
* blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread)
* blockIdx.y determines image pixel in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines color.
*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgPixels, numImages)
*
* Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false.
* numFiltersPerGroup must be divisible by 16.
*
* B_X * imgsPerThread must be divisible by 32.
* numFilterColors must be divisible by B_Y*colorsPerThread.
* B_X*B_Y must be divisible by 32.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are >= 16 color channels.
*/
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void conv_img_acts_manycolor(const float* hidActs, const float* filters, float* targets,
const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSize, const int paddingStart, const int moduleStride,
const int numImgColors, const int numGroups,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread*B_Y][16 + 1]; // TODO: perhaps reconsider this 16
__shared__ float shHidActs[16][B_X*imgsPerThread];
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int blockPixelIdx = blockIdx.y;
const int blockPixelIdxX = blockPixelIdx % imgSize;
const int blockPixelIdxY = blockPixelIdx / imgSize;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSize * imgSize;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
const int filtersLoadY = tidx / 16, filtersLoadX = tidx % 16;
const int numModules = numModulesX * numModulesX;
hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesX, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;
for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * B_X; i += 32) {
if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * B_X * imgsPerThread + i] = 0;
}
}
}
const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
#pragma unroll
for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/16) {
if ((colorsPerThread*B_Y) % (B_X*B_Y/16) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X];
}
}
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
}
}
}
}
}
/*
* Block size: 16x16.
* blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/colorsPerThread
* blockIdx.y determines 4x4 image region in target image, also sample
* In essence, blockIdx.y.x = 1..numRegions
* blockIdx.y.y = 1..overSample
*
* threadIdx.x determines case.
* threadIdx.y determines pixel.
*
* overSample := numFilterColors*numGroups/numImgColors
* ^ this is the number of groups that each color channel is connected to
*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgPixels, numImages)
*
* colorIndices: (numGroups, numFilterColors)
*
* Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases.
*
* numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false.
* 16 * imgsPerThread must be divisible by 32.
* numFilterColors must be divisible by colorsPerThread.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are 4-16 color channels.
*/
template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_mediumcolor_sparse_rand(const float* hidActs, const float* filters, float* targets, int* colorIndices,
const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSize, const int paddingStart, const int moduleStride,
const int numImgColors, const int numFilterColors, const int numGroups,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread*16][16 + 1];
__shared__ float shHidActs[16][16*imgsPerThread];
__shared__ int shColors[colorsPerThread]; // not really necessary -- can repurpose the other shmems
const int numImgBlocks = DIVUP(numImages,16*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread;
const int numRegionsX = DIVUP(imgSize, 4);
const int numRegions = numRegionsX * numRegionsX;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int overSample = gridDim.y / numRegions;
const int blockSample = blockIdx.y / numRegions;
const int groupsPerSample = numGroups / overSample;
const int blockGroupIdx = imgColorIdx / numFilterColors + blockSample * groupsPerSample;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int blockRegionIdx = blockIdx.y % numRegions;
const int blockRegionIdxX = blockRegionIdx % numRegionsX;
const int blockRegionIdxY = blockRegionIdx / numRegionsX;
const int blockRegionLeft = blockRegionIdxX * 4;
const int blockRegionTop = blockRegionIdxY * 4;
const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
const int pxY = blockRegionTop + pxYInRegion;
const int pxX = blockRegionLeft + pxXInRegion;
const int pxIdx = pxY * imgSize + pxX;
const bool isPxInImg = pxY < imgSize && pxX < imgSize;
// const uint numModules = numModulesX * numModulesX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSize * imgSize;
const int tidx = threadIdx.y * 16 + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * numModulesX * numModulesX + loadX;
filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x;
targets += blockSample * numImgColors * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x;
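    // From here on, hidActs points at this block's filter rows and case columns, filters points at
    // this group's filters for the first within-group color this block handles, and targets points at
    // this thread's case column within this block's (sample, pixel). All later indexing is relative to
    // these pre-offset pointers; the per-color plane offsets are added via shColors at write time.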
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockRegionTop - paddingStart < filterSize ? 0
: 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesX, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
const int startX = blockRegionLeft - paddingStart < filterSize ? 0
: 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
float* shHidActLoad = &shHidActs[loadY][loadX];
if (tidx < colorsPerThread) {
shColors[tidx] = colorIndices[blockGroupIdx * numFilterColors + filterColorIdx + tidx] * imgPixels * numImages;
}
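    // shColors now caches, for each of this block's colorsPerThread filter colors, the offset
    // (in floats) of the real image color plane it maps to, so the final write loop can index
    // targets without re-reading colorIndices from global memory.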
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInModuleY = pxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInModuleX = pxX - moduleLeft;
const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;
            for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
// Now the threads split up into half-warps, and each half-warp decides if it's interested.
const float* hLoad = &hidActs[(moduleIdx + f * numModulesX * numModulesX) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * 16; i += 32) {
if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModulesX * numModulesX * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = 0;
}
}
}
if (isPxInImg && isPxInModule) {
// This half-warp is interested, so it's going to load the weights from this module to its pixel.
// Not fully coalesced read :(
// But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
if (isPxInImg && isPxInModule) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
}
}
}
}
__syncthreads();
}
}
}
// Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
if (isPxInImg) {
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[shColors[c] + i * 16] = scaleTargets * targets[shColors[c] + i * 16] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[shColors[c] + i * 16] = scaleOutputs * prod[c][i];
}
}
}
}
}
}
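/*
 * Worked example for the kernel above (illustrative numbers only, chosen to satisfy the constraints
 * listed in its comment; they match the <8, 4, ...> instantiation launched by _imgActsSparse below):
 * imgsPerThread = 8, colorsPerThread = 4, numImages = 256, numFilterColors = 4, numGroups = 16,
 * numImgColors = 16, imgSize = 24.
 *   overSample   = numFilterColors*numGroups/numImgColors = 4
 *   numImgBlocks = DIVUP(256, 16*8) = 2, numRegionsX = DIVUP(24, 4) = 6, numRegions = 36
 *   gridDim      = (2 * (16/4), 4 * 36) = (8, 144)
 *   blockIdx.x % 2 picks the batch of 128 cases, blockIdx.x / 2 picks the color batch;
 *   blockIdx.y % 36 picks the 4x4 region, blockIdx.y / 36 picks the sample.
 */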
/*
* Block size: B_YxB_X.
* blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread)
* blockIdx.y determines image pixel in target image, sample idx.
* In essence, blockIdx.y.x = 1..imgPixels
* blockIdx.y.y = 1..overSample
*
* threadIdx.x determines case.
* threadIdx.y determines color.
*
* overSample := numFilterColors*numGroups/numImgColors
* ^ this is the number of groups that each color channel is connected to
*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgPixels, numImages)
*
* colorIndices: (numGroups, numFilterColors)
*
 * Each block reconstructs B_Y*colorsPerThread colors of one pixel for B_X*imgsPerThread cases.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false.
* numFiltersPerGroup must be divisible by 16.
* numFilterColors*numGroups must be divisible by numImgColors.
*
* B_X * imgsPerThread must be divisible by 32.
* numFilterColors must be divisible by B_Y*colorsPerThread.
* B_X*B_Y must be divisible by 32.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are >= 16 color channels.
*/
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_manycolor_sparse_rand(const float* hidActs, const float* filters, float* targets, int* colorIndices,
const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSize, const int paddingStart, const int moduleStride,
const int numImgColors, const int numFilterColors, const int numGroups,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread*B_Y][16 + 1]; // TODO: perhaps reconsider this 16
__shared__ float shHidActs[16][B_X*imgsPerThread];
__shared__ int shColors[colorsPerThread * B_Y]; // not really necessary -- can repurpose the other shmems
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSize * imgSize;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
const int filtersLoadY = tidx / 16, filtersLoadX = tidx % 16;
const int numModules = numModulesX * numModulesX;
const int overSample = gridDim.y / imgPixels;
const int blockSample = blockIdx.y / imgPixels;
const int groupsPerSample = numGroups / overSample;
// const int overSample = (numFilterColors * numGroups) / numImgColors;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y * colorsPerThread; // color idx globally
const int blockGroupIdx = imgColorIdx / numFilterColors + blockSample * groupsPerSample;
// const int filterColorsPerSample = numFilterColors / overSample;
const int blockPixelIdx = blockIdx.y % imgPixels;
const int blockPixelIdxX = blockPixelIdx % imgSize;
const int blockPixelIdxY = blockPixelIdx / imgSize;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
targets += blockSample * numImgColors * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesX, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];
if (tidx < colorsPerThread * B_Y) {
shColors[tidx] = colorIndices[blockGroupIdx * numFilterColors + filterColorIdx + tidx] * imgPixels * numImages;
}
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;
for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * B_X; i += 32) {
if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) {
#pragma unroll
                        for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*B_X cols, (B_X*B_Y/32) * 32 elements at a time.
shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
                        for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*B_X cols, (B_X*B_Y/32) * 32 elements at a time.
shHidActLoad[j * B_X * imgsPerThread + i] = 0;
}
}
}
const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
#pragma unroll
for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/16) {
if ((colorsPerThread*B_Y) % (B_X*B_Y/16) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X];
}
}
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[shColors[c * B_Y + threadIdx.y] + i * B_X] = scaleTargets * targets[shColors[c * B_Y + threadIdx.y] + i * B_X] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[shColors[c * B_Y + threadIdx.y] + i * B_X] = scaleOutputs * prod[c][i];
}
}
}
}
}
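/*
 * Launch-geometry sketch for the kernel above (illustrative only; this helper is not called
 * anywhere in this file). It mirrors the grid arithmetic used by _imgActsSparse below for the
 * B_Y = 4, B_X = 32, imgsPerThread = 4 instantiations; the function name and the hard-coded
 * colorsPerThread = 4 (the numFilterColors % 16 == 0 path) are assumptions made for the example.
 */
static inline dim3 exampleManycolorSparseRandGrid(int numImages, int numImgColors,
                                                  int imgPixels, int overSample) {
    const int B_X = 32, B_Y = 4;       // thread block is 32x4
    const int imgsPerThread = 4;       // each thread handles 4 cases
    const int colorsPerThread = 4;     // each thread handles 4 colors
    // x packs (image block, color block); y packs (pixel, sample)
    return dim3(DIVUP(numImages, B_X * imgsPerThread) * (numImgColors / (B_Y * colorsPerThread)),
                overSample * imgPixels);
}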
/*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgPixels, numImages)
*/
void _imgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput, bool conv) {
int numFilterColors = numImgColors / numGroups;
int numImages = hidActs.getNumCols();
int numFilters = filters.getNumCols();
int numFiltersPerGroup = numFilters / numGroups;
int numModules = hidActs.getNumRows() / numFilters;
int filterModuleMult = conv ? 1 : numModules;
int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
int filterSize = sqrt(filterPixels);
int imgPixels = imgSize * imgSize;
int numModulesX = sqrt(numModules);
assert(numImgColors % numGroups == 0);
assert(numFilters % (16*numGroups) == 0);
assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0)));
assert(numGroups == 1 || numFilterColors % 4 == 0);
assert(filterPixels == filterSize * filterSize);
assert(hidActs.getNumRows() == numModules * numFilters);
assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
assert(numModules == numModulesX * numModulesX);
assert(hidActs.isContiguous());
assert(filters.isContiguous());
assert(!hidActs.isTrans());
assert(!filters.isTrans());
assert(!targets.isTrans());
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0 && paddingStart + (numModules-1)*moduleStride + filterSize >= imgSize);
assert(moduleStride <= filterSize);
assert(targets.isContiguous()); // no stride support here!
dim3 blocks;
dim3 threads(16,16);
int colorsPerThread;
bool checkCaseBounds;
if (numFilterColors % 8 == 0) {
threads = dim3(32, 4);
colorsPerThread = numFilterColors % 16 == 0 ? 4 : 2;
int imgsPerThread = 4;
assert(numFilterColors % (threads.y * colorsPerThread) == 0);
checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0;
blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels);
} else if (numFilterColors > 3) {
colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2;
blocks = dim3(DIVUP(numImages,16*8) * (numImgColors / colorsPerThread), DIVUP(imgSize,4) * DIVUP(imgSize,4));
checkCaseBounds = numImages % (16*8) != 0;
} else {
blocks = dim3(DIVUP(numImages,16*8), DIVUP(imgSize,4) * DIVUP(imgSize,4));
checkCaseBounds = numImages % (16*8) != 0;
}
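    // Example of the selection above (illustrative numbers only): numImgColors = 32, numGroups = 1
    // => numFilterColors = 32, so the manycolor path is taken with threads = 32x4, imgsPerThread = 4,
    // colorsPerThread = 4 and blocks = (DIVUP(numImages, 128) * 2, imgPixels).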
if (scaleTargets == 0) { // do not scale or use targets matrix
targets.resize(numImgColors*imgPixels, numImages);
} else {
assert(targets.getNumRows() == numImgColors * imgPixels);
assert(targets.getNumCols() == numImages);
}
if (conv) { // convolutional units
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
} else { // local, unshared units
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
cutilCheckMsg("imgActs: kernel execution failed");
}
void convImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
_imgActs(hidActs, filters, targets, imgSize, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, true);
}
void convImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput) {
_imgActs(hidActs, filters, targets, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, true);
}
void localImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
_imgActs(hidActs, filters, targets, imgSize, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, false);
}
void localImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput) {
_imgActs(hidActs, filters, targets, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, false);
}
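/*
 * Usage sketch (hedged: the matrix shapes are those documented above; the variable names and the
 * way the NVMatrix objects are produced are assumptions, not part of this file):
 *
 *   // hidActs: (numFilters * numModules) x numImages, filters: (numFilterColors * filterPixels) x numFilters.
 *   // convImgActs resizes targets to (numImgColors * imgPixels) x numImages and overwrites it.
 *   convImgActs(hidActs, filters, targets, imgSize, paddingStart, moduleStride, numImgColors, numGroups);
 */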
/*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgPixels, numImages)
* colorIndices: (numGroups, numFilterColors)
*
* where overSample := (numFilterColors * numGroups) / numImgColors
*
*/
void _imgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups,
float scaleTargets, float scaleOutput, bool conv) {
int numImages = hidActs.getNumCols();
int numFilters = filters.getNumCols();
// int numFiltersPerGroup = numFilters / numGroups;
int numModules = hidActs.getNumRows() / numFilters;
int filterModuleMult = conv ? 1 : numModules;
int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
int filterSize = sqrt(filterPixels);
int imgPixels = imgSize * imgSize;
int numModulesX = sqrt(numModules);
int overSample = (numFilterColors * numGroups) / numImgColors;
assert(numImgColors % numFilterColors == 0);
assert(numFilters % (16*numGroups) == 0);
assert((numFilterColors * numGroups) % numImgColors == 0);
assert(numGroups > 1);
assert(numFilterColors > 3 && numFilterColors % 2 == 0);
assert(filterPixels == filterSize * filterSize);
assert(hidActs.getNumRows() == numModules * numFilters);
assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
assert(numModules == numModulesX * numModulesX);
assert(hidActs.isContiguous());
assert(filters.isContiguous());
assert(!hidActs.isTrans());
assert(!filters.isTrans());
assert(!targets.isTrans());
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0 && paddingStart + (numModules-1)*moduleStride + filterSize >= imgSize);
assert(moduleStride <= filterSize);
assert(targets.isContiguous()); // no stride support here!
dim3 blocks;
dim3 threads;
int colorsPerThread;
bool checkCaseBounds;
if (numFilterColors % 8 == 0) {
threads = dim3(32, 4);
colorsPerThread = numFilterColors % 16 == 0 ? 4 : 2;
int imgsPerThread = 4;
assert(numFilterColors % (threads.y * colorsPerThread) == 0);
checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0;
blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), overSample * imgPixels);
} else if (numFilterColors > 3) {
threads = dim3(16, 16);
colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2;
blocks = dim3(DIVUP(numImages,16*8) * (numImgColors / colorsPerThread), overSample * DIVUP(imgSize,4) * DIVUP(imgSize,4));
checkCaseBounds = numImages % (16*8) != 0;
}
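    // No branch is needed for numFilterColors <= 3: the assert above requires numFilterColors > 3,
    // so one of the two cases above always sets threads, colorsPerThread, blocks and checkCaseBounds.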
if (scaleTargets == 0) { // do not scale or use targets matrix
targets.resize(overSample*numImgColors*imgPixels, numImages);
} else {
assert(targets.getNumRows() == overSample * numImgColors * imgPixels);
assert(targets.getNumCols() == numImages);
}
if (conv) {
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
} else {
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
}
cutilCheckMsg("imgActsSparse: kernel execution failed");
}
void convImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups) {
_imgActsSparse(hidActs, filters, targets, dColorIndices, imgSize, paddingStart,
moduleStride, numImgColors, numFilterColors, numGroups, 0, 1, true);
}
void convImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups,
float scaleTargets, float scaleOutput) {
_imgActsSparse(hidActs, filters, targets, dColorIndices, imgSize, paddingStart, moduleStride,
numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput, true);
}
void localImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups) {
_imgActsSparse(hidActs, filters, targets, dColorIndices, imgSize, paddingStart,
moduleStride, numImgColors, numFilterColors, numGroups, 0, 1, false);
}
void localImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups,
float scaleTargets, float scaleOutput) {
_imgActsSparse(hidActs, filters, targets, dColorIndices, imgSize, paddingStart, moduleStride,
numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput, false);
}
|
7fb3ebf3dbee321b25f8a19f449585b063d0b7dc.cu
|
/*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cudaconv2.cuh>
/*
* Block size: 16x16.
* blockIdx.x determines case in batches of 16*imgsPerThread.
* blockIdx.y determines 4x4 image region in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines pixel.
*
* hidActs: (numFilters, numModules, numImages)
* filters: (numColors, filterPixels, numFilters) if conv
* (numModules, numColors, filterPixels, numFilters) otherwise
* targets: (numColors, imgPixels, numImages)
*
* Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases.
*
* Number of filters must be divisible by 16.
* Number of images must be divisible by 16*imgsPerThread if checkCaseBounds is false.
* 16 * imgsPerThread must be divisible by 32.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*/
template <int imgsPerThread, int numColors, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_color(const float* hidActs, const float* filters, float* targets,
const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSize, const int paddingStart, const int moduleStride,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[numColors*16][16 + 1];
__shared__ float shHidActs[16][16*imgsPerThread];
const int blockCaseIdx = blockIdx.x * 16*imgsPerThread;
const int numRegionsX = DIVUP(imgSize, 4);
const int blockRegionIdx = blockIdx.y;
const int blockRegionIdxX = blockRegionIdx % numRegionsX;
const int blockRegionIdxY = blockRegionIdx / numRegionsX;
const int blockRegionLeft = blockRegionIdxX * 4;
const int blockRegionTop = blockRegionIdxY * 4;
const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
const int pxY = blockRegionTop + pxYInRegion;
const int pxX = blockRegionLeft + pxXInRegion;
const int pxIdx = pxY * imgSize + pxX;
const bool isPxInImg = pxY < imgSize && pxX < imgSize;
const uint numModules = numModulesX * numModulesX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSize * imgSize;
const int tidx = threadIdx.y * 16 + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
hidActs += blockCaseIdx + loadY * numImages * numModulesX * numModulesX + loadX;
filters += threadIdx.x;
targets += pxIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[numColors][imgsPerThread];
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockRegionTop - paddingStart < filterSize ? 0
: 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesX, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
const int startX = blockRegionLeft - paddingStart < filterSize ? 0
: 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
float* shHidActLoad = &shHidActs[loadY][loadX];
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInModuleY = pxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInModuleX = pxX - moduleLeft;
const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;
for (int f = 0; f < numFilters; f += 16) { // multiply with 16 filters at a time
// Now the threads split up into half-warps, and each half-warp decides if it's interested.
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * 16; i += 32) {
if (!checkCaseBounds || blockCaseIdx + i + loadX < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = 0;
}
}
}
if (isPxInImg && isPxInModule) {
// This half-warp is interested, so it's going to load the weights from this module to its pixel.
// Not fully coalesced read :(
// But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
: &filters[(moduleIdx * numColors * filterPixels + pxIdxInModule) * numFilters + f];
#pragma unroll
for (int c = 0; c < numColors; c++) {
shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
if (isPxInImg && isPxInModule) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
}
}
}
}
__syncthreads();
}
}
}
// Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
if (isPxInImg) {
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i];
}
}
}
}
}
}
/*
* Block size: 16x16.
* blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/colorsPerThread
* blockIdx.y determines 4x4 image region in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines pixel.
*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgPixels, numImages)
*
* Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases.
*
* numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false.
* 16 * imgsPerThread must be divisible by 32.
* numImageColors/numGroups must be divisible by colorsPerThread.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are 4-16 color channels.
*/
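/*
 * Worked example (editorial sketch, hypothetical sizes): numImages = 256, numImgColors = 12,
 * numGroups = 1, so numFilterColors = 12; the dispatcher below picks colorsPerThread = 4 and
 * imgsPerThread = 8. Then
 *
 *   numImgBlocks = DIVUP(256, 16*8) = 2,  gridDim.x = 2 * (12 / 4) = 6;
 *   blockIdx.x = 5  =>  blockCaseIdx   = (5 % 2) * 128 = 128,
 *                       imgColorIdx    = (5 / 2) * 4   = 8,
 *                       filterColorIdx = 8 % 12 = 8,  blockGroupIdx = 8 / 12 = 0.
 */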
template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_mediumcolor(const float* hidActs, const float* filters, float* targets,
const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSize, const int paddingStart, const int moduleStride,
const int numImgColors, const int numGroups,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread*16][16 + 1];
__shared__ float shHidActs[16][16*imgsPerThread];
const int numImgBlocks = DIVUP(numImages,16*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int numRegionsX = DIVUP(imgSize, 4);
const int blockRegionIdx = blockIdx.y;
const int blockRegionIdxX = blockRegionIdx % numRegionsX;
const int blockRegionIdxY = blockRegionIdx / numRegionsX;
const int blockRegionLeft = blockRegionIdxX * 4;
const int blockRegionTop = blockRegionIdxY * 4;
const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
const int pxY = blockRegionTop + pxYInRegion;
const int pxX = blockRegionLeft + pxXInRegion;
const int pxIdx = pxY * imgSize + pxX;
const bool isPxInImg = pxY < imgSize && pxX < imgSize;
// const uint numModules = numModulesX * numModulesX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSize * imgSize;
const int tidx = threadIdx.y * 16 + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * numModulesX * numModulesX + loadX;
filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x;
targets += imgColorIdx * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockRegionTop - paddingStart < filterSize ? 0
: 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesX, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
const int startX = blockRegionLeft - paddingStart < filterSize ? 0
: 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
float* shHidActLoad = &shHidActs[loadY][loadX];
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInModuleY = pxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInModuleX = pxX - moduleLeft;
const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;
for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
// Now the threads split up into half-warps, and each half-warp decides if it's interested.
const float* hLoad = &hidActs[(moduleIdx + f * numModulesX * numModulesX) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * 16; i += 32) {
if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModulesX * numModulesX * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = 0;
}
}
}
if (isPxInImg && isPxInModule) {
// This half-warp is interested, so it's going to load the weights from this module to its pixel.
// Not fully coalesced read :(
// But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
if (isPxInImg && isPxInModule) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
}
}
}
}
__syncthreads();
}
}
}
// Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
if (isPxInImg) {
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i];
}
}
}
}
}
}
/*
* Block size: B_YxB_X.
* blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread)
* blockIdx.y determines image pixel in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines color.
*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgPixels, numImages)
*
* Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false.
* numFiltersPerGroup must be divisible by 16.
*
* B_X * imgsPerThread must be divisible by 32.
* numFilterColors must be divisible by B_Y*colorsPerThread.
* B_X*B_Y must be divisible by 32.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are >= 16 color channels.
*/
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void conv_img_acts_manycolor(const float* hidActs, const float* filters, float* targets,
const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSize, const int paddingStart, const int moduleStride,
const int numImgColors, const int numGroups,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread*B_Y][16 + 1]; // TODO: perhaps reconsider this 16
__shared__ float shHidActs[16][B_X*imgsPerThread];
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int blockPixelIdx = blockIdx.y;
const int blockPixelIdxX = blockPixelIdx % imgSize;
const int blockPixelIdxY = blockPixelIdx / imgSize;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSize * imgSize;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
const int filtersLoadY = tidx / 16, filtersLoadX = tidx % 16;
const int numModules = numModulesX * numModulesX;
hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesX, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;
for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * B_X; i += 32) {
if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of B_X*imgsPerThread cols, (B_X*B_Y/32) * 32 elements at a time.
shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of B_X*imgsPerThread cols, (B_X*B_Y/32) * 32 elements at a time.
shHidActLoad[j * B_X * imgsPerThread + i] = 0;
}
}
}
const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
#pragma unroll
for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/16) {
if ((colorsPerThread*B_Y) % (B_X*B_Y/16) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X];
}
}
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
}
}
}
}
}
/*
* Block size: 16x16.
* blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/colorsPerThread
* blockIdx.y determines 4x4 image region in target image, also sample
* In essence, blockIdx.y.x = 1..numRegions
* blockIdx.y.y = 1..overSample
*
* threadIdx.x determines case.
* threadIdx.y determines pixel.
*
* overSample := numFilterColors*numGroups/numImgColors
* ^ this is the number of groups that each color channel is connected to
*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgPixels, numImages)
*
* colorIndices: (numGroups, numFilterColors)
*
* Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases.
*
* numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false.
* 16 * imgsPerThread must be divisible by 32.
* numFilterColors must be divisible by colorsPerThread.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are 4-16 color channels.
*/
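/*
 * Worked example (editorial sketch, hypothetical configuration): numImgColors = 8,
 * numFilterColors = 4, numGroups = 4, so overSample = 4*4/8 = 2 and each image color is
 * connected to two groups. Assuming a launch with gridDim.y = overSample * numRegions and
 * colorsPerThread = 4:
 *
 *   groupsPerSample = numGroups / overSample = 2;
 *   a block with blockSample = 1 and imgColorIdx = 4 gets
 *     blockGroupIdx  = 4/4 + 1*2 = 3,   filterColorIdx = 4 % 4 = 0,
 *   and writes into the blockSample = 1 slab of the
 *   (overSample, numImgColors, imgPixels, numImages) target array.
 */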
template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_mediumcolor_sparse_rand(const float* hidActs, const float* filters, float* targets, int* colorIndices,
const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSize, const int paddingStart, const int moduleStride,
const int numImgColors, const int numFilterColors, const int numGroups,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread*16][16 + 1];
__shared__ float shHidActs[16][16*imgsPerThread];
__shared__ int shColors[colorsPerThread]; // not really necessary -- can repurpose the other shmems
const int numImgBlocks = DIVUP(numImages,16*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread;
const int numRegionsX = DIVUP(imgSize, 4);
const int numRegions = numRegionsX * numRegionsX;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int overSample = gridDim.y / numRegions;
const int blockSample = blockIdx.y / numRegions;
const int groupsPerSample = numGroups / overSample;
const int blockGroupIdx = imgColorIdx / numFilterColors + blockSample * groupsPerSample;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int blockRegionIdx = blockIdx.y % numRegions;
const int blockRegionIdxX = blockRegionIdx % numRegionsX;
const int blockRegionIdxY = blockRegionIdx / numRegionsX;
const int blockRegionLeft = blockRegionIdxX * 4;
const int blockRegionTop = blockRegionIdxY * 4;
const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
const int pxY = blockRegionTop + pxYInRegion;
const int pxX = blockRegionLeft + pxXInRegion;
const int pxIdx = pxY * imgSize + pxX;
const bool isPxInImg = pxY < imgSize && pxX < imgSize;
// const uint numModules = numModulesX * numModulesX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSize * imgSize;
const int tidx = threadIdx.y * 16 + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * numModulesX * numModulesX + loadX;
filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x;
targets += blockSample * numImgColors * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockRegionTop - paddingStart < filterSize ? 0
: 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesX, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
const int startX = blockRegionLeft - paddingStart < filterSize ? 0
: 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
float* shHidActLoad = &shHidActs[loadY][loadX];
if (tidx < colorsPerThread) {
shColors[tidx] = colorIndices[blockGroupIdx * numFilterColors + filterColorIdx + tidx] * imgPixels * numImages;
}
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInModuleY = pxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInModuleX = pxX - moduleLeft;
const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;
for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
// Now the threads split up into half-warps, and each half-warp decides if it's interested.
const float* hLoad = &hidActs[(moduleIdx + f * numModulesX * numModulesX) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * 16; i += 32) {
if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModulesX * numModulesX * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = 0;
}
}
}
if (isPxInImg && isPxInModule) {
// This half-warp is interested, so it's going to load the weights from this module to its pixel.
// Not fully coalesced read :(
// But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
if (isPxInImg && isPxInModule) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
}
}
}
}
__syncthreads();
}
}
}
// Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
if (isPxInImg) {
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[shColors[c] + i * 16] = scaleTargets * targets[shColors[c] + i * 16] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[shColors[c] + i * 16] = scaleOutputs * prod[c][i];
}
}
}
}
}
}
/*
* Block size: B_YxB_X.
* blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread)
* blockIdx.y determines image pixel in target image, sample idx.
* In essence, blockIdx.y.x = 1..imgPixels
* blockIdx.y.y = 1..overSample
*
* threadIdx.x determines case.
* threadIdx.y determines color.
*
* overSample := numFilterColors*numGroups/numImgColors
* ^ this is the number of groups that each color channel is connected to
*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgPixels, numImages)
*
* colorIndices: (numGroups, numFilterColors)
*
* Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false.
* numFiltersPerGroup must be divisible by 16.
* numFilterColors*numGroups must be divisible by numImgColors.
*
* B_X * imgsPerThread must be divisible by 32.
* numFilterColors must be divisible by B_Y*colorsPerThread.
* B_X*B_Y must be divisible by 32.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are >= 16 color channels.
*/
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_manycolor_sparse_rand(const float* hidActs, const float* filters, float* targets, int* colorIndices,
const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSize, const int paddingStart, const int moduleStride,
const int numImgColors, const int numFilterColors, const int numGroups,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread*B_Y][16 + 1]; // TODO: perhaps reconsider this 16
__shared__ float shHidActs[16][B_X*imgsPerThread];
__shared__ int shColors[colorsPerThread * B_Y]; // not really necessary -- can repurpose the other shmems
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSize * imgSize;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
const int filtersLoadY = tidx / 16, filtersLoadX = tidx % 16;
const int numModules = numModulesX * numModulesX;
const int overSample = gridDim.y / imgPixels;
const int blockSample = blockIdx.y / imgPixels;
const int groupsPerSample = numGroups / overSample;
// const int overSample = (numFilterColors * numGroups) / numImgColors;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y * colorsPerThread; // color idx globally
const int blockGroupIdx = imgColorIdx / numFilterColors + blockSample * groupsPerSample;
// const int filterColorsPerSample = numFilterColors / overSample;
const int blockPixelIdx = blockIdx.y % imgPixels;
const int blockPixelIdxX = blockPixelIdx % imgSize;
const int blockPixelIdxY = blockPixelIdx / imgSize;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
targets += blockSample * numImgColors * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesX, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];
if (tidx < colorsPerThread * B_Y) {
shColors[tidx] = colorIndices[blockGroupIdx * numFilterColors + filterColorIdx + tidx] * imgPixels * numImages;
}
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;
for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * B_X; i += 32) {
if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of B_X*imgsPerThread cols, (B_X*B_Y/32) * 32 elements at a time.
shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of B_X*imgsPerThread cols, (B_X*B_Y/32) * 32 elements at a time.
shHidActLoad[j * B_X * imgsPerThread + i] = 0;
}
}
}
const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
#pragma unroll
for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/16) {
if ((colorsPerThread*B_Y) % (B_X*B_Y/16) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X];
}
}
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[shColors[c * B_Y + threadIdx.y] + i * B_X] = scaleTargets * targets[shColors[c * B_Y + threadIdx.y] + i * B_X] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[shColors[c * B_Y + threadIdx.y] + i * B_X] = scaleOutputs * prod[c][i];
}
}
}
}
}
/*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgPixels, numImages)
*/
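/*
 * Worked example (editorial sketch, hypothetical shapes): a convolutional layer with
 * numImgColors = 3, numGroups = 1, 64 filters of size 5x5 and 28x28 output modules gives
 *
 *   filters: (3 * 25, 64)           =>  filterPixels = 75 / (1 * 3) = 25, filterSize = 5
 *   hidActs: (64 * 784, numImages)  =>  numModules = 784, numModulesX = 28
 *
 * and since numFilterColors = 3, dispatch falls through to the img_acts_color<8, 3, ...>
 * kernels below.
 */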
void _imgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput, bool conv) {
int numFilterColors = numImgColors / numGroups;
int numImages = hidActs.getNumCols();
int numFilters = filters.getNumCols();
int numFiltersPerGroup = numFilters / numGroups;
int numModules = hidActs.getNumRows() / numFilters;
int filterModuleMult = conv ? 1 : numModules;
int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
int filterSize = sqrt(filterPixels);
int imgPixels = imgSize * imgSize;
int numModulesX = sqrt(numModules);
assert(numImgColors % numGroups == 0);
assert(numFilters % (16*numGroups) == 0);
assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0)));
assert(numGroups == 1 || numFilterColors % 4 == 0);
assert(filterPixels == filterSize * filterSize);
assert(hidActs.getNumRows() == numModules * numFilters);
assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
assert(numModules == numModulesX * numModulesX);
assert(hidActs.isContiguous());
assert(filters.isContiguous());
assert(!hidActs.isTrans());
assert(!filters.isTrans());
assert(!targets.isTrans());
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0 && paddingStart + (numModules-1)*moduleStride + filterSize >= imgSize);
assert(moduleStride <= filterSize);
assert(targets.isContiguous()); // no stride support here!
dim3 blocks;
dim3 threads(16,16);
int colorsPerThread;
bool checkCaseBounds;
if (numFilterColors % 8 == 0) {
threads = dim3(32, 4);
colorsPerThread = numFilterColors % 16 == 0 ? 4 : 2;
int imgsPerThread = 4;
assert(numFilterColors % (threads.y * colorsPerThread) == 0);
checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0;
blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels);
} else if (numFilterColors > 3) {
colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2;
blocks = dim3(DIVUP(numImages,16*8) * (numImgColors / colorsPerThread), DIVUP(imgSize,4) * DIVUP(imgSize,4));
checkCaseBounds = numImages % (16*8) != 0;
} else {
blocks = dim3(DIVUP(numImages,16*8), DIVUP(imgSize,4) * DIVUP(imgSize,4));
checkCaseBounds = numImages % (16*8) != 0;
}
if (scaleTargets == 0) { // do not scale or use targets matrix
targets.resize(numImgColors*imgPixels, numImages);
} else {
assert(targets.getNumRows() == numImgColors * imgPixels);
assert(targets.getNumCols() == numImages);
}
if (conv) { // convolutional units
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, false, true, true>, cudaFuncCachePreferShared);
img_acts_color<8, 1, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_color<8, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, false, true, true>, cudaFuncCachePreferShared);
img_acts_color<8, 3, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, false, false, true>, cudaFuncCachePreferShared);
img_acts_color<8, 1, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_color<8, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, false, false, true>, cudaFuncCachePreferShared);
img_acts_color<8, 3, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, true, true, true>, cudaFuncCachePreferShared);
img_acts_color<8, 1, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_color<8, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, true, true, true>, cudaFuncCachePreferShared);
img_acts_color<8, 3, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, true, false, true>, cudaFuncCachePreferShared);
img_acts_color<8, 1, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_color<8, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, true, false, true>, cudaFuncCachePreferShared);
img_acts_color<8, 3, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
} else { // local, unshared units
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, true, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, true, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, false, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, false, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, false, true, false>, cudaFuncCachePreferShared);
img_acts_color<8, 1, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, false, true, false>, cudaFuncCachePreferShared);
img_acts_color<8, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, false, true, false>, cudaFuncCachePreferShared);
img_acts_color<8, 3, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, false, false, false>, cudaFuncCachePreferShared);
img_acts_color<8, 1, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, false, false, false>, cudaFuncCachePreferShared);
img_acts_color<8, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, false, false, false>, cudaFuncCachePreferShared);
img_acts_color<8, 3, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, true, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, true, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, false, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, false, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, true, true, false>, cudaFuncCachePreferShared);
img_acts_color<8, 1, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, true, true, false>, cudaFuncCachePreferShared);
img_acts_color<8, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, true, true, false>, cudaFuncCachePreferShared);
img_acts_color<8, 3, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, true, false, false>, cudaFuncCachePreferShared);
img_acts_color<8, 1, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, true, false, false>, cudaFuncCachePreferShared);
img_acts_color<8, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, true, false, false>, cudaFuncCachePreferShared);
img_acts_color<8, 3, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
cutilCheckMsg("imgActs: kernel execution failed");
}
void convImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
_imgActs(hidActs, filters, targets, imgSize, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, true);
}
void convImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput) {
_imgActs(hidActs, filters, targets, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, true);
}
void localImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
_imgActs(hidActs, filters, targets, imgSize, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, false);
}
void localImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput) {
_imgActs(hidActs, filters, targets, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, false);
}
/*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgPixels, numImages)
* colorIndices: (numGroups, numFilterColors)
*
* where overSample := (numFilterColors * numGroups) / numImgColors
*
*/
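/*
 * Worked example (hypothetical sizes, not taken from any caller): with
 * numImgColors = 8, numFilterColors = 4 and numGroups = 4,
 * overSample = (4 * 4) / 8 = 2, so targets holds two stacked
 * (numImgColors, imgPixels, numImages) blocks, one per covering of the image colors.
 */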
void _imgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups,
float scaleTargets, float scaleOutput, bool conv) {
int numImages = hidActs.getNumCols();
int numFilters = filters.getNumCols();
// int numFiltersPerGroup = numFilters / numGroups;
int numModules = hidActs.getNumRows() / numFilters;
int filterModuleMult = conv ? 1 : numModules;
int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
int filterSize = sqrt(filterPixels);
int imgPixels = imgSize * imgSize;
int numModulesX = sqrt(numModules);
int overSample = (numFilterColors * numGroups) / numImgColors;
assert(numImgColors % numFilterColors == 0);
assert(numFilters % (16*numGroups) == 0);
assert((numFilterColors * numGroups) % numImgColors == 0);
assert(numGroups > 1);
assert(numFilterColors > 3 && numFilterColors % 2 == 0);
assert(filterPixels == filterSize * filterSize);
assert(hidActs.getNumRows() == numModules * numFilters);
assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
assert(numModules == numModulesX * numModulesX);
assert(hidActs.isContiguous());
assert(filters.isContiguous());
assert(!hidActs.isTrans());
assert(!filters.isTrans());
assert(!targets.isTrans());
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0 && paddingStart + (numModules-1)*moduleStride + filterSize >= imgSize);
assert(moduleStride <= filterSize);
assert(targets.isContiguous()); // no stride support here!
dim3 blocks;
dim3 threads;
int colorsPerThread;
bool checkCaseBounds;
if (numFilterColors % 8 == 0) {
threads = dim3(32, 4);
colorsPerThread = numFilterColors % 16 == 0 ? 4 : 2;
int imgsPerThread = 4;
assert(numFilterColors % (threads.y * colorsPerThread) == 0);
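// checkCaseBounds selects the kernel variants that re-check the image index,
// needed when numImages is not a multiple of the images handled per thread block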
checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0;
blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), overSample * imgPixels);
} else if (numFilterColors > 3) {
threads = dim3(16, 16);
colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2;
blocks = dim3(DIVUP(numImages,16*8) * (numImgColors / colorsPerThread), overSample * DIVUP(imgSize,4) * DIVUP(imgSize,4));
checkCaseBounds = numImages % (16*8) != 0;
}
if (scaleTargets == 0) { // do not scale or use targets matrix
targets.resize(overSample*numImgColors*imgPixels, numImages);
} else {
assert(targets.getNumRows() == overSample * numImgColors * imgPixels);
assert(targets.getNumCols() == numImages);
}
if (conv) {
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
} else {
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors > 3) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
}
cutilCheckMsg("imgActsSparse: kernel execution failed");
}
void convImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups) {
_imgActsSparse(hidActs, filters, targets, dColorIndices, imgSize, paddingStart,
moduleStride, numImgColors, numFilterColors, numGroups, 0, 1, true);
}
void convImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups,
float scaleTargets, float scaleOutput) {
_imgActsSparse(hidActs, filters, targets, dColorIndices, imgSize, paddingStart, moduleStride,
numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput, true);
}
void localImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups) {
_imgActsSparse(hidActs, filters, targets, dColorIndices, imgSize, paddingStart,
moduleStride, numImgColors, numFilterColors, numGroups, 0, 1, false);
}
void localImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
int imgSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups,
float scaleTargets, float scaleOutput) {
_imgActsSparse(hidActs, filters, targets, dColorIndices, imgSize, paddingStart, moduleStride,
numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput, false);
}
|
333d8e6516f1dfda57ad28700fea9a5a4dc75d2c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2020 by Contributors
* \file relu_lib.cu
* \brief simple custom relu operator implemented using CUDA function
*/
#include <iostream>
#include "lib_api.h"
__global__ void relu_gpu_forward(float *out, float *in, int64_t N) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N)
out[tid] = in[tid] > 0 ? in[tid] : 0;
}
__global__ void relu_gpu_backward(float *ingrad, float *outgrad, float *indata, int64_t N) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N)
ingrad[tid] = indata[tid] > 0 ? 1 * outgrad[tid] : 0;
}
MXReturnValue forwardCPU(std::map<std::string, std::string> attrs,
std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource res) {
float* in_data = inputs[0].data<float>();
float* out_data = outputs[0].data<float>();
for (int i=0; i<inputs[0].size(); i++) {
out_data[i] = in_data[i] > 0 ? in_data[i] : 0;
}
return MX_SUCCESS;
}
MXReturnValue backwardCPU(std::map<std::string, std::string> attrs,
std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource res) {
float* out_grad = inputs[0].data<float>();
float* in_data = inputs[1].data<float>();
float* in_grad = outputs[0].data<float>();
for (int i=0; i<inputs[1].size(); i++) {
in_grad[i] = in_data[i] > 0 ? 1 * out_grad[i] : 0;
}
return MX_SUCCESS;
}
MXReturnValue forwardGPU(std::map<std::string, std::string> attrs,
std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource res) {
float* in_data = inputs[0].data<float>();
float* out_data = outputs[0].data<float>();
mx_stream_t cuda_stream = res.get_cuda_stream();
int64_t N = inputs[0].size();
int block = 256;
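// ceiling division: enough 256-thread blocks to cover all N elements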
int grid = (N + (block - 1)) / block;
hipLaunchKernelGGL(( relu_gpu_forward), dim3(grid),dim3(block),0,cuda_stream, out_data, in_data, N);
return MX_SUCCESS;
}
MXReturnValue backwardGPU(std::map<std::string, std::string> attrs,
std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource res) {
float* out_grad = inputs[0].data<float>();
float* in_data = inputs[1].data<float>();
float* in_grad = outputs[0].data<float>();
mx_stream_t cuda_stream = res.get_cuda_stream();
int64_t N = inputs[0].size();
int block = 256;
int grid = (N + (block - 1)) / block;
hipLaunchKernelGGL(( relu_gpu_backward), dim3(grid),dim3(block),0,cuda_stream, in_grad, out_grad, in_data, N);
return MX_SUCCESS;
}
MXReturnValue parseAttrs(std::map<std::string, std::string> attrs, int* num_in, int* num_out) {
*num_in = 1;
*num_out = 1;
return MX_SUCCESS;
}
MXReturnValue inferType(std::map<std::string, std::string> attrs,
std::vector<int> &intypes,
std::vector<int> &outtypes) {
outtypes[0] = intypes[0];
return MX_SUCCESS;
}
MXReturnValue inferShape(std::map<std::string, std::string> attrs,
std::vector<std::vector<unsigned int>> &inshapes,
std::vector<std::vector<unsigned int>> &outshapes) {
outshapes[0] = inshapes[0];
return MX_SUCCESS;
}
REGISTER_OP(my_relu)
.setParseAttrs(parseAttrs)
.setInferType(inferType)
.setInferShape(inferShape)
.setForward(forwardCPU, "cpu")
.setForward(forwardGPU, "gpu")
.setBackward(backwardCPU, "cpu")
.setBackward(backwardGPU, "gpu");
class MyStatefulReluCPU : public CustomStatefulOp {
public:
explicit MyStatefulReluCPU() {}
MXReturnValue Forward(std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource op_res) {
std::map<std::string, std::string> attrs;
return forwardCPU(attrs, inputs, outputs, op_res);
}
MXReturnValue Backward(std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource op_res) {
std::map<std::string, std::string> attrs;
return backwardCPU(attrs, inputs, outputs, op_res);
}
~MyStatefulReluCPU() {}
};
class MyStatefulReluGPU : public CustomStatefulOp {
public:
explicit MyStatefulReluGPU() {}
MXReturnValue Forward(std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource op_res) {
std::map<std::string, std::string> attrs;
return forwardGPU(attrs, inputs, outputs, op_res);
}
MXReturnValue Backward(std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource op_res) {
std::map<std::string, std::string> attrs;
return backwardGPU(attrs, inputs, outputs, op_res);
}
~MyStatefulReluGPU() {}
};
MXReturnValue createOpStateCPU(std::map<std::string, std::string> attrs,
CustomStatefulOp** op_inst) {
*op_inst = new MyStatefulReluCPU();
return MX_SUCCESS;
}
MXReturnValue createOpStateGPU(std::map<std::string, std::string> attrs,
CustomStatefulOp** op_inst) {
*op_inst = new MyStatefulReluGPU();
return MX_SUCCESS;
}
REGISTER_OP(my_state_relu)
.setParseAttrs(parseAttrs)
.setInferType(inferType)
.setInferShape(inferShape)
.setCreateOpState(createOpStateCPU, "cpu")
.setCreateOpState(createOpStateGPU, "gpu");
MXReturnValue initialize(int version) {
if (version >= 10400) {
std::cout << "MXNet version " << version << " supported" << std::endl;
return MX_SUCCESS;
} else {
std::cout << "MXNet version " << version << " not supported" << std::endl;
return MX_FAIL;
}
}
|
333d8e6516f1dfda57ad28700fea9a5a4dc75d2c.cu
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2020 by Contributors
* \file relu_lib.cu
* \brief simple custom relu operator implemented using CUDA function
*/
#include <iostream>
#include "lib_api.h"
__global__ void relu_gpu_forward(float *out, float *in, int64_t N) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N)
out[tid] = in[tid] > 0 ? in[tid] : 0;
}
__global__ void relu_gpu_backward(float *ingrad, float *outgrad, float *indata, int64_t N) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N)
ingrad[tid] = indata[tid] > 0 ? 1 * outgrad[tid] : 0;
}
MXReturnValue forwardCPU(std::map<std::string, std::string> attrs,
std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource res) {
float* in_data = inputs[0].data<float>();
float* out_data = outputs[0].data<float>();
for (int i=0; i<inputs[0].size(); i++) {
out_data[i] = in_data[i] > 0 ? in_data[i] : 0;
}
return MX_SUCCESS;
}
MXReturnValue backwardCPU(std::map<std::string, std::string> attrs,
std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource res) {
float* out_grad = inputs[0].data<float>();
float* in_data = inputs[1].data<float>();
float* in_grad = outputs[0].data<float>();
for (int i=0; i<inputs[1].size(); i++) {
in_grad[i] = in_data[i] > 0 ? 1 * out_grad[i] : 0;
}
return MX_SUCCESS;
}
MXReturnValue forwardGPU(std::map<std::string, std::string> attrs,
std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource res) {
float* in_data = inputs[0].data<float>();
float* out_data = outputs[0].data<float>();
mx_stream_t cuda_stream = res.get_cuda_stream();
int64_t N = inputs[0].size();
int block = 256;
int grid = (N + (block - 1)) / block;
relu_gpu_forward<<<grid,block,0,cuda_stream>>>(out_data, in_data, N);
return MX_SUCCESS;
}
MXReturnValue backwardGPU(std::map<std::string, std::string> attrs,
std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource res) {
float* out_grad = inputs[0].data<float>();
float* in_data = inputs[1].data<float>();
float* in_grad = outputs[0].data<float>();
mx_stream_t cuda_stream = res.get_cuda_stream();
int64_t N = inputs[0].size();
int block = 256;
int grid = (N + (block - 1)) / block;
relu_gpu_backward<<<grid,block,0,cuda_stream>>>(in_grad, out_grad, in_data, N);
return MX_SUCCESS;
}
MXReturnValue parseAttrs(std::map<std::string, std::string> attrs, int* num_in, int* num_out) {
*num_in = 1;
*num_out = 1;
return MX_SUCCESS;
}
MXReturnValue inferType(std::map<std::string, std::string> attrs,
std::vector<int> &intypes,
std::vector<int> &outtypes) {
outtypes[0] = intypes[0];
return MX_SUCCESS;
}
MXReturnValue inferShape(std::map<std::string, std::string> attrs,
std::vector<std::vector<unsigned int>> &inshapes,
std::vector<std::vector<unsigned int>> &outshapes) {
outshapes[0] = inshapes[0];
return MX_SUCCESS;
}
REGISTER_OP(my_relu)
.setParseAttrs(parseAttrs)
.setInferType(inferType)
.setInferShape(inferShape)
.setForward(forwardCPU, "cpu")
.setForward(forwardGPU, "gpu")
.setBackward(backwardCPU, "cpu")
.setBackward(backwardGPU, "gpu");
class MyStatefulReluCPU : public CustomStatefulOp {
public:
explicit MyStatefulReluCPU() {}
MXReturnValue Forward(std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource op_res) {
std::map<std::string, std::string> attrs;
return forwardCPU(attrs, inputs, outputs, op_res);
}
MXReturnValue Backward(std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource op_res) {
std::map<std::string, std::string> attrs;
return backwardCPU(attrs, inputs, outputs, op_res);
}
~MyStatefulReluCPU() {}
};
class MyStatefulReluGPU : public CustomStatefulOp {
public:
explicit MyStatefulReluGPU() {}
MXReturnValue Forward(std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource op_res) {
std::map<std::string, std::string> attrs;
return forwardGPU(attrs, inputs, outputs, op_res);
}
MXReturnValue Backward(std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource op_res) {
std::map<std::string, std::string> attrs;
return backwardGPU(attrs, inputs, outputs, op_res);
}
~MyStatefulReluGPU() {}
};
MXReturnValue createOpStateCPU(std::map<std::string, std::string> attrs,
CustomStatefulOp** op_inst) {
*op_inst = new MyStatefulReluCPU();
return MX_SUCCESS;
}
MXReturnValue createOpStateGPU(std::map<std::string, std::string> attrs,
CustomStatefulOp** op_inst) {
*op_inst = new MyStatefulReluGPU();
return MX_SUCCESS;
}
REGISTER_OP(my_state_relu)
.setParseAttrs(parseAttrs)
.setInferType(inferType)
.setInferShape(inferShape)
.setCreateOpState(createOpStateCPU, "cpu")
.setCreateOpState(createOpStateGPU, "gpu");
MXReturnValue initialize(int version) {
if (version >= 10400) {
std::cout << "MXNet version " << version << " supported" << std::endl;
return MX_SUCCESS;
} else {
std::cout << "MXNet version " << version << " not supported" << std::endl;
return MX_FAIL;
}
}
|
774acf0a14673caa4367face364cfd8a71468abd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_initialise_chunk_kernel_zero_y;
int xdim0_initialise_chunk_kernel_zero_y_h = -1;
int ydim0_initialise_chunk_kernel_zero_y_h = -1;
#undef OPS_ACC0
#define OPS_ACC0(x, y) (x + xdim0_initialise_chunk_kernel_zero_y * (y))
// user function
__device__
void
initialise_chunk_kernel_zero_y_gpu(double *var) {
*var = 0.0;
}
#undef OPS_ACC0
__global__ void ops_initialise_chunk_kernel_zero_y(double *__restrict arg0,
int size0, int size1) {
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
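// the x stencil stride for this dat is 0, so every x index addresses the same element; the pointer only advances with idx_y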
arg0 += idx_x * 0 * 1 + idx_y * 1 * 1 * xdim0_initialise_chunk_kernel_zero_y;
if (idx_x < size0 && idx_y < size1) {
initialise_chunk_kernel_zero_y_gpu(arg0);
}
}
// host stub function
void ops_par_loop_initialise_chunk_kernel_zero_y(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0) {
// Timing
double t1, t2, c1, c2;
ops_arg args[1] = {arg0};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 1, range, 7))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(7, "initialise_chunk_kernel_zero_y");
OPS_kernels[7].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[2];
int end[2];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 2; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 2; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int xdim0 = args[0].dat->size[0];
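// copy xdim0 to the device __constant__ symbol only when it has changed; the _h shadow variable avoids a hipMemcpyToSymbol on every launch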
if (xdim0 != xdim0_initialise_chunk_kernel_zero_y_h) {
hipMemcpyToSymbol(xdim0_initialise_chunk_kernel_zero_y, &xdim0,
sizeof(int));
xdim0_initialise_chunk_kernel_zero_y_h = xdim0;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
char *p_a[1];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
p_a[0] = (char *)args[0].data_d + base0;
ops_H_D_exchanges_device(args, 1);
ops_halo_exchanges(args, 1, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[7].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_initialise_chunk_kernel_zero_y), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], x_size,
y_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[7].time += t1 - t2;
}
ops_set_dirtybit_device(args, 1);
ops_set_halo_dirtybit3(&args[0], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[7].mpi_time += t2 - t1;
OPS_kernels[7].transfer += ops_compute_transfer(dim, start, end, &arg0);
}
}
|
774acf0a14673caa4367face364cfd8a71468abd.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_initialise_chunk_kernel_zero_y;
int xdim0_initialise_chunk_kernel_zero_y_h = -1;
int ydim0_initialise_chunk_kernel_zero_y_h = -1;
#undef OPS_ACC0
#define OPS_ACC0(x, y) (x + xdim0_initialise_chunk_kernel_zero_y * (y))
// user function
__device__
void
initialise_chunk_kernel_zero_y_gpu(double *var) {
*var = 0.0;
}
#undef OPS_ACC0
__global__ void ops_initialise_chunk_kernel_zero_y(double *__restrict arg0,
int size0, int size1) {
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 0 * 1 + idx_y * 1 * 1 * xdim0_initialise_chunk_kernel_zero_y;
if (idx_x < size0 && idx_y < size1) {
initialise_chunk_kernel_zero_y_gpu(arg0);
}
}
// host stub function
void ops_par_loop_initialise_chunk_kernel_zero_y(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0) {
// Timing
double t1, t2, c1, c2;
ops_arg args[1] = {arg0};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 1, range, 7))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(7, "initialise_chunk_kernel_zero_y");
OPS_kernels[7].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[2];
int end[2];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 2; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 2; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int xdim0 = args[0].dat->size[0];
if (xdim0 != xdim0_initialise_chunk_kernel_zero_y_h) {
cudaMemcpyToSymbol(xdim0_initialise_chunk_kernel_zero_y, &xdim0,
sizeof(int));
xdim0_initialise_chunk_kernel_zero_y_h = xdim0;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
char *p_a[1];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
p_a[0] = (char *)args[0].data_d + base0;
ops_H_D_exchanges_device(args, 1);
ops_halo_exchanges(args, 1, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[7].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_initialise_chunk_kernel_zero_y<<<grid, tblock>>>((double *)p_a[0], x_size,
y_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[7].time += t1 - t2;
}
ops_set_dirtybit_device(args, 1);
ops_set_halo_dirtybit3(&args[0], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[7].mpi_time += t2 - t1;
OPS_kernels[7].transfer += ops_compute_transfer(dim, start, end, &arg0);
}
}
|
0baf026a5b2e3b5d394bac67eefc6a44184abc22.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <stdlib.h>
#include <locale>
#include <string>
#include <limits>
#include <time.h>
#include <stdio.h>
#include <iomanip>
#include <sys/time.h>
using namespace std;
//------------ Processing kernel
__global__ void Classif(int* d_dados, int* d_class, long dsize, int colsIn, int colsOut)
{
int i=(threadIdx.x * colsIn) + (blockIdx.x * blockDim.x * colsIn);
int o=(threadIdx.x * colsOut) + (blockIdx.x * blockDim.x * colsOut);
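// i and o are this thread's starting offsets into the flattened input (colsIn columns per row) and output (colsOut columns per row) vectors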
int VlOpen,VlHigh,VlLow,VlClose,classe;
if (i < dsize) { // i is a multiple of colsIn, so the last valid row starts at dsize - colsIn
VlOpen = d_dados[i+1];
VlHigh = d_dados[i+2];
VlLow = d_dados[i+3];
VlClose = d_dados[i+4];
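// The class value is a sum of mutually exclusive power-of-two flags: 512 / 256 / 1024 encode
// open == close / open > close / open < close, and the pairs 1|4, 2|8, 16|64, 32|128 record
// whether low is below open, low is below close, high is above open and high is above close.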
classe=(VlOpen==VlClose ? 512: VlOpen>VlClose ? 256:1024)+(VlLow<VlOpen ? 1:4)+(VlLow<VlClose ? 2:8)+(VlHigh>VlOpen ? 16:64)+(VlHigh>VlClose ? 32:128);
d_class[o]=d_dados[i];
d_class[o+1]=classe;
}
}
//--------------------- Timing functions --------------------------------
std::string DataHora()
{
time_t rawtime;
struct tm * timeinfo;
char buffer [20];
time ( &rawtime );
timeinfo = localtime ( &rawtime );
strftime (buffer,20,"%F %H-%M-%S",timeinfo);
return buffer;
}
/* timing function */
double calcula_tempo(const unsigned long int ini, const unsigned long int fim)
{
double r;
if(fim >= ini)
r = ((double)(fim - ini)) / CLOCKS_PER_SEC;
else
r = ((double)( (fim + (unsigned long int)-1) - ini)) / CLOCKS_PER_SEC;
return r;
}
//------- Classif_paralela (parallel classification) ---------------------------
void Classif_GPU(const char * nome, long plins, int nthd, const char * sthd){
char arq[256];
//char arqo[256];
//std::ifstream fin;
int colsIn=5, colsOut=2;
long lins,i, c, dsize, csize;
//int classe,VlOpen,VlHigh,VlLow,VlClose;
int v_blocos,v_threads;
std::string sIndice,sVlOpen,sVlHigh,sVlLow,sVlClose;
unsigned long int t_ini;
unsigned long int t_fin;
unsigned long int t_tmp;
std::string dateStr,fn,fnl,s_threads;
/*--- timing variables -------------*/
timeval start, end;
double delta;
dateStr=DataHora();
std::cout<<" <DataHora > = "<<dateStr<<std::endl;
/* start time */
t_ini = (unsigned long int) clock();
gettimeofday(&start, NULL); //marks the start of processing
/* -- define the sizes of the flat vectors created in place of matrices */
/* -- dsize is the size of the data vector, given the number of rows and input columns */
dsize=plins*colsIn;
/* -- csize is the size of the classification vector, given the number of rows and output columns */
csize=plins*colsOut;
/* -- Create the vectors that will hold the data read from the file and the classification */
int *h_dados;
int *h_class;
//std::cout<<"dsize= "<< dsize <<" csize= "<< csize<<std::endl;
size_t d_nbytes=dsize * sizeof(int);
size_t c_nbytes=csize * sizeof(int);
hipMallocManaged ((void**)&h_dados, d_nbytes);
hipMallocManaged ((void**)&h_class, c_nbytes);
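// hipMallocManaged allocates unified (managed) memory, so the same pointers are filled on the host below and passed straight to the kernel without explicit hipMemcpy calls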
//h_dados[0]=0;
//h_dados[1]=1;
//std::cout<<"h_dados[0]= "<< h_dados[0] <<" h_dados[1]= "<< h_dados[1]<<std::endl;
lins=plins-0;
std::cout<<" <inicializou lns> = "<<lins<<std::endl;
/* ----- Open the csv file and start loading the vectors ------------------- */
strcpy(arq,nome);
ifstream fin(arq);
if (fin.is_open())
{
t_tmp=(unsigned long int) clock();
/*--- load the file into the host vector h_dados and initialize h_class, converting float values to int */
i=0;
c=0;
while (i < dsize && fin.good()) // stop after plins rows so the managed buffers are never overrun
{
getline(fin,sIndice,',');
getline(fin,sVlOpen,',');
getline(fin,sVlHigh,',');
getline(fin,sVlLow,',');
getline(fin,sVlClose,'\n');
//std::cout<<"sIndice= "<< sIndice <<"sVlOpen= "<< sVlOpen<<"sVlHigh= "<< sVlHigh<<"sVlLow= "<< sVlLow<<"sVlClose= "<< sVlClose<<std::endl;
//h_dados[i]=std::stoi(sIndice);
h_dados[i]=std::atoi(sIndice.c_str());
//h_dados[i+1]=static_cast<int>(std::stof(sVlOpen,NULL)*100);
h_dados[i+1]=static_cast<int>(std::atof(sVlOpen.c_str())*100);
h_dados[i+2]=static_cast<int>(std::atof(sVlHigh.c_str())*100);
h_dados[i+3]=static_cast<int>(std::atof(sVlLow.c_str())*100);
h_dados[i+4]=static_cast<int>(std::atof(sVlClose.c_str())*100);
h_class[c]=0;
h_class[c+1]=0;
i+=colsIn;
c+=colsOut;
}
//std::cout<<" <Carregou h_dados com "<< i <<" posies e h_class com "<< c << " posicoes"<<std::endl;
/*--- Compute the number of blocks and threads from the number of records.
i = number of vector positions generated, i.e. the number of rows times the number of input columns (colsIn).
Threads were originally fixed at 256 (here they come from the nthd argument).
To process every row of the input file (plins), i/colsIn is used, which equals plins;
so for 17,000,000 records to classify:
v_blocos = ceil((85,000,000/5)/256) = 66406.25 ==> 66407 blocks
---*/
v_threads=nthd;
s_threads=std::string(sthd);
//s_threads = "64";
//v_blocos=ceil((i/colsIn)/v_threads);
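// one thread per CSV row; round v_blocos up so v_blocos * v_threads >= lins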
v_blocos=(int)ceil((float)lins/v_threads);
//std::cout<<" <Calculou v_blocos com "<< v_blocos <<" threads com "<< v_threads <<std::endl;
/*--- launch the classification kernel ---*/
hipLaunchKernelGGL(( Classif), dim3(v_blocos),dim3(v_threads), 0, 0, h_dados, h_class, dsize, colsIn, colsOut);
/*--- wait for the kernel so the classification vector (managed memory) can be read back on the host --*/
hipDeviceSynchronize();
//std::cout<<" <Sincronizou -------------------"<<std::endl;
fnl="log/Classif_KernelT"+ s_threads +dateStr+".log.txt";
//arqo=fnl.c_str();
std::ofstream mylog (fnl.c_str());
//std::ofstream mylog (arqo);
mylog<<"Processado em "<< dateStr <<std::endl;
mylog<<"Processado em "<< v_blocos <<" blocos com "<< v_threads <<" threads"<<std::endl;
mylog<<"Tempo total de classificaao (ler CSV e classificar via kernel)= "<< calcula_tempo(t_tmp, (unsigned long int) clock()) <<std::endl;
/*---- close the input file of records to classify */
fin.close();
//mylog<<"Tempo decorrido at o final da classificaao= "<< calcula_tempo(t_ini, (unsigned long int) clock()) <<std::endl;
/*--- build the name of the output csv file with the classifications ----*/
//fn="/home/UFF/GPU/Trabalho/Dados/Classif_Kernel"+dateStr+".csv";
fn="csv/Classif_KernelT"+ s_threads +dateStr+".csv";
//std::cout<<std::endl<<fn <<std::endl;
t_tmp=(unsigned long int) clock();
/*--- open the output csv ---*/
std::ofstream myfile (fn.c_str());
myfile<<"Indice,IdClasse"<<std::endl;
/*--- export the contents of the h_class vector ---*/
for (i=0; i<csize; i+=colsOut)
{
myfile<<h_class[i]<<','<<h_class[i+1]<<"\n";
}
myfile.close();
mylog<<"Tempo para exportar classificaao para CSV= "<< calcula_tempo(t_tmp, (unsigned long int) clock()) <<std::endl;
// deallocate the matrix << with Thrust the deallocation of the vectors is transparent ---------------
//mylog<<"Tempo para free matriz = "<< calcula_tempo(t_tmp, (unsigned long int) clock()) <<std::endl;
/* final time */
t_fin = (unsigned long int) clock();
mylog<<"Total de registros classificados= "<< lins <<std::endl;
mylog<<"Tempo total de processamento= "<< setprecision(6) << calcula_tempo(t_ini, t_fin) <<std::endl;
gettimeofday(&end, NULL);
delta = ((end.tv_sec - start.tv_sec) * 1000000u + end.tv_usec - start.tv_usec) / 1.e6;
mylog<<"Tempo total de processamento 2 = "<< delta <<std::endl;
mylog.close();
std::cout<<std::endl<<"Tempo total de processamento= "<< calcula_tempo(t_ini, t_fin) <<std::endl;
std::cout<<"Tempo total de processamento 2 = "<< delta <<std::endl;
}
else
{
std::cout<<std::endl<<"Erro na abertura do arquivo "<< nome <<std::endl;
}
}
//---------------------------------------------------------------------------
int main(int argc, char * argv[])
{
long nlin=0;
int nthd=0;
if (argc < 4){
std::cout<<"Digite o nome do arquivo de entrada e a quantidade de registros e quantas threads"<<std::endl;
abort();
}
// File
std::cout<<" <Arquivo de entrada> = "<<argv[1]<<std::endl;
//nlin=std::stol(argv[2]);
nlin=std::atol(argv[2]);
nthd=std::atoi(argv[3]);
/* run the classification */
std::cout<<" <Qtd Registros> = "<<nlin<<std::endl;
Classif_GPU(argv[1],nlin,nthd,argv[3]);
}
|
0baf026a5b2e3b5d394bac67eefc6a44184abc22.cu
|
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <stdlib.h>
#include <locale>
#include <string>
#include <limits>
#include <time.h>
#include <stdio.h>
#include <iomanip>
#include <sys/time.h>
using namespace std;
//------------ Processing kernel
__global__ void Classif(int* d_dados, int* d_class, long dsize, int colsIn, int colsOut)
{
int i=(threadIdx.x * colsIn) + (blockIdx.x * blockDim.x * colsIn);
int o=(threadIdx.x * colsOut) + (blockIdx.x * blockDim.x * colsOut);
int VlOpen,VlHigh,VlLow,VlClose,classe;
if (i < dsize) { // strict bound: i indexes the first of colsIn consecutive entries for this row
VlOpen = d_dados[i+1];
VlHigh = d_dados[i+2];
VlLow = d_dados[i+3];
VlClose = d_dados[i+4];
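/* The class id packs the candle shape into summed flags:
512 if open==close, 256 if open>close, 1024 if open<close;
+1 if low<open else +4;  +2 if low<close else +8;
+16 if high>open else +64;  +32 if high>close else +128. */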
classe=(VlOpen==VlClose ? 512: VlOpen>VlClose ? 256:1024)+(VlLow<VlOpen ? 1:4)+(VlLow<VlClose ? 2:8)+(VlHigh>VlOpen ? 16:64)+(VlHigh>VlClose ? 32:128);
d_class[o]=d_dados[i];
d_class[o+1]=classe;
}
}
//--------------------- Time helper functions --------------------------------
std::string DataHora()
{
time_t rawtime;
struct tm * timeinfo;
char buffer [20];
time ( &rawtime );
timeinfo = localtime ( &rawtime );
strftime (buffer,20,"%F %H-%M-%S",timeinfo);
return buffer;
}
/* elapsed-time helper */
double calcula_tempo(const unsigned long int ini, const unsigned long int fim)
{
double r;
if(fim >= ini)
r = ((double)(fim - ini)) / CLOCKS_PER_SEC;
else
r = ((double)( (fim + (unsigned long int)-1) - ini)) / CLOCKS_PER_SEC;
return r;
}
//------- Classif_paralela:: / std::string ---------------------------
void Classif_GPU(const char * nome, long plins, int nthd, const char * sthd){
char arq[256];
//char arqo[256];
//std::ifstream fin;
int colsIn=5, colsOut=2;
long lins,i, c, dsize, csize;
//int classe,VlOpen,VlHigh,VlLow,VlClose;
int v_blocos,v_threads;
std::string sIndice,sVlOpen,sVlHigh,sVlLow,sVlClose;
unsigned long int t_ini;
unsigned long int t_fin;
unsigned long int t_tmp;
std::string dateStr,fn,fnl,s_threads;
/*--- timing variables -------------*/
timeval start, end;
double delta;
dateStr=DataHora();
std::cout<<" <DataHora > = "<<dateStr<<std::endl;
/* start time */
t_ini = (unsigned long int) clock();
gettimeofday(&start, NULL); // start-of-processing marker
/* -- define the sizes of the vectors created in place of matrices */
/* -- dsize is the size of the data vector, given the number of rows and columns */
dsize=plins*colsIn;
/* -- csize is the size of the classification vector, given the number of rows and columns */
csize=plins*colsOut;
/* -- Create the vectors that will hold the data read from the file and the classification */
int *h_dados;
int *h_class;
//std::cout<<"dsize= "<< dsize <<" csize= "<< csize<<std::endl;
size_t d_nbytes=dsize * sizeof(int);
size_t c_nbytes=csize * sizeof(int);
cudaMallocManaged ((void**)&h_dados, d_nbytes);
cudaMallocManaged ((void**)&h_class, c_nbytes);
//h_dados[0]=0;
//h_dados[1]=1;
//std::cout<<"h_dados[0]= "<< h_dados[0] <<" h_dados[1]= "<< h_dados[1]<<std::endl;
lins=plins-0;
std::cout<<" <inicializou lns> = "<<lins<<std::endl;
/* ----- Open the csv file and start loading the vectors ------------------- */
strcpy(arq,nome);
ifstream fin(arq);
if (fin.is_open())
{
t_tmp=(unsigned long int) clock();
/*--- load the file into the host vector h_dados and initialise h_class, converting float values to int */
i=0;
c=0;
while (fin.good())
{
getline(fin,sIndice,',');
getline(fin,sVlOpen,',');
getline(fin,sVlHigh,',');
getline(fin,sVlLow,',');
getline(fin,sVlClose,'\n');
//std::cout<<"sIndice= "<< sIndice <<"sVlOpen= "<< sVlOpen<<"sVlHigh= "<< sVlHigh<<"sVlLow= "<< sVlLow<<"sVlClose= "<< sVlClose<<std::endl;
//h_dados[i]=std::stoi(sIndice);
h_dados[i]=std::atoi(sIndice.c_str());
//h_dados[i+1]=static_cast<int>(std::stof(sVlOpen,NULL)*100);
h_dados[i+1]=static_cast<int>(std::atof(sVlOpen.c_str())*100);
h_dados[i+2]=static_cast<int>(std::atof(sVlHigh.c_str())*100);
h_dados[i+3]=static_cast<int>(std::atof(sVlLow.c_str())*100);
h_dados[i+4]=static_cast<int>(std::atof(sVlClose.c_str())*100);
h_class[c]=0;
h_class[c+1]=0;
i+=colsIn;
c+=colsOut;
}
//std::cout<<" <Carregou h_dados com "<< i <<" posições e h_class com "<< c << " posicoes"<<std::endl;
/*--- Calcula o número de blocos e threads em função do número de registros
i = número de posições geradas para o vetor vezes o número de colunas de entrada (colsIn)
Fixei as threads em 256
Para processar todas as linhas do arquivo de entrada, plins, uso i/colsIN que tem o mesmo valor de plins
assim, para 17.000.000 de registros a classificar tremos:
v_blocos=ceil((85.000.000/5)/256)=66406,26 ==> 66407 blocos
---*/
v_threads=nthd;
s_threads=std::string(sthd);
//s_threads = "64";
//v_blocos=ceil((i/colsIn)/v_threads);
v_blocos=(int)ceil((float)lins/v_threads);
//std::cout<<" <Calculou v_blocos com "<< v_blocos <<" threads com "<< v_threads <<std::endl;
/*--- launch the classification kernel ---*/
Classif<<<v_blocos,v_threads>>>(h_dados, h_class, dsize, colsIn, colsOut);
/*--- copy back the classification vector --*/
cudaDeviceSynchronize();
//std::cout<<" <Sincronizou -------------------"<<std::endl;
fnl="log/Classif_KernelT"+ s_threads +dateStr+".log.txt";
//arqo=fnl.c_str();
std::ofstream mylog (fnl.c_str());
//std::ofstream mylog (arqo);
mylog<<"Processado em "<< dateStr <<std::endl;
mylog<<"Processado em "<< v_blocos <<" blocos com "<< v_threads <<" threads"<<std::endl;
mylog<<"Tempo total de classificaçao (ler CSV e classificar via kernel)= "<< calcula_tempo(t_tmp, (unsigned long int) clock()) <<std::endl;
/*---- close the input file of records to classify */
fin.close();
//mylog<<"Tempo decorrido até o final da classificaçao= "<< calcula_tempo(t_ini, (unsigned long int) clock()) <<std::endl;
/*--- build the name of the output csv file with the classifications ----*/
//fn="/home/UFF/GPU/Trabalho/Dados/Classif_Kernel"+dateStr+".csv";
fn="csv/Classif_KernelT"+ s_threads +dateStr+".csv";
//std::cout<<std::endl<<fn <<std::endl;
t_tmp=(unsigned long int) clock();
/*--- open the output csv ---*/
std::ofstream myfile (fn.c_str());
myfile<<"Indice,IdClasse"<<std::endl;
/*--- export the contents of the h_class vector ---*/
for (i=0; i<csize; i+=colsOut)
{
myfile<<h_class[i]<<','<<h_class[i+1]<<"\n";
}
myfile.close();
mylog<<"Tempo para exportar classificaçao para CSV= "<< calcula_tempo(t_tmp, (unsigned long int) clock()) <<std::endl;
// deallocate the matrix << with Thrust the deallocation of the vectors is transparent ---------------
//mylog<<"Tempo para free matriz = "<< calcula_tempo(t_tmp, (unsigned long int) clock()) <<std::endl;
/* final time */
t_fin = (unsigned long int) clock();
mylog<<"Total de registros classificados= "<< lins <<std::endl;
mylog<<"Tempo total de processamento= "<< setprecision(6) << calcula_tempo(t_ini, t_fin) <<std::endl;
gettimeofday(&end, NULL);
delta = ((end.tv_sec - start.tv_sec) * 1000000u + end.tv_usec - start.tv_usec) / 1.e6;
mylog<<"Tempo total de processamento 2 = "<< delta <<std::endl;
mylog.close();
std::cout<<std::endl<<"Tempo total de processamento= "<< calcula_tempo(t_ini, t_fin) <<std::endl;
std::cout<<"Tempo total de processamento 2 = "<< delta <<std::endl;
}
else
{
std::cout<<std::endl<<"Erro na abertura do arquivo "<< nome <<std::endl;
}
}
//---------------------------------------------------------------------------
int main(int argc, char * argv[])
{
long nlin=0;
int nthd=0;
if (argc < 4){
std::cout<<"Digite o nome do arquivo de entrada e a quantidade de registros e quantas threads"<<std::endl;
abort();
}
// File
std::cout<<" <Arquivo de entrada> = "<<argv[1]<<std::endl;
//nlin=std::stol(argv[2]);
nlin=std::atol(argv[2]);
nthd=std::atoi(argv[3]);
/* run the classification */
std::cout<<" <Qtd Registros> = "<<nlin<<std::endl;
Classif_GPU(argv[1],nlin,nthd,argv[3]);
}
|
a29a419659bcd2c2b08aa8ab51a0c03cb9ae5fdf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/dtypes.cuh"
#include "../include/inits.cuh"
#include "../include/solvers.cuh"
#include "minunit.cuh"
__device__ float4 oscillator(float4 Xi, float4 r, float dist, int i, int j)
{
float4 dF{0};
if (i == j) return dF;
if (i == 0) return Xi - r;
return -(Xi - r);
}
const char* test_oscillation()
{
Solution<float4, 2, Tile_solver> oscillation;
oscillation.h_X[0].w = 1;
oscillation.h_X[1].w = 0;
oscillation.copy_to_device();
auto n_steps = 100;
for (auto i = 0; i < n_steps; i++) {
oscillation.take_step<oscillator>(2 * M_PI / n_steps);
oscillation.copy_to_host();
MU_ASSERT("Oscillator off circle",
isclose(
powf(oscillation.h_X[0].w, 2) + powf(oscillation.h_X[1].w, 2),
1));
}
oscillation.copy_to_host();
MU_ASSERT("Oscillator final cosine", isclose(oscillation.h_X[0].w, 1));
// The sine is substantially less precise ;-)
return NULL;
}
const auto L_0 = 0.5;
__device__ float3 clipped_spring(float3 Xi, float3 r, float dist, int i, int j)
{
float3 dF{0};
if (i == j) return dF;
if (dist >= 1) return dF;
dF = r * (L_0 - dist) / dist;
return dF;
}
const char* test_tile_tetrahedron()
{
Solution<float3, 4, Tile_solver> tile;
random_sphere(L_0, tile);
auto com_i = center_of_mass(tile);
for (auto i = 0; i < 500; i++) {
tile.take_step<clipped_spring>(0.1);
}
tile.copy_to_host();
for (auto i = 1; i < 4; i++) {
auto r = tile.h_X[0] - tile.h_X[i];
auto dist = sqrtf(r.x * r.x + r.y * r.y + r.z * r.z);
MU_ASSERT(
"Spring not relaxed in tile tetrahedron", isclose(dist, L_0));
}
auto com_f = center_of_mass(tile);
MU_ASSERT("Momentum in tile tetrahedron", isclose(com_i.x, com_f.x));
MU_ASSERT("Momentum in tile tetrahedron", isclose(com_i.y, com_f.y));
MU_ASSERT("Momentum in tile tetrahedron", isclose(com_i.z, com_f.z));
return NULL;
}
const char* test_grid_tetrahedron()
{
Solution<float3, 4, Grid_solver> grid;
random_sphere(L_0, grid);
auto com_i = center_of_mass(grid);
for (auto i = 0; i < 500; i++) {
grid.take_step<clipped_spring>(0.1);
}
grid.copy_to_host();
for (auto i = 1; i < 4; i++) {
auto r = float3{grid.h_X[0].x - grid.h_X[i].x,
grid.h_X[0].y - grid.h_X[i].y, grid.h_X[0].z - grid.h_X[i].z};
auto dist = sqrtf(r.x * r.x + r.y * r.y + r.z * r.z);
MU_ASSERT(
"Spring not relaxed in grid tetrahedron", isclose(dist, L_0));
}
auto com_f = center_of_mass(grid);
MU_ASSERT("Momentum in grid tetrahedron", isclose(com_i.x, com_f.x));
MU_ASSERT("Momentum in grid tetrahedron", isclose(com_i.y, com_f.y));
MU_ASSERT("Momentum in grid tetrahedron", isclose(com_i.z, com_f.z));
return NULL;
}
const auto n_max = 50;
const char* test_compare_methods()
{
Solution<float3, n_max, Tile_solver> tile;
Solution<float3, n_max, Grid_solver> grid;
random_sphere(0.733333, tile);
for (auto i = 0; i < n_max; i++) {
grid.h_X[i].x = tile.h_X[i].x;
grid.h_X[i].y = tile.h_X[i].y;
grid.h_X[i].z = tile.h_X[i].z;
}
grid.copy_to_device();
for (auto i = 0; i < 2; i++) tile.take_step<clipped_spring>(0.1);
for (auto i = 0; i < 2; i++) grid.take_step<clipped_spring>(0.1);
tile.copy_to_host();
grid.copy_to_host();
for (auto i = 0; i < n_max; i++) {
MU_ASSERT("Methods disagree", isclose(tile.h_X[i].x, grid.h_X[i].x));
MU_ASSERT("Methods disagree", isclose(tile.h_X[i].y, grid.h_X[i].y));
MU_ASSERT("Methods disagree", isclose(tile.h_X[i].z, grid.h_X[i].z));
}
return NULL;
}
__device__ float3 no_pw_int(float3 Xi, float3 r, float dist, int i, int j)
{
return float3{0};
}
__global__ void push_cell(float3* d_dX)
{
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i != 0) return;
d_dX[1] = float3{1, 0, 0};
}
void push(const float3* __restrict__ d_X, float3* d_dX)
{
hipLaunchKernelGGL(( push_cell), dim3(1), dim3(1), 0, 0, d_dX);
}
const char* test_generic_forces()
{
Solution<float3, 2, Tile_solver> tile;
tile.h_X[0] = float3{0, 0, 10};
tile.h_X[1] = float3{0, 0, 0};
tile.copy_to_device();
auto com_i = center_of_mass(tile);
tile.take_step<no_pw_int>(1, push);
tile.copy_to_host();
auto com_f = center_of_mass(tile);
MU_ASSERT("Momentum in tile generic force", isclose(com_i.x, com_f.x));
MU_ASSERT("Momentum in tile generic force", isclose(com_i.y, com_f.y));
MU_ASSERT("Momentum in tile generic force", isclose(com_i.z, com_f.z));
MU_ASSERT("Tile generic force failed in x", isclose(tile.h_X[1].x, 0.5));
MU_ASSERT("Tile generic force failed in y", isclose(tile.h_X[1].y, 0));
MU_ASSERT("Tile generic force failed in z", isclose(tile.h_X[1].z, 0));
Solution<float3, 2, Grid_solver> grid;
grid.h_X[0] = float3{0, 0, 10};
grid.h_X[1] = float3{0, 0, 0};
grid.copy_to_device();
com_i = center_of_mass(grid);
grid.take_step<clipped_spring>(1, push);
grid.copy_to_host();
com_f = center_of_mass(grid);
MU_ASSERT("Momentum in grid generic force", isclose(com_i.x, com_f.x));
MU_ASSERT("Momentum in grid generic force", isclose(com_i.y, com_f.y));
MU_ASSERT("Momentum in grid generic force", isclose(com_i.z, com_f.z));
MU_ASSERT("Grid generic force failed in x", isclose(grid.h_X[1].x, 0.5));
MU_ASSERT("Grid generic force failed in y", isclose(grid.h_X[1].y, 0));
MU_ASSERT("Grid generic force failed in z", isclose(grid.h_X[1].z, 0));
return NULL;
}
const char* test_friction()
{
Solution<float3, 2, Tile_solver> tile;
tile.h_X[0] = float3{0, 0, 0};
tile.h_X[1] = float3{.5, 0, 0};
tile.copy_to_device();
for (auto i = 0; i < 10; i++)
tile.take_step<no_pw_int, friction_on_background>(0.05, push);
tile.copy_to_host();
MU_ASSERT("Tile friction on background",
isclose(tile.h_X[1].x - tile.h_X[0].x, 1));
tile.h_X[0] = float3{0, 0, 0};
tile.h_X[1] = float3{.5, 0, 0};
tile.copy_to_device();
for (auto i = 0; i < 10; i++) tile.take_step<no_pw_int>(0.05, push);
tile.copy_to_host();
MU_ASSERT("Tile friction w/ neighbour",
isclose(tile.h_X[1].x - tile.h_X[0].x, 0.75));
Solution<float3, 2, Grid_solver> grid;
grid.h_X[0] = float3{0, 0, 0};
grid.h_X[1] = float3{.5, 0, 0};
grid.copy_to_device();
for (auto i = 0; i < 10; i++)
grid.take_step<no_pw_int, friction_on_background>(0.05, push);
grid.copy_to_host();
MU_ASSERT("Grid friction on background",
isclose(grid.h_X[1].x - grid.h_X[0].x, 1));
grid.h_X[0] = float3{0, 0, 0};
grid.h_X[1] = float3{.5, 0, 0};
grid.copy_to_device();
for (auto i = 0; i < 10; i++) grid.take_step<no_pw_int>(0.05, push);
grid.copy_to_host();
MU_ASSERT("Grid friction w/ neighbour",
isclose(grid.h_X[1].x - grid.h_X[0].x, 0.75));
return NULL;
}
const char* test_fix_point()
{
Solution<float3, 100, Tile_solver> tile;
random_sphere(0.733333, tile);
auto fix_point = 13;
tile.h_X[fix_point] = float3{0};
tile.copy_to_device();
tile.set_fixed(fix_point);
tile.take_step<clipped_spring>(0.1);
tile.copy_to_host();
MU_ASSERT("Fixed point moved in x", isclose(tile.h_X[fix_point].x, 0));
MU_ASSERT("Fixed point moved in y", isclose(tile.h_X[fix_point].y, 0));
MU_ASSERT("Fixed point moved in z", isclose(tile.h_X[fix_point].z, 0));
return NULL;
}
template<int n_max>
__global__ void single_grid(const Grid<n_max>* __restrict__ d_grid)
{
auto i = threadIdx.x + blockDim.x * threadIdx.y +
blockDim.x * blockDim.y * threadIdx.z;
auto cube_id_origin = (GRID_SIZE * GRID_SIZE * GRID_SIZE) / 2 +
(GRID_SIZE * GRID_SIZE) / 2 + GRID_SIZE / 2;
auto expected_cube = cube_id_origin + threadIdx.x +
(GRID_SIZE * threadIdx.y) +
(GRID_SIZE * GRID_SIZE * threadIdx.z);
auto one_point_per_cube = d_grid->d_cube_start[expected_cube] ==
d_grid->d_cube_end[expected_cube];
D_ASSERT(one_point_per_cube); // Thus no sorting!
D_ASSERT(d_grid->d_cube_id[i] == expected_cube);
}
template<int n_max>
__global__ void double_grid(const Grid<n_max>* __restrict__ d_grid)
{
auto i = threadIdx.x + blockDim.x * threadIdx.y +
blockDim.x * blockDim.y * threadIdx.z;
auto cube_id_origin = (GRID_SIZE * GRID_SIZE * GRID_SIZE) / 2 +
(GRID_SIZE * GRID_SIZE) / 2 + GRID_SIZE / 2;
auto expected_cube =
static_cast<int>(cube_id_origin + floor(threadIdx.x / 2.f) +
(GRID_SIZE * floor(threadIdx.y / 2.f)) +
(GRID_SIZE * GRID_SIZE * floor(threadIdx.z / 2.f)));
auto in_expected_cube = false;
for (auto j = d_grid->d_cube_start[expected_cube];
j <= d_grid->d_cube_end[expected_cube]; j++) {
if (d_grid->d_point_id[j] == i) in_expected_cube = true;
}
D_ASSERT(in_expected_cube);
}
const char* test_grid_spacing()
{
const auto n_x = 7;
const auto n_y = 7;
const auto n_z = 7;
Solution<float3, n_x * n_y * n_z, Grid_solver> bolls;
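// Place one point at the centre of each cell of a 7x7x7 unit lattice, so a grid built
// with cube size 1 should hold exactly one point per cube, and with cube size 2 each
// cube should collect a 2x2x2 patch of points (as the two kernels above assert).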
for (auto i = 0; i < n_z; i++) {
for (auto j = 0; j < n_y; j++) {
for (auto k = 0; k < n_x; k++) {
bolls.h_X[n_x * n_y * i + n_x * j + k].x = k + 0.5;
bolls.h_X[n_x * n_y * i + n_x * j + k].y = j + 0.5;
bolls.h_X[n_x * n_y * i + n_x * j + k].z = i + 0.5;
}
}
}
bolls.copy_to_device();
Grid<n_x * n_y * n_z> grid;
grid.build(bolls, 1);
hipLaunchKernelGGL(( single_grid), dim3(1), dim3(n_x, n_y, n_z), 0, 0, grid.d_grid);
grid.build(bolls, 2);
hipLaunchKernelGGL(( double_grid), dim3(1), dim3(n_x, n_y, n_z), 0, 0, grid.d_grid);
hipDeviceSynchronize(); // Wait for device to exit
return NULL;
}
const char* all_tests()
{
MU_RUN_TEST(test_oscillation);
MU_RUN_TEST(test_tile_tetrahedron);
MU_RUN_TEST(test_grid_tetrahedron);
MU_RUN_TEST(test_compare_methods);
MU_RUN_TEST(test_generic_forces);
MU_RUN_TEST(test_friction);
MU_RUN_TEST(test_fix_point);
MU_RUN_TEST(test_grid_spacing);
return NULL;
}
MU_RUN_SUITE(all_tests);
|
a29a419659bcd2c2b08aa8ab51a0c03cb9ae5fdf.cu
|
#include "../include/dtypes.cuh"
#include "../include/inits.cuh"
#include "../include/solvers.cuh"
#include "minunit.cuh"
__device__ float4 oscillator(float4 Xi, float4 r, float dist, int i, int j)
{
float4 dF{0};
if (i == j) return dF;
if (i == 0) return Xi - r;
return -(Xi - r);
}
const char* test_oscillation()
{
Solution<float4, 2, Tile_solver> oscillation;
oscillation.h_X[0].w = 1;
oscillation.h_X[1].w = 0;
oscillation.copy_to_device();
auto n_steps = 100;
for (auto i = 0; i < n_steps; i++) {
oscillation.take_step<oscillator>(2 * M_PI / n_steps);
oscillation.copy_to_host();
MU_ASSERT("Oscillator off circle",
isclose(
powf(oscillation.h_X[0].w, 2) + powf(oscillation.h_X[1].w, 2),
1));
}
oscillation.copy_to_host();
MU_ASSERT("Oscillator final cosine", isclose(oscillation.h_X[0].w, 1));
// The sine is substantially less precise ;-)
return NULL;
}
const auto L_0 = 0.5;
__device__ float3 clipped_spring(float3 Xi, float3 r, float dist, int i, int j)
{
float3 dF{0};
if (i == j) return dF;
if (dist >= 1) return dF;
dF = r * (L_0 - dist) / dist;
return dF;
}
const char* test_tile_tetrahedron()
{
Solution<float3, 4, Tile_solver> tile;
random_sphere(L_0, tile);
auto com_i = center_of_mass(tile);
for (auto i = 0; i < 500; i++) {
tile.take_step<clipped_spring>(0.1);
}
tile.copy_to_host();
for (auto i = 1; i < 4; i++) {
auto r = tile.h_X[0] - tile.h_X[i];
auto dist = sqrtf(r.x * r.x + r.y * r.y + r.z * r.z);
MU_ASSERT(
"Spring not relaxed in tile tetrahedron", isclose(dist, L_0));
}
auto com_f = center_of_mass(tile);
MU_ASSERT("Momentum in tile tetrahedron", isclose(com_i.x, com_f.x));
MU_ASSERT("Momentum in tile tetrahedron", isclose(com_i.y, com_f.y));
MU_ASSERT("Momentum in tile tetrahedron", isclose(com_i.z, com_f.z));
return NULL;
}
const char* test_grid_tetrahedron()
{
Solution<float3, 4, Grid_solver> grid;
random_sphere(L_0, grid);
auto com_i = center_of_mass(grid);
for (auto i = 0; i < 500; i++) {
grid.take_step<clipped_spring>(0.1);
}
grid.copy_to_host();
for (auto i = 1; i < 4; i++) {
auto r = float3{grid.h_X[0].x - grid.h_X[i].x,
grid.h_X[0].y - grid.h_X[i].y, grid.h_X[0].z - grid.h_X[i].z};
auto dist = sqrtf(r.x * r.x + r.y * r.y + r.z * r.z);
MU_ASSERT(
"Spring not relaxed in grid tetrahedron", isclose(dist, L_0));
}
auto com_f = center_of_mass(grid);
MU_ASSERT("Momentum in grid tetrahedron", isclose(com_i.x, com_f.x));
MU_ASSERT("Momentum in grid tetrahedron", isclose(com_i.y, com_f.y));
MU_ASSERT("Momentum in grid tetrahedron", isclose(com_i.z, com_f.z));
return NULL;
}
const auto n_max = 50;
const char* test_compare_methods()
{
Solution<float3, n_max, Tile_solver> tile;
Solution<float3, n_max, Grid_solver> grid;
random_sphere(0.733333, tile);
for (auto i = 0; i < n_max; i++) {
grid.h_X[i].x = tile.h_X[i].x;
grid.h_X[i].y = tile.h_X[i].y;
grid.h_X[i].z = tile.h_X[i].z;
}
grid.copy_to_device();
for (auto i = 0; i < 2; i++) tile.take_step<clipped_spring>(0.1);
for (auto i = 0; i < 2; i++) grid.take_step<clipped_spring>(0.1);
tile.copy_to_host();
grid.copy_to_host();
for (auto i = 0; i < n_max; i++) {
MU_ASSERT("Methods disagree", isclose(tile.h_X[i].x, grid.h_X[i].x));
MU_ASSERT("Methods disagree", isclose(tile.h_X[i].y, grid.h_X[i].y));
MU_ASSERT("Methods disagree", isclose(tile.h_X[i].z, grid.h_X[i].z));
}
return NULL;
}
__device__ float3 no_pw_int(float3 Xi, float3 r, float dist, int i, int j)
{
return float3{0};
}
__global__ void push_cell(float3* d_dX)
{
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i != 0) return;
d_dX[1] = float3{1, 0, 0};
}
void push(const float3* __restrict__ d_X, float3* d_dX)
{
push_cell<<<1, 1>>>(d_dX);
}
const char* test_generic_forces()
{
Solution<float3, 2, Tile_solver> tile;
tile.h_X[0] = float3{0, 0, 10};
tile.h_X[1] = float3{0, 0, 0};
tile.copy_to_device();
auto com_i = center_of_mass(tile);
tile.take_step<no_pw_int>(1, push);
tile.copy_to_host();
auto com_f = center_of_mass(tile);
MU_ASSERT("Momentum in tile generic force", isclose(com_i.x, com_f.x));
MU_ASSERT("Momentum in tile generic force", isclose(com_i.y, com_f.y));
MU_ASSERT("Momentum in tile generic force", isclose(com_i.z, com_f.z));
MU_ASSERT("Tile generic force failed in x", isclose(tile.h_X[1].x, 0.5));
MU_ASSERT("Tile generic force failed in y", isclose(tile.h_X[1].y, 0));
MU_ASSERT("Tile generic force failed in z", isclose(tile.h_X[1].z, 0));
Solution<float3, 2, Grid_solver> grid;
grid.h_X[0] = float3{0, 0, 10};
grid.h_X[1] = float3{0, 0, 0};
grid.copy_to_device();
com_i = center_of_mass(grid);
grid.take_step<clipped_spring>(1, push);
grid.copy_to_host();
com_f = center_of_mass(grid);
MU_ASSERT("Momentum in grid generic force", isclose(com_i.x, com_f.x));
MU_ASSERT("Momentum in grid generic force", isclose(com_i.y, com_f.y));
MU_ASSERT("Momentum in grid generic force", isclose(com_i.z, com_f.z));
MU_ASSERT("Grid generic force failed in x", isclose(grid.h_X[1].x, 0.5));
MU_ASSERT("Grid generic force failed in y", isclose(grid.h_X[1].y, 0));
MU_ASSERT("Grid generic force failed in z", isclose(grid.h_X[1].z, 0));
return NULL;
}
const char* test_friction()
{
Solution<float3, 2, Tile_solver> tile;
tile.h_X[0] = float3{0, 0, 0};
tile.h_X[1] = float3{.5, 0, 0};
tile.copy_to_device();
for (auto i = 0; i < 10; i++)
tile.take_step<no_pw_int, friction_on_background>(0.05, push);
tile.copy_to_host();
MU_ASSERT("Tile friction on background",
isclose(tile.h_X[1].x - tile.h_X[0].x, 1));
tile.h_X[0] = float3{0, 0, 0};
tile.h_X[1] = float3{.5, 0, 0};
tile.copy_to_device();
for (auto i = 0; i < 10; i++) tile.take_step<no_pw_int>(0.05, push);
tile.copy_to_host();
MU_ASSERT("Tile friction w/ neighbour",
isclose(tile.h_X[1].x - tile.h_X[0].x, 0.75));
Solution<float3, 2, Grid_solver> grid;
grid.h_X[0] = float3{0, 0, 0};
grid.h_X[1] = float3{.5, 0, 0};
grid.copy_to_device();
for (auto i = 0; i < 10; i++)
grid.take_step<no_pw_int, friction_on_background>(0.05, push);
grid.copy_to_host();
MU_ASSERT("Grid friction on background",
isclose(grid.h_X[1].x - grid.h_X[0].x, 1));
grid.h_X[0] = float3{0, 0, 0};
grid.h_X[1] = float3{.5, 0, 0};
grid.copy_to_device();
for (auto i = 0; i < 10; i++) grid.take_step<no_pw_int>(0.05, push);
grid.copy_to_host();
MU_ASSERT("Grid friction w/ neighbour",
isclose(grid.h_X[1].x - grid.h_X[0].x, 0.75));
return NULL;
}
const char* test_fix_point()
{
Solution<float3, 100, Tile_solver> tile;
random_sphere(0.733333, tile);
auto fix_point = 13;
tile.h_X[fix_point] = float3{0};
tile.copy_to_device();
tile.set_fixed(fix_point);
tile.take_step<clipped_spring>(0.1);
tile.copy_to_host();
MU_ASSERT("Fixed point moved in x", isclose(tile.h_X[fix_point].x, 0));
MU_ASSERT("Fixed point moved in y", isclose(tile.h_X[fix_point].y, 0));
MU_ASSERT("Fixed point moved in z", isclose(tile.h_X[fix_point].z, 0));
return NULL;
}
template<int n_max>
__global__ void single_grid(const Grid<n_max>* __restrict__ d_grid)
{
auto i = threadIdx.x + blockDim.x * threadIdx.y +
blockDim.x * blockDim.y * threadIdx.z;
auto cube_id_origin = (GRID_SIZE * GRID_SIZE * GRID_SIZE) / 2 +
(GRID_SIZE * GRID_SIZE) / 2 + GRID_SIZE / 2;
auto expected_cube = cube_id_origin + threadIdx.x +
(GRID_SIZE * threadIdx.y) +
(GRID_SIZE * GRID_SIZE * threadIdx.z);
auto one_point_per_cube = d_grid->d_cube_start[expected_cube] ==
d_grid->d_cube_end[expected_cube];
D_ASSERT(one_point_per_cube); // Thus no sorting!
D_ASSERT(d_grid->d_cube_id[i] == expected_cube);
}
template<int n_max>
__global__ void double_grid(const Grid<n_max>* __restrict__ d_grid)
{
auto i = threadIdx.x + blockDim.x * threadIdx.y +
blockDim.x * blockDim.y * threadIdx.z;
auto cube_id_origin = (GRID_SIZE * GRID_SIZE * GRID_SIZE) / 2 +
(GRID_SIZE * GRID_SIZE) / 2 + GRID_SIZE / 2;
auto expected_cube =
static_cast<int>(cube_id_origin + floor(threadIdx.x / 2.f) +
(GRID_SIZE * floor(threadIdx.y / 2.f)) +
(GRID_SIZE * GRID_SIZE * floor(threadIdx.z / 2.f)));
auto in_expected_cube = false;
for (auto j = d_grid->d_cube_start[expected_cube];
j <= d_grid->d_cube_end[expected_cube]; j++) {
if (d_grid->d_point_id[j] == i) in_expected_cube = true;
}
D_ASSERT(in_expected_cube);
}
const char* test_grid_spacing()
{
const auto n_x = 7;
const auto n_y = 7;
const auto n_z = 7;
Solution<float3, n_x * n_y * n_z, Grid_solver> bolls;
for (auto i = 0; i < n_z; i++) {
for (auto j = 0; j < n_y; j++) {
for (auto k = 0; k < n_x; k++) {
bolls.h_X[n_x * n_y * i + n_x * j + k].x = k + 0.5;
bolls.h_X[n_x * n_y * i + n_x * j + k].y = j + 0.5;
bolls.h_X[n_x * n_y * i + n_x * j + k].z = i + 0.5;
}
}
}
bolls.copy_to_device();
Grid<n_x * n_y * n_z> grid;
grid.build(bolls, 1);
single_grid<<<1, dim3{n_x, n_y, n_z}>>>(grid.d_grid);
grid.build(bolls, 2);
double_grid<<<1, dim3{n_x, n_y, n_z}>>>(grid.d_grid);
cudaDeviceSynchronize(); // Wait for device to exit
return NULL;
}
const char* all_tests()
{
MU_RUN_TEST(test_oscillation);
MU_RUN_TEST(test_tile_tetrahedron);
MU_RUN_TEST(test_grid_tetrahedron);
MU_RUN_TEST(test_compare_methods);
MU_RUN_TEST(test_generic_forces);
MU_RUN_TEST(test_friction);
MU_RUN_TEST(test_fix_point);
MU_RUN_TEST(test_grid_spacing);
return NULL;
}
MU_RUN_SUITE(all_tests);
|
377f0acc81ff68ea610774f7ce235f672614b49d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
All modification made by Intel Corporation: © 2016 Intel Corporation
All contributions by the University of California:
Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
All rights reserved.
All other contributions:
Copyright (c) 2014, 2015, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <vector>
#include "caffe/layers/dropout_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void DropoutForward(const int n, const Dtype* in,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out) {
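// Inverted dropout: units whose mask value is <= threshold are zeroed, and the survivors
// are multiplied by scale, which is expected to compensate for the dropped fraction so the
// test-time path (a plain copy) needs no rescaling.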
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] * (mask[index] > threshold) * scale;
}
}
template <typename Dtype>
void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if (this->phase_ == TRAIN) {
unsigned int* mask =
static_cast<unsigned int*>(rand_vec_.mutable_gpu_data());
caffe_gpu_rng_uniform(count, mask);
// set thresholds
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( DropoutForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, mask, uint_thres_, scale_, top_data);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(count, bottom_data, top_data);
}
}
template <typename Dtype>
__global__ void DropoutBackward(const int n, const Dtype* in_diff,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);
}
}
template <typename Dtype>
void DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (this->phase_ == TRAIN) {
const unsigned int* mask =
static_cast<const unsigned int*>(rand_vec_.gpu_data());
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( DropoutBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DropoutLayer);
} // namespace caffe
|
377f0acc81ff68ea610774f7ce235f672614b49d.cu
|
/*
All modification made by Intel Corporation: © 2016 Intel Corporation
All contributions by the University of California:
Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
All rights reserved.
All other contributions:
Copyright (c) 2014, 2015, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <vector>
#include "caffe/layers/dropout_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void DropoutForward(const int n, const Dtype* in,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] * (mask[index] > threshold) * scale;
}
}
template <typename Dtype>
void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if (this->phase_ == TRAIN) {
unsigned int* mask =
static_cast<unsigned int*>(rand_vec_.mutable_gpu_data());
caffe_gpu_rng_uniform(count, mask);
// set thresholds
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, mask, uint_thres_, scale_, top_data);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(count, bottom_data, top_data);
}
}
template <typename Dtype>
__global__ void DropoutBackward(const int n, const Dtype* in_diff,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);
}
}
template <typename Dtype>
void DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (this->phase_ == TRAIN) {
const unsigned int* mask =
static_cast<const unsigned int*>(rand_vec_.gpu_data());
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutBackward<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DropoutLayer);
} // namespace caffe
|
718e68f3dc5fc526d0a58df44c541d9cd98a83b5.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gtest/gtest.h"
#include "gmock/gmock.h"
#include "hip/hip_runtime_api.h"
#include "tests/utilities/cudf_test_utils.cuh"
#include "tests/utilities/cudf_test_fixtures.h"
#include "bitmask/bit_mask.cuh"
#include <chrono>
struct BitMaskTest : public GdfTest {};
//
// Kernel to count bits set in the bit mask
//
__global__ void count_bits_g(int *counter, BitMask bits) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int local_counter = 0;
int i;
for (i = index ; i < (bits.num_elements() - 1) ; i += stride) {
local_counter += __popc(bits.get_element_device(i));
}
if (i == (bits.num_elements() - 1)) {
//
// Special case... last word is only partial
//
int bits_used = bits.length() % bit_mask::detail::BITS_PER_ELEMENT;
if (bits_used == 0) {
//
// The whole word is used
//
local_counter += __popc(bits.get_element_device(i));
} else {
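// Mask off the unused high bits: ((1 << bits_used) - 1) keeps only the bits_used
// low-order bits of the final word before counting.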
local_counter += __popc(bits.get_element_device(i) & ((bit_mask_t{1} << bits_used) - 1));
}
}
atomicAdd(counter, local_counter);
}
//
// Testing function, will set a bit in a container. This assumes <1,1>
// for simplicity - all of the tests are small.
//
__global__ void set_bit(gdf_size_type bit, BitMask bits) {
bits.set_bit_unsafe(bit);
}
//
// Kernel to do safe bit set/clear
//
__global__ void test_safe_set_clear_g(BitMask bits) {
int index = threadIdx.x;
if ((index % 2) == 0) {
for (int i = index ; i < bits.length() ; i += bit_mask::detail::BITS_PER_ELEMENT) {
bits.set_bit(i);
}
}
for (int i = index ; i < bits.length() ; i += bit_mask::detail::BITS_PER_ELEMENT) {
bits.clear_bit(i);
}
if ((index % 2) == 0) {
for (int i = index ; i < bits.length() ; i += bit_mask::detail::BITS_PER_ELEMENT) {
bits.set_bit(i);
}
}
}
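//
// Host helper: launches count_bits_g with an optional grid/block configuration
// (defaults to a single thread, which is enough for the small functional tests;
// PerformanceTest passes a larger grid and block size).
//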
__host__ gdf_error count_bits(gdf_size_type *count, const BitMask &bit_mask, int a = 1, int b = 1) {
int *count_d;
CUDA_TRY(hipMalloc(&count_d, sizeof(int)));
CUDA_TRY(hipMemset(count_d, 0, sizeof(int)));
hipLaunchKernelGGL(( count_bits_g), dim3(a),dim3(b), 0, 0, count_d, bit_mask);
CUDA_TRY(hipMemcpy(count, count_d, sizeof(int), hipMemcpyDeviceToHost));
CUDA_TRY(hipFree(count_d));
return GDF_SUCCESS;
}
TEST_F(BitMaskTest, NoValids)
{
const int num_rows = 100;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
gdf_size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(gdf_size_type{0}, local_count);
}
TEST_F(BitMaskTest, AllValids)
{
const int num_rows = 100;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 1));
BitMask bit_mask(bits, num_rows);
gdf_size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(gdf_size_type{100}, local_count);
}
TEST_F(BitMaskTest, FirstRowValid)
{
const int num_rows = 4;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, 0, bit_mask);
gdf_size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(gdf_size_type{1}, local_count);
bit_mask_t temp = 0;
bit_mask.get_element_host(0, temp);
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(temp, bit_mask_t{0x1});
}
TEST_F(BitMaskTest, EveryOtherBit)
{
const int num_rows = 8;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, 0, bit_mask);
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, 2, bit_mask);
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, 4, bit_mask);
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, 6, bit_mask);
gdf_size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(gdf_size_type{4}, local_count);
bit_mask_t temp = 0;
bit_mask.get_element_host(0, temp);
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(temp, bit_mask_t{0x55});
}
TEST_F(BitMaskTest, OtherEveryOtherBit)
{
const int num_rows = 8;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, 1, bit_mask);
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, 3, bit_mask);
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, 5, bit_mask);
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, 7, bit_mask);
gdf_size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(gdf_size_type{4}, local_count);
bit_mask_t temp = 0;
bit_mask.get_element_host(0, temp);
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(temp, bit_mask_t{0xAA});
}
TEST_F(BitMaskTest, 15rows)
{
const int num_rows = 15;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, 0, bit_mask);
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, 8, bit_mask);
gdf_size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(gdf_size_type{2}, local_count);
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
}
TEST_F(BitMaskTest, 5rows)
{
const int num_rows = 5;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, 0, bit_mask);
gdf_size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(gdf_size_type{1}, local_count);
}
TEST_F(BitMaskTest, 10ValidRows)
{
const int num_rows = 10;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 1));
BitMask bit_mask(bits, num_rows);
gdf_size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(gdf_size_type{10}, local_count);
}
TEST_F(BitMaskTest, MultipleOfEight)
{
const int num_rows = 1024;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
for (int i = 0 ; i < num_rows ; i += 8) {
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, i, bit_mask);
}
gdf_size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(gdf_size_type{128}, local_count);
}
TEST_F(BitMaskTest, NotMultipleOfEight)
{
const int num_rows = 1023;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
for (int i = 7 ; i < num_rows ; i += 8) {
hipLaunchKernelGGL(( set_bit), dim3(1),dim3(1), 0, 0, i, bit_mask);
}
gdf_size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(gdf_size_type{127}, local_count);
}
TEST_F(BitMaskTest, TenThousandRows)
{
const int num_rows = 10000;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 1));
BitMask bit_mask(bits, num_rows);
gdf_size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(gdf_size_type{10000}, local_count);
}
TEST_F(BitMaskTest, PerformanceTest)
{
const int num_rows = 100000000;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
int num_elements = bit_mask::num_elements(num_rows);
int block_size = 256;
int grid_size = (num_elements + block_size - 1)/block_size;
uint32_t *local_valid = (uint32_t *) malloc(num_elements * sizeof(uint32_t));
for (int i = 0 ; i < num_elements ; ++i) {
local_valid[i] = 0x55555555U;
}
EXPECT_EQ(GDF_SUCCESS, bit_mask::copy_bit_mask(bit_mask.get_valid(), local_valid, num_rows, hipMemcpyHostToDevice));
auto start = std::chrono::system_clock::now();
hipProfilerStart();
for(int i = 0; i < 1000; ++i) {
gdf_size_type local_count = 0;
count_bits(&local_count, bit_mask, grid_size, block_size);
}
hipProfilerStop();
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> elapsed_seconds = end-start;
std::cout << "Elapsed time (ms): " << elapsed_seconds.count()*1000 << std::endl;
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
free(local_valid);
}
TEST_F(BitMaskTest, CudaThreadingTest)
{
const int num_rows = 100000;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
hipLaunchKernelGGL(( test_safe_set_clear_g), dim3(1),dim3(bit_mask::detail::BITS_PER_ELEMENT), 0, 0, bit_mask);
gdf_size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ((gdf_size_type) (num_rows/2), local_count);
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
}
TEST_F(BitMaskTest, PaddingTest)
{
//
// Set the number of rows to 32, we'll try padding to
// 256 bytes.
//
const int num_rows = 32;
const int padding_bytes = 256;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 1, padding_bytes));
BitMask bit_mask(bits, num_rows);
gdf_size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ((gdf_size_type) num_rows, local_count);
//
// To test this, we should be able to access the last element
//
int last_element = (padding_bytes / sizeof(bit_mask_t)) - 1;
bit_mask_t temp = bit_mask_t{0};
bit_mask.get_element_host(last_element, temp);
EXPECT_EQ(~bit_mask_t{0}, temp);
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
}
|
718e68f3dc5fc526d0a58df44c541d9cd98a83b5.cu
|
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gtest/gtest.h"
#include "gmock/gmock.h"
#include "cuda_profiler_api.h"
#include "tests/utilities/cudf_test_utils.cuh"
#include "tests/utilities/cudf_test_fixtures.h"
#include "bitmask/bit_mask.cuh"
#include <chrono>
struct BitMaskTest : public GdfTest {};
//
// Kernel to count bits set in the bit mask
//
__global__ void count_bits_g(int *counter, BitMask bits) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int local_counter = 0;
int i;
for (i = index ; i < (bits.num_elements() - 1) ; i += stride) {
local_counter += __popc(bits.get_element_device(i));
}
if (i == (bits.num_elements() - 1)) {
//
// Special case... last word is only partial
//
int bits_used = bits.length() % bit_mask::detail::BITS_PER_ELEMENT;
if (bits_used == 0) {
//
// The whole word is used
//
local_counter += __popc(bits.get_element_device(i));
} else {
local_counter += __popc(bits.get_element_device(i) & ((bit_mask_t{1} << bits_used) - 1));
}
}
atomicAdd(counter, local_counter);
}
//
// Testing function, will set a bit in a container. This assumes <1,1>
// for simplicity - all of the tests are small.
//
__global__ void set_bit(gdf_size_type bit, BitMask bits) {
bits.set_bit_unsafe(bit);
}
//
// Kernel to do safe bit set/clear
//
__global__ void test_safe_set_clear_g(BitMask bits) {
int index = threadIdx.x;
if ((index % 2) == 0) {
for (int i = index ; i < bits.length() ; i += bit_mask::detail::BITS_PER_ELEMENT) {
bits.set_bit(i);
}
}
for (int i = index ; i < bits.length() ; i += bit_mask::detail::BITS_PER_ELEMENT) {
bits.clear_bit(i);
}
if ((index % 2) == 0) {
for (int i = index ; i < bits.length() ; i += bit_mask::detail::BITS_PER_ELEMENT) {
bits.set_bit(i);
}
}
}
__host__ gdf_error count_bits(gdf_size_type *count, const BitMask &bit_mask, int a = 1, int b = 1) {
int *count_d;
CUDA_TRY(cudaMalloc(&count_d, sizeof(int)));
CUDA_TRY(cudaMemset(count_d, 0, sizeof(int)));
count_bits_g<<<a,b>>>(count_d, bit_mask);
CUDA_TRY(cudaMemcpy(count, count_d, sizeof(int), cudaMemcpyDeviceToHost));
CUDA_TRY(cudaFree(count_d));
return GDF_SUCCESS;
}
TEST_F(BitMaskTest, NoValids)
{
const int num_rows = 100;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
gdf_size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(gdf_size_type{0}, local_count);
}
TEST_F(BitMaskTest, AllValids)
{
const int num_rows = 100;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 1));
BitMask bit_mask(bits, num_rows);
gdf_size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(gdf_size_type{100}, local_count);
}
TEST_F(BitMaskTest, FirstRowValid)
{
const int num_rows = 4;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
set_bit<<<1,1>>>(0, bit_mask);
gdf_size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(gdf_size_type{1}, local_count);
bit_mask_t temp = 0;
bit_mask.get_element_host(0, temp);
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(temp, bit_mask_t{0x1});
}
TEST_F(BitMaskTest, EveryOtherBit)
{
const int num_rows = 8;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
set_bit<<<1,1>>>(0, bit_mask);
set_bit<<<1,1>>>(2, bit_mask);
set_bit<<<1,1>>>(4, bit_mask);
set_bit<<<1,1>>>(6, bit_mask);
gdf_size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(gdf_size_type{4}, local_count);
bit_mask_t temp = 0;
bit_mask.get_element_host(0, temp);
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(temp, bit_mask_t{0x55});
}
TEST_F(BitMaskTest, OtherEveryOtherBit)
{
const int num_rows = 8;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
set_bit<<<1,1>>>(1, bit_mask);
set_bit<<<1,1>>>(3, bit_mask);
set_bit<<<1,1>>>(5, bit_mask);
set_bit<<<1,1>>>(7, bit_mask);
gdf_size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(gdf_size_type{4}, local_count);
bit_mask_t temp = 0;
bit_mask.get_element_host(0, temp);
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(temp, bit_mask_t{0xAA});
}
TEST_F(BitMaskTest, 15rows)
{
const int num_rows = 15;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
set_bit<<<1,1>>>(0, bit_mask);
set_bit<<<1,1>>>(8, bit_mask);
gdf_size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(gdf_size_type{2}, local_count);
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
}
TEST_F(BitMaskTest, 5rows)
{
const int num_rows = 5;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
set_bit<<<1,1>>>(0, bit_mask);
gdf_size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(gdf_size_type{1}, local_count);
}
TEST_F(BitMaskTest, 10ValidRows)
{
const int num_rows = 10;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 1));
BitMask bit_mask(bits, num_rows);
gdf_size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(gdf_size_type{10}, local_count);
}
TEST_F(BitMaskTest, MultipleOfEight)
{
const int num_rows = 1024;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
for (int i = 0 ; i < num_rows ; i += 8) {
set_bit<<<1,1>>>(i, bit_mask);
}
gdf_size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(gdf_size_type{128}, local_count);
}
TEST_F(BitMaskTest, NotMultipleOfEight)
{
const int num_rows = 1023;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
for (int i = 7 ; i < num_rows ; i += 8) {
set_bit<<<1,1>>>(i, bit_mask);
}
gdf_size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(gdf_size_type{127}, local_count);
}
TEST_F(BitMaskTest, TenThousandRows)
{
const int num_rows = 10000;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 1));
BitMask bit_mask(bits, num_rows);
gdf_size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
EXPECT_EQ(gdf_size_type{10000}, local_count);
}
TEST_F(BitMaskTest, PerformanceTest)
{
const int num_rows = 100000000;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
int num_elements = bit_mask::num_elements(num_rows);
int block_size = 256;
int grid_size = (num_elements + block_size - 1)/block_size;
uint32_t *local_valid = (uint32_t *) malloc(num_elements * sizeof(uint32_t));
for (int i = 0 ; i < num_elements ; ++i) {
local_valid[i] = 0x55555555U;
}
EXPECT_EQ(GDF_SUCCESS, bit_mask::copy_bit_mask(bit_mask.get_valid(), local_valid, num_rows, cudaMemcpyHostToDevice));
auto start = std::chrono::system_clock::now();
cudaProfilerStart();
for(int i = 0; i < 1000; ++i) {
gdf_size_type local_count = 0;
count_bits(&local_count, bit_mask, grid_size, block_size);
}
cudaProfilerStop();
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> elapsed_seconds = end-start;
std::cout << "Elapsed time (ms): " << elapsed_seconds.count()*1000 << std::endl;
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
free(local_valid);
}
TEST_F(BitMaskTest, CudaThreadingTest)
{
const int num_rows = 100000;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 0));
BitMask bit_mask(bits, num_rows);
test_safe_set_clear_g<<<1,bit_mask::detail::BITS_PER_ELEMENT>>>(bit_mask);
gdf_size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ((gdf_size_type) (num_rows/2), local_count);
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
}
TEST_F(BitMaskTest, PaddingTest)
{
//
// Set the number of rows to 32, we'll try padding to
// 256 bytes.
//
const int num_rows = 32;
const int padding_bytes = 256;
bit_mask_t *bits = nullptr;
EXPECT_EQ(GDF_SUCCESS, bit_mask::create_bit_mask(&bits, num_rows, 1, padding_bytes));
BitMask bit_mask(bits, num_rows);
gdf_size_type local_count = 0;
EXPECT_EQ(GDF_SUCCESS, count_bits(&local_count, bit_mask));
EXPECT_EQ((gdf_size_type) num_rows, local_count);
//
// To test this, we should be able to access the last element
//
int last_element = (padding_bytes / sizeof(bit_mask_t)) - 1;
bit_mask_t temp = bit_mask_t{0};
bit_mask.get_element_host(last_element, temp);
EXPECT_EQ(~bit_mask_t{0}, temp);
EXPECT_EQ(GDF_SUCCESS, bit_mask::destroy_bit_mask(bits));
}
|
c80905da6e28eb66d99e767fa07e77043982ed7e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <random>
#include <math.h>
#include <unistd.h>
#define PI 3.1415926535
#define RANGE 600
__host__ __device__ double ackley(int DIMENSION, double *point, int offset);//RANGE = 1
__host__ __device__ double griewangk(int DIMENSION, double *point, int offset);//RANGE = 600
__host__ __device__ double schwefel(int DIMENSION, double *point, int offset);//RANGE = 500
__host__ __device__ double dejong(int DIMENSION, double *point, int offset);//RANGE = 5.12
__host__ __device__ double rastrigin(int DIMENSION, double *point, int offset);//RANGE = 5.12
//Replace with a predefined function and set RANGE accordingly
__host__ __device__ double fitness(int DIMENSION, double *point, int offset){
return griewangk(DIMENSION, point, offset);
}
__host__ __device__ double ackley(int DIMENSION, double *point, int offset){
double sum_result1 = 0;
double sum_result2 = 0;
for(int i=0; i<DIMENSION; i++){
sum_result1 += point[offset+i]*point[offset+i];
sum_result2 += cos(2*PI*point[offset+i]);
}
return -20*exp(-0.2*sqrt(sum_result1/DIMENSION))-exp(sum_result2/DIMENSION)+20+exp(1.);
}
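//Griewank: f(x) = 1 + sum_i x_i^2/4000 - prod_i cos(x_i/sqrt(i)),
//global minimum f(0) = 0, typically evaluated on [-600, 600]^DIMENSION.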
__host__ __device__ double griewangk(int DIMENSION, double *point, int offset){
double sum_result = 0;
double product_result = 1;
for(int i=0; i<DIMENSION; i++){
sum_result += point[offset+i]/4000*point[offset+i];
product_result *= cos(point[offset+i]/sqrt((double)i+1));
}
return sum_result-product_result+1;
}
__host__ __device__ double schwefel(int DIMENSION, double *point, int offset){
double result = 0;
for(int i=0; i<DIMENSION; i++){
result -= point[offset+i]*sin(sqrt(abs(point[offset+i])));
}
return result+DIMENSION*418.9829;//shifted to obtain 0 minimum
}
__host__ __device__ double dejong(int DIMENSION, double *point, int offset){
double result = 0;
for(int i=0; i<DIMENSION; i++){
result += point[offset+i]*point[offset+i];
}
return result;
}
__host__ __device__ double rastrigin(int DIMENSION, double *point, int offset){
double result = DIMENSION*10;
for(int i=0; i<DIMENSION; i++){
result += point[offset+i]*point[offset+i]-10*cos(2*PI*point[offset+i]);
}
return result;
}
__global__ void curand_init_kernel(hiprandState_t *state, int seed){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
hiprand_init(seed, tid, 0, &state[tid]);
}
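//One simulated-annealing instance per thread: each step perturbs a single
//randomly chosen coordinate of the candidate point, accepts the move
//unconditionally when the fitness improves and otherwise with probability
//exp((old-new)/T), then multiplies the instance's temperature by its decay factor.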
__global__ void sa_kernel(int INSTANCES, int DIMENSION, int GPU_ITERATIONS, hiprandState_t *state, double *decay, double *point, double *neighbor, double *temperature) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < INSTANCES) {
for(int i=0; i<GPU_ITERATIONS; i++){
			int axis = (int) trunc(hiprand_uniform_double(&state[tid])*DIMENSION);
			if(axis >= DIMENSION) axis = DIMENSION-1; //hiprand_uniform_double can return exactly 1.0
neighbor[tid*DIMENSION+axis] = (hiprand_uniform_double(&state[tid])*2-1)*RANGE;
double point_fitness=fitness(DIMENSION, point, DIMENSION*tid);
double neighbor_fitness=fitness(DIMENSION, neighbor, DIMENSION*tid);
double change_prob = 2; //ensure change
if(neighbor_fitness>point_fitness) change_prob = exp((point_fitness-neighbor_fitness)/temperature[tid]);
if(change_prob > hiprand_uniform_double(&state[tid]))
point[tid*DIMENSION+axis] = neighbor[tid*DIMENSION+axis];
else
neighbor[tid*DIMENSION+axis] = point[tid*DIMENSION+axis];
temperature[tid]*=decay[tid];
}
}
}
int main(int argc, char **argv) {
if(argc<5) {
printf("Required arguments:\nINSTANCES - less than 1024\nDIMENSION - size of a problem\nITERATIONS - number of times the data will be synchronized with GPUs\nGPU_ITERATIONS - number of iterations per core in a single synchronization step\n");
return 0;
}
const int INSTANCES = atoi(argv[1]);
const int DIMENSION = atoi(argv[2]);
const int ITERATIONS = atoi(argv[3]);
const int GPU_ITERATIONS = atoi(argv[4]);
time_t time_measure = time(0);
const int RANDOM_SEED = time(0)*getpid();
hiprandState_t* dev_state;
hipMalloc((void**) &dev_state, INSTANCES * sizeof(hiprandState_t));
hipLaunchKernelGGL(( curand_init_kernel), dim3(1),dim3(INSTANCES), 0, 0, dev_state, RANDOM_SEED);
std::default_random_engine generator(static_cast<long unsigned int>(RANDOM_SEED));
std::uniform_real_distribution<double> decay_distribution(0.9,1.0);
std::uniform_real_distribution<double> point_distribution(-RANGE,RANGE);
double *decay = new double[INSTANCES];
for(int i=0; i<INSTANCES; i++){
decay_distribution.reset();
decay[i] = decay_distribution(generator);
//printf("DECAY FACTOR %d: %f\n",i,decay[i]);
}
double *dev_decay;
hipMalloc((void**)&dev_decay, INSTANCES * sizeof(double));
hipMemcpy(dev_decay, decay, INSTANCES * sizeof(double), hipMemcpyHostToDevice);
double *point = new double[INSTANCES*DIMENSION];
double *neighbor = new double[INSTANCES*DIMENSION];
for(int i=0; i<INSTANCES; i++){
for(int d=0; d<DIMENSION; d++){
point_distribution.reset();
point[i*DIMENSION+d]=point_distribution(generator);
neighbor[i*DIMENSION+d]=point[i*DIMENSION+d];
}
}
double *dev_point, *dev_neighbor;
hipMalloc((void**)&dev_point, INSTANCES * DIMENSION * sizeof(double));
hipMemcpy(dev_point, point, INSTANCES * DIMENSION * sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void**)&dev_neighbor, INSTANCES * DIMENSION * sizeof(double));
hipMemcpy(dev_neighbor, neighbor, INSTANCES * DIMENSION * sizeof(double), hipMemcpyHostToDevice);
double *temperature = new double[INSTANCES];
for(int i=0; i<INSTANCES; i++){
temperature[i] = 100.0;
}
double *dev_temperature;
hipMalloc((void**)&dev_temperature, INSTANCES * sizeof(double));
hipMemcpy(dev_temperature, temperature, INSTANCES * sizeof(double), hipMemcpyHostToDevice);
//######################## KERNEL ########################
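	//After every GPU_ITERATIONS annealing steps: copy the points back to the host,
	//pick the best instance and broadcast its point to all instances before relaunching.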
for(int i=0; i<ITERATIONS; i++){
hipLaunchKernelGGL(( sa_kernel), dim3(1),dim3(INSTANCES), 0, 0, INSTANCES, DIMENSION, GPU_ITERATIONS, dev_state, dev_decay, dev_point, dev_neighbor, dev_temperature);
hipDeviceSynchronize();
hipMemcpy(point, dev_point, INSTANCES * DIMENSION * sizeof(double), hipMemcpyDeviceToHost);
//Pick the minimal point
int minimum_index = 0;
double minimum_value = fitness(DIMENSION,point,0);
for(int k=1; k<INSTANCES; k++){
double minimum_value_contestant = fitness(DIMENSION,point,DIMENSION*k);
if(minimum_value_contestant<minimum_value){
minimum_value = minimum_value_contestant;
minimum_index = k;
}
}
//Copy minimal point
for(int k=0; k<INSTANCES; k++){
for(int t=0; t<DIMENSION; t++){
point[DIMENSION*k+t] = point[DIMENSION*minimum_index+t];
}
}
hipMemcpy(dev_point, point, INSTANCES * DIMENSION * sizeof(double), hipMemcpyHostToDevice);
}
hipMemcpy(point, dev_point, INSTANCES * DIMENSION * sizeof(double), hipMemcpyDeviceToHost);
printf("%d\t%.10f\n", time(0)-time_measure, fitness(DIMENSION,point,0));
delete [] decay;
delete [] neighbor;
delete [] point;
delete [] temperature;
hipFree(dev_decay);
hipFree(dev_neighbor);
hipFree(dev_point);
hipFree(dev_temperature);
return 0;
}
|
c80905da6e28eb66d99e767fa07e77043982ed7e.cu
|
#include <stdio.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <random>
#include <math.h>
#include <unistd.h>
#define PI 3.1415926535
#define RANGE 600
__host__ __device__ double ackley(int DIMENSION, double *point, int offset);//RANGE = 1
__host__ __device__ double griewangk(int DIMENSION, double *point, int offset);//RANGE = 600
__host__ __device__ double schwefel(int DIMENSION, double *point, int offset);//RANGE = 500
__host__ __device__ double dejong(int DIMENSION, double *point, int offset);//RANGE = 5.12
__host__ __device__ double rastrigin(int DIMENSION, double *point, int offset);//RANGE = 5.12
//Replace with a predefined function and set RANGE accordingly
__host__ __device__ double fitness(int DIMENSION, double *point, int offset){
return griewangk(DIMENSION, point, offset);
}
__host__ __device__ double ackley(int DIMENSION, double *point, int offset){
double sum_result1 = 0;
double sum_result2 = 0;
for(int i=0; i<DIMENSION; i++){
sum_result1 += point[offset+i]*point[offset+i];
sum_result2 += cos(2*PI*point[offset+i]);
}
return -20*exp(-0.2*sqrt(sum_result1/DIMENSION))-exp(sum_result2/DIMENSION)+20+exp(1.);
}
__host__ __device__ double griewangk(int DIMENSION, double *point, int offset){
double sum_result = 0;
double product_result = 1;
for(int i=0; i<DIMENSION; i++){
sum_result += point[offset+i]/4000*point[offset+i];
product_result *= cos(point[offset+i]/sqrt((double)i+1));
}
return sum_result-product_result+1;
}
__host__ __device__ double schwefel(int DIMENSION, double *point, int offset){
double result = 0;
for(int i=0; i<DIMENSION; i++){
result -= point[offset+i]*sin(sqrt(abs(point[offset+i])));
}
return result+DIMENSION*418.9829;//shifted to obtain 0 minimum
}
__host__ __device__ double dejong(int DIMENSION, double *point, int offset){
double result = 0;
for(int i=0; i<DIMENSION; i++){
result += point[offset+i]*point[offset+i];
}
return result;
}
__host__ __device__ double rastrigin(int DIMENSION, double *point, int offset){
double result = DIMENSION*10;
for(int i=0; i<DIMENSION; i++){
result += point[offset+i]*point[offset+i]-10*cos(2*PI*point[offset+i]);
}
return result;
}
__global__ void curand_init_kernel(curandState *state, int seed){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
curand_init(seed, tid, 0, &state[tid]);
}
__global__ void sa_kernel(int INSTANCES, int DIMENSION, int GPU_ITERATIONS, curandState *state, double *decay, double *point, double *neighbor, double *temperature) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < INSTANCES) {
for(int i=0; i<GPU_ITERATIONS; i++){
			int axis = (int) trunc(curand_uniform_double(&state[tid])*DIMENSION);
			if(axis >= DIMENSION) axis = DIMENSION-1; //curand_uniform_double can return exactly 1.0
neighbor[tid*DIMENSION+axis] = (curand_uniform_double(&state[tid])*2-1)*RANGE;
double point_fitness=fitness(DIMENSION, point, DIMENSION*tid);
double neighbor_fitness=fitness(DIMENSION, neighbor, DIMENSION*tid);
double change_prob = 2; //ensure change
if(neighbor_fitness>point_fitness) change_prob = exp((point_fitness-neighbor_fitness)/temperature[tid]);
if(change_prob > curand_uniform_double(&state[tid]))
point[tid*DIMENSION+axis] = neighbor[tid*DIMENSION+axis];
else
neighbor[tid*DIMENSION+axis] = point[tid*DIMENSION+axis];
temperature[tid]*=decay[tid];
}
}
}
int main(int argc, char **argv) {
if(argc<5) {
printf("Required arguments:\nINSTANCES - less than 1024\nDIMENSION - size of a problem\nITERATIONS - number of times the data will be synchronized with GPUs\nGPU_ITERATIONS - number of iterations per core in a single synchronization step\n");
return 0;
}
const int INSTANCES = atoi(argv[1]);
const int DIMENSION = atoi(argv[2]);
const int ITERATIONS = atoi(argv[3]);
const int GPU_ITERATIONS = atoi(argv[4]);
time_t time_measure = time(0);
const int RANDOM_SEED = time(0)*getpid();
curandState* dev_state;
cudaMalloc((void**) &dev_state, INSTANCES * sizeof(curandState));
curand_init_kernel<<<1,INSTANCES>>>(dev_state, RANDOM_SEED);
std::default_random_engine generator(static_cast<long unsigned int>(RANDOM_SEED));
std::uniform_real_distribution<double> decay_distribution(0.9,1.0);
std::uniform_real_distribution<double> point_distribution(-RANGE,RANGE);
double *decay = new double[INSTANCES];
for(int i=0; i<INSTANCES; i++){
decay_distribution.reset();
decay[i] = decay_distribution(generator);
//printf("DECAY FACTOR %d: %f\n",i,decay[i]);
}
double *dev_decay;
cudaMalloc((void**)&dev_decay, INSTANCES * sizeof(double));
cudaMemcpy(dev_decay, decay, INSTANCES * sizeof(double), cudaMemcpyHostToDevice);
double *point = new double[INSTANCES*DIMENSION];
double *neighbor = new double[INSTANCES*DIMENSION];
for(int i=0; i<INSTANCES; i++){
for(int d=0; d<DIMENSION; d++){
point_distribution.reset();
point[i*DIMENSION+d]=point_distribution(generator);
neighbor[i*DIMENSION+d]=point[i*DIMENSION+d];
}
}
double *dev_point, *dev_neighbor;
cudaMalloc((void**)&dev_point, INSTANCES * DIMENSION * sizeof(double));
cudaMemcpy(dev_point, point, INSTANCES * DIMENSION * sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_neighbor, INSTANCES * DIMENSION * sizeof(double));
cudaMemcpy(dev_neighbor, neighbor, INSTANCES * DIMENSION * sizeof(double), cudaMemcpyHostToDevice);
double *temperature = new double[INSTANCES];
for(int i=0; i<INSTANCES; i++){
temperature[i] = 100.0;
}
double *dev_temperature;
cudaMalloc((void**)&dev_temperature, INSTANCES * sizeof(double));
cudaMemcpy(dev_temperature, temperature, INSTANCES * sizeof(double), cudaMemcpyHostToDevice);
//######################## KERNEL ########################
for(int i=0; i<ITERATIONS; i++){
sa_kernel<<<1,INSTANCES>>>(INSTANCES, DIMENSION, GPU_ITERATIONS, dev_state, dev_decay, dev_point, dev_neighbor, dev_temperature);
cudaDeviceSynchronize();
cudaMemcpy(point, dev_point, INSTANCES * DIMENSION * sizeof(double), cudaMemcpyDeviceToHost);
//Pick the minimal point
int minimum_index = 0;
double minimum_value = fitness(DIMENSION,point,0);
for(int k=1; k<INSTANCES; k++){
double minimum_value_contestant = fitness(DIMENSION,point,DIMENSION*k);
if(minimum_value_contestant<minimum_value){
minimum_value = minimum_value_contestant;
minimum_index = k;
}
}
//Copy minimal point
for(int k=0; k<INSTANCES; k++){
for(int t=0; t<DIMENSION; t++){
point[DIMENSION*k+t] = point[DIMENSION*minimum_index+t];
}
}
cudaMemcpy(dev_point, point, INSTANCES * DIMENSION * sizeof(double), cudaMemcpyHostToDevice);
}
cudaMemcpy(point, dev_point, INSTANCES * DIMENSION * sizeof(double), cudaMemcpyDeviceToHost);
printf("%d\t%.10f\n", time(0)-time_measure, fitness(DIMENSION,point,0));
delete [] decay;
delete [] neighbor;
delete [] point;
delete [] temperature;
cudaFree(dev_decay);
cudaFree(dev_neighbor);
cudaFree(dev_point);
cudaFree(dev_temperature);
return 0;
}
|
3a67d6866554d87289920e31916b28fbe2043117.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <unistd.h>
#include <sys/time.h>
#include <stdint.h>
#define MAX 10
#define MIN 1
void lu_decomp(float *a, float *u,int dimension);
__global__ void DUKernel(float *D_a, float *D_u,unsigned int size);
uint64_t getTime();
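// Fills a dimension x dimension matrix with random values, computes the L factor
// on the host, then calls lu_decomp() to compute U on the GPU and prints the
// dimension and elapsed time.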
int main(int argc, char **argv){
float *a, *u, *l;
int dimension;
	if(argc < 2){
		printf("Usage: %s dimension\n", argv[0]);
		return 1;
	}
	dimension = atoi(argv[1]);
a= (float*)malloc(sizeof(float) * (dimension*dimension));
l= (float*)malloc(sizeof(float) * (dimension*dimension));
u= (float*)malloc(sizeof(float) * (dimension*dimension));
for(int i = 0; i<dimension; i++)
{
for(int j = 0; j<dimension; j++)
{
a[(i*dimension)+j] = rand() % (MAX - MIN) + MIN;
u[(i*dimension)+j] = a[(i*dimension)+j];
if(i == j)
{
l[(i*dimension)+j] = 1;
}
else
{
l[(i*dimension)+j] = 0;
}
}
}
for(int k = 0; k < dimension-1; k++)
{
for(int j=k+1; j < dimension; j++ )
{
l[(j*dimension)+k] = a[(j*dimension)+k]/a[(k*dimension)+k];
u[(j*dimension)+k]=0;
}
}
/*printf("U before\n");
for(int i = 0; i<dimension; i++)
{
for(int j = 0; j<dimension; j++)
{
printf("%15f",u[(i*dimension)+j]);
}
printf("\n");
}*/
lu_decomp(a,u,dimension);
/*
remove this comment for verification part
float temp =0;
float x=0;
float diff_allowed=10;
for(int i =0; i < dimension; i++)
{
for(int j=0; j < dimension; j++)
{
temp =0;
for(int k=0; k < dimension; k++)
{
temp = temp + l[(i*dimension)+k]* u[(k*dimension)+j];
temp=a[(i*dimension)+j];
}
//printf("%15f",temp);
if((abs(temp-a[(i*dimension)+j])>diff_allowed))
{
x=abs(temp-a[(i*dimension)+j]);
printf("problem");
printf("diff: %5f\n",x);
}
}
//printf("\n");
}
remove this comment for verification
*/
//printf("\n");
//printf("U Matrix:\n");
/*
for(int i = 0; i<dimension; i++)
{
for(int j = 0; j<dimension; j++)
{
printf("%15f",u[(i*dimension)+j]);
}
printf("\n");
}
for(int i = 0; i<dimension; i++)
{
for(int j = 0; j<dimension; j++)
{
printf("%15f",l[(i*dimension)+j]);
}
printf("\n");
}
printf("\n");
printf("Original Matrix:\n");
for(int i = 0; i<dimension; i++)
{
for(int j = 0; j<dimension; j++)
{
printf("%15f",a[(i*dimension)+j]);
}
printf("\n");
}*/
return 0;
}
void lu_decomp(float *a,float *u, int dimension)
{
float *d_a ,*d_u;
uint64_t astart, aend;
astart = getTime();
hipMalloc(&d_a, (dimension*dimension)*sizeof(float));
hipMalloc(&d_u, (dimension*dimension)*sizeof(float));
//Copying data to device from host
hipMemcpy(d_a, a, sizeof(float)*dimension*(dimension),hipMemcpyHostToDevice);
hipMemcpy(d_u, u, sizeof(float)*dimension*(dimension),hipMemcpyHostToDevice);
//Kernel call
if(dimension<1001)
hipLaunchKernelGGL(( DUKernel), dim3(dimension) ,dim3(dimension),4*dimension*dimension, 0, d_a, d_u ,dimension);
else
hipLaunchKernelGGL(( DUKernel), dim3((dimension*dimension/1000)),dim3(1000),4*dimension*dimension, 0, d_a, d_u ,dimension);
//DUKernel<<<1024 ,100,4*dimension*dimension>>>(d_a,d_u, dimension);
	//Copying data to host from device
hipMemcpy(a,d_a,sizeof(float)*dimension*(dimension),hipMemcpyDeviceToHost);
//hipMemcpy(l,d_l,sizeof(float)*dimension*(dimension),hipMemcpyDeviceToHost);
hipMemcpy(u,d_u,sizeof(float)*dimension*(dimension),hipMemcpyDeviceToHost);
//Deallocating memory on the device
hipFree(d_a);
hipFree(d_u);
aend = getTime();
printf("%d ,%f \n",dimension,(aend-astart)/1000000.0);
}
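// DUKernel applies the Gaussian-elimination row updates to U. Each block requests
// 4*dimension*dimension bytes of dynamic shared memory, so with the usual 48 KB
// per-block limit the kernel only launches for dimension <= ~110.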
__global__ void DUKernel(float *D_a,float *D_u, unsigned int dimension)
{
	// A 10x10 matrix was used for the experiment, i.e. argv[1] = 10
extern __shared__ float temp[];
int k=threadIdx.x;
int j=blockIdx.x;
int p= threadIdx.x+(blockIdx.x*blockDim.x);
temp[p]=D_u[p];
__syncthreads();
int i=0;
int s=0;
while(i<threadIdx.x && s< blockIdx.x)
{
temp[p]=temp[p]-(temp[(s*dimension)+(k*(j/1000))+k] * ((temp[(j*dimension)+(i*(j/1000))+i])/temp[(j*dimension)+(j*(j/1000))+j]));
i++;
s++;
}
/* printf("outside1 Temp:%10f k:%d j:%d\n",temp[(k*dimension)+j],k,j);
float p=temp[(j*dimension)+k]/temp[(k*dimension)+k];
for(int i=(k+1);i<dimension;i++)
{
//printf("inside loop%d\n",i);
//printf("before Temp:%10f,j:%d i:%d\n",temp[(j*dimension)+i]);
temp[(j*dimension)+i]=temp[(j*dimension)+i]-(temp[(k*dimension)+i]*p);
//printf("after:Temp:%10f\n",temp[j*dimension+i]);
//printf("after j:%d i:%d",j,i);
}*/
__syncthreads();
D_u[p]=temp[p];
}
uint64_t getTime(){
struct timeval t;
gettimeofday(&t, NULL);
return (uint64_t)(t.tv_sec)*1000000 + (uint64_t)(t.tv_usec);
}
|
3a67d6866554d87289920e31916b28fbe2043117.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <unistd.h>
#include <sys/time.h>
#include <stdint.h>
#define MAX 10
#define MIN 1
void lu_decomp(float *a, float *u,int dimension);
__global__ void DUKernel(float *D_a, float *D_u,unsigned int size);
uint64_t getTime();
int main(int argc, char **argv){
float *a, *u, *l;
int dimension;
	if(argc < 2){
		printf("Usage: %s dimension\n", argv[0]);
		return 1;
	}
	dimension = atoi(argv[1]);
a= (float*)malloc(sizeof(float) * (dimension*dimension));
l= (float*)malloc(sizeof(float) * (dimension*dimension));
u= (float*)malloc(sizeof(float) * (dimension*dimension));
for(int i = 0; i<dimension; i++)
{
for(int j = 0; j<dimension; j++)
{
a[(i*dimension)+j] = rand() % (MAX - MIN) + MIN;
u[(i*dimension)+j] = a[(i*dimension)+j];
if(i == j)
{
l[(i*dimension)+j] = 1;
}
else
{
l[(i*dimension)+j] = 0;
}
}
}
for(int k = 0; k < dimension-1; k++)
{
for(int j=k+1; j < dimension; j++ )
{
l[(j*dimension)+k] = a[(j*dimension)+k]/a[(k*dimension)+k];
u[(j*dimension)+k]=0;
}
}
/*printf("U before\n");
for(int i = 0; i<dimension; i++)
{
for(int j = 0; j<dimension; j++)
{
printf("%15f",u[(i*dimension)+j]);
}
printf("\n");
}*/
lu_decomp(a,u,dimension);
/*
remove this comment for verification part
float temp =0;
float x=0;
float diff_allowed=10;
for(int i =0; i < dimension; i++)
{
for(int j=0; j < dimension; j++)
{
temp =0;
for(int k=0; k < dimension; k++)
{
temp = temp + l[(i*dimension)+k]* u[(k*dimension)+j];
temp=a[(i*dimension)+j];
}
//printf("%15f",temp);
if((abs(temp-a[(i*dimension)+j])>diff_allowed))
{
x=abs(temp-a[(i*dimension)+j]);
printf("problem");
printf("diff: %5f\n",x);
}
}
//printf("\n");
}
remove this comment for verification
*/
//printf("\n");
//printf("U Matrix:\n");
/*
for(int i = 0; i<dimension; i++)
{
for(int j = 0; j<dimension; j++)
{
printf("%15f",u[(i*dimension)+j]);
}
printf("\n");
}
for(int i = 0; i<dimension; i++)
{
for(int j = 0; j<dimension; j++)
{
printf("%15f",l[(i*dimension)+j]);
}
printf("\n");
}
printf("\n");
printf("Original Matrix:\n");
for(int i = 0; i<dimension; i++)
{
for(int j = 0; j<dimension; j++)
{
printf("%15f",a[(i*dimension)+j]);
}
printf("\n");
}*/
return 0;
}
void lu_decomp(float *a,float *u, int dimension)
{
float *d_a ,*d_u;
uint64_t astart, aend;
astart = getTime();
cudaMalloc(&d_a, (dimension*dimension)*sizeof(float));
cudaMalloc(&d_u, (dimension*dimension)*sizeof(float));
//Copying data to device from host
cudaMemcpy(d_a, a, sizeof(float)*dimension*(dimension),cudaMemcpyHostToDevice);
cudaMemcpy(d_u, u, sizeof(float)*dimension*(dimension),cudaMemcpyHostToDevice);
//Kernel call
if(dimension<1001)
DUKernel<<<dimension ,dimension,4*dimension*dimension>>>(d_a, d_u ,dimension);
else
DUKernel<<<(dimension*dimension/1000),1000,4*dimension*dimension>>>(d_a, d_u ,dimension);
//DUKernel<<<1024 ,100,4*dimension*dimension>>>(d_a,d_u, dimension);
	//Copying data to host from device
cudaMemcpy(a,d_a,sizeof(float)*dimension*(dimension),cudaMemcpyDeviceToHost);
//cudaMemcpy(l,d_l,sizeof(float)*dimension*(dimension),cudaMemcpyDeviceToHost);
cudaMemcpy(u,d_u,sizeof(float)*dimension*(dimension),cudaMemcpyDeviceToHost);
//Deallocating memory on the device
cudaFree(d_a);
cudaFree(d_u);
aend = getTime();
printf("%d ,%f \n",dimension,(aend-astart)/1000000.0);
}
__global__ void DUKernel(float *D_a,float *D_u, unsigned int dimension)
{
	// A 10x10 matrix was used for the experiment, i.e. argv[1] = 10
extern __shared__ float temp[];
int k=threadIdx.x;
int j=blockIdx.x;
int p= threadIdx.x+(blockIdx.x*blockDim.x);
temp[p]=D_u[p];
__syncthreads();
int i=0;
int s=0;
while(i<threadIdx.x && s< blockIdx.x)
{
temp[p]=temp[p]-(temp[(s*dimension)+(k*(j/1000))+k] * ((temp[(j*dimension)+(i*(j/1000))+i])/temp[(j*dimension)+(j*(j/1000))+j]));
i++;
s++;
}
/* printf("outside1 Temp:%10f k:%d j:%d\n",temp[(k*dimension)+j],k,j);
float p=temp[(j*dimension)+k]/temp[(k*dimension)+k];
for(int i=(k+1);i<dimension;i++)
{
//printf("inside loop%d\n",i);
//printf("before Temp:%10f,j:%d i:%d\n",temp[(j*dimension)+i]);
temp[(j*dimension)+i]=temp[(j*dimension)+i]-(temp[(k*dimension)+i]*p);
//printf("after:Temp:%10f\n",temp[j*dimension+i]);
//printf("after j:%d i:%d",j,i);
}*/
__syncthreads();
D_u[p]=temp[p];
}
uint64_t getTime(){
struct timeval t;
gettimeofday(&t, NULL);
return (uint64_t)(t.tv_sec)*1000000 + (uint64_t)(t.tv_usec);
}
|
9444cab08e5c24d61625984cacd293e3c74cfc08.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorSort.cu"
#else
// In alignment with default sort on a c++ map, this function
// will permute key and value tensors identically, and
// in such a way that the 'key' tensor is ordered numerically
THC_API void THCTensor_(sortKeyValueInplace)(THCState* state,
THCTensor* key,
THCudaLongTensor* value,
int dim, bool dir) {
THArgCheck(key->sizes().equals(value->sizes()), 2,
"Key tensor must have same size as value tensor");
int dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, value);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimensionLegacyNoScalars)(state, key);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
ptrdiff_t inElements = THCTensor_(nElement)(state, key);
if (inElements == 0) {
return;
}
int64_t keySliceSize = THCTensor_(sizeLegacyNoScalars)(state, key, dim);
ptrdiff_t keySlices = inElements / keySliceSize;
// The amount of shared memory and block size is based on
// 2^ceil(lg(n)); we choose that sorting implementation for a given
// size.
int64_t ceilPowerOf2 = nextHighestPowerOf2(keySliceSize);
// FIXME: We'd have to find some other trick with Thrust to perform a
// vectorized (key, value) sort by slice segment
if (ceilPowerOf2 > 2048) {
THError("sortKeyValueInplace only works for sizes <= 2048 at present");
}
// The grid is based on the number of independent slices that we
// have to sort; one block per slice
dim3 grid;
if (!THC_getGridFromTiles(keySlices, grid)) {
THError("Slice to sort is too large");
}
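// HANDLE_CASE instantiates bitonicSortKVInPlace for a given index type (TYPE),
// dimensionality specialization (A) and power-of-two sort size (SIZE), picking the
// comparator from `dir`; HANDLE_SORT_CASE below selects the smallest instantiated
// SIZE that covers ceilPowerOf2.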
#define HANDLE_CASE(TYPE, A, SIZE) \
do { \
int blockSize = SIZE / 2; \
if (blockSize < 1) { \
blockSize = 1; \
} \
\
dim3 block(blockSize); \
\
if (dir) { \
hipLaunchKernelGGL(( bitonicSortKVInPlace<real, int64_t, A, -1, GTComp<real>, TYPE, SIZE>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
keyInfo, \
keySlices, \
(TYPE) keySliceSize, \
(TYPE) keyInfo.strides[collapseKeyDim], \
valueInfo, \
(TYPE) valueInfo.strides[collapseValueDim], \
GTComp<real>()); \
} else { \
hipLaunchKernelGGL(( bitonicSortKVInPlace<real, int64_t, A, -1, LTComp<real>, TYPE, SIZE>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
keyInfo, \
keySlices, \
(TYPE) keySliceSize, \
(TYPE) keyInfo.strides[collapseKeyDim], \
valueInfo, \
(TYPE) valueInfo.strides[collapseValueDim], \
LTComp<real>()); \
} \
} while (0)
#define HANDLE_SORT_CASE(TYPE, A) \
{ \
switch (ceilPowerOf2) { \
case 2048: \
HANDLE_CASE(TYPE, A, 2048); \
break; \
case 1024: \
case 512: \
case 256: \
HANDLE_CASE(TYPE, A, 1024); \
break; \
case 128: \
case 64: \
HANDLE_CASE(TYPE, A, 128); \
break; \
case 32: \
case 16: \
case 8: \
case 4: \
case 2: \
HANDLE_CASE(TYPE, A, 32); \
break; \
case 1: \
/* Nothing to do, data already sorted */ \
break; \
default: \
assert(false); \
} \
}
// The constructed key/value tensor info is used to select the slice
// we are sorting on a per-block basis
if (THCTensor_canUse32BitIndexMath(state, key)) {
TensorInfo<real, unsigned int> keyInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, key);
keyInfo.reduceDim(dim);
int collapseKeyDim = keyInfo.collapseDims(dim);
TensorInfo<int64_t, unsigned int> valueInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, value);
valueInfo.reduceDim(dim);
int collapseValueDim = valueInfo.collapseDims(dim);
if (keyInfo.isContiguous()) {
HANDLE_SORT_CASE(unsigned int, -2);
} else {
switch (keyInfo.dims) {
case 2:
HANDLE_SORT_CASE(unsigned int, 2);
break;
default:
HANDLE_SORT_CASE(unsigned int, -1);
break;
}
}
} else {
TensorInfo<real, uint64_t> keyInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, key);
keyInfo.reduceDim(dim);
int collapseKeyDim = keyInfo.collapseDims(dim);
TensorInfo<int64_t, uint64_t> valueInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, value);
valueInfo.reduceDim(dim);
int collapseValueDim = valueInfo.collapseDims(dim);
// int64_t case is rare, just instantiate the generic version
HANDLE_SORT_CASE(uint64_t, -1);
}
#undef HANDLE_CASE
#undef HANDLE_SORT_CASE
#undef HANDLE_A_CASE
THCudaCheck(hipGetLastError());
}
void THCTensor_(sortViaThrust)(THCState* state,
THCTensor* sorted,
THCudaLongTensor* indices,
THCTensor* input,
int dim, bool dir) {
int nDims = THCTensor_(nDimensionLegacyAll)(state, input);
ptrdiff_t totalElements = THCTensor_(nElement)(state, input);
int64_t sliceSize = THCTensor_(sizeLegacyNoScalars)(state, input, dim);
int64_t sliceStride = THTensor_strideLegacyNoScalars(input, dim);
// We perform a vectorized segmented sort in Thrust.
// Say we are sorting a (2, 3) tensor. We have in flattened form:
// values 0.4 1.2 5.3 6.2 1.3 2.3
// indices 0 1 2 3 4 5
// where indices is a global index (across all slices)
// First we sort by values, globally:
// values 6.2 5.3 2.3 1.2 1.3 0.4
// indices 3 2 5 1 4 0
// Then we stable sort by segment, which is index / 3:
// values 5.3 1.2 0.4 6.2 2.3 1.3
// indices 2 1 0 3 5 4
// Then we translate the global index to a per-slice Lua index
// (index % 3) + 1:
// values 5.3 1.2 0.4 6.2 2.3 1.3
// indices 3 2 1 1 3 2
// This method can only work if the slice we are sorting (`dim`) is
// innermost, and both values and indices are contiguous. We do this
// by re-arranging the input into this form as needed, which will
// unfortunately allocate memory if the request is not in this form.
// Vectorized sort is slower than iterated sort if the number of
// slices is small (since we're sorting twice, instead of invoking a
// smaller sort `numSlices` times), but the Thrust sort
// implementation here is a catch-all, so we're not looking for
// efficiency, but instead correctness.
THCTensor_(copy)(state, sorted, input);
THCTensor* trKeys = THCTensor_(newWithTensor)(state, sorted);
THCudaLongTensor* trIndices = THCudaLongTensor_newWithTensor(state, indices);
// Transpose dim to innermost
if (dim != nDims - 1) {
THCTensor_(transpose)(state, trKeys, NULL, dim, nDims - 1);
THCudaLongTensor_transpose(state, trIndices, NULL, dim, nDims - 1);
}
// Thrust must operate on a contiguous layout
THCTensor* trContigKey = THCTensor_(newContiguous)(state, trKeys);
THCudaLongTensor* trContigIndices = THCudaLongTensor_newContiguous(state, trIndices);
THCTensor_(free)(state, trKeys);
THCudaLongTensor_free(state, trIndices);
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<real> keyIter(THCTensor_(data)(state, trContigKey));
// Since we are composing a global index across all segments rather
// than a per-segment index, we treat the memory as int so we don't
// have problems sorting slices < 2^24 but where the entire tensor
// has more than 2^24 elements
thrust::device_ptr<int64_t>
indexIter((int64_t*) THCudaLongTensor_data(state, trContigIndices));
// Fill the indices with a global index across all slices
thrust::counting_iterator<int64_t> countIter(0);
thrust::copy(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
countIter, countIter + totalElements, indexIter);
// First, we sort globally (across all slices) according to key
// (the values we're sorting)
if (dir) {
thrust::stable_sort_by_key(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
keyIter, keyIter + totalElements, indexIter, ThrustGTOp<real>());
} else {
thrust::stable_sort_by_key(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
keyIter, keyIter + totalElements, indexIter, ThrustLTOp<real>());
}
// Then, re-sort according to slice that each index is
// in. This completes the segment sort in Thrust, since we're
// stably sorting here, preserving the relative order of values
// per each slice
thrust::stable_sort_by_key(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
indexIter, indexIter + totalElements, keyIter,
SliceComp(sliceSize));
// Translate the global integer 0-based index to a per-slice real
// Lua index
thrust::for_each(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
indexIter, indexIter + totalElements,
GlobalIndexToPerSliceIndex(sliceSize));
// Reverse the transposition as needed
if (dim != nDims - 1) {
THCTensor_(transpose)(state, trContigKey, NULL, dim, nDims - 1);
THCudaLongTensor_transpose(state, trContigIndices, NULL, dim, nDims - 1);
}
// Then copy back to the expected output
THCTensor_(freeCopyTo)(state, trContigKey, sorted);
THCudaLongTensor_freeCopyTo(state, trContigIndices, indices);
}
THC_API void THCTensor_(sort)(THCState* state,
THCTensor *sorted,
THCudaLongTensor *indices,
THCTensor *input,
int dim, int order) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, sorted, input));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int64_t dims = THCTensor_(nDimensionLegacyNoScalars)(state, sorted);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimensionLegacyNoScalars)(state, input);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
// Make sure sufficient output space is allocated
THCTensor_(resizeAs)(state, sorted, input);
THLongStorage *inputSize = THCTensor_(newSizeOf)(state, input);
THCudaLongTensor_resize(state, indices, inputSize, NULL);
THLongStorage_free(inputSize);
// How large are the slices that we are sorting?
int64_t sliceSize = THCTensor_(sizeLegacyNoScalars)(state, input, dim);
// Workaround:
// CUDA 8 uses more shared memory than 7.5 for bitonicSortKVInPlace,
// and so for the double word types,
// we get "too many resources requested for launch" in the 2048 case
#if TORCH_HIP_VERSION >= 8000
#if defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_LONG)
int maxSliceSize = 1024;
#else
int maxSliceSize = 2048;
#endif
#else
int maxSliceSize = 2048;
#endif
if (sliceSize <= maxSliceSize) {
// Fill `indices` (the values) with the
// slice-relative index.
THCudaLongTensor_fillSliceWithIndex(state, indices, dim);
// We sort k/v pairs in-place; copy unsorted input to output
THCTensor_(copy)(state, sorted, input);
// Sort using our in-place k/v kernel that supports arbitrary
// layout
THCTensor_(sortKeyValueInplace)(state, sorted, indices, dim, order);
} else {
// Otherwise, fall back upon Thrust, which handles all other cases
// (potentially slowly, with extra copies/memory allocations)
THCTensor_(sortViaThrust)(state, sorted, indices, input, dim, (bool) order);
}
THCudaCheck(hipGetLastError());
}
#endif
|
9444cab08e5c24d61625984cacd293e3c74cfc08.cu
|
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorSort.cu"
#else
// In alignment with default sort on a c++ map, this function
// will permute key and value tensors identically, and
// in such a way that the 'key' tensor is ordered numerically
THC_API void THCTensor_(sortKeyValueInplace)(THCState* state,
THCTensor* key,
THCudaLongTensor* value,
int dim, bool dir) {
THArgCheck(key->sizes().equals(value->sizes()), 2,
"Key tensor must have same size as value tensor");
int dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, value);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimensionLegacyNoScalars)(state, key);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
ptrdiff_t inElements = THCTensor_(nElement)(state, key);
if (inElements == 0) {
return;
}
int64_t keySliceSize = THCTensor_(sizeLegacyNoScalars)(state, key, dim);
ptrdiff_t keySlices = inElements / keySliceSize;
// The amount of shared memory and block size is based on
// 2^ceil(lg(n)); we choose that sorting implementation for a given
// size.
int64_t ceilPowerOf2 = nextHighestPowerOf2(keySliceSize);
// FIXME: We'd have to find some other trick with Thrust to perform a
// vectorized (key, value) sort by slice segment
if (ceilPowerOf2 > 2048) {
THError("sortKeyValueInplace only works for sizes <= 2048 at present");
}
// The grid is based on the number of independent slices that we
// have to sort; one block per slice
dim3 grid;
if (!THC_getGridFromTiles(keySlices, grid)) {
THError("Slice to sort is too large");
}
#define HANDLE_CASE(TYPE, A, SIZE) \
do { \
int blockSize = SIZE / 2; \
if (blockSize < 1) { \
blockSize = 1; \
} \
\
dim3 block(blockSize); \
\
if (dir) { \
bitonicSortKVInPlace<real, int64_t, A, -1, GTComp<real>, TYPE, SIZE> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>>( \
keyInfo, \
keySlices, \
(TYPE) keySliceSize, \
(TYPE) keyInfo.strides[collapseKeyDim], \
valueInfo, \
(TYPE) valueInfo.strides[collapseValueDim], \
GTComp<real>()); \
} else { \
bitonicSortKVInPlace<real, int64_t, A, -1, LTComp<real>, TYPE, SIZE> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>>( \
keyInfo, \
keySlices, \
(TYPE) keySliceSize, \
(TYPE) keyInfo.strides[collapseKeyDim], \
valueInfo, \
(TYPE) valueInfo.strides[collapseValueDim], \
LTComp<real>()); \
} \
} while (0)
#define HANDLE_SORT_CASE(TYPE, A) \
{ \
switch (ceilPowerOf2) { \
case 2048: \
HANDLE_CASE(TYPE, A, 2048); \
break; \
case 1024: \
case 512: \
case 256: \
HANDLE_CASE(TYPE, A, 1024); \
break; \
case 128: \
case 64: \
HANDLE_CASE(TYPE, A, 128); \
break; \
case 32: \
case 16: \
case 8: \
case 4: \
case 2: \
HANDLE_CASE(TYPE, A, 32); \
break; \
case 1: \
/* Nothing to do, data already sorted */ \
break; \
default: \
assert(false); \
} \
}
// The constructed key/value tensor info is used to select the slice
// we are sorting on a per-block basis
if (THCTensor_canUse32BitIndexMath(state, key)) {
TensorInfo<real, unsigned int> keyInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, key);
keyInfo.reduceDim(dim);
int collapseKeyDim = keyInfo.collapseDims(dim);
TensorInfo<int64_t, unsigned int> valueInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, value);
valueInfo.reduceDim(dim);
int collapseValueDim = valueInfo.collapseDims(dim);
if (keyInfo.isContiguous()) {
HANDLE_SORT_CASE(unsigned int, -2);
} else {
switch (keyInfo.dims) {
case 2:
HANDLE_SORT_CASE(unsigned int, 2);
break;
default:
HANDLE_SORT_CASE(unsigned int, -1);
break;
}
}
} else {
TensorInfo<real, uint64_t> keyInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, key);
keyInfo.reduceDim(dim);
int collapseKeyDim = keyInfo.collapseDims(dim);
TensorInfo<int64_t, uint64_t> valueInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, value);
valueInfo.reduceDim(dim);
int collapseValueDim = valueInfo.collapseDims(dim);
// int64_t case is rare, just instantiate the generic version
HANDLE_SORT_CASE(uint64_t, -1);
}
#undef HANDLE_CASE
#undef HANDLE_SORT_CASE
#undef HANDLE_A_CASE
THCudaCheck(cudaGetLastError());
}
void THCTensor_(sortViaThrust)(THCState* state,
THCTensor* sorted,
THCudaLongTensor* indices,
THCTensor* input,
int dim, bool dir) {
int nDims = THCTensor_(nDimensionLegacyAll)(state, input);
ptrdiff_t totalElements = THCTensor_(nElement)(state, input);
int64_t sliceSize = THCTensor_(sizeLegacyNoScalars)(state, input, dim);
int64_t sliceStride = THTensor_strideLegacyNoScalars(input, dim);
// We perform a vectorized segmented sort in Thrust.
// Say we are sorting a (2, 3) tensor. We have in flattened form:
// values 0.4 1.2 5.3 6.2 1.3 2.3
// indices 0 1 2 3 4 5
// where indices is a global index (across all slices)
// First we sort by values, globally:
// values 6.2 5.3 2.3 1.2 1.3 0.4
// indices 3 2 5 1 4 0
// Then we stable sort by segment, which is index / 3:
// values 5.3 1.2 0.4 6.2 2.3 1.3
// indices 2 1 0 3 5 4
// Then we translate the global index to a per-slice Lua index
// (index % 3) + 1:
// values 5.3 1.2 0.4 6.2 2.3 1.3
// indices 3 2 1 1 3 2
// This method can only work if the slice we are sorting (`dim`) is
// innermost, and both values and indices are contiguous. We do this
// by re-arranging the input into this form as needed, which will
// unfortunately allocate memory if the request is not in this form.
// Vectorized sort is slower than iterated sort if the number of
// slices is small (since we're sorting twice, instead of invoking a
// smaller sort `numSlices` times), but the Thrust sort
// implementation here is a catch-all, so we're not looking for
// efficiency, but instead correctness.
THCTensor_(copy)(state, sorted, input);
THCTensor* trKeys = THCTensor_(newWithTensor)(state, sorted);
THCudaLongTensor* trIndices = THCudaLongTensor_newWithTensor(state, indices);
// Transpose dim to innermost
if (dim != nDims - 1) {
THCTensor_(transpose)(state, trKeys, NULL, dim, nDims - 1);
THCudaLongTensor_transpose(state, trIndices, NULL, dim, nDims - 1);
}
// Thrust must operate on a contiguous layout
THCTensor* trContigKey = THCTensor_(newContiguous)(state, trKeys);
THCudaLongTensor* trContigIndices = THCudaLongTensor_newContiguous(state, trIndices);
THCTensor_(free)(state, trKeys);
THCudaLongTensor_free(state, trIndices);
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<real> keyIter(THCTensor_(data)(state, trContigKey));
// Since we are composing a global index across all segments rather
// than a per-segment index, we treat the memory as int so we don't
// have problems sorting slices < 2^24 but where the entire tensor
// has more than 2^24 elements
thrust::device_ptr<int64_t>
indexIter((int64_t*) THCudaLongTensor_data(state, trContigIndices));
// Fill the indices with a global index across all slices
thrust::counting_iterator<int64_t> countIter(0);
thrust::copy(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
countIter, countIter + totalElements, indexIter);
// First, we sort globally (across all slices) according to key
// (the values we're sorting)
if (dir) {
thrust::stable_sort_by_key(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
keyIter, keyIter + totalElements, indexIter, ThrustGTOp<real>());
} else {
thrust::stable_sort_by_key(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
keyIter, keyIter + totalElements, indexIter, ThrustLTOp<real>());
}
// Then, re-sort according to slice that each index is
// in. This completes the segment sort in Thrust, since we're
// stably sorting here, preserving the relative order of values
// per each slice
thrust::stable_sort_by_key(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
indexIter, indexIter + totalElements, keyIter,
SliceComp(sliceSize));
// Translate the global integer 0-based index to a per-slice real
// Lua index
thrust::for_each(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
indexIter, indexIter + totalElements,
GlobalIndexToPerSliceIndex(sliceSize));
// Reverse the transposition as needed
if (dim != nDims - 1) {
THCTensor_(transpose)(state, trContigKey, NULL, dim, nDims - 1);
THCudaLongTensor_transpose(state, trContigIndices, NULL, dim, nDims - 1);
}
// Then copy back to the expected output
THCTensor_(freeCopyTo)(state, trContigKey, sorted);
THCudaLongTensor_freeCopyTo(state, trContigIndices, indices);
}
THC_API void THCTensor_(sort)(THCState* state,
THCTensor *sorted,
THCudaLongTensor *indices,
THCTensor *input,
int dim, int order) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, sorted, input));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int64_t dims = THCTensor_(nDimensionLegacyNoScalars)(state, sorted);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimensionLegacyNoScalars)(state, input);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
// Make sure sufficient output space is allocated
THCTensor_(resizeAs)(state, sorted, input);
THLongStorage *inputSize = THCTensor_(newSizeOf)(state, input);
THCudaLongTensor_resize(state, indices, inputSize, NULL);
THLongStorage_free(inputSize);
// How large are the slices that we are sorting?
int64_t sliceSize = THCTensor_(sizeLegacyNoScalars)(state, input, dim);
// Workaround:
// CUDA 8 uses more shared memory than 7.5 for bitonicSortKVInPlace,
// and so for the double word types,
// we get "too many resources requested for launch" in the 2048 case
#if CUDA_VERSION >= 8000
#if defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_LONG)
int maxSliceSize = 1024;
#else
int maxSliceSize = 2048;
#endif
#else
int maxSliceSize = 2048;
#endif
if (sliceSize <= maxSliceSize) {
// Fill `indices` (the values) with the
// slice-relative index.
THCudaLongTensor_fillSliceWithIndex(state, indices, dim);
// We sort k/v pairs in-place; copy unsorted input to output
THCTensor_(copy)(state, sorted, input);
// Sort using our in-place k/v kernel that supports arbitrary
// layout
THCTensor_(sortKeyValueInplace)(state, sorted, indices, dim, order);
} else {
// Otherwise, fall back upon Thrust, which handles all other cases
// (potentially slowly, with extra copies/memory allocations)
THCTensor_(sortViaThrust)(state, sorted, indices, input, dim, (bool) order);
}
THCudaCheck(cudaGetLastError());
}
#endif
|
cc7a0da9549f799747b39ee066d19a4211e833ac.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions normal z -> c d s
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
File named ztrtri_diag.cu to avoid name conflict with src/ztrtri.o
in the library. The actual kernels are in ztrtri_lower.cu and ztrtri_upper.cu
*/
#include "magma_internal.h"
#define TRTRI_BATCHED
#include "ztrtri.cuh"
/***************************************************************************//**
Purpose
-------
ZTRTRI_DIAG inverts the NB x NB diagonal blocks of a triangular matrix.
This routine is used in ztrsm.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. N >= 0.
@param[in]
dA_array Array of pointers, dimension (batchCount).
Each is a COMPLEX_16 array A of dimension ( ldda, n )
The triangular matrix A.
\n
If UPLO = MagmaUpper, the leading N-by-N upper triangular part of A
contains the upper triangular matrix, and the strictly lower
triangular part of A is not referenced.
\n
If UPLO = MagmaLower, the leading N-by-N lower triangular part of A
contains the lower triangular matrix, and the strictly upper
triangular part of A is not referenced.
\n
If DIAG = MagmaUnit, the diagonal elements of A are also not referenced
and are assumed to be 1.
@param[in]
ldda INTEGER.
The leading dimension of each array A. LDDA >= max(1,N).
@param[out]
dinvA_array Array of pointers, dimension (batchCount).
Each is a COMPLEX_16 array dinvA of dimension (NB, ceil(n/NB)*NB),
where NB = 128.
On exit, contains inverses of the NB-by-NB diagonal blocks of A.
@param[in]
resetozero INTEGER
If not zero, each array dinvA will be reset to all zeros
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_trtri_diag_batched
*******************************************************************************/
extern "C" void
magmablas_ztrtri_diag_batched(
magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
magmaDoubleComplex const * const *dA_array, magma_int_t ldda,
magmaDoubleComplex **dinvA_array,
magma_int_t resetozero, magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if (uplo != MagmaLower && uplo != MagmaUpper)
info = -1;
else if (diag != MagmaNonUnit && diag != MagmaUnit)
info = -2;
else if (n < 0)
info = -3;
else if (ldda < n)
info = -5;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info
}
int nblocks = magma_ceildiv( n, IB );
if ( resetozero ) {
magmablas_zlaset_batched(MagmaFull, magma_roundup( n, NB ), NB, MAGMA_Z_ZERO, MAGMA_Z_ZERO, dinvA_array, magma_roundup( n, NB ), batchCount, queue);
}
    // If cudaMemset were used instead, the whole vector of its initial size would
    // have to be cleared, which would require passing dinvA_length as an extra
    // input parameter; this was tested and turned out to be slower. If the cleared
    // size is not the largest size computed by the high-level getrf_batched API,
    // that is a bug, and magmablas_zlaset_batched must be used instead.
if ( uplo == MagmaLower ) {
// invert diagonal IB x IB inner blocks
dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid
hipLaunchKernelGGL(( ztrtri_diag_lower_kernel_batched)
, dim3(diaggrid), dim3(IB), 0, queue->cuda_stream() ,
diag, n, dA_array, ldda, dinvA_array );
// build up NB x NB blocks (assuming IB=16 here):
// use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads;
// then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads;
// then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads;
// then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads.
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
//printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages );
switch (jb) {
case 16:
hipLaunchKernelGGL(( triple_zgemm16_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_zgemm16_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 32:
hipLaunchKernelGGL(( triple_zgemm32_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_zgemm32_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 64:
hipLaunchKernelGGL(( triple_zgemm64_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_zgemm64_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
default:
hipLaunchKernelGGL(( triple_zgemm_above64_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_zgemm_above64_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_zgemm_above64_part3_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
else {
dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid
hipLaunchKernelGGL(( ztrtri_diag_upper_kernel_batched)
, dim3(diaggrid), dim3(IB), 0, queue->cuda_stream() ,
diag, n, dA_array, ldda, dinvA_array );
// update the inverse up to the size of IB
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
switch (jb) {
case 16:
hipLaunchKernelGGL(( triple_zgemm16_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_zgemm16_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 32:
hipLaunchKernelGGL(( triple_zgemm32_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_zgemm32_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 64:
hipLaunchKernelGGL(( triple_zgemm64_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_zgemm64_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
default:
hipLaunchKernelGGL(( triple_zgemm_above64_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_zgemm_above64_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_zgemm_above64_part3_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
}
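// Illustrative sketch only (not part of the original source): a typical call,
// assuming device pointer arrays dA_array / dinvA_array, their leading dimension
// ldda, a batch count and a magma_queue_t have already been set up elsewhere:
//
//     magmablas_ztrtri_diag_batched( MagmaLower, MagmaNonUnit, n,
//                                    dA_array, ldda, dinvA_array,
//                                    /* resetozero = */ 1, batchCount, queue );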
|
cc7a0da9549f799747b39ee066d19a4211e833ac.cu
|
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions normal z -> c d s
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
File named ztrtri_diag.cu to avoid name conflict with src/ztrtri.o
in the library. The actual kernels are in ztrtri_lower.cu and ztrtri_upper.cu
*/
#include "magma_internal.h"
#define TRTRI_BATCHED
#include "ztrtri.cuh"
/***************************************************************************//**
Purpose
-------
ZTRTRI_DIAG inverts the NB x NB diagonal blocks of a triangular matrix.
This routine is used in ztrsm.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. N >= 0.
@param[in]
dA_array Array of pointers, dimension (batchCount).
Each is a COMPLEX_16 array A of dimension ( ldda, n )
The triangular matrix A.
\n
If UPLO = MagmaUpper, the leading N-by-N upper triangular part of A
contains the upper triangular matrix, and the strictly lower
triangular part of A is not referenced.
\n
If UPLO = MagmaLower, the leading N-by-N lower triangular part of A
contains the lower triangular matrix, and the strictly upper
triangular part of A is not referenced.
\n
If DIAG = MagmaUnit, the diagonal elements of A are also not referenced
and are assumed to be 1.
@param[in]
ldda INTEGER.
The leading dimension of each array A. LDDA >= max(1,N).
@param[out]
dinvA_array Array of pointers, dimension (batchCount).
Each is a COMPLEX_16 array dinvA of dimension (NB, ceil(n/NB)*NB),
where NB = 128.
On exit, contains inverses of the NB-by-NB diagonal blocks of A.
@param[in]
resetozero INTEGER
If not zero, each array dinvA will be reset to all zeros
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_trtri_diag_batched
*******************************************************************************/
extern "C" void
magmablas_ztrtri_diag_batched(
magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
magmaDoubleComplex const * const *dA_array, magma_int_t ldda,
magmaDoubleComplex **dinvA_array,
magma_int_t resetozero, magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if (uplo != MagmaLower && uplo != MagmaUpper)
info = -1;
else if (diag != MagmaNonUnit && diag != MagmaUnit)
info = -2;
else if (n < 0)
info = -3;
else if (ldda < n)
info = -5;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info
}
int nblocks = magma_ceildiv( n, IB );
if ( resetozero ) {
magmablas_zlaset_batched(MagmaFull, magma_roundup( n, NB ), NB, MAGMA_Z_ZERO, MAGMA_Z_ZERO, dinvA_array, magma_roundup( n, NB ), batchCount, queue);
}
    // If cudaMemset were used instead, the whole vector of its initial size would
    // have to be cleared, which would require passing dinvA_length as an extra
    // input parameter; this was tested and turned out to be slower. If the cleared
    // size is not the largest size computed by the high-level getrf_batched API,
    // that is a bug, and magmablas_zlaset_batched must be used instead.
if ( uplo == MagmaLower ) {
// invert diagonal IB x IB inner blocks
dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid
ztrtri_diag_lower_kernel_batched
<<< diaggrid, IB, 0, queue->cuda_stream() >>>
( diag, n, dA_array, ldda, dinvA_array );
// build up NB x NB blocks (assuming IB=16 here):
// use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads;
// then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads;
// then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads;
// then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads.
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
//printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages );
switch (jb) {
case 16:
triple_zgemm16_part1_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_zgemm16_part2_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 32:
triple_zgemm32_part1_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_zgemm32_part2_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 64:
triple_zgemm64_part1_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_zgemm64_part2_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
default:
triple_zgemm_above64_part1_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_zgemm_above64_part2_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_zgemm_above64_part3_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
else {
dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid
ztrtri_diag_upper_kernel_batched
<<< diaggrid, IB, 0, queue->cuda_stream() >>>
( diag, n, dA_array, ldda, dinvA_array );
// build up NB x NB inverse blocks from the IB x IB diagonal blocks (same scheme as the lower-triangular case above)
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
switch (jb) {
case 16:
triple_zgemm16_part1_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_zgemm16_part2_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 32:
triple_zgemm32_part1_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_zgemm32_part2_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 64:
triple_zgemm64_part1_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_zgemm64_part2_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
default:
triple_zgemm_above64_part1_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_zgemm_above64_part2_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_zgemm_above64_part3_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
}
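/*
   Illustrative usage sketch (editor's addition, not part of the original source).
   It assumes the caller has already prepared dA_array and dinvA_array as arrays of
   batchCount device pointers, each dinvA buffer holding magma_roundup(n, NB) * NB
   elements as documented above; batch_A and batch_dinvA are hypothetical names.

       // invert the NB x NB diagonal blocks of batchCount lower-triangular matrices
       magmablas_ztrtri_diag_batched( MagmaLower, MagmaNonUnit, n,
                                      batch_A, ldda, batch_dinvA,
                                      1, batchCount, queue );
*/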
|
8ca22f1e59bfb9783bfcd176fd0d8f31bdb62510.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void mxv(int m, int n, double *a, double *b, double *c){
printf("Have we at least entered the function?\n");
int index,j;
index = threadIdx.x + blockIdx.x*blockDim.x;
double sum;
sum = 0.0;
if(index<m){
for (j=0; j<n; j++){
sum += a[m*j + index]*b[j];
}
c[index] = sum;
}
}
|
8ca22f1e59bfb9783bfcd176fd0d8f31bdb62510.cu
|
#include <stdio.h>
__global__ void mxv(int m, int n, double *a, double *b, double *c){
printf("Have we at least entered the function?\n");
int index,j;
index = threadIdx.x + blockIdx.x*blockDim.x;
double sum;
sum = 0.0;
if(index<m){
for (j=0; j<n; j++){
sum += a[m*j + index]*b[j];
}
c[index] = sum;
}
}
|
4952a2db03ea2d7461a7dbe81eca7d2bdbfd1ace.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "../commom/book.h"
__global__
void add(int a, int b, int *c)
{
*c = a + b;
}
int main( void )
{
int c;
int *dev_c;
HANDLE_ERROR(hipMalloc( (void**)&dev_c, sizeof(int) ) );
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, 2,7,dev_c);
HANDLE_ERROR( hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost));
printf("2 + 7 = %d\n",c);
hipFree( dev_c );
return 0;
}
|
4952a2db03ea2d7461a7dbe81eca7d2bdbfd1ace.cu
|
#include <stdio.h>
#include "../commom/book.h"
__global__
void add(int a, int b, int *c)
{
*c = a + b;
}
int main( void )
{
int c;
int *dev_c;
HANDLE_ERROR(cudaMalloc( (void**)&dev_c, sizeof(int) ) );
add<<<1,1>>>(2,7,dev_c);
HANDLE_ERROR( cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost));
printf("2 + 7 = %d\n",c);
cudaFree( dev_c );
return 0;
}
|
edcc72cae5a1b6fcfc901f6e02d0ac49bbff7895.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "box2d4r-128-1-128_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 161
#define BENCH_RAD 4
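// radius-4 2D box stencil: each output point reads a 9x9 = 81-tap neighborhood (81 mul + 80 add = 161 flops, cf. BENCH_FPP)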
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 9 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 4 - 4);
const AN5D_TYPE __c1Pad = (4);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 4 - 4);
const AN5D_TYPE __c2Pad = (4);
#define __c2 c2
const AN5D_TYPE __halo1 = 4;
const AN5D_TYPE __halo2 = 4;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 120;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
}
}
else if (__c0Len % __side0LenMax)
{
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.00930f * A[t%2][i-4][j-4] +
0.00931f * A[t%2][i-4][j-3] +
0.00932f * A[t%2][i-4][j-2] +
0.00933f * A[t%2][i-4][j-1] +
0.00934f * A[t%2][i-4][j] +
0.00935f * A[t%2][i-4][j+1] +
0.00936f * A[t%2][i-4][j+2] +
0.00937f * A[t%2][i-4][j+3] +
0.00938f * A[t%2][i-4][j+4] +
0.00939f * A[t%2][i-3][j-4] +
0.00940f * A[t%2][i-3][j-3] +
0.00941f * A[t%2][i-3][j-2] +
0.00942f * A[t%2][i-3][j-1] +
0.00943f * A[t%2][i-3][j] +
0.00944f * A[t%2][i-3][j+1] +
0.00945f * A[t%2][i-3][j+2] +
0.00946f * A[t%2][i-3][j+3] +
0.00947f * A[t%2][i-3][j+4] +
0.00948f * A[t%2][i-2][j-4] +
0.00949f * A[t%2][i-2][j-3] +
0.00950f * A[t%2][i-2][j-2] +
0.00951f * A[t%2][i-2][j-1] +
0.00952f * A[t%2][i-2][j] +
0.00953f * A[t%2][i-2][j+1] +
0.00954f * A[t%2][i-2][j+2] +
0.00955f * A[t%2][i-2][j+3] +
0.00956f * A[t%2][i-2][j+4] +
0.00957f * A[t%2][i-1][j-4] +
0.00958f * A[t%2][i-1][j-3] +
0.00959f * A[t%2][i-1][j-2] +
0.00960f * A[t%2][i-1][j-1] +
0.00961f * A[t%2][i-1][j] +
0.00962f * A[t%2][i-1][j+1] +
0.00963f * A[t%2][i-1][j+2] +
0.00964f * A[t%2][i-1][j+3] +
0.00965f * A[t%2][i-1][j+4] +
0.00966f * A[t%2][i][j-4] +
0.00967f * A[t%2][i][j-3] +
0.00968f * A[t%2][i][j-2] +
0.00969f * A[t%2][i][j-1] +
0.22400f * A[t%2][i][j] +
0.00971f * A[t%2][i][j+1] +
0.00972f * A[t%2][i][j+2] +
0.00973f * A[t%2][i][j+3] +
0.00974f * A[t%2][i][j+4] +
0.00975f * A[t%2][i+1][j-4] +
0.00976f * A[t%2][i+1][j-3] +
0.00977f * A[t%2][i+1][j-2] +
0.00978f * A[t%2][i+1][j-1] +
0.00979f * A[t%2][i+1][j] +
0.00980f * A[t%2][i+1][j+1] +
0.00981f * A[t%2][i+1][j+2] +
0.00982f * A[t%2][i+1][j+3] +
0.00983f * A[t%2][i+1][j+4] +
0.00984f * A[t%2][i+2][j-4] +
0.00985f * A[t%2][i+2][j-3] +
0.00986f * A[t%2][i+2][j-2] +
0.00987f * A[t%2][i+2][j-1] +
0.00988f * A[t%2][i+2][j] +
0.00989f * A[t%2][i+2][j+1] +
0.00990f * A[t%2][i+2][j+2] +
0.00991f * A[t%2][i+2][j+3] +
0.00992f * A[t%2][i+2][j+4] +
0.00993f * A[t%2][i+3][j-4] +
0.00994f * A[t%2][i+3][j-3] +
0.00995f * A[t%2][i+3][j-2] +
0.00996f * A[t%2][i+3][j-1] +
0.00997f * A[t%2][i+3][j] +
0.00998f * A[t%2][i+3][j+1] +
0.00999f * A[t%2][i+3][j+2] +
0.01000f * A[t%2][i+3][j+3] +
0.01001f * A[t%2][i+3][j+4] +
0.01002f * A[t%2][i+4][j-4] +
0.01003f * A[t%2][i+4][j-3] +
0.01004f * A[t%2][i+4][j-2] +
0.01005f * A[t%2][i+4][j-1] +
0.01006f * A[t%2][i+4][j] +
0.01007f * A[t%2][i+4][j+1] +
0.01008f * A[t%2][i+4][j+2] +
0.01009f * A[t%2][i+4][j+3] +
0.01010f * A[t%2][i+4][j+4];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
edcc72cae5a1b6fcfc901f6e02d0ac49bbff7895.cu
|
#include <assert.h>
#include <stdio.h>
#include "box2d4r-128-1-128_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 161
#define BENCH_RAD 4
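// radius-4 2D box stencil: each output point reads a 9x9 = 81-tap neighborhood (81 mul + 80 add = 161 flops, cf. BENCH_FPP)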
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 9 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 4 - 4);
const AN5D_TYPE __c1Pad = (4);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 4 - 4);
const AN5D_TYPE __c2Pad = (4);
#define __c2 c2
const AN5D_TYPE __halo1 = 4;
const AN5D_TYPE __halo2 = 4;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 120;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
}
}
else if (__c0Len % __side0LenMax)
{
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.00930f * A[t%2][i-4][j-4] +
0.00931f * A[t%2][i-4][j-3] +
0.00932f * A[t%2][i-4][j-2] +
0.00933f * A[t%2][i-4][j-1] +
0.00934f * A[t%2][i-4][j] +
0.00935f * A[t%2][i-4][j+1] +
0.00936f * A[t%2][i-4][j+2] +
0.00937f * A[t%2][i-4][j+3] +
0.00938f * A[t%2][i-4][j+4] +
0.00939f * A[t%2][i-3][j-4] +
0.00940f * A[t%2][i-3][j-3] +
0.00941f * A[t%2][i-3][j-2] +
0.00942f * A[t%2][i-3][j-1] +
0.00943f * A[t%2][i-3][j] +
0.00944f * A[t%2][i-3][j+1] +
0.00945f * A[t%2][i-3][j+2] +
0.00946f * A[t%2][i-3][j+3] +
0.00947f * A[t%2][i-3][j+4] +
0.00948f * A[t%2][i-2][j-4] +
0.00949f * A[t%2][i-2][j-3] +
0.00950f * A[t%2][i-2][j-2] +
0.00951f * A[t%2][i-2][j-1] +
0.00952f * A[t%2][i-2][j] +
0.00953f * A[t%2][i-2][j+1] +
0.00954f * A[t%2][i-2][j+2] +
0.00955f * A[t%2][i-2][j+3] +
0.00956f * A[t%2][i-2][j+4] +
0.00957f * A[t%2][i-1][j-4] +
0.00958f * A[t%2][i-1][j-3] +
0.00959f * A[t%2][i-1][j-2] +
0.00960f * A[t%2][i-1][j-1] +
0.00961f * A[t%2][i-1][j] +
0.00962f * A[t%2][i-1][j+1] +
0.00963f * A[t%2][i-1][j+2] +
0.00964f * A[t%2][i-1][j+3] +
0.00965f * A[t%2][i-1][j+4] +
0.00966f * A[t%2][i][j-4] +
0.00967f * A[t%2][i][j-3] +
0.00968f * A[t%2][i][j-2] +
0.00969f * A[t%2][i][j-1] +
0.22400f * A[t%2][i][j] +
0.00971f * A[t%2][i][j+1] +
0.00972f * A[t%2][i][j+2] +
0.00973f * A[t%2][i][j+3] +
0.00974f * A[t%2][i][j+4] +
0.00975f * A[t%2][i+1][j-4] +
0.00976f * A[t%2][i+1][j-3] +
0.00977f * A[t%2][i+1][j-2] +
0.00978f * A[t%2][i+1][j-1] +
0.00979f * A[t%2][i+1][j] +
0.00980f * A[t%2][i+1][j+1] +
0.00981f * A[t%2][i+1][j+2] +
0.00982f * A[t%2][i+1][j+3] +
0.00983f * A[t%2][i+1][j+4] +
0.00984f * A[t%2][i+2][j-4] +
0.00985f * A[t%2][i+2][j-3] +
0.00986f * A[t%2][i+2][j-2] +
0.00987f * A[t%2][i+2][j-1] +
0.00988f * A[t%2][i+2][j] +
0.00989f * A[t%2][i+2][j+1] +
0.00990f * A[t%2][i+2][j+2] +
0.00991f * A[t%2][i+2][j+3] +
0.00992f * A[t%2][i+2][j+4] +
0.00993f * A[t%2][i+3][j-4] +
0.00994f * A[t%2][i+3][j-3] +
0.00995f * A[t%2][i+3][j-2] +
0.00996f * A[t%2][i+3][j-1] +
0.00997f * A[t%2][i+3][j] +
0.00998f * A[t%2][i+3][j+1] +
0.00999f * A[t%2][i+3][j+2] +
0.01000f * A[t%2][i+3][j+3] +
0.01001f * A[t%2][i+3][j+4] +
0.01002f * A[t%2][i+4][j-4] +
0.01003f * A[t%2][i+4][j-3] +
0.01004f * A[t%2][i+4][j-2] +
0.01005f * A[t%2][i+4][j-1] +
0.01006f * A[t%2][i+4][j] +
0.01007f * A[t%2][i+4][j+1] +
0.01008f * A[t%2][i+4][j+2] +
0.01009f * A[t%2][i+4][j+3] +
0.01010f * A[t%2][i+4][j+4];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
61c78fbefe8c532d34d6342e2f1c7e8441dbebc8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda.h>
#include<math.h>
#include<sys/time.h>
__global__
void Matadd(char* A, char*B, int N)
{
}
int main()
{
for(int j=0;j<=25;j++)
{
hipEvent_t start1,stop1,start2,stop2;
float time1,time2, time3;
int i;
int N = pow(2,j);
size_t size = N;
printf ("\n The value of N is %d",N);
hipEventCreate(&start1);
hipEventCreate(&stop1);
hipEventCreate(&start2);
hipEventCreate(&stop2);
//allocate input matrices hA, hB, hC,refC in host memory
char* hA = (char*)malloc(size);
char* hB = (char*)malloc(size);
for(i=0;i<N;i++)
{
hA[i] = rand()%20-10;
}
//allocate memory on the device at location A (GPU)
char* dA;
hipMalloc((void**) &dA,size);
//allocate memory on the device at location B (GPU)
char* dB;
hipMalloc((void**) &dB,size);
//timing start for inclusive timing
hipEventRecord(start1, 0);
//copy vectors from host memory to devie memory
hipMemcpy(dA, hA, size, hipMemcpyHostToDevice);
hipEventRecord(stop1, 0);
hipEventSynchronize(stop1);
//invoke GPU kernel with blockspergrid blocks of threadsperblock (16) threads each
int threadsperblock = 16;
int blockspergrid = (N + threadsperblock - 1)/ threadsperblock;
hipEventRecord(start2, 0);
//timing start for exclusive timing
//hipEventRecord(start2, 0);
hipLaunchKernelGGL(( Matadd), dim3(blockspergrid),dim3(threadsperblock), 0, 0, dA,dB,N);
hipMemcpy(hB, dB, size, hipMemcpyDeviceToHost);
hipEventRecord(stop2, 0);
hipEventSynchronize(stop2);
hipEventElapsedTime(&time1,start1,stop1);
hipEventElapsedTime(&time2,start2,stop2);
printf("\n The Host to Device time for location A in microseconds for 2 to power %d is %f respectively \n",j,time1);
printf("\n The Device to Host time for location B in microseconds for 2 to power %d is %f respectively \n",j,time2);
time3 = time1 + time2;
printf("\n The total data transfer time in microseconds for 2 to power %d is %f respectively \n",j,time3);
free(hA); // hA/hB were allocated with malloc, so release them with free, not hipFree
free(hB);
hipFree(dA);
hipFree(dB);
}
return 0;
}
|
61c78fbefe8c532d34d6342e2f1c7e8441dbebc8.cu
|
#include<stdio.h>
#include<cuda.h>
#include<math.h>
#include<sys/time.h>
__global__
void Matadd(char* A, char*B, int N)
{
}
int main()
{
for(int j=0;j<=25;j++)
{
cudaEvent_t start1,stop1,start2,stop2;
float time1,time2, time3;
int i;
int N = pow(2,j);
size_t size = N;
printf ("\n The value of N is %d",N);
cudaEventCreate(&start1);
cudaEventCreate(&stop1);
cudaEventCreate(&start2);
cudaEventCreate(&stop2);
//allocate input matrices hA, hB, hC,refC in host memory
char* hA = (char*)malloc(size);
char* hB = (char*)malloc(size);
for(i=0;i<N;i++)
{
hA[i] = rand()%20-10;
}
//allocate memory on the device at location A (GPU)
char* dA;
cudaMalloc((void**) &dA,size);
//allocate memory on the device at location B (GPU)
char* dB;
cudaMalloc((void**) &dB,size);
//timing start for inclusive timing
cudaEventRecord(start1, 0);
//copy vectors from host memory to devie memory
cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice);
cudaEventRecord(stop1, 0);
cudaEventSynchronize(stop1);
//invoke GPU kernel with blockspergrid blocks of threadsperblock (16) threads each
int threadsperblock = 16;
int blockspergrid = (N + threadsperblock - 1)/ threadsperblock;
cudaEventRecord(start2, 0);
//timing start for exclusive timing
//cudaEventRecord(start2, 0);
Matadd<<<blockspergrid,threadsperblock>>>(dA,dB,N);
cudaMemcpy(hB, dB, size, cudaMemcpyDeviceToHost);
cudaEventRecord(stop2, 0);
cudaEventSynchronize(stop2);
cudaEventElapsedTime(&time1,start1,stop1);
cudaEventElapsedTime(&time2,start2,stop2);
printf("\n The Host to Device time for location A in microseconds for 2 to power %d is %f respectively \n",j,time1);
printf("\n The Device to Host time for location B in microseconds for 2 to power %d is %f respectively \n",j,time2);
time3 = time1 + time2;
printf("\n The total data transfer time in microseconds for 2 to power %d is %f respectively \n",j,time3);
free(hA); // hA/hB were allocated with malloc, so release them with free, not cudaFree
free(hB);
cudaFree(dA);
cudaFree(dB);
}
return 0;
}
|
233e90dbc70f24e83ee018ffd63660f115cf4cda.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "include/common.h"
#include "include/kernel.h"
template<class T>
__global__ void add(const T* a, const T* b, T* c, const int N) {
int tid = blockIdx.x; // this thread handles the data at its thread id
if (tid < N)
c[tid] = a[tid] + b[tid];
}
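// note: gpu_add_loop below launches add<T> with N blocks of 1 thread, so tid == blockIdx.x indexes the arrays directly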
template<class T>
void gpu_add_loop(const T* ptr_a, const T* ptr_b, T* ptr_c, const int N) {
T *dev_a, *dev_b, *dev_c;
// allocate the memory on the GPU
HANDLE_ERROR( hipMalloc( (void**)&dev_a, N * sizeof(T) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_b, N * sizeof(T) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_c, N * sizeof(T) ) );
// copy the arrays 'a' and 'b' to the GPU
HANDLE_ERROR( hipMemcpy( (void*)dev_a, (void*)ptr_a, N * sizeof(T),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( (void*)dev_b, (void*)ptr_b, N * sizeof(T),
hipMemcpyHostToDevice ) );
hipLaunchKernelGGL(( add<T>), dim3(N),dim3(1), 0, 0, dev_a, dev_b, dev_c, N);
// copy the array 'c' back from the GPU to the CPU
HANDLE_ERROR( hipMemcpy( (void*)ptr_c, (void*)dev_c, N * sizeof(T),
hipMemcpyDeviceToHost ) );
// free the memory allocated on the GPU
HANDLE_ERROR( hipFree( dev_a ) );
HANDLE_ERROR( hipFree( dev_b ) );
HANDLE_ERROR( hipFree( dev_c ) );
}
template void gpu_add_loop<double>(const double* a, const double* b, double* c, int N);
template void gpu_add_loop<int>(const int* a, const int* b, int* c, const int N);
|
233e90dbc70f24e83ee018ffd63660f115cf4cda.cu
|
#include "include/common.h"
#include "include/kernel.h"
template<class T>
__global__ void add(const T* a, const T* b, T* c, const int N) {
int tid = blockIdx.x; // this thread handles the data at its thread id
if (tid < N)
c[tid] = a[tid] + b[tid];
}
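// note: gpu_add_loop below launches add<T> with N blocks of 1 thread, so tid == blockIdx.x indexes the arrays directly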
template<class T>
void gpu_add_loop(const T* ptr_a, const T* ptr_b, T* ptr_c, const int N) {
T *dev_a, *dev_b, *dev_c;
// allocate the memory on the GPU
HANDLE_ERROR( cudaMalloc( (void**)&dev_a, N * sizeof(T) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_b, N * sizeof(T) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_c, N * sizeof(T) ) );
// copy the arrays 'a' and 'b' to the GPU
HANDLE_ERROR( cudaMemcpy( (void*)dev_a, (void*)ptr_a, N * sizeof(T),
cudaMemcpyHostToDevice ) );
HANDLE_ERROR( cudaMemcpy( (void*)dev_b, (void*)ptr_b, N * sizeof(T),
cudaMemcpyHostToDevice ) );
add<T><<<N,1>>>( dev_a, dev_b, dev_c, N);
// copy the array 'c' back from the GPU to the CPU
HANDLE_ERROR( cudaMemcpy( (void*)ptr_c, (void*)dev_c, N * sizeof(T),
cudaMemcpyDeviceToHost ) );
// free the memory allocated on the GPU
HANDLE_ERROR( cudaFree( dev_a ) );
HANDLE_ERROR( cudaFree( dev_b ) );
HANDLE_ERROR( cudaFree( dev_c ) );
}
template void gpu_add_loop<double>(const double* a, const double* b, double* c, int N);
template void gpu_add_loop<int>(const int* a, const int* b, int* c, const int N);
|
1083a94600d357970bb9a267c664b8722073519f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cfloat>
#include <stdio.h>
#include "assert.h"
#include "hip/hip_runtime.h"
#include "utility/src/csrmv.h"
#include "utility/src/print.h"
#include "utility/src/Msg.h"
DREAMPLACE_BEGIN_NAMESPACE
template <typename T>
__global__ void fillArray(T* x, const int n, const T v)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
{
x[i] = v;
}
}
template <typename T>
__global__ void computeExp(const T* x, const T* nx, const int n, const T* gamma, T* exp_x)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
{
exp_x[i] = exp(x[i]/(*gamma));
}
}
template <typename T>
__global__ void computeNegExp(const T* x, const T* nx, const int n, const T* gamma, T* exp_nx)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
{
exp_nx[i] = exp(-x[i]/(*gamma));
}
}
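// The kernels below shift each pin coordinate by the per-net max (or min) before exponentiating
// and restore it in computeLogSumExp / computeLogSumNegExp -- the standard log-sum-exp trick
// for numerical stability.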
template <typename T>
__global__ void computeMaxAndExp(
const T* x,
const int* flat_netpin,
const int* netpin_start,
const unsigned char* net_mask,
int num_nets,
const T* gamma,
T* x_max,
T* exp_x
)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_nets; i += blockDim.x * gridDim.x)
{
x_max[i] = -FLT_MAX;
if (net_mask[i])
{
for (int j = netpin_start[i]; j < netpin_start[i+1]; ++j)
{
int jj = flat_netpin[j];
T xx = x[jj];
x_max[i] = max(x_max[i], xx);
}
for (int j = netpin_start[i]; j < netpin_start[i+1]; ++j)
{
int jj = flat_netpin[j];
T xx = x[jj];
exp_x[jj] = exp((xx-x_max[i])/(*gamma));
}
}
}
}
template <typename T>
__global__ void computeMinAndNegExp(
const T* x,
const int* flat_netpin,
const int* netpin_start,
const unsigned char* net_mask,
int num_nets,
const T* gamma,
T* x_min,
T* exp_nx
)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_nets; i += blockDim.x * gridDim.x)
{
x_min[i] = FLT_MAX;
if (net_mask[i])
{
for (int j = netpin_start[i]; j < netpin_start[i+1]; ++j)
{
int jj = flat_netpin[j];
T xx = x[jj];
x_min[i] = min(x_min[i], xx);
}
for (int j = netpin_start[i]; j < netpin_start[i+1]; ++j)
{
int jj = flat_netpin[j];
T xx = x[jj];
exp_nx[jj] = exp(-(xx-x_min[i])/(*gamma));
}
}
}
}
template <typename T>
__global__ void computeLogSumExp(
const T* exp_x_sum,
const T* x_max,
const int* flat_netpin,
const int* netpin_start,
const unsigned char* net_mask,
int num_nets,
const T* gamma,
T* partial_wl
)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_nets; i += blockDim.x * gridDim.x)
{
if (net_mask[i])
{
partial_wl[i] = (*gamma)*log(exp_x_sum[i]) + x_max[i];
}
else
{
partial_wl[i] = 0;
}
}
}
template <typename T>
__global__ void computeLogSumNegExp(
const T* exp_nx_sum,
const T* x_min,
const int* flat_netpin,
const int* netpin_start,
const unsigned char* net_mask,
int num_nets,
const T* gamma,
T* partial_wl
)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_nets; i += blockDim.x * gridDim.x)
{
if (net_mask[i])
{
partial_wl[i] = (*gamma)*log(exp_nx_sum[i]) - x_min[i];
}
else
{
partial_wl[i] = 0;
}
}
}
template <typename T>
__global__ void sumArray(const T* x, const int n, T* output)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i == 0)
{
*output = 0;
for (int j = 0; j < n; ++j)
{
*output += x[j];
}
}
}
template <typename T>
__global__ void computeLogSumExpWirelengthGrad(
const T* exp_x, const T* exp_nx,
const T* exp_x_sum, const T* exp_nx_sum,
const int* flat_netpin,
const int* netpin_start,
const unsigned char* net_mask,
int num_nets,
const T* gamma,
const T* grad_tensor,
T* grad_x_tensor
)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_nets; i += blockDim.x * gridDim.x)
{
if (net_mask[i])
{
T reciprocal_exp_x_sum = 1.0/exp_x_sum[i];
T reciprocal_exp_nx_sum = 1.0/exp_nx_sum[i];
for (int j = netpin_start[i]; j < netpin_start[i+1]; ++j)
{
int jj = flat_netpin[j];
grad_x_tensor[jj] = (exp_x[jj]*reciprocal_exp_x_sum - exp_nx[jj]*reciprocal_exp_nx_sum)*(*grad_tensor);
//grad_x_tensor[jj] = (exp_x[jj]/exp_x_sum[i] - exp_nx[jj]/exp_nx_sum[i])*(*grad_tensor);
}
}
}
}
template <typename T>
int computeLogSumExpWirelengthCudaLauncher(
const T* x, const T* y,
const int* flat_netpin,
const int* netpin_start,
const T* netpin_values,
const unsigned char* net_mask,
int num_nets,
int num_pins,
const T* gamma,
T* exp_xy, T* exp_nxy,
T* exp_xy_sum, T* exp_nxy_sum,
T* partial_wl, // wirelength of each net
const T* grad_tensor,
T* grad_x_tensor, T* grad_y_tensor // the gradient is partial total wirelength to partial pin position
)
{
int thread_count = 1024;
int block_count = 32; // separate x and y
hipError_t status;
hipStream_t stream_x_exp;
hipStream_t stream_nx_exp;
hipStream_t stream_y_exp;
hipStream_t stream_ny_exp;
status = hipStreamCreate(&stream_x_exp);
if (status != hipSuccess)
{
printf("hipStreamCreate failed for stream_x_exp\n");
fflush(stdout);
return 1;
}
status = hipStreamCreate(&stream_y_exp);
if (status != hipSuccess)
{
printf("hipStreamCreate failed for stream_y_exp\n");
fflush(stdout);
return 1;
}
if (grad_tensor)
{
hipLaunchKernelGGL(( computeLogSumExpWirelengthGrad), dim3(block_count), dim3(thread_count), 0, stream_x_exp,
exp_xy, exp_nxy,
exp_xy_sum, exp_nxy_sum,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
grad_tensor,
grad_x_tensor
);
hipLaunchKernelGGL(( computeLogSumExpWirelengthGrad), dim3(block_count), dim3(thread_count), 0, stream_y_exp,
exp_xy+num_pins, exp_nxy+num_pins,
exp_xy_sum+num_nets, exp_nxy_sum+num_nets,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
grad_tensor,
grad_y_tensor
);
}
else
{
status = hipStreamCreate(&stream_nx_exp);
if (status != hipSuccess)
{
printf("hipStreamCreate failed for stream_nx_exp\n");
fflush(stdout);
return 1;
}
status = hipStreamCreate(&stream_ny_exp);
if (status != hipSuccess)
{
printf("hipStreamCreate failed for stream_ny_exp\n");
fflush(stdout);
return 1;
}
T* xy_max = nullptr;
status = hipMalloc((void**)&xy_max, 2*num_nets*sizeof(T));
if (status != hipSuccess)
{
printf("hipMalloc failed for xy_max\n");
fflush(stdout);
return 1;
}
T* xy_min = nullptr;
status = hipMalloc((void**)&xy_min, 2*num_nets*sizeof(T));
if (status != hipSuccess)
{
printf("hipMalloc failed for xy_min\n");
fflush(stdout);
return 1;
}
//T* partial_wl = nullptr;
//status = hipMalloc((void**)&partial_wl, 2*num_nets*sizeof(T));
//if (status != hipSuccess)
//{
// printf("hipMalloc failed for partial_wl\n");
// fflush(stdout);
// return 1;
//}
//// be careful, partial_wl is not initialized yet
T alpha = 1.0;
T beta = 0.0;
hipLaunchKernelGGL(( computeMaxAndExp), dim3(block_count), dim3(thread_count), 0, stream_x_exp,
x,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
xy_max,
exp_xy
);
hipLaunchKernelGGL(( computeMinAndNegExp), dim3(block_count), dim3(thread_count), 0, stream_nx_exp,
x,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
xy_min,
exp_nxy
);
hipLaunchKernelGGL(( computeMaxAndExp), dim3(block_count), dim3(thread_count), 0, stream_y_exp,
y,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
xy_max+num_nets,
exp_xy+num_pins
);
hipLaunchKernelGGL(( computeMinAndNegExp), dim3(block_count), dim3(thread_count), 0, stream_ny_exp,
y,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
xy_min+num_nets,
exp_nxy+num_pins
);
hipsparseStatus_t sparse_status;
hipsparseHandle_t handle_x_exp = 0;
hipsparseHandle_t handle_nx_exp = 0;
hipsparseHandle_t handle_y_exp = 0;
hipsparseHandle_t handle_ny_exp = 0;
hipsparseMatDescr_t descr = 0;
/* initialize cusparse library */
sparse_status= hipsparseCreate(&handle_x_exp);
if (sparse_status != HIPSPARSE_STATUS_SUCCESS) {
printf("CUSPARSE Library initialization failed\n");
fflush(stdout);
return 1;
}
sparse_status= hipsparseCreate(&handle_nx_exp);
if (sparse_status != HIPSPARSE_STATUS_SUCCESS) {
printf("CUSPARSE Library initialization failed\n");
fflush(stdout);
return 1;
}
sparse_status= hipsparseCreate(&handle_y_exp);
if (sparse_status != HIPSPARSE_STATUS_SUCCESS) {
printf("CUSPARSE Library initialization failed\n");
fflush(stdout);
return 1;
}
sparse_status= hipsparseCreate(&handle_ny_exp);
if (sparse_status != HIPSPARSE_STATUS_SUCCESS) {
printf("CUSPARSE Library initialization failed\n");
fflush(stdout);
return 1;
}
/* create and setup matrix descriptor */
sparse_status= hipsparseCreateMatDescr(&descr);
if (sparse_status != HIPSPARSE_STATUS_SUCCESS) {
printf("Matrix descriptor initialization failed\n");
fflush(stdout);
return 1;
}
hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO);
hipsparseSetStream(handle_x_exp, stream_x_exp);
hipsparseSetStream(handle_nx_exp, stream_nx_exp);
hipsparseSetStream(handle_y_exp, stream_y_exp);
hipsparseSetStream(handle_ny_exp, stream_ny_exp);
csrmv(
handle_x_exp,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
num_nets,
num_pins,
num_pins,
&alpha,
descr,
netpin_values,
netpin_start, flat_netpin,
exp_xy,
&beta,
exp_xy_sum
);
csrmv(
handle_y_exp,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
num_nets,
num_pins,
num_pins,
&alpha,
descr,
netpin_values,
netpin_start, flat_netpin,
exp_xy+num_pins,
&beta,
exp_xy_sum+num_nets
);
csrmv(
handle_nx_exp,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
num_nets,
num_pins,
num_pins,
&alpha,
descr,
netpin_values,
netpin_start, flat_netpin,
exp_nxy,
&beta,
exp_nxy_sum
);
csrmv(
handle_ny_exp,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
num_nets,
num_pins,
num_pins,
&alpha,
descr,
netpin_values,
netpin_start, flat_netpin,
exp_nxy+num_pins,
&beta,
exp_nxy_sum+num_nets
);
hipLaunchKernelGGL(( computeLogSumExp), dim3(block_count), dim3(thread_count), 0, stream_x_exp,
exp_xy_sum,
xy_max,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
partial_wl
);
hipLaunchKernelGGL(( computeLogSumNegExp), dim3(block_count), dim3(thread_count), 0, stream_nx_exp,
exp_nxy_sum,
xy_min,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
partial_wl+num_nets
);
hipLaunchKernelGGL(( computeLogSumExp), dim3(block_count), dim3(thread_count), 0, stream_y_exp,
exp_xy_sum+num_nets,
xy_max+num_nets,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
partial_wl+2*num_nets
);
hipLaunchKernelGGL(( computeLogSumNegExp), dim3(block_count), dim3(thread_count), 0, stream_ny_exp,
exp_nxy_sum+num_nets,
xy_min+num_nets,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
partial_wl+3*num_nets
);
/* destroy matrix descriptor */
sparse_status = hipsparseDestroyMatDescr(descr);
descr = 0;
if (sparse_status != HIPSPARSE_STATUS_SUCCESS)
{
printf("Matrix descriptor destruction failed\n");
fflush(stdout);
return 1;
}
/* destroy handle */
sparse_status = hipsparseDestroy(handle_x_exp);
handle_x_exp = 0;
if (sparse_status != HIPSPARSE_STATUS_SUCCESS)
{
printf("CUSPARSE Library release of resources failed\n");
fflush(stdout);
return 1;
}
sparse_status = hipsparseDestroy(handle_nx_exp);
handle_nx_exp = 0;
if (sparse_status != HIPSPARSE_STATUS_SUCCESS)
{
printf("CUSPARSE Library release of resources failed\n");
fflush(stdout);
return 1;
}
sparse_status = hipsparseDestroy(handle_y_exp);
handle_y_exp = 0;
if (sparse_status != HIPSPARSE_STATUS_SUCCESS)
{
printf("CUSPARSE Library release of resources failed\n");
fflush(stdout);
return 1;
}
sparse_status = hipsparseDestroy(handle_ny_exp);
handle_ny_exp = 0;
if (sparse_status != HIPSPARSE_STATUS_SUCCESS)
{
printf("CUSPARSE Library release of resources failed\n");
fflush(stdout);
return 1;
}
// I move out the summation to use ATen
// significant speedup is observed
//sumArray<<<1, 1>>>(partial_wl, 2*num_nets, wl);
if (xy_max)
{
hipFree(xy_max);
xy_max = nullptr;
}
if (xy_min)
{
hipFree(xy_min);
xy_min = nullptr;
}
//if (partial_wl)
//{
// hipFree(partial_wl);
// partial_wl = nullptr;
//}
fflush(stdout);
status = hipStreamDestroy(stream_nx_exp);
stream_nx_exp = 0;
if (status != hipSuccess)
{
printf("stream_nx_exp destroy failed\n");
fflush(stdout);
return 1;
}
status = hipStreamDestroy(stream_ny_exp);
stream_ny_exp = 0;
if (status != hipSuccess)
{
printf("stream_ny_exp destroy failed\n");
fflush(stdout);
return 1;
}
}
/* destroy stream */
status = hipStreamDestroy(stream_x_exp);
stream_x_exp = 0;
if (status != hipSuccess)
{
printf("stream_x_exp destroy failed\n");
fflush(stdout);
return 1;
}
status = hipStreamDestroy(stream_y_exp);
stream_y_exp = 0;
if (status != hipSuccess)
{
printf("stream_y_exp destroy failed\n");
fflush(stdout);
return 1;
}
return 0;
}
#define REGISTER_KERNEL_LAUNCHER(T) \
int instantiateComputeLogSumExpWirelengthLauncher(\
const T* x, const T* y, \
const int* flat_netpin, \
const int* netpin_start, \
const T* netpin_values, \
const unsigned char* net_mask, \
int num_nets,\
int num_pins,\
const T* gamma, \
T* exp_xy, T* exp_nxy, \
T* exp_xy_sum, T* exp_nxy_sum, \
T* partial_wl, \
const T* grad_tensor, \
T* grad_x_tensor, T* grad_y_tensor \
)\
{\
return computeLogSumExpWirelengthCudaLauncher(\
x, y, \
flat_netpin, \
netpin_start, \
netpin_values, \
net_mask, \
num_nets,\
num_pins,\
gamma, \
exp_xy, exp_nxy, \
exp_xy_sum, exp_nxy_sum, \
partial_wl, \
grad_tensor, \
grad_x_tensor, grad_y_tensor \
);\
}
REGISTER_KERNEL_LAUNCHER(float);
REGISTER_KERNEL_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
|
1083a94600d357970bb9a267c664b8722073519f.cu
|
#include <cfloat>
#include <stdio.h>
#include "assert.h"
#include "cuda_runtime.h"
#include "utility/src/csrmv.h"
#include "utility/src/print.h"
#include "utility/src/Msg.h"
DREAMPLACE_BEGIN_NAMESPACE
template <typename T>
__global__ void fillArray(T* x, const int n, const T v)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
{
x[i] = v;
}
}
template <typename T>
__global__ void computeExp(const T* x, const T* nx, const int n, const T* gamma, T* exp_x)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
{
exp_x[i] = exp(x[i]/(*gamma));
}
}
template <typename T>
__global__ void computeNegExp(const T* x, const T* nx, const int n, const T* gamma, T* exp_nx)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
{
exp_nx[i] = exp(-x[i]/(*gamma));
}
}
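// The kernels below shift each pin coordinate by the per-net max (or min) before exponentiating
// and restore it in computeLogSumExp / computeLogSumNegExp -- the standard log-sum-exp trick
// for numerical stability.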
template <typename T>
__global__ void computeMaxAndExp(
const T* x,
const int* flat_netpin,
const int* netpin_start,
const unsigned char* net_mask,
int num_nets,
const T* gamma,
T* x_max,
T* exp_x
)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_nets; i += blockDim.x * gridDim.x)
{
x_max[i] = -FLT_MAX;
if (net_mask[i])
{
for (int j = netpin_start[i]; j < netpin_start[i+1]; ++j)
{
int jj = flat_netpin[j];
T xx = x[jj];
x_max[i] = max(x_max[i], xx);
}
for (int j = netpin_start[i]; j < netpin_start[i+1]; ++j)
{
int jj = flat_netpin[j];
T xx = x[jj];
exp_x[jj] = exp((xx-x_max[i])/(*gamma));
}
}
}
}
template <typename T>
__global__ void computeMinAndNegExp(
const T* x,
const int* flat_netpin,
const int* netpin_start,
const unsigned char* net_mask,
int num_nets,
const T* gamma,
T* x_min,
T* exp_nx
)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_nets; i += blockDim.x * gridDim.x)
{
x_min[i] = FLT_MAX;
if (net_mask[i])
{
for (int j = netpin_start[i]; j < netpin_start[i+1]; ++j)
{
int jj = flat_netpin[j];
T xx = x[jj];
x_min[i] = min(x_min[i], xx);
}
for (int j = netpin_start[i]; j < netpin_start[i+1]; ++j)
{
int jj = flat_netpin[j];
T xx = x[jj];
exp_nx[jj] = exp(-(xx-x_min[i])/(*gamma));
}
}
}
}
template <typename T>
__global__ void computeLogSumExp(
const T* exp_x_sum,
const T* x_max,
const int* flat_netpin,
const int* netpin_start,
const unsigned char* net_mask,
int num_nets,
const T* gamma,
T* partial_wl
)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_nets; i += blockDim.x * gridDim.x)
{
if (net_mask[i])
{
partial_wl[i] = (*gamma)*log(exp_x_sum[i]) + x_max[i];
}
else
{
partial_wl[i] = 0;
}
}
}
template <typename T>
__global__ void computeLogSumNegExp(
const T* exp_nx_sum,
const T* x_min,
const int* flat_netpin,
const int* netpin_start,
const unsigned char* net_mask,
int num_nets,
const T* gamma,
T* partial_wl
)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_nets; i += blockDim.x * gridDim.x)
{
if (net_mask[i])
{
partial_wl[i] = (*gamma)*log(exp_nx_sum[i]) - x_min[i];
}
else
{
partial_wl[i] = 0;
}
}
}
template <typename T>
__global__ void sumArray(const T* x, const int n, T* output)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i == 0)
{
*output = 0;
for (int j = 0; j < n; ++j)
{
*output += x[j];
}
}
}
template <typename T>
__global__ void computeLogSumExpWirelengthGrad(
const T* exp_x, const T* exp_nx,
const T* exp_x_sum, const T* exp_nx_sum,
const int* flat_netpin,
const int* netpin_start,
const unsigned char* net_mask,
int num_nets,
const T* gamma,
const T* grad_tensor,
T* grad_x_tensor
)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_nets; i += blockDim.x * gridDim.x)
{
if (net_mask[i])
{
T reciprocal_exp_x_sum = 1.0/exp_x_sum[i];
T reciprocal_exp_nx_sum = 1.0/exp_nx_sum[i];
for (int j = netpin_start[i]; j < netpin_start[i+1]; ++j)
{
int jj = flat_netpin[j];
grad_x_tensor[jj] = (exp_x[jj]*reciprocal_exp_x_sum - exp_nx[jj]*reciprocal_exp_nx_sum)*(*grad_tensor);
//grad_x_tensor[jj] = (exp_x[jj]/exp_x_sum[i] - exp_nx[jj]/exp_nx_sum[i])*(*grad_tensor);
}
}
}
}
template <typename T>
int computeLogSumExpWirelengthCudaLauncher(
const T* x, const T* y,
const int* flat_netpin,
const int* netpin_start,
const T* netpin_values,
const unsigned char* net_mask,
int num_nets,
int num_pins,
const T* gamma,
T* exp_xy, T* exp_nxy,
T* exp_xy_sum, T* exp_nxy_sum,
T* partial_wl, // wirelength of each net
const T* grad_tensor,
T* grad_x_tensor, T* grad_y_tensor // the gradient is partial total wirelength to partial pin position
)
{
int thread_count = 1024;
int block_count = 32; // separate x and y
cudaError_t status;
cudaStream_t stream_x_exp;
cudaStream_t stream_nx_exp;
cudaStream_t stream_y_exp;
cudaStream_t stream_ny_exp;
status = cudaStreamCreate(&stream_x_exp);
if (status != cudaSuccess)
{
printf("cudaStreamCreate failed for stream_x_exp\n");
fflush(stdout);
return 1;
}
status = cudaStreamCreate(&stream_y_exp);
if (status != cudaSuccess)
{
printf("cudaStreamCreate failed for stream_y_exp\n");
fflush(stdout);
return 1;
}
if (grad_tensor)
{
computeLogSumExpWirelengthGrad<<<block_count, thread_count, 0, stream_x_exp>>>(
exp_xy, exp_nxy,
exp_xy_sum, exp_nxy_sum,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
grad_tensor,
grad_x_tensor
);
computeLogSumExpWirelengthGrad<<<block_count, thread_count, 0, stream_y_exp>>>(
exp_xy+num_pins, exp_nxy+num_pins,
exp_xy_sum+num_nets, exp_nxy_sum+num_nets,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
grad_tensor,
grad_y_tensor
);
}
else
{
status = cudaStreamCreate(&stream_nx_exp);
if (status != cudaSuccess)
{
printf("cudaStreamCreate failed for stream_nx_exp\n");
fflush(stdout);
return 1;
}
status = cudaStreamCreate(&stream_ny_exp);
if (status != cudaSuccess)
{
printf("cudaStreamCreate failed for stream_ny_exp\n");
fflush(stdout);
return 1;
}
T* xy_max = nullptr;
status = cudaMalloc((void**)&xy_max, 2*num_nets*sizeof(T));
if (status != cudaSuccess)
{
printf("cudaMalloc failed for xy_max\n");
fflush(stdout);
return 1;
}
T* xy_min = nullptr;
status = cudaMalloc((void**)&xy_min, 2*num_nets*sizeof(T));
if (status != cudaSuccess)
{
printf("cudaMalloc failed for xy_min\n");
fflush(stdout);
return 1;
}
//T* partial_wl = nullptr;
//status = cudaMalloc((void**)&partial_wl, 2*num_nets*sizeof(T));
//if (status != cudaSuccess)
//{
// printf("cudaMalloc failed for partial_wl\n");
// fflush(stdout);
// return 1;
//}
//// be careful, partial_wl is not initialized yet
T alpha = 1.0;
T beta = 0.0;
computeMaxAndExp<<<block_count, thread_count, 0, stream_x_exp>>>(
x,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
xy_max,
exp_xy
);
computeMinAndNegExp<<<block_count, thread_count, 0, stream_nx_exp>>>(
x,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
xy_min,
exp_nxy
);
computeMaxAndExp<<<block_count, thread_count, 0, stream_y_exp>>>(
y,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
xy_max+num_nets,
exp_xy+num_pins
);
computeMinAndNegExp<<<block_count, thread_count, 0, stream_ny_exp>>>(
y,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
xy_min+num_nets,
exp_nxy+num_pins
);
cusparseStatus_t sparse_status;
cusparseHandle_t handle_x_exp = 0;
cusparseHandle_t handle_nx_exp = 0;
cusparseHandle_t handle_y_exp = 0;
cusparseHandle_t handle_ny_exp = 0;
cusparseMatDescr_t descr = 0;
/* initialize cusparse library */
sparse_status= cusparseCreate(&handle_x_exp);
if (sparse_status != CUSPARSE_STATUS_SUCCESS) {
printf("CUSPARSE Library initialization failed\n");
fflush(stdout);
return 1;
}
sparse_status= cusparseCreate(&handle_nx_exp);
if (sparse_status != CUSPARSE_STATUS_SUCCESS) {
printf("CUSPARSE Library initialization failed\n");
fflush(stdout);
return 1;
}
sparse_status= cusparseCreate(&handle_y_exp);
if (sparse_status != CUSPARSE_STATUS_SUCCESS) {
printf("CUSPARSE Library initialization failed\n");
fflush(stdout);
return 1;
}
sparse_status= cusparseCreate(&handle_ny_exp);
if (sparse_status != CUSPARSE_STATUS_SUCCESS) {
printf("CUSPARSE Library initialization failed\n");
fflush(stdout);
return 1;
}
/* create and setup matrix descriptor */
sparse_status= cusparseCreateMatDescr(&descr);
if (sparse_status != CUSPARSE_STATUS_SUCCESS) {
printf("Matrix descriptor initialization failed\n");
fflush(stdout);
return 1;
}
cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO);
cusparseSetStream(handle_x_exp, stream_x_exp);
cusparseSetStream(handle_nx_exp, stream_nx_exp);
cusparseSetStream(handle_y_exp, stream_y_exp);
cusparseSetStream(handle_ny_exp, stream_ny_exp);
csrmv(
handle_x_exp,
CUSPARSE_OPERATION_NON_TRANSPOSE,
num_nets,
num_pins,
num_pins,
&alpha,
descr,
netpin_values,
netpin_start, flat_netpin,
exp_xy,
&beta,
exp_xy_sum
);
csrmv(
handle_y_exp,
CUSPARSE_OPERATION_NON_TRANSPOSE,
num_nets,
num_pins,
num_pins,
&alpha,
descr,
netpin_values,
netpin_start, flat_netpin,
exp_xy+num_pins,
&beta,
exp_xy_sum+num_nets
);
csrmv(
handle_nx_exp,
CUSPARSE_OPERATION_NON_TRANSPOSE,
num_nets,
num_pins,
num_pins,
&alpha,
descr,
netpin_values,
netpin_start, flat_netpin,
exp_nxy,
&beta,
exp_nxy_sum
);
csrmv(
handle_ny_exp,
CUSPARSE_OPERATION_NON_TRANSPOSE,
num_nets,
num_pins,
num_pins,
&alpha,
descr,
netpin_values,
netpin_start, flat_netpin,
exp_nxy+num_pins,
&beta,
exp_nxy_sum+num_nets
);
computeLogSumExp<<<block_count, thread_count, 0, stream_x_exp>>>(
exp_xy_sum,
xy_max,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
partial_wl
);
computeLogSumNegExp<<<block_count, thread_count, 0, stream_nx_exp>>>(
exp_nxy_sum,
xy_min,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
partial_wl+num_nets
);
computeLogSumExp<<<block_count, thread_count, 0, stream_y_exp>>>(
exp_xy_sum+num_nets,
xy_max+num_nets,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
partial_wl+2*num_nets
);
computeLogSumNegExp<<<block_count, thread_count, 0, stream_ny_exp>>>(
exp_nxy_sum+num_nets,
xy_min+num_nets,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
partial_wl+3*num_nets
);
/* destroy matrix descriptor */
sparse_status = cusparseDestroyMatDescr(descr);
descr = 0;
if (sparse_status != CUSPARSE_STATUS_SUCCESS)
{
printf("Matrix descriptor destruction failed\n");
fflush(stdout);
return 1;
}
/* destroy handle */
sparse_status = cusparseDestroy(handle_x_exp);
handle_x_exp = 0;
if (sparse_status != CUSPARSE_STATUS_SUCCESS)
{
printf("CUSPARSE Library release of resources failed\n");
fflush(stdout);
return 1;
}
sparse_status = cusparseDestroy(handle_nx_exp);
handle_nx_exp = 0;
if (sparse_status != CUSPARSE_STATUS_SUCCESS)
{
printf("CUSPARSE Library release of resources failed\n");
fflush(stdout);
return 1;
}
sparse_status = cusparseDestroy(handle_y_exp);
handle_y_exp = 0;
if (sparse_status != CUSPARSE_STATUS_SUCCESS)
{
printf("CUSPARSE Library release of resources failed\n");
fflush(stdout);
return 1;
}
sparse_status = cusparseDestroy(handle_ny_exp);
handle_ny_exp = 0;
if (sparse_status != CUSPARSE_STATUS_SUCCESS)
{
printf("CUSPARSE Library release of resources failed\n");
fflush(stdout);
return 1;
}
// I move out the summation to use ATen
// significant speedup is observed
//sumArray<<<1, 1>>>(partial_wl, 2*num_nets, wl);
if (xy_max)
{
cudaFree(xy_max);
xy_max = nullptr;
}
if (xy_min)
{
cudaFree(xy_min);
xy_min = nullptr;
}
//if (partial_wl)
//{
// cudaFree(partial_wl);
// partial_wl = nullptr;
//}
fflush(stdout);
status = cudaStreamDestroy(stream_nx_exp);
stream_nx_exp = 0;
if (status != cudaSuccess)
{
printf("stream_nx_exp destroy failed\n");
fflush(stdout);
return 1;
}
status = cudaStreamDestroy(stream_ny_exp);
stream_ny_exp = 0;
if (status != cudaSuccess)
{
printf("stream_ny_exp destroy failed\n");
fflush(stdout);
return 1;
}
}
/* destroy stream */
status = cudaStreamDestroy(stream_x_exp);
stream_x_exp = 0;
if (status != cudaSuccess)
{
printf("stream_x_exp destroy failed\n");
fflush(stdout);
return 1;
}
status = cudaStreamDestroy(stream_y_exp);
stream_y_exp = 0;
if (status != cudaSuccess)
{
printf("stream_y_exp destroy failed\n");
fflush(stdout);
return 1;
}
return 0;
}
#define REGISTER_KERNEL_LAUNCHER(T) \
int instantiateComputeLogSumExpWirelengthLauncher(\
const T* x, const T* y, \
const int* flat_netpin, \
const int* netpin_start, \
const T* netpin_values, \
const unsigned char* net_mask, \
int num_nets,\
int num_pins,\
const T* gamma, \
T* exp_xy, T* exp_nxy, \
T* exp_xy_sum, T* exp_nxy_sum, \
T* partial_wl, \
const T* grad_tensor, \
T* grad_x_tensor, T* grad_y_tensor \
)\
{\
return computeLogSumExpWirelengthCudaLauncher(\
x, y, \
flat_netpin, \
netpin_start, \
netpin_values, \
net_mask, \
num_nets,\
num_pins,\
gamma, \
exp_xy, exp_nxy, \
exp_xy_sum, exp_nxy_sum, \
partial_wl, \
grad_tensor, \
grad_x_tensor, grad_y_tensor \
);\
}
REGISTER_KERNEL_LAUNCHER(float);
REGISTER_KERNEL_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
|
b973474d564f700c3ed370fad086729be0a07023.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
/*
* This code implements the interleaved Pair approaches to
* parallel reduction in CUDA. For this example, the sum operation is used.
*/
// Recursive Implementation of Interleaved Pair Approach
int recursiveReduce(int *data, int const size)
{
// terminate check
if (size == 1) return data[0];
// renew the stride
int const stride = size / 2;
// in-place reduction
for (int i = 0; i < stride; i++)
{
data[i] += data[i + stride];
}
// call recursively
return recursiveReduce(data, stride);
}
// Kernel: Interleaved Pair Implementation
__global__ void reduceInterleaved (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// boundary check
if(idx >= n) return;
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride /= 2)
{
if (tid < stride)
{
g_idata[idx] += g_idata[idx + stride];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = g_idata[idx];
}
__global__ void reduceUnrolling (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x;
// unroll 2
if (idx + blockDim.x < n)
{
g_idata[idx] += g_idata[idx + blockDim.x];
}
__syncthreads();
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride /= 2)
{
if (tid < stride)
{
g_idata[idx] += g_idata[idx + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = g_idata[idx];
}
int main(int argc, char **argv)
{
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("%s starting reduction at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
bool bResult = false;
// initialization
int size = 1 << 24; // total number of elements to reduce
printf(" with array size %d ", size);
// execution configuration
int blocksize = 512; // initial block size
if(argc > 1)
{
blocksize = atoi(argv[1]); // block size from command line argument
}
dim3 block (blocksize, 1);
dim3 grid ((size + block.x - 1) / block.x, 1);
printf("grid %d block %d\n", grid.x, block.x);
// allocate host memory
size_t bytes = size * sizeof(int);
int *h_idata = (int *) malloc(bytes);
int *h_odata = (int *) malloc(grid.x * sizeof(int));
int *tmp = (int *) malloc(bytes);
// initialize the array
int sign=1;
for (int i = 0; i < size; i++)
{
// keep only the low byte so each value lies in the range 0..255
h_idata[i] = sign*((int)( rand() & 0xFF ));
sign*=-1;
}
memcpy (tmp, h_idata, bytes);
double iStart, iElaps;
int gpu_sum = 0;
// allocate device memory
int *d_idata = NULL;
int *d_odata = NULL;
CHECK(hipMalloc((void **) &d_idata, bytes));
CHECK(hipMalloc((void **) &d_odata, grid.x * sizeof(int)));
// cpu reduction
iStart = seconds();
int cpu_sum = recursiveReduce (tmp, size);
iElaps = seconds() - iStart;
printf("cpu reduce elapsed %f sec cpu_sum: %d\n", iElaps, cpu_sum);
// kernel: reduceInterleaved
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
hipLaunchKernelGGL(( reduceInterleaved), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("gpu Interleaved elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x, block.x);
// kernel: reduceUnrolling
if (grid.x>1)
{
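// reduceUnrolling makes each block consume two blockDim.x-wide tiles, so the grid can be halved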
dim3 grid2 ((grid.x + 1)/2,1);
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
hipLaunchKernelGGL(( reduceUnrolling), dim3(grid2.x), dim3(block), 0, 0, d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipGetLastError());
CHECK(hipMemcpy(h_odata, d_odata, grid2.x * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid2.x; i++) gpu_sum += h_odata[i];
printf("gpu Unrolling elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid2.x, block.x);
}
// free host memory
free(h_idata);
free(h_odata);
// free device memory
CHECK(hipFree(d_idata));
CHECK(hipFree(d_odata));
// reset device
CHECK(hipDeviceReset());
// check the results
bResult = (gpu_sum == cpu_sum);
if(!bResult) printf("Test failed!\n");
return EXIT_SUCCESS;
}
|
b973474d564f700c3ed370fad086729be0a07023.cu
|
#include "common.h"
#include <cuda_runtime.h>
#include <stdio.h>
/*
* This code implements the interleaved Pair approaches to
* parallel reduction in CUDA. For this example, the sum operation is used.
*/
// Recursive Implementation of Interleaved Pair Approach
int recursiveReduce(int *data, int const size)
{
// terminate check
if (size == 1) return data[0];
// renew the stride
int const stride = size / 2;
// in-place reduction
for (int i = 0; i < stride; i++)
{
data[i] += data[i + stride];
}
// call recursively
return recursiveReduce(data, stride);
}
// Kernel: Interleaved Pair Implementation
__global__ void reduceInterleaved (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// boundary check
if(idx >= n) return;
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride /= 2)
{
if (tid < stride)
{
g_idata[idx] += g_idata[idx + stride];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = g_idata[idx];
}
__global__ void reduceUnrolling (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x;
// unroll 2
if (idx + blockDim.x < n)
{
g_idata[idx] += g_idata[idx + blockDim.x];
}
__syncthreads();
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride /= 2)
{
if (tid < stride)
{
g_idata[idx] += g_idata[idx + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = g_idata[idx];
}
int main(int argc, char **argv)
{
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("%s starting reduction at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
bool bResult = false;
// initialization
int size = 1 << 24; // total number of elements to reduce
printf(" with array size %d ", size);
// execution configuration
int blocksize = 512; // initial block size
if(argc > 1)
{
blocksize = atoi(argv[1]); // block size from command line argument
}
dim3 block (blocksize, 1);
dim3 grid ((size + block.x - 1) / block.x, 1);
printf("grid %d block %d\n", grid.x, block.x);
// allocate host memory
size_t bytes = size * sizeof(int);
int *h_idata = (int *) malloc(bytes);
int *h_odata = (int *) malloc(grid.x * sizeof(int));
int *tmp = (int *) malloc(bytes);
// initialize the array
int sign=1;
for (int i = 0; i < size; i++)
{
// keep only the low byte so each value lies in the range 0..255
h_idata[i] = sign*((int)( rand() & 0xFF ));
sign*=-1;
}
memcpy (tmp, h_idata, bytes);
double iStart, iElaps;
int gpu_sum = 0;
// allocate device memory
int *d_idata = NULL;
int *d_odata = NULL;
CHECK(cudaMalloc((void **) &d_idata, bytes));
CHECK(cudaMalloc((void **) &d_odata, grid.x * sizeof(int)));
// cpu reduction
iStart = seconds();
int cpu_sum = recursiveReduce (tmp, size);
iElaps = seconds() - iStart;
printf("cpu reduce elapsed %f sec cpu_sum: %d\n", iElaps, cpu_sum);
// kernel: reduceInterleaved
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
reduceInterleaved<<<grid, block>>>(d_idata, d_odata, size);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("gpu Interleaved elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x, block.x);
// kernel: reduceUnrolling
if (grid.x>1)
{
dim3 grid2 ((grid.x + 1)/2,1);
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
reduceUnrolling<<<grid2.x, block>>>(d_idata, d_odata, size);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(h_odata, d_odata, grid2.x * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid2.x; i++) gpu_sum += h_odata[i];
printf("gpu Unrolling elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid2.x, block.x);
}
// free host memory
free(h_idata);
free(h_odata);
// free device memory
CHECK(cudaFree(d_idata));
CHECK(cudaFree(d_odata));
// reset device
CHECK(cudaDeviceReset());
// check the results
bResult = (gpu_sum == cpu_sum);
if(!bResult) printf("Test failed!\n");
return EXIT_SUCCESS;
}
|
d103918b755d2c06f018cb9b867591d9a76c6abc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
__global__ void add(int *a, int *b, int *c )
{
*c = *a+*b;
}
int main(void )
{
int a, b, c;
int *dev_a, *dev_b, *dev_c;
int size = sizeof( int );
hipMalloc( (void**)&dev_a, size );
hipMalloc( (void**)&dev_b, size );
hipMalloc( (void**)&dev_c, size );
a = 5;
b = 10;
hipMemcpy(dev_a, &a,size,hipMemcpyHostToDevice);
hipMemcpy(dev_b, &b, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, dev_a, dev_b, dev_c);
hipMemcpy(&c,dev_c,size,hipMemcpyDeviceToHost);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
std::cout<<a<<" "<<b<<" "<<c<<std::endl;
return 0;
}
|
d103918b755d2c06f018cb9b867591d9a76c6abc.cu
|
#include<iostream>
__global__ void add(int *a, int *b, int *c )
{
*c = *a+*b;
}
int main(void )
{
int a, b, c;
int *dev_a, *dev_b, *dev_c;
int size = sizeof( int );
cudaMalloc( (void**)&dev_a, size );
cudaMalloc( (void**)&dev_b, size );
cudaMalloc( (void**)&dev_c, size );
a = 5;
b = 10;
cudaMemcpy(dev_a, &a,size,cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, &b, size, cudaMemcpyHostToDevice);
add<<<1,1>>>(dev_a, dev_b, dev_c);
cudaMemcpy(&c,dev_c,size,cudaMemcpyDeviceToHost);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
std::cout<<a<<" "<<b<<" "<<c<<std::endl;
return 0;
}
|
ecaea8ef62fedb35e57556cfb3c5be4a9a90419c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/TensorUtils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <THH/THHAtomics.cuh>
#include "cuda_helpers.h"
template <typename T>
__global__ void PSROIPoolForward(
const int nthreads,
const T* input,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const T* rois,
const int channels_out,
T* output,
int* channel_mapping) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c_out, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c_out = (index / pooled_width / pooled_height) % channels_out;
int n = index / pooled_width / pooled_height / channels_out;
// (n, c_in, ph, pw) is the associated element in the input
int c_in = (c_out * pooled_height + ph) * pooled_width + pw;
// [start, end) interval for spatial sampling
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
int roi_start_w = roundf(offset_rois[1] * spatial_scale);
int roi_start_h = roundf(offset_rois[2] * spatial_scale);
int roi_end_w = roundf(offset_rois[3] * spatial_scale);
int roi_end_h = roundf(offset_rois[4] * spatial_scale);
// Force too small ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w, 1);
int roi_height = max(roi_end_h - roi_start_h, 1);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height - 1);
hend = min(max(hend + roi_start_h, 0), height - 1);
wstart = min(max(wstart + roi_start_w, 0), width - 1);
wend = min(max(wend + roi_start_w, 0), width - 1);
bool is_empty = (hend <= hstart) || (wend <= wstart);
const T* offset_input =
input + (roi_batch_ind * channels + c_in) * height * width;
T out_sum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int input_index = h * width + w;
out_sum += offset_input[input_index];
}
}
T bin_area = (hend - hstart) * (wend - wstart);
output[index] = is_empty ? static_cast<T>(0) : out_sum / bin_area;
channel_mapping[index] = c_in;
}
}
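// In short: each output cell (n, c_out, ph, pw) averages the input over its bin in the
// position-sensitive channel c_in, and channel_mapping records c_in so the backward
// pass can route the gradient back to the same channel.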
template <typename T>
__global__ void PSROIPoolBackward(
const int nthreads,
const T* grad_output,
const int* channel_mapping,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int channels_out,
T* grad_input,
const T* rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, *, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / channels_out;
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
int roi_start_w = roundf(offset_rois[1] * spatial_scale);
int roi_start_h = roundf(offset_rois[2] * spatial_scale);
int roi_end_w = roundf(offset_rois[3] * spatial_scale);
int roi_end_h = roundf(offset_rois[4] * spatial_scale);
// Force too small ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w, 1);
int roi_height = max(roi_end_h - roi_start_h, 1);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
int c_in = channel_mapping[index];
T* grad_input_offset =
grad_input + (roi_batch_ind * channels + c_in) * height * width;
T bin_area = (hend - hstart) * (wend - wstart);
T diff_val = is_empty ? static_cast<T>(0) : grad_output[index] / bin_area;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int grad_input_index = h * width + w;
atomicAdd(grad_input_offset + grad_input_index, diff_val);
}
}
}
}
std::tuple<at::Tensor, at::Tensor> PSROIPool_forward_cuda(
const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width) {
// Check if input tensors are CUDA tensors
AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "PSROIPool_forward_cuda";
at::checkAllSameGPU(c, {input_t, rois_t});
at::checkAllSameType(c, {input_t, rois_t});
at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device());
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
AT_ASSERTM(
channels % (pooled_height * pooled_width) == 0,
"input channels must be a multiple of pooling height * pooling width");
int channels_out = channels / (pooled_height * pooled_width);
auto output = at::zeros(
{num_rois, channels_out, pooled_height, pooled_width}, input.options());
auto channel_mapping =
at::zeros(output.sizes(), input.options().dtype(at::kInt));
auto output_size = output.numel();
if (output_size == 0) {
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(output, channel_mapping);
}
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(
ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
auto input_ = input.contiguous(),
rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "PSROIPool_forward", [&] {
hipLaunchKernelGGL(( PSROIPoolForward<scalar_t>), dim3(grid), dim3(block), 0, stream,
output_size,
input_.data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
rois_.data_ptr<scalar_t>(),
channels_out,
output.data_ptr<scalar_t>(),
channel_mapping.data_ptr<int>());
});
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(output, channel_mapping);
}
at::Tensor PSROIPool_backward_cuda(
const at::Tensor& grad,
const at::Tensor& rois,
const at::Tensor& channel_mapping,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width) {
// Check if input tensors are CUDA tensors
AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
AT_ASSERTM(
channel_mapping.is_cuda(),
"channel_mapping must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
channel_mapping_t{channel_mapping, "channel_mapping", 3};
at::CheckedFrom c = "PSROIPool_backward_cuda";
at::checkAllSameGPU(c, {grad_t, rois_t, channel_mapping_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad.device());
auto num_rois = rois.size(0);
auto grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(
ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return grad_input;
}
int channels_out = channels / (pooled_height * pooled_width);
auto grad_ = grad.contiguous(),
rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "PSROIPool_backward", [&] {
hipLaunchKernelGGL(( PSROIPoolBackward<scalar_t>), dim3(grid), dim3(block), 0, stream,
grad.numel(),
grad_.data_ptr<scalar_t>(),
channel_mapping.data_ptr<int>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
channels_out,
grad_input.data_ptr<scalar_t>(),
rois_.data_ptr<scalar_t>());
});
AT_CUDA_CHECK(hipGetLastError());
return grad_input;
}
|
ecaea8ef62fedb35e57556cfb3c5be4a9a90419c.cu
|
#include <ATen/ATen.h>
#include <ATen/TensorUtils.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <THC/THCAtomics.cuh>
#include "cuda_helpers.h"
template <typename T>
__global__ void PSROIPoolForward(
const int nthreads,
const T* input,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const T* rois,
const int channels_out,
T* output,
int* channel_mapping) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c_out, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c_out = (index / pooled_width / pooled_height) % channels_out;
int n = index / pooled_width / pooled_height / channels_out;
// (n, c_in, ph, pw) is the associated element in the input
int c_in = (c_out * pooled_height + ph) * pooled_width + pw;
// [start, end) interval for spatial sampling
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
int roi_start_w = roundf(offset_rois[1] * spatial_scale);
int roi_start_h = roundf(offset_rois[2] * spatial_scale);
int roi_end_w = roundf(offset_rois[3] * spatial_scale);
int roi_end_h = roundf(offset_rois[4] * spatial_scale);
// Force too small ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w, 1);
int roi_height = max(roi_end_h - roi_start_h, 1);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height - 1);
hend = min(max(hend + roi_start_h, 0), height - 1);
wstart = min(max(wstart + roi_start_w, 0), width - 1);
wend = min(max(wend + roi_start_w, 0), width - 1);
bool is_empty = (hend <= hstart) || (wend <= wstart);
const T* offset_input =
input + (roi_batch_ind * channels + c_in) * height * width;
T out_sum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int input_index = h * width + w;
out_sum += offset_input[input_index];
}
}
T bin_area = (hend - hstart) * (wend - wstart);
output[index] = is_empty ? static_cast<T>(0) : out_sum / bin_area;
channel_mapping[index] = c_in;
}
}
template <typename T>
__global__ void PSROIPoolBackward(
const int nthreads,
const T* grad_output,
const int* channel_mapping,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int channels_out,
T* grad_input,
const T* rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, *, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / channels_out;
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
int roi_start_w = roundf(offset_rois[1] * spatial_scale);
int roi_start_h = roundf(offset_rois[2] * spatial_scale);
int roi_end_w = roundf(offset_rois[3] * spatial_scale);
int roi_end_h = roundf(offset_rois[4] * spatial_scale);
// Force too small ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w, 1);
int roi_height = max(roi_end_h - roi_start_h, 1);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
int c_in = channel_mapping[index];
T* grad_input_offset =
grad_input + (roi_batch_ind * channels + c_in) * height * width;
T bin_area = (hend - hstart) * (wend - wstart);
T diff_val = is_empty ? static_cast<T>(0) : grad_output[index] / bin_area;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int grad_input_index = h * width + w;
atomicAdd(grad_input_offset + grad_input_index, diff_val);
}
}
}
}
std::tuple<at::Tensor, at::Tensor> PSROIPool_forward_cuda(
const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width) {
// Check if input tensors are CUDA tensors
AT_ASSERTM(input.is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "PSROIPool_forward_cuda";
at::checkAllSameGPU(c, {input_t, rois_t});
at::checkAllSameType(c, {input_t, rois_t});
at::cuda::CUDAGuard device_guard(input.device());
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
AT_ASSERTM(
channels % (pooled_height * pooled_width) == 0,
"input channels must be a multiple of pooling height * pooling width");
int channels_out = channels / (pooled_height * pooled_width);
auto output = at::zeros(
{num_rois, channels_out, pooled_height, pooled_width}, input.options());
auto channel_mapping =
at::zeros(output.sizes(), input.options().dtype(at::kInt));
auto output_size = output.numel();
if (output_size == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(output, channel_mapping);
}
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(
ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
auto input_ = input.contiguous(),
rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "PSROIPool_forward", [&] {
PSROIPoolForward<scalar_t><<<grid, block, 0, stream>>>(
output_size,
input_.data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
rois_.data_ptr<scalar_t>(),
channels_out,
output.data_ptr<scalar_t>(),
channel_mapping.data_ptr<int>());
});
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(output, channel_mapping);
}
at::Tensor PSROIPool_backward_cuda(
const at::Tensor& grad,
const at::Tensor& rois,
const at::Tensor& channel_mapping,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width) {
// Check if input tensors are CUDA tensors
AT_ASSERTM(grad.is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.is_cuda(), "rois must be a CUDA tensor");
AT_ASSERTM(
channel_mapping.is_cuda(),
"channel_mapping must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
channel_mapping_t{channel_mapping, "channel_mapping", 3};
at::CheckedFrom c = "PSROIPool_backward_cuda";
at::checkAllSameGPU(c, {grad_t, rois_t, channel_mapping_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::cuda::CUDAGuard device_guard(grad.device());
auto num_rois = rois.size(0);
auto grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(
ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
int channels_out = channels / (pooled_height * pooled_width);
auto grad_ = grad.contiguous(),
rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "PSROIPool_backward", [&] {
PSROIPoolBackward<scalar_t><<<grid, block, 0, stream>>>(
grad.numel(),
grad_.data_ptr<scalar_t>(),
channel_mapping.data_ptr<int>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
channels_out,
grad_input.data_ptr<scalar_t>(),
rois_.data_ptr<scalar_t>());
});
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
|
a3d848bc40f20639deacc2d40e8bdfc66cc5bb36.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <mpi.h>
#include <cstdio>
__global__ void mykernel(void) {
}
int main(int argc, char ** argv) {
MPI_Init(&argc, &argv);
int mpisize, mpirank;
MPI_Comm_size(MPI_COMM_WORLD, &mpisize);
MPI_Comm_rank(MPI_COMM_WORLD, &mpirank);
hipLaunchKernelGGL(( mykernel), dim3(1),dim3(1), 0, 0, );
printf("rank: %d/%d\n",mpirank,mpisize);
MPI_Finalize();
}
|
a3d848bc40f20639deacc2d40e8bdfc66cc5bb36.cu
|
#include <mpi.h>
#include <cstdio>
__global__ void mykernel(void) {
}
int main(int argc, char ** argv) {
MPI_Init(&argc, &argv);
int mpisize, mpirank;
MPI_Comm_size(MPI_COMM_WORLD, &mpisize);
MPI_Comm_rank(MPI_COMM_WORLD, &mpirank);
mykernel<<<1,1>>>();
printf("rank: %d/%d\n",mpirank,mpisize);
MPI_Finalize();
}
|
af635cb374fab692380957f00040fed81e525bad.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "SignalStrengthsSortedCuda.h"
#include "CellPhoneCoverage.h"
#include "CudaArray.h"
#include "Helpers.h"
#include <stdio.h>
#include <iostream>
using namespace std;
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if (hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
exit(-1);
}
}
// "Smart" CUDA implementation which computes signal strengths
//
// First, all transmitters are sorted into buckets
// Then, all receivers are sorted into buckets
// Then, receivers only compute signal strength against transmitters in nearby buckets
//
// This multi-step algorithm makes the signal strength computation scale much
// better to a high number of transmitters and receivers
struct Bucket
{
int startIndex; // Start of bucket within array
int numElements; // Number of elements in bucket
};
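// Example: a bucket covering elements 40..47 of the sorted position array is stored
// as { startIndex = 40, numElements = 8 }.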
///////////////////////////////////////////////////////////////////////////////////////////////
//
// No-operation sorting kernel
//
// This takes in an unordered set, and builds a dummy bucket representation around it
// It does not perform any actual sorting!
//
// This kernel must be launched with a 1,1 configuration (1 grid block, 1 thread).
static __global__ void noSortKernel(const Position* inputPositions,
int numInputPositions, Position* outputPositions, Bucket* outputBuckets)
{
int numBuckets = BucketsPerAxis * BucketsPerAxis;
// Copy contents of input positions into output positions
for (int i = 0; i < numInputPositions; ++i)
outputPositions[i] = inputPositions[i];
// Set up the set of buckets to cover the output positions evenly
for (int i = 0; i < numBuckets; i++)
{
Bucket& bucket = outputBuckets[i];
bucket.startIndex = numInputPositions * i / numBuckets;
bucket.numElements = (numInputPositions * (i + 1) / numBuckets)
- bucket.startIndex;
}
}
static __global__ void bucketInitKernel(Bucket* cudaInBucket){
// set all numElements to 0
int tid = threadIdx.x;
int myID = blockIdx.x* blockDim.x+tid;
Bucket& bucket = cudaInBucket[myID];
bucket.startIndex = 0;
bucket.numElements = 0;
}
static __global__ void bucketSizeKernel(Bucket* gpuOutBuckets,Position* gpuInputPositions, int num)
{
// 1. Determine how many of the input elements should end up in each bucket (build a histogram)
int x = 0;
int y = 0;
int index = 0;
int tid = threadIdx.x;
int myID = blockIdx.x* blockDim.x+tid;
if(myID<num) {
Position pos = gpuInputPositions[myID];
x = pos.x * BucketsPerAxis;
y = pos.y * BucketsPerAxis;
index = y * BucketsPerAxis + x;
Bucket& bucket = gpuOutBuckets[index];
atomicAdd(&bucket.numElements,1);
}
}
void startPosition(CudaArray<int>& cudaOutArray , CudaArray<Bucket>& cudaInBuckets)
{
// 2. Given the histogram, compute where in the output array that each bucket begins, and how large it is
// (perform prefix summation over the histogram)
Bucket *cpuBucketArray = new Bucket[cudaInBuckets.size()];
int * cpuArray;
cpuArray = (int *) malloc(256*sizeof(int));
cpuArray[0] = 0;
cudaInBuckets.copyFromCuda(cpuBucketArray);
    for(int i = 1 ; i<256;i++){
        cpuArray[i] = cpuBucketArray[i-1].numElements + cpuArray[i-1];
    }
    printf("\n");
    for(int i = 0 ; i<256;i++){
        printf("(%d, %d, %d), ", i, cpuBucketArray[i].numElements, cpuArray[i]);
    }
    printf("\n");
    for(int i = 0 ; i<256;i++){
        printf("%d, ", cpuArray[i]);
    }
    printf("\n");
    // Write the prefix sums back as each bucket's startIndex and reset its count,
    // so the scatter kernel can reuse numElements as a per-bucket write cursor and
    // the signal-strength kernel later knows where each bucket begins.
    for(int i = 0 ; i<256;i++){
        cpuBucketArray[i].startIndex = cpuArray[i];
        cpuBucketArray[i].numElements = 0;
    }
    cudaInBuckets.copyToCuda(cpuBucketArray);
    cudaOutArray.copyToCuda(cpuArray);
free(cpuArray);
delete[] cpuBucketArray;
}
static __global__ void sortPositionKernel(Position* outPositions,int* indexArray, Bucket* inBucket,
Position* inPosition, int num)
{
// 3. Given the start of each bucket within the output array, scatter elements from the input
// array into the output array
int tid = threadIdx.x;
int myID = blockIdx.x* blockDim.x+tid;
if(myID<num){
Position pos = inPosition[myID];
int px = pos.x*BucketsPerAxis;
int py = pos.y*BucketsPerAxis;
int bucketIndex = px + py*BucketsPerAxis;
int offset = atomicAdd(&inBucket[bucketIndex].numElements,1);
int out_pos_idx = indexArray[bucketIndex] + offset;
outPositions[out_pos_idx] = pos;
}
}
// The kernels above, together with the host-side prefix sum in startPosition(),
// are the building blocks used by sortPositionsIntoBuckets(...) below.
///////////////////////////////////////////////////////////////////////////////////////////////
//
// Sort a set of positions into a set of buckets
//
// Given a set of input positions, these will be re-ordered such that
// each range of elements in the output array belong to the same bucket.
// The list of buckets that is output describes where each such range begins
// and ends in the re-ordered position array.
static void sortPositionsIntoBuckets(CudaArray<Position>& cudaInputPositions,
CudaArray<Position>& cudaOutputPositions,
CudaArray<Bucket>& cudaOutputPositionBuckets)
{
// Bucket sorting with "Counting Sort" is a multi-phase process:
//
// 1. Determine how many of the input elements should end up in each bucket (build a histogram)
//
// 2. Given the histogram, compute where in the output array that each bucket begins, and how large it is
// (perform prefix summation over the histogram)
//
// 3. Given the start of each bucket within the output array, scatter elements from the input
// array into the output array
//
// Your new sort implementation should be able to handle at least 10 million entries, and
    // run in reasonable time (the reference implementation does the job in less than 5 seconds).
    //================= Implementation =====================================
    // Counting sort over at most 256 buckets (BucketsPerAxis * BucketsPerAxis):
    // bucketSizeKernel builds the per-bucket histogram on the GPU,
    // startPosition() turns it into prefix sums on the host, and
    // sortPositionKernel scatters each position into its bucket's range.
int num = cudaInputPositions.size();
CudaArray<int> cudaTemp(256);
hipLaunchKernelGGL(( bucketInitKernel), dim3(1),dim3(256), 0, 0, cudaOutputPositionBuckets.hipArray());
hipDeviceSynchronize();
checkCUDAError("bucketInitKernel");
hipLaunchKernelGGL(( bucketSizeKernel), dim3((num+255)/256),dim3(256), 0, 0, cudaOutputPositionBuckets.hipArray(),cudaInputPositions.hipArray(),num);
hipDeviceSynchronize();
checkCUDAError("bucketSizeKernel");
startPosition(cudaTemp,cudaOutputPositionBuckets);
    // startPosition() already wrote each bucket's startIndex and reset its count,
    // so the buckets are ready for the scatter pass without another init kernel.
printf("\n");
hipLaunchKernelGGL(( sortPositionKernel), dim3((num+255)/256),dim3(256), 0, 0, cudaOutputPositions.hipArray(),cudaTemp.hipArray(),
cudaOutputPositionBuckets.hipArray(),cudaInputPositions.hipArray(),num);
hipDeviceSynchronize();
checkCUDAError("sortPositionKernel");
}
///////////////////////////////////////////////////////////////////////////////////////////////
//
// Go through all transmitters in one bucket, find highest signal strength
// Return highest strength (or the old value, if that was higher)
static __device__ float scanBucket(const Position* transmitters,
int numTransmitters, const Position& receiver, float bestSignalStrength)
{
for (int transmitterIndex = 0; transmitterIndex < numTransmitters;
++transmitterIndex)
{
const Position& transmitter = transmitters[transmitterIndex];
float strength = signalStrength(transmitter, receiver);
if (bestSignalStrength < strength)
bestSignalStrength = strength;
}
return bestSignalStrength;
}
///////////////////////////////////////////////////////////////////////////////////////////////
//
// Calculate signal strength for all receivers
static __global__ void calculateSignalStrengthsSortedKernel(
const Position* transmitters, const Bucket* transmitterBuckets,
const Position* receivers, const Bucket* receiverBuckets,
float* signalStrengths)
{
// Determine which bucket the current grid block is processing
int receiverBucketIndexX = blockIdx.x;
int receiverBucketIndexY = blockIdx.y;
int receiverBucketIndex = receiverBucketIndexY * BucketsPerAxis
+ receiverBucketIndexX;
const Bucket& receiverBucket = receiverBuckets[receiverBucketIndex];
int receiverStartIndex = receiverBucket.startIndex;
int numReceivers = receiverBucket.numElements;
// Distribute available receivers over the set of available threads
for (int receiverIndex = threadIdx.x; receiverIndex < numReceivers;
receiverIndex += blockDim.x)
{
// Locate current receiver within the current bucket
const Position& receiver = receivers[receiverStartIndex + receiverIndex];
float& finalStrength = signalStrengths[receiverStartIndex
+ receiverIndex];
float bestSignalStrength = 0.f;
// Scan all buckets in the 3x3 region enclosing the receiver's bucket index
for (int transmitterBucketIndexY = receiverBucketIndexY - 1;
transmitterBucketIndexY < receiverBucketIndexY + 2;
++transmitterBucketIndexY)
for (int transmitterBucketIndexX = receiverBucketIndexX - 1;
transmitterBucketIndexX < receiverBucketIndexX + 2;
++transmitterBucketIndexX)
{
// Only process bucket if its index is within [0, BucketsPerAxis - 1] along each axis
if (transmitterBucketIndexX >= 0
&& transmitterBucketIndexX < BucketsPerAxis
&& transmitterBucketIndexY >= 0
&& transmitterBucketIndexY < BucketsPerAxis)
{
// Scan bucket for a potential new "highest signal strength"
int transmitterBucketIndex = transmitterBucketIndexY
* BucketsPerAxis + transmitterBucketIndexX;
int transmitterStartIndex =
transmitterBuckets[transmitterBucketIndex].startIndex;
int numTransmitters =
transmitterBuckets[transmitterBucketIndex].numElements;
bestSignalStrength = scanBucket(
&transmitters[transmitterStartIndex],
numTransmitters, receiver, bestSignalStrength);
}
}
// Store out the highest signal strength found for the receiver
finalStrength = bestSignalStrength;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////
void calculateSignalStrengthsSortedCuda(const PositionList& cpuTransmitters,
const PositionList& cpuReceivers,
SignalStrengthList& cpuSignalStrengths)
{
int numBuckets = BucketsPerAxis * BucketsPerAxis;
// Copy input positions to device memory
CudaArray<Position> cudaTempTransmitters(cpuTransmitters.size());
cudaTempTransmitters.copyToCuda(&(*cpuTransmitters.begin()));
CudaArray<Position> cudaTempReceivers(cpuReceivers.size());
cudaTempReceivers.copyToCuda(&(*cpuReceivers.begin()));
// Allocate device memory for sorted arrays
CudaArray<Position> cudaTransmitters(cpuTransmitters.size());
CudaArray<Bucket> cudaTransmitterBuckets(numBuckets);
CudaArray<Position> cudaReceivers(cpuReceivers.size());
CudaArray<Bucket> cudaReceiverBuckets(numBuckets);
// Sort transmitters and receivers into buckets
sortPositionsIntoBuckets(cudaTempTransmitters, cudaTransmitters,
cudaTransmitterBuckets);
sortPositionsIntoBuckets(cudaTempReceivers, cudaReceivers,
cudaReceiverBuckets);
// Perform signal strength computation
CudaArray<float> cudaSignalStrengths(cpuReceivers.size());
int numThreads = 256;
dim3 grid = dim3(BucketsPerAxis, BucketsPerAxis);
hipLaunchKernelGGL(( calculateSignalStrengthsSortedKernel), dim3(grid), dim3(numThreads), 0, 0,
cudaTransmitters.hipArray(), cudaTransmitterBuckets.hipArray(),
cudaReceivers.hipArray(), cudaReceiverBuckets.hipArray(),
cudaSignalStrengths.hipArray());
// Copy results back to host memory
cpuSignalStrengths.resize(cudaSignalStrengths.size());
cudaSignalStrengths.copyFromCuda(&(*cpuSignalStrengths.begin()));
}
|
af635cb374fab692380957f00040fed81e525bad.cu
|
#include "SignalStrengthsSortedCuda.h"
#include "CellPhoneCoverage.h"
#include "CudaArray.h"
#include "Helpers.h"
#include <stdio.h>
#include <iostream>
using namespace std;
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(-1);
}
}
// "Smart" CUDA implementation which computes signal strengths
//
// First, all transmitters are sorted into buckets
// Then, all receivers are sorted into buckets
// Then, receivers only compute signal strength against transmitters in nearby buckets
//
// This multi-step algorithm makes the signal strength computation scale much
// better to a high number of transmitters and receivers
struct Bucket
{
int startIndex; // Start of bucket within array
int numElements; // Number of elements in bucket
};
///////////////////////////////////////////////////////////////////////////////////////////////
//
// No-operation sorting kernel
//
// This takes in an unordered set, and builds a dummy bucket representation around it
// It does not perform any actual sorting!
//
// This kernel must be launched with a 1,1 configuration (1 grid block, 1 thread).
static __global__ void noSortKernel(const Position* inputPositions,
int numInputPositions, Position* outputPositions, Bucket* outputBuckets)
{
int numBuckets = BucketsPerAxis * BucketsPerAxis;
// Copy contents of input positions into output positions
for (int i = 0; i < numInputPositions; ++i)
outputPositions[i] = inputPositions[i];
// Set up the set of buckets to cover the output positions evenly
for (int i = 0; i < numBuckets; i++)
{
Bucket& bucket = outputBuckets[i];
bucket.startIndex = numInputPositions * i / numBuckets;
bucket.numElements = (numInputPositions * (i + 1) / numBuckets)
- bucket.startIndex;
}
}
static __global__ void bucketInitKernel(Bucket* cudaInBucket){
// set all numElements to 0
int tid = threadIdx.x;
int myID = blockIdx.x* blockDim.x+tid;
Bucket& bucket = cudaInBucket[myID];
bucket.startIndex = 0;
bucket.numElements = 0;
}
static __global__ void bucketSizeKernel(Bucket* gpuOutBuckets,Position* gpuInputPositions, int num)
{
// 1. Determine how many of the input elements should end up in each bucket (build a histogram)
int x = 0;
int y = 0;
int index = 0;
int tid = threadIdx.x;
int myID = blockIdx.x* blockDim.x+tid;
if(myID<num) {
Position pos = gpuInputPositions[myID];
x = pos.x * BucketsPerAxis;
y = pos.y * BucketsPerAxis;
index = y * BucketsPerAxis + x;
Bucket& bucket = gpuOutBuckets[index];
atomicAdd(&bucket.numElements,1);
}
}
void startPosition(CudaArray<int>& cudaOutArray , CudaArray<Bucket>& cudaInBuckets)
{
// 2. Given the histogram, compute where in the output array that each bucket begins, and how large it is
// (perform prefix summation over the histogram)
Bucket *cpuBucketArray = new Bucket[cudaInBuckets.size()];
int * cpuArray;
cpuArray = (int *) malloc(256*sizeof(int));
cpuArray[0] = 0;
cudaInBuckets.copyFromCuda(cpuBucketArray);
    for(int i = 1 ; i<256;i++){
        cpuArray[i] = cpuBucketArray[i-1].numElements + cpuArray[i-1];
    }
    printf("\n");
    for(int i = 0 ; i<256;i++){
        printf("(%d, %d, %d), ", i, cpuBucketArray[i].numElements, cpuArray[i]);
    }
    printf("\n");
    for(int i = 0 ; i<256;i++){
        printf("%d, ", cpuArray[i]);
    }
    printf("\n");
    // Write the prefix sums back as each bucket's startIndex and reset its count,
    // so the scatter kernel can reuse numElements as a per-bucket write cursor and
    // the signal-strength kernel later knows where each bucket begins.
    for(int i = 0 ; i<256;i++){
        cpuBucketArray[i].startIndex = cpuArray[i];
        cpuBucketArray[i].numElements = 0;
    }
    cudaInBuckets.copyToCuda(cpuBucketArray);
    cudaOutArray.copyToCuda(cpuArray);
free(cpuArray);
delete[] cpuBucketArray;
}
static __global__ void sortPositionKernel(Position* outPositions,int* indexArray, Bucket* inBucket,
Position* inPosition, int num)
{
// 3. Given the start of each bucket within the output array, scatter elements from the input
// array into the output array
int tid = threadIdx.x;
int myID = blockIdx.x* blockDim.x+tid;
if(myID<num){
Position pos = inPosition[myID];
int px = pos.x*BucketsPerAxis;
int py = pos.y*BucketsPerAxis;
int bucketIndex = px + py*BucketsPerAxis;
int offset = atomicAdd(&inBucket[bucketIndex].numElements,1);
int out_pos_idx = indexArray[bucketIndex] + offset;
outPositions[out_pos_idx] = pos;
}
}
// The kernels above, together with the host-side prefix sum in startPosition(),
// are the building blocks used by sortPositionsIntoBuckets(...) below.
///////////////////////////////////////////////////////////////////////////////////////////////
//
// Sort a set of positions into a set of buckets
//
// Given a set of input positions, these will be re-ordered such that
// each range of elements in the output array belong to the same bucket.
// The list of buckets that is output describes where each such range begins
// and ends in the re-ordered position array.
static void sortPositionsIntoBuckets(CudaArray<Position>& cudaInputPositions,
CudaArray<Position>& cudaOutputPositions,
CudaArray<Bucket>& cudaOutputPositionBuckets)
{
// Bucket sorting with "Counting Sort" is a multi-phase process:
//
// 1. Determine how many of the input elements should end up in each bucket (build a histogram)
//
// 2. Given the histogram, compute where in the output array that each bucket begins, and how large it is
// (perform prefix summation over the histogram)
//
// 3. Given the start of each bucket within the output array, scatter elements from the input
// array into the output array
//
// Your new sort implementation should be able to handle at least 10 million entries, and
    // run in reasonable time (the reference implementation does the job in less than 5 seconds).
    //================= Implementation =====================================
    // Counting sort over at most 256 buckets (BucketsPerAxis * BucketsPerAxis):
    // bucketSizeKernel builds the per-bucket histogram on the GPU,
    // startPosition() turns it into prefix sums on the host, and
    // sortPositionKernel scatters each position into its bucket's range.
int num = cudaInputPositions.size();
CudaArray<int> cudaTemp(256);
bucketInitKernel<<<1,256>>>(cudaOutputPositionBuckets.cudaArray());
cudaThreadSynchronize();
checkCUDAError("bucketInitKernel");
bucketSizeKernel<<<(num+255)/256,256>>>(cudaOutputPositionBuckets.cudaArray(),cudaInputPositions.cudaArray(),num);
cudaThreadSynchronize();
checkCUDAError("bucketSizeKernel");
startPosition(cudaTemp,cudaOutputPositionBuckets);
    // startPosition() already wrote each bucket's startIndex and reset its count,
    // so the buckets are ready for the scatter pass without another init kernel.
printf("\n");
sortPositionKernel<<<(num+255)/256,256>>>(cudaOutputPositions.cudaArray(),cudaTemp.cudaArray(),
cudaOutputPositionBuckets.cudaArray(),cudaInputPositions.cudaArray(),num);
cudaThreadSynchronize();
checkCUDAError("sortPositionKernel");
}
///////////////////////////////////////////////////////////////////////////////////////////////
//
// Go through all transmitters in one bucket, find highest signal strength
// Return highest strength (or the old value, if that was higher)
static __device__ float scanBucket(const Position* transmitters,
int numTransmitters, const Position& receiver, float bestSignalStrength)
{
for (int transmitterIndex = 0; transmitterIndex < numTransmitters;
++transmitterIndex)
{
const Position& transmitter = transmitters[transmitterIndex];
float strength = signalStrength(transmitter, receiver);
if (bestSignalStrength < strength)
bestSignalStrength = strength;
}
return bestSignalStrength;
}
///////////////////////////////////////////////////////////////////////////////////////////////
//
// Calculate signal strength for all receivers
static __global__ void calculateSignalStrengthsSortedKernel(
const Position* transmitters, const Bucket* transmitterBuckets,
const Position* receivers, const Bucket* receiverBuckets,
float* signalStrengths)
{
// Determine which bucket the current grid block is processing
int receiverBucketIndexX = blockIdx.x;
int receiverBucketIndexY = blockIdx.y;
int receiverBucketIndex = receiverBucketIndexY * BucketsPerAxis
+ receiverBucketIndexX;
const Bucket& receiverBucket = receiverBuckets[receiverBucketIndex];
int receiverStartIndex = receiverBucket.startIndex;
int numReceivers = receiverBucket.numElements;
// Distribute available receivers over the set of available threads
for (int receiverIndex = threadIdx.x; receiverIndex < numReceivers;
receiverIndex += blockDim.x)
{
// Locate current receiver within the current bucket
const Position& receiver = receivers[receiverStartIndex + receiverIndex];
float& finalStrength = signalStrengths[receiverStartIndex
+ receiverIndex];
float bestSignalStrength = 0.f;
// Scan all buckets in the 3x3 region enclosing the receiver's bucket index
for (int transmitterBucketIndexY = receiverBucketIndexY - 1;
transmitterBucketIndexY < receiverBucketIndexY + 2;
++transmitterBucketIndexY)
for (int transmitterBucketIndexX = receiverBucketIndexX - 1;
transmitterBucketIndexX < receiverBucketIndexX + 2;
++transmitterBucketIndexX)
{
// Only process bucket if its index is within [0, BucketsPerAxis - 1] along each axis
if (transmitterBucketIndexX >= 0
&& transmitterBucketIndexX < BucketsPerAxis
&& transmitterBucketIndexY >= 0
&& transmitterBucketIndexY < BucketsPerAxis)
{
// Scan bucket for a potential new "highest signal strength"
int transmitterBucketIndex = transmitterBucketIndexY
* BucketsPerAxis + transmitterBucketIndexX;
int transmitterStartIndex =
transmitterBuckets[transmitterBucketIndex].startIndex;
int numTransmitters =
transmitterBuckets[transmitterBucketIndex].numElements;
bestSignalStrength = scanBucket(
&transmitters[transmitterStartIndex],
numTransmitters, receiver, bestSignalStrength);
}
}
// Store out the highest signal strength found for the receiver
finalStrength = bestSignalStrength;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////
void calculateSignalStrengthsSortedCuda(const PositionList& cpuTransmitters,
const PositionList& cpuReceivers,
SignalStrengthList& cpuSignalStrengths)
{
int numBuckets = BucketsPerAxis * BucketsPerAxis;
// Copy input positions to device memory
CudaArray<Position> cudaTempTransmitters(cpuTransmitters.size());
cudaTempTransmitters.copyToCuda(&(*cpuTransmitters.begin()));
CudaArray<Position> cudaTempReceivers(cpuReceivers.size());
cudaTempReceivers.copyToCuda(&(*cpuReceivers.begin()));
// Allocate device memory for sorted arrays
CudaArray<Position> cudaTransmitters(cpuTransmitters.size());
CudaArray<Bucket> cudaTransmitterBuckets(numBuckets);
CudaArray<Position> cudaReceivers(cpuReceivers.size());
CudaArray<Bucket> cudaReceiverBuckets(numBuckets);
// Sort transmitters and receivers into buckets
sortPositionsIntoBuckets(cudaTempTransmitters, cudaTransmitters,
cudaTransmitterBuckets);
sortPositionsIntoBuckets(cudaTempReceivers, cudaReceivers,
cudaReceiverBuckets);
// Perform signal strength computation
CudaArray<float> cudaSignalStrengths(cpuReceivers.size());
int numThreads = 256;
dim3 grid = dim3(BucketsPerAxis, BucketsPerAxis);
calculateSignalStrengthsSortedKernel<<<grid, numThreads>>>(
cudaTransmitters.cudaArray(), cudaTransmitterBuckets.cudaArray(),
cudaReceivers.cudaArray(), cudaReceiverBuckets.cudaArray(),
cudaSignalStrengths.cudaArray());
// Copy results back to host memory
cpuSignalStrengths.resize(cudaSignalStrengths.size());
cudaSignalStrengths.copyFromCuda(&(*cpuSignalStrengths.begin()));
}
|
34eb1862edab0b820a4b36af839f4985c0565350.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "downSanple422_gpu.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
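// For each matrix size above, the benchmark sweeps all 20 block shapes: one synchronized
// warm-up launch, 10 further warm-up launches, then 1000 launches timed with std::chrono
// (there is no sync inside the timed loop, so this mostly measures launch overhead).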
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
hipTextureObject_t ch1 = 1;
hipTextureObject_t ch2 = 1;
uint8_t *downCh1 = NULL;
hipMalloc(&downCh1, XSIZE*YSIZE);
uint8_t *downCh2 = NULL;
hipMalloc(&downCh2, XSIZE*YSIZE);
size_t width = XSIZE;
size_t height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((downSanple422_gpu), dim3(gridBlock), dim3(threadBlock), 0, 0, ch1, ch2, downCh1, downCh2, width, height);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
    hipLaunchKernelGGL((downSanple422_gpu), dim3(gridBlock), dim3(threadBlock), 0, 0, ch1, ch2, downCh1, downCh2, width, height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    hipLaunchKernelGGL((downSanple422_gpu), dim3(gridBlock), dim3(threadBlock), 0, 0, ch1, ch2, downCh1, downCh2, width, height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
34eb1862edab0b820a4b36af839f4985c0565350.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "downSanple422_gpu.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
cudaTextureObject_t ch1 = 1;
cudaTextureObject_t ch2 = 1;
uint8_t *downCh1 = NULL;
cudaMalloc(&downCh1, XSIZE*YSIZE);
uint8_t *downCh2 = NULL;
cudaMalloc(&downCh2, XSIZE*YSIZE);
size_t width = XSIZE;
size_t height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
downSanple422_gpu<<<gridBlock,threadBlock>>>(ch1,ch2,downCh1,downCh2,width,height);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
downSanple422_gpu<<<gridBlock,threadBlock>>>(ch1,ch2,downCh1,downCh2,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
downSanple422_gpu<<<gridBlock,threadBlock>>>(ch1,ch2,downCh1,downCh2,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
75a45e426fd696d3b507ac3720282eca8a94da9f.hip
|
// !!! This is a file automatically generated by hipify!!!
#define FP float
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <math.h>
__global__ void gpu_matrixmult(FP *a,FP *b, FP *c, int n, int p, int m) {
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
int indexb = col;
int index = row * m + col;
if(col < m && row < n) {
c[index] = 0.;
for (int indexa = row*p; indexa < (row*p + p); indexa++, indexb+=m)
c[index] += a[indexa]*b[indexb];
}
}
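// Each thread computes one element of C: the dot product of row `row` of A (n x p)
// with column `col` of B (p x m); threads that fall outside the n x m output do nothing.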
void cpu_matrixmult(FP *a,FP *b, FP *c, int n, int p, int m) {
int index, indexa, indexb;
FP cvalue;
for(int col=0;col < m; col++)
for(int row=0;row < n; row++) {
indexb = col;
index = row * m + col;
cvalue = 0.;
for (indexa = row*p; indexa < (row*p + p); indexa++, indexb+=m)
cvalue += a[indexa]*b[indexb];
c[index] -= cvalue; //NOTE: This calculates the diff between CPU and GPU computations.
}
}
int main(int argc, char *argv[]) {
int i, j; // loop counters
int gpucount = 0; // Count of available GPUs
int gpunum = 0; // Device number to use
int Grid_Dim_x = 1, Grid_Dim_y = 1; //Grid dimension, x and y, square
int Block_Dim = 1; //Block dimension, x and y, square
int n, p, m; // matrix dimension
FP *a,*b,*c;
FP *dev_a, *dev_b, *dev_c;
int size_a, size_b, size_c; // number of bytes in arrays
hipEvent_t start, stop; // using cuda events to measure time
float elapsed_time_ms; // which is applicable for asynchronous code also
hipError_t errorcode;
// --------------------SET PARAMETERS AND DATA -----------------------
errorcode = hipGetDeviceCount(&gpucount);
if (errorcode == hipErrorNoDevice) {
printf("No GPUs are visible\n");
exit(-1);
}
else {
printf("Device count = %d\n",gpucount);
}
if ((argc<5) || (argc>6)) {
printf("Usage: matmul <matrix dim n> <matrix dim p> <matrix dim m> <block dim> [<dev num>]\n");
exit (-1);
}
n = atoi(argv[1]);
p = atoi(argv[2]);
m = atoi(argv[3]);
Block_Dim = atoi(argv[4]); // Square block
if (Block_Dim*Block_Dim > 1024) {
printf("Error, too many threads in block\n");
// exit (-1);
}
Grid_Dim_x = (m + Block_Dim - 1) / Block_Dim; // Square grid
Grid_Dim_y = (n + Block_Dim - 1) / Block_Dim;
// if (Grid_Dim*Block_Dim < n) {
// printf("Error, number of threads in x/y dimensions less than number of array elements\n");
// exit (-1);
// }
if (argc==6) {
gpunum = atoi(argv[5]); // Device number
if ((gpunum > 2) || (gpunum < 0)) {
printf("Error, Device number must be 0, 1, or 2\n");
exit (-1);
}
}
hipSetDevice(gpunum);
printf("Using device %d\n",gpunum);
printf("Matrix Dimension = (%d, %d, %d)\n",n, p, m);
printf("Block_Dim = %d, Grid_Dim_x = %d, Grid_Dim_y = %d\n",Block_Dim, Grid_Dim_x, Grid_Dim_y);
dim3 Grid(Grid_Dim_x, Grid_Dim_y); //Grid structure
dim3 Block(Block_Dim, Block_Dim); //Block structure
size_a = n * p * sizeof(FP); // number of bytes in total in arrays
size_b = p * m * sizeof(FP);
size_c = n * m * sizeof(FP);
a = (FP*) malloc(size_a); // dynamically allocated memory for arrays on host
b = (FP*) malloc(size_b);
c = (FP*) malloc(size_c); // results from GPU
srand(12345);
// int p = n; //Used here only to illustrate proper initialization for non-square case
for(i=0;i < n;i++)
for(j=0;j < p;j++) {
a[i * p + j] = (FP) rand() / (FP) RAND_MAX;
// a[i * p + j] = (FP) i+j; // may be helpful for debugging
}
for(i=0;i < p;i++)
for(j=0;j < m;j++) {
b[i * m + j] = (FP) rand() / (FP) RAND_MAX;
// b[i * n + j] = (FP) i+j; // may be helpful for debugging
}
// ------------- COMPUTATION DONE ON GPU ----------------------------
hipMalloc((void**)&dev_a, size_a); // allocate memory on device
hipMalloc((void**)&dev_b, size_b);
hipMalloc((void**)&dev_c, size_c);
hipMemcpy(dev_a, a , size_a ,hipMemcpyHostToDevice);
hipMemcpy(dev_b, b , size_b ,hipMemcpyHostToDevice);
hipEventCreate(&start); // instrument code to measure start time
hipEventCreate(&stop);
hipEventRecord(start, 0);
// hipEventSynchronize(start); // not needed
hipLaunchKernelGGL(( gpu_matrixmult), dim3(Grid),dim3(Block), 0, 0, dev_a,dev_b,dev_c,n, p, m);
hipEventRecord(stop, 0); // instrument code to measure end time
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time_ms, start, stop );
hipMemcpy(c,dev_c, size_c ,hipMemcpyDeviceToHost);
printf("Time to calculate results on GPU: %f ms.\n", elapsed_time_ms); // exec. time
// ------------- COMPUTATION DONE ON HOST CPU ----------------------------
// DEBUGGING USE ONLY (AND FOR LIMITED NUMBERS OF TIMING RUNS)
hipEventRecord(start, 0); // use same timing
// hipEventSynchronize(start); // not needed
cpu_matrixmult(a,b,c, n, p, m); // do calculation on host (NOTE: This computes the diff with GPU result.)
hipEventRecord(stop, 0); // instrument code to measure end time
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time_ms, start, stop );
printf("Time to calculate results on CPU: %f ms.\n", elapsed_time_ms); // exec. time
// ------------------- check device creates correct results -----------------
double error, suma, sumb, sumc, ai, bi, ci;
suma = 0.; sumb = 0; sumc = 0;
// accumulate Frobenius norms over the actual array extents (a is n*p, b is p*m, c is n*m)
for(i=0;i < n*p;i++) { ai = (double) a[i]; suma += ai*ai; }
for(i=0;i < p*m;i++) { bi = (double) b[i]; sumb += bi*bi; }
for(i=0;i < n*m;i++) { ci = (double) c[i]; sumc += ci*ci; }
suma = sqrt(suma);
sumb = sqrt(sumb);
sumc = sqrt(sumc);
error = sumc/(n*suma*sumb);
printf("Scaled error between GPU and CPU: %e\n", error);
// -------------- clean up ---------------------------------------
free(a);
free(b);
free(c);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
hipEventDestroy(start);
hipEventDestroy(stop);
return 0;
}
|
75a45e426fd696d3b507ac3720282eca8a94da9f.cu
|
#define FP float
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <math.h>
__global__ void gpu_matrixmult(FP *a,FP *b, FP *c, int n, int p, int m) {
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
int indexb = col;
int index = row * m + col;
if(col < m && row < n) {
c[index] = 0.;
for (int indexa = row*p; indexa < (row*p + p); indexa++, indexb+=m)
c[index] += a[indexa]*b[indexb];
}
}
void cpu_matrixmult(FP *a,FP *b, FP *c, int n, int p, int m) {
int index, indexa, indexb;
FP cvalue;
for(int col=0;col < m; col++)
for(int row=0;row < n; row++) {
indexb = col;
index = row * m + col;
cvalue = 0.;
for (indexa = row*p; indexa < (row*p + p); indexa++, indexb+=m)
cvalue += a[indexa]*b[indexb];
c[index] -= cvalue; //NOTE: This calculates the diff between CPU and GPU computations.
}
}
int main(int argc, char *argv[]) {
int i, j; // loop counters
int gpucount = 0; // Count of available GPUs
int gpunum = 0; // Device number to use
int Grid_Dim_x = 1, Grid_Dim_y = 1; //Grid dimension, x and y, square
int Block_Dim = 1; //Block dimension, x and y, square
int n, p, m; // matrix dimension
FP *a,*b,*c;
FP *dev_a, *dev_b, *dev_c;
int size_a, size_b, size_c; // number of bytes in arrays
cudaEvent_t start, stop; // using cuda events to measure time
float elapsed_time_ms; // which is applicable for asynchronous code also
cudaError_t errorcode;
// --------------------SET PARAMETERS AND DATA -----------------------
errorcode = cudaGetDeviceCount(&gpucount);
if (errorcode == cudaErrorNoDevice) {
printf("No GPUs are visible\n");
exit(-1);
}
else {
printf("Device count = %d\n",gpucount);
}
if ((argc<5) || (argc>6)) {
printf("Usage: matmul <matrix dim n> <matrix dim p> <matrix dim m> <block dim> [<dev num>]\n");
exit (-1);
}
n = atoi(argv[1]);
p = atoi(argv[2]);
m = atoi(argv[3]);
Block_Dim = atoi(argv[4]); // Square block
if (Block_Dim*Block_Dim > 1024) {
printf("Error, too many threads in block\n");
// exit (-1);
}
Grid_Dim_x = (m + Block_Dim - 1) / Block_Dim; // Square grid
Grid_Dim_y = (n + Block_Dim - 1) / Block_Dim;
// if (Grid_Dim*Block_Dim < n) {
// printf("Error, number of threads in x/y dimensions less than number of array elements\n");
// exit (-1);
// }
if (argc==6) {
gpunum = atoi(argv[5]); // Device number
if ((gpunum > 2) || (gpunum < 0)) {
printf("Error, Device number must be 0, 1, or 2\n");
exit (-1);
}
}
cudaSetDevice(gpunum);
printf("Using device %d\n",gpunum);
printf("Matrix Dimension = (%d, %d, %d)\n",n, p, m);
printf("Block_Dim = %d, Grid_Dim_x = %d, Grid_Dim_y = %d\n",Block_Dim, Grid_Dim_x, Grid_Dim_y);
dim3 Grid(Grid_Dim_x, Grid_Dim_y); //Grid structure
dim3 Block(Block_Dim, Block_Dim); //Block structure
size_a = n * p * sizeof(FP); // number of bytes in total in arrays
size_b = p * m * sizeof(FP);
size_c = n * m * sizeof(FP);
a = (FP*) malloc(size_a); // dynamically allocated memory for arrays on host
b = (FP*) malloc(size_b);
c = (FP*) malloc(size_c); // results from GPU
srand(12345);
// int p = n; //Used here only to illustrate proper initialization for non-square case
for(i=0;i < n;i++)
for(j=0;j < p;j++) {
a[i * p + j] = (FP) rand() / (FP) RAND_MAX;
// a[i * p + j] = (FP) i+j; // may be helpful for debugging
}
for(i=0;i < p;i++)
for(j=0;j < m;j++) {
b[i * m + j] = (FP) rand() / (FP) RAND_MAX;
// b[i * n + j] = (FP) i+j; // may be helpful for debugging
}
// ------------- COMPUTATION DONE ON GPU ----------------------------
cudaMalloc((void**)&dev_a, size_a); // allocate memory on device
cudaMalloc((void**)&dev_b, size_b);
cudaMalloc((void**)&dev_c, size_c);
cudaMemcpy(dev_a, a , size_a ,cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b , size_b ,cudaMemcpyHostToDevice);
cudaEventCreate(&start); // instrument code to measure start time
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// cudaEventSynchronize(start); // not needed
gpu_matrixmult<<<Grid,Block>>>(dev_a,dev_b,dev_c,n, p, m);
cudaEventRecord(stop, 0); // instrument code to measure end time
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time_ms, start, stop );
cudaMemcpy(c,dev_c, size_c ,cudaMemcpyDeviceToHost);
printf("Time to calculate results on GPU: %f ms.\n", elapsed_time_ms); // exec. time
// ------------- COMPUTATION DONE ON HOST CPU ----------------------------
// DEBUGGING USE ONLY (AND FOR LIMITED NUMBERS OF TIMING RUNS)
cudaEventRecord(start, 0); // use same timing
// cudaEventSynchronize(start); // not needed
cpu_matrixmult(a,b,c, n, p, m); // do calculation on host (NOTE: This computes the diff with GPU result.)
cudaEventRecord(stop, 0); // instrument code to measure end time
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time_ms, start, stop );
printf("Time to calculate results on CPU: %f ms.\n", elapsed_time_ms); // exec. time
// ------------------- check device creates correct results -----------------
double error, suma, sumb, sumc, ai, bi, ci;
suma = 0.; sumb = 0.; sumc = 0.;
// The three arrays have different sizes for non-square problems, so each norm
// is accumulated over its own extent (a is n*p, b is p*m, c is n*m elements).
for(i=0;i < n*p;i++) {
ai = (double) a[i];
suma += ai*ai;
}
for(i=0;i < p*m;i++) {
bi = (double) b[i];
sumb += bi*bi;
}
for(i=0;i < n*m;i++) {
ci = (double) c[i];
sumc += ci*ci;
}
suma = sqrt(suma);
sumb = sqrt(sumb);
sumc = sqrt(sumc);
error = sumc/(n*suma*sumb);
printf("Scaled error between GPU and CPU: %e\n", error);
// -------------- clean up ---------------------------------------
free(a);
free(b);
free(c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
786dc365001e8b7f6d4e3cb247c46d59204c55ce.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <Vec3.cuh>
namespace TinyRT {
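// Host-side sampling helpers. randomVec3InUnitSphere rejection-samples points
// from the cube [-1,1]^3 until one lands inside the unit sphere;
// randomUnitVec3 draws a uniform direction directly by taking z uniform in
// [-1,1] and an angle uniform in [0, 2*pi) on the circle of radius sqrt(1-z^2).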
Vec3 randomVec3InUnitSphere() {
while (true) {
const Vec3 p = Vec3::random(-1.0f, 1.0f);
if (p.lengthSquared() >= 1) continue;
return p;
}
}
Vec3 randomUnitVec3() {
const auto a = randomFloat(0.0f, 2 * M_PI);
const auto z = randomFloat(-1.0f, 1.0f);
const auto r = sqrtf(1 - z * z);
return { r * cos(a), r * sin(a), z };
}
Vec3 randomVec3InHemisphere(const Vec3& normal) {
const Vec3 vec3InUnitSphere = randomVec3InUnitSphere();
if (dot(vec3InUnitSphere, normal) > 0.0f) // In the same hemisphere as the normal
return vec3InUnitSphere;
else
return -vec3InUnitSphere;
}
__device__ Vec3 randomVec3InUnitSphere(hiprandState_t* const randStatePtr) {
while (true) {
const Vec3 p = randStatePtr == nullptr ? Vec3(0.0f, 0.0f, 0.0f) : Vec3::random(-1.0f, 1.0f, randStatePtr);
if (p.lengthSquared() >= 1) continue;
return p;
}
}
__device__ Vec3 randomUnitVec3(hiprandState_t* const randStatePtr) {
if (randStatePtr == nullptr)
return { 0.0f, 0.0f, 0.0f };
const auto a = 2.0f * M_PI * randomFloat(randStatePtr);
const auto z = -1.0f + 2.0f * randomFloat(randStatePtr);
const auto r = sqrtf(1 - z * z);
return { r * cos(a), r * sin(a), z };
}
__device__ Vec3 randomVec3InHemisphere(const Vec3& normal, hiprandState_t* const randStatePtr) {
const Vec3 vec3InUnitSphere = randStatePtr == nullptr ? Vec3(0.0f, 0.0f, 0.0f) : randomVec3InUnitSphere(randStatePtr);
if (dot(vec3InUnitSphere, normal) > 0.0f) // In the same hemisphere as the normal
return vec3InUnitSphere;
else
return -vec3InUnitSphere;
}
__device__ Vec3 randomVec3InUnitDisk(hiprandState_t* const randStatePtr) {
while (true) {
const auto v = randStatePtr == nullptr ? Vec3(0.0f, 0.0f, 0.0f) :
Vec3(-1.0f + 2.0f * randomFloat(randStatePtr), -1.0f + 2.0f * randomFloat(randStatePtr), 0.0f); // sample x,y uniformly in [-1,1) so the whole disk is covered
if (v.lengthSquared() >= 1.0f) continue;
return v;
}
}
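// refract() decomposes the refracted ray (Snell's law) into the component
// parallel to the surface, scaled by eta/eta', and the component along -n
// chosen so the result stays unit length; uv and n are expected to be unit
// vectors here.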
__host__ __device__ Vec3 refract(const Vec3& uv, const Vec3& n, float etaOverEtaPrime) {
const float cosTheta = dot(-uv, n);
const Vec3 rayOutParallel = etaOverEtaPrime * (uv + cosTheta * n);
const Vec3 rayOutPerpendicular = -sqrtf(1.0f - rayOutParallel.lengthSquared()) * n;
return rayOutParallel + rayOutPerpendicular;
}
}
|
786dc365001e8b7f6d4e3cb247c46d59204c55ce.cu
|
#include <Vec3.cuh>
namespace TinyRT {
Vec3 randomVec3InUnitSphere() {
while (true) {
const Vec3 p = Vec3::random(-1.0f, 1.0f);
if (p.lengthSquared() >= 1) continue;
return p;
}
}
Vec3 randomUnitVec3() {
const auto a = randomFloat(0.0f, 2 * M_PI);
const auto z = randomFloat(-1.0f, 1.0f);
const auto r = sqrtf(1 - z * z);
return { r * cos(a), r * sin(a), z };
}
Vec3 randomVec3InHemisphere(const Vec3& normal) {
const Vec3 vec3InUnitSphere = randomVec3InUnitSphere();
if (dot(vec3InUnitSphere, normal) > 0.0f) // In the same hemisphere as the normal
return vec3InUnitSphere;
else
return -vec3InUnitSphere;
}
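// The __device__ overloads below mirror the host helpers above but draw their
// randomness from a per-thread curandState; when the state pointer is null
// they fall back to returning the zero vector instead of sampling.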
__device__ Vec3 randomVec3InUnitSphere(curandState* const randStatePtr) {
while (true) {
const Vec3 p = randStatePtr == nullptr ? Vec3(0.0f, 0.0f, 0.0f) : Vec3::random(-1.0f, 1.0f, randStatePtr);
if (p.lengthSquared() >= 1) continue;
return p;
}
}
__device__ Vec3 randomUnitVec3(curandState* const randStatePtr) {
if (randStatePtr == nullptr)
return { 0.0f, 0.0f, 0.0f };
const auto a = 2.0f * M_PI * randomFloat(randStatePtr);
const auto z = -1.0f + 2.0f * randomFloat(randStatePtr);
const auto r = sqrtf(1 - z * z);
return { r * cos(a), r * sin(a), z };
}
__device__ Vec3 randomVec3InHemisphere(const Vec3& normal, curandState* const randStatePtr) {
const Vec3 vec3InUnitSphere = randStatePtr == nullptr ? Vec3(0.0f, 0.0f, 0.0f) : randomVec3InUnitSphere(randStatePtr);
if (dot(vec3InUnitSphere, normal) > 0.0f) // In the same hemisphere as the normal
return vec3InUnitSphere;
else
return -vec3InUnitSphere;
}
__device__ Vec3 randomVec3InUnitDisk(curandState* const randStatePtr) {
while (true) {
const auto v = randStatePtr == nullptr ? Vec3(0.0f, 0.0f, 0.0f) :
Vec3(-1.0f + 2.0f * randomFloat(randStatePtr), -1.0f + 2.0f * randomFloat(randStatePtr), 0.0f); // sample x,y uniformly in [-1,1) so the whole disk is covered
if (v.lengthSquared() >= 1.0f) continue;
return v;
}
}
__host__ __device__ Vec3 refract(const Vec3& uv, const Vec3& n, float etaOverEtaPrime) {
const float cosTheta = dot(-uv, n);
const Vec3 rayOutParallel = etaOverEtaPrime * (uv + cosTheta * n);
const Vec3 rayOutPerpendicular = -sqrtf(1.0f - rayOutParallel.lengthSquared()) * n;
return rayOutParallel + rayOutPerpendicular;
}
}
|
bd10d3fe3f239a24fe4994c63e2148a5360e45bb.hip
|
// !!! This is a file automatically generated by hipify!!!
/** @file vl_nnconv.cu
** @brief Convolution block
** @author Andrea Vedaldi
** @author Michael Figurnov
**/
/*
Copyright (C) 2014 Andrea Vedaldi and Max Jaderberg.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "bits/mexutils.h"
#include "bits/nnhelper.h"
#include "bits/im2col.hpp"
#include "bits/subsample.hpp"
#include <assert.h>
#include <algorithm>
#include <blas.h>
#ifdef ENABLE_GPU
#include "bits/gpu.hpp"
#include <hipblas.h>
#endif
/* option codes */
enum {
opt_stride = 0,
opt_pad,
opt_conv_indices,
opt_microbatch_size,
opt_der_filters,
opt_der_biases,
opt_verbose,
opt_no_der_data,
opt_no_der_filters,
opt_no_der_biases,
} ;
/* options */
vlmxOption options [] = {
{"Stride", 1, opt_stride },
{"Pad", 1, opt_pad },
{"ConvIndices", 1, opt_conv_indices },
{"MicrobatchSize", 1, opt_microbatch_size },
{"DerFilters", 1, opt_der_filters },
{"DerBiases", 1, opt_der_biases },
{"Verbose", 0, opt_verbose },
{"NoDerData", 0, opt_no_der_data },
{"NoDerFilters", 0, opt_no_der_filters },
{"NoDerBiases", 0, opt_no_der_biases },
{0, 0, 0 }
} ;
/* ---------------------------------------------------------------- */
/* Cache */
/* ---------------------------------------------------------------- */
#ifdef ENABLE_GPU
bool cublasInitialized = false ;
hipblasHandle_t thisCublasHandle ;
#endif
bool persistentDataInitialized = false ;
PackedData temp ;
PackedData derOutputMasked;
PackedData outputMasked;
PackedData allOnes ;
void atExit()
{
if (persistentDataInitialized) {
packed_data_deinit (&temp) ;
packed_data_deinit (&derOutputMasked) ;
packed_data_deinit (&outputMasked) ;
packed_data_deinit (&allOnes) ;
persistentDataInitialized = false ;
}
#ifdef ENABLE_GPU
if (cublasInitialized) {
hipblasDestroy(thisCublasHandle) ;
cublasInitialized = false ;
}
#endif
}
/* ---------------------------------------------------------------- */
/* Dispatcher func */
/* ---------------------------------------------------------------- */
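/*
Each *_dispatch helper below routes one operation either to the CPU
implementation (Fortran BLAS via sgemv/sgemm, or the bits/ CPU kernels) or to
the GPU implementation (the cached BLAS handle and the bits/ GPU kernels),
depending on the gpuMode flag; the GPU branches are only compiled when
ENABLE_GPU is defined.
*/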
static void
sgemv_dispatch(bool gpuMode,
char op,
ptrdiff_t m, ptrdiff_t n,
float alpha,
float const * a, ptrdiff_t lda,
float const * x, ptrdiff_t incx,
float beta,
float * y, ptrdiff_t incy)
{
if (!gpuMode) {
sgemv(&op,
&m, &n, &alpha,
(float*)a, &lda,
(float*)x, &incx,
&beta,
y, &incy) ;
} else {
#ifdef ENABLE_GPU
hipblasSgemv(thisCublasHandle,
(op == 't') ? HIPBLAS_OP_T : HIPBLAS_OP_N,
(int)m, (int)n,
&alpha,
a, lda,
x, (int)incx,
&beta,
y, (int)incy) ;
#endif
}
}
static void
sgemm_dispatch(bool gpuMode,
char op1, char op2,
ptrdiff_t m, ptrdiff_t n, ptrdiff_t k,
float alpha,
float const * a, ptrdiff_t lda,
float const * b, ptrdiff_t ldb,
float beta,
float * c, ptrdiff_t ldc)
{
if (!gpuMode) {
sgemm(&op1, &op2,
&m, &n, &k,
&alpha,
(float*)a, &lda,
(float*)b, &ldb,
&beta,
c, &ldc) ;
} else {
#ifdef ENABLE_GPU
hipblasSgemm(thisCublasHandle,
(op1 == 't') ? HIPBLAS_OP_T : HIPBLAS_OP_N,
(op2 == 't') ? HIPBLAS_OP_T : HIPBLAS_OP_N,
(int)m, (int)n, (int)k,
&alpha,
a, (int)lda,
b, (int)ldb,
&beta,
c, (int)ldc);
#endif
}
}
static void
copy_dispatch(bool gpuMode,
float * dest,
float const * src,
size_t numElements)
{
if (!gpuMode) {
memcpy(dest, src, numElements * sizeof(float)) ;
} else {
#ifdef ENABLE_GPU
hipMemcpy(dest, src, numElements * sizeof(float), hipMemcpyDeviceToDevice) ;
#endif
}
}
static void
subsample_dispatch(bool gpuMode,
float* subsampled,
float const* data,
size_t width,
size_t height,
size_t depth,
size_t strideX,
size_t strideY,
size_t padLeft,
size_t padRight,
size_t padTop,
size_t padBottom)
{
if (!gpuMode) {
subsample_cpu(subsampled,
data,
width,
height,
depth,
strideX,
strideY,
padLeft,
padRight,
padTop,
padBottom) ;
} else {
#ifdef ENABLE_GPU
subsample_gpu(subsampled,
data,
width,
height,
depth,
strideX,
strideY,
padLeft,
padRight,
padTop,
padBottom) ;
#endif
}
}
static void
subsampleBackward_dispatch(bool gpuMode,
float* dzdx,
float const* dzdy,
size_t width,
size_t height,
size_t depth,
size_t strideX,
size_t strideY,
size_t padLeft,
size_t padRight,
size_t padTop,
size_t padBottom)
{
if (!gpuMode) {
subsampleBackward_cpu(dzdx,
dzdy,
width,
height,
depth,
strideX,
strideY,
padLeft,
padRight,
padTop,
padBottom) ;
} else {
#ifdef ENABLE_GPU
subsampleBackward_gpu(dzdx,
dzdy,
width,
height,
depth,
strideX,
strideY,
padLeft,
padRight,
padTop,
padBottom) ;
#endif
}
}
static void
im2col_dispatch(bool gpuMode,
float* stacked,
float const* data,
size_t width,
size_t height,
size_t depth,
size_t windowWidth,
size_t windowHeight,
size_t strideX,
size_t strideY,
size_t padLeft,
size_t padRight,
size_t padTop,
size_t padBottom)
{
if (!gpuMode) {
im2col_cpu<float>(stacked,
data,
width,
height,
depth,
windowWidth,
windowHeight,
strideX,
strideY,
padLeft,
padRight,
padTop,
padBottom) ;
} else {
#ifdef ENABLE_GPU
im2col_gpu<float>(stacked,
data,
width,
height,
depth,
windowWidth,
windowHeight,
strideX,
strideY,
padLeft,
padRight,
padTop,
padBottom) ;
#endif
}
}
static void
im2col_indexed_dispatch(bool gpuMode,
float* stacked,
float const* data,
int const* im2colIndices,
int im2colIndicesLength,
size_t width,
size_t height,
size_t depth,
size_t size,
size_t windowWidth,
size_t windowHeight)
{
if (!gpuMode) {
im2col_indexed_cpu<float>(stacked,
data,
im2colIndices,
im2colIndicesLength,
width,
height,
depth,
size,
windowWidth,
windowHeight);
} else {
#ifdef ENABLE_GPU
im2col_indexed_gpu<float>(stacked,
data,
im2colIndices,
im2colIndicesLength,
width,
height,
depth,
size,
windowWidth,
windowHeight);
#endif
}
}
static void
col2im_dispatch(bool gpuMode,
float* data,
float const* stacked,
size_t width,
size_t height,
size_t depth,
size_t windowWidth,
size_t windowHeight,
size_t strideX,
size_t strideY,
size_t padLeft,
size_t padRight,
size_t padTop,
size_t padBottom)
{
if (!gpuMode) {
col2im_cpu<float>(data,
stacked,
width,
height,
depth,
windowWidth,
windowHeight,
strideX,
strideY,
padLeft,
padRight,
padTop,
padBottom) ;
} else {
#ifdef ENABLE_GPU
col2im_gpu<float>(data,
stacked,
width,
height,
depth,
windowWidth,
windowHeight,
strideX,
strideY,
padLeft,
padRight,
padTop,
padBottom) ;
#endif
}
}
static void
col2im_indexed_dispatch(bool gpuMode,
float* data,
float const* stacked,
int const* im2colIndices,
int im2colIndicesLength,
size_t width,
size_t height,
size_t depth,
size_t size,
size_t windowWidth,
size_t windowHeight)
{
if (!gpuMode) {
col2im_indexed_cpu(data,
stacked,
im2colIndices,
im2colIndicesLength,
width,
height,
depth,
size,
windowWidth,
windowHeight);
} else {
#ifdef ENABLE_GPU
col2im_indexed_gpu(data,
stacked,
im2colIndices,
im2colIndicesLength,
width,
height,
depth,
size,
windowWidth,
windowHeight) ;
#endif
}
}
static void
transpose23_dispatch(bool gpuMode,
float* transposed,
float const* data,
size_t d1,
size_t d2,
size_t d3)
{
if (!gpuMode) {
transpose23_cpu(transposed, data, d1, d2, d3);
} else {
#ifdef ENABLE_GPU
transpose23_gpu(transposed, data, d1, d2, d3) ;
#endif
}
}
/* ---------------------------------------------------------------- */
/* MEX driver */
/* ---------------------------------------------------------------- */
enum {
IN_DATA = 0, IN_FILTERS, IN_BIASES, IN_DEROUTPUT, IN_END
} ;
enum {
OUT_RESULT = 0, OUT_DERFILTERS, OUT_DERBIASES, OUT_END
} ;
void mexFunction(int nout, mxArray *out[],
int nin, mxArray const *in[])
{
/* inputs */
PackedData data ;
PackedData filters ;
PackedData biases ;
PackedData derOutput ;
PackedData convIndices ;
PackedData derFiltersInit ;
PackedData derBiasesInit ;
/* outputs */
PackedData output ;
PackedData derData ;
PackedData derFilters ;
PackedData derBiases ;
PackedDataGeometry outputGeom ;
PackedDataGeometry derDataGeom ;
PackedDataGeometry derFiltersGeom ;
PackedDataGeometry derBiasesGeom ;
PackedDataGeometry tempGeom ;
PackedDataGeometry derOutputMaskedGeom ;
PackedDataGeometry outputMaskedGeom ;
PackedDataGeometry allOnesGeom ;
int strideX = 1 ;
int strideY = 1 ;
int padLeft = 0 ;
int padRight = 0 ;
int padTop = 0 ;
int padBottom = 0 ;
int numGroups = 1 ;
int microbatchSize = 1 ;
#if ENABLE_GPU
hipblasStatus_t stat;
bool gpuMode = false ;
#else
bool const gpuMode = false ;
#endif
bool backMode = false ;
bool hasFilters = false ;
bool hasBiases = false ;
bool fullyConnectedMode = false ;
bool is_1x1 = false ;
bool computeDerData = true ;
bool computeDerFilters = true ;
bool computeDerBiases = true ;
bool convIndicesMode = false;
bool derFiltersInitialized = false ;
bool derBiasesInitialized = false ;
int verbosity = 0 ;
int opt ;
int next = IN_END ;
mxArray const *optarg ;
packed_data_init_empty(&data) ;
packed_data_init_empty(&filters) ;
packed_data_init_empty(&biases) ;
packed_data_init_empty(&derOutput) ;
packed_data_init_empty(&convIndices) ;
packed_data_init_empty(&output) ;
packed_data_init_empty(&derData) ;
packed_data_init_empty(&derFilters) ;
packed_data_init_empty(&derBiases) ;
packed_data_init_empty(&derFiltersInit) ;
packed_data_init_empty(&derBiasesInit) ;
if (!persistentDataInitialized) {
packed_data_init_empty(&temp) ;
packed_data_init_empty(&derOutputMasked) ;
packed_data_init_empty(&outputMasked) ;
packed_data_init_empty(&allOnes) ;
persistentDataInitialized = true ;
}
/* -------------------------------------------------------------- */
/* Check the arguments */
/* -------------------------------------------------------------- */
mexAtExit(atExit) ;
if (nin < 3) {
mexErrMsgTxt("There are less than three arguments.") ;
}
if (nin > 3 && vlmxIsString(in[3],-1)) {
next = 3 ;
backMode = 0 ;
} else {
backMode = (nin >= 4) ;
}
while ((opt = vlmxNextOption (in, nin, options, &next, &optarg)) >= 0) {
switch (opt) {
case opt_verbose :
++ verbosity ;
break ;
case opt_stride :
if (!vlmxIsPlainMatrix(optarg,-1,-1)) {
mexErrMsgTxt("STRIDE is not a plain matrix.") ;
}
switch (mxGetNumberOfElements(optarg)) {
case 1:
strideY = (int)mxGetPr(optarg)[0] ;
strideX = strideY ;
break ;
case 2:
strideY = (int)mxGetPr(optarg)[0] ;
strideX = (int)mxGetPr(optarg)[1] ;
break ;
default:
mexErrMsgTxt("STRIDE has neither one nor two elements.") ;
}
break ;
case opt_pad :
if (!vlmxIsPlainMatrix(optarg,-1,-1)) {
mexErrMsgTxt("PAD is not a plain matrix.") ;
}
switch (mxGetNumberOfElements(optarg)) {
case 1:
padLeft = (int)mxGetPr(optarg)[0] ;
padRight = padLeft ;
padTop = padLeft ;
padBottom = padLeft ;
break ;
case 4:
padTop = (int)mxGetPr(optarg)[0] ;
padBottom = (int)mxGetPr(optarg)[1] ;
padLeft = (int)mxGetPr(optarg)[2] ;
padRight = (int)mxGetPr(optarg)[3] ;
break ;
default:
mexErrMsgTxt("STRIDE has neither one nor two elements.") ;
}
break ;
case opt_conv_indices :
if (mxGetNumberOfElements(optarg) != 0) {
convIndicesMode = true;
packed_data_init_with_array_int(&convIndices, optarg);
}
break;
case opt_microbatch_size :
if (mxGetNumberOfElements(optarg) == 1) {
microbatchSize = (int)mxGetPr(optarg)[0] ;
}
break;
case opt_der_filters :
if (mxGetNumberOfElements(optarg) != 0) {
derFiltersInitialized = true;
packed_data_init_with_array(&derFiltersInit, optarg);
}
break;
case opt_der_biases :
if (mxGetNumberOfElements(optarg) != 0) {
derBiasesInitialized = true;
packed_data_init_with_array(&derBiasesInit, optarg);
}
break;
case opt_no_der_data :
computeDerData = VL_FALSE ;
break ;
case opt_no_der_filters :
computeDerFilters = VL_FALSE ;
break ;
case opt_no_der_biases :
computeDerBiases = VL_FALSE ;
break ;
default: break ;
}
}
packed_data_init_with_array(&data, in[IN_DATA]) ;
packed_data_init_with_array(&filters, in[IN_FILTERS]) ;
packed_data_init_with_array(&biases, in[IN_BIASES]) ;
if (backMode) { packed_data_init_with_array(&derOutput, in[IN_DEROUTPUT]) ; }
#if ENABLE_GPU
gpuMode = (data.mode == matlabGpuArrayWrapper) ;
if (gpuMode) {
mxInitGPU() ;
if (!cublasInitialized) {
stat = hipblasCreate(&thisCublasHandle) ;
if (stat != HIPBLAS_STATUS_SUCCESS) {
mexErrMsgTxt("Could not initialize cuBLAS.") ;
}
cublasInitialized = true ;
}
}
#endif
hasFilters = filters.geom.numElements > 0 ;
hasBiases = biases.geom.numElements > 0 ;
/* check for GPU/data class consistency */
if (hasFilters && ! packed_data_are_compatible(&data, &filters)) {
mexErrMsgTxt("DATA and FILTERS are not both CPU or GPU arrays.") ;
}
if (hasBiases && ! packed_data_are_compatible(&data, &biases)) {
mexErrMsgTxt("DATA and BIASES are not both CPU or GPU arrays.") ;
}
if (backMode && ! packed_data_are_compatible(&data, &derOutput)) {
mexErrMsgTxt("DATA and DEROUTPUT are not both CPU or GPU arrays.") ;
}
if (data.geom.classID != mxSINGLE_CLASS) {
mexErrMsgTxt("DATA is not of class SINGLE.");
}
if (hasFilters && filters.geom.classID != mxSINGLE_CLASS) {
mexErrMsgTxt("FILTERS is not of class SINGLE.");
}
if (hasBiases && (biases.geom.classID != mxSINGLE_CLASS)) {
mexErrMsgTxt("BIASES is not of class SINGLE.");
}
if (backMode && (derOutput.geom.classID != mxSINGLE_CLASS)) {
mexErrMsgTxt("DEROUTPUT is not of class SINGLE.");
}
if (strideX < 1 || strideY < 1) {
mexErrMsgTxt("At least one element of STRIDE is smaller than one.") ;
}
if (!hasFilters) {
/*
Specifying empty filters assumes that they act as the
identity matrix. Geometrically, emulate this as data.geom.depth
filters of size 1x1xdata.geom.depth.
*/
filters.geom.width = 1 ;
filters.geom.height = 1 ;
filters.geom.depth = data.geom.depth ;
filters.geom.size = data.geom.depth ;
}
if (convIndicesMode && ! packed_data_are_compatible(&data, &convIndices)) {
mexErrMsgTxt("DATA and CONVINDICES are not both CPU or GPU arrays.") ;
}
if (convIndicesMode && (convIndices.geom.classID != mxINT32_CLASS)) {
mexErrMsgTxt("CONVINDICES is not of class INT32.");
}
if (convIndicesMode) {
packed_data_geom_init(&outputGeom,
mxSINGLE_CLASS,
convIndices.geom.height,
convIndices.geom.width,
filters.geom.size,
data.geom.size) ;
} else {
packed_data_geom_init(&outputGeom,
mxSINGLE_CLASS,
(data.geom.height + (padTop+padBottom) - filters.geom.height)/strideY + 1,
(data.geom.width + (padLeft+padRight) - filters.geom.width)/strideX + 1,
filters.geom.size,
data.geom.size) ;
}
/* grouped filters */
numGroups = data.geom.depth / filters.geom.depth ;
/* if the output is 1x1 pixels, then there is no need to actually
call im2col as it does not do anything
*/
fullyConnectedMode = (!convIndicesMode &&
outputGeom.height == 1 &&
outputGeom.width == 1 &&
padTop == 0 &&
padBottom == 0 &&
padLeft == 0 &&
padRight == 0 &&
numGroups == 1) ;
is_1x1 = (!convIndicesMode &&
hasFilters &&
filters.geom.height == 1 &&
filters.geom.width == 1 &&
strideY == 1 &&
strideX == 1 &&
padTop == 0 &&
padBottom == 0 &&
padLeft == 0 &&
padRight == 0);
if (convIndicesMode) {
if (convIndices.geom.depth != filters.geom.height*filters.geom.width) {
mexErrMsgTxt("CONVINDICES depth is not compatible with filters.");
}
if (convIndices.geom.size != 1 && convIndices.geom.size != data.geom.size) {
mexErrMsgTxt("CONVINDICES size should be equal either one, or the number of input images.");
}
}
if (!is_1x1) {
packed_data_geom_init
(&tempGeom, mxSINGLE_CLASS,
outputGeom.height,
outputGeom.width,
filters.geom.height*filters.geom.width*filters.geom.depth*numGroups,
microbatchSize) ;
} else {
packed_data_geom_init (&tempGeom, mxSINGLE_CLASS,
0, 0, 0, 0) ;
}
if (convIndicesMode) {
packed_data_geom_init
(&outputMaskedGeom, mxSINGLE_CLASS,
outputGeom.height,
outputGeom.width,
filters.geom.size,
microbatchSize) ;
} else {
packed_data_geom_init (&outputMaskedGeom, mxSINGLE_CLASS,
0, 0, 0, 0) ;
}
if (false) {
packed_data_geom_init (&derOutputMaskedGeom, mxSINGLE_CLASS,
outputGeom.height,
outputGeom.width,
filters.geom.size,
microbatchSize) ;
} else {
packed_data_geom_init (&derOutputMaskedGeom, mxSINGLE_CLASS,
0, 0, 0, 0) ;
}
derDataGeom = data.geom ;
derFiltersGeom = filters.geom ;
if (hasBiases) {
if (fullyConnectedMode) {
packed_data_geom_init (&allOnesGeom, mxSINGLE_CLASS,
1, 1,
1, data.geom.size) ;
} else {
packed_data_geom_init (&allOnesGeom, mxSINGLE_CLASS,
outputGeom.height,
outputGeom.width,
1, microbatchSize) ;
}
derBiasesGeom = biases.geom ;
} else {
packed_data_geom_init (&allOnesGeom, mxSINGLE_CLASS,
0, 0, 0, 0) ;
}
if (verbosity > 0) {
mexPrintf("vl_nnconv: mode %s; %s\n", gpuMode?"gpu":"cpu", backMode?"backward":"forward") ;
mexPrintf("vl_nnconv: stride: [%d %d], pad: [%d %d %d %d], numGroups: %d, has bias: %d, fully connected: %d, 1x1: %d, conv indices: %d, microbatchSize: %d\n",
strideY, strideX,
padTop, padBottom, padLeft, padRight,
numGroups, hasBiases, fullyConnectedMode, is_1x1, convIndicesMode,
microbatchSize) ;
packed_data_geom_display(&data.geom, "vl_nnconv: data") ;
if (hasFilters) { packed_data_geom_display(&filters.geom, "vl_nnconv: filters") ; }
if (hasBiases) { packed_data_geom_display(&biases.geom, "vl_nnconv: biases") ; }
if (backMode) {
packed_data_geom_display(&derOutput.geom, "vl_nnconv: derOutput") ;
packed_data_geom_display(&derOutputMaskedGeom, "vl_nnconv: derOutputMasked") ;
packed_data_geom_display(&derOutputMasked.geom, "vl_nnconv: derOutputMasked (cached)") ;
packed_data_geom_display(&derDataGeom, "vl_nnconv: derData") ;
if (hasFilters) { packed_data_geom_display(&derFiltersGeom, "vl_nnconv: derFilters") ; }
if (hasBiases) { packed_data_geom_display(&derBiasesGeom, "vl_nnconv: derBiases") ; }
} else {
packed_data_geom_display(&outputGeom, "vl_nnconv: output") ;
}
packed_data_geom_display(&tempGeom, "vl_nnconv: temp") ;
packed_data_geom_display(&temp.geom, "vl_nnconv: temp (cached)") ;
packed_data_geom_display(&outputMaskedGeom, "vl_nnconv: outputMasked") ;
packed_data_geom_display(&outputMasked.geom, "vl_nnconv: outputMasked (cached)") ;
packed_data_geom_display(&allOnesGeom, "vl_nnconv: allOnes") ;
packed_data_geom_display(&allOnes.geom, "vl_nnconv: allOnes (cached)") ;
if (convIndicesMode) {
packed_data_geom_display(&convIndices.geom, "vl_nnconv: convIndices") ;
}
}
if (backMode) {
if (derOutput.geom.height != outputGeom.height ||
derOutput.geom.width != outputGeom.width ||
derOutput.geom.depth != filters.geom.size ||
derOutput.geom.size != data.geom.size)
{
mexErrMsgTxt("DEROUTPUT dimensions are incompatible with X and FILTERS.") ;
}
}
if (numGroups * filters.geom.depth != data.geom.depth) {
mexErrMsgTxt("The filter depth does not divide the image depth.") ;
}
if (filters.geom.size % numGroups != 0) {
mexErrMsgTxt("The number of filter groups does not divide the total number of filters.") ;
}
if (padLeft < 0 ||
padRight < 0 ||
padTop < 0 ||
padBottom < 0) {
mexErrMsgTxt("An element of PAD is negative.") ;
}
if (outputGeom.height == 0 || outputGeom.width == 0) {
mexErrMsgTxt("FILTERS are larger than the DATA (including padding).") ;
}
if (filters.geom.height == 0 || filters.geom.width == 0 || filters.geom.depth == 0) {
mexErrMsgTxt("A dimension of FILTERS is void.") ;
}
if (hasBiases) {
if (biases.geom.numElements != filters.geom.size) {
mexErrMsgTxt("The number of elements of BIASES is not the same as the number of filters.") ;
}
}
/* -------------------------------------------------------------- */
/* Do the work */
/* -------------------------------------------------------------- */
/* auxiliary buffers */
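/*
allOnes is a cached vector of ones: the forward pass uses it to broadcast the
biases over all output pixels with a GEMM, and the backward pass uses it to
sum derOutput over pixels (an SGEMV) to form the bias derivatives. temp holds
the im2col expansion of the current image or microbatch. Both persist across
MEX calls and are only reallocated when they are too small or the CPU/GPU mode
changes.
*/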
if (hasBiases) {
if (allOnes.memorySize < allOnesGeom.numElements * sizeof(float) ||
(allOnes.mode == matlabGpuArray || allOnes.mode == matlabGpuArrayWrapper) != gpuMode) {
packed_data_deinit (&allOnes) ;
packed_data_init_with_geom (&allOnes, gpuMode, allOnesGeom, true, true, 1.0f) ;
}
}
if (!fullyConnectedMode) {
if (temp.memorySize < tempGeom.numElements * sizeof(float) ||
(temp.mode == matlabGpuArray || temp.mode == matlabGpuArrayWrapper) != gpuMode) {
packed_data_deinit (&temp) ;
packed_data_init_with_geom (&temp, gpuMode, tempGeom, true, false, 0);
}
}
if (derOutputMasked.memorySize < derOutputMaskedGeom.numElements * sizeof(float) ||
(derOutputMasked.mode == matlabGpuArray || derOutputMasked.mode == matlabGpuArrayWrapper) != gpuMode) {
packed_data_deinit (&derOutputMasked) ;
packed_data_init_with_geom (&derOutputMasked, gpuMode, derOutputMaskedGeom, true, false, 0);
}
if (outputMasked.memorySize < outputMaskedGeom.numElements * sizeof(float) ||
(outputMasked.mode == matlabGpuArray || outputMasked.mode == matlabGpuArrayWrapper) != gpuMode) {
packed_data_deinit (&outputMasked) ;
packed_data_init_with_geom (&outputMasked, gpuMode, outputMaskedGeom, true, false, 0);
}
if (!backMode) {
packed_data_init_with_geom(&output, gpuMode, outputGeom, false, false, 0) ;
} else {
if (computeDerData) {
packed_data_init_with_geom(&derData, gpuMode, derDataGeom, false, false, 0) ;
}
if (computeDerFilters) {
packed_data_init_with_geom(&derFilters, gpuMode, derFiltersGeom, false, false, 0) ;
if (derFiltersInitialized) {
copy_dispatch(gpuMode, derFilters.memory, derFiltersInit.memory, derFilters.geom.numElements) ;
}
}
if (computeDerBiases && hasBiases) {
packed_data_init_with_geom(&derBiases, gpuMode, derBiasesGeom, false, false, 0) ;
if (derBiasesInitialized) {
copy_dispatch(gpuMode, derBiases.memory, derBiasesInit.memory, derBiases.geom.numElements) ;
}
}
}
if (fullyConnectedMode) {
float alpha = 1 ;
float beta = 0 ;
ptrdiff_t filtersVolume = filters.geom.height*filters.geom.width*filters.geom.depth ;
/* note: fullyConnectedMode also guarantees no padding, num filter groups = 1 */
/* optimise fully-connected mode case */
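/*
With the filters flattened column-major into a (filtersVolume x numFilters)
matrix and each image flattened to a filtersVolume vector, the forward pass is
output = filters' * data: a single SGEMV for one image, or an SGEMM of shape
(numFilters x numImages) = (numFilters x filtersVolume) * (filtersVolume x numImages)
for a batch; the biases are then added through a rank-1 GEMM with allOnes.
*/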
if (!backMode) {
if (hasFilters) {
if (data.geom.size == 1) {
/* one image in the stack */
sgemv_dispatch(gpuMode, 't',
filtersVolume, filters.geom.size,
alpha,
filters.memory, filtersVolume,
data.memory, 1,
beta,
output.memory, 1) ;
} else {
/* multiple images in the stack */
sgemm_dispatch(gpuMode, 't', 'n',
filters.geom.size, data.geom.size, filtersVolume,
alpha,
filters.memory, filtersVolume,
data.memory, filtersVolume,
beta,
output.memory, filters.geom.size) ;
}
} else {
/* if no filter specified, assume that they act as the
identity */
copy_dispatch(gpuMode,
output.memory, data.memory,
filtersVolume * data.geom.size) ;
}
if (hasBiases) {
float beta = 1 ;
ptrdiff_t q = 1 ;
sgemm_dispatch(gpuMode, 'n', 'n',
filters.geom.size, data.geom.size, q,
alpha,
biases.memory, filters.geom.size,
allOnes.memory, q,
beta,
output.memory, filters.geom.size) ;
}
} else {
/* back mode */
if (computeDerFilters && hasFilters) {
sgemm_dispatch(gpuMode, 'n', 't',
filtersVolume, filters.geom.size, data.geom.size,
alpha,
data.memory, filtersVolume,
derOutput.memory, filters.geom.size,
(float)(derFiltersInitialized > 0),
derFilters.memory, filtersVolume) ;
}
if (computeDerBiases && hasBiases) {
ptrdiff_t q = 1 ;
sgemm_dispatch(gpuMode, 'n', 't',
q, filters.geom.size, data.geom.size,
alpha,
allOnes.memory, q,
derOutput.memory, filters.geom.size,
(float)(derBiasesInitialized > 0),
derBiases.memory, q) ;
}
if (computeDerData) {
if (hasFilters) {
sgemm_dispatch(gpuMode, 'n', 'n',
filtersVolume, data.geom.size, filters.geom.size,
alpha,
filters.memory, filtersVolume,
derOutput.memory, filters.geom.size,
beta,
derData.memory, filtersVolume) ;
} else {
/* does not have filters, just act as identity */
copy_dispatch(gpuMode,
derData.memory, derOutput.memory,
filtersVolume * data.geom.size) ;
}
}
}
} else if (convIndicesMode && hasFilters) {
// microbatchSize specifies the number of images to stack for GEMM
const int numMicrobatches = (data.geom.size + microbatchSize - 1) / microbatchSize;
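// Example: data.geom.size = 10 with microbatchSize = 4 gives numMicrobatches = 3,
// processed as batches of 4, 4 and 2 images (the final batch takes the remainder).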
for (int microbatchIdx = 0; microbatchIdx < numMicrobatches; ++microbatchIdx) {
int image = microbatchIdx * microbatchSize;
int numImages = (microbatchIdx != numMicrobatches - 1) ? microbatchSize : (data.geom.size - image);
ptrdiff_t dataOffset = (data.geom.height*data.geom.width*data.geom.depth) * image ;
ptrdiff_t outputOffset = (output.geom.height*output.geom.width*output.geom.depth) * image ;
ptrdiff_t derDataOffset = (derData.geom.height*derData.geom.width*derData.geom.depth) * image ;
ptrdiff_t derOutputOffset = (derOutput.geom.height*derOutput.geom.width*derOutput.geom.depth) * image ;
ptrdiff_t m = outputGeom.height * outputGeom.width ; /* num output pixels */
ptrdiff_t numRows = m * numImages ;
ptrdiff_t n = filters.geom.size/numGroups ; /* num filters per group */
ptrdiff_t k = filters.geom.height*filters.geom.width*filters.geom.depth ; /* filter volume */
if (backMode) {
if (numImages > 1) {
transpose23_dispatch(gpuMode,
outputMasked.memory,
derOutput.memory + derOutputOffset,
m, derOutput.geom.depth, numImages) ;
}
float *curDerOutputMemory = numImages > 1 ? outputMasked.memory : derOutput.memory + derOutputOffset;
/* compute derFilters dz/dF */
if (computeDerFilters) {
im2col_indexed_dispatch(gpuMode,
temp.memory,
data.memory + dataOffset,
convIndices.memoryInt,
convIndices.geom.numElements,
data.geom.height, data.geom.width, data.geom.depth, numImages,
filters.geom.height, filters.geom.width) ;
for (int g = 0 ; g < numGroups ; ++ g) {
ptrdiff_t filterGrpOffset = k * n * g ;
ptrdiff_t tempGrpOffset = numRows * k * g ;
ptrdiff_t derOutputGrpOffset = numRows * n * g ;
float alpha = 1 ;
float beta = (image > 0 || derFiltersInitialized) ; /* this saves init. the output array with 0 */
sgemm_dispatch(gpuMode, 't', 'n',
k, n, numRows,
alpha,
temp.memory + tempGrpOffset, numRows,
curDerOutputMemory + derOutputGrpOffset, numRows,
beta,
derFilters.memory + filterGrpOffset, k) ;
}
}
/* compute derBiases dz/dbias */
if (computeDerBiases & hasBiases) {
sgemv_dispatch(gpuMode, 't',
numRows, filters.geom.size,
1, /* alpha */
curDerOutputMemory, numRows,
allOnes.memory, 1,
(float)(image > 0 || derBiasesInitialized), /* beta */
derBiases.memory, 1) ;
}
/* compute derData dz/dx */
if (computeDerData) {
for (int g = 0 ; g < numGroups ; ++ g) {
ptrdiff_t filterGrpOffset = k * n * g ;
ptrdiff_t tempGrpOffset = numRows * k * g ;
ptrdiff_t derOutputGrpOffset = numRows * n * g ;
float alpha = 1 ;
float beta = 0 ;
sgemm_dispatch(gpuMode, 'n', 't',
numRows, k, n,
alpha,
curDerOutputMemory + derOutputGrpOffset, numRows,
filters.memory + filterGrpOffset, k,
beta,
temp.memory + tempGrpOffset,
numRows) ;
}
col2im_indexed_dispatch(gpuMode,
derData.memory + derDataOffset,
temp.memory,
convIndices.memoryInt,
convIndices.geom.numElements,
data.geom.height, data.geom.width, data.geom.depth, numImages,
filters.geom.height, filters.geom.width);
}
} else {
float *curOutputMemory = numImages > 1 ? outputMasked.memory : output.memory + outputOffset;
im2col_indexed_dispatch(gpuMode,
temp.memory,
data.memory + dataOffset,
convIndices.memoryInt,
convIndices.geom.numElements,
data.geom.height, data.geom.width, data.geom.depth, numImages,
filters.geom.height, filters.geom.width) ;
for (int g = 0 ; g < numGroups ; ++ g) {
ptrdiff_t filterGrpOffset = k * n * g ;
ptrdiff_t tempGrpOffset = numRows * k * g ;
ptrdiff_t outputGrpOffset = numRows * n * g ;
float alpha = 1 ;
float beta = 0 ;
sgemm_dispatch(gpuMode, 'n', 'n',
numRows, n, k,
alpha,
temp.memory + tempGrpOffset, numRows,
filters.memory + filterGrpOffset, k,
beta,
curOutputMemory + outputGrpOffset,
numRows) ;
}
if (hasBiases) {
float alpha = 1 ;
float beta = 1 ;
ptrdiff_t q = 1 ;
sgemm_dispatch(gpuMode, 'n', 'n',
numRows, biases.geom.numElements, q,
alpha,
allOnes.memory, numRows,
biases.memory, q,
beta,
curOutputMemory,
numRows) ;
}
if (numImages > 1) {
transpose23_dispatch(gpuMode,
output.memory + outputOffset,
outputMasked.memory,
m, numImages, output.geom.depth) ;
}
}
}
} else {
// This branch catches corner cases: 1x1 convolutions (skipping im2col/col2im), and when
// vl_nnconv called without convIndices.
// It can be merged with the previous branch, but the number of conditionals inside is already
// way too high.
for (int image = 0 ; image < data.geom.size ; ++image) {
/*
temp (phi(x)): m x k
filters, derFilters: k x n (for one group of filters)
derOutput (dzdy) : m x n (for one group of filters)
res (y) : m x n (for one group of filters)
*/
ptrdiff_t dataOffset = (data.geom.height*data.geom.width*data.geom.depth) * image ;
ptrdiff_t outputOffset = (output.geom.height*output.geom.width*output.geom.depth) * image ;
ptrdiff_t derDataOffset = (derData.geom.height*derData.geom.width*derData.geom.depth) * image ;
ptrdiff_t derOutputOffset = (derOutput.geom.height*derOutput.geom.width*derOutput.geom.depth) * image ;
ptrdiff_t m = outputGeom.height * outputGeom.width ; /* num output pixels */
ptrdiff_t n = filters.geom.size/numGroups ; /* num filters per group */
ptrdiff_t k = filters.geom.height*filters.geom.width*filters.geom.depth ; /* filter volume */
float* tempMemory;
if (backMode) {
/* ---------------------------------------------------------- */
/* Backward mode */
/* ---------------------------------------------------------- */
/* compute derFilters dz/dF */
if (computeDerFilters & hasFilters) {
if (!is_1x1) {
im2col_dispatch(gpuMode,
temp.memory,
data.memory + dataOffset,
data.geom.height, data.geom.width, data.geom.depth,
filters.geom.height, filters.geom.width,
strideY, strideX,
padTop, padBottom, padLeft, padRight) ;
tempMemory = temp.memory;
} else {
tempMemory = data.memory + dataOffset;
}
for (int g = 0 ; g < numGroups ; ++ g) {
ptrdiff_t filterGrpOffset = k * n * g ;
ptrdiff_t tempGrpOffset = m * k * g ;
ptrdiff_t derOutputGrpOffset = m * n * g ;
float alpha = 1 ;
float beta = (image > 0 || derFiltersInitialized) ; /* this saves init. the output array with 0 */
sgemm_dispatch(gpuMode, 't', 'n',
k, n, m,
alpha,
tempMemory + tempGrpOffset, m,
derOutput.memory + derOutputOffset + derOutputGrpOffset, m,
beta,
derFilters.memory + filterGrpOffset, k) ;
}
}
/* compute derBiases dz/dbias */
if (computeDerBiases & hasBiases) {
sgemv_dispatch(gpuMode, 't',
m, filters.geom.size,
1, /* alpha */
derOutput.memory + derOutputOffset, m,
allOnes.memory, 1,
(float)(image > 0 || derBiasesInitialized), /* beta */
derBiases.memory, 1) ;
}
/* compute derData dz/dx */
if (computeDerData) {
if (hasFilters) {
if (!is_1x1) {
tempMemory = temp.memory;
} else {
tempMemory = derData.memory + derDataOffset;
}
for (int g = 0 ; g < numGroups ; ++ g) {
ptrdiff_t filterGrpOffset = k * n * g ;
ptrdiff_t tempGrpOffset = m * k * g ;
ptrdiff_t derOutputGrpOffset = m * n * g ;
float alpha = 1 ;
float beta = 0 ;
sgemm_dispatch(gpuMode, 'n', 't',
m, k, n,
alpha,
derOutput.memory + derOutputOffset + derOutputGrpOffset, m,
filters.memory + filterGrpOffset, k,
beta,
tempMemory + tempGrpOffset,
m) ;
}
if (!is_1x1) {
col2im_dispatch(gpuMode,
derData.memory + derDataOffset,
temp.memory,
data.geom.height, data.geom.width, data.geom.depth,
filters.geom.height, filters.geom.width,
strideY, strideX,
padTop, padBottom, padLeft, padRight) ;
}
} else {
subsampleBackward_dispatch(gpuMode,
derData.memory + derDataOffset,
derOutput.memory + derOutputOffset,
data.geom.height, data.geom.width, data.geom.depth,
strideY, strideX,
padTop, padBottom, padLeft, padRight) ;
}
}
} else {
/* ---------------------------------------------------------- */
/* Forward mode */
/* ---------------------------------------------------------- */
if (hasFilters) {
if (!is_1x1) {
im2col_dispatch(gpuMode,
temp.memory,
data.memory + dataOffset,
data.geom.height, data.geom.width, data.geom.depth,
filters.geom.height, filters.geom.width,
strideY, strideX,
padTop, padBottom, padLeft, padRight) ;
tempMemory = temp.memory;
} else {
tempMemory = data.memory + dataOffset;
}
for (int g = 0 ; g < numGroups ; ++ g) {
ptrdiff_t filterGrpOffset = k * n * g ;
ptrdiff_t tempGrpOffset = m * k * g ;
ptrdiff_t outputGrpOffset = m * n * g ;
float alpha = 1 ;
float beta = 0 ;
sgemm_dispatch(gpuMode, 'n', 'n',
m, n, k,
alpha,
tempMemory + tempGrpOffset, m,
filters.memory + filterGrpOffset, k,
beta,
output.memory + outputOffset + outputGrpOffset, m) ;
}
} else {
/* no filters: identity */
subsample_dispatch(gpuMode,
output.memory + outputOffset,
data.memory + dataOffset,
data.geom.height, data.geom.width, data.geom.depth,
strideY, strideX,
padTop, padBottom, padLeft, padRight) ;
}
if (hasBiases) {
float alpha = 1 ;
float beta = 1 ;
ptrdiff_t q = 1 ;
sgemm_dispatch(gpuMode, 'n', 'n',
m, biases.geom.numElements, q,
alpha,
allOnes.memory, m,
biases.memory, q,
beta,
output.memory + outputOffset, m) ;
}
}
}
}
/* -------------------------------------------------------------- */
/* Cleanup */
/* -------------------------------------------------------------- */
packed_data_deinit(&data) ;
packed_data_deinit(&filters) ;
packed_data_deinit(&biases) ;
if (convIndicesMode) {
packed_data_deinit(&convIndices);
}
if (backMode) {
packed_data_deinit(&derOutput) ;
out[OUT_RESULT] = (computeDerData) ? packed_data_deinit_extracting_array(&derData) : mxCreateDoubleMatrix(0,0,mxREAL) ;
out[OUT_DERFILTERS] =(computeDerFilters & hasFilters)? packed_data_deinit_extracting_array(&derFilters) : mxCreateDoubleMatrix(0,0,mxREAL) ;
out[OUT_DERBIASES] = (computeDerBiases & hasBiases) ? packed_data_deinit_extracting_array(&derBiases) : mxCreateDoubleMatrix(0,0,mxREAL) ;
} else {
out[OUT_RESULT] = packed_data_deinit_extracting_array(&output) ;
}
packed_data_deinit(&derFiltersInit) ;
packed_data_deinit(&derBiasesInit) ;
}
|
bd10d3fe3f239a24fe4994c63e2148a5360e45bb.cu
|
/** @file vl_nnconv.cu
** @brief Convolution block
** @author Andrea Vedaldi
** @author Michael Figurnov
**/
/*
Copyright (C) 2014 Andrea Vedaldi and Max Jaderberg.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "bits/mexutils.h"
#include "bits/nnhelper.h"
#include "bits/im2col.hpp"
#include "bits/subsample.hpp"
#include <assert.h>
#include <algorithm>
#include <blas.h>
#ifdef ENABLE_GPU
#include "bits/gpu.hpp"
#include <cublas_v2.h>
#endif
/* option codes */
enum {
opt_stride = 0,
opt_pad,
opt_conv_indices,
opt_microbatch_size,
opt_der_filters,
opt_der_biases,
opt_verbose,
opt_no_der_data,
opt_no_der_filters,
opt_no_der_biases,
} ;
/* options */
vlmxOption options [] = {
{"Stride", 1, opt_stride },
{"Pad", 1, opt_pad },
{"ConvIndices", 1, opt_conv_indices },
{"MicrobatchSize", 1, opt_microbatch_size },
{"DerFilters", 1, opt_der_filters },
{"DerBiases", 1, opt_der_biases },
{"Verbose", 0, opt_verbose },
{"NoDerData", 0, opt_no_der_data },
{"NoDerFilters", 0, opt_no_der_filters },
{"NoDerBiases", 0, opt_no_der_biases },
{0, 0, 0 }
} ;
/* ---------------------------------------------------------------- */
/* Cache */
/* ---------------------------------------------------------------- */
#ifdef ENABLE_GPU
bool cublasInitialized = false ;
cublasHandle_t thisCublasHandle ;
#endif
bool persistentDataInitialized = false ;
PackedData temp ;
PackedData derOutputMasked;
PackedData outputMasked;
PackedData allOnes ;
void atExit()
{
if (persistentDataInitialized) {
packed_data_deinit (&temp) ;
packed_data_deinit (&derOutputMasked) ;
packed_data_deinit (&outputMasked) ;
packed_data_deinit (&allOnes) ;
persistentDataInitialized = false ;
}
#ifdef ENABLE_GPU
if (cublasInitialized) {
cublasDestroy(thisCublasHandle) ;
cublasInitialized = false ;
}
#endif
}
/* ---------------------------------------------------------------- */
/* Dispatcher func */
/* ---------------------------------------------------------------- */
static void
sgemv_dispatch(bool gpuMode,
char op,
ptrdiff_t m, ptrdiff_t n,
float alpha,
float const * a, ptrdiff_t lda,
float const * x, ptrdiff_t incx,
float beta,
float * y, ptrdiff_t incy)
{
if (!gpuMode) {
sgemv(&op,
&m, &n, &alpha,
(float*)a, &lda,
(float*)x, &incx,
&beta,
y, &incy) ;
} else {
#ifdef ENABLE_GPU
cublasSgemv(thisCublasHandle,
(op == 't') ? CUBLAS_OP_T : CUBLAS_OP_N,
(int)m, (int)n,
&alpha,
a, lda,
x, (int)incx,
&beta,
y, (int)incy) ;
#endif
}
}
static void
sgemm_dispatch(bool gpuMode,
char op1, char op2,
ptrdiff_t m, ptrdiff_t n, ptrdiff_t k,
float alpha,
float const * a, ptrdiff_t lda,
float const * b, ptrdiff_t ldb,
float beta,
float * c, ptrdiff_t ldc)
{
if (!gpuMode) {
sgemm(&op1, &op2,
&m, &n, &k,
&alpha,
(float*)a, &lda,
(float*)b, &ldb,
&beta,
c, &ldc) ;
} else {
#ifdef ENABLE_GPU
cublasSgemm(thisCublasHandle,
(op1 == 't') ? CUBLAS_OP_T : CUBLAS_OP_N,
(op2 == 't') ? CUBLAS_OP_T : CUBLAS_OP_N,
(int)m, (int)n, (int)k,
&alpha,
a, (int)lda,
b, (int)ldb,
&beta,
c, (int)ldc);
#endif
}
}
static void
copy_dispatch(bool gpuMode,
float * dest,
float const * src,
size_t numElements)
{
if (!gpuMode) {
memcpy(dest, src, numElements * sizeof(float)) ;
} else {
#ifdef ENABLE_GPU
cudaMemcpy(dest, src, numElements * sizeof(float), cudaMemcpyDeviceToDevice) ;
#endif
}
}
static void
subsample_dispatch(bool gpuMode,
float* subsampled,
float const* data,
size_t width,
size_t height,
size_t depth,
size_t strideX,
size_t strideY,
size_t padLeft,
size_t padRight,
size_t padTop,
size_t padBottom)
{
if (!gpuMode) {
subsample_cpu(subsampled,
data,
width,
height,
depth,
strideX,
strideY,
padLeft,
padRight,
padTop,
padBottom) ;
} else {
#ifdef ENABLE_GPU
subsample_gpu(subsampled,
data,
width,
height,
depth,
strideX,
strideY,
padLeft,
padRight,
padTop,
padBottom) ;
#endif
}
}
static void
subsampleBackward_dispatch(bool gpuMode,
float* dzdx,
float const* dzdy,
size_t width,
size_t height,
size_t depth,
size_t strideX,
size_t strideY,
size_t padLeft,
size_t padRight,
size_t padTop,
size_t padBottom)
{
if (!gpuMode) {
subsampleBackward_cpu(dzdx,
dzdy,
width,
height,
depth,
strideX,
strideY,
padLeft,
padRight,
padTop,
padBottom) ;
} else {
#ifdef ENABLE_GPU
subsampleBackward_gpu(dzdx,
dzdy,
width,
height,
depth,
strideX,
strideY,
padLeft,
padRight,
padTop,
padBottom) ;
#endif
}
}
static void
im2col_dispatch(bool gpuMode,
float* stacked,
float const* data,
size_t width,
size_t height,
size_t depth,
size_t windowWidth,
size_t windowHeight,
size_t strideX,
size_t strideY,
size_t padLeft,
size_t padRight,
size_t padTop,
size_t padBottom)
{
if (!gpuMode) {
im2col_cpu<float>(stacked,
data,
width,
height,
depth,
windowWidth,
windowHeight,
strideX,
strideY,
padLeft,
padRight,
padTop,
padBottom) ;
} else {
#ifdef ENABLE_GPU
im2col_gpu<float>(stacked,
data,
width,
height,
depth,
windowWidth,
windowHeight,
strideX,
strideY,
padLeft,
padRight,
padTop,
padBottom) ;
#endif
}
}
static void
im2col_indexed_dispatch(bool gpuMode,
float* stacked,
float const* data,
int const* im2colIndices,
int im2colIndicesLength,
size_t width,
size_t height,
size_t depth,
size_t size,
size_t windowWidth,
size_t windowHeight)
{
if (!gpuMode) {
im2col_indexed_cpu<float>(stacked,
data,
im2colIndices,
im2colIndicesLength,
width,
height,
depth,
size,
windowWidth,
windowHeight);
} else {
#ifdef ENABLE_GPU
im2col_indexed_gpu<float>(stacked,
data,
im2colIndices,
im2colIndicesLength,
width,
height,
depth,
size,
windowWidth,
windowHeight);
#endif
}
}
static void
col2im_dispatch(bool gpuMode,
float* data,
float const* stacked,
size_t width,
size_t height,
size_t depth,
size_t windowWidth,
size_t windowHeight,
size_t strideX,
size_t strideY,
size_t padLeft,
size_t padRight,
size_t padTop,
size_t padBottom)
{
if (!gpuMode) {
col2im_cpu<float>(data,
stacked,
width,
height,
depth,
windowWidth,
windowHeight,
strideX,
strideY,
padLeft,
padRight,
padTop,
padBottom) ;
} else {
#ifdef ENABLE_GPU
col2im_gpu<float>(data,
stacked,
width,
height,
depth,
windowWidth,
windowHeight,
strideX,
strideY,
padLeft,
padRight,
padTop,
padBottom) ;
#endif
}
}
static void
col2im_indexed_dispatch(bool gpuMode,
float* data,
float const* stacked,
int const* im2colIndices,
int im2colIndicesLength,
size_t width,
size_t height,
size_t depth,
size_t size,
size_t windowWidth,
size_t windowHeight)
{
if (!gpuMode) {
col2im_indexed_cpu(data,
stacked,
im2colIndices,
im2colIndicesLength,
width,
height,
depth,
size,
windowWidth,
windowHeight);
} else {
#ifdef ENABLE_GPU
col2im_indexed_gpu(data,
stacked,
im2colIndices,
im2colIndicesLength,
width,
height,
depth,
size,
windowWidth,
windowHeight) ;
#endif
}
}
static void
transpose23_dispatch(bool gpuMode,
float* transposed,
float const* data,
size_t d1,
size_t d2,
size_t d3)
{
if (!gpuMode) {
transpose23_cpu(transposed, data, d1, d2, d3);
} else {
#ifdef ENABLE_GPU
transpose23_gpu(transposed, data, d1, d2, d3) ;
#endif
}
}
/* ---------------------------------------------------------------- */
/* MEX driver */
/* ---------------------------------------------------------------- */
enum {
IN_DATA = 0, IN_FILTERS, IN_BIASES, IN_DEROUTPUT, IN_END
} ;
enum {
OUT_RESULT = 0, OUT_DERFILTERS, OUT_DERBIASES, OUT_END
} ;
void mexFunction(int nout, mxArray *out[],
int nin, mxArray const *in[])
{
/* inputs */
PackedData data ;
PackedData filters ;
PackedData biases ;
PackedData derOutput ;
PackedData convIndices ;
PackedData derFiltersInit ;
PackedData derBiasesInit ;
/* outputs */
PackedData output ;
PackedData derData ;
PackedData derFilters ;
PackedData derBiases ;
PackedDataGeometry outputGeom ;
PackedDataGeometry derDataGeom ;
PackedDataGeometry derFiltersGeom ;
PackedDataGeometry derBiasesGeom ;
PackedDataGeometry tempGeom ;
PackedDataGeometry derOutputMaskedGeom ;
PackedDataGeometry outputMaskedGeom ;
PackedDataGeometry allOnesGeom ;
int strideX = 1 ;
int strideY = 1 ;
int padLeft = 0 ;
int padRight = 0 ;
int padTop = 0 ;
int padBottom = 0 ;
int numGroups = 1 ;
int microbatchSize = 1 ;
#if ENABLE_GPU
cublasStatus_t stat;
bool gpuMode = false ;
#else
bool const gpuMode = false ;
#endif
bool backMode = false ;
bool hasFilters = false ;
bool hasBiases = false ;
bool fullyConnectedMode = false ;
bool is_1x1 = false ;
bool computeDerData = true ;
bool computeDerFilters = true ;
bool computeDerBiases = true ;
bool convIndicesMode = false;
bool derFiltersInitialized = false ;
bool derBiasesInitialized = false ;
int verbosity = 0 ;
int opt ;
int next = IN_END ;
mxArray const *optarg ;
packed_data_init_empty(&data) ;
packed_data_init_empty(&filters) ;
packed_data_init_empty(&biases) ;
packed_data_init_empty(&derOutput) ;
packed_data_init_empty(&convIndices) ;
packed_data_init_empty(&output) ;
packed_data_init_empty(&derData) ;
packed_data_init_empty(&derFilters) ;
packed_data_init_empty(&derBiases) ;
packed_data_init_empty(&derFiltersInit) ;
packed_data_init_empty(&derBiasesInit) ;
if (!persistentDataInitialized) {
packed_data_init_empty(&temp) ;
packed_data_init_empty(&derOutputMasked) ;
packed_data_init_empty(&outputMasked) ;
packed_data_init_empty(&allOnes) ;
persistentDataInitialized = true ;
}
/* -------------------------------------------------------------- */
/* Check the arguments */
/* -------------------------------------------------------------- */
mexAtExit(atExit) ;
if (nin < 3) {
mexErrMsgTxt("There are less than three arguments.") ;
}
if (nin > 3 && vlmxIsString(in[3],-1)) {
next = 3 ;
backMode = 0 ;
} else {
backMode = (nin >= 4) ;
}
while ((opt = vlmxNextOption (in, nin, options, &next, &optarg)) >= 0) {
switch (opt) {
case opt_verbose :
++ verbosity ;
break ;
case opt_stride :
if (!vlmxIsPlainMatrix(optarg,-1,-1)) {
mexErrMsgTxt("STRIDE is not a plain matrix.") ;
}
switch (mxGetNumberOfElements(optarg)) {
case 1:
strideY = (int)mxGetPr(optarg)[0] ;
strideX = strideY ;
break ;
case 2:
strideY = (int)mxGetPr(optarg)[0] ;
strideX = (int)mxGetPr(optarg)[1] ;
break ;
default:
mexErrMsgTxt("STRIDE has neither one nor two elements.") ;
}
break ;
case opt_pad :
if (!vlmxIsPlainMatrix(optarg,-1,-1)) {
mexErrMsgTxt("PAD is not a plain matrix.") ;
}
switch (mxGetNumberOfElements(optarg)) {
case 1:
padLeft = (int)mxGetPr(optarg)[0] ;
padRight = padLeft ;
padTop = padLeft ;
padBottom = padLeft ;
break ;
case 4:
padTop = (int)mxGetPr(optarg)[0] ;
padBottom = (int)mxGetPr(optarg)[1] ;
padLeft = (int)mxGetPr(optarg)[2] ;
padRight = (int)mxGetPr(optarg)[3] ;
break ;
default:
mexErrMsgTxt("STRIDE has neither one nor two elements.") ;
}
break ;
case opt_conv_indices :
if (mxGetNumberOfElements(optarg) != 0) {
convIndicesMode = true;
packed_data_init_with_array_int(&convIndices, optarg);
}
break;
case opt_microbatch_size :
if (mxGetNumberOfElements(optarg) == 1) {
microbatchSize = (int)mxGetPr(optarg)[0] ;
}
break;
case opt_der_filters :
if (mxGetNumberOfElements(optarg) != 0) {
derFiltersInitialized = true;
packed_data_init_with_array(&derFiltersInit, optarg);
}
break;
case opt_der_biases :
if (mxGetNumberOfElements(optarg) != 0) {
derBiasesInitialized = true;
packed_data_init_with_array(&derBiasesInit, optarg);
}
break;
case opt_no_der_data :
computeDerData = VL_FALSE ;
break ;
case opt_no_der_filters :
computeDerFilters = VL_FALSE ;
break ;
case opt_no_der_biases :
computeDerBiases = VL_FALSE ;
break ;
default: break ;
}
}
packed_data_init_with_array(&data, in[IN_DATA]) ;
packed_data_init_with_array(&filters, in[IN_FILTERS]) ;
packed_data_init_with_array(&biases, in[IN_BIASES]) ;
if (backMode) { packed_data_init_with_array(&derOutput, in[IN_DEROUTPUT]) ; }
#if ENABLE_GPU
gpuMode = (data.mode == matlabGpuArrayWrapper) ;
if (gpuMode) {
mxInitGPU() ;
if (!cublasInitialized) {
stat = cublasCreate(&thisCublasHandle) ;
if (stat != CUBLAS_STATUS_SUCCESS) {
mexErrMsgTxt("Could not initialize cuBLAS.") ;
}
cublasInitialized = true ;
}
}
#endif
hasFilters = filters.geom.numElements > 0 ;
hasBiases = biases.geom.numElements > 0 ;
/* check for GPU/data class consistency */
if (hasFilters && ! packed_data_are_compatible(&data, &filters)) {
mexErrMsgTxt("DATA and FILTERS are not both CPU or GPU arrays.") ;
}
if (hasBiases && ! packed_data_are_compatible(&data, &biases)) {
mexErrMsgTxt("DATA and BIASES are not both CPU or GPU arrays.") ;
}
if (backMode && ! packed_data_are_compatible(&data, &derOutput)) {
mexErrMsgTxt("DATA and DEROUTPUT are not both CPU or GPU arrays.") ;
}
if (data.geom.classID != mxSINGLE_CLASS) {
mexErrMsgTxt("DATA is not of class SINGLE.");
}
if (hasFilters && filters.geom.classID != mxSINGLE_CLASS) {
mexErrMsgTxt("FILTERS is not of class SINGLE.");
}
if (hasBiases && (biases.geom.classID != mxSINGLE_CLASS)) {
mexErrMsgTxt("BIASES is not of class SINGLE.");
}
if (backMode && (derOutput.geom.classID != mxSINGLE_CLASS)) {
mexErrMsgTxt("DEROUTPUT is not of class SINGLE.");
}
if (strideX < 1 || strideY < 1) {
mexErrMsgTxt("At least one element of STRIDE is smaller than one.") ;
}
if (!hasFilters) {
/*
Specifying empty filters assumes that they act as the
identity matrix. Geometrically, emulate this as data.geom.depth
filters of size 1x1xdata.geom.depth.
*/
filters.geom.width = 1 ;
filters.geom.height = 1 ;
filters.geom.depth = data.geom.depth ;
filters.geom.size = data.geom.depth ;
}
if (convIndicesMode && ! packed_data_are_compatible(&data, &convIndices)) {
mexErrMsgTxt("DATA and CONVINDICES are not both CPU or GPU arrays.") ;
}
if (convIndicesMode && (convIndices.geom.classID != mxINT32_CLASS)) {
mexErrMsgTxt("CONVINDICES is not of class INT32.");
}
if (convIndicesMode) {
packed_data_geom_init(&outputGeom,
mxSINGLE_CLASS,
convIndices.geom.height,
convIndices.geom.width,
filters.geom.size,
data.geom.size) ;
} else {
packed_data_geom_init(&outputGeom,
mxSINGLE_CLASS,
(data.geom.height + (padTop+padBottom) - filters.geom.height)/strideY + 1,
(data.geom.width + (padLeft+padRight) - filters.geom.width)/strideX + 1,
filters.geom.size,
data.geom.size) ;
}
/* grouped filters */
numGroups = data.geom.depth / filters.geom.depth ;
/* if the output is 1x1 pixels, then there is no need to actually
call im2col as it does not do anything
*/
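/*
fullyConnectedMode means each image yields a single 1x1 output pixel (with no
padding and a single filter group), so the whole layer collapses into one
matrix product; is_1x1 flags 1x1 filters with unit stride and no padding, for
which im2col is the identity and is skipped inside the general per-image loop.
*/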
fullyConnectedMode = (!convIndicesMode &&
outputGeom.height == 1 &&
outputGeom.width == 1 &&
padTop == 0 &&
padBottom == 0 &&
padLeft == 0 &&
padRight == 0 &&
numGroups == 1) ;
is_1x1 = (!convIndicesMode &&
hasFilters &&
filters.geom.height == 1 &&
filters.geom.width == 1 &&
strideY == 1 &&
strideX == 1 &&
padTop == 0 &&
padBottom == 0 &&
padLeft == 0 &&
padRight == 0);
if (convIndicesMode) {
if (convIndices.geom.depth != filters.geom.height*filters.geom.width) {
mexErrMsgTxt("CONVINDICES depth is not compatible with filters.");
}
if (convIndices.geom.size != 1 && convIndices.geom.size != data.geom.size) {
mexErrMsgTxt("CONVINDICES size should be equal either one, or the number of input images.");
}
}
if (!is_1x1) {
packed_data_geom_init
(&tempGeom, mxSINGLE_CLASS,
outputGeom.height,
outputGeom.width,
filters.geom.height*filters.geom.width*filters.geom.depth*numGroups,
microbatchSize) ;
} else {
packed_data_geom_init (&tempGeom, mxSINGLE_CLASS,
0, 0, 0, 0) ;
}
if (convIndicesMode) {
packed_data_geom_init
(&outputMaskedGeom, mxSINGLE_CLASS,
outputGeom.height,
outputGeom.width,
filters.geom.size,
microbatchSize) ;
} else {
packed_data_geom_init (&outputMaskedGeom, mxSINGLE_CLASS,
0, 0, 0, 0) ;
}
if (false) {
packed_data_geom_init (&derOutputMaskedGeom, mxSINGLE_CLASS,
outputGeom.height,
outputGeom.width,
filters.geom.size,
microbatchSize) ;
} else {
packed_data_geom_init (&derOutputMaskedGeom, mxSINGLE_CLASS,
0, 0, 0, 0) ;
}
derDataGeom = data.geom ;
derFiltersGeom = filters.geom ;
if (hasBiases) {
if (fullyConnectedMode) {
packed_data_geom_init (&allOnesGeom, mxSINGLE_CLASS,
1, 1,
1, data.geom.size) ;
} else {
packed_data_geom_init (&allOnesGeom, mxSINGLE_CLASS,
outputGeom.height,
outputGeom.width,
1, microbatchSize) ;
}
derBiasesGeom = biases.geom ;
} else {
packed_data_geom_init (&allOnesGeom, mxSINGLE_CLASS,
0, 0, 0, 0) ;
}
if (verbosity > 0) {
mexPrintf("vl_nnconv: mode %s; %s\n", gpuMode?"gpu":"cpu", backMode?"backward":"forward") ;
mexPrintf("vl_nnconv: stride: [%d %d], pad: [%d %d %d %d], numGroups: %d, has bias: %d, fully connected: %d, 1x1: %d, conv indices: %d, microbatchSize: %d\n",
strideY, strideX,
padTop, padBottom, padLeft, padRight,
numGroups, hasBiases, fullyConnectedMode, is_1x1, convIndicesMode,
microbatchSize) ;
packed_data_geom_display(&data.geom, "vl_nnconv: data") ;
if (hasFilters) { packed_data_geom_display(&filters.geom, "vl_nnconv: filters") ; }
if (hasBiases) { packed_data_geom_display(&biases.geom, "vl_nnconv: biases") ; }
if (backMode) {
packed_data_geom_display(&derOutput.geom, "vl_nnconv: derOutput") ;
packed_data_geom_display(&derOutputMaskedGeom, "vl_nnconv: derOutputMasked") ;
packed_data_geom_display(&derOutputMasked.geom, "vl_nnconv: derOutputMasked (cached)") ;
packed_data_geom_display(&derDataGeom, "vl_nnconv: derData") ;
if (hasFilters) { packed_data_geom_display(&derFiltersGeom, "vl_nnconv: derFilters") ; }
if (hasBiases) { packed_data_geom_display(&derBiasesGeom, "vl_nnconv: derBiases") ; }
} else {
packed_data_geom_display(&outputGeom, "vl_nnconv: output") ;
}
packed_data_geom_display(&tempGeom, "vl_nnconv: temp") ;
packed_data_geom_display(&temp.geom, "vl_nnconv: temp (cached)") ;
packed_data_geom_display(&outputMaskedGeom, "vl_nnconv: outputMasked") ;
packed_data_geom_display(&outputMasked.geom, "vl_nnconv: outputMasked (cached)") ;
packed_data_geom_display(&allOnesGeom, "vl_nnconv: allOnes") ;
packed_data_geom_display(&allOnes.geom, "vl_nnconv: allOnes (cached)") ;
if (convIndicesMode) {
packed_data_geom_display(&convIndices.geom, "vl_nnconv: convIndices") ;
}
}
if (backMode) {
if (derOutput.geom.height != outputGeom.height ||
derOutput.geom.width != outputGeom.width ||
derOutput.geom.depth != filters.geom.size ||
derOutput.geom.size != data.geom.size)
{
mexErrMsgTxt("DEROUTPUT dimensions are incompatible with X and FILTERS.") ;
}
}
if (numGroups * filters.geom.depth != data.geom.depth) {
mexErrMsgTxt("The filter depth does not divide the image depth.") ;
}
if (filters.geom.size % numGroups != 0) {
mexErrMsgTxt("The number of filter groups does not divide the total number of filters.") ;
}
if (padLeft < 0 ||
padRight < 0 ||
padTop < 0 ||
padBottom < 0) {
mexErrMsgTxt("An element of PAD is negative.") ;
}
if (outputGeom.height == 0 || outputGeom.width == 0) {
mexErrMsgTxt("FILTERS are larger than the DATA (including padding).") ;
}
if (filters.geom.height == 0 || filters.geom.width == 0 || filters.geom.depth == 0) {
mexErrMsgTxt("A dimension of FILTERS is void.") ;
}
if (hasBiases) {
if (biases.geom.numElements != filters.geom.size) {
mexErrMsgTxt("The number of elements of BIASES is not the same as the number of filters.") ;
}
}
/* -------------------------------------------------------------- */
/* Do the work */
/* -------------------------------------------------------------- */
/* auxiliary buffers */
if (hasBiases) {
if (allOnes.memorySize < allOnesGeom.numElements * sizeof(float) ||
(allOnes.mode == matlabGpuArray || allOnes.mode == matlabGpuArrayWrapper) != gpuMode) {
packed_data_deinit (&allOnes) ;
packed_data_init_with_geom (&allOnes, gpuMode, allOnesGeom, true, true, 1.0f) ;
}
}
if (!fullyConnectedMode) {
if (temp.memorySize < tempGeom.numElements * sizeof(float) ||
(temp.mode == matlabGpuArray || temp.mode == matlabGpuArrayWrapper) != gpuMode) {
packed_data_deinit (&temp) ;
packed_data_init_with_geom (&temp, gpuMode, tempGeom, true, false, 0);
}
}
if (derOutputMasked.memorySize < derOutputMaskedGeom.numElements * sizeof(float) ||
(derOutputMasked.mode == matlabGpuArray || derOutputMasked.mode == matlabGpuArrayWrapper) != gpuMode) {
packed_data_deinit (&derOutputMasked) ;
packed_data_init_with_geom (&derOutputMasked, gpuMode, derOutputMaskedGeom, true, false, 0);
}
if (outputMasked.memorySize < outputMaskedGeom.numElements * sizeof(float) ||
(outputMasked.mode == matlabGpuArray || outputMasked.mode == matlabGpuArrayWrapper) != gpuMode) {
packed_data_deinit (&outputMasked) ;
packed_data_init_with_geom (&outputMasked, gpuMode, outputMaskedGeom, true, false, 0);
}
if (!backMode) {
packed_data_init_with_geom(&output, gpuMode, outputGeom, false, false, 0) ;
} else {
if (computeDerData) {
packed_data_init_with_geom(&derData, gpuMode, derDataGeom, false, false, 0) ;
}
if (computeDerFilters) {
packed_data_init_with_geom(&derFilters, gpuMode, derFiltersGeom, false, false, 0) ;
if (derFiltersInitialized) {
        copy_dispatch(gpuMode, derFilters.memory, derFiltersInit.memory, derFilters.geom.numElements);
}
}
if (computeDerBiases && hasBiases) {
packed_data_init_with_geom(&derBiases, gpuMode, derBiasesGeom, false, false, 0) ;
if (derFiltersInitialized) {
        copy_dispatch(gpuMode, derBiases.memory, derBiasesInit.memory, derBiases.geom.numElements);
}
}
}
if (fullyConnectedMode) {
float alpha = 1 ;
float beta = 0 ;
ptrdiff_t filtersVolume = filters.geom.height*filters.geom.width*filters.geom.depth ;
/* note: fullyConnectedMode also guarantees no padding, num filter groups = 1 */
/* optimise fully-connected mode case */
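    /* forward: y = F'x as one SGEMV (single image) or one SGEMM (whole batch), with the bias
       added as a rank-1 update against the allOnes vector; backward uses the corresponding
       transposed GEMMs below */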
if (!backMode) {
if (hasFilters) {
if (data.geom.size == 1) {
/* one image in the stack */
sgemv_dispatch(gpuMode, 't',
filtersVolume, filters.geom.size,
alpha,
filters.memory, filtersVolume,
data.memory, 1,
beta,
output.memory, 1) ;
} else {
/* multiple images in the stack */
sgemm_dispatch(gpuMode, 't', 'n',
filters.geom.size, data.geom.size, filtersVolume,
alpha,
filters.memory, filtersVolume,
data.memory, filtersVolume,
beta,
output.memory, filters.geom.size) ;
}
} else {
        /* if no filters are specified, assume that they act as the identity */
copy_dispatch(gpuMode,
output.memory, data.memory,
filtersVolume * data.geom.size) ;
}
if (hasBiases) {
float beta = 1 ;
ptrdiff_t q = 1 ;
sgemm_dispatch(gpuMode, 'n', 'n',
filters.geom.size, data.geom.size, q,
alpha,
biases.memory, filters.geom.size,
allOnes.memory, q,
beta,
output.memory, filters.geom.size) ;
}
} else {
/* back mode */
if (computeDerFilters && hasFilters) {
sgemm_dispatch(gpuMode, 'n', 't',
filtersVolume, filters.geom.size, data.geom.size,
alpha,
data.memory, filtersVolume,
derOutput.memory, filters.geom.size,
(float)(derFiltersInitialized > 0),
derFilters.memory, filtersVolume) ;
}
if (computeDerBiases && hasBiases) {
ptrdiff_t q = 1 ;
sgemm_dispatch(gpuMode, 'n', 't',
q, filters.geom.size, data.geom.size,
alpha,
allOnes.memory, q,
derOutput.memory, filters.geom.size,
(float)(derBiasesInitialized > 0),
derBiases.memory, q) ;
}
if (computeDerData) {
if (hasFilters) {
sgemm_dispatch(gpuMode, 'n', 'n',
filtersVolume, data.geom.size, filters.geom.size,
alpha,
filters.memory, filtersVolume,
derOutput.memory, filters.geom.size,
beta,
derData.memory, filtersVolume) ;
} else {
          /* no filters: act as the identity */
copy_dispatch(gpuMode,
derData.memory, derOutput.memory,
filtersVolume * data.geom.size) ;
}
}
}
} else if (convIndicesMode && hasFilters) {
// microbatchSize specifies the number of images to stack for GEMM
const int numMicrobatches = (data.geom.size + microbatchSize - 1) / microbatchSize;
for (int microbatchIdx = 0; microbatchIdx < numMicrobatches; ++microbatchIdx) {
int image = microbatchIdx * microbatchSize;
int numImages = (microbatchIdx != numMicrobatches - 1) ? microbatchSize : (data.geom.size - image);
ptrdiff_t dataOffset = (data.geom.height*data.geom.width*data.geom.depth) * image ;
ptrdiff_t outputOffset = (output.geom.height*output.geom.width*output.geom.depth) * image ;
ptrdiff_t derDataOffset = (derData.geom.height*derData.geom.width*derData.geom.depth) * image ;
ptrdiff_t derOutputOffset = (derOutput.geom.height*derOutput.geom.width*derOutput.geom.depth) * image ;
ptrdiff_t m = outputGeom.height * outputGeom.width ; /* num output pixels */
ptrdiff_t numRows = m * numImages ;
ptrdiff_t n = filters.geom.size/numGroups ; /* num filters per group */
ptrdiff_t k = filters.geom.height*filters.geom.width*filters.geom.depth ; /* filter volume */
if (backMode) {
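        /* with several images stacked, reorder derOutput from [pixels x channels x images] to
           [pixels x images x channels] so that the numRows = m*numImages rows seen by the grouped
           GEMMs below are contiguous for each channel */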
if (numImages > 1) {
transpose23_dispatch(gpuMode,
outputMasked.memory,
derOutput.memory + derOutputOffset,
m, derOutput.geom.depth, numImages) ;
}
float *curDerOutputMemory = numImages > 1 ? outputMasked.memory : derOutput.memory + derOutputOffset;
/* compute derFilters dz/dF */
if (computeDerFilters) {
im2col_indexed_dispatch(gpuMode,
temp.memory,
data.memory + dataOffset,
convIndices.memoryInt,
convIndices.geom.numElements,
data.geom.height, data.geom.width, data.geom.depth, numImages,
filters.geom.height, filters.geom.width) ;
for (int g = 0 ; g < numGroups ; ++ g) {
ptrdiff_t filterGrpOffset = k * n * g ;
ptrdiff_t tempGrpOffset = numRows * k * g ;
ptrdiff_t derOutputGrpOffset = numRows * n * g ;
float alpha = 1 ;
            float beta = (image > 0 || derFiltersInitialized) ; /* accumulating after the first image avoids zero-initializing the output */
sgemm_dispatch(gpuMode, 't', 'n',
k, n, numRows,
alpha,
temp.memory + tempGrpOffset, numRows,
curDerOutputMemory + derOutputGrpOffset, numRows,
beta,
derFilters.memory + filterGrpOffset, k) ;
}
}
          /* compute derBiases dz/dbias */
if (computeDerBiases & hasBiases) {
sgemv_dispatch(gpuMode, 't',
numRows, filters.geom.size,
1, /* alpha */
curDerOutputMemory, numRows,
allOnes.memory, 1,
(float)(image > 0 || derBiasesInitialized), /* beta */
derBiases.memory, 1) ;
}
/* compute derData dz/dx */
if (computeDerData) {
for (int g = 0 ; g < numGroups ; ++ g) {
ptrdiff_t filterGrpOffset = k * n * g ;
ptrdiff_t tempGrpOffset = numRows * k * g ;
ptrdiff_t derOutputGrpOffset = numRows * n * g ;
float alpha = 1 ;
float beta = 0 ;
sgemm_dispatch(gpuMode, 'n', 't',
numRows, k, n,
alpha,
curDerOutputMemory + derOutputGrpOffset, numRows,
filters.memory + filterGrpOffset, k,
beta,
temp.memory + tempGrpOffset,
numRows) ;
}
col2im_indexed_dispatch(gpuMode,
derData.memory + derDataOffset,
temp.memory,
convIndices.memoryInt,
convIndices.geom.numElements,
data.geom.height, data.geom.width, data.geom.depth, numImages,
filters.geom.height, filters.geom.width);
}
} else {
float *curOutputMemory = numImages > 1 ? outputMasked.memory : output.memory + outputOffset;
im2col_indexed_dispatch(gpuMode,
temp.memory,
data.memory + dataOffset,
convIndices.memoryInt,
convIndices.geom.numElements,
data.geom.height, data.geom.width, data.geom.depth, numImages,
filters.geom.height, filters.geom.width) ;
for (int g = 0 ; g < numGroups ; ++ g) {
ptrdiff_t filterGrpOffset = k * n * g ;
ptrdiff_t tempGrpOffset = numRows * k * g ;
ptrdiff_t outputGrpOffset = numRows * n * g ;
float alpha = 1 ;
float beta = 0 ;
sgemm_dispatch(gpuMode, 'n', 'n',
numRows, n, k,
alpha,
temp.memory + tempGrpOffset, numRows,
filters.memory + filterGrpOffset, k,
beta,
curOutputMemory + outputGrpOffset,
numRows) ;
}
if (hasBiases) {
float alpha = 1 ;
float beta = 1 ;
ptrdiff_t q = 1 ;
sgemm_dispatch(gpuMode, 'n', 'n',
numRows, biases.geom.numElements, q,
alpha,
allOnes.memory, numRows,
biases.memory, q,
beta,
curOutputMemory,
numRows) ;
}
if (numImages > 1) {
transpose23_dispatch(gpuMode,
output.memory + outputOffset,
outputMasked.memory,
m, numImages, output.geom.depth) ;
}
}
}
} else {
    // This branch handles the remaining cases: 1x1 convolutions (which skip im2col/col2im) and
    // calls to vl_nnconv without convIndices.
    // It could be merged with the previous branch, but the number of conditionals inside is
    // already high enough as it is.
for (int image = 0 ; image < data.geom.size ; ++image) {
/*
temp (phi(x)): m x k
filters, derFilters: k x n (for one group of filters)
derOutput (dzdy) : m x n (for one group of filters)
res (y) : m x n (for one group of filters)
*/
ptrdiff_t dataOffset = (data.geom.height*data.geom.width*data.geom.depth) * image ;
ptrdiff_t outputOffset = (output.geom.height*output.geom.width*output.geom.depth) * image ;
ptrdiff_t derDataOffset = (derData.geom.height*derData.geom.width*derData.geom.depth) * image ;
ptrdiff_t derOutputOffset = (derOutput.geom.height*derOutput.geom.width*derOutput.geom.depth) * image ;
ptrdiff_t m = outputGeom.height * outputGeom.width ; /* num output pixels */
ptrdiff_t n = filters.geom.size/numGroups ; /* num filters per group */
ptrdiff_t k = filters.geom.height*filters.geom.width*filters.geom.depth ; /* filter volume */
float* tempMemory;
if (backMode) {
/* ---------------------------------------------------------- */
/* Backward mode */
/* ---------------------------------------------------------- */
/* compute derFilters dz/dF */
if (computeDerFilters & hasFilters) {
if (!is_1x1) {
im2col_dispatch(gpuMode,
temp.memory,
data.memory + dataOffset,
data.geom.height, data.geom.width, data.geom.depth,
filters.geom.height, filters.geom.width,
strideY, strideX,
padTop, padBottom, padLeft, padRight) ;
tempMemory = temp.memory;
} else {
tempMemory = data.memory + dataOffset;
}
for (int g = 0 ; g < numGroups ; ++ g) {
ptrdiff_t filterGrpOffset = k * n * g ;
ptrdiff_t tempGrpOffset = m * k * g ;
ptrdiff_t derOutputGrpOffset = m * n * g ;
float alpha = 1 ;
            float beta = (image > 0 || derFiltersInitialized) ; /* accumulating after the first image avoids zero-initializing the output */
sgemm_dispatch(gpuMode, 't', 'n',
k, n, m,
alpha,
tempMemory + tempGrpOffset, m,
derOutput.memory + derOutputOffset + derOutputGrpOffset, m,
beta,
derFilters.memory + filterGrpOffset, k) ;
}
}
/* compute derBiases dz/dbias */
if (computeDerBiases & hasBiases) {
sgemv_dispatch(gpuMode, 't',
m, filters.geom.size,
1, /* alpha */
derOutput.memory + derOutputOffset, m,
allOnes.memory, 1,
(float)(image > 0 || derBiasesInitialized), /* beta */
derBiases.memory, 1) ;
}
/* compute derData dz/dx */
if (computeDerData) {
if (hasFilters) {
if (!is_1x1) {
tempMemory = temp.memory;
} else {
tempMemory = derData.memory + derDataOffset;
}
for (int g = 0 ; g < numGroups ; ++ g) {
ptrdiff_t filterGrpOffset = k * n * g ;
ptrdiff_t tempGrpOffset = m * k * g ;
ptrdiff_t derOutputGrpOffset = m * n * g ;
float alpha = 1 ;
float beta = 0 ;
sgemm_dispatch(gpuMode, 'n', 't',
m, k, n,
alpha,
derOutput.memory + derOutputOffset + derOutputGrpOffset, m,
filters.memory + filterGrpOffset, k,
beta,
tempMemory + tempGrpOffset,
m) ;
}
if (!is_1x1) {
col2im_dispatch(gpuMode,
derData.memory + derDataOffset,
temp.memory,
data.geom.height, data.geom.width, data.geom.depth,
filters.geom.height, filters.geom.width,
strideY, strideX,
padTop, padBottom, padLeft, padRight) ;
}
} else {
subsampleBackward_dispatch(gpuMode,
derData.memory + derDataOffset,
derOutput.memory + derOutputOffset,
data.geom.height, data.geom.width, data.geom.depth,
strideY, strideX,
padTop, padBottom, padLeft, padRight) ;
}
}
} else {
/* ---------------------------------------------------------- */
/* Forward mode */
/* ---------------------------------------------------------- */
if (hasFilters) {
if (!is_1x1) {
im2col_dispatch(gpuMode,
temp.memory,
data.memory + dataOffset,
data.geom.height, data.geom.width, data.geom.depth,
filters.geom.height, filters.geom.width,
strideY, strideX,
padTop, padBottom, padLeft, padRight) ;
tempMemory = temp.memory;
} else {
tempMemory = data.memory + dataOffset;
}
for (int g = 0 ; g < numGroups ; ++ g) {
ptrdiff_t filterGrpOffset = k * n * g ;
ptrdiff_t tempGrpOffset = m * k * g ;
ptrdiff_t outputGrpOffset = m * n * g ;
float alpha = 1 ;
float beta = 0 ;
sgemm_dispatch(gpuMode, 'n', 'n',
m, n, k,
alpha,
tempMemory + tempGrpOffset, m,
filters.memory + filterGrpOffset, k,
beta,
output.memory + outputOffset + outputGrpOffset, m) ;
}
} else {
/* no filters: identity */
subsample_dispatch(gpuMode,
output.memory + outputOffset,
data.memory + dataOffset,
data.geom.height, data.geom.width, data.geom.depth,
strideY, strideX,
padTop, padBottom, padLeft, padRight) ;
}
if (hasBiases) {
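        /* add the biases with a rank-1 update: output += allOnes * biases' */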
float alpha = 1 ;
float beta = 1 ;
ptrdiff_t q = 1 ;
sgemm_dispatch(gpuMode, 'n', 'n',
m, biases.geom.numElements, q,
alpha,
allOnes.memory, m,
biases.memory, q,
beta,
output.memory + outputOffset, m) ;
}
}
}
}
/* -------------------------------------------------------------- */
/* Cleanup */
/* -------------------------------------------------------------- */
packed_data_deinit(&data) ;
packed_data_deinit(&filters) ;
packed_data_deinit(&biases) ;
if (convIndicesMode) {
packed_data_deinit(&convIndices);
}
if (backMode) {
packed_data_deinit(&derOutput) ;
out[OUT_RESULT] = (computeDerData) ? packed_data_deinit_extracting_array(&derData) : mxCreateDoubleMatrix(0,0,mxREAL) ;
out[OUT_DERFILTERS] =(computeDerFilters & hasFilters)? packed_data_deinit_extracting_array(&derFilters) : mxCreateDoubleMatrix(0,0,mxREAL) ;
out[OUT_DERBIASES] = (computeDerBiases & hasBiases) ? packed_data_deinit_extracting_array(&derBiases) : mxCreateDoubleMatrix(0,0,mxREAL) ;
} else {
out[OUT_RESULT] = packed_data_deinit_extracting_array(&output) ;
}
packed_data_deinit(&derFiltersInit) ;
packed_data_deinit(&derBiasesInit) ;
}
|
b117fd5fc18f391b3d4bb36b6b7977dee5d0ca55.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<chrono>
#include<cuda.h>
#include<cuda_runtime.h>
#define N 1024
using namespace std;
using namespace std::chrono;
static const int wholeArraySize = 100000000;
static const int blockSize = 1024;
static const int gridSize = 24; //this number is hardware-dependent; usually #SM*2 is a good number.
__global__ void maxPerBlock(const int *gArr, int arraySize, int *gOut) {
int thIdx = threadIdx.x;
int gthIdx = thIdx + blockIdx.x*blockSize;
const int gridSize = blockSize*gridDim.x;
int max = gArr[0];
for (int i = gthIdx; i < arraySize; i += gridSize)
if(max < gArr[i])
max = gArr[i];
__shared__ int shArr[blockSize];
shArr[thIdx] = max;
__syncthreads();
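    // The commented-out tree reduction below accumulates a sum, so it is left disabled;
    // thread 0 instead performs a serial max scan over the shared array.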
/*for (int size = blockSize/2; size>0; size/=2) { //uniform
if (thIdx<size)
shArr[thIdx] += shArr[thIdx+size];
__syncthreads();
}*/
if (thIdx == 0)
{
max = shArr[0];
for(int i = 0 ; i < blockSize ; i++)
{
if(max < shArr[i])
{
max = shArr[i];
}
}
}
if (thIdx == 0)
gOut[blockIdx.x] = max;
}
int main() {
int *arr = new int[wholeArraySize];
for(int i = 0; i < wholeArraySize ; i++)
{
arr[i] = (i+1)%10;
}
int* dev_arr;
hipMalloc((void**)&dev_arr, wholeArraySize * sizeof(int));
hipMemcpy(dev_arr, arr, wholeArraySize * sizeof(int), hipMemcpyHostToDevice);
int out;
int* dev_out;
hipMalloc((void**)&dev_out, sizeof(int)*gridSize);
hipLaunchKernelGGL(( maxPerBlock), dim3(gridSize), dim3(blockSize), 0, 0, dev_arr, wholeArraySize, dev_out);
//dev_out now holds the partial result
hipLaunchKernelGGL(( maxPerBlock), dim3(1), dim3(blockSize), 0, 0, dev_out, gridSize, dev_out);
//dev_out[0] now holds the final result
hipDeviceSynchronize();
hipMemcpy(&out, dev_out, sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_arr);
hipFree(dev_out);
cout<<"Max is : "<<out;
}
|
b117fd5fc18f391b3d4bb36b6b7977dee5d0ca55.cu
|
#include<iostream>
#include<chrono>
#include<cuda.h>
#include<cuda_runtime.h>
#define N 1024
using namespace std;
using namespace std::chrono;
static const int wholeArraySize = 100000000;
static const int blockSize = 1024;
static const int gridSize = 24; //this number is hardware-dependent; usually #SM*2 is a good number.
__global__ void maxPerBlock(const int *gArr, int arraySize, int *gOut) {
int thIdx = threadIdx.x;
int gthIdx = thIdx + blockIdx.x*blockSize;
const int gridSize = blockSize*gridDim.x;
int max = gArr[0];
for (int i = gthIdx; i < arraySize; i += gridSize)
if(max < gArr[i])
max = gArr[i];
__shared__ int shArr[blockSize];
shArr[thIdx] = max;
__syncthreads();
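    // The commented-out tree reduction below accumulates a sum, so it is left disabled;
    // thread 0 instead performs a serial max scan over the shared array.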
/*for (int size = blockSize/2; size>0; size/=2) { //uniform
if (thIdx<size)
shArr[thIdx] += shArr[thIdx+size];
__syncthreads();
}*/
if (thIdx == 0)
{
max = shArr[0];
for(int i = 0 ; i < blockSize ; i++)
{
if(max < shArr[i])
{
max = shArr[i];
}
}
}
if (thIdx == 0)
gOut[blockIdx.x] = max;
}
int main() {
int *arr = new int[wholeArraySize];
for(int i = 0; i < wholeArraySize ; i++)
{
arr[i] = (i+1)%10;
}
int* dev_arr;
cudaMalloc((void**)&dev_arr, wholeArraySize * sizeof(int));
cudaMemcpy(dev_arr, arr, wholeArraySize * sizeof(int), cudaMemcpyHostToDevice);
int out;
int* dev_out;
cudaMalloc((void**)&dev_out, sizeof(int)*gridSize);
maxPerBlock<<<gridSize, blockSize>>>(dev_arr, wholeArraySize, dev_out);
//dev_out now holds the partial result
maxPerBlock<<<1, blockSize>>>(dev_out, gridSize, dev_out);
//dev_out[0] now holds the final result
cudaDeviceSynchronize();
cudaMemcpy(&out, dev_out, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_arr);
cudaFree(dev_out);
cout<<"Max is : "<<out;
}
|
4fa1d175c06006148b9e65b330427d92ee942151.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <float.h>
#include <stdio.h>
#include "types.h"
#define X_DIM BOX_SIZE
__device__ __shared__ t_vector view_point;
__device__ __shared__ float y_dim;
__device__ __shared__ int n_spheres;
__device__ __shared__ int n_lights;
__device__ __shared__ t_sphere spheres[ SPHERES_MAX ];
__device__ __shared__ t_light lights[ LIGHTS_MAX ];
#define EPSILON 1e-2
__device__ bool fequal(float a, float b)
{
return fabs( __fadd_rn(a, -b) ) < EPSILON;
}
__device__ void vec_sub(t_vector *v1, t_vector *v2, t_vector *v3)
{
v1->x = __fadd_rn(v2->x, -v3->x);
v1->y = __fadd_rn(v2->y, -v3->y);
v1->z = __fadd_rn(v2->z, -v3->z);
}
__device__ void vec_add(t_vector *v1, t_vector *v2, t_vector *v3)
{
v1->x = __fadd_rn(v2->x, v3->x);
v1->y = __fadd_rn(v2->y, v3->y);
v1->z = __fadd_rn(v2->z, v3->z);
}
__device__ void vec_scale(float scale, t_vector *v1, t_vector *v2)
{
v1->x = __fmul_rn(scale, v2->x); // multiplying
v1->y = __fmul_rn(scale, v2->y);
v1->z = __fmul_rn(scale, v2->z);
}
__device__ float dotproduct(t_vector *v1, t_vector *v2)
{
return __fadd_rn(
__fmul_rn(v1->x, v2->x),
__fadd_rn( __fmul_rn (v1->y, v2->y), __fmul_rn(v1->z, v2->z)));
}
__device__ void normalize_vector(t_vector *v)
{
float magnitude = __fsqrt_rn(dotproduct(v, v));
v->x = __fdiv_rn (v->x, magnitude);
v->y = __fdiv_rn (v->y, magnitude);
v->z = __fdiv_rn (v->z, magnitude);
}
__device__ void compute_ray(t_ray* ray, t_vector* view_point, t_pixel* pixel, float width, float height)
{
ray->origin = *view_point;
ray->direction.x =
__fdiv_rn(__fmul_rn(X_DIM, pixel->i+1),
(width)) - __fdiv_rn(X_DIM, 2.0f) ;
ray->direction.y =
__fdiv_rn(__fmul_rn(y_dim, pixel->j),
(height)) - __fdiv_rn(y_dim, 2.0f) ;
ray->direction.z = (float)DISTANCE;
normalize_vector(&ray->direction);
}
__device__ void compute_reflected_ray(t_ray* reflected_ray, t_ray* incidence_ray,
t_sphere_intersection* intersection)
{
float dp1;
t_vector scaled_normal;
reflected_ray->origin=intersection->point;
dp1 = dotproduct(&intersection->normal, &incidence_ray->direction);
dp1 = __fmul_rn (2.0f, dp1);
vec_scale(dp1, &scaled_normal, &intersection->normal);
vec_sub(&reflected_ray->direction, &incidence_ray->direction, &scaled_normal);
}
__device__ void compute_ray_to_light(t_ray* ray,
t_sphere_intersection* intersection, t_vector* light)
{
ray->origin = intersection->point;
vec_sub(&ray->direction, light, &intersection->point);
normalize_vector(&ray->direction);
}
__device__ bool sphere_intersection (t_ray *ray, t_sphere *sphere,
t_sphere_intersection* intersection)
{
float discriminant;
float A, B, C;
float lambda1, lambda2;
t_vector temp;
A = dotproduct(&ray->direction, &ray->direction);
vec_sub(&temp, &ray->origin, &sphere->center);
B = __fmul_rn (2.0f, dotproduct(&temp, &ray->direction));
C = __fadd_rn( dotproduct(&temp, &temp),
-__fmul_rn( sphere->radius, sphere->radius ));
discriminant = __fadd_rn( __fmul_rn(B, B),
-__fmul_rn(4.0f, __fmul_rn(A, C)));
if (discriminant >= 0.0f)
{
lambda1 = __fdiv_rn(__fadd_rn(-B, __fsqrt_rn(discriminant)),
__fmul_rn(2.0f, A));
lambda2 = __fdiv_rn(__fadd_rn(-B, -__fsqrt_rn(discriminant)),
__fmul_rn(2.0f, A));
intersection->lambda_in = fminf(lambda1, lambda2);
// is the object visible from the eye (lambda1,2>0)
if (fequal(intersection->lambda_in, 0.0f))
{
return true;
}
if (lambda1 > 0.0f && lambda2 > 0.0f)
{
return true;
}
}
return false;
}
// Calculate normal vector in the point of intersection:
__device__ void intersection_normal(t_sphere *sphere,
t_sphere_intersection* intersection, t_ray* ray)
{
float scale;
t_vector tmp_vec;
//calculating coordinates of intersection point
vec_scale(intersection->lambda_in, &tmp_vec, &ray->direction);
vec_add(&intersection->point, &tmp_vec, &ray->origin);
//calculating direction of normal in the point of intersection
vec_sub(&tmp_vec, &intersection->point, &sphere->center);
//scaling normal vector
scale = __frcp_rn(sphere->radius);
vec_scale(scale, &intersection->normal, &tmp_vec);
normalize_vector(&intersection->normal);
}
__device__ t_color TraceRay(t_ray ray, int depth)
{
t_ray ray_tmp;
t_color illumination={0.0f, 0.0f, 0.0f};
t_color tmp;
if (depth > DEPTH_MAX )
{
return illumination;
}
t_sphere_intersection intersection, current_intersection;
int intersection_object = -1; // none
int k, i;
	int cnt_lights=n_lights; // number of lights visible from the intersection point (starts at n_lights)
float visible = 1.0f;
float current_lambda = FLT_MAX; // maximum positive float
//find closest ray object / intersection ;
for (k=0; k<n_spheres; k++)
{
if (sphere_intersection(&ray, &spheres[k], &intersection))
{
if (intersection.lambda_in<current_lambda)
{
current_lambda=intersection.lambda_in;
intersection_object=k;
current_intersection=intersection;
}
}
}
//if( intersection exists )
if (intersection_object > -1)
{
intersection_normal(&spheres[intersection_object], ¤t_intersection, &ray);
//for each light source in the scene
for (i=0; i<n_lights; i++)
{
compute_ray_to_light(&ray_tmp, ¤t_intersection, &lights[i]);
for (k=0; k<n_spheres; k++)
{
if (sphere_intersection
(&ray_tmp, &spheres[k], &intersection)
)
{
cnt_lights--;
break;
}
}
}
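		// simple shadow term: scale the colour between 0.4 and 1.0 by the fraction of
		// lights that are not blocked by another sphere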
visible=0.4f+0.6f*cnt_lights/n_lights;
illumination.red = __fmul_rn(visible, spheres[intersection_object].red);
illumination.green = __fmul_rn(visible, spheres[intersection_object].green);
illumination.blue = __fmul_rn(visible, spheres[intersection_object].blue);
compute_reflected_ray(&ray_tmp, &ray, ¤t_intersection);
tmp = TraceRay(ray_tmp, depth + 1 );
illumination.red = __fadd_rn (illumination.red, tmp.red);
illumination.blue = __fadd_rn (illumination.blue, tmp.blue);
illumination.green = __fadd_rn (illumination.green,tmp.green);
}
return illumination;
}
__global__ void kernel(unsigned char * dev_image_red,
unsigned char * dev_image_blue,
unsigned char * dev_image_green,
int height, int width,
t_sphere * dev_spheres, int dev_n_spheres,
t_light * dev_lights, int dev_n_lights)
{
t_color illumination;
t_ray ray;
t_pixel pixel;
pixel.i = blockIdx.x * blockDim.x + threadIdx.x; // x coordinate inside whole picture
pixel.j = blockIdx.y * blockDim.y + threadIdx.y; // y coordinate inside whole picture
int idx = threadIdx.x + threadIdx.y * blockDim.x; //linear index inside a block
// is there a way to overcome warp divergence?
if (threadIdx.x == 0 && threadIdx.y == 0)
{
n_spheres = dev_n_spheres;
n_lights = dev_n_lights;
y_dim = __fdiv_rn(BOX_SIZE, __fdiv_rn((float)width, (float)height));
view_point.x = __fdiv_rn (X_DIM, 2.0f);
view_point.y = __fdiv_rn (y_dim, 2.0f);
view_point.z = 0.0f;
}
__syncthreads();
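	// Cooperatively stage the sphere and light arrays from global into shared memory,
	// one float per thread.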
int n1 = n_spheres * int(sizeof(t_sphere) / sizeof(float));
// n1 is the number of floats in spheres array;
int n2 = n_lights * int(sizeof(t_light) / sizeof(float));
// n2 is the number of floats in lights array;
int n3 = ( idx < n1 ) ? 0 : 1;
	// n3 selects which of the float* arrays below to use (0 = spheres, 1 = lights)
int n4 = (idx < n1 ) ? idx : idx -n1;
float * dst_arr[] = { (float*)spheres, (float*)lights};
float * src_arr[] = { (float*)dev_spheres, (float*)dev_lights};
float * dst;
float * src;
if (idx < n1 + n2 )
{
dst = dst_arr [n3];
src = src_arr [n3];
dst[n4] = src[n4];
}
__syncthreads();
if (pixel.i >= width || pixel.j >= height)
{
return;
}
//compute ray starting point and direction ;
compute_ray(&ray, &view_point, &pixel, width, height);
illumination = TraceRay(ray, 0) ;
//pixel color = illumination tone mapped to displayable range ;
if (illumination.red>1.0f)
illumination.red=1.0f;
if (illumination.green>1.0f)
illumination.green=1.0f;
if (illumination.blue>1.0f)
illumination.blue=1.0f;
idx = pixel.i + __mul24(width, pixel.j);
if ( !fequal(illumination.red, 0.0f) )
{
dev_image_red [idx ] =
(unsigned char)round(__fmul_rn (RGB_MAX, illumination.red));
}
if ( !fequal(illumination.green, 0.0f) )
{
dev_image_green[ idx ] =
(unsigned char)round(__fmul_rn (RGB_MAX, illumination.green));
}
if ( !fequal(illumination.blue, 0.0f) )
{
dev_image_blue [ idx ] =
(unsigned char)round(__fmul_rn (RGB_MAX, illumination.blue));
}
}
|
4fa1d175c06006148b9e65b330427d92ee942151.cu
|
#include <float.h>
#include <stdio.h>
#include "types.h"
#define X_DIM BOX_SIZE
__device__ __shared__ t_vector view_point;
__device__ __shared__ float y_dim;
__device__ __shared__ int n_spheres;
__device__ __shared__ int n_lights;
__device__ __shared__ t_sphere spheres[ SPHERES_MAX ];
__device__ __shared__ t_light lights[ LIGHTS_MAX ];
#define EPSILON 1e-2
__device__ bool fequal(float a, float b)
{
return fabs( __fadd_rn(a, -b) ) < EPSILON;
}
__device__ void vec_sub(t_vector *v1, t_vector *v2, t_vector *v3)
{
v1->x = __fadd_rn(v2->x, -v3->x);
v1->y = __fadd_rn(v2->y, -v3->y);
v1->z = __fadd_rn(v2->z, -v3->z);
}
__device__ void vec_add(t_vector *v1, t_vector *v2, t_vector *v3)
{
v1->x = __fadd_rn(v2->x, v3->x);
v1->y = __fadd_rn(v2->y, v3->y);
v1->z = __fadd_rn(v2->z, v3->z);
}
__device__ void vec_scale(float scale, t_vector *v1, t_vector *v2)
{
v1->x = __fmul_rn(scale, v2->x); // multiplying
v1->y = __fmul_rn(scale, v2->y);
v1->z = __fmul_rn(scale, v2->z);
}
__device__ float dotproduct(t_vector *v1, t_vector *v2)
{
return __fadd_rn(
__fmul_rn(v1->x, v2->x),
__fadd_rn( __fmul_rn (v1->y, v2->y), __fmul_rn(v1->z, v2->z)));
}
__device__ void normalize_vector(t_vector *v)
{
float magnitude = __fsqrt_rn(dotproduct(v, v));
v->x = __fdiv_rn (v->x, magnitude);
v->y = __fdiv_rn (v->y, magnitude);
v->z = __fdiv_rn (v->z, magnitude);
}
__device__ void compute_ray(t_ray* ray, t_vector* view_point, t_pixel* pixel, float width, float height)
{
ray->origin = *view_point;
ray->direction.x =
__fdiv_rn(__fmul_rn(X_DIM, pixel->i+1),
(width)) - __fdiv_rn(X_DIM, 2.0f) ;
ray->direction.y =
__fdiv_rn(__fmul_rn(y_dim, pixel->j),
(height)) - __fdiv_rn(y_dim, 2.0f) ;
ray->direction.z = (float)DISTANCE;
normalize_vector(&ray->direction);
}
__device__ void compute_reflected_ray(t_ray* reflected_ray, t_ray* incidence_ray,
t_sphere_intersection* intersection)
{
float dp1;
t_vector scaled_normal;
reflected_ray->origin=intersection->point;
dp1 = dotproduct(&intersection->normal, &incidence_ray->direction);
dp1 = __fmul_rn (2.0f, dp1);
vec_scale(dp1, &scaled_normal, &intersection->normal);
vec_sub(&reflected_ray->direction, &incidence_ray->direction, &scaled_normal);
}
__device__ void compute_ray_to_light(t_ray* ray,
t_sphere_intersection* intersection, t_vector* light)
{
ray->origin = intersection->point;
vec_sub(&ray->direction, light, &intersection->point);
normalize_vector(&ray->direction);
}
__device__ bool sphere_intersection (t_ray *ray, t_sphere *sphere,
t_sphere_intersection* intersection)
{
float discriminant;
float A, B, C;
float lambda1, lambda2;
t_vector temp;
A = dotproduct(&ray->direction, &ray->direction);
vec_sub(&temp, &ray->origin, &sphere->center);
B = __fmul_rn (2.0f, dotproduct(&temp, &ray->direction));
C = __fadd_rn( dotproduct(&temp, &temp),
-__fmul_rn( sphere->radius, sphere->radius ));
discriminant = __fadd_rn( __fmul_rn(B, B),
-__fmul_rn(4.0f, __fmul_rn(A, C)));
if (discriminant >= 0.0f)
{
lambda1 = __fdiv_rn(__fadd_rn(-B, __fsqrt_rn(discriminant)),
__fmul_rn(2.0f, A));
lambda2 = __fdiv_rn(__fadd_rn(-B, -__fsqrt_rn(discriminant)),
__fmul_rn(2.0f, A));
intersection->lambda_in = fminf(lambda1, lambda2);
// is the object visible from the eye (lambda1,2>0)
if (fequal(intersection->lambda_in, 0.0f))
{
return true;
}
if (lambda1 > 0.0f && lambda2 > 0.0f)
{
return true;
}
}
return false;
}
// Calculate normal vector in the point of intersection:
__device__ void intersection_normal(t_sphere *sphere,
t_sphere_intersection* intersection, t_ray* ray)
{
float scale;
t_vector tmp_vec;
//calculating coordinates of intersection point
vec_scale(intersection->lambda_in, &tmp_vec, &ray->direction);
vec_add(&intersection->point, &tmp_vec, &ray->origin);
//calculating direction of normal in the point of intersection
vec_sub(&tmp_vec, &intersection->point, &sphere->center);
//scaling normal vector
scale = __frcp_rn(sphere->radius);
vec_scale(scale, &intersection->normal, &tmp_vec);
normalize_vector(&intersection->normal);
}
__device__ t_color TraceRay(t_ray ray, int depth)
{
t_ray ray_tmp;
t_color illumination={0.0f, 0.0f, 0.0f};
t_color tmp;
if (depth > DEPTH_MAX )
{
return illumination;
}
t_sphere_intersection intersection, current_intersection;
int intersection_object = -1; // none
int k, i;
	int cnt_lights=n_lights; // number of lights visible from the intersection point (starts at n_lights)
float visible = 1.0f;
float current_lambda = FLT_MAX; // maximum positive float
//find closest ray object / intersection ;
for (k=0; k<n_spheres; k++)
{
if (sphere_intersection(&ray, &spheres[k], &intersection))
{
if (intersection.lambda_in<current_lambda)
{
current_lambda=intersection.lambda_in;
intersection_object=k;
current_intersection=intersection;
}
}
}
//if( intersection exists )
if (intersection_object > -1)
{
intersection_normal(&spheres[intersection_object], ¤t_intersection, &ray);
//for each light source in the scene
for (i=0; i<n_lights; i++)
{
compute_ray_to_light(&ray_tmp, ¤t_intersection, &lights[i]);
for (k=0; k<n_spheres; k++)
{
if (sphere_intersection
(&ray_tmp, &spheres[k], &intersection)
)
{
cnt_lights--;
break;
}
}
}
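		// simple shadow term: scale the colour between 0.4 and 1.0 by the fraction of
		// lights that are not blocked by another sphere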
visible=0.4f+0.6f*cnt_lights/n_lights;
illumination.red = __fmul_rn(visible, spheres[intersection_object].red);
illumination.green = __fmul_rn(visible, spheres[intersection_object].green);
illumination.blue = __fmul_rn(visible, spheres[intersection_object].blue);
compute_reflected_ray(&ray_tmp, &ray, ¤t_intersection);
tmp = TraceRay(ray_tmp, depth + 1 );
illumination.red = __fadd_rn (illumination.red, tmp.red);
illumination.blue = __fadd_rn (illumination.blue, tmp.blue);
illumination.green = __fadd_rn (illumination.green,tmp.green);
}
return illumination;
}
__global__ void kernel(unsigned char * dev_image_red,
unsigned char * dev_image_blue,
unsigned char * dev_image_green,
int height, int width,
t_sphere * dev_spheres, int dev_n_spheres,
t_light * dev_lights, int dev_n_lights)
{
t_color illumination;
t_ray ray;
t_pixel pixel;
pixel.i = blockIdx.x * blockDim.x + threadIdx.x; // x coordinate inside whole picture
pixel.j = blockIdx.y * blockDim.y + threadIdx.y; // y coordinate inside whole picture
int idx = threadIdx.x + threadIdx.y * blockDim.x; //linear index inside a block
// is there a way to overcome warp divergence?
if (threadIdx.x == 0 && threadIdx.y == 0)
{
n_spheres = dev_n_spheres;
n_lights = dev_n_lights;
y_dim = __fdiv_rn(BOX_SIZE, __fdiv_rn((float)width, (float)height));
view_point.x = __fdiv_rn (X_DIM, 2.0f);
view_point.y = __fdiv_rn (y_dim, 2.0f);
view_point.z = 0.0f;
}
__syncthreads();
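	// Cooperatively stage the sphere and light arrays from global into shared memory,
	// one float per thread.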
int n1 = n_spheres * int(sizeof(t_sphere) / sizeof(float));
// n1 is the number of floats in spheres array;
int n2 = n_lights * int(sizeof(t_light) / sizeof(float));
// n2 is the number of floats in lights array;
int n3 = ( idx < n1 ) ? 0 : 1;
	// n3 selects which of the float* arrays below to use (0 = spheres, 1 = lights)
int n4 = (idx < n1 ) ? idx : idx -n1;
float * dst_arr[] = { (float*)spheres, (float*)lights};
float * src_arr[] = { (float*)dev_spheres, (float*)dev_lights};
float * dst;
float * src;
if (idx < n1 + n2 )
{
dst = dst_arr [n3];
src = src_arr [n3];
dst[n4] = src[n4];
}
__syncthreads();
if (pixel.i >= width || pixel.j >= height)
{
return;
}
//compute ray starting point and direction ;
compute_ray(&ray, &view_point, &pixel, width, height);
illumination = TraceRay(ray, 0) ;
//pixel color = illumination tone mapped to displayable range ;
if (illumination.red>1.0f)
illumination.red=1.0f;
if (illumination.green>1.0f)
illumination.green=1.0f;
if (illumination.blue>1.0f)
illumination.blue=1.0f;
idx = pixel.i + __mul24(width, pixel.j);
if ( !fequal(illumination.red, 0.0f) )
{
dev_image_red [idx ] =
(unsigned char)round(__fmul_rn (RGB_MAX, illumination.red));
}
if ( !fequal(illumination.green, 0.0f) )
{
dev_image_green[ idx ] =
(unsigned char)round(__fmul_rn (RGB_MAX, illumination.green));
}
if ( !fequal(illumination.blue, 0.0f) )
{
dev_image_blue [ idx ] =
(unsigned char)round(__fmul_rn (RGB_MAX, illumination.blue));
}
}
|
1aa4d4218407f90730b6576ab1e15e0a32dd5361.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*-------------------------------------------------------------------------
*
 * CUDA functions for steepest descent in POCS-type algorithms.
*
 * This file iteratively minimizes the total variation of the input image by
 * steepest descent, with the given parameters, using GPUs.
*
* CODE by Ander Biguri
*
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#define MAXTHREADS 1024
#define MAX_BUFFER 60
#include "POCS_TV.hpp"
#include "gpuUtils.hpp"
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
mexPrintf("%s \n",msg);\
hipDeviceReset();\
mexErrMsgIdAndTxt("POCS_TV:GPU",hipGetErrorString(__err));\
} \
} while (0)
// CUDA kernels
//https://stackoverflow.com/questions/21332040/simple-cuda-kernel-optimization/21340927#21340927
__global__ void divideArrayScalar(float* vec,float scalar,const size_t n){
unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x;
for(; i<n; i+=gridDim.x*blockDim.x) {
vec[i]/=scalar;
}
}
__global__ void multiplyArrayScalar(float* vec,float scalar,const size_t n)
{
unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x;
for(; i<n; i+=gridDim.x*blockDim.x) {
vec[i]*=scalar;
}
}
__global__ void substractArrays(float* vec,float* vec2,const size_t n)
{
unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x;
for(; i<n; i+=gridDim.x*blockDim.x) {
vec[i]-=vec2[i];
}
}
__device__ __inline__
void gradient(const float* u, float* grad,
long z, long y, long x,
long depth, long rows, long cols){
unsigned long size2d = rows*cols;
unsigned long long idx = z * size2d + y * cols + x;
float uidx = u[idx];
if ( z - 1 >= 0 && z<depth) {
grad[0] = (uidx-u[(z-1)*size2d + y*cols + x]) ;
}
if ( y - 1 >= 0 && y<rows){
grad[1] = (uidx-u[z*size2d + (y-1)*cols + x]) ;
}
if ( x - 1 >= 0 && x<cols) {
grad[2] = (uidx-u[z*size2d + y*cols + (x-1)]);
}
}
__global__ void gradientTV(const float* f, float* dftv,
long depth, long rows, long cols){
unsigned long x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned long y = threadIdx.y + blockIdx.y * blockDim.y;
unsigned long z = threadIdx.z + blockIdx.z * blockDim.z;
unsigned long long idx = z * rows * cols + y * cols + x;
if ( x >= cols || y >= rows || z >= depth )
return;
float df[3] ={0.f,0.f,0.f};
float dfi[3]={0.f,0.f,0.f}; // dfi== \partial f_{i+1,j,k}
float dfj[3]={0.f,0.f,0.f};
float dfk[3]={0.f,0.f,0.f};
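    // gradients at this voxel and at its +x, +y and +z neighbours: the neighbour terms provide
    // the parts of the TV subgradient in which this voxel appears with a negative sign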
gradient(f,df ,z ,y ,x , depth,rows,cols);
gradient(f,dfi ,z ,y ,x+1, depth,rows,cols);
gradient(f,dfj ,z ,y+1,x , depth,rows,cols);
gradient(f,dfk ,z+1,y ,x , depth,rows,cols);
float eps=0.00000001; //% avoid division by zero
dftv[idx]=(df[0]+df[1]+df[2])/(sqrt(df[0] *df[0] +df[1] *df[1] +df[2] *df[2])+eps)
                        -dfi[2]/(sqrt(dfi[0]*dfi[0]+dfi[1]*dfi[1]+dfi[2]*dfi[2]) +eps) // I wish I could precompute this, but then I'd need to recompute the gradient.
-dfj[1]/(sqrt(dfj[0]*dfj[0]+dfj[1]*dfj[1]+dfj[2]*dfj[2]) +eps)
-dfk[0]/(sqrt(dfk[0]*dfk[0]+dfk[1]*dfk[1]+dfk[2]*dfk[2]) +eps);
return;
}
__device__ void warpReduce(volatile float *sdata, size_t tid) {
sdata[tid] += sdata[tid + 32];
sdata[tid] += sdata[tid + 16];
sdata[tid] += sdata[tid + 8];
sdata[tid] += sdata[tid + 4];
sdata[tid] += sdata[tid + 2];
sdata[tid] += sdata[tid + 1];
}
__global__ void reduceNorm2(float *g_idata, float *g_odata, size_t n){
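    // Each block accumulates a partial sum of squares with a grid-stride loop, reduces it in
    // shared memory, and writes one value per block to g_odata; a second reduceSum launch
    // finishes the reduction when more than one block was used.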
extern __shared__ volatile float sdata[];
//http://stackoverflow.com/a/35133396/1485872
size_t tid = threadIdx.x;
size_t i = blockIdx.x*blockDim.x + tid;
size_t gridSize = blockDim.x*gridDim.x;
float mySum = 0;
float value=0;
while (i < n) {
value=g_idata[i]; //avoid reading twice
mySum += value*value;
i += gridSize;
}
sdata[tid] = mySum;
__syncthreads();
if (tid < 512)
sdata[tid] += sdata[tid + 512];
__syncthreads();
if (tid < 256)
sdata[tid] += sdata[tid + 256];
__syncthreads();
if (tid < 128)
sdata[tid] += sdata[tid + 128];
__syncthreads();
if (tid < 64)
sdata[tid] += sdata[tid + 64];
__syncthreads();
#if (__CUDART_VERSION >= 9000)
if ( tid < 32 )
{
mySum = sdata[tid] + sdata[tid + 32];
for (int offset = warpSize/2; offset > 0; offset /= 2) {
mySum += __shfl_down_sync(0xFFFFFFFF, mySum, offset,32);
}
}
#else
if (tid < 32) {
warpReduce(sdata, tid);
mySum = sdata[0];
}
#endif
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
__global__ void reduceSum(float *g_idata, float *g_odata, size_t n){
extern __shared__ volatile float sdata[];
//http://stackoverflow.com/a/35133396/1485872
size_t tid = threadIdx.x;
size_t i = blockIdx.x*blockDim.x + tid;
size_t gridSize = blockDim.x*gridDim.x;
float mySum = 0;
// float value=0;
while (i < n) {
mySum += g_idata[i];
i += gridSize;
}
sdata[tid] = mySum;
__syncthreads();
if (tid < 512)
sdata[tid] += sdata[tid + 512];
__syncthreads();
if (tid < 256)
sdata[tid] += sdata[tid + 256];
__syncthreads();
if (tid < 128)
sdata[tid] += sdata[tid + 128];
__syncthreads();
if (tid < 64)
sdata[tid] += sdata[tid + 64];
__syncthreads();
#if (__CUDART_VERSION >= 9000)
if ( tid < 32 )
{
mySum = sdata[tid] + sdata[tid + 32];
for (int offset = warpSize/2; offset > 0; offset /= 2) {
mySum += __shfl_down_sync(0xFFFFFFFF, mySum, offset,32);
}
}
#else
if (tid < 32) {
warpReduce(sdata, tid);
mySum = sdata[0];
}
#endif
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
// main function
void pocs_tv(float* img,float* dst,float alpha,const long* image_size, int maxIter, const GpuIds& gpuids){
// Prepare for MultiGPU
int deviceCount = gpuids.GetLength();
cudaCheckErrors("Device query fail");
if (deviceCount == 0) {
mexErrMsgIdAndTxt("POCS_TV:GPU","There are no available device(s) that support CUDA\n");
}
//
// CODE assumes
// 1.-All available devices are usable by this code
// 2.-All available devices are equal, they are the same machine (warning thrown)
// Check the available devices, and if they are the same
if (!gpuids.AreEqualDevices()) {
mexWarnMsgIdAndTxt("minimizeTV:POCS_TV:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed.");
}
int dev;
    // We don't know if the devices are being used. Let's check that, and only use the amount of memory we need.
size_t mem_GPU_global;
checkFreeMemory(gpuids, &mem_GPU_global);
    // Keeping a 5% margin of the free memory should be enough; we have almost no variables in these kernels
size_t total_pixels = image_size[0] * image_size[1] * image_size[2] ;
size_t mem_slice_image = sizeof(float)* image_size[0] * image_size[1] ;
size_t mem_size_image = sizeof(float)* total_pixels;
size_t mem_auxiliary = sizeof(float)* (total_pixels + MAXTHREADS - 1) / MAXTHREADS;
// Decide how are we handling the distribution of computation
size_t mem_img_each_GPU;
unsigned int buffer_length=2;
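    // buffer_length = number of halo slices kept on each side of every GPU chunk so that the 3D
    // gradient can be evaluated across split boundaries.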
//Does everything fit in the GPU?
unsigned int slices_per_split;
// if it is a thin problem (no need to split), just use one GPU
if (image_size[2]<4){deviceCount=1;}
unsigned int splits=1; // if the number does not fit in an uint, you have more serious trouble than this.
if(mem_GPU_global> 3*mem_size_image+3*(deviceCount-1)*mem_slice_image*buffer_length+mem_auxiliary){
// We only need to split if we have extra GPUs
slices_per_split=(image_size[2]+deviceCount-1)/deviceCount;
mem_img_each_GPU=mem_slice_image*((slices_per_split+buffer_length*2));
}else{
        // As mem_auxiliary is not expected to be large (for a 2000^3 image it is around 28 MB), let's for now assume we need it all
size_t mem_free=mem_GPU_global-mem_auxiliary;
splits=(unsigned int)(ceil(((float)(3*mem_size_image)/(float)(deviceCount))/mem_free));
        // Now, there is an overhead here, as each split needs 2 extra slices to account for the overlap between chunks.
        // Let's make sure these 2 slices fit; if they do not, add 1 to splits.
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits);
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
        // if the new layout does not fit in the GPU, it means we are in the edge case where adding that extra slice will overflow memory
if (mem_GPU_global< 3*mem_img_each_GPU+mem_auxiliary){
            // one more split should do the job, as it's an edge case.
splits++;
//recompute for later
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits); // amount of slices that fit on a GPU. Later we add 2 to these, as we need them for overlap
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
}
        // How many EXTRA buffer slices can we fit in the remaining memory?
        // Only do it if splits are needed.
if(splits>1){
mem_free=mem_GPU_global-(3*mem_img_each_GPU+mem_auxiliary);
unsigned int extra_buff=(mem_free/mem_slice_image);
buffer_length=(extra_buff/2)/3; // we need double whatever this results in, rounded down.
buffer_length=max(buffer_length,2);// minimum 2
buffer_length=min(MAX_BUFFER,buffer_length);
mem_img_each_GPU=mem_slice_image*(slices_per_split+buffer_length*2);
}else{
buffer_length=2;
}
// Assert
if (mem_GPU_global< 3*mem_img_each_GPU+mem_auxiliary){
mexErrMsgIdAndTxt("POCS_TV:GPU","Assertion Failed. Logic behind spliting flawed! Please tell: [email protected]\n");
}
}
// Assert
if ((slices_per_split+buffer_length*2)*image_size[0]*image_size[1]* sizeof(float)!= mem_img_each_GPU){
mexErrMsgIdAndTxt("POCS_TV:GPU","Assertion Failed. Memory needed calculation broken! Please tell: [email protected]\n");
}
float** d_image= (float**)malloc(deviceCount*sizeof(float*));
float** d_dimgTV= (float**)malloc(deviceCount*sizeof(float*));
float** d_norm2aux= (float**)malloc(deviceCount*sizeof(float*));
float** d_norm2= (float**)malloc(deviceCount*sizeof(float*));
// allocate memory in each GPU
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipMalloc((void**)&d_image[dev] , mem_img_each_GPU);
hipMemset(d_image[dev],0 , mem_img_each_GPU);
hipMalloc((void**)&d_dimgTV[dev] , mem_img_each_GPU);
hipMalloc((void**)&d_norm2[dev] , slices_per_split*mem_slice_image);
hipMalloc((void**)&d_norm2aux[dev] , mem_auxiliary);
cudaCheckErrors("Malloc error");
}
unsigned long long buffer_pixels=buffer_length*image_size[0]*image_size[1];
float* buffer;
if(splits>1){
mexWarnMsgIdAndTxt("minimizeTV:POCS_TV:Image_split","Your image can not be fully split between the available GPUs. The computation of minTV will be significantly slowed due to the image size.\nApproximated mathematics turned on for computational speed.");
}else{
hipHostMalloc((void**)&buffer,buffer_length*image_size[0]*image_size[1]*sizeof(float));
}
    // Let's try to make the host memory pinned:
    // We already queried the GPUs and assumed they are the same, thus they should have the same attributes.
int isHostRegisterSupported = 0;
#if CUDART_VERSION >= 9020
hipDeviceGetAttribute(&isHostRegisterSupported,hipDeviceAttributeHostRegisterSupported,gpuids[0]);
#endif
    // splits>2 is a purely empirical observation
if (isHostRegisterSupported & splits>2){
hipHostRegister(img ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),hipHostRegisterPortable);
hipHostRegister(dst ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),hipHostRegisterPortable);
}
cudaCheckErrors("Error pinning memory");
// Create streams
int nStream_device=2;
int nStreams=deviceCount*nStream_device;
hipStream_t* stream=(hipStream_t*)malloc(nStreams*sizeof(hipStream_t));
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
for (int i = 0; i < nStream_device; ++i){
hipStreamCreate(&stream[i+dev*nStream_device]);
}
}
cudaCheckErrors("Stream creation fail");
// For the reduction
double totalsum_prev;
double totalsum;
float sum_curr_spl;
float * sumnorm2;
hipHostMalloc((void**)&sumnorm2,deviceCount*sizeof(float));
unsigned int curr_slices;
unsigned long long curr_pixels;
size_t linear_idx_start;
unsigned long long* offset_device=(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* offset_host =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* bytes_device =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
bool is_first_chunk;
bool is_last_chunk;
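    // Main loop: each outer pass performs up to (buffer_length-1) descent steps before the split
    // boundaries are re-synchronized, which amortizes the copies when the image has to be split.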
for(unsigned int i=0;i<maxIter;i+=(buffer_length-1)){
if(splits>1){
totalsum_prev=0;
}
for(unsigned int sp=0;sp<splits;sp++){
            // For each iteration we need to compute the whole image. The ordering of these loops
            // needs to be like this due to the bounding layers between splits. If more than 1 split is needed
            // per GPU then there is no other way than taking the entire image out of the GPU and putting it back.
            // If the memory can be fully shared between GPUs without extra splits, then there is an easy way of synchronizing the memory.
// Copy image to memory
for (dev = 0; dev < deviceCount; dev++){
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
curr_pixels=curr_slices*image_size[0]*image_size[1];
linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev);
                // Check if it's the first or last chunk
is_last_chunk=!((sp*deviceCount+dev)<deviceCount*splits-1);
is_first_chunk=!(sp*deviceCount+dev);
                // let's compute where we start the copies and how much we copy. This avoids 3 calls to Memcpy
offset_device[dev]=buffer_pixels*is_first_chunk;
offset_host[dev]=linear_idx_start-buffer_pixels*!is_first_chunk;
bytes_device[dev]=curr_pixels+buffer_pixels*!is_first_chunk+buffer_pixels*!is_last_chunk;
}
if(i==0){
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipMemcpyAsync(d_image[dev]+offset_device[dev], img+offset_host[dev] , bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipDeviceSynchronize();
}
}
            // if we need to split and it's not the first iteration, then we need to copy the previous result from host memory.
if (splits>1 & i>0){
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipMemcpyAsync(d_image[dev]+offset_device[dev], dst+offset_host[dev] , bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipDeviceSynchronize();
}
}
cudaCheckErrors("Memcpy failure on multi split");
for(unsigned int ib=0; (ib<(buffer_length-1)) && ((i+ib)<maxIter); ib++){
// For the gradient
dim3 blockGrad(10, 10, 10);
dim3 gridGrad((image_size[0]+blockGrad.x-1)/blockGrad.x, (image_size[1]+blockGrad.y-1)/blockGrad.y, (curr_slices+buffer_length*2+blockGrad.z-1)/blockGrad.z);
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
// Compute the gradient of the TV norm
// I don't understand why I need to store 2 layers to compute correctly with 1 buffer. The bounding checks should
// be enough but they are not.
hipLaunchKernelGGL(( gradientTV), dim3(gridGrad), dim3(blockGrad),0,stream[dev*nStream_device], d_image[dev],d_dimgTV[dev],(long)(curr_slices+buffer_length*2-1), image_size[1],image_size[0]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
// no need to copy the 2 aux slices here
hipStreamSynchronize(stream[dev*nStream_device]);
hipMemcpyAsync(d_norm2[dev], d_dimgTV[dev]+buffer_pixels, image_size[0]*image_size[1]*curr_slices*sizeof(float), hipMemcpyDeviceToDevice,stream[dev*nStream_device+1]);
}
// Compute the L2 norm of the gradient. For that, reduction is used.
//REDUCE
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
size_t dimblockRed = MAXTHREADS;
size_t dimgridRed = (total_pixels + MAXTHREADS - 1) / MAXTHREADS;
hipStreamSynchronize(stream[dev*nStream_device+1]);
reduceNorm2 << <dimgridRed, dimblockRed, MAXTHREADS*sizeof(float),stream[dev*nStream_device]>> >(d_norm2[dev], d_norm2aux[dev], total_pixels);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
size_t dimblockRed = MAXTHREADS;
size_t dimgridRed = (total_pixels + MAXTHREADS - 1) / MAXTHREADS;
if (dimgridRed > 1) {
reduceSum << <1, dimblockRed, MAXTHREADS*sizeof(float),stream[dev*nStream_device] >> >(d_norm2aux[dev], d_norm2[dev], dimgridRed);
hipStreamSynchronize(stream[dev*nStream_device]);
hipMemcpyAsync(&sumnorm2[dev], d_norm2[dev], sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
else {
hipStreamSynchronize(stream[dev*nStream_device]);
hipMemcpyAsync(&sumnorm2[dev], d_norm2aux[dev], sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipDeviceSynchronize();
}
cudaCheckErrors("Reduction error");
                // Accumulate the norm across devices
sum_curr_spl=0;
// this is CPU code
for (dev = 0; dev < deviceCount; dev++){
sum_curr_spl+=sumnorm2[dev];
}
sum_curr_spl+=0.0000001f; // avoid division by zero
                // If we have more than one split, let's use the result from prior calls
if(i>0 && splits>1){
// this is already stored:
//totalsum=totalsum_prev;
}else{
totalsum=sum_curr_spl;
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
//NORMALIZE
//in a Tesla, maximum blocks =15 SM * 4 blocks/SM
hipLaunchKernelGGL(( divideArrayScalar) , dim3(60),dim3(MAXTHREADS),0,stream[dev*nStream_device], d_dimgTV[dev]+buffer_pixels,(float)sqrt(totalsum),total_pixels);
//MULTIPLY HYPERPARAMETER
hipLaunchKernelGGL(( multiplyArrayScalar), dim3(60),dim3(MAXTHREADS),0,stream[dev*nStream_device], d_dimgTV[dev]+buffer_pixels,alpha, total_pixels);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipDeviceSynchronize();
}
cudaCheckErrors("Scalar operations error");
                //SUBTRACT GRADIENT
//////////////////////////////////////////////
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
hipLaunchKernelGGL(( substractArrays), dim3(60),dim3(MAXTHREADS),0,stream[dev*nStream_device], d_image[dev]+buffer_pixels,d_dimgTV[dev]+buffer_pixels, total_pixels);
}
}
// Synchronize mathematics, make sure bounding pixels are correct
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipDeviceSynchronize();
}
if(splits==1){
for(dev=0; dev<deviceCount;dev++){
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
if (dev<deviceCount-1){
hipSetDevice(gpuids[dev+1]);
hipMemcpy(buffer, d_image[dev+1], buffer_pixels*sizeof(float), hipMemcpyDeviceToHost);
hipSetDevice(gpuids[dev]);
hipMemcpy(d_image[dev]+total_pixels+buffer_pixels,buffer, buffer_pixels*sizeof(float), hipMemcpyHostToDevice);
}
hipDeviceSynchronize();
if (dev>0){
hipSetDevice(gpuids[dev-1]);
hipMemcpyAsync(buffer, d_image[dev-1]+total_pixels+buffer_pixels, buffer_pixels*sizeof(float), hipMemcpyDeviceToHost);
hipSetDevice(gpuids[dev]);
hipMemcpyAsync(d_image[dev],buffer, buffer_pixels*sizeof(float), hipMemcpyHostToDevice);
}
}
}else{
// We need to take it out :(
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(gpuids[dev]);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
hipMemcpyAsync(&dst[linear_idx_start], d_image[dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipDeviceSynchronize();
}
cudaCheckErrors("Memory gather error");
totalsum_prev+=sum_curr_spl;
}
totalsum=totalsum_prev;
}
// If there have been no splits, the result is still in GPU memory
if(splits==1){
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(gpuids[dev]);
curr_slices=((dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*dev;
total_pixels=curr_slices*image_size[0]*image_size[1];
hipMemcpy(dst+slices_per_split*image_size[0]*image_size[1]*dev, d_image[dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost);
}
}
cudaCheckErrors("Copy result back");
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(gpuids[dev]);
hipFree(d_image[dev]);
hipFree(d_norm2aux[dev]);
hipFree(d_dimgTV[dev]);
hipFree(d_norm2[dev]);
}
if (splits==1){
hipHostFree(buffer);
}
if (isHostRegisterSupported& splits>2){
hipHostUnregister(img);
hipHostUnregister(dst);
}
for (int i = 0; i < nStreams; ++i)
hipStreamDestroy(stream[i]) ;
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipDeviceSynchronize();
}
cudaCheckErrors("Memory free");
hipDeviceReset();
}
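// Queries the free memory of every selected GPU and returns, in *mem_GPU_global,
// 95% of the smallest value found, so the splitting logic never over-commits the
// most loaded device. Errors out if any GPU has less than half of its memory free.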
void checkFreeMemory(const GpuIds& gpuids,size_t *mem_GPU_global){
size_t memfree;
size_t memtotal;
int deviceCount = gpuids.GetLength();
for (int dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipMemGetInfo(&memfree,&memtotal);
if(dev==0) *mem_GPU_global=memfree;
if(memfree<memtotal/2){
mexErrMsgIdAndTxt("POCS_TV:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
}
cudaCheckErrors("Check mem error");
*mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
}
*mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
//*mem_GPU_global= insert your known number here, in bytes.
}
|
1aa4d4218407f90730b6576ab1e15e0a32dd5361.cu
|
/*-------------------------------------------------------------------------
*
 * CUDA functions for steepest descent in POCS-type algorithms.
*
 * This file will iteratively minimize the total variation of the input image
 * by steepest descent, with the parameters given, using GPUs.
*
* CODE by Ander Biguri
*
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#define MAXTHREADS 1024
#define MAX_BUFFER 60
#include "POCS_TV.hpp"
#include "gpuUtils.hpp"
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
mexPrintf("%s \n",msg);\
cudaDeviceReset();\
mexErrMsgIdAndTxt("POCS_TV:GPU",cudaGetErrorString(__err));\
} \
} while (0)
// CUDA kernels
//https://stackoverflow.com/questions/21332040/simple-cuda-kernel-optimization/21340927#21340927
__global__ void divideArrayScalar(float* vec,float scalar,const size_t n){
unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x;
for(; i<n; i+=gridDim.x*blockDim.x) {
vec[i]/=scalar;
}
}
__global__ void multiplyArrayScalar(float* vec,float scalar,const size_t n)
{
unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x;
for(; i<n; i+=gridDim.x*blockDim.x) {
vec[i]*=scalar;
}
}
__global__ void substractArrays(float* vec,float* vec2,const size_t n)
{
unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x;
for(; i<n; i+=gridDim.x*blockDim.x) {
vec[i]-=vec2[i];
}
}
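// Backward finite differences of u at voxel (z,y,x): grad[0], grad[1], grad[2] hold
// the differences along z, y and x respectively. Out-of-range neighbours leave the
// corresponding entry untouched (callers initialize grad to zero beforehand).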
__device__ __inline__
void gradient(const float* u, float* grad,
long z, long y, long x,
long depth, long rows, long cols){
unsigned long size2d = rows*cols;
unsigned long long idx = z * size2d + y * cols + x;
float uidx = u[idx];
if ( z - 1 >= 0 && z<depth) {
grad[0] = (uidx-u[(z-1)*size2d + y*cols + x]) ;
}
if ( y - 1 >= 0 && y<rows){
grad[1] = (uidx-u[z*size2d + (y-1)*cols + x]) ;
}
if ( x - 1 >= 0 && x<cols) {
grad[2] = (uidx-u[z*size2d + y*cols + (x-1)]);
}
}
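// Per-voxel derivative of the isotropic TV norm: the voxel's backward differences
// normalized by its gradient magnitude, minus the matching terms from its three
// forward neighbours (x+1, y+1, z+1). eps guards against division by zero.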
__global__ void gradientTV(const float* f, float* dftv,
long depth, long rows, long cols){
unsigned long x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned long y = threadIdx.y + blockIdx.y * blockDim.y;
unsigned long z = threadIdx.z + blockIdx.z * blockDim.z;
unsigned long long idx = z * rows * cols + y * cols + x;
if ( x >= cols || y >= rows || z >= depth )
return;
float df[3] ={0.f,0.f,0.f};
float dfi[3]={0.f,0.f,0.f}; // dfi== \partial f_{i+1,j,k}
float dfj[3]={0.f,0.f,0.f};
float dfk[3]={0.f,0.f,0.f};
gradient(f,df ,z ,y ,x , depth,rows,cols);
gradient(f,dfi ,z ,y ,x+1, depth,rows,cols);
gradient(f,dfj ,z ,y+1,x , depth,rows,cols);
gradient(f,dfk ,z+1,y ,x , depth,rows,cols);
float eps=0.00000001; // avoid division by zero
dftv[idx]=(df[0]+df[1]+df[2])/(sqrt(df[0] *df[0] +df[1] *df[1] +df[2] *df[2])+eps)
-dfi[2]/(sqrt(dfi[0]*dfi[0]+dfi[1]*dfi[1]+dfi[2]*dfi[2]) +eps) // I wish I could precompute this, but if I did then I'd need to recompute the gradient.
-dfj[1]/(sqrt(dfj[0]*dfj[0]+dfj[1]*dfj[1]+dfj[2]*dfj[2]) +eps)
-dfk[0]/(sqrt(dfk[0]*dfk[0]+dfk[1]*dfk[1]+dfk[2]*dfk[2]) +eps);
return;
}
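// Final intra-warp stage of the shared-memory tree reduction. Only used on the
// pre-CUDA 9 path below; the volatile shared memory relies on warp-synchronous
// execution, which is why newer toolkits use __shfl_down_sync instead.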
__device__ void warpReduce(volatile float *sdata, size_t tid) {
sdata[tid] += sdata[tid + 32];
sdata[tid] += sdata[tid + 16];
sdata[tid] += sdata[tid + 8];
sdata[tid] += sdata[tid + 4];
sdata[tid] += sdata[tid + 2];
sdata[tid] += sdata[tid + 1];
}
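// Sum-of-squares reduction: each thread accumulates squared inputs with a
// grid-stride loop, then the block tree-reduces in shared memory and writes its
// partial sum to g_odata[blockIdx.x]. The unrolled steps assume blockDim.x==MAXTHREADS.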
__global__ void reduceNorm2(float *g_idata, float *g_odata, size_t n){
extern __shared__ volatile float sdata[];
//http://stackoverflow.com/a/35133396/1485872
size_t tid = threadIdx.x;
size_t i = blockIdx.x*blockDim.x + tid;
size_t gridSize = blockDim.x*gridDim.x;
float mySum = 0;
float value=0;
while (i < n) {
value=g_idata[i]; //avoid reading twice
mySum += value*value;
i += gridSize;
}
sdata[tid] = mySum;
__syncthreads();
if (tid < 512)
sdata[tid] += sdata[tid + 512];
__syncthreads();
if (tid < 256)
sdata[tid] += sdata[tid + 256];
__syncthreads();
if (tid < 128)
sdata[tid] += sdata[tid + 128];
__syncthreads();
if (tid < 64)
sdata[tid] += sdata[tid + 64];
__syncthreads();
#if (__CUDART_VERSION >= 9000)
if ( tid < 32 )
{
mySum = sdata[tid] + sdata[tid + 32];
for (int offset = warpSize/2; offset > 0; offset /= 2) {
mySum += __shfl_down_sync(0xFFFFFFFF, mySum, offset,32);
}
}
#else
if (tid < 32) {
warpReduce(sdata, tid);
mySum = sdata[0];
}
#endif
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
__global__ void reduceSum(float *g_idata, float *g_odata, size_t n){
extern __shared__ volatile float sdata[];
//http://stackoverflow.com/a/35133396/1485872
size_t tid = threadIdx.x;
size_t i = blockIdx.x*blockDim.x + tid;
size_t gridSize = blockDim.x*gridDim.x;
float mySum = 0;
// float value=0;
while (i < n) {
mySum += g_idata[i];
i += gridSize;
}
sdata[tid] = mySum;
__syncthreads();
if (tid < 512)
sdata[tid] += sdata[tid + 512];
__syncthreads();
if (tid < 256)
sdata[tid] += sdata[tid + 256];
__syncthreads();
if (tid < 128)
sdata[tid] += sdata[tid + 128];
__syncthreads();
if (tid < 64)
sdata[tid] += sdata[tid + 64];
__syncthreads();
#if (__CUDART_VERSION >= 9000)
if ( tid < 32 )
{
mySum = sdata[tid] + sdata[tid + 32];
for (int offset = warpSize/2; offset > 0; offset /= 2) {
mySum += __shfl_down_sync(0xFFFFFFFF, mySum, offset,32);
}
}
#else
if (tid < 32) {
warpReduce(sdata, tid);
mySum = sdata[0];
}
#endif
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
// main function
void pocs_tv(float* img,float* dst,float alpha,const long* image_size, int maxIter, const GpuIds& gpuids){
// Prepare for MultiGPU
int deviceCount = gpuids.GetLength();
cudaCheckErrors("Device query fail");
if (deviceCount == 0) {
mexErrMsgIdAndTxt("POCS_TV:GPU","There are no available device(s) that support CUDA\n");
}
//
// CODE assumes
// 1.-All available devices are usable by this code
// 2.-All available devices are equal, they are the same machine (warning thrown)
// Check the available devices, and if they are the same
if (!gpuids.AreEqualDevices()) {
mexWarnMsgIdAndTxt("minimizeTV:POCS_TV:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed.");
}
int dev;
// We don't know if the devices are being used. Let's check that, and only use the amount of memory we need.
size_t mem_GPU_global;
checkFreeMemory(gpuids, &mem_GPU_global);
// 5% of free memory should be enough; we have almost no variables in these kernels
size_t total_pixels = image_size[0] * image_size[1] * image_size[2] ;
size_t mem_slice_image = sizeof(float)* image_size[0] * image_size[1] ;
size_t mem_size_image = sizeof(float)* total_pixels;
size_t mem_auxiliary = sizeof(float)* (total_pixels + MAXTHREADS - 1) / MAXTHREADS;
// Decide how are we handling the distribution of computation
size_t mem_img_each_GPU;
unsigned int buffer_length=2;
//Does everything fit in the GPU?
unsigned int slices_per_split;
// if it is a thin problem (no need to split), just use one GPU
if (image_size[2]<4){deviceCount=1;}
unsigned int splits=1; // if the number does not fit in an uint, you have more serious trouble than this.
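// Memory budget: each device keeps three image-sized buffers (the image itself, its
// TV gradient and a scratch copy used by the norm reduction) plus a small auxiliary
// reduction buffer and the halo slices shared with neighbouring devices.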
if(mem_GPU_global> 3*mem_size_image+3*(deviceCount-1)*mem_slice_image*buffer_length+mem_auxiliary){
// We only need to split if we have extra GPUs
slices_per_split=(image_size[2]+deviceCount-1)/deviceCount;
mem_img_each_GPU=mem_slice_image*((slices_per_split+buffer_length*2));
}else{
// As mem_auxiliary is not expected to be a large value (for a 2000^3 image it is around 28 MB), let's for now assume we need it all
size_t mem_free=mem_GPU_global-mem_auxiliary;
splits=(unsigned int)(ceil(((float)(3*mem_size_image)/(float)(deviceCount))/mem_free));
// Now, there is an overhead here, as each split should have 2 more slices, to account for the overlap of images.
// Let's make sure these 2 slices fit; if they do not, add 1 to splits.
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits);
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
// if the new stuff does not fit in the GPU, it means we are in the edge case where adding that extra slice will overflow memory
if (mem_GPU_global< 3*mem_img_each_GPU+mem_auxiliary){
// one more split should do the job, as it's an edge case.
splits++;
//recompute for later
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits); // amount of slices that fit on a GPU. Later we add 2 to these, as we need them for overlap
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
}
// How many EXTRA buffer slices are able to fit in the remaining memory?
// Only do it if there are splits needed.
if(splits>1){
mem_free=mem_GPU_global-(3*mem_img_each_GPU+mem_auxiliary);
unsigned int extra_buff=(mem_free/mem_slice_image);
buffer_length=(extra_buff/2)/3; // we need double whatever this results in, rounded down.
buffer_length=max(buffer_length,2);// minimum 2
buffer_length=min(MAX_BUFFER,buffer_length);
mem_img_each_GPU=mem_slice_image*(slices_per_split+buffer_length*2);
}else{
buffer_length=2;
}
// Assert
if (mem_GPU_global< 3*mem_img_each_GPU+mem_auxiliary){
mexErrMsgIdAndTxt("POCS_TV:GPU","Assertion Failed. Logic behind spliting flawed! Please tell: [email protected]\n");
}
}
// Assert
if ((slices_per_split+buffer_length*2)*image_size[0]*image_size[1]* sizeof(float)!= mem_img_each_GPU){
mexErrMsgIdAndTxt("POCS_TV:GPU","Assertion Failed. Memory needed calculation broken! Please tell: [email protected]\n");
}
float** d_image= (float**)malloc(deviceCount*sizeof(float*));
float** d_dimgTV= (float**)malloc(deviceCount*sizeof(float*));
float** d_norm2aux= (float**)malloc(deviceCount*sizeof(float*));
float** d_norm2= (float**)malloc(deviceCount*sizeof(float*));
// allocate memory in each GPU
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaMalloc((void**)&d_image[dev] , mem_img_each_GPU);
cudaMemset(d_image[dev],0 , mem_img_each_GPU);
cudaMalloc((void**)&d_dimgTV[dev] , mem_img_each_GPU);
cudaMalloc((void**)&d_norm2[dev] , slices_per_split*mem_slice_image);
cudaMalloc((void**)&d_norm2aux[dev] , mem_auxiliary);
cudaCheckErrors("Malloc error");
}
unsigned long long buffer_pixels=buffer_length*image_size[0]*image_size[1];
float* buffer;
if(splits>1){
mexWarnMsgIdAndTxt("minimizeTV:POCS_TV:Image_split","Your image can not be fully split between the available GPUs. The computation of minTV will be significantly slowed due to the image size.\nApproximated mathematics turned on for computational speed.");
}else{
cudaMallocHost((void**)&buffer,buffer_length*image_size[0]*image_size[1]*sizeof(float));
}
// Let's try to make the host memory pinned:
// We already queried the GPUs and assumed they are the same, thus they should have the same attributes.
int isHostRegisterSupported = 0;
#if CUDART_VERSION >= 9020
cudaDeviceGetAttribute(&isHostRegisterSupported,cudaDevAttrHostRegisterSupported,gpuids[0]);
#endif
// splits>2 is completely empirical observation
if (isHostRegisterSupported & splits>2){
cudaHostRegister(img ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),cudaHostRegisterPortable);
cudaHostRegister(dst ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),cudaHostRegisterPortable);
}
cudaCheckErrors("Error pinning memory");
// Create streams
int nStream_device=2;
int nStreams=deviceCount*nStream_device;
cudaStream_t* stream=(cudaStream_t*)malloc(nStreams*sizeof(cudaStream_t));
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
for (int i = 0; i < nStream_device; ++i){
cudaStreamCreate(&stream[i+dev*nStream_device]);
}
}
cudaCheckErrors("Stream creation fail");
// For the reduction
double totalsum_prev;
double totalsum;
float sum_curr_spl;
float * sumnorm2;
cudaMallocHost((void**)&sumnorm2,deviceCount*sizeof(float));
unsigned int curr_slices;
unsigned long long curr_pixels;
size_t linear_idx_start;
unsigned long long* offset_device=(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* offset_host =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* bytes_device =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
bool is_first_chunk;
bool is_last_chunk;
for(unsigned int i=0;i<maxIter;i+=(buffer_length-1)){
if(splits>1){
totalsum_prev=0;
}
for(unsigned int sp=0;sp<splits;sp++){
// For each iteration we need to compute the whole image. The ordering of these loops
// needs to be like this due to the bounding layers between splits. If more than 1 split is needed
// per GPU then there is no other way than taking the entire memory out of the GPU and putting it back.
// If the memory can be fully shared between GPUs without extra splits, then there is an easy way of synchronizing the memory.
// Copy image to memory
for (dev = 0; dev < deviceCount; dev++){
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
curr_pixels=curr_slices*image_size[0]*image_size[1];
linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev);
// Check if it's the first or last chunk
is_last_chunk=!((sp*deviceCount+dev)<deviceCount*splits-1);
is_first_chunk=!(sp*deviceCount+dev);
// let's compute where we start copies and how much. This avoids 3 calls to Memcpy
offset_device[dev]=buffer_pixels*is_first_chunk;
offset_host[dev]=linear_idx_start-buffer_pixels*!is_first_chunk;
bytes_device[dev]=curr_pixels+buffer_pixels*!is_first_chunk+buffer_pixels*!is_last_chunk;
}
if(i==0){
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaMemcpyAsync(d_image[dev]+offset_device[dev], img+offset_host[dev] , bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaDeviceSynchronize();
}
}
// if we need to split and it's not the first iteration, then we need to copy the previous result from host memory.
if (splits>1 & i>0){
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaMemcpyAsync(d_image[dev]+offset_device[dev], dst+offset_host[dev] , bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaDeviceSynchronize();
}
}
cudaCheckErrors("Memcpy failure on multi split");
for(unsigned int ib=0; (ib<(buffer_length-1)) && ((i+ib)<maxIter); ib++){
// For the gradient
dim3 blockGrad(10, 10, 10);
dim3 gridGrad((image_size[0]+blockGrad.x-1)/blockGrad.x, (image_size[1]+blockGrad.y-1)/blockGrad.y, (curr_slices+buffer_length*2+blockGrad.z-1)/blockGrad.z);
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
// Compute the gradient of the TV norm
// I don't understand why I need to store 2 layers to compute correctly with 1 buffer. The bounding checks should
// be enough but they are not.
gradientTV<<<gridGrad, blockGrad,0,stream[dev*nStream_device]>>>(d_image[dev],d_dimgTV[dev],(long)(curr_slices+buffer_length*2-1), image_size[1],image_size[0]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
// no need to copy the 2 aux slices here
cudaStreamSynchronize(stream[dev*nStream_device]);
cudaMemcpyAsync(d_norm2[dev], d_dimgTV[dev]+buffer_pixels, image_size[0]*image_size[1]*curr_slices*sizeof(float), cudaMemcpyDeviceToDevice,stream[dev*nStream_device+1]);
}
// Compute the L2 norm of the gradient. For that, reduction is used.
//REDUCE
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
size_t dimblockRed = MAXTHREADS;
size_t dimgridRed = (total_pixels + MAXTHREADS - 1) / MAXTHREADS;
cudaStreamSynchronize(stream[dev*nStream_device+1]);
reduceNorm2 << <dimgridRed, dimblockRed, MAXTHREADS*sizeof(float),stream[dev*nStream_device]>> >(d_norm2[dev], d_norm2aux[dev], total_pixels);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
size_t dimblockRed = MAXTHREADS;
size_t dimgridRed = (total_pixels + MAXTHREADS - 1) / MAXTHREADS;
if (dimgridRed > 1) {
reduceSum << <1, dimblockRed, MAXTHREADS*sizeof(float),stream[dev*nStream_device] >> >(d_norm2aux[dev], d_norm2[dev], dimgridRed);
cudaStreamSynchronize(stream[dev*nStream_device]);
cudaMemcpyAsync(&sumnorm2[dev], d_norm2[dev], sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
else {
cudaStreamSynchronize(stream[dev*nStream_device]);
cudaMemcpyAsync(&sumnorm2[dev], d_norm2aux[dev], sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaDeviceSynchronize();
}
cudaCheckErrors("Reduction error");
// Accumulate the norm across devices
sum_curr_spl=0;
// this is CPU code
for (dev = 0; dev < deviceCount; dev++){
sum_curr_spl+=sumnorm2[dev];
}
sum_curr_spl+=0.0000001f; // avoid division by zero
// If we have more than one split, let's use the result from prior calls
if(i>0 && splits>1){
// this is already stored:
//totalsum=totalsum_prev;
}else{
totalsum=sum_curr_spl;
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
//NORMALIZE
//in a Tesla, maximum blocks =15 SM * 4 blocks/SM
divideArrayScalar <<<60,MAXTHREADS,0,stream[dev*nStream_device]>>>(d_dimgTV[dev]+buffer_pixels,(float)sqrt(totalsum),total_pixels);
//MULTIPLY HYPERPARAMETER
multiplyArrayScalar<<<60,MAXTHREADS,0,stream[dev*nStream_device]>>>(d_dimgTV[dev]+buffer_pixels,alpha, total_pixels);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaDeviceSynchronize();
}
cudaCheckErrors("Scalar operations error");
//SUBTRACT GRADIENT
//////////////////////////////////////////////
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
substractArrays<<<60,MAXTHREADS,0,stream[dev*nStream_device]>>>(d_image[dev]+buffer_pixels,d_dimgTV[dev]+buffer_pixels, total_pixels);
}
}
// Synchronize mathematics, make sure bounding pixels are correct
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaDeviceSynchronize();
}
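// If the whole image fits across the GPUs (splits==1), refresh each device's halo
// (buffer) slices from its neighbours through the pinned host buffer, so the next
// iteration sees up-to-date boundary values. Otherwise, stream each device's
// updated slab back into dst on the host.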
if(splits==1){
for(dev=0; dev<deviceCount;dev++){
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
if (dev<deviceCount-1){
cudaSetDevice(gpuids[dev+1]);
cudaMemcpy(buffer, d_image[dev+1], buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost);
cudaSetDevice(gpuids[dev]);
cudaMemcpy(d_image[dev]+total_pixels+buffer_pixels,buffer, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice);
}
cudaDeviceSynchronize();
if (dev>0){
cudaSetDevice(gpuids[dev-1]);
cudaMemcpyAsync(buffer, d_image[dev-1]+total_pixels+buffer_pixels, buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost);
cudaSetDevice(gpuids[dev]);
cudaMemcpyAsync(d_image[dev],buffer, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice);
}
}
}else{
// We need to take it out :(
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(gpuids[dev]);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
cudaMemcpyAsync(&dst[linear_idx_start], d_image[dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaDeviceSynchronize();
}
cudaCheckErrors("Memory gather error");
totalsum_prev+=sum_curr_spl;
}
totalsum=totalsum_prev;
}
// If there have been no splits, the result is still in GPU memory
if(splits==1){
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(gpuids[dev]);
curr_slices=((dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*dev;
total_pixels=curr_slices*image_size[0]*image_size[1];
cudaMemcpy(dst+slices_per_split*image_size[0]*image_size[1]*dev, d_image[dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost);
}
}
cudaCheckErrors("Copy result back");
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(gpuids[dev]);
cudaFree(d_image[dev]);
cudaFree(d_norm2aux[dev]);
cudaFree(d_dimgTV[dev]);
cudaFree(d_norm2[dev]);
}
if (splits==1){
cudaFreeHost(buffer);
}
if (isHostRegisterSupported& splits>2){
cudaHostUnregister(img);
cudaHostUnregister(dst);
}
for (int i = 0; i < nStreams; ++i)
cudaStreamDestroy(stream[i]) ;
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaDeviceSynchronize();
}
cudaCheckErrors("Memory free");
cudaDeviceReset();
}
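// Queries the free memory of every selected GPU and returns, in *mem_GPU_global,
// 95% of the smallest value found, so the splitting logic never over-commits the
// most loaded device. Errors out if any GPU has less than half of its memory free.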
void checkFreeMemory(const GpuIds& gpuids,size_t *mem_GPU_global){
size_t memfree;
size_t memtotal;
int deviceCount = gpuids.GetLength();
for (int dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaMemGetInfo(&memfree,&memtotal);
if(dev==0) *mem_GPU_global=memfree;
if(memfree<memtotal/2){
mexErrMsgIdAndTxt("POCS_TV:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
}
cudaCheckErrors("Check mem error");
*mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
}
*mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
//*mem_GPU_global= insert your known number here, in bytes.
}
|
f90b6f17f2d5482550d2465427e33f8254710d2f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define N 1024
#define N_THR 512
void fill_ints(int* a, int size){
for(int i =0; i<size; i++)
a[i]=i;
}
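// Block-level dot product: every thread stores x[i]*y[i] in shared memory and
// thread 0 of each block sums those products and accumulates the block's partial
// sum into *r.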
__global__ void dotVecs(int *x, int *y, int *r){
__shared__ int s_tmp[N_THR];
int index = threadIdx.x + blockIdx.x * blockDim.x;
int temp = x[index] * y[index];
s_tmp[threadIdx.x] = temp; // store the product in shared memory
__syncthreads();
// Thread 0 performs the reduction
if(threadIdx.x == 0){
int sum = 0;
for(int i = 0 ; i < N_THR ; i++) sum += s_tmp[i];
atomicAdd(r, sum); // accumulate this block's partial sum; atomic avoids a race between the two blocks
}
}
int main(void){
int *a, *b, *c; // host pointers
int *d_a, *d_b, *d_c; // device pointers
int size = N * sizeof(int);
// Alloc space for device copies of a, b, c
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, sizeof(int));
a = (int *)malloc(size);
fill_ints(a, N); // initialize host array with 0..N-1
b = (int *)malloc(size);
fill_ints(b, N);
c = (int *)malloc(sizeof(int));
// Copy data from host to device memory
// hipMemcpyHostToDevice is a flag determining copying from host to dev.
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
hipMemset(d_c,0,sizeof(int));
// Launch the dot-product kernel with 2 blocks of N_THR threads each
// Kernel calls are asynchronous
hipLaunchKernelGGL(( dotVecs), dim3(2),dim3(N_THR), 0, 0, d_a, d_b, d_c);
// Copy results from device to host
// hipMemcpy blocks CPU until Kernels finish execution
hipMemcpy(c, d_c, sizeof(int), hipMemcpyDeviceToHost);
printf("%d\n",*c);
// needs hipFree to deallocate device pointers
hipFree(d_a); hipFree(d_b); hipFree(d_c);
free(a); free(b); free(c);
return 0;
}
|
f90b6f17f2d5482550d2465427e33f8254710d2f.cu
|
#include <stdio.h>
#include <stdlib.h>
#define N 1024
#define N_THR 512
void fill_ints(int* a, int size){
for(int i =0; i<size; i++)
a[i]=i;
}
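// Block-level dot product: every thread stores x[i]*y[i] in shared memory and
// thread 0 of each block sums those products and accumulates the block's partial
// sum into *r.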
__global__ void dotVecs(int *x, int *y, int *r){
__shared__ int s_tmp[N_THR];
int index = threadIdx.x + blockIdx.x * blockDim.x;
int temp = x[index] * y[index];
s_tmp[threadIdx.x] = temp; // store the product in shared memory
__syncthreads();
// Thread 0 performs the reduction
if(threadIdx.x == 0){
int sum = 0;
for(int i = 0 ; i < N_THR ; i++) sum += s_tmp[i];
atomicAdd(r, sum); // accumulate this block's partial sum; atomic avoids a race between the two blocks
}
}
int main(void){
int *a, *b, *c; // host pointers
int *d_a, *d_b, *d_c; // device pointers
int size = N * sizeof(int);
// Alloc space for device copies of a, b, c
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, sizeof(int));
a = (int *)malloc(size);
fill_ints(a, N); // initialize host array with 0..N-1
b = (int *)malloc(size);
fill_ints(b, N);
c = (int *)malloc(sizeof(int));
// Copy data from host to device memory
// cudaMemcpyHostToDevice is a flag determining copying from host to dev.
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
cudaMemset(d_c,0,sizeof(int));
// Launch the dot-product kernel with 2 blocks of N_THR threads each
// Kernel calls are asynchronous
dotVecs<<<2,N_THR>>>(d_a, d_b, d_c);
// Copy results from device to host
// cudaMemcpy blocks CPU until Kernels finish execution
cudaMemcpy(c, d_c, sizeof(int), cudaMemcpyDeviceToHost);
printf("%d\n",*c);
// needs cudaFree to deallocate device pointers
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
free(a); free(b); free(c);
return 0;
}
|
379e71eb6dde202e511995b9b521616ac397c16f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <io/utilities/output_builder.cuh>
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/io/text/byte_range_info.hpp>
#include <cudf/io/text/data_chunk_source.hpp>
#include <cudf/io/text/detail/multistate.hpp>
#include <cudf/io/text/detail/tile_state.hpp>
#include <cudf/io/text/multibyte_split.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/detail/strings_column_factories.cuh>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_pool.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <thrust/copy.h>
#include <thrust/find.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform.h>
#include <hipcub/hipcub.hpp>
#include <cub/block/block_scan.cuh>
#include <cstdint>
#include <limits>
#include <memory>
#include <numeric>
#include <optional>
namespace {
using cudf::io::text::detail::multistate;
int32_t constexpr ITEMS_PER_THREAD = 64;
int32_t constexpr THREADS_PER_TILE = 128;
int32_t constexpr ITEMS_PER_TILE = ITEMS_PER_THREAD * THREADS_PER_TILE;
int32_t constexpr TILES_PER_CHUNK = 4096;
int32_t constexpr ITEMS_PER_CHUNK = ITEMS_PER_TILE * TILES_PER_CHUNK;
constexpr multistate transition_init(char c, cudf::device_span<char const> delim)
{
auto result = multistate();
result.enqueue(0, 0);
for (std::size_t i = 0; i < delim.size(); i++) {
if (delim[i] == c) { result.enqueue(i, i + 1); }
}
return result;
}
constexpr multistate transition(char c, multistate state, cudf::device_span<char const> delim)
{
auto result = multistate();
result.enqueue(0, 0);
for (uint8_t i = 0; i < state.size(); i++) {
auto const tail = state.get_tail(i);
if (tail < delim.size() && delim[tail] == c) { result.enqueue(state.get_head(i), tail + 1); }
}
return result;
}
struct PatternScan {
using BlockScan = hipcub::BlockScan<multistate, THREADS_PER_TILE>;
using BlockScanCallback = cudf::io::text::detail::scan_tile_state_callback<multistate>;
struct _TempStorage {
typename BlockScan::TempStorage scan;
};
_TempStorage& _temp_storage;
using TempStorage = cub::Uninitialized<_TempStorage>;
__device__ inline PatternScan(TempStorage& temp_storage) : _temp_storage(temp_storage.Alias()) {}
__device__ inline void Scan(cudf::size_type tile_idx,
cudf::io::text::detail::scan_tile_state_view<multistate> tile_state,
cudf::device_span<char const> delim,
char (&thread_data)[ITEMS_PER_THREAD],
multistate& thread_multistate)
{
thread_multistate = transition_init(thread_data[0], delim);
for (uint32_t i = 1; i < ITEMS_PER_THREAD; i++) {
thread_multistate = transition(thread_data[i], thread_multistate, delim);
}
auto prefix_callback = BlockScanCallback(tile_state, tile_idx);
BlockScan(_temp_storage.scan)
.ExclusiveSum(thread_multistate, thread_multistate, prefix_callback);
}
};
// type aliases to distinguish between row offsets and character offsets
using output_offset = int64_t;
using byte_offset = int64_t;
// multibyte_split works by splitting up inputs into ITEMS_PER_THREAD bytes per thread, and transforming
// them into data structures called "multistates". These multistates are created by searching a
// trie, but instead of a traditional trie where the search begins at a single node at the beginning,
// we allow our search to begin anywhere within the trie. The position within the trie is
// stored as a "partial match path", which indicates "we can get from here to there by a set of
// specific transitions". By scanning together multistates, we effectively know "we can get here
// from the beginning by following the inputs". By doing this, each thread knows exactly what state
// it begins in. From there, each thread can then take deterministic action. In this case, the
// deterministic action is counting and outputting delimiter offsets when a delimiter is found.
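// For example, with the delimiter "xyz", the multistate built for the text "yz"
// contains the segment (1 -> 3): if the preceding bytes ended one character into
// the delimiter, this fragment completes it. Scanning these segment maps across
// threads tells each thread the exact delimiter position at which its bytes begin.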
__global__ void multibyte_split_init_kernel(
cudf::size_type base_tile_idx,
cudf::size_type num_tiles,
cudf::io::text::detail::scan_tile_state_view<multistate> tile_multistates,
cudf::io::text::detail::scan_tile_state_view<output_offset> tile_output_offsets,
cudf::io::text::detail::scan_tile_status status =
cudf::io::text::detail::scan_tile_status::invalid)
{
auto const thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_idx < num_tiles) {
auto const tile_idx = base_tile_idx + thread_idx;
tile_multistates.set_status(tile_idx, status);
tile_output_offsets.set_status(tile_idx, status);
}
}
__global__ void multibyte_split_seed_kernel(
cudf::io::text::detail::scan_tile_state_view<multistate> tile_multistates,
cudf::io::text::detail::scan_tile_state_view<output_offset> tile_output_offsets,
multistate tile_multistate_seed,
output_offset tile_output_offset)
{
auto const thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_idx == 0) {
tile_multistates.set_inclusive_prefix(-1, tile_multistate_seed);
tile_output_offsets.set_inclusive_prefix(-1, tile_output_offset);
}
}
__global__ __launch_bounds__(THREADS_PER_TILE) void multibyte_split_kernel(
cudf::size_type base_tile_idx,
byte_offset base_input_offset,
output_offset base_output_offset,
cudf::io::text::detail::scan_tile_state_view<multistate> tile_multistates,
cudf::io::text::detail::scan_tile_state_view<output_offset> tile_output_offsets,
cudf::device_span<char const> delim,
cudf::device_span<char const> chunk_input_chars,
cudf::split_device_span<byte_offset> row_offsets)
{
using InputLoad =
cub::BlockLoad<char, THREADS_PER_TILE, ITEMS_PER_THREAD, cub::BLOCK_LOAD_WARP_TRANSPOSE>;
using OffsetScan = hipcub::BlockScan<output_offset, THREADS_PER_TILE>;
using OffsetScanCallback = cudf::io::text::detail::scan_tile_state_callback<output_offset>;
__shared__ union {
typename InputLoad::TempStorage input_load;
typename PatternScan::TempStorage pattern_scan;
typename OffsetScan::TempStorage offset_scan;
} temp_storage;
int32_t const tile_idx = base_tile_idx + blockIdx.x;
int32_t const tile_input_offset = blockIdx.x * ITEMS_PER_TILE;
int32_t const thread_input_offset = tile_input_offset + threadIdx.x * ITEMS_PER_THREAD;
int32_t const thread_input_size = chunk_input_chars.size() - thread_input_offset;
// STEP 1: Load inputs
char thread_chars[ITEMS_PER_THREAD];
InputLoad(temp_storage.input_load)
.Load(chunk_input_chars.data() + tile_input_offset,
thread_chars,
chunk_input_chars.size() - tile_input_offset);
// STEP 2: Scan inputs to determine absolute thread states
multistate thread_multistate;
__syncthreads(); // required before temp_memory re-use
PatternScan(temp_storage.pattern_scan)
.Scan(tile_idx, tile_multistates, delim, thread_chars, thread_multistate);
// STEP 3: Flag matches
output_offset thread_offset{};
uint32_t thread_match_mask[(ITEMS_PER_THREAD + 31) / 32]{};
for (int32_t i = 0; i < ITEMS_PER_THREAD; i++) {
thread_multistate = transition(thread_chars[i], thread_multistate, delim);
auto const thread_state = thread_multistate.max_tail();
auto const is_match = i < thread_input_size and thread_state == delim.size();
thread_match_mask[i / 32] |= uint32_t{is_match} << (i % 32);
thread_offset += output_offset{is_match};
}
// STEP 4: Scan flags to determine absolute thread output offset
auto prefix_callback = OffsetScanCallback(tile_output_offsets, tile_idx);
__syncthreads(); // required before temp_memory re-use
OffsetScan(temp_storage.offset_scan).ExclusiveSum(thread_offset, thread_offset, prefix_callback);
// Step 5: Assign outputs from each thread using match offsets.
for (int32_t i = 0; i < ITEMS_PER_THREAD; i++) {
auto const is_match = (thread_match_mask[i / 32] >> (i % 32)) & 1u;
if (is_match) {
auto const match_end = base_input_offset + thread_input_offset + i + 1;
row_offsets[thread_offset - base_output_offset] = match_end;
thread_offset++;
}
}
}
__global__ __launch_bounds__(THREADS_PER_TILE) void byte_split_kernel(
cudf::size_type base_tile_idx,
byte_offset base_input_offset,
output_offset base_output_offset,
cudf::io::text::detail::scan_tile_state_view<output_offset> tile_output_offsets,
char delim,
cudf::device_span<char const> chunk_input_chars,
cudf::split_device_span<byte_offset> row_offsets)
{
using InputLoad =
cub::BlockLoad<char, THREADS_PER_TILE, ITEMS_PER_THREAD, cub::BLOCK_LOAD_WARP_TRANSPOSE>;
using OffsetScan = hipcub::BlockScan<output_offset, THREADS_PER_TILE>;
using OffsetScanCallback = cudf::io::text::detail::scan_tile_state_callback<output_offset>;
__shared__ union {
typename InputLoad::TempStorage input_load;
typename OffsetScan::TempStorage offset_scan;
} temp_storage;
int32_t const tile_idx = base_tile_idx + blockIdx.x;
int32_t const tile_input_offset = blockIdx.x * ITEMS_PER_TILE;
int32_t const thread_input_offset = tile_input_offset + threadIdx.x * ITEMS_PER_THREAD;
int32_t const thread_input_size = chunk_input_chars.size() - thread_input_offset;
// STEP 1: Load inputs
char thread_chars[ITEMS_PER_THREAD];
InputLoad(temp_storage.input_load)
.Load(chunk_input_chars.data() + tile_input_offset,
thread_chars,
chunk_input_chars.size() - tile_input_offset);
// STEP 2: Flag matches
output_offset thread_offset{};
uint32_t thread_match_mask[(ITEMS_PER_THREAD + 31) / 32]{};
for (int32_t i = 0; i < ITEMS_PER_THREAD; i++) {
auto const is_match = i < thread_input_size and thread_chars[i] == delim;
thread_match_mask[i / 32] |= uint32_t{is_match} << (i % 32);
thread_offset += output_offset{is_match};
}
// STEP 3: Scan flags to determine absolute thread output offset
auto prefix_callback = OffsetScanCallback(tile_output_offsets, tile_idx);
__syncthreads(); // required before temp_memory re-use
OffsetScan(temp_storage.offset_scan).ExclusiveSum(thread_offset, thread_offset, prefix_callback);
// Step 4: Assign outputs from each thread using match offsets.
for (int32_t i = 0; i < ITEMS_PER_THREAD; i++) {
auto const is_match = (thread_match_mask[i / 32] >> (i % 32)) & 1u;
if (is_match) {
auto const match_end = base_input_offset + thread_input_offset + i + 1;
row_offsets[thread_offset - base_output_offset] = match_end;
thread_offset++;
}
}
}
} // namespace
namespace cudf {
namespace io {
namespace text {
namespace detail {
void fork_stream(std::vector<rmm::cuda_stream_view> streams, rmm::cuda_stream_view stream)
{
hipEvent_t event;
hipEventCreate(&event);
hipEventRecord(event, stream);
for (uint32_t i = 0; i < streams.size(); i++) {
hipStreamWaitEvent(streams[i], event, 0);
}
hipEventDestroy(event);
}
void join_stream(std::vector<rmm::cuda_stream_view> streams, rmm::cuda_stream_view stream)
{
hipEvent_t event;
hipEventCreate(&event);
for (uint32_t i = 0; i < streams.size(); i++) {
hipEventRecord(event, streams[i]);
hipStreamWaitEvent(stream, event, 0);
}
hipEventDestroy(event);
}
std::vector<rmm::cuda_stream_view> get_streams(int32_t count, rmm::cuda_stream_pool& stream_pool)
{
auto streams = std::vector<rmm::cuda_stream_view>();
for (int32_t i = 0; i < count; i++) {
streams.emplace_back(stream_pool.get_stream());
}
return streams;
}
std::unique_ptr<cudf::column> multibyte_split(cudf::io::text::data_chunk_source const& source,
std::string const& delimiter,
byte_range_info byte_range,
bool strip_delimiters,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr,
rmm::cuda_stream_pool& stream_pool)
{
CUDF_FUNC_RANGE();
if (byte_range.empty()) { return make_empty_column(type_id::STRING); }
auto device_delim = cudf::string_scalar(delimiter, true, stream, mr);
auto sorted_delim = delimiter;
std::sort(sorted_delim.begin(), sorted_delim.end());
auto [_last_char, _last_char_count, max_duplicate_tokens] = std::accumulate(
sorted_delim.begin(), sorted_delim.end(), std::make_tuple('\0', 0, 0), [](auto acc, char c) {
if (std::get<0>(acc) != c) {
std::get<0>(acc) = c;
std::get<1>(acc) = 0;
}
std::get<1>(acc)++;
std::get<2>(acc) = ::max(std::get<1>(acc), std::get<2>(acc));
return acc;
});
CUDF_EXPECTS(max_duplicate_tokens < multistate::max_segment_count,
"delimiter contains too many duplicate tokens to produce a deterministic result.");
CUDF_EXPECTS(delimiter.size() < multistate::max_segment_value,
"delimiter contains too many total tokens to produce a deterministic result.");
auto concurrency = 2;
auto streams = get_streams(concurrency, stream_pool);
// must be at least 32 when using warp-reduce on partials
// must be at least 1 more than max possible concurrent tiles
// best when at least 32 more than max possible concurrent tiles, due to rolling `invalid`s
auto num_tile_states = ::max(32, TILES_PER_CHUNK * concurrency + 32);
auto tile_multistates = scan_tile_state<multistate>(num_tile_states, stream);
auto tile_offsets = scan_tile_state<output_offset>(num_tile_states, stream);
hipLaunchKernelGGL(( multibyte_split_init_kernel), dim3(TILES_PER_CHUNK),
dim3(THREADS_PER_TILE),
0,
stream.value(), //
-TILES_PER_CHUNK,
TILES_PER_CHUNK,
tile_multistates,
tile_offsets,
cudf::io::text::detail::scan_tile_status::oob);
auto multistate_seed = multistate();
multistate_seed.enqueue(0, 0); // this represents the first state in the pattern.
// Seeding the tile state with an identity value allows the 0th tile to follow the same logic as
// the Nth tile, assuming it can look up an inclusive prefix. Without this seed, the 0th block
// would have to follow separate logic.
hipLaunchKernelGGL(( multibyte_split_seed_kernel), dim3(1), dim3(1), 0, stream.value(), //
tile_multistates,
tile_offsets,
multistate_seed,
0);
auto reader = source.create_reader();
auto chunk_offset = std::max<byte_offset>(0, byte_range.offset() - delimiter.size());
auto const byte_range_end = byte_range.offset() + byte_range.size();
reader->skip_bytes(chunk_offset);
// amortize output chunk allocations over 8 worst-case outputs. This limits the overallocation
constexpr auto max_growth = 8;
output_builder<byte_offset> row_offset_storage(ITEMS_PER_CHUNK, max_growth, stream);
output_builder<char> char_storage(ITEMS_PER_CHUNK, max_growth, stream);
fork_stream(streams, stream);
hipEvent_t last_launch_event;
hipEventCreate(&last_launch_event);
auto& read_stream = streams[0];
auto& scan_stream = streams[1];
auto chunk = reader->get_next_chunk(ITEMS_PER_CHUNK, read_stream);
int64_t base_tile_idx = 0;
std::optional<byte_offset> first_row_offset;
std::optional<byte_offset> last_row_offset;
bool found_last_offset = false;
if (byte_range.offset() == 0) { first_row_offset = 0; }
std::swap(read_stream, scan_stream);
while (chunk->size() > 0) {
// if we found the last delimiter, or didn't find delimiters inside the byte range at all: abort
if (last_row_offset.has_value() or
(not first_row_offset.has_value() and chunk_offset >= byte_range_end)) {
break;
}
auto tiles_in_launch =
cudf::util::div_rounding_up_safe(chunk->size(), static_cast<std::size_t>(ITEMS_PER_TILE));
auto row_offsets = row_offset_storage.next_output(scan_stream);
// reset the next chunk of tile state
hipLaunchKernelGGL(( multibyte_split_init_kernel), dim3(tiles_in_launch),
dim3(THREADS_PER_TILE),
0,
scan_stream.value(), //
base_tile_idx,
tiles_in_launch,
tile_multistates,
tile_offsets);
hipStreamWaitEvent(scan_stream.value(), last_launch_event);
if (delimiter.size() == 1) {
// the single-byte case allows for a much more efficient kernel, so we special-case it
hipLaunchKernelGGL(( byte_split_kernel), dim3(tiles_in_launch),
dim3(THREADS_PER_TILE),
0,
scan_stream.value(), //
base_tile_idx,
chunk_offset,
row_offset_storage.size(),
tile_offsets,
delimiter[0],
*chunk,
row_offsets);
} else {
hipLaunchKernelGGL(( multibyte_split_kernel), dim3(tiles_in_launch),
dim3(THREADS_PER_TILE),
0,
scan_stream.value(), //
base_tile_idx,
chunk_offset,
row_offset_storage.size(),
tile_multistates,
tile_offsets,
{device_delim.data(), static_cast<std::size_t>(device_delim.size())},
*chunk,
row_offsets);
}
// load the next chunk
auto next_chunk = reader->get_next_chunk(ITEMS_PER_CHUNK, read_stream);
// while that is running, determine how many offsets we output (synchronizes)
auto const new_offsets = [&] {
auto const new_offsets_unclamped =
tile_offsets.get_inclusive_prefix(base_tile_idx + tiles_in_launch - 1, scan_stream) -
static_cast<output_offset>(row_offset_storage.size());
// if we are not in the last chunk, we can use all offsets
if (chunk_offset + static_cast<output_offset>(chunk->size()) < byte_range_end) {
return new_offsets_unclamped;
}
// if we are in the last chunk, we need to find the first out-of-bounds offset
auto const it = thrust::make_counting_iterator(output_offset{});
auto const end_loc =
*thrust::find_if(rmm::exec_policy_nosync(scan_stream),
it,
it + new_offsets_unclamped,
[row_offsets, byte_range_end] __device__(output_offset i) {
return row_offsets[i] >= byte_range_end;
});
// if we had no out-of-bounds offset, we copy all offsets
if (end_loc == new_offsets_unclamped) { return end_loc; }
// otherwise we copy only up to (including) the first out-of-bounds delimiter
found_last_offset = true;
return end_loc + 1;
}();
row_offset_storage.advance_output(new_offsets, scan_stream);
// determine if we found the first or last field offset for the byte range
if (new_offsets > 0 and not first_row_offset) {
first_row_offset = row_offset_storage.front_element(scan_stream);
}
if (found_last_offset) { last_row_offset = row_offset_storage.back_element(scan_stream); }
// copy over the characters we need, if we already encountered the first field delimiter
if (first_row_offset.has_value()) {
auto const begin = chunk->data() + std::max<byte_offset>(0, *first_row_offset - chunk_offset);
auto const sentinel = last_row_offset.value_or(std::numeric_limits<byte_offset>::max());
auto const end =
chunk->data() + std::min<byte_offset>(sentinel - chunk_offset, chunk->size());
auto const output_size = end - begin;
auto char_output = char_storage.next_output(scan_stream);
thrust::copy(rmm::exec_policy_nosync(scan_stream), begin, end, char_output.begin());
char_storage.advance_output(output_size, scan_stream);
}
hipEventRecord(last_launch_event, scan_stream.value());
std::swap(read_stream, scan_stream);
base_tile_idx += tiles_in_launch;
chunk_offset += chunk->size();
chunk = std::move(next_chunk);
}
hipEventDestroy(last_launch_event);
join_stream(streams, stream);
// if the input was empty, we didn't find a delimiter at all,
// or the first delimiter was also the last: empty output
if (chunk_offset == 0 or not first_row_offset.has_value() or
first_row_offset == last_row_offset) {
return make_empty_column(type_id::STRING);
}
auto chars = char_storage.gather(stream, mr);
auto global_offsets = row_offset_storage.gather(stream, mr);
// insert an offset at the beginning if we started at the beginning of the input
bool const insert_begin = first_row_offset.value_or(0) == 0;
// insert an offset at the end if we have not terminated the last row
bool const insert_end =
not(last_row_offset.has_value() or
(global_offsets.size() > 0 and global_offsets.back_element(stream) == chunk_offset));
rmm::device_uvector<int32_t> offsets{
global_offsets.size() + insert_begin + insert_end, stream, mr};
if (insert_begin) { offsets.set_element_to_zero_async(0, stream); }
if (insert_end) {
offsets.set_element(offsets.size() - 1, chunk_offset - *first_row_offset, stream);
}
thrust::transform(rmm::exec_policy(stream),
global_offsets.begin(),
global_offsets.end(),
offsets.begin() + insert_begin,
[baseline = *first_row_offset] __device__(byte_offset global_offset) {
return static_cast<int32_t>(global_offset - baseline);
});
auto string_count = offsets.size() - 1;
if (strip_delimiters) {
auto it = cudf::detail::make_counting_transform_iterator(
0,
[ofs = offsets.data(),
chars = chars.data(),
delim_size = static_cast<size_type>(delimiter.size()),
last_row = static_cast<size_type>(string_count) - 1,
insert_end] __device__(size_type row) {
auto const begin = ofs[row];
auto const len = ofs[row + 1] - begin;
if (row == last_row && insert_end) {
return thrust::make_pair(chars + begin, len);
} else {
return thrust::make_pair(chars + begin, std::max<size_type>(0, len - delim_size));
};
});
return cudf::strings::detail::make_strings_column(it, it + string_count, stream, mr);
} else {
return cudf::make_strings_column(string_count, std::move(offsets), std::move(chars));
}
}
} // namespace detail
std::unique_ptr<cudf::column> multibyte_split(cudf::io::text::data_chunk_source const& source,
std::string const& delimiter,
std::optional<byte_range_info> byte_range,
rmm::mr::device_memory_resource* mr)
{
return multibyte_split(
source, delimiter, parse_options{byte_range.value_or(create_byte_range_info_max())}, mr);
}
std::unique_ptr<cudf::column> multibyte_split(cudf::io::text::data_chunk_source const& source,
std::string const& delimiter,
parse_options options,
rmm::mr::device_memory_resource* mr)
{
auto stream = cudf::get_default_stream();
auto stream_pool = rmm::cuda_stream_pool(2);
auto result = detail::multibyte_split(
source, delimiter, options.byte_range, options.strip_delimiters, stream, mr, stream_pool);
return result;
}
std::unique_ptr<cudf::column> multibyte_split(cudf::io::text::data_chunk_source const& source,
std::string const& delimiter,
rmm::mr::device_memory_resource* mr)
{
return multibyte_split(source, delimiter, parse_options{}, mr);
}
} // namespace text
} // namespace io
} // namespace cudf
|
379e71eb6dde202e511995b9b521616ac397c16f.cu
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <io/utilities/output_builder.cuh>
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/io/text/byte_range_info.hpp>
#include <cudf/io/text/data_chunk_source.hpp>
#include <cudf/io/text/detail/multistate.hpp>
#include <cudf/io/text/detail/tile_state.hpp>
#include <cudf/io/text/multibyte_split.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/strings/detail/strings_column_factories.cuh>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_pool.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <thrust/copy.h>
#include <thrust/find.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform.h>
#include <cub/block/block_load.cuh>
#include <cub/block/block_scan.cuh>
#include <cstdint>
#include <limits>
#include <memory>
#include <numeric>
#include <optional>
namespace {
using cudf::io::text::detail::multistate;
int32_t constexpr ITEMS_PER_THREAD = 64;
int32_t constexpr THREADS_PER_TILE = 128;
int32_t constexpr ITEMS_PER_TILE = ITEMS_PER_THREAD * THREADS_PER_TILE;
int32_t constexpr TILES_PER_CHUNK = 4096;
int32_t constexpr ITEMS_PER_CHUNK = ITEMS_PER_TILE * TILES_PER_CHUNK;
constexpr multistate transition_init(char c, cudf::device_span<char const> delim)
{
auto result = multistate();
result.enqueue(0, 0);
for (std::size_t i = 0; i < delim.size(); i++) {
if (delim[i] == c) { result.enqueue(i, i + 1); }
}
return result;
}
constexpr multistate transition(char c, multistate state, cudf::device_span<char const> delim)
{
auto result = multistate();
result.enqueue(0, 0);
for (uint8_t i = 0; i < state.size(); i++) {
auto const tail = state.get_tail(i);
if (tail < delim.size() && delim[tail] == c) { result.enqueue(state.get_head(i), tail + 1); }
}
return result;
}
struct PatternScan {
using BlockScan = cub::BlockScan<multistate, THREADS_PER_TILE>;
using BlockScanCallback = cudf::io::text::detail::scan_tile_state_callback<multistate>;
struct _TempStorage {
typename BlockScan::TempStorage scan;
};
_TempStorage& _temp_storage;
using TempStorage = cub::Uninitialized<_TempStorage>;
__device__ inline PatternScan(TempStorage& temp_storage) : _temp_storage(temp_storage.Alias()) {}
__device__ inline void Scan(cudf::size_type tile_idx,
cudf::io::text::detail::scan_tile_state_view<multistate> tile_state,
cudf::device_span<char const> delim,
char (&thread_data)[ITEMS_PER_THREAD],
multistate& thread_multistate)
{
thread_multistate = transition_init(thread_data[0], delim);
for (uint32_t i = 1; i < ITEMS_PER_THREAD; i++) {
thread_multistate = transition(thread_data[i], thread_multistate, delim);
}
auto prefix_callback = BlockScanCallback(tile_state, tile_idx);
BlockScan(_temp_storage.scan)
.ExclusiveSum(thread_multistate, thread_multistate, prefix_callback);
}
};
// type aliases to distinguish between row offsets and character offsets
using output_offset = int64_t;
using byte_offset = int64_t;
// multibyte_split works by splitting up inputs into ITEMS_PER_THREAD bytes per thread, and transforming
// them into data structures called "multistates". These multistates are created by searching a
// trie, but instead of a traditional trie where the search begins at a single node at the beginning,
// we allow our search to begin anywhere within the trie. The position within the trie is
// stored as a "partial match path", which indicates "we can get from here to there by a set of
// specific transitions". By scanning together multistates, we effectively know "we can get here
// from the beginning by following the inputs". By doing this, each thread knows exactly what state
// it begins in. From there, each thread can then take deterministic action. In this case, the
// deterministic action is counting and outputting delimiter offsets when a delimiter is found.
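// For example, with the delimiter "xyz", the multistate built for the text "yz"
// contains the segment (1 -> 3): if the preceding bytes ended one character into
// the delimiter, this fragment completes it. Scanning these segment maps across
// threads tells each thread the exact delimiter position at which its bytes begin.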
__global__ void multibyte_split_init_kernel(
cudf::size_type base_tile_idx,
cudf::size_type num_tiles,
cudf::io::text::detail::scan_tile_state_view<multistate> tile_multistates,
cudf::io::text::detail::scan_tile_state_view<output_offset> tile_output_offsets,
cudf::io::text::detail::scan_tile_status status =
cudf::io::text::detail::scan_tile_status::invalid)
{
auto const thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_idx < num_tiles) {
auto const tile_idx = base_tile_idx + thread_idx;
tile_multistates.set_status(tile_idx, status);
tile_output_offsets.set_status(tile_idx, status);
}
}
__global__ void multibyte_split_seed_kernel(
cudf::io::text::detail::scan_tile_state_view<multistate> tile_multistates,
cudf::io::text::detail::scan_tile_state_view<output_offset> tile_output_offsets,
multistate tile_multistate_seed,
output_offset tile_output_offset)
{
auto const thread_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_idx == 0) {
tile_multistates.set_inclusive_prefix(-1, tile_multistate_seed);
tile_output_offsets.set_inclusive_prefix(-1, tile_output_offset);
}
}
__global__ __launch_bounds__(THREADS_PER_TILE) void multibyte_split_kernel(
cudf::size_type base_tile_idx,
byte_offset base_input_offset,
output_offset base_output_offset,
cudf::io::text::detail::scan_tile_state_view<multistate> tile_multistates,
cudf::io::text::detail::scan_tile_state_view<output_offset> tile_output_offsets,
cudf::device_span<char const> delim,
cudf::device_span<char const> chunk_input_chars,
cudf::split_device_span<byte_offset> row_offsets)
{
using InputLoad =
cub::BlockLoad<char, THREADS_PER_TILE, ITEMS_PER_THREAD, cub::BLOCK_LOAD_WARP_TRANSPOSE>;
using OffsetScan = cub::BlockScan<output_offset, THREADS_PER_TILE>;
using OffsetScanCallback = cudf::io::text::detail::scan_tile_state_callback<output_offset>;
__shared__ union {
typename InputLoad::TempStorage input_load;
typename PatternScan::TempStorage pattern_scan;
typename OffsetScan::TempStorage offset_scan;
} temp_storage;
int32_t const tile_idx = base_tile_idx + blockIdx.x;
int32_t const tile_input_offset = blockIdx.x * ITEMS_PER_TILE;
int32_t const thread_input_offset = tile_input_offset + threadIdx.x * ITEMS_PER_THREAD;
int32_t const thread_input_size = chunk_input_chars.size() - thread_input_offset;
// STEP 1: Load inputs
char thread_chars[ITEMS_PER_THREAD];
InputLoad(temp_storage.input_load)
.Load(chunk_input_chars.data() + tile_input_offset,
thread_chars,
chunk_input_chars.size() - tile_input_offset);
// STEP 2: Scan inputs to determine absolute thread states
multistate thread_multistate;
__syncthreads(); // required before temp_memory re-use
PatternScan(temp_storage.pattern_scan)
.Scan(tile_idx, tile_multistates, delim, thread_chars, thread_multistate);
// STEP 3: Flag matches
output_offset thread_offset{};
uint32_t thread_match_mask[(ITEMS_PER_THREAD + 31) / 32]{};
for (int32_t i = 0; i < ITEMS_PER_THREAD; i++) {
thread_multistate = transition(thread_chars[i], thread_multistate, delim);
auto const thread_state = thread_multistate.max_tail();
auto const is_match = i < thread_input_size and thread_state == delim.size();
thread_match_mask[i / 32] |= uint32_t{is_match} << (i % 32);
thread_offset += output_offset{is_match};
}
// STEP 4: Scan flags to determine absolute thread output offset
auto prefix_callback = OffsetScanCallback(tile_output_offsets, tile_idx);
__syncthreads(); // required before temp_memory re-use
OffsetScan(temp_storage.offset_scan).ExclusiveSum(thread_offset, thread_offset, prefix_callback);
// Step 5: Assign outputs from each thread using match offsets.
for (int32_t i = 0; i < ITEMS_PER_THREAD; i++) {
auto const is_match = (thread_match_mask[i / 32] >> (i % 32)) & 1u;
if (is_match) {
auto const match_end = base_input_offset + thread_input_offset + i + 1;
row_offsets[thread_offset - base_output_offset] = match_end;
thread_offset++;
}
}
}
__global__ __launch_bounds__(THREADS_PER_TILE) void byte_split_kernel(
cudf::size_type base_tile_idx,
byte_offset base_input_offset,
output_offset base_output_offset,
cudf::io::text::detail::scan_tile_state_view<output_offset> tile_output_offsets,
char delim,
cudf::device_span<char const> chunk_input_chars,
cudf::split_device_span<byte_offset> row_offsets)
{
using InputLoad =
cub::BlockLoad<char, THREADS_PER_TILE, ITEMS_PER_THREAD, cub::BLOCK_LOAD_WARP_TRANSPOSE>;
using OffsetScan = cub::BlockScan<output_offset, THREADS_PER_TILE>;
using OffsetScanCallback = cudf::io::text::detail::scan_tile_state_callback<output_offset>;
__shared__ union {
typename InputLoad::TempStorage input_load;
typename OffsetScan::TempStorage offset_scan;
} temp_storage;
int32_t const tile_idx = base_tile_idx + blockIdx.x;
int32_t const tile_input_offset = blockIdx.x * ITEMS_PER_TILE;
int32_t const thread_input_offset = tile_input_offset + threadIdx.x * ITEMS_PER_THREAD;
int32_t const thread_input_size = chunk_input_chars.size() - thread_input_offset;
// STEP 1: Load inputs
char thread_chars[ITEMS_PER_THREAD];
InputLoad(temp_storage.input_load)
.Load(chunk_input_chars.data() + tile_input_offset,
thread_chars,
chunk_input_chars.size() - tile_input_offset);
// STEP 2: Flag matches
output_offset thread_offset{};
uint32_t thread_match_mask[(ITEMS_PER_THREAD + 31) / 32]{};
for (int32_t i = 0; i < ITEMS_PER_THREAD; i++) {
auto const is_match = i < thread_input_size and thread_chars[i] == delim;
thread_match_mask[i / 32] |= uint32_t{is_match} << (i % 32);
thread_offset += output_offset{is_match};
}
// STEP 3: Scan flags to determine absolute thread output offset
auto prefix_callback = OffsetScanCallback(tile_output_offsets, tile_idx);
__syncthreads(); // required before temp_memory re-use
OffsetScan(temp_storage.offset_scan).ExclusiveSum(thread_offset, thread_offset, prefix_callback);
// Step 4: Assign outputs from each thread using match offsets.
for (int32_t i = 0; i < ITEMS_PER_THREAD; i++) {
auto const is_match = (thread_match_mask[i / 32] >> (i % 32)) & 1u;
if (is_match) {
auto const match_end = base_input_offset + thread_input_offset + i + 1;
row_offsets[thread_offset - base_output_offset] = match_end;
thread_offset++;
}
}
}
} // namespace
namespace cudf {
namespace io {
namespace text {
namespace detail {
void fork_stream(std::vector<rmm::cuda_stream_view> streams, rmm::cuda_stream_view stream)
{
cudaEvent_t event;
cudaEventCreate(&event);
cudaEventRecord(event, stream);
for (uint32_t i = 0; i < streams.size(); i++) {
cudaStreamWaitEvent(streams[i], event, 0);
}
cudaEventDestroy(event);
}
void join_stream(std::vector<rmm::cuda_stream_view> streams, rmm::cuda_stream_view stream)
{
cudaEvent_t event;
cudaEventCreate(&event);
for (uint32_t i = 0; i < streams.size(); i++) {
cudaEventRecord(event, streams[i]);
cudaStreamWaitEvent(stream, event, 0);
}
cudaEventDestroy(event);
}
std::vector<rmm::cuda_stream_view> get_streams(int32_t count, rmm::cuda_stream_pool& stream_pool)
{
auto streams = std::vector<rmm::cuda_stream_view>();
for (int32_t i = 0; i < count; i++) {
streams.emplace_back(stream_pool.get_stream());
}
return streams;
}
std::unique_ptr<cudf::column> multibyte_split(cudf::io::text::data_chunk_source const& source,
std::string const& delimiter,
byte_range_info byte_range,
bool strip_delimiters,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr,
rmm::cuda_stream_pool& stream_pool)
{
CUDF_FUNC_RANGE();
if (byte_range.empty()) { return make_empty_column(type_id::STRING); }
auto device_delim = cudf::string_scalar(delimiter, true, stream, mr);
auto sorted_delim = delimiter;
std::sort(sorted_delim.begin(), sorted_delim.end());
auto [_last_char, _last_char_count, max_duplicate_tokens] = std::accumulate(
sorted_delim.begin(), sorted_delim.end(), std::make_tuple('\0', 0, 0), [](auto acc, char c) {
if (std::get<0>(acc) != c) {
std::get<0>(acc) = c;
std::get<1>(acc) = 0;
}
std::get<1>(acc)++;
std::get<2>(acc) = std::max(std::get<1>(acc), std::get<2>(acc));
return acc;
});
CUDF_EXPECTS(max_duplicate_tokens < multistate::max_segment_count,
"delimiter contains too many duplicate tokens to produce a deterministic result.");
CUDF_EXPECTS(delimiter.size() < multistate::max_segment_value,
"delimiter contains too many total tokens to produce a deterministic result.");
auto concurrency = 2;
auto streams = get_streams(concurrency, stream_pool);
// must be at least 32 when using warp-reduce on partials
// must be at least 1 more than max possible concurrent tiles
// best when at least 32 more than max possible concurrent tiles, due to rolling `invalid`s
auto num_tile_states = std::max(32, TILES_PER_CHUNK * concurrency + 32);
auto tile_multistates = scan_tile_state<multistate>(num_tile_states, stream);
auto tile_offsets = scan_tile_state<output_offset>(num_tile_states, stream);
multibyte_split_init_kernel<<<TILES_PER_CHUNK,
THREADS_PER_TILE,
0,
stream.value()>>>( //
-TILES_PER_CHUNK,
TILES_PER_CHUNK,
tile_multistates,
tile_offsets,
cudf::io::text::detail::scan_tile_status::oob);
auto multistate_seed = multistate();
multistate_seed.enqueue(0, 0); // this represents the first state in the pattern.
// Seeding the tile state with an identity value allows the 0th tile to follow the same logic as
// the Nth tile, assuming it can look up an inclusive prefix. Without this seed, the 0th block
// would have to follow separate logic.
multibyte_split_seed_kernel<<<1, 1, 0, stream.value()>>>( //
tile_multistates,
tile_offsets,
multistate_seed,
0);
auto reader = source.create_reader();
auto chunk_offset = std::max<byte_offset>(0, byte_range.offset() - delimiter.size());
auto const byte_range_end = byte_range.offset() + byte_range.size();
reader->skip_bytes(chunk_offset);
// amortize output chunk allocations over 8 worst-case outputs. This limits the overallocation
constexpr auto max_growth = 8;
output_builder<byte_offset> row_offset_storage(ITEMS_PER_CHUNK, max_growth, stream);
output_builder<char> char_storage(ITEMS_PER_CHUNK, max_growth, stream);
fork_stream(streams, stream);
cudaEvent_t last_launch_event;
cudaEventCreate(&last_launch_event);
auto& read_stream = streams[0];
auto& scan_stream = streams[1];
auto chunk = reader->get_next_chunk(ITEMS_PER_CHUNK, read_stream);
int64_t base_tile_idx = 0;
std::optional<byte_offset> first_row_offset;
std::optional<byte_offset> last_row_offset;
bool found_last_offset = false;
if (byte_range.offset() == 0) { first_row_offset = 0; }
std::swap(read_stream, scan_stream);
while (chunk->size() > 0) {
// if we found the last delimiter, or didn't find delimiters inside the byte range at all: abort
if (last_row_offset.has_value() or
(not first_row_offset.has_value() and chunk_offset >= byte_range_end)) {
break;
}
auto tiles_in_launch =
cudf::util::div_rounding_up_safe(chunk->size(), static_cast<std::size_t>(ITEMS_PER_TILE));
auto row_offsets = row_offset_storage.next_output(scan_stream);
// reset the next chunk of tile state
multibyte_split_init_kernel<<<tiles_in_launch,
THREADS_PER_TILE,
0,
scan_stream.value()>>>( //
base_tile_idx,
tiles_in_launch,
tile_multistates,
tile_offsets);
cudaStreamWaitEvent(scan_stream.value(), last_launch_event);
if (delimiter.size() == 1) {
// the single-byte case allows for a much more efficient kernel, so we special-case it
byte_split_kernel<<<tiles_in_launch,
THREADS_PER_TILE,
0,
scan_stream.value()>>>( //
base_tile_idx,
chunk_offset,
row_offset_storage.size(),
tile_offsets,
delimiter[0],
*chunk,
row_offsets);
} else {
multibyte_split_kernel<<<tiles_in_launch,
THREADS_PER_TILE,
0,
scan_stream.value()>>>( //
base_tile_idx,
chunk_offset,
row_offset_storage.size(),
tile_multistates,
tile_offsets,
{device_delim.data(), static_cast<std::size_t>(device_delim.size())},
*chunk,
row_offsets);
}
// load the next chunk
auto next_chunk = reader->get_next_chunk(ITEMS_PER_CHUNK, read_stream);
// while that is running, determine how many offsets we output (synchronizes)
auto const new_offsets = [&] {
auto const new_offsets_unclamped =
tile_offsets.get_inclusive_prefix(base_tile_idx + tiles_in_launch - 1, scan_stream) -
static_cast<output_offset>(row_offset_storage.size());
// if we are not in the last chunk, we can use all offsets
if (chunk_offset + static_cast<output_offset>(chunk->size()) < byte_range_end) {
return new_offsets_unclamped;
}
// if we are in the last chunk, we need to find the first out-of-bounds offset
auto const it = thrust::make_counting_iterator(output_offset{});
auto const end_loc =
*thrust::find_if(rmm::exec_policy_nosync(scan_stream),
it,
it + new_offsets_unclamped,
[row_offsets, byte_range_end] __device__(output_offset i) {
return row_offsets[i] >= byte_range_end;
});
// if we had no out-of-bounds offset, we copy all offsets
if (end_loc == new_offsets_unclamped) { return end_loc; }
// otherwise we copy only up to (including) the first out-of-bounds delimiter
found_last_offset = true;
return end_loc + 1;
}();
row_offset_storage.advance_output(new_offsets, scan_stream);
// determine if we found the first or last field offset for the byte range
if (new_offsets > 0 and not first_row_offset) {
first_row_offset = row_offset_storage.front_element(scan_stream);
}
if (found_last_offset) { last_row_offset = row_offset_storage.back_element(scan_stream); }
// copy over the characters we need, if we already encountered the first field delimiter
if (first_row_offset.has_value()) {
auto const begin = chunk->data() + std::max<byte_offset>(0, *first_row_offset - chunk_offset);
auto const sentinel = last_row_offset.value_or(std::numeric_limits<byte_offset>::max());
auto const end =
chunk->data() + std::min<byte_offset>(sentinel - chunk_offset, chunk->size());
auto const output_size = end - begin;
auto char_output = char_storage.next_output(scan_stream);
thrust::copy(rmm::exec_policy_nosync(scan_stream), begin, end, char_output.begin());
char_storage.advance_output(output_size, scan_stream);
}
cudaEventRecord(last_launch_event, scan_stream.value());
std::swap(read_stream, scan_stream);
base_tile_idx += tiles_in_launch;
chunk_offset += chunk->size();
chunk = std::move(next_chunk);
}
cudaEventDestroy(last_launch_event);
join_stream(streams, stream);
// if the input was empty, we didn't find a delimiter at all,
// or the first delimiter was also the last: empty output
if (chunk_offset == 0 or not first_row_offset.has_value() or
first_row_offset == last_row_offset) {
return make_empty_column(type_id::STRING);
}
auto chars = char_storage.gather(stream, mr);
auto global_offsets = row_offset_storage.gather(stream, mr);
// insert an offset at the beginning if we started at the beginning of the input
bool const insert_begin = first_row_offset.value_or(0) == 0;
// insert an offset at the end if we have not terminated the last row
bool const insert_end =
not(last_row_offset.has_value() or
(global_offsets.size() > 0 and global_offsets.back_element(stream) == chunk_offset));
rmm::device_uvector<int32_t> offsets{
global_offsets.size() + insert_begin + insert_end, stream, mr};
if (insert_begin) { offsets.set_element_to_zero_async(0, stream); }
if (insert_end) {
offsets.set_element(offsets.size() - 1, chunk_offset - *first_row_offset, stream);
}
thrust::transform(rmm::exec_policy(stream),
global_offsets.begin(),
global_offsets.end(),
offsets.begin() + insert_begin,
[baseline = *first_row_offset] __device__(byte_offset global_offset) {
return static_cast<int32_t>(global_offset - baseline);
});
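  // Editorial sketch (not part of the original source), tracing the offset construction above:
  // for the input "abc\ndef" with delimiter "\n" and a byte range covering the whole input,
  // one delimiter ends at byte 4, so global_offsets == [4] and chunk_offset == 7.
  // first_row_offset == 0 makes insert_begin true; the input does not end with a delimiter and
  // no in-range last delimiter was recorded, so insert_end is true. The offsets column becomes
  // [0, 4, 7], describing the rows "abc\n" and "def" ("abc" and "def" once strip_delimiters
  // removes the trailing delimiter).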
auto string_count = offsets.size() - 1;
if (strip_delimiters) {
auto it = cudf::detail::make_counting_transform_iterator(
0,
[ofs = offsets.data(),
chars = chars.data(),
delim_size = static_cast<size_type>(delimiter.size()),
last_row = static_cast<size_type>(string_count) - 1,
insert_end] __device__(size_type row) {
auto const begin = ofs[row];
auto const len = ofs[row + 1] - begin;
if (row == last_row && insert_end) {
return thrust::make_pair(chars + begin, len);
} else {
return thrust::make_pair(chars + begin, std::max<size_type>(0, len - delim_size));
};
});
return cudf::strings::detail::make_strings_column(it, it + string_count, stream, mr);
} else {
return cudf::make_strings_column(string_count, std::move(offsets), std::move(chars));
}
}
} // namespace detail
std::unique_ptr<cudf::column> multibyte_split(cudf::io::text::data_chunk_source const& source,
std::string const& delimiter,
std::optional<byte_range_info> byte_range,
rmm::mr::device_memory_resource* mr)
{
return multibyte_split(
source, delimiter, parse_options{byte_range.value_or(create_byte_range_info_max())}, mr);
}
std::unique_ptr<cudf::column> multibyte_split(cudf::io::text::data_chunk_source const& source,
std::string const& delimiter,
parse_options options,
rmm::mr::device_memory_resource* mr)
{
auto stream = cudf::get_default_stream();
auto stream_pool = rmm::cuda_stream_pool(2);
auto result = detail::multibyte_split(
source, delimiter, options.byte_range, options.strip_delimiters, stream, mr, stream_pool);
return result;
}
std::unique_ptr<cudf::column> multibyte_split(cudf::io::text::data_chunk_source const& source,
std::string const& delimiter,
rmm::mr::device_memory_resource* mr)
{
return multibyte_split(source, delimiter, parse_options{}, mr);
}
} // namespace text
} // namespace io
} // namespace cudf
|
1e3c8f6f0dbc3354327599564edc291ce751a63b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* GridTools
*
* Copyright (c) 2014-2019, ETH Zurich
* All rights reserved.
*
* Please, refer to the LICENSE file in the root directory.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <gtest/gtest.h>
#include <gridtools/common/cuda_util.hpp>
#include <gridtools/common/integral_constant.hpp>
#include <gridtools/meta.hpp>
#include <gridtools/sid/allocator.hpp>
#include <cuda_test_helper.hpp>
namespace gridtools {
namespace {
template <typename PtrHolder>
__device__ bool check_allocation(PtrHolder ptr_holder) {
auto &ref = *ptr_holder();
ref = 1.;
return ref == 1.;
}
template <typename PtrHolder>
__global__ void test_allocated(PtrHolder testee, bool *result) {}
TEST(simple_device_memory_allocator, test) {
sid::device::allocator<GT_INTEGRAL_CONSTANT_FROM_VALUE(&cuda_util::cuda_malloc<char[]>)> alloc;
auto ptr_holder = allocate(alloc, meta::lazy::id<double>{}, 1);
auto result = gridtools::on_device::exec(
GT_MAKE_INTEGRAL_CONSTANT_FROM_VALUE(&check_allocation<decltype(ptr_holder)>), ptr_holder);
ASSERT_TRUE(result);
}
} // namespace
} // namespace gridtools
|
1e3c8f6f0dbc3354327599564edc291ce751a63b.cu
|
/*
* GridTools
*
* Copyright (c) 2014-2019, ETH Zurich
* All rights reserved.
*
* Please, refer to the LICENSE file in the root directory.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <gtest/gtest.h>
#include <gridtools/common/cuda_util.hpp>
#include <gridtools/common/integral_constant.hpp>
#include <gridtools/meta.hpp>
#include <gridtools/sid/allocator.hpp>
#include <cuda_test_helper.hpp>
namespace gridtools {
namespace {
template <typename PtrHolder>
__device__ bool check_allocation(PtrHolder ptr_holder) {
auto &ref = *ptr_holder();
ref = 1.;
return ref == 1.;
}
template <typename PtrHolder>
__global__ void test_allocated(PtrHolder testee, bool *result) {}
TEST(simple_device_memory_allocator, test) {
sid::device::allocator<GT_INTEGRAL_CONSTANT_FROM_VALUE(&cuda_util::cuda_malloc<char[]>)> alloc;
auto ptr_holder = allocate(alloc, meta::lazy::id<double>{}, 1);
auto result = gridtools::on_device::exec(
GT_MAKE_INTEGRAL_CONSTANT_FROM_VALUE(&check_allocation<decltype(ptr_holder)>), ptr_holder);
ASSERT_TRUE(result);
}
} // namespace
} // namespace gridtools
|
6a9f9b90a9ff2a511a0f4c358c089110d6aef5c4.hip
|
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
|
6a9f9b90a9ff2a511a0f4c358c089110d6aef5c4.cu
|
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 128, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
537d8f3088e642aed4d4ec1b2efb8d257f89be23.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <sastbx/fXS/cuda_functions.cuh>
namespace sastbx {
namespace fXS {
const int threads_per_block = 1024;
// form factors
const int max_types = 50;
const int max_terms = 10;
__device__ __constant__ float d_a[max_types * max_terms];
__device__ __constant__ float d_b[max_types * max_terms];
__device__ __constant__ float d_c[max_types];
__device__ __constant__ int d_n_types;
__device__ __constant__ int d_n_terms;
// constants
__device__ __constant__ float two_pi = float(2.0)*CUDART_PI_F;
const int padded_size = 16;
__device__ __constant__ int d_padded_size = padded_size;
/* ==========================================================================
Memory properties for C2070, compute capability 2.0
(CUDA C Programming Guide v 4.0, Appendix F)
---------------------------------------------------
registers - register, r/w, "fast", per-multiprocessor 32K 4B registers
local memory - r/w, "slow", per-thread 512 KB
__shared__ - shared memory, r/w, "fast", block-wide 48 KB
__device__ - global memory, r/w, "slow", grid-wide, 6 GB
__constant__ - constant memory, r, "fast", grid-wide 64 KB
Shared memory is broken up into 32 banks and is interleaved into 32-bit
words (4 bytes). For example, an array of length 64 containing single
precision values will have elements 0 and 32 in the same bank, 1 and 33
in the same bank, etc. To access shared memory with no conflicts (all
threads get data with one read), each thread should read from a different
bank, or have multiple threads read the same value in the same bank. In
the previous example, having all threads access element 0 or having each
thread read a different element between 0 and 31, inclusive, will only
require one read. Accessing elements 0 and 32 will require two reads.
Appendix F.4 describes the memory properties for compute capability 2.0
devices in more detail and has figures for efficient memory access
patterns.
Basic approach
--------------
Each thread calculates the sum for one h, so each thread will
independently loop over all atoms and put the sum into global memory
All coordinates are loaded into global memory and then each thread copies
sections into shared memory. The kernel loops over all sections to sum
over all atoms. Rotation matrix/translation vectors pairs are also loaded
and looped in the same manner. Form factors are stored in constant memory
The thread index is checked against the length of the array multiple times
because all threads are used for reading atom data from global, but only
threads whose index is less than the array length are needed for
     summation. Additionally, two __syncthreads() calls are required. The
first is to make sure all the atom data is copied into shared memory
before any summation is started, and the second is to make sure all the
summation is finished before the atom data in shared memory is replaced
with new data.
Data format for kernel
----------------------
xyz = x_0 ... x_n y_0 ... y_n z_0 ... z_n
solvent_weights = s_0 s_1 ... s_n
h = h_0 ... h_n k_0 ... k_n l_0 ... l_n
rt = r_00 ... r_08 t_00 ... t_02 ... r_10 ... t_n2
a = a_00 a_01 a_02 ... a_n3 a_n4 a_n5
b = ""
c = c_0 c_1 ... c_n
To facilitate coalesced reads from global memory, the data is grouped
into sections. For example, for xyz, all the x's come first, then all
the y's, and lastly, all the z's. When read from global memory, three
coalesced reads will read in all the xyz's for a set of 32 atoms, one
read from each section. The size of the shared arrays is equal to the
number of threads so that all threads will attempt to read from global
memory. There are checks against the actual length of available data.
     For the structure_factor_kernel, the general format of the loops is,
-----------------------------
| x_0 | x_1 | x_2 | x_3 | ... xyz array in global memory
-----------------------------
| | | | each thread stores one value into
| | | | shared memory
V V V V x[threadIdx.x] = xyz[current_atom];
-----------------------------
| x_0 | x_1 | x_2 | x_3 | ... x array in shared memory
-----------------------------
|
|-----|-----|-----| each thread reads one value
V V V V x_a = x[a];
--------------------------------------------------------
|each thread calculates its own sum with its registers |
--------------------------------------------------------
| | | |
| | | | loop over all atoms
V V V V
-----------------------------
| r_0 | r_1 | r_2 | r_3 | ... each thread copies its sums into
----------------------------- the structure factor arrays in
----------------------------- global memory
| i_0 | i_1 | i_2 | i_3 | ...
-----------------------------
--------------------------------------------------------------------------
*/
// kernel
__global__ void structure_factor_kernel
(const int* scattering_type, const float* xyz,
const float* solvent_weights, const int n_xyz,
const float* h, const int n_h,
const float* rt, const int n_rt,
float* sf_real, float* sf_imag) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
float h_i, k_i, l_i, stol_sq;
float f[max_types];
if (i < n_h) {
// read h from global memory (stored in registers)
h_i = h[i];
k_i = h[n_h + i];
l_i = h[2*n_h + i];
// calculate form factors (stored in local memory)
// last form factor is always for boundary solvent layer
stol_sq = float(0.25) * (h_i*h_i + k_i*k_i + l_i*l_i);
for (int type=0; type<d_n_types; type++) {
f[type] = 0.0;
for (int term=0; term<d_n_terms; term++) {
f[type] += d_a[type*d_n_terms + term] *
__expf(-d_b[type*d_n_terms + term] * stol_sq);
}
f[type] += d_c[type];
}
}
// copy atoms into shared memory one chunk at a time and sum
// all threads are used for reading data
// shared arrays can be allocated at kernel invocation, but it requires
// partitioning a big array (implement later)
__shared__ float x[threads_per_block];
__shared__ float y[threads_per_block];
__shared__ float z[threads_per_block];
__shared__ float solvent[threads_per_block];
__shared__ int s_type[threads_per_block];
__shared__ float rot_trans[threads_per_block];
float real_sum = 0.0;
float imag_sum = 0.0;
float s,c,ff,xx,yy,zz,x_a,y_a,z_a;
int current_atom, current_rt, rt_offset;
for (int atom=0; atom<n_xyz; atom += blockDim.x) {
current_atom = atom + threadIdx.x;
// coalesce reads using threads, but don't read past n_xyz
// one read for each variable should fill chunk of 32 atoms
// total length = # of threads/block
if (current_atom < n_xyz) {
x[threadIdx.x] = xyz[ current_atom];
y[threadIdx.x] = xyz[n_xyz + current_atom];
z[threadIdx.x] = xyz[2*n_xyz + current_atom];
solvent[threadIdx.x] = solvent_weights[current_atom];
s_type[threadIdx.x] = scattering_type[current_atom];
}
// loop over all rotation/translation operators
// one coalesced read will copy (# of threads)/(padded_size) rot/trans
// since the number of threads is a multiple of 32, it will also always
// be evenly divisible by padded_size
for (int rt_i=0; rt_i<n_rt; rt_i += blockDim.x/d_padded_size) {
current_rt = rt_i*d_padded_size + threadIdx.x;
if (current_rt < n_rt*d_padded_size) {
rot_trans[threadIdx.x] = rt[current_rt];
}
// wait for all data to be copied into shared memory
__syncthreads();
// then sum over all the atoms that are now available to all threads
if (i < n_h) {
for (int r=0; r<blockDim.x/d_padded_size; r++) {
current_rt = rt_i + r; // overall counter for rot/trans pairs
if (current_rt < n_rt) {
for (int a=0; a<blockDim.x; a++) {
current_atom = atom + a; // overall counter for atom number
if (current_atom < n_xyz) {
x_a = x[a]; // transfer from shared memory to registers
y_a = y[a]; // might not be necessary due to cache
z_a = z[a];
rt_offset = r*d_padded_size;
// apply rotation and translation by expanding Rx + t
xx = (x_a*rot_trans[rt_offset ] +
y_a*rot_trans[rt_offset + 1] +
z_a*rot_trans[rt_offset + 2] +
rot_trans[rt_offset + 9]);
                yy = (x_a*rot_trans[rt_offset + 3] +
                      y_a*rot_trans[rt_offset + 4] +
                      z_a*rot_trans[rt_offset + 5] +
                      rot_trans[rt_offset + 10]);
                zz = (x_a*rot_trans[rt_offset + 6] +
                      y_a*rot_trans[rt_offset + 7] +
                      z_a*rot_trans[rt_offset + 8] +
                      rot_trans[rt_offset + 11]);
__sincosf(two_pi*(xx * h_i + yy * k_i + zz * l_i),&s,&c);
// bulk solvent correction in f
// boundary layer solvent scale in solvent
ff = f[s_type[a]] + solvent[a]*f[d_n_types-1];
real_sum += ff * c;
imag_sum += ff * s;
}
}
}
}
}
// wait before starting next chunk so data isn't changed for lagging threads
__syncthreads();
}
}
// transfer result to global memory
if (i < n_h) {
sf_real[i] = real_sum;
sf_imag[i] = imag_sum;
}
}
/* ==========================================================================
*/
sastbx::fXS::cuda_direct_summation::cuda_direct_summation() {
sf_size = 0;
}
sastbx::fXS::cuda_direct_summation::~cuda_direct_summation() {
cudaSafeCall( hipFree(sf_real) );
cudaSafeCall( hipFree(sf_imag) );
}
/* --------------------------------------------------------------------------
reorganizes data and calls cuda
*/
void sastbx::fXS::cuda_direct_summation::add
(const scitbx::af::const_ref<std::string>& scatterers,
const scitbx::af::const_ref<scitbx::vec3<double> >& xyz,
const scitbx::af::const_ref<double>& solvent_weights,
const scitbx::af::const_ref<scitbx::vec3<double> >& h,
const scitbx::af::const_ref<double>& rotations,
const scitbx::af::const_ref<scitbx::vec3<double> >& translations,
const cctbx::xray::scattering_type_registry& registry) {
// reorganize coordinates
int n_xyz = xyz.size();
int size_xyz = 3 * n_xyz;
float* h_xyz = new float[size_xyz];
for (int i=0; i<n_xyz; i++) {
for (int j=0; j<3; j++) {
h_xyz[j*n_xyz + i] = float(xyz[i][j]);
}
}
// copy boundary layer solvent weights
float* h_solvent = new float[n_xyz];
for (int i=0; i<n_xyz; i++) {
h_solvent[i] = float(solvent_weights[i]);
}
// reorganize h
int n_h = h.size();
int size_h = 3 * n_h;
float* h_h = new float[size_h];
for (int i=0; i<n_h; i++) {
for (int j=0; j<3; j++) {
h_h[j*n_h + i] = float(h[i][j]);
}
}
// reorganize rotations and translations
// each rotation/translation pair is combined and padded to take up
// 64 bytes so that a coalesced read will read two pairs
int n_rt = translations.size();
int size_rt = padded_size * n_rt;
float* h_rt = new float[size_rt];
for (int i=0; i<n_rt; i++) {
for (int j=0; j<9; j++) {
h_rt[padded_size*i + j] = float(rotations[9*i + j]);
}
for (int j=0; j<3; j++) {
h_rt[padded_size*i + j + 9] = float(translations[i][j]);
}
}
// convert scattering types and form factors
// add ordinary oxygen form factor at end for boundary layer solvent
int* h_scattering_type = new int[n_xyz];
for (int i=0; i<n_xyz; i++) {
h_scattering_type[i] = registry.unique_index(scatterers[i]);
}
scitbx::af::shared<boost::optional
<cctbx::eltbx::xray_scattering::gaussian> >
unique_gaussians = registry.unique_gaussians;
int n_types = unique_gaussians.size() + 1;
int n_terms = unique_gaussians[0].get().n_terms();
int f_size = n_types * n_terms;
float* h_a = new float[f_size];
float* h_b = new float[f_size];
float* h_c = new float[n_types];
for (int i=0; i<f_size; i++) {
h_a[i] = 0.0;
h_b[i] = 0.0;
}
for (int i=0; i<n_types-1; i++) {
for (int j=0; j<n_terms; j++) {
h_a[i*n_terms + j] = unique_gaussians[i].get().array_of_a()[j];
h_b[i*n_terms + j] = unique_gaussians[i].get().array_of_b()[j];
}
if (unique_gaussians[i].get().use_c()) {
h_c[i] = unique_gaussians[i].get().c();
}
else {
h_c[i] = float(0.0);
}
}
// add form factor for boundary layer solvent (# of terms may be different)
cctbx::eltbx::xray_scattering::gaussian hoh =
cctbx::eltbx::xray_scattering::wk1995("O",true).fetch();
for (int i=0; i<hoh.array_of_a().size(); i++){
h_a[(n_types-1)*n_terms + i] = hoh.array_of_a()[i];
h_b[(n_types-1)*n_terms + i] = hoh.array_of_b()[i];
}
if (hoh.use_c()) {
h_c[n_types-1] = hoh.c();
}
else {
h_c[n_types-1] = float(0.0);
}
// transfer data to global memory
int* d_scattering_type;
cudaSafeCall( hipMalloc((void**)&d_scattering_type,n_xyz*sizeof(int)) );
cudaSafeCall( hipMemcpy(d_scattering_type,h_scattering_type,
n_xyz*sizeof(int),hipMemcpyHostToDevice) );
float* d_xyz;
cudaSafeCall( hipMalloc((void**)&d_xyz,size_xyz*sizeof(float)) );
cudaSafeCall( hipMemcpy(d_xyz, h_xyz, size_xyz*sizeof(float),
hipMemcpyHostToDevice) );
float* d_solvent;
cudaSafeCall( hipMalloc((void**)&d_solvent,n_xyz*sizeof(float)) );
cudaSafeCall( hipMemcpy(d_solvent,h_solvent,n_xyz*sizeof(float),
hipMemcpyHostToDevice) );
float* d_rt;
cudaSafeCall( hipMalloc((void**)&d_rt,size_rt*sizeof(float)) );
cudaSafeCall( hipMemcpy(d_rt, h_rt, size_rt*sizeof(float),
hipMemcpyHostToDevice) );
float* d_h;
cudaSafeCall( hipMalloc((void**)&d_h,size_h*sizeof(float)) );
cudaSafeCall( hipMemcpy(d_h, h_h, size_h*sizeof(float),
hipMemcpyHostToDevice) );
// transfer data to constant memory
// should combine d_n_types and d_n_terms into one transfer
cudaSafeCall( hipMemcpyToSymbol(d_a, h_a, f_size*sizeof(float)) );
cudaSafeCall( hipMemcpyToSymbol(d_b, h_b, f_size*sizeof(float)) );
cudaSafeCall( hipMemcpyToSymbol(d_c, h_c, n_types*sizeof(float)) );
cudaSafeCall( hipMemcpyToSymbol(d_n_types, &n_types, sizeof(int)) );
cudaSafeCall( hipMemcpyToSymbol(d_n_terms, &n_terms, sizeof(int)) );
// allocate arrays for results if necessary
if (sf_size == 0) {
sf_size = n_h;
cudaSafeCall( hipMalloc((void**)&sf_real,n_h*sizeof(float)) );
cudaSafeCall( hipMalloc((void**)&sf_imag,n_h*sizeof(float)) );
}
else {
assert(sf_size == n_h);
}
// run calculation
int blocks_per_grid = (n_h + threads_per_block - 1)/threads_per_block;
hipLaunchKernelGGL(( structure_factor_kernel), dim3(blocks_per_grid),dim3(threads_per_block), 0, 0,
d_scattering_type, d_xyz, d_solvent, n_xyz,
d_h, n_h,
d_rt, n_rt,
sf_real, sf_imag);
// clean up
delete[] h_xyz;
delete[] h_solvent;
delete[] h_h;
delete[] h_rt;
delete[] h_scattering_type;
delete[] h_a;
delete[] h_b;
delete[] h_c;
cudaSafeCall( hipFree(d_h) );
cudaSafeCall( hipFree(d_xyz) );
cudaSafeCall( hipFree(d_solvent) );
cudaSafeCall( hipFree(d_rt) );
cudaSafeCall( hipFree(d_scattering_type) );
}
/* --------------------------------------------------------------------------
return total sum
*/
scitbx::af::shared<std::complex<double> >
sastbx::fXS::cuda_direct_summation::get_sum() {
scitbx::af::shared<std::complex<double> > sf(sf_size);
if (sf_size != 0) {
float* h_real = new float[sf_size];
float* h_imag = new float[sf_size];
cudaSafeCall( hipMemcpy(h_real,sf_real,sf_size*sizeof(float),
hipMemcpyDeviceToHost) );
cudaSafeCall( hipMemcpy(h_imag,sf_imag,sf_size*sizeof(float),
hipMemcpyDeviceToHost) );
for (int i=0; i<sf_size; i++) {
sf[i] = std::complex<double>(double(h_real[i]),double(h_imag[i]));
}
delete[] h_real;
delete[] h_imag;
}
return sf;
}
}
}
|
537d8f3088e642aed4d4ec1b2efb8d257f89be23.cu
|
#include <sastbx/fXS/cuda_functions.cuh>
namespace sastbx {
namespace fXS {
const int threads_per_block = 1024;
// form factors
const int max_types = 50;
const int max_terms = 10;
__device__ __constant__ float d_a[max_types * max_terms];
__device__ __constant__ float d_b[max_types * max_terms];
__device__ __constant__ float d_c[max_types];
__device__ __constant__ int d_n_types;
__device__ __constant__ int d_n_terms;
// constants
__device__ __constant__ float two_pi = float(2.0)*CUDART_PI_F;
const int padded_size = 16;
__device__ __constant__ int d_padded_size = padded_size;
/* ==========================================================================
Memory properties for C2070, compute capability 2.0
(CUDA C Programming Guide v 4.0, Appendix F)
---------------------------------------------------
registers - register, r/w, "fast", per-multiprocessor 32K 4B registers
local memory - r/w, "slow", per-thread 512 KB
__shared__ - shared memory, r/w, "fast", block-wide 48 KB
__device__ - global memory, r/w, "slow", grid-wide, 6 GB
__constant__ - constant memory, r, "fast", grid-wide 64 KB
Shared memory is broken up into 32 banks and is interleaved into 32-bit
words (4 bytes). For example, an array of length 64 containing single
precision values will have elements 0 and 32 in the same bank, 1 and 33
in the same bank, etc. To access shared memory with no conflicts (all
threads get data with one read), each thread should read from a different
bank, or have multiple threads read the same value in the same bank. In
the previous example, having all threads access element 0 or having each
thread read a different element between 0 and 31, inclusive, will only
require one read. Accessing elements 0 and 32 will require two reads.
Appendix F.4 describes the memory properties for compute capability 2.0
devices in more detail and has figures for efficient memory access
patterns.
Basic approach
--------------
Each thread calculates the sum for one h, so each thread will
independently loop over all atoms and put the sum into global memory
All coordinates are loaded into global memory and then each thread copies
sections into shared memory. The kernel loops over all sections to sum
over all atoms. Rotation matrix/translation vectors pairs are also loaded
and looped in the same manner. Form factors are stored in constant memory
The thread index is checked against the length of the array multiple times
because all threads are used for reading atom data from global, but only
threads whose index is less than the array length are needed for
     summation. Additionally, two __syncthreads() calls are required. The
first is to make sure all the atom data is copied into shared memory
before any summation is started, and the second is to make sure all the
summation is finished before the atom data in shared memory is replaced
with new data.
Data format for kernel
----------------------
xyz = x_0 ... x_n y_0 ... y_n z_0 ... z_n
solvent_weights = s_0 s_1 ... s_n
h = h_0 ... h_n k_0 ... k_n l_0 ... l_n
rt = r_00 ... r_08 t_00 ... t_02 ... r_10 ... t_n2
a = a_00 a_01 a_02 ... a_n3 a_n4 a_n5
b = ""
c = c_0 c_1 ... c_n
To facilitate coalesced reads from global memory, the data is grouped
into sections. For example, for xyz, all the x's come first, then all
the y's, and lastly, all the z's. When read from global memory, three
coalesced reads will read in all the xyz's for a set of 32 atoms, one
read from each section. The size of the shared arrays is equal to the
number of threads so that all threads will attempt to read from global
memory. There are checks against the actual length of available data.
     For the structure_factor_kernel, the general format of the loops is,
-----------------------------
| x_0 | x_1 | x_2 | x_3 | ... xyz array in global memory
-----------------------------
| | | | each thread stores one value into
| | | | shared memory
V V V V x[threadIdx.x] = xyz[current_atom];
-----------------------------
| x_0 | x_1 | x_2 | x_3 | ... x array in shared memory
-----------------------------
|
|-----|-----|-----| each thread reads one value
V V V V x_a = x[a];
--------------------------------------------------------
|each thread calculates its own sum with its registers |
--------------------------------------------------------
| | | |
| | | | loop over all atoms
V V V V
-----------------------------
| r_0 | r_1 | r_2 | r_3 | ... each thread copies its sums into
----------------------------- the structure factor arrays in
----------------------------- global memory
| i_0 | i_1 | i_2 | i_3 | ...
-----------------------------
--------------------------------------------------------------------------
*/
// kernel
__global__ void structure_factor_kernel
(const int* scattering_type, const float* xyz,
const float* solvent_weights, const int n_xyz,
const float* h, const int n_h,
const float* rt, const int n_rt,
float* sf_real, float* sf_imag) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
float h_i, k_i, l_i, stol_sq;
float f[max_types];
if (i < n_h) {
// read h from global memory (stored in registers)
h_i = h[i];
k_i = h[n_h + i];
l_i = h[2*n_h + i];
// calculate form factors (stored in local memory)
// last form factor is always for boundary solvent layer
stol_sq = float(0.25) * (h_i*h_i + k_i*k_i + l_i*l_i);
for (int type=0; type<d_n_types; type++) {
f[type] = 0.0;
for (int term=0; term<d_n_terms; term++) {
f[type] += d_a[type*d_n_terms + term] *
__expf(-d_b[type*d_n_terms + term] * stol_sq);
}
f[type] += d_c[type];
}
}
// copy atoms into shared memory one chunk at a time and sum
// all threads are used for reading data
// shared arrays can be allocated at kernel invocation, but it requires
// partitioning a big array (implement later)
__shared__ float x[threads_per_block];
__shared__ float y[threads_per_block];
__shared__ float z[threads_per_block];
__shared__ float solvent[threads_per_block];
__shared__ int s_type[threads_per_block];
__shared__ float rot_trans[threads_per_block];
float real_sum = 0.0;
float imag_sum = 0.0;
float s,c,ff,xx,yy,zz,x_a,y_a,z_a;
int current_atom, current_rt, rt_offset;
for (int atom=0; atom<n_xyz; atom += blockDim.x) {
current_atom = atom + threadIdx.x;
// coalesce reads using threads, but don't read past n_xyz
// one read for each variable should fill chunk of 32 atoms
// total length = # of threads/block
if (current_atom < n_xyz) {
x[threadIdx.x] = xyz[ current_atom];
y[threadIdx.x] = xyz[n_xyz + current_atom];
z[threadIdx.x] = xyz[2*n_xyz + current_atom];
solvent[threadIdx.x] = solvent_weights[current_atom];
s_type[threadIdx.x] = scattering_type[current_atom];
}
// loop over all rotation/translation operators
// one coalesced read will copy (# of threads)/(padded_size) rot/trans
// since the number of threads is a multiple of 32, it will also always
// be evenly divisible by padded_size
for (int rt_i=0; rt_i<n_rt; rt_i += blockDim.x/d_padded_size) {
current_rt = rt_i*d_padded_size + threadIdx.x;
if (current_rt < n_rt*d_padded_size) {
rot_trans[threadIdx.x] = rt[current_rt];
}
// wait for all data to be copied into shared memory
__syncthreads();
// then sum over all the atoms that are now available to all threads
if (i < n_h) {
for (int r=0; r<blockDim.x/d_padded_size; r++) {
current_rt = rt_i + r; // overall counter for rot/trans pairs
if (current_rt < n_rt) {
for (int a=0; a<blockDim.x; a++) {
current_atom = atom + a; // overall counter for atom number
if (current_atom < n_xyz) {
x_a = x[a]; // transfer from shared memory to registers
y_a = y[a]; // might not be necessary due to cache
z_a = z[a];
rt_offset = r*d_padded_size;
// apply rotation and translation by expanding Rx + t
xx = (x_a*rot_trans[rt_offset ] +
y_a*rot_trans[rt_offset + 1] +
z_a*rot_trans[rt_offset + 2] +
rot_trans[rt_offset + 9]);
                yy = (x_a*rot_trans[rt_offset + 3] +
                      y_a*rot_trans[rt_offset + 4] +
                      z_a*rot_trans[rt_offset + 5] +
                      rot_trans[rt_offset + 10]);
                zz = (x_a*rot_trans[rt_offset + 6] +
                      y_a*rot_trans[rt_offset + 7] +
                      z_a*rot_trans[rt_offset + 8] +
                      rot_trans[rt_offset + 11]);
__sincosf(two_pi*(xx * h_i + yy * k_i + zz * l_i),&s,&c);
// bulk solvent correction in f
// boundary layer solvent scale in solvent
ff = f[s_type[a]] + solvent[a]*f[d_n_types-1];
real_sum += ff * c;
imag_sum += ff * s;
}
}
}
}
}
// wait before starting next chunk so data isn't changed for lagging threads
__syncthreads();
}
}
// transfer result to global memory
if (i < n_h) {
sf_real[i] = real_sum;
sf_imag[i] = imag_sum;
}
}
/* ==========================================================================
*/
sastbx::fXS::cuda_direct_summation::cuda_direct_summation() {
sf_size = 0;
}
sastbx::fXS::cuda_direct_summation::~cuda_direct_summation() {
cudaSafeCall( cudaFree(sf_real) );
cudaSafeCall( cudaFree(sf_imag) );
}
/* --------------------------------------------------------------------------
reorganizes data and calls cuda
*/
void sastbx::fXS::cuda_direct_summation::add
(const scitbx::af::const_ref<std::string>& scatterers,
const scitbx::af::const_ref<scitbx::vec3<double> >& xyz,
const scitbx::af::const_ref<double>& solvent_weights,
const scitbx::af::const_ref<scitbx::vec3<double> >& h,
const scitbx::af::const_ref<double>& rotations,
const scitbx::af::const_ref<scitbx::vec3<double> >& translations,
const cctbx::xray::scattering_type_registry& registry) {
// reorganize coordinates
int n_xyz = xyz.size();
int size_xyz = 3 * n_xyz;
float* h_xyz = new float[size_xyz];
for (int i=0; i<n_xyz; i++) {
for (int j=0; j<3; j++) {
h_xyz[j*n_xyz + i] = float(xyz[i][j]);
}
}
// copy boundary layer solvent weights
float* h_solvent = new float[n_xyz];
for (int i=0; i<n_xyz; i++) {
h_solvent[i] = float(solvent_weights[i]);
}
// reorganize h
int n_h = h.size();
int size_h = 3 * n_h;
float* h_h = new float[size_h];
for (int i=0; i<n_h; i++) {
for (int j=0; j<3; j++) {
h_h[j*n_h + i] = float(h[i][j]);
}
}
// reorganize rotations and translations
// each rotation/translation pair is combined and padded to take up
// 64 bytes so that a coalesced read will read two pairs
int n_rt = translations.size();
int size_rt = padded_size * n_rt;
float* h_rt = new float[size_rt];
for (int i=0; i<n_rt; i++) {
for (int j=0; j<9; j++) {
h_rt[padded_size*i + j] = float(rotations[9*i + j]);
}
for (int j=0; j<3; j++) {
h_rt[padded_size*i + j + 9] = float(translations[i][j]);
}
}
// convert scattering types and form factors
// add ordinary oxygen form factor at end for boundary layer solvent
int* h_scattering_type = new int[n_xyz];
for (int i=0; i<n_xyz; i++) {
h_scattering_type[i] = registry.unique_index(scatterers[i]);
}
scitbx::af::shared<boost::optional
<cctbx::eltbx::xray_scattering::gaussian> >
unique_gaussians = registry.unique_gaussians;
int n_types = unique_gaussians.size() + 1;
int n_terms = unique_gaussians[0].get().n_terms();
int f_size = n_types * n_terms;
float* h_a = new float[f_size];
float* h_b = new float[f_size];
float* h_c = new float[n_types];
for (int i=0; i<f_size; i++) {
h_a[i] = 0.0;
h_b[i] = 0.0;
}
for (int i=0; i<n_types-1; i++) {
for (int j=0; j<n_terms; j++) {
h_a[i*n_terms + j] = unique_gaussians[i].get().array_of_a()[j];
h_b[i*n_terms + j] = unique_gaussians[i].get().array_of_b()[j];
}
if (unique_gaussians[i].get().use_c()) {
h_c[i] = unique_gaussians[i].get().c();
}
else {
h_c[i] = float(0.0);
}
}
// add form factor for boundary layer solvent (# of terms may be different)
cctbx::eltbx::xray_scattering::gaussian hoh =
cctbx::eltbx::xray_scattering::wk1995("O",true).fetch();
for (int i=0; i<hoh.array_of_a().size(); i++){
h_a[(n_types-1)*n_terms + i] = hoh.array_of_a()[i];
h_b[(n_types-1)*n_terms + i] = hoh.array_of_b()[i];
}
if (hoh.use_c()) {
h_c[n_types-1] = hoh.c();
}
else {
h_c[n_types-1] = float(0.0);
}
// transfer data to global memory
int* d_scattering_type;
cudaSafeCall( cudaMalloc((void**)&d_scattering_type,n_xyz*sizeof(int)) );
cudaSafeCall( cudaMemcpy(d_scattering_type,h_scattering_type,
n_xyz*sizeof(int),cudaMemcpyHostToDevice) );
float* d_xyz;
cudaSafeCall( cudaMalloc((void**)&d_xyz,size_xyz*sizeof(float)) );
cudaSafeCall( cudaMemcpy(d_xyz, h_xyz, size_xyz*sizeof(float),
cudaMemcpyHostToDevice) );
float* d_solvent;
cudaSafeCall( cudaMalloc((void**)&d_solvent,n_xyz*sizeof(float)) );
cudaSafeCall( cudaMemcpy(d_solvent,h_solvent,n_xyz*sizeof(float),
cudaMemcpyHostToDevice) );
float* d_rt;
cudaSafeCall( cudaMalloc((void**)&d_rt,size_rt*sizeof(float)) );
cudaSafeCall( cudaMemcpy(d_rt, h_rt, size_rt*sizeof(float),
cudaMemcpyHostToDevice) );
float* d_h;
cudaSafeCall( cudaMalloc((void**)&d_h,size_h*sizeof(float)) );
cudaSafeCall( cudaMemcpy(d_h, h_h, size_h*sizeof(float),
cudaMemcpyHostToDevice) );
// transfer data to constant memory
// should combine d_n_types and d_n_terms into one transfer
cudaSafeCall( cudaMemcpyToSymbol(d_a, h_a, f_size*sizeof(float)) );
cudaSafeCall( cudaMemcpyToSymbol(d_b, h_b, f_size*sizeof(float)) );
cudaSafeCall( cudaMemcpyToSymbol(d_c, h_c, n_types*sizeof(float)) );
cudaSafeCall( cudaMemcpyToSymbol(d_n_types, &n_types, sizeof(int)) );
cudaSafeCall( cudaMemcpyToSymbol(d_n_terms, &n_terms, sizeof(int)) );
// allocate arrays for results if necessary
if (sf_size == 0) {
sf_size = n_h;
cudaSafeCall( cudaMalloc((void**)&sf_real,n_h*sizeof(float)) );
cudaSafeCall( cudaMalloc((void**)&sf_imag,n_h*sizeof(float)) );
}
else {
assert(sf_size == n_h);
}
// run calculation
int blocks_per_grid = (n_h + threads_per_block - 1)/threads_per_block;
structure_factor_kernel<<<blocks_per_grid,threads_per_block>>>
(d_scattering_type, d_xyz, d_solvent, n_xyz,
d_h, n_h,
d_rt, n_rt,
sf_real, sf_imag);
// clean up
delete[] h_xyz;
delete[] h_solvent;
delete[] h_h;
delete[] h_rt;
delete[] h_scattering_type;
delete[] h_a;
delete[] h_b;
delete[] h_c;
cudaSafeCall( cudaFree(d_h) );
cudaSafeCall( cudaFree(d_xyz) );
cudaSafeCall( cudaFree(d_solvent) );
cudaSafeCall( cudaFree(d_rt) );
cudaSafeCall( cudaFree(d_scattering_type) );
}
/* --------------------------------------------------------------------------
return total sum
*/
scitbx::af::shared<std::complex<double> >
sastbx::fXS::cuda_direct_summation::get_sum() {
scitbx::af::shared<std::complex<double> > sf(sf_size);
if (sf_size != 0) {
float* h_real = new float[sf_size];
float* h_imag = new float[sf_size];
cudaSafeCall( cudaMemcpy(h_real,sf_real,sf_size*sizeof(float),
cudaMemcpyDeviceToHost) );
cudaSafeCall( cudaMemcpy(h_imag,sf_imag,sf_size*sizeof(float),
cudaMemcpyDeviceToHost) );
for (int i=0; i<sf_size; i++) {
sf[i] = std::complex<double>(double(h_real[i]),double(h_imag[i]));
}
delete[] h_real;
delete[] h_imag;
}
return sf;
}
}
}
|
11828d4611a3373f95823a17d1ca5304b6b99df3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "FluidCUDA.cuh"
// *** Author: Martin Staykov
// *** May, 2015
// ***
// *** References:
// *** Abertay University
// *** rastertek.com
// *** Real-Time Fluid Dynamics for Games, by Jos Stam
// *** NVIDIA CUDA Toolkit
// ***
// *** These functions perform mathematical calculations based on the equations described in my dissertation.
// *** They will not be described in detail here.
#define _(i,j) ((i)+(256)*(j)) // transition from 2d array to a 1d one
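// Editorial note (not in the original source): the macro flattens cell (i, j) of the 256x256
// grid to the linear index i + 256*j, e.g. _(3,2) expands to (3 + 256*2) == 515.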
#define size 65536 // smoke resolution, total number of cells
#define BLOCKS 90
#define THREADS 736
// The d_ prefix (device) shows this is an array, which lives on the GPU.
float * d_dens, * d_dens_prev, * d_u, * d_v, * d_u_prev, * d_v_prev, * d_aux, * d_MC1, * d_MC2;
__global__ void RedGaussSeidelKernel(int N, float* dest, float* src1, float* src2, float C1, float C2)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int j = (int) index / (N+2);
int i = (int) index - (j*(N+2));
if (i<1 || i>N || j<1 || j>N) return;
if ((i + j)%2 == 0)
{
dest[_(i,j)] = (src1[_(i,j)] + C1*(src2[_(i-1,j)] + src2[_(i+1,j)] + src2[_(i,j-1)] + src2[_(i,j+1)]))/C2;
}
}
__global__ void BlackGaussSeidelKernel(int N, float* dest, float* src1, float* src2, float C1, float C2)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int j = (int) index / (N+2);
int i = (int) index - (j*(N+2));
if (i<1 || i>N || j<1 || j>N) return;
if ((i + j)%2 != 0)
{
dest[_(i,j)] = (src1[_(i,j)] + C1*(src2[_(i-1,j)] + src2[_(i+1,j)] + src2[_(i,j-1)] + src2[_(i,j+1)]))/C2;
}
}
__global__ void JacobiKernel(int N, float* dest, float* src1, float* src2, float C1, float C2)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int j = (int) index / (N+2);
int i = (int) index - (j*(N+2));
if (i<1 || i>N || j<1 || j>N) return;
dest[_(i,j)] = (src1[_(i,j)] + C1*(src2[_(i-1,j)] + src2[_(i+1,j)] + src2[_(i,j-1)] + src2[_(i,j+1)]))/C2;
}
__global__ void AdvectKernel(int N, float* d, float* d0, float* u, float* v, float dt, bool bBackward)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int j = (int) index / (N+2);
int i = (int) index - (j*(N+2));
int i0, j0, i1, j1;
float x, y, s0, t0, s1, t1, dt0;
// Scale the velocity into grid space.
float dx = 1.0f/N;
dx = 1/dx;
	dt0 = dt*dx;
if (i<1 || i>N || j<1 || j>N) return;
// The backwards step is used for the MacCormack advection.
if (bBackward)
{
x = i - dt0*u[_(i,j)];
y = j - dt0*v[_(i,j)];
}
else
{
x = i + dt0*u[_(i,j)];
y = j + dt0*v[_(i,j)];
}
if (x<0.5f) x = 0.5f;
if (x>N+0.5f) x = N+0.5f;
i0 = (int)x;
i1 = i0+1;
if (y<0.5f) y = 0.5f;
if (y>N+0.5f) y = N+0.5f;
j0 = (int)y;
j1 = j0+1;
s1 = x-i0;
s0 = 1-s1;
t1 = y-j0;
t0 = 1-t1;
// Interpolate.
d[_(i,j)] = s0*( t0*d0[_(i0,j0)] + t1*d0[_(i0,j1)] ) + s1*( t0*d0[_(i1,j0)] + t1*d0[_(i1,j1)] );
}
__global__ void MacCormackKernel(int N, float* dest, float* d0, float* MC1, float* MC2, float* u, float* v, float dt)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int j = (int) index / (N+2);
int i = (int) index - (j*(N+2));
int i0, j0, i1, j1;
float x, y, s0, t0, s1, t1, dt0;
// Scale the velocity into grid space.
float dx = 1.0f/N;
dx = 1/dx;
dt0 = dt*dx;
if (i<1 || i>N || j<1 || j>N) return;
x = i - dt0*u[_(i,j)];
y = j - dt0*v[_(i,j)];
if (x<0.5f) x = 0.5f;
if (x>N+0.5f) x = N+0.5f;
i0 = (int)x;
i1 = i0+1;
if (y<0.5f) y = 0.5f;
if (y>N+0.5f) y = N+0.5f;
j0 = (int)y;
j1 = j0+1;
// Get the values of nodes that contribute to the interpolated value.
float r0 = d0[_(i0, j0)];
float r1 = d0[_(i1, j0)];
float r2 = d0[_(i0, j1)];
float r3 = d0[_(i1, j1)];
float result = MC1[_(i,j)] + 0.5f*(d0[_(i,j)] - MC2[_(i,j)]);
float min = (r0 > r1) ? r1 : r0;
min = (min > r2) ? r2 : min;
min = (min > r3) ? r3 : min;
float max = (r0 < r1) ? r1 : r0;
max = (max < r2) ? r2 : max;
max = (max < r3) ? r3 : max;
// Clamp the result, so that it's stable.
// If outside the two extrema, revert to results from ordinary advection scheme.
// The extrema appear to produce errors for unknown reasons. Amend them by adding/subtracting a small number.
// Too big of a number, and the result produces tearings.
// Too small and results appear good but blurred, which defeats the purpose of the MacCormack scheme, which is to provide more detail.
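	// Editorial sketch (not in the original source): with neighbour samples r0..r3 equal to
	// 0.10, 0.20, 0.30 and 0.40, min == 0.10 and max == 0.40. A corrected value of 0.45 exceeds
	// (max - 0.02f) and falls back to the plain semi-Lagrangian value MC1[_(i,j)], while 0.25
	// lies inside the clamp band and is kept.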
if (result >= (max - 0.02f)) result = MC1[_(i,j)];//max;
if (result <= (min + 0.02f)) result = MC1[_(i,j)];//min;
dest[_(i,j)] = result;
}
__global__ void DivergenceKernel(int N, float* u, float* v, float* p, float* div)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int j = (int) index / (N+2);
int i = (int) index - (j*(N+2));
if (i<1 || i>N || j<1 || j>N) return;
float h = 1.0f/N;
div[_(i,j)] = -0.5f*h*(u[_(i+1,j)] - u[_(i-1,j)] + v[_(i,j+1)] - v[_(i,j-1)]);
p[_(i,j)] = 0;
}
__global__ void SubtractGradientKernel(int N, float* u, float* v, float* p)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int j = (int) index / (N+2);
int i = (int) index - (j*(N+2));
if (i<1 || i>N || j<1 || j>N) return;
float h = 1.0f/N;
u[_(i,j)] -= 0.5f*(p[_(i+1,j)] - p[_(i-1,j)])/h;
v[_(i,j)] -= 0.5f*(p[_(i,j+1)] - p[_(i,j-1)])/h;
}
__global__ void BoundaryKernel ( int N, int b, float * x)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int i = index + 1;
if (i<=N)
{
x[_(0 ,i)] = b==1 ? -x[_(1,i)] : x[_(1,i)];
x[_(N+1,i)] = b==1 ? -x[_(N,i)] : x[_(N,i)];
x[_(i,0 )] = b==2 ? -x[_(i,1)] : x[_(i,1)];
x[_(i,N+1)] = b==2 ? -x[_(i,N)] : x[_(i,N)];
if (i==1)
{
x[_(0 ,0 )] = 0.5f*(x[_(1,0 )]+x[_(0 ,1)]);
x[_(0 ,N+1)] = 0.5f*(x[_(1,N+1)]+x[_(0 ,N )]);
x[_(N+1,0 )] = 0.5f*(x[_(N,0 )]+x[_(N+1,1)]);
x[_(N+1,N+1)] = 0.5f*(x[_(N,N+1)]+x[_(N+1,N )]);
}
}
}
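// Runs on a single thread and injects dt*value into cell (i,j) and its four
// neighbours along the i-axis.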
__global__ void AddSourceKernel(int N, float* x, int i, int j, float value, float dt)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index != 0) return;
x[_(i, j)] += dt*value;
x[_(i-1, j)] += dt*value;
x[_(i+1, j)] += dt*value;
x[_(i+2, j)] += dt*value;
x[_(i-2, j)] += dt*value;
}
__global__ void AddSourceMultipleKernel(int N, float * a, float * b, float dt)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int j = (int) index / (N+2);
int i = (int) index - (j*(N+2));
if (i<1 || i>N || j<1 || j>N) return;
a[index] += dt*b[index];
}
__global__ void textureKernel(int N, unsigned char *surface, size_t pitch, float * a)
{
int y = (int) (threadIdx.x + blockIdx.x * blockDim.x) / (N+2);
int x = (int) (threadIdx.x + blockIdx.x * blockDim.x) - (y*(N+2));
float *pixel;
// In the case where, due to quantization into whole thread blocks, there are more
// threads than pixels, skip threads that don't correspond to valid pixels
// (cells on the x==0 or y==0 boundary are skipped as well).
if (x >= N+2 || y >= N+2 || (!x) || (!y)) return;
else
{
// get a pointer to the pixel at (x,y)
pixel = (float *)(surface + y*pitch) + 1*x;
float pvalue = a[_(x, y)];
// populate it
pixel[0] = pvalue;
}
}
__global__ void BuoyancyKernel(int N, float * dest, float * src, float kappa, float sigma)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int j = (int) index / (N+2);
int i = (int) index - (j*(N+2));
if (i<1 || i>N || j<1 || j>N) return;
float source = src[_(i, j)];
dest[_(i, j)] = sigma*source - kappa*source;
}
__global__ void CurlKernel(int N, float * u, float * v, float * dest, float dt)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int j = (int) index / (N+2);
int i = (int) index - (j*(N+2));
if (i<1 || i>N || j<1 || j>N) return;
float h = 1.0f/N;//(2*N);
float du_dy;
float dv_dx;
du_dy = (u[_(i, j+1)] - u[_(i, j-1)]) / h * 0.5f;
dv_dx = (v[_(i+1, j)] - v[_(i-1, j)]) / h * 0.5f;
dest[_(i, j)] = (dv_dx - du_dy);// * h * 0.5f;
}
__global__ void VorticityKernel(int N, float * u, float * v, float * c, float dt, float vort)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int j = (int) index / (N+2);
int i = (int) index - (j*(N+2));
if (i<1 || i>N || j<1 || j>N) return;
float h = 1.0f/N;//(2*N);
float omegaT = (c[_(i, j-1)]);
float omegaB = (c[_(i, j+1)]);
float omegaR = (c[_(i+1, j)]);
float omegaL = (c[_(i-1, j)]);
float comp1 = omegaT - omegaB;
float comp2 = omegaR - omegaL;
float2 force; force.x = comp1; force.y = comp2; force *= 0.5f; force /= h;
force /= (length(force) + 0.00001f);
float2 NN;
NN.x = -c[_(i, j)]*force.y;
NN.y = c[_(i, j)]*force.x;
NN *= vort;
u[_(i, j)] += NN.x * dt;
v[_(i, j)] += NN.y * dt;
}
void InitCUDA()
{
// Allocate all arrays on the device.
if (hipMalloc((void**)&d_u, size * sizeof(float)) != hipSuccess)
{
return;
}
if (hipMalloc((void**)&d_u_prev, size * sizeof(float)) != hipSuccess)
{
hipFree(d_u);
return;
}
if (hipMalloc((void**)&d_v, size * sizeof(float)) != hipSuccess)
{
hipFree(d_u);
hipFree(d_u_prev);
return;
}
if (hipMalloc((void**)&d_v_prev, size * sizeof(float)) != hipSuccess)
{
hipFree(d_u);
hipFree(d_u_prev);
hipFree(d_v);
return;
}
if (hipMalloc((void**)&d_dens, size * sizeof(float)) != hipSuccess)
{
hipFree(d_u);
hipFree(d_u_prev);
hipFree(d_v);
hipFree(d_v_prev);
return;
}
if (hipMalloc((void**)&d_dens_prev, size * sizeof(float)) != hipSuccess)
{
hipFree(d_u);
hipFree(d_u_prev);
hipFree(d_v);
hipFree(d_v_prev);
hipFree(d_dens);
return;
}
if (hipMalloc((void**)&d_aux, size * sizeof(float)) != hipSuccess)
{
hipFree(d_u);
hipFree(d_u_prev);
hipFree(d_v);
hipFree(d_v_prev);
hipFree(d_dens);
hipFree(d_dens_prev);
return;
}
if (hipMalloc((void**)&d_MC1, size * sizeof(float)) != hipSuccess)
{
hipFree(d_u);
hipFree(d_u_prev);
hipFree(d_v);
hipFree(d_v_prev);
hipFree(d_dens);
hipFree(d_dens_prev);
hipFree(d_aux);
return;
}
if (hipMalloc((void**)&d_MC2, size * sizeof(float)) != hipSuccess)
{
hipFree(d_u);
hipFree(d_u_prev);
hipFree(d_v);
hipFree(d_v_prev);
hipFree(d_dens);
hipFree(d_dens_prev);
hipFree(d_aux);
hipFree(d_MC1);
return;
}
// Initialize the arrays to 0.
if (hipMemset(d_dens, 0, size * sizeof(float)) != hipSuccess)
{
FreeCUDA();
return;
}
if (hipMemset(d_dens_prev, 0, size * sizeof(float)) != hipSuccess)
{
FreeCUDA();
return;
}
if (hipMemset(d_u, 0, size * sizeof(float)) != hipSuccess)
{
FreeCUDA();
return;
}
if (hipMemset(d_u_prev, 0, size * sizeof(float)) != hipSuccess)
{
FreeCUDA();
return;
}
if (hipMemset(d_v, 0, size * sizeof(float)) != hipSuccess)
{
FreeCUDA();
return;
}
if (hipMemset(d_v_prev, 0, size * sizeof(float)) != hipSuccess)
{
FreeCUDA();
return;
}
if (hipMemset(d_aux, 0, size * sizeof(float)) != hipSuccess)
{
FreeCUDA();
return;
}
if (hipMemset(d_MC1, 0, size * sizeof(float)) != hipSuccess)
{
FreeCUDA();
return;
}
if (hipMemset(d_MC2, 0, size * sizeof(float)) != hipSuccess)
{
FreeCUDA();
return;
}
// Exit and return void.
return;
}
void FreeCUDA()
{
hipFree(d_dens);
hipFree(d_dens_prev);
hipFree(d_u);
hipFree(d_u_prev);
hipFree(d_v);
hipFree(d_v_prev);
hipFree(d_aux);
hipFree(d_MC1);
hipFree(d_MC2);
hipDeviceReset();
}
void DiffuseCUDA(int N, float b, float * x, float * x0, float diff, float dt, int iterations, bool isJacobi)
{
float a = dt*diff*(float)N*(float)N;
for (int k=0; k<iterations; k++)
{
if (isJacobi)
{
hipLaunchKernelGGL(( JacobiKernel), dim3(BLOCKS), dim3(THREADS), 0, 0, N, d_aux, x0, x, a, 1+4*a);
hipDeviceSynchronize();
x = d_aux;
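// x now points at d_aux, so later iterations relax d_aux in place; the buffer
// originally passed in as x is not written inside this loop.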
}
else
{
hipLaunchKernelGGL(( RedGaussSeidelKernel), dim3(BLOCKS), dim3(THREADS), 0, 0, N, x, x0, x, a, 1+4*a);
hipDeviceSynchronize();
hipLaunchKernelGGL(( BlackGaussSeidelKernel), dim3(BLOCKS), dim3(THREADS), 0, 0, N, x, x0, x, a, 1+4*a);
}
hipDeviceSynchronize();
hipLaunchKernelGGL(( BoundaryKernel), dim3(1), dim3(N), 0, 0, N, b, x);
hipDeviceSynchronize();
}
}
void AdvectCUDA(int N, int b, float* d, float* d0, float* u, float* v, float dt, bool bBackward)
{
hipLaunchKernelGGL(( AdvectKernel), dim3(BLOCKS), dim3(THREADS), 0, 0, N, d, d0, u, v, dt, bBackward);
hipDeviceSynchronize();
hipLaunchKernelGGL(( BoundaryKernel), dim3(1), dim3(N), 0, 0, N, b, d);
hipDeviceSynchronize();
}
void AdvectMacCormackCUDA(int N, int b, float* d, float* d0, float* u, float* v, float dt)
{
hipLaunchKernelGGL(( AdvectKernel), dim3(BLOCKS), dim3(THREADS), 0, 0, N, d_MC1, d0, u, v, dt, true);
hipDeviceSynchronize();
hipLaunchKernelGGL(( BoundaryKernel), dim3(1), dim3(N), 0, 0, N, b, d_MC1);
hipDeviceSynchronize();
hipLaunchKernelGGL(( AdvectKernel), dim3(BLOCKS), dim3(THREADS), 0, 0, N, d_MC2, d_MC1, u, v, dt, false);
hipDeviceSynchronize();
hipLaunchKernelGGL(( BoundaryKernel), dim3(1), dim3(N), 0, 0, N, b, d_MC2);
hipDeviceSynchronize();
hipLaunchKernelGGL(( MacCormackKernel), dim3(BLOCKS), dim3(THREADS), 0, 0, N, d, d0, d_MC1, d_MC2, u, v, dt);
hipDeviceSynchronize();
hipLaunchKernelGGL(( BoundaryKernel), dim3(1), dim3(N), 0, 0, N, b, d);
hipDeviceSynchronize();
}
void ProjectCUDA(int N, float* u, float* v, float* p, float* div, int iterations, bool isJacobi)
{
hipLaunchKernelGGL(( DivergenceKernel), dim3(BLOCKS), dim3(THREADS), 0, 0, N, u, v, p, div);
hipDeviceSynchronize();
hipLaunchKernelGGL(( BoundaryKernel), dim3(1), dim3(N), 0, 0, N, 0, div);
hipLaunchKernelGGL(( BoundaryKernel), dim3(1), dim3(N), 0, 0, N, 0, p);
hipDeviceSynchronize();
for ( int k=0; k<iterations; k++ )
{
if (isJacobi)
{
hipLaunchKernelGGL(( JacobiKernel), dim3(BLOCKS), dim3(THREADS), 0, 0, N, d_aux, div, p, 1, 4);
hipDeviceSynchronize();
p = d_aux;
}
else
{
hipLaunchKernelGGL(( RedGaussSeidelKernel), dim3(BLOCKS), dim3(THREADS), 0, 0, N, p, div, p, 1, 4);
hipDeviceSynchronize();
hipLaunchKernelGGL(( BlackGaussSeidelKernel), dim3(BLOCKS), dim3(THREADS), 0, 0, N, p, div, p, 1, 4);
}
hipDeviceSynchronize();
hipLaunchKernelGGL(( BoundaryKernel), dim3(1), dim3(N), 0, 0, N, 0, p);
hipDeviceSynchronize();
}
hipLaunchKernelGGL(( SubtractGradientKernel), dim3(BLOCKS), dim3(THREADS), 0, 0, N, u, v, p);
hipDeviceSynchronize();
hipLaunchKernelGGL(( BoundaryKernel), dim3(1), dim3(N), 0, 0, N, 1, u);
hipLaunchKernelGGL(( BoundaryKernel), dim3(1), dim3(N), 0, 0, N, 2, v);
hipDeviceSynchronize();
}
void FrameCUDA(int N, int source_vel_i, int source_vel_j, int source_dens_i, int source_dens_j, float source_u_value, float source_v_value, float source_dens_value, Parameters a)
{
float * tempPtr; // used for swapping arrays
// Velocity step.
hipLaunchKernelGGL(( AddSourceKernel), dim3(1), dim3(1), 0, 0, N, d_u, source_vel_i, source_vel_j, source_u_value, a.dt);
hipLaunchKernelGGL(( AddSourceKernel), dim3(1), dim3(1), 0, 0, N, d_v, source_vel_i, source_vel_j, source_v_value, a.dt);
hipDeviceSynchronize();
if (a.bViscosity)
{
if (a.enumIterativeMethod == Jacobi) DiffuseCUDA(N, 1, d_u_prev, d_u, a.fViscFactor, a.dt, a.iPoissonIterations, true);
else if (a.enumIterativeMethod == GaussSeidel) DiffuseCUDA(N, 1, d_u_prev, d_u, a.fViscFactor, a.dt, a.iPoissonIterations, false);
if (a.enumIterativeMethod == Jacobi) DiffuseCUDA(N, 2, d_v_prev, d_v, a.fViscFactor, a.dt, a.iPoissonIterations, true);
else if (a.enumIterativeMethod == GaussSeidel) DiffuseCUDA(N, 2, d_v_prev, d_v, a.fViscFactor, a.dt, a.iPoissonIterations, false);
tempPtr = d_u; d_u = d_u_prev; d_u_prev = tempPtr;
tempPtr = d_v; d_v = d_v_prev; d_v_prev = tempPtr;
if (a.enumIterativeMethod == Jacobi) ProjectCUDA(N, d_u, d_v, d_u_prev, d_v_prev, a.iPoissonIterations, true);
else if (a.enumIterativeMethod == GaussSeidel) ProjectCUDA(N, d_u, d_v, d_u_prev, d_v_prev, a.iPoissonIterations, false);
}
if (a.enumCurrentAdvect == SingleSL)
{
AdvectCUDA(N, 1, d_u_prev, d_u, d_u, d_v, a.dt, true);
AdvectCUDA(N, 2, d_v_prev, d_v, d_u, d_v, a.dt, true);
}
else if (a.enumCurrentAdvect == SemiMacCormack)
{
AdvectMacCormackCUDA(N, 1, d_u_prev, d_u, d_u, d_v, a.dt);
AdvectMacCormackCUDA(N, 2, d_v_prev, d_v, d_u, d_v, a.dt);
}
if (a.bVorticity)
{
// *** VORTICITY CONFINEMENT ***
hipLaunchKernelGGL(( CurlKernel), dim3(BLOCKS), dim3(THREADS), 0, 0, N, d_u_prev, d_v_prev, d_aux, a.dt);
hipDeviceSynchronize();
hipLaunchKernelGGL(( VorticityKernel), dim3(BLOCKS), dim3(THREADS), 0, 0, N, d_u_prev, d_v_prev, d_aux, a.dt, a.fVortStrength);
hipDeviceSynchronize();
hipLaunchKernelGGL(( BoundaryKernel), dim3(1), dim3(N), 0, 0, N, 1, d_u_prev);
hipLaunchKernelGGL(( BoundaryKernel), dim3(1), dim3(N), 0, 0, N, 2, d_v_prev);
hipDeviceSynchronize();
}
if (a.bBuoyancy)
{
hipLaunchKernelGGL(( BuoyancyKernel), dim3(BLOCKS), dim3(THREADS), 0, 0, N, d_v, d_dens, a.fKappa, a.fSigma);
hipDeviceSynchronize();
hipLaunchKernelGGL(( AddSourceMultipleKernel), dim3(BLOCKS), dim3(THREADS), 0, 0, N, d_v_prev, d_v, a.dt);
hipDeviceSynchronize();
hipLaunchKernelGGL(( BoundaryKernel), dim3(1), dim3(N), 0, 0, N, 2, d_v_prev);
hipDeviceSynchronize();
}
if (a.enumIterativeMethod == Jacobi) ProjectCUDA(N, d_u_prev, d_v_prev, d_u, d_v, a.iPoissonIterations, true);
else if (a.enumIterativeMethod == GaussSeidel) ProjectCUDA(N, d_u_prev, d_v_prev, d_u, d_v, a.iPoissonIterations, false);
tempPtr = d_u; d_u = d_u_prev; d_u_prev = tempPtr;
tempPtr = d_v; d_v = d_v_prev; d_v_prev = tempPtr;
tempPtr = d_dens; d_dens = d_dens_prev; d_dens_prev = tempPtr;
// Density step.
hipLaunchKernelGGL(( AddSourceKernel), dim3(1), dim3(1), 0, 0, N, d_dens_prev, source_dens_i, source_dens_j, source_dens_value, a.dt);
hipLaunchKernelGGL(( AddSourceKernel), dim3(1), dim3(1), 0, 0, N, d_dens_prev, 128, 248, 50, a.dt);
hipDeviceSynchronize();
if (a.bDiffusion)
{
if (a.enumIterativeMethod == Jacobi) DiffuseCUDA(N, 0, d_dens, d_dens_prev, a.fDiffFactor, a.dt, a.iPoissonIterations, true);
else if (a.enumIterativeMethod == GaussSeidel) DiffuseCUDA(N, 0, d_dens, d_dens_prev, a.fDiffFactor, a.dt, a.iPoissonIterations, false);
tempPtr = d_dens; d_dens = d_dens_prev; d_dens_prev = tempPtr;
}
if (a.enumCurrentAdvect == SingleSL) AdvectCUDA(N, 0, d_dens, d_dens_prev, d_u, d_v, a.dt, true);
else if (a.enumCurrentAdvect == SemiMacCormack) AdvectMacCormackCUDA(N, 0, d_dens, d_dens_prev, d_u, d_v, a.dt);
return;
}
void cuda_texture_2d(int N, void *surface, int width, int height, size_t pitch)
{
hipLaunchKernelGGL(( textureKernel), dim3(BLOCKS), dim3(THREADS), 0, 0, N, (unsigned char *)surface, pitch, d_dens);
}
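// --- Minimal host-side usage sketch ---
// Added for illustration only; it is not part of the original project. It shows how
// the host API above fits together in a render loop. The grid size N is assumed to be
// 254 so that (N+2)*(N+2) == 65536 matches the buffers allocated in InitCUDA; the
// source positions, force values, mapped surface and Parameters instance are
// hypothetical placeholders.
void ExampleRunSimulation(void* mappedSurface, size_t pitch, Parameters params, bool& running)
{
	const int N = 254;
	InitCUDA();
	while (running)
	{
		// Inject velocity at one cell and density at another, then advance one frame.
		FrameCUDA(N, 64, 128, 128, 64, 5.0f, 0.0f, 50.0f, params);
		// Write the density field into the mapped texture for display.
		cuda_texture_2d(N, mappedSurface, N + 2, N + 2, pitch);
	}
	FreeCUDA();
}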
|
11828d4611a3373f95823a17d1ca5304b6b99df3.cu
|
#include "FluidCUDA.cuh"
// *** Author: Martin Staykov
// *** May, 2015
// ***
// *** References:
// *** Abertay University
// *** rastertek.com
// *** Real-Time Fluid Dynamics for Games, by Jos Stam
// *** NVIDIA CUDA Toolkit
// ***
// *** These functions perform mathematical calculations based on the equations described in my dissertation.
// *** They will not be described in detail here.
#define _(i,j) ((i)+(256)*(j)) // map a 2D (i,j) cell index to the flat 1D array (row width 256)
#define size 65536 // smoke resolution, total number of cells
#define BLOCKS 90
#define THREADS 736
// The d_ prefix (device) shows this is an array, which lives on the GPU.
float * d_dens, * d_dens_prev, * d_u, * d_v, * d_u_prev, * d_v_prev, * d_aux, * d_MC1, * d_MC2;
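// Red-black Gauss-Seidel relaxation: this "red" pass updates only the cells where
// (i+j) is even, and the "black" pass below handles the odd cells, so each pass has
// no intra-pass dependencies and can run fully in parallel.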
__global__ void RedGaussSeidelKernel(int N, float* dest, float* src1, float* src2, float C1, float C2)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int j = (int) index / (N+2);
int i = (int) index - (j*(N+2));
if (i<1 || i>N || j<1 || j>N) return;
if ((i + j)%2 == 0)
{
dest[_(i,j)] = (src1[_(i,j)] + C1*(src2[_(i-1,j)] + src2[_(i+1,j)] + src2[_(i,j-1)] + src2[_(i,j+1)]))/C2;
}
}
__global__ void BlackGaussSeidelKernel(int N, float* dest, float* src1, float* src2, float C1, float C2)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int j = (int) index / (N+2);
int i = (int) index - (j*(N+2));
if (i<1 || i>N || j<1 || j>N) return;
if ((i + j)%2 != 0)
{
dest[_(i,j)] = (src1[_(i,j)] + C1*(src2[_(i-1,j)] + src2[_(i+1,j)] + src2[_(i,j-1)] + src2[_(i,j+1)]))/C2;
}
}
__global__ void JacobiKernel(int N, float* dest, float* src1, float* src2, float C1, float C2)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int j = (int) index / (N+2);
int i = (int) index - (j*(N+2));
if (i<1 || i>N || j<1 || j>N) return;
dest[_(i,j)] = (src1[_(i,j)] + C1*(src2[_(i-1,j)] + src2[_(i+1,j)] + src2[_(i,j-1)] + src2[_(i,j+1)]))/C2;
}
__global__ void AdvectKernel(int N, float* d, float* d0, float* u, float* v, float dt, bool bBackward)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int j = (int) index / (N+2);
int i = (int) index - (j*(N+2));
int i0, j0, i1, j1;
float x, y, s0, t0, s1, t1, dt0;
// Scale the velocity into grid space.
float dx = 1.0f/N;
dx = 1/dx;
dt0 = dt*dx;
if (i<1 || i>N || j<1 || j>N) return;
// The backwards step is used for the MacCormack advection.
if (bBackward)
{
x = i - dt0*u[_(i,j)];
y = j - dt0*v[_(i,j)];
}
else
{
x = i + dt0*u[_(i,j)];
y = j + dt0*v[_(i,j)];
}
if (x<0.5f) x = 0.5f;
if (x>N+0.5f) x = N+0.5f;
i0 = (int)x;
i1 = i0+1;
if (y<0.5f) y = 0.5f;
if (y>N+0.5f) y = N+0.5f;
j0 = (int)y;
j1 = j0+1;
s1 = x-i0;
s0 = 1-s1;
t1 = y-j0;
t0 = 1-t1;
// Interpolate.
d[_(i,j)] = s0*( t0*d0[_(i0,j0)] + t1*d0[_(i0,j1)] ) + s1*( t0*d0[_(i1,j0)] + t1*d0[_(i1,j1)] );
}
__global__ void MacCormackKernel(int N, float* dest, float* d0, float* MC1, float* MC2, float* u, float* v, float dt)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int j = (int) index / (N+2);
int i = (int) index - (j*(N+2));
int i0, j0, i1, j1;
float x, y, s0, t0, s1, t1, dt0;
// Scale the velocity into grid space.
float dx = 1.0f/N;
dx = 1/dx;
dt0 = dt*dx;
if (i<1 || i>N || j<1 || j>N) return;
x = i - dt0*u[_(i,j)];
y = j - dt0*v[_(i,j)];
if (x<0.5f) x = 0.5f;
if (x>N+0.5f) x = N+0.5f;
i0 = (int)x;
i1 = i0+1;
if (y<0.5f) y = 0.5f;
if (y>N+0.5f) y = N+0.5f;
j0 = (int)y;
j1 = j0+1;
// Get the values of nodes that contribute to the interpolated value.
float r0 = d0[_(i0, j0)];
float r1 = d0[_(i1, j0)];
float r2 = d0[_(i0, j1)];
float r3 = d0[_(i1, j1)];
float result = MC1[_(i,j)] + 0.5f*(d0[_(i,j)] - MC2[_(i,j)]);
float min = (r0 > r1) ? r1 : r0;
min = (min > r2) ? r2 : min;
min = (min > r3) ? r3 : min;
float max = (r0 < r1) ? r1 : r0;
max = (max < r2) ? r2 : max;
max = (max < r3) ? r3 : max;
// Clamp the result so that the scheme stays stable.
// If it falls outside the two extrema, revert to the result of the ordinary semi-Lagrangian advection.
// The raw extrema appear to produce errors for unknown reasons, so the comparison is offset by a small epsilon,
// which makes the clamp trigger slightly earlier. Too large an epsilon produces tearing artifacts;
// too small and the result looks fine but blurred, which defeats the purpose of the MacCormack scheme: extra detail.
if (result >= (max - 0.02f)) result = MC1[_(i,j)];//max;
if (result <= (min + 0.02f)) result = MC1[_(i,j)];//min;
dest[_(i,j)] = result;
}
__global__ void DivergenceKernel(int N, float* u, float* v, float* p, float* div)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int j = (int) index / (N+2);
int i = (int) index - (j*(N+2));
if (i<1 || i>N || j<1 || j>N) return;
float h = 1.0f/N;
div[_(i,j)] = -0.5f*h*(u[_(i+1,j)] - u[_(i-1,j)] + v[_(i,j+1)] - v[_(i,j-1)]);
p[_(i,j)] = 0;
}
__global__ void SubtractGradientKernel(int N, float* u, float* v, float* p)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int j = (int) index / (N+2);
int i = (int) index - (j*(N+2));
if (i<1 || i>N || j<1 || j>N) return;
float h = 1.0f/N;
u[_(i,j)] -= 0.5f*(p[_(i+1,j)] - p[_(i-1,j)])/h;
v[_(i,j)] -= 0.5f*(p[_(i,j+1)] - p[_(i,j-1)])/h;
}
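// Enforces boundary conditions along the four grid edges: b==1 negates the value at the
// left/right walls (horizontal velocity), b==2 negates it at the top/bottom walls
// (vertical velocity), and any other b copies the adjacent interior cell (used for
// density and pressure). The four corners are averaged from their two neighbours.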
__global__ void BoundaryKernel ( int N, int b, float * x)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int i = index + 1;
if (i<=N)
{
x[_(0 ,i)] = b==1 ? -x[_(1,i)] : x[_(1,i)];
x[_(N+1,i)] = b==1 ? -x[_(N,i)] : x[_(N,i)];
x[_(i,0 )] = b==2 ? -x[_(i,1)] : x[_(i,1)];
x[_(i,N+1)] = b==2 ? -x[_(i,N)] : x[_(i,N)];
if (i==1)
{
x[_(0 ,0 )] = 0.5f*(x[_(1,0 )]+x[_(0 ,1)]);
x[_(0 ,N+1)] = 0.5f*(x[_(1,N+1)]+x[_(0 ,N )]);
x[_(N+1,0 )] = 0.5f*(x[_(N,0 )]+x[_(N+1,1)]);
x[_(N+1,N+1)] = 0.5f*(x[_(N,N+1)]+x[_(N+1,N )]);
}
}
}
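// Runs on a single thread and injects dt*value into cell (i,j) and its four
// neighbours along the i-axis.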
__global__ void AddSourceKernel(int N, float* x, int i, int j, float value, float dt)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index != 0) return;
x[_(i, j)] += dt*value;
x[_(i-1, j)] += dt*value;
x[_(i+1, j)] += dt*value;
x[_(i+2, j)] += dt*value;
x[_(i-2, j)] += dt*value;
}
__global__ void AddSourceMultipleKernel(int N, float * a, float * b, float dt)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int j = (int) index / (N+2);
int i = (int) index - (j*(N+2));
if (i<1 || i>N || j<1 || j>N) return;
a[index] += dt*b[index];
}
__global__ void textureKernel(int N, unsigned char *surface, size_t pitch, float * a)
{
int y = (int) (threadIdx.x + blockIdx.x * blockDim.x) / (N+2);
int x = (int) (threadIdx.x + blockIdx.x * blockDim.x) - (y*(N+2));
float *pixel;
// In the case where, due to quantization into whole thread blocks, there are more
// threads than pixels, skip threads that don't correspond to valid pixels
// (cells on the x==0 or y==0 boundary are skipped as well).
if (x >= N+2 || y >= N+2 || (!x) || (!y)) return;
else
{
// get a pointer to the pixel at (x,y)
pixel = (float *)(surface + y*pitch) + 1*x;
float pvalue = a[_(x, y)];
// populate it
pixel[0] = pvalue;
}
}
__global__ void BuoyancyKernel(int N, float * dest, float * src, float kappa, float sigma)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int j = (int) index / (N+2);
int i = (int) index - (j*(N+2));
if (i<1 || i>N || j<1 || j>N) return;
float source = src[_(i, j)];
dest[_(i, j)] = sigma*source - kappa*source;
}
__global__ void CurlKernel(int N, float * u, float * v, float * dest, float dt)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int j = (int) index / (N+2);
int i = (int) index - (j*(N+2));
if (i<1 || i>N || j<1 || j>N) return;
float h = 1.0f/N;//(2*N);
float du_dy;
float dv_dx;
du_dy = (u[_(i, j+1)] - u[_(i, j-1)]) / h * 0.5f;
dv_dx = (v[_(i+1, j)] - v[_(i-1, j)]) / h * 0.5f;
dest[_(i, j)] = (dv_dx - du_dy);// * h * 0.5f;
}
__global__ void VorticityKernel(int N, float * u, float * v, float * c, float dt, float vort)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int j = (int) index / (N+2);
int i = (int) index - (j*(N+2));
if (i<1 || i>N || j<1 || j>N) return;
float h = 1.0f/N;//(2*N);
float omegaT = (c[_(i, j-1)]);
float omegaB = (c[_(i, j+1)]);
float omegaR = (c[_(i+1, j)]);
float omegaL = (c[_(i-1, j)]);
float comp1 = omegaT - omegaB;
float comp2 = omegaR - omegaL;
float2 force; force.x = comp1; force.y = comp2; force *= 0.5f; force /= h;
force /= (length(force) + 0.00001f);
float2 NN;
NN.x = -c[_(i, j)]*force.y;
NN.y = c[_(i, j)]*force.x;
NN *= vort;
u[_(i, j)] += NN.x * dt;
v[_(i, j)] += NN.y * dt;
}
void InitCUDA()
{
// Allocate all arrays on the device.
if (cudaMalloc((void**)&d_u, size * sizeof(float)) != cudaSuccess)
{
return;
}
if (cudaMalloc((void**)&d_u_prev, size * sizeof(float)) != cudaSuccess)
{
cudaFree(d_u);
return;
}
if (cudaMalloc((void**)&d_v, size * sizeof(float)) != cudaSuccess)
{
cudaFree(d_u);
cudaFree(d_u_prev);
return;
}
if (cudaMalloc((void**)&d_v_prev, size * sizeof(float)) != cudaSuccess)
{
cudaFree(d_u);
cudaFree(d_u_prev);
cudaFree(d_v);
return;
}
if (cudaMalloc((void**)&d_dens, size * sizeof(float)) != cudaSuccess)
{
cudaFree(d_u);
cudaFree(d_u_prev);
cudaFree(d_v);
cudaFree(d_v_prev);
return;
}
if (cudaMalloc((void**)&d_dens_prev, size * sizeof(float)) != cudaSuccess)
{
cudaFree(d_u);
cudaFree(d_u_prev);
cudaFree(d_v);
cudaFree(d_v_prev);
cudaFree(d_dens);
return;
}
if (cudaMalloc((void**)&d_aux, size * sizeof(float)) != cudaSuccess)
{
cudaFree(d_u);
cudaFree(d_u_prev);
cudaFree(d_v);
cudaFree(d_v_prev);
cudaFree(d_dens);
cudaFree(d_dens_prev);
return;
}
if (cudaMalloc((void**)&d_MC1, size * sizeof(float)) != cudaSuccess)
{
cudaFree(d_u);
cudaFree(d_u_prev);
cudaFree(d_v);
cudaFree(d_v_prev);
cudaFree(d_dens);
cudaFree(d_dens_prev);
cudaFree(d_aux);
return;
}
if (cudaMalloc((void**)&d_MC2, size * sizeof(float)) != cudaSuccess)
{
cudaFree(d_u);
cudaFree(d_u_prev);
cudaFree(d_v);
cudaFree(d_v_prev);
cudaFree(d_dens);
cudaFree(d_dens_prev);
cudaFree(d_aux);
cudaFree(d_MC1);
return;
}
// Initialize the arrays to 0.
if (cudaMemset(d_dens, 0, size * sizeof(float)) != cudaSuccess)
{
FreeCUDA();
return;
}
if (cudaMemset(d_dens_prev, 0, size * sizeof(float)) != cudaSuccess)
{
FreeCUDA();
return;
}
if (cudaMemset(d_u, 0, size * sizeof(float)) != cudaSuccess)
{
FreeCUDA();
return;
}
if (cudaMemset(d_u_prev, 0, size * sizeof(float)) != cudaSuccess)
{
FreeCUDA();
return;
}
if (cudaMemset(d_v, 0, size * sizeof(float)) != cudaSuccess)
{
FreeCUDA();
return;
}
if (cudaMemset(d_v_prev, 0, size * sizeof(float)) != cudaSuccess)
{
FreeCUDA();
return;
}
if (cudaMemset(d_aux, 0, size * sizeof(float)) != cudaSuccess)
{
FreeCUDA();
return;
}
if (cudaMemset(d_MC1, 0, size * sizeof(float)) != cudaSuccess)
{
FreeCUDA();
return;
}
if (cudaMemset(d_MC2, 0, size * sizeof(float)) != cudaSuccess)
{
FreeCUDA();
return;
}
// Exit and return void.
return;
}
void FreeCUDA()
{
cudaFree(d_dens);
cudaFree(d_dens_prev);
cudaFree(d_u);
cudaFree(d_u_prev);
cudaFree(d_v);
cudaFree(d_v_prev);
cudaFree(d_aux);
cudaFree(d_MC1);
cudaFree(d_MC2);
cudaDeviceReset();
}
void DiffuseCUDA(int N, float b, float * x, float * x0, float diff, float dt, int iterations, bool isJacobi)
{
float a = dt*diff*(float)N*(float)N;
for (int k=0; k<iterations; k++)
{
if (isJacobi)
{
JacobiKernel<<<BLOCKS, THREADS>>>(N, d_aux, x0, x, a, 1+4*a);
cudaDeviceSynchronize();
x = d_aux;
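// x now points at d_aux, so later iterations relax d_aux in place; the buffer
// originally passed in as x is not written inside this loop.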
}
else
{
RedGaussSeidelKernel<<<BLOCKS, THREADS>>>(N, x, x0, x, a, 1+4*a);
cudaDeviceSynchronize();
BlackGaussSeidelKernel<<<BLOCKS, THREADS>>>(N, x, x0, x, a, 1+4*a);
}
cudaDeviceSynchronize();
BoundaryKernel<<<1, N>>>(N, b, x);
cudaDeviceSynchronize();
}
}
void AdvectCUDA(int N, int b, float* d, float* d0, float* u, float* v, float dt, bool bBackward)
{
AdvectKernel<<<BLOCKS, THREADS>>>(N, d, d0, u, v, dt, bBackward);
cudaDeviceSynchronize();
BoundaryKernel<<<1, N>>>(N, b, d);
cudaDeviceSynchronize();
}
void AdvectMacCormackCUDA(int N, int b, float* d, float* d0, float* u, float* v, float dt)
{
AdvectKernel<<<BLOCKS, THREADS>>>(N, d_MC1, d0, u, v, dt, true);
cudaDeviceSynchronize();
BoundaryKernel<<<1, N>>>(N, b, d_MC1);
cudaDeviceSynchronize();
AdvectKernel<<<BLOCKS, THREADS>>>(N, d_MC2, d_MC1, u, v, dt, false);
cudaDeviceSynchronize();
BoundaryKernel<<<1, N>>>(N, b, d_MC2);
cudaDeviceSynchronize();
MacCormackKernel<<<BLOCKS, THREADS>>>(N, d, d0, d_MC1, d_MC2, u, v, dt);
cudaDeviceSynchronize();
BoundaryKernel<<<1, N>>>(N, b, d);
cudaDeviceSynchronize();
}
void ProjectCUDA(int N, float* u, float* v, float* p, float* div, int iterations, bool isJacobi)
{
DivergenceKernel<<<BLOCKS, THREADS>>>(N, u, v, p, div);
cudaDeviceSynchronize();
BoundaryKernel<<<1, N>>>(N, 0, div);
BoundaryKernel<<<1, N>>>(N, 0, p);
cudaDeviceSynchronize();
for ( int k=0; k<iterations; k++ )
{
if (isJacobi)
{
JacobiKernel<<<BLOCKS, THREADS>>>(N, d_aux, div, p, 1, 4);
cudaDeviceSynchronize();
p = d_aux;
}
else
{
RedGaussSeidelKernel<<<BLOCKS, THREADS>>>(N, p, div, p, 1, 4);
cudaDeviceSynchronize();
BlackGaussSeidelKernel<<<BLOCKS, THREADS>>>(N, p, div, p, 1, 4);
}
cudaDeviceSynchronize();
BoundaryKernel<<<1, N>>>(N, 0, p);
cudaDeviceSynchronize();
}
SubtractGradientKernel<<<BLOCKS, THREADS>>>(N, u, v, p);
cudaDeviceSynchronize();
BoundaryKernel<<<1, N>>>(N, 1, u);
BoundaryKernel<<<1, N>>>(N, 2, v);
cudaDeviceSynchronize();
}
void FrameCUDA(int N, int source_vel_i, int source_vel_j, int source_dens_i, int source_dens_j, float source_u_value, float source_v_value, float source_dens_value, Parameters a)
{
float * tempPtr; // used for swapping arrays
// Velocity step.
AddSourceKernel<<<1, 1>>>(N, d_u, source_vel_i, source_vel_j, source_u_value, a.dt);
AddSourceKernel<<<1, 1>>>(N, d_v, source_vel_i, source_vel_j, source_v_value, a.dt);
cudaDeviceSynchronize();
if (a.bViscosity)
{
if (a.enumIterativeMethod == Jacobi) DiffuseCUDA(N, 1, d_u_prev, d_u, a.fViscFactor, a.dt, a.iPoissonIterations, true);
else if (a.enumIterativeMethod == GaussSeidel) DiffuseCUDA(N, 1, d_u_prev, d_u, a.fViscFactor, a.dt, a.iPoissonIterations, false);
if (a.enumIterativeMethod == Jacobi) DiffuseCUDA(N, 2, d_v_prev, d_v, a.fViscFactor, a.dt, a.iPoissonIterations, true);
else if (a.enumIterativeMethod == GaussSeidel) DiffuseCUDA(N, 2, d_v_prev, d_v, a.fViscFactor, a.dt, a.iPoissonIterations, false);
tempPtr = d_u; d_u = d_u_prev; d_u_prev = tempPtr;
tempPtr = d_v; d_v = d_v_prev; d_v_prev = tempPtr;
if (a.enumIterativeMethod == Jacobi) ProjectCUDA(N, d_u, d_v, d_u_prev, d_v_prev, a.iPoissonIterations, true);
else if (a.enumIterativeMethod == GaussSeidel) ProjectCUDA(N, d_u, d_v, d_u_prev, d_v_prev, a.iPoissonIterations, false);
}
if (a.enumCurrentAdvect == SingleSL)
{
AdvectCUDA(N, 1, d_u_prev, d_u, d_u, d_v, a.dt, true);
AdvectCUDA(N, 2, d_v_prev, d_v, d_u, d_v, a.dt, true);
}
else if (a.enumCurrentAdvect == SemiMacCormack)
{
AdvectMacCormackCUDA(N, 1, d_u_prev, d_u, d_u, d_v, a.dt);
AdvectMacCormackCUDA(N, 2, d_v_prev, d_v, d_u, d_v, a.dt);
}
if (a.bVorticity)
{
// *** VORTICITY CONFINEMENT ***
CurlKernel<<<BLOCKS, THREADS>>>(N, d_u_prev, d_v_prev, d_aux, a.dt);
cudaDeviceSynchronize();
VorticityKernel<<<BLOCKS, THREADS>>>(N, d_u_prev, d_v_prev, d_aux, a.dt, a.fVortStrength);
cudaDeviceSynchronize();
BoundaryKernel<<<1, N>>>(N, 1, d_u_prev);
BoundaryKernel<<<1, N>>>(N, 2, d_v_prev);
cudaDeviceSynchronize();
}
if (a.bBuoyancy)
{
BuoyancyKernel<<<BLOCKS, THREADS>>>(N, d_v, d_dens, a.fKappa, a.fSigma);
cudaDeviceSynchronize();
AddSourceMultipleKernel<<<BLOCKS, THREADS>>>(N, d_v_prev, d_v, a.dt);
cudaDeviceSynchronize();
BoundaryKernel<<<1, N>>>(N, 2, d_v_prev);
cudaDeviceSynchronize();
}
if (a.enumIterativeMethod == Jacobi) ProjectCUDA(N, d_u_prev, d_v_prev, d_u, d_v, a.iPoissonIterations, true);
else if (a.enumIterativeMethod == GaussSeidel) ProjectCUDA(N, d_u_prev, d_v_prev, d_u, d_v, a.iPoissonIterations, false);
tempPtr = d_u; d_u = d_u_prev; d_u_prev = tempPtr;
tempPtr = d_v; d_v = d_v_prev; d_v_prev = tempPtr;
tempPtr = d_dens; d_dens = d_dens_prev; d_dens_prev = tempPtr;
// Density step.
AddSourceKernel<<<1, 1>>>(N, d_dens_prev, source_dens_i, source_dens_j, source_dens_value, a.dt);
AddSourceKernel<<<1, 1>>>(N, d_dens_prev, 128, 248, 50, a.dt);
cudaDeviceSynchronize();
if (a.bDiffusion)
{
if (a.enumIterativeMethod == Jacobi) DiffuseCUDA(N, 0, d_dens, d_dens_prev, a.fDiffFactor, a.dt, a.iPoissonIterations, true);
else if (a.enumIterativeMethod == GaussSeidel) DiffuseCUDA(N, 0, d_dens, d_dens_prev, a.fDiffFactor, a.dt, a.iPoissonIterations, false);
tempPtr = d_dens; d_dens = d_dens_prev; d_dens_prev = tempPtr;
}
if (a.enumCurrentAdvect == SingleSL) AdvectCUDA(N, 0, d_dens, d_dens_prev, d_u, d_v, a.dt, true);
else if (a.enumCurrentAdvect == SemiMacCormack) AdvectMacCormackCUDA(N, 0, d_dens, d_dens_prev, d_u, d_v, a.dt);
return;
}
void cuda_texture_2d(int N, void *surface, int width, int height, size_t pitch)
{
textureKernel<<<BLOCKS, THREADS>>>(N, (unsigned char *)surface, pitch, d_dens);
}
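// --- Minimal host-side usage sketch ---
// Added for illustration only; it is not part of the original project. It shows how
// the host API above fits together in a render loop. The grid size N is assumed to be
// 254 so that (N+2)*(N+2) == 65536 matches the buffers allocated in InitCUDA; the
// source positions, force values, mapped surface and Parameters instance are
// hypothetical placeholders.
void ExampleRunSimulation(void* mappedSurface, size_t pitch, Parameters params, bool& running)
{
	const int N = 254;
	InitCUDA();
	while (running)
	{
		// Inject velocity at one cell and density at another, then advance one frame.
		FrameCUDA(N, 64, 128, 128, 64, 5.0f, 0.0f, 50.0f, params);
		// Write the density field into the mapped texture for display.
		cuda_texture_2d(N, mappedSurface, N + 2, N + 2, pitch);
	}
	FreeCUDA();
}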
|
c010d63a9f42c1cd1ab5f45445adfa97ebf2d24f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Prerequisites.cuh"
#include "Angles.cuh"
#include "Helper.cuh"
#include "Relion.cuh"
namespace gtom
{
//template<uint TpB> __global__ void Project2Dto2DKernel(cudaTex t_volumeRe, cudaTex t_volumeIm, uint dimvolume, tcomplex* d_proj, uint dimproj, uint rmax, uint rmax2);
template<uint ndims, bool decentered> __global__ void Backproject3DtoNDKernel(tcomplex* d_volumeft, tfloat* d_volumeweights, uint dimvolume, tcomplex* d_projft, tfloat* d_projweights, uint dimproj, size_t elementsproj, glm::mat3* d_rotations, int* d_ivolume, glm::mat2 magnification, uint rmax, int rmax2);
void d_rlnBackproject(tcomplex* d_volumeft, tfloat* d_volumeweights, int3 dimsvolume, tcomplex* d_projft, tfloat* d_projweights, int3 dimsproj, uint rmax, tfloat3* h_angles, int* h_ivolume, float3 magnification, float supersample, bool outputdecentered, uint batch)
{
glm::mat3* d_matrices;
int* d_ivolume = NULL;
{
glm::mat3* h_matrices = (glm::mat3*)malloc(sizeof(glm::mat3) * batch);
for (int i = 0; i < batch; i++)
h_matrices[i] = glm::transpose(Matrix3Euler(h_angles[i])) * Matrix3Scale(supersample);
d_matrices = (glm::mat3*)CudaMallocFromHostArray(h_matrices, sizeof(glm::mat3) * batch);
free(h_matrices);
if (h_ivolume != NULL)
d_ivolume = (int*)CudaMallocFromHostArray(h_ivolume, sizeof(int) * batch);
}
d_rlnBackproject(d_volumeft, d_volumeweights, dimsvolume, d_projft, d_projweights, dimsproj, rmax, d_matrices, d_ivolume, magnification, outputdecentered, batch);
{
hipFree(d_matrices);
if (d_ivolume != NULL)
hipFree(d_ivolume);
}
}
void d_rlnBackproject(tcomplex* d_volumeft, tfloat* d_volumeweights, int3 dimsvolume, tcomplex* d_projft, tfloat* d_projweights, int3 dimsproj, uint rmax, glm::mat3* d_matrices, int* d_ivolume, float3 magnification, bool outputdecentered, uint batch)
{
uint ndimsvolume = DimensionCount(dimsvolume);
uint ndimsproj = DimensionCount(dimsproj);
if (ndimsvolume < ndimsproj)
throw;
rmax = tmin(rmax, dimsproj.x / 2);
glm::mat2 m_magnification = Matrix2Rotation(-magnification.z) * Matrix2Scale(tfloat2(magnification.x, magnification.y)) * Matrix2Rotation(magnification.z);
if (ndimsvolume == 3)
{
dim3 grid = dim3(1, batch, 1);
uint elements = ElementsFFT(dimsproj);
if (ndimsproj == 2)
{
if (outputdecentered)
Backproject3DtoNDKernel<2, true> << <grid, 128 >> > (d_volumeft, d_volumeweights, dimsvolume.x, d_projft, d_projweights, dimsproj.x, elements, d_matrices, d_ivolume, m_magnification, rmax, rmax * rmax);
else
Backproject3DtoNDKernel<2, false> << <grid, 128 >> > (d_volumeft, d_volumeweights, dimsvolume.x, d_projft, d_projweights, dimsproj.x, elements, d_matrices, d_ivolume, m_magnification, rmax, rmax * rmax);
}
else if (ndimsproj == 3)
{
if (outputdecentered)
Backproject3DtoNDKernel<3, true> << <grid, 128 >> > (d_volumeft, d_volumeweights, dimsvolume.x, d_projft, d_projweights, dimsproj.x, elements, d_matrices, d_ivolume, m_magnification, rmax, rmax * rmax);
else
Backproject3DtoNDKernel<3, false> << <grid, 128 >> > (d_volumeft, d_volumeweights, dimsvolume.x, d_projft, d_projweights, dimsproj.x, elements, d_matrices, d_ivolume, m_magnification, rmax, rmax * rmax);
}
}
else
{
/*hipMemcpyToSymbol(c_backmatrices, d_matrices, batch * sizeof(glm::mat3), 0, hipMemcpyDeviceToDevice);
dim3 grid = dim3(1, batch, 1);
uint elements = ElementsFFT(dimsproj);
uint TpB = 1 << tmin(7, tmax(7, (uint)(log(elements / 4.0) / log(2.0))));
if (TpB == 32)
Project2Dto2DKernel<32> << <grid, 32 >> > (t_volumeRe, t_volumeIm, dimsvolume.x, d_proj, dimsproj.x, rmax, rmax * rmax);
else if (TpB == 64)
Project2Dto2DKernel<64> << <grid, 64 >> > (t_volumeRe, t_volumeIm, dimsvolume.x, d_proj, dimsproj.x, rmax, rmax * rmax);
else if (TpB == 128)
Project2Dto2DKernel<128> << <grid, 128 >> > (t_volumeRe, t_volumeIm, dimsvolume.x, d_proj, dimsproj.x, rmax, rmax * rmax);
else if (TpB == 256)
Project2Dto2DKernel<256> << <grid, 256 >> > (t_volumeRe, t_volumeIm, dimsvolume.x, d_proj, dimsproj.x, rmax, rmax * rmax);
else
throw;*/
}
}
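// Scatter-style backprojection: each block handles one projection (blockIdx.y). Every
// Fourier sample is rotated into the volume, Hermitian symmetry maps x<0 samples onto
// their conjugates, and the value and weight are accumulated with trilinear atomicAdds.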
template<uint ndims, bool decentered> __global__ void Backproject3DtoNDKernel(tcomplex* d_volumeft, tfloat* d_volumeweights, uint dimvolume, tcomplex* d_projft, tfloat* d_projweights, uint dimproj, size_t elementsproj, glm::mat3* d_rotations, int* d_ivolume, glm::mat2 magnification, uint rmax, int rmax2)
{
d_projft += elementsproj * blockIdx.y;
d_projweights += elementsproj * blockIdx.y;
if (d_ivolume != NULL)
{
int ivolume = d_ivolume[blockIdx.y];
d_volumeft += ElementsFFT1(dimvolume) * dimvolume * dimvolume * ivolume;
d_volumeweights += ElementsFFT1(dimvolume) * dimvolume * dimvolume * ivolume;
}
uint slice = ndims == 3 ? ElementsFFT1(dimproj) * dimproj : 1;
uint dimft = ElementsFFT1(dimproj);
uint dimvolumeft = ElementsFFT1(dimvolume);
glm::mat3 rotation = d_rotations[blockIdx.y];
for (uint id = threadIdx.x; id < elementsproj; id += blockDim.x)
{
uint idx = id % dimft;
uint idy = (ndims == 3 ? id % slice : id) / dimft;
uint idz = ndims == 3 ? id / slice : 0;
int x = idx;
int y = idy <= dimproj / 2 ? idy : (int)idy - (int)dimproj;
int z = ndims == 3 ? (idz <= dimproj / 2 ? idz : (int)idz - (int)dimproj) : 0;
glm::vec2 posmag = glm::vec2(x, y);
if (ndims == 2)
posmag = magnification * posmag;
int r2 = ndims == 3 ? (z * z + y * y + x * x) : (posmag.y * posmag.y + posmag.x * posmag.x);
if (r2 > rmax2)
continue;
glm::vec3 pos = glm::vec3(posmag.x, posmag.y, z);
pos = rotation * pos;
// Only asymmetric half is stored
float is_neg_x = 1.0f;
if (pos.x + 1e-5f < 0)
{
// Get complex conjugated hermitian symmetry pair
pos.x = abs(pos.x);
pos.y = -pos.y;
pos.z = -pos.z;
is_neg_x = -1.0f;
}
// Trilinear interpolation
int x0 = floor(pos.x + 1e-5f);
pos.x -= x0;
int x1 = x0 + 1;
int y0 = floor(pos.y);
pos.y -= y0;
y0 += dimvolume / 2;
int y1 = y0 + 1;
int z0 = floor(pos.z);
pos.z -= z0;
z0 += dimvolume / 2;
int z1 = z0 + 1;
float c0 = 1.0f - pos.z;
float c1 = pos.z;
float c00 = (1.0f - pos.y) * c0;
float c10 = pos.y * c0;
float c01 = (1.0f - pos.y) * c1;
float c11 = pos.y * c1;
float c000 = (1.0f - pos.x) * c00;
float c100 = pos.x * c00;
float c010 = (1.0f - pos.x) * c10;
float c110 = pos.x * c10;
float c001 = (1.0f - pos.x) * c01;
float c101 = pos.x * c01;
float c011 = (1.0f - pos.x) * c11;
float c111 = pos.x * c11;
tcomplex val = d_projft[id];
val.y *= is_neg_x;
tfloat weight = d_projweights[id];
if (decentered)
{
/*z0 = z0 < dimvolume / 2 ? z0 + dimvolume / 2 : z0 - dimvolume / 2;
z1 = z1 < dimvolume / 2 ? z1 + dimvolume / 2 : z1 - dimvolume / 2;
y0 = y0 < dimvolume / 2 ? y0 + dimvolume / 2 : y0 - dimvolume / 2;
y1 = y1 < dimvolume / 2 ? y1 + dimvolume / 2 : y1 - dimvolume / 2;*/
z0 = (z0 + dimvolume / 2) % dimvolume;
z1 = (z1 + dimvolume / 2) % dimvolume;
y0 = (y0 + dimvolume / 2) % dimvolume;
y1 = (y1 + dimvolume / 2) % dimvolume;
}
atomicAdd((tfloat*)(d_volumeft + (z0 * dimvolume + y0) * dimvolumeft + x0), c000 * val.x);
atomicAdd((tfloat*)(d_volumeft + (z0 * dimvolume + y0) * dimvolumeft + x0) + 1, c000 * val.y);
atomicAdd((tfloat*)(d_volumeweights + (z0 * dimvolume + y0) * dimvolumeft + x0), c000 * weight);
atomicAdd((tfloat*)(d_volumeft + (z0 * dimvolume + y0) * dimvolumeft + x1), c100 * val.x);
atomicAdd((tfloat*)(d_volumeft + (z0 * dimvolume + y0) * dimvolumeft + x1) + 1, c100 * val.y);
atomicAdd((tfloat*)(d_volumeweights + (z0 * dimvolume + y0) * dimvolumeft + x1), c100 * weight);
atomicAdd((tfloat*)(d_volumeft + (z0 * dimvolume + y1) * dimvolumeft + x0), c010 * val.x);
atomicAdd((tfloat*)(d_volumeft + (z0 * dimvolume + y1) * dimvolumeft + x0) + 1, c010 * val.y);
atomicAdd((tfloat*)(d_volumeweights + (z0 * dimvolume + y1) * dimvolumeft + x0), c010 * weight);
atomicAdd((tfloat*)(d_volumeft + (z0 * dimvolume + y1) * dimvolumeft + x1), c110 * val.x);
atomicAdd((tfloat*)(d_volumeft + (z0 * dimvolume + y1) * dimvolumeft + x1) + 1, c110 * val.y);
atomicAdd((tfloat*)(d_volumeweights + (z0 * dimvolume + y1) * dimvolumeft + x1), c110 * weight);
atomicAdd((tfloat*)(d_volumeft + (z1 * dimvolume + y0) * dimvolumeft + x0), c001 * val.x);
atomicAdd((tfloat*)(d_volumeft + (z1 * dimvolume + y0) * dimvolumeft + x0) + 1, c001 * val.y);
atomicAdd((tfloat*)(d_volumeweights + (z1 * dimvolume + y0) * dimvolumeft + x0), c001 * weight);
atomicAdd((tfloat*)(d_volumeft + (z1 * dimvolume + y0) * dimvolumeft + x1), c101 * val.x);
atomicAdd((tfloat*)(d_volumeft + (z1 * dimvolume + y0) * dimvolumeft + x1) + 1, c101 * val.y);
atomicAdd((tfloat*)(d_volumeweights + (z1 * dimvolume + y0) * dimvolumeft + x1), c101 * weight);
atomicAdd((tfloat*)(d_volumeft + (z1 * dimvolume + y1) * dimvolumeft + x0), c011 * val.x);
atomicAdd((tfloat*)(d_volumeft + (z1 * dimvolume + y1) * dimvolumeft + x0) + 1, c011 * val.y);
atomicAdd((tfloat*)(d_volumeweights + (z1 * dimvolume + y1) * dimvolumeft + x0), c011 * weight);
atomicAdd((tfloat*)(d_volumeft + (z1 * dimvolume + y1) * dimvolumeft + x1), c111 * val.x);
atomicAdd((tfloat*)(d_volumeft + (z1 * dimvolume + y1) * dimvolumeft + x1) + 1, c111 * val.y);
atomicAdd((tfloat*)(d_volumeweights + (z1 * dimvolume + y1) * dimvolumeft + x1), c111 * weight);
}
}
}
|
c010d63a9f42c1cd1ab5f45445adfa97ebf2d24f.cu
|
#include "Prerequisites.cuh"
#include "Angles.cuh"
#include "Helper.cuh"
#include "Relion.cuh"
namespace gtom
{
//template<uint TpB> __global__ void Project2Dto2DKernel(cudaTex t_volumeRe, cudaTex t_volumeIm, uint dimvolume, tcomplex* d_proj, uint dimproj, uint rmax, uint rmax2);
template<uint ndims, bool decentered> __global__ void Backproject3DtoNDKernel(tcomplex* d_volumeft, tfloat* d_volumeweights, uint dimvolume, tcomplex* d_projft, tfloat* d_projweights, uint dimproj, size_t elementsproj, glm::mat3* d_rotations, int* d_ivolume, glm::mat2 magnification, uint rmax, int rmax2);
void d_rlnBackproject(tcomplex* d_volumeft, tfloat* d_volumeweights, int3 dimsvolume, tcomplex* d_projft, tfloat* d_projweights, int3 dimsproj, uint rmax, tfloat3* h_angles, int* h_ivolume, float3 magnification, float supersample, bool outputdecentered, uint batch)
{
glm::mat3* d_matrices;
int* d_ivolume = NULL;
{
glm::mat3* h_matrices = (glm::mat3*)malloc(sizeof(glm::mat3) * batch);
for (int i = 0; i < batch; i++)
h_matrices[i] = glm::transpose(Matrix3Euler(h_angles[i])) * Matrix3Scale(supersample);
d_matrices = (glm::mat3*)CudaMallocFromHostArray(h_matrices, sizeof(glm::mat3) * batch);
free(h_matrices);
if (h_ivolume != NULL)
d_ivolume = (int*)CudaMallocFromHostArray(h_ivolume, sizeof(int) * batch);
}
d_rlnBackproject(d_volumeft, d_volumeweights, dimsvolume, d_projft, d_projweights, dimsproj, rmax, d_matrices, d_ivolume, magnification, outputdecentered, batch);
{
cudaFree(d_matrices);
if (d_ivolume != NULL)
cudaFree(d_ivolume);
}
}
void d_rlnBackproject(tcomplex* d_volumeft, tfloat* d_volumeweights, int3 dimsvolume, tcomplex* d_projft, tfloat* d_projweights, int3 dimsproj, uint rmax, glm::mat3* d_matrices, int* d_ivolume, float3 magnification, bool outputdecentered, uint batch)
{
uint ndimsvolume = DimensionCount(dimsvolume);
uint ndimsproj = DimensionCount(dimsproj);
if (ndimsvolume < ndimsproj)
throw;
rmax = tmin(rmax, dimsproj.x / 2);
glm::mat2 m_magnification = Matrix2Rotation(-magnification.z) * Matrix2Scale(tfloat2(magnification.x, magnification.y)) * Matrix2Rotation(magnification.z);
if (ndimsvolume == 3)
{
dim3 grid = dim3(1, batch, 1);
uint elements = ElementsFFT(dimsproj);
if (ndimsproj == 2)
{
if (outputdecentered)
Backproject3DtoNDKernel<2, true> << <grid, 128 >> > (d_volumeft, d_volumeweights, dimsvolume.x, d_projft, d_projweights, dimsproj.x, elements, d_matrices, d_ivolume, m_magnification, rmax, rmax * rmax);
else
Backproject3DtoNDKernel<2, false> << <grid, 128 >> > (d_volumeft, d_volumeweights, dimsvolume.x, d_projft, d_projweights, dimsproj.x, elements, d_matrices, d_ivolume, m_magnification, rmax, rmax * rmax);
}
else if (ndimsproj == 3)
{
if (outputdecentered)
Backproject3DtoNDKernel<3, true> << <grid, 128 >> > (d_volumeft, d_volumeweights, dimsvolume.x, d_projft, d_projweights, dimsproj.x, elements, d_matrices, d_ivolume, m_magnification, rmax, rmax * rmax);
else
Backproject3DtoNDKernel<3, false> << <grid, 128 >> > (d_volumeft, d_volumeweights, dimsvolume.x, d_projft, d_projweights, dimsproj.x, elements, d_matrices, d_ivolume, m_magnification, rmax, rmax * rmax);
}
}
else
{
/*cudaMemcpyToSymbol(c_backmatrices, d_matrices, batch * sizeof(glm::mat3), 0, cudaMemcpyDeviceToDevice);
dim3 grid = dim3(1, batch, 1);
uint elements = ElementsFFT(dimsproj);
uint TpB = 1 << tmin(7, tmax(7, (uint)(log(elements / 4.0) / log(2.0))));
if (TpB == 32)
Project2Dto2DKernel<32> << <grid, 32 >> > (t_volumeRe, t_volumeIm, dimsvolume.x, d_proj, dimsproj.x, rmax, rmax * rmax);
else if (TpB == 64)
Project2Dto2DKernel<64> << <grid, 64 >> > (t_volumeRe, t_volumeIm, dimsvolume.x, d_proj, dimsproj.x, rmax, rmax * rmax);
else if (TpB == 128)
Project2Dto2DKernel<128> << <grid, 128 >> > (t_volumeRe, t_volumeIm, dimsvolume.x, d_proj, dimsproj.x, rmax, rmax * rmax);
else if (TpB == 256)
Project2Dto2DKernel<256> << <grid, 256 >> > (t_volumeRe, t_volumeIm, dimsvolume.x, d_proj, dimsproj.x, rmax, rmax * rmax);
else
throw;*/
}
}
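// Scatter-style backprojection: each block handles one projection (blockIdx.y). Every
// Fourier sample is rotated into the volume, Hermitian symmetry maps x<0 samples onto
// their conjugates, and the value and weight are accumulated with trilinear atomicAdds.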
template<uint ndims, bool decentered> __global__ void Backproject3DtoNDKernel(tcomplex* d_volumeft, tfloat* d_volumeweights, uint dimvolume, tcomplex* d_projft, tfloat* d_projweights, uint dimproj, size_t elementsproj, glm::mat3* d_rotations, int* d_ivolume, glm::mat2 magnification, uint rmax, int rmax2)
{
d_projft += elementsproj * blockIdx.y;
d_projweights += elementsproj * blockIdx.y;
if (d_ivolume != NULL)
{
int ivolume = d_ivolume[blockIdx.y];
d_volumeft += ElementsFFT1(dimvolume) * dimvolume * dimvolume * ivolume;
d_volumeweights += ElementsFFT1(dimvolume) * dimvolume * dimvolume * ivolume;
}
uint slice = ndims == 3 ? ElementsFFT1(dimproj) * dimproj : 1;
uint dimft = ElementsFFT1(dimproj);
uint dimvolumeft = ElementsFFT1(dimvolume);
glm::mat3 rotation = d_rotations[blockIdx.y];
for (uint id = threadIdx.x; id < elementsproj; id += blockDim.x)
{
uint idx = id % dimft;
uint idy = (ndims == 3 ? id % slice : id) / dimft;
uint idz = ndims == 3 ? id / slice : 0;
int x = idx;
int y = idy <= dimproj / 2 ? idy : (int)idy - (int)dimproj;
int z = ndims == 3 ? (idz <= dimproj / 2 ? idz : (int)idz - (int)dimproj) : 0;
glm::vec2 posmag = glm::vec2(x, y);
if (ndims == 2)
posmag = magnification * posmag;
int r2 = ndims == 3 ? (z * z + y * y + x * x) : (posmag.y * posmag.y + posmag.x * posmag.x);
if (r2 > rmax2)
continue;
glm::vec3 pos = glm::vec3(posmag.x, posmag.y, z);
pos = rotation * pos;
// Only asymmetric half is stored
float is_neg_x = 1.0f;
if (pos.x + 1e-5f < 0)
{
// Get complex conjugated hermitian symmetry pair
pos.x = abs(pos.x);
pos.y = -pos.y;
pos.z = -pos.z;
is_neg_x = -1.0f;
}
// Trilinear interpolation
int x0 = floor(pos.x + 1e-5f);
pos.x -= x0;
int x1 = x0 + 1;
int y0 = floor(pos.y);
pos.y -= y0;
y0 += dimvolume / 2;
int y1 = y0 + 1;
int z0 = floor(pos.z);
pos.z -= z0;
z0 += dimvolume / 2;
int z1 = z0 + 1;
float c0 = 1.0f - pos.z;
float c1 = pos.z;
float c00 = (1.0f - pos.y) * c0;
float c10 = pos.y * c0;
float c01 = (1.0f - pos.y) * c1;
float c11 = pos.y * c1;
float c000 = (1.0f - pos.x) * c00;
float c100 = pos.x * c00;
float c010 = (1.0f - pos.x) * c10;
float c110 = pos.x * c10;
float c001 = (1.0f - pos.x) * c01;
float c101 = pos.x * c01;
float c011 = (1.0f - pos.x) * c11;
float c111 = pos.x * c11;
tcomplex val = d_projft[id];
val.y *= is_neg_x;
tfloat weight = d_projweights[id];
if (decentered)
{
/*z0 = z0 < dimvolume / 2 ? z0 + dimvolume / 2 : z0 - dimvolume / 2;
z1 = z1 < dimvolume / 2 ? z1 + dimvolume / 2 : z1 - dimvolume / 2;
y0 = y0 < dimvolume / 2 ? y0 + dimvolume / 2 : y0 - dimvolume / 2;
y1 = y1 < dimvolume / 2 ? y1 + dimvolume / 2 : y1 - dimvolume / 2;*/
z0 = (z0 + dimvolume / 2) % dimvolume;
z1 = (z1 + dimvolume / 2) % dimvolume;
y0 = (y0 + dimvolume / 2) % dimvolume;
y1 = (y1 + dimvolume / 2) % dimvolume;
}
atomicAdd((tfloat*)(d_volumeft + (z0 * dimvolume + y0) * dimvolumeft + x0), c000 * val.x);
atomicAdd((tfloat*)(d_volumeft + (z0 * dimvolume + y0) * dimvolumeft + x0) + 1, c000 * val.y);
atomicAdd((tfloat*)(d_volumeweights + (z0 * dimvolume + y0) * dimvolumeft + x0), c000 * weight);
atomicAdd((tfloat*)(d_volumeft + (z0 * dimvolume + y0) * dimvolumeft + x1), c100 * val.x);
atomicAdd((tfloat*)(d_volumeft + (z0 * dimvolume + y0) * dimvolumeft + x1) + 1, c100 * val.y);
atomicAdd((tfloat*)(d_volumeweights + (z0 * dimvolume + y0) * dimvolumeft + x1), c100 * weight);
atomicAdd((tfloat*)(d_volumeft + (z0 * dimvolume + y1) * dimvolumeft + x0), c010 * val.x);
atomicAdd((tfloat*)(d_volumeft + (z0 * dimvolume + y1) * dimvolumeft + x0) + 1, c010 * val.y);
atomicAdd((tfloat*)(d_volumeweights + (z0 * dimvolume + y1) * dimvolumeft + x0), c010 * weight);
atomicAdd((tfloat*)(d_volumeft + (z0 * dimvolume + y1) * dimvolumeft + x1), c110 * val.x);
atomicAdd((tfloat*)(d_volumeft + (z0 * dimvolume + y1) * dimvolumeft + x1) + 1, c110 * val.y);
atomicAdd((tfloat*)(d_volumeweights + (z0 * dimvolume + y1) * dimvolumeft + x1), c110 * weight);
atomicAdd((tfloat*)(d_volumeft + (z1 * dimvolume + y0) * dimvolumeft + x0), c001 * val.x);
atomicAdd((tfloat*)(d_volumeft + (z1 * dimvolume + y0) * dimvolumeft + x0) + 1, c001 * val.y);
atomicAdd((tfloat*)(d_volumeweights + (z1 * dimvolume + y0) * dimvolumeft + x0), c001 * weight);
atomicAdd((tfloat*)(d_volumeft + (z1 * dimvolume + y0) * dimvolumeft + x1), c101 * val.x);
atomicAdd((tfloat*)(d_volumeft + (z1 * dimvolume + y0) * dimvolumeft + x1) + 1, c101 * val.y);
atomicAdd((tfloat*)(d_volumeweights + (z1 * dimvolume + y0) * dimvolumeft + x1), c101 * weight);
atomicAdd((tfloat*)(d_volumeft + (z1 * dimvolume + y1) * dimvolumeft + x0), c011 * val.x);
atomicAdd((tfloat*)(d_volumeft + (z1 * dimvolume + y1) * dimvolumeft + x0) + 1, c011 * val.y);
atomicAdd((tfloat*)(d_volumeweights + (z1 * dimvolume + y1) * dimvolumeft + x0), c011 * weight);
atomicAdd((tfloat*)(d_volumeft + (z1 * dimvolume + y1) * dimvolumeft + x1), c111 * val.x);
atomicAdd((tfloat*)(d_volumeft + (z1 * dimvolume + y1) * dimvolumeft + x1) + 1, c111 * val.y);
atomicAdd((tfloat*)(d_volumeweights + (z1 * dimvolume + y1) * dimvolumeft + x1), c111 * weight);
}
}
}
|
c217791f023db1527e99a8d57d6b3be008f04166.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHReduceApplyUtils.cuh"
#include "THHTensorCopy.h"
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#if TORCH_HIP_VERSION >= 7000
#include <thrust/system/hip/execution_policy.h>
#endif
template <typename IndexType, int Power2SortSize>
__device__ __forceinline__ IndexType
getSortSliceLinearIndex() {
// linear block ID -> slice we are sorting (one per block)
return getLinearBlockId<IndexType>();
}
// Returns 2^(ceil(lg(n))) from Stanford bit twiddling hacks
unsigned long nextHighestPowerOf2(unsigned long n) {
n--;
n |= n >> 1;
n |= n >> 2;
n |= n >> 4;
n |= n >> 8;
n |= n >> 16;
n |= n >> 32;
n++;
return n;
}
template <typename T>
struct LTComp {
__device__ __forceinline__ bool operator()(const T& a, const T& b) const {
return (a < b);
}
};
template <typename T>
struct GTComp {
__device__ __forceinline__ bool operator()(const T& a, const T& b) const {
return (a > b);
}
};
template <typename Comparator, typename K, typename V>
__device__ __forceinline__ void bitonicSwap(K& kA, V& vA,
K& kB, V& vB,
bool dir,
const Comparator& comp) {
// Entries with -1 indices (not real data; out of bounds) always
// sort to the end
bool val = (comp(kA, kB) && (vA != -1)) || (vB == -1);
if (val == dir) {
K k = kA;
kA = kB;
kB = k;
V v = vA;
vA = vB;
vB = v;
}
};
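// Block-wide bitonic sort of Power2SortSize key/value pairs held in shared memory;
// each thread is responsible for two elements, and the result is ordered according
// to `comp`, with out-of-bounds entries (index -1) pushed to the end.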
template <typename Comparator, typename K, typename V,
typename IndexType, int Power2SortSize>
__device__ inline void bitonicSort(K keys[Power2SortSize],
V values[Power2SortSize],
const Comparator& comp) {
#pragma unroll
for (unsigned int size = 2; size < Power2SortSize; size *= 2) {
bool flag = ((threadIdx.x & (size / 2)) != 0);
#pragma unroll
for (unsigned int stride = size / 2; stride > 0; stride /= 2) {
// Single warp per slice is completely synchronous
if (Power2SortSize > 64) {
__syncthreads();
}
unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
bitonicSwap<Comparator, K, V>(
keys[pos], values[pos], keys[pos + stride], values[pos + stride],
flag, comp);
}
}
#pragma unroll
for (unsigned int stride = Power2SortSize / 2; stride > 0; stride /= 2) {
// Single warp per slice is completely synchronous
if (Power2SortSize > 64) {
__syncthreads();
}
unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
bitonicSwap<Comparator, K, V>(
keys[pos], values[pos], keys[pos + stride], values[pos + stride],
false, comp);
}
// Single warp per slice is completely synchronous
if (Power2SortSize > 64) {
__syncthreads();
}
}
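// One block sorts one slice of the tensor: keys and 1-based Torch indices are staged
// in shared memory, sorted with the bitonic network above, and written back to the
// `sorted` and `indices` outputs.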
template <typename Comparator, typename IndexType, int Dims, int Power2SortSize>
__global__ void
THCudaTensor_bitonicSortWithIndex(TensorInfo<IndexType> sorted,
TensorInfo<IndexType> indices,
TensorInfo<IndexType> input,
IndexType totalSlices,
IndexType sliceSize,
IndexType sliceStride,
IndexType outSize,
IndexType outStride,
const Comparator comp) {
// Find the slice of the tensor that we are sorting
const IndexType linearIndex =
getSortSliceLinearIndex<IndexType, Power2SortSize>();
// Tiling the slices could have us be out of bounds, if there are a
// lot of slices to sort
if (linearIndex >= totalSlices) {
return;
}
__shared__ float keys[Power2SortSize];
__shared__ int values[Power2SortSize];
// Read unsorted values
const IndexType inputStartOffset =
IndexToOffset<IndexType, Dims>::get(linearIndex, input);
// Each thread is responsible for loading and storing 2 elements
const int elem1 = threadIdx.x;
const int elem2 = threadIdx.x + (Power2SortSize / 2);
keys[elem1] = (elem1 < sliceSize) ?
input.data[inputStartOffset + elem1 * sliceStride] :
0.0f; // doesn't matter, element val out of bounds
// Torch indices are 1-based (hence the +1)
values[elem1] = (elem1 < sliceSize) ? (elem1 + 1) :
-1; // out of bounds
keys[elem2] = (elem2 < sliceSize) ?
input.data[inputStartOffset + elem2 * sliceStride] :
0.0f; // doesn't matter, element val out of bounds
// Torch indices are 1-based (hence the +1)
values[elem2] = (elem2 < sliceSize) ? (elem2 + 1) :
-1; // out of bounds
// Sort!
bitonicSort<Comparator, float, int, IndexType, Power2SortSize>(
keys, values, comp);
// Write sorted values; indices have same layout
const IndexType sortedStartOffset =
IndexToOffset<IndexType, -1>::get(linearIndex, sorted);
const IndexType out1 = sortedStartOffset + elem1 * outStride;
// elem1 values are always valid, since otherwise we would have
// chosen the next smallest power-of-2 for sorting
sorted.data[out1] = keys[elem1];
indices.data[out1] = values[elem1];
const IndexType out2 = sortedStartOffset + elem2 * outStride;
// elem2 values might be out-of-range, if the data size we are
// sorting is not a power-of-2
if (values[elem2] != -1) {
sorted.data[out2] = keys[elem2];
indices.data[out2] = values[elem2];
}
}
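// Host-side dispatcher for the block-wide sort: rounds the slice size up to the next
// power of two (at most 2048), chooses 32- or 64-bit indexing, instantiates the
// matching kernel template, and returns false when the input cannot be handled here.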
bool THCudaTensor_sortImpl(THCState* state,
THCudaTensor* sorted,
THCudaTensor* indices,
THCudaTensor* input,
int dim, bool dir) {
long inElements = THCudaTensor_nElement(state, input);
long sliceSize = THCudaTensor_size(state, input, dim);
long sliceStride = THCudaTensor_stride(state, input, dim);
long slices = inElements / sliceSize;
long outSize = THCudaTensor_size(state, sorted, dim);
long outStride = THCudaTensor_stride(state, sorted, dim);
if (THCudaTensor_nDimension(state, input) > MAX_CUTORCH_DIMS) {
// Too many dimensions
return false;
}
if (THCudaTensor_nDimension(state, input) == 0) {
// Zero-dim tensor; do nothing
return true;
}
// The amount of shared memory and block size is based on
// 2^ceil(lg(n)); we choose that sorting implementation for a given
// size.
long ceilPowerOf2 = nextHighestPowerOf2(sliceSize);
// Only handle 1-2048 at the moment
if (ceilPowerOf2 > 2048) {
return false;
}
const dim3 block(ceilPowerOf2 / 2);
// The grid is based on the number of independent slices that we
// have to sort; one block per slice
dim3 grid;
if (!THC_getGridFromTiles(slices, grid)) {
return false;
}
#define HANDLE_CASE(TYPE, A, SIZE) \
if (dir) { \
hipLaunchKernelGGL(( THCudaTensor_bitonicSortWithIndex<GTComp<float>, TYPE, A, SIZE>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
sortedInfo, indicesInfo, inputInfo, \
slices, (TYPE) sliceSize, (TYPE) sliceStride, \
(TYPE) outSize, (TYPE) outStride, \
GTComp<float>()); \
} else { \
hipLaunchKernelGGL(( THCudaTensor_bitonicSortWithIndex<LTComp<float>, TYPE, A, SIZE>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
sortedInfo, indicesInfo, inputInfo, \
slices, (TYPE) sliceSize, (TYPE) sliceStride, \
(TYPE) outSize, (TYPE) outStride, \
LTComp<float>()); \
}
#define HANDLE_SORT_CASE(TYPE, A) \
{ \
switch (ceilPowerOf2) { \
case 2048: \
HANDLE_CASE(TYPE, A, 2048); \
break; \
case 1024: \
HANDLE_CASE(TYPE, A, 1024); \
break; \
case 512: \
HANDLE_CASE(TYPE, A, 512); \
break; \
case 256: \
HANDLE_CASE(TYPE, A, 256); \
break; \
case 128: \
HANDLE_CASE(TYPE, A, 128); \
break; \
case 64: \
HANDLE_CASE(TYPE, A, 64); \
break; \
case 32: \
HANDLE_CASE(TYPE, A, 32); \
break; \
case 16: \
HANDLE_CASE(TYPE, A, 16); \
break; \
case 8: \
HANDLE_CASE(TYPE, A, 8); \
break; \
case 4: \
HANDLE_CASE(TYPE, A, 4); \
break; \
case 2: \
HANDLE_CASE(TYPE, A, 2); \
break; \
case 1: \
HANDLE_CASE(TYPE, A, 1); \
break; \
default: \
assert(false); \
} \
}
#define HANDLE_A_CASE(TYPE, A) \
{ \
if (inputInfo.isContiguous()) { \
HANDLE_SORT_CASE(TYPE, -2); \
} else { \
switch (A) { \
case 1: \
HANDLE_SORT_CASE(TYPE, 1); \
break; \
case 2: \
HANDLE_SORT_CASE(TYPE, 2); \
break; \
case 3: \
HANDLE_SORT_CASE(TYPE, 3); \
break; \
default: \
HANDLE_SORT_CASE(TYPE, -1); \
break; \
} \
} \
}
if (THC_canUse32BitIndexMath(state, input)) {
// In order to get to the right offset for the slice we are
// sorting, set `dim` size to 1 (the `dropDim` argument)
TensorInfo<unsigned int> sortedInfo(state, sorted, dim);
sortedInfo.collapseDims();
TensorInfo<unsigned int> indicesInfo(state, indices, dim);
indicesInfo.collapseDims();
TensorInfo<unsigned int> inputInfo(state, input, dim);
inputInfo.collapseDims();
HANDLE_A_CASE(unsigned int, inputInfo.dims);
} else {
// In order to get to the right offset for the slice we are
// sorting, set `dim` size to 1 (the `dropDim` argument)
TensorInfo<unsigned long> sortedInfo(state, sorted, dim);
sortedInfo.collapseDims();
TensorInfo<unsigned long> indicesInfo(state, indices, dim);
indicesInfo.collapseDims();
TensorInfo<unsigned long> inputInfo(state, input, dim);
inputInfo.collapseDims();
// long case is rare, just instantiate these versions
if (inputInfo.isContiguous()) {
HANDLE_SORT_CASE(unsigned long, -2);
} else {
HANDLE_SORT_CASE(unsigned long, -1);
}
}
#undef HANDLE_CASE
#undef HANDLE_SORT_CASE
#undef HANDLE_A_CASE
return true;
}
// `base` is the base address of a tensor
// For each slice (defined as a linear point of `out`, from 0 ->
// (sliceSize - 1) * sliceStride, we fill that slice from `0` to
// `sliceSize - 1`.
__global__ void
THCudaTensor_fillSliceWithIndex(TensorInfo<unsigned long> out,
long totalSlices,
long sliceSize,
long sliceStride) {
long slice = getLinearBlockId<long>();
if (slice >= totalSlices) {
return;
}
const unsigned long offset =
IndexToOffset<unsigned long, -1>::get(slice, out);
for (long i = threadIdx.x; i < sliceSize; i += blockDim.x) {
// Torch indices are 1-based (hence the +1)
out.data[offset + i * sliceStride] = (float) i + 1;
}
}
bool shouldSortThrust(THCState* state, THCudaTensor* input, int dim) {
long totalElements = THCudaTensor_nElement(state, input);
long sliceSize = THCudaTensor_size(state, input, dim);
long numSlices = totalElements / sliceSize;
// Only bother deferring to Thrust if the sort slice is contiguous,
// the number of slices are small, and they are large
return ((THCudaTensor_stride(state, input, dim) == 1) &&
numSlices <= 16 &&
sliceSize > 2048);
}
void THCudaTensor_sortImplThrust(THCState* state,
THCudaTensor* sorted,
THCudaTensor* indices,
THCudaTensor* input,
int dim, bool dir) {
// Fill the indices as values that Thrust can use for key/value sorting
long totalElements = THCudaTensor_nElement(state, input);
long sliceSize = THCudaTensor_size(state, input, dim);
long sliceStride = THCudaTensor_stride(state, input, dim);
long numSlices = totalElements / sliceSize;
THArgCheck(THCudaTensor_stride(state, input, dim) == 1, 1,
"The dimension to be sorted must be contiguous.");
// Copy input to sorted, since we sort in place
if (sorted != input) {
THCudaTensor_copy(state, sorted, input);
}
TensorInfo<unsigned long> sortedInfo(state, sorted, dim);
sortedInfo.collapseDims();
TensorInfo<unsigned long> indicesInfo(state, indices, dim);
indicesInfo.collapseDims();
dim3 grid;
THC_getGridFromTiles(numSlices, grid);
hipLaunchKernelGGL(( THCudaTensor_fillSliceWithIndex), dim3(grid), dim3(min((long long)sliceSize, 1024LL)),
0, THCState_getCurrentStream(state),
indicesInfo, numSlices, sliceSize, sliceStride);
THCudaCheck(hipGetLastError());
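// Sort each slice independently with Thrust key/value sort: the keys are the
// data values in `sorted` and the carried values are the 1-based indices.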
for (long slice = 0; slice < numSlices; ++slice) {
unsigned long sortedStart =
IndexToOffset<unsigned long, -1>::get(slice, sortedInfo);
unsigned long indicesStart =
IndexToOffset<unsigned long, -1>::get(slice, indicesInfo);
thrust::device_ptr<float>
sortedSliceStart(THCudaTensor_data(state, sorted) +
sortedStart);
thrust::device_ptr<float>
sortedSliceEnd(THCudaTensor_data(state, sorted) +
sortedStart + sliceSize);
thrust::device_ptr<float>
indicesSliceStart(THCudaTensor_data(state, indices) +
indicesStart);
if (dir) {
thrust::sort_by_key(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par.on(THCState_getCurrentStream(state)),
#endif
sortedSliceStart, sortedSliceEnd, indicesSliceStart,
thrust::greater<float>());
} else {
thrust::sort_by_key(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par.on(THCState_getCurrentStream(state)),
#endif
sortedSliceStart, sortedSliceEnd, indicesSliceStart,
thrust::less<float>());
}
}
}
THC_API void THCudaTensor_sort(THCState* state,
THCudaTensor *sorted,
THCudaTensor *indices,
THCudaTensor *input,
int dim, int order) {
THAssert(THCudaTensor_checkGPU(state, 3, sorted, indices, input));
// Make sure sufficient output space is allocated
THCudaTensor_resizeAs(state, sorted, input);
THCudaTensor_resizeAs(state, indices, input);
// If we think Thrust will be more efficient, use that
if (shouldSortThrust(state, input, dim)) {
THCudaTensor_sortImplThrust(state, sorted, indices, input,
dim, (bool) order);
return;
}
// Otherwise, try to use our blockwide sort kernel per each reduction slice
if (THCudaTensor_sortImpl(state, sorted, indices, input,
dim, (bool) order)) {
return;
}
// Fall back to Thrust if our kernel can't handle the input
THCudaTensor_sortImplThrust(state, sorted, indices, input,
dim, (bool) order);
THCudaCheck(hipGetLastError());
}
|
c217791f023db1527e99a8d57d6b3be008f04166.cu
|
#include "THCReduceApplyUtils.cuh"
#include "THCTensorCopy.h"
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#if CUDA_VERSION >= 7000
#include <thrust/system/cuda/execution_policy.h>
#endif
template <typename IndexType, int Power2SortSize>
__device__ __forceinline__ IndexType
getSortSliceLinearIndex() {
// linear block ID -> slice we are sorting (one per block)
return getLinearBlockId<IndexType>();
}
// Returns 2^(ceil(lg(n))), from the Stanford bit twiddling hacks
unsigned long nextHighestPowerOf2(unsigned long n) {
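// Smear the highest set bit into all lower bits, then add one; the initial
// decrement keeps inputs that are already a power of two unchanged.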
n--;
n |= n >> 1;
n |= n >> 2;
n |= n >> 4;
n |= n >> 8;
n |= n >> 16;
n |= n >> 32;
n++;
return n;
}
template <typename T>
struct LTComp {
__device__ __forceinline__ bool operator()(const T& a, const T& b) const {
return (a < b);
}
};
template <typename T>
struct GTComp {
__device__ __forceinline__ bool operator()(const T& a, const T& b) const {
return (a > b);
}
};
template <typename Comparator, typename K, typename V>
__device__ __forceinline__ void bitonicSwap(K& kA, V& vA,
K& kB, V& vB,
bool dir,
const Comparator& comp) {
// Entries with -1 indices (not real data; out of bounds) always
// sort to the end
bool val = (comp(kA, kB) && (vA != -1)) || (vB == -1);
if (val == dir) {
K k = kA;
kA = kB;
kB = k;
V v = vA;
vA = vB;
vB = v;
}
}
template <typename Comparator, typename K, typename V,
typename IndexType, int Power2SortSize>
__device__ inline void bitonicSort(K keys[Power2SortSize],
V values[Power2SortSize],
const Comparator& comp) {
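// Phase 1: build a bitonic sequence by merging progressively larger
// sub-sequences, alternating the sort direction per thread via `flag`.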
#pragma unroll
for (unsigned int size = 2; size < Power2SortSize; size *= 2) {
bool flag = ((threadIdx.x & (size / 2)) != 0);
#pragma unroll
for (unsigned int stride = size / 2; stride > 0; stride /= 2) {
// Single warp per slice is completely synchronous
if (Power2SortSize > 64) {
__syncthreads();
}
unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
bitonicSwap<Comparator, K, V>(
keys[pos], values[pos], keys[pos + stride], values[pos + stride],
flag, comp);
}
}
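// Phase 2: merge the full bitonic sequence into the order given by `comp`.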
#pragma unroll
for (unsigned int stride = Power2SortSize / 2; stride > 0; stride /= 2) {
// Single warp per slice is completely synchronous
if (Power2SortSize > 64) {
__syncthreads();
}
unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
bitonicSwap<Comparator, K, V>(
keys[pos], values[pos], keys[pos + stride], values[pos + stride],
false, comp);
}
// Single warp per slice is completely synchronous
if (Power2SortSize > 64) {
__syncthreads();
}
}
template <typename Comparator, typename IndexType, int Dims, int Power2SortSize>
__global__ void
THCudaTensor_bitonicSortWithIndex(TensorInfo<IndexType> sorted,
TensorInfo<IndexType> indices,
TensorInfo<IndexType> input,
IndexType totalSlices,
IndexType sliceSize,
IndexType sliceStride,
IndexType outSize,
IndexType outStride,
const Comparator comp) {
// Find the slice of the tensor that we are sorting
const IndexType linearIndex =
getSortSliceLinearIndex<IndexType, Power2SortSize>();
// Tiling the slices could have us be out of bounds, if there are a
// lot of slices to sort
if (linearIndex >= totalSlices) {
return;
}
__shared__ float keys[Power2SortSize];
__shared__ int values[Power2SortSize];
// Read unsorted values
const IndexType inputStartOffset =
IndexToOffset<IndexType, Dims>::get(linearIndex, input);
// Each thread is responsible for loading and storing 2 elements
const int elem1 = threadIdx.x;
const int elem2 = threadIdx.x + (Power2SortSize / 2);
keys[elem1] = (elem1 < sliceSize) ?
input.data[inputStartOffset + elem1 * sliceStride] :
0.0f; // doesn't matter, element val out of bounds
// Torch indices are 1-based (hence the +1)
values[elem1] = (elem1 < sliceSize) ? (elem1 + 1) :
-1; // out of bounds
keys[elem2] = (elem2 < sliceSize) ?
input.data[inputStartOffset + elem2 * sliceStride] :
0.0f; // doesn't matter, element val out of bounds
// Torch indices are 1-based (hence the +1)
values[elem2] = (elem2 < sliceSize) ? (elem2 + 1) :
-1; // out of bounds
// Sort!
bitonicSort<Comparator, float, int, IndexType, Power2SortSize>(
keys, values, comp);
// Write sorted values; indices have same layout
const IndexType sortedStartOffset =
IndexToOffset<IndexType, -1>::get(linearIndex, sorted);
const IndexType out1 = sortedStartOffset + elem1 * outStride;
// elem1 values are always valid, since otherwise we would have
// chosen the next smallest power-of-2 for sorting
sorted.data[out1] = keys[elem1];
indices.data[out1] = values[elem1];
const IndexType out2 = sortedStartOffset + elem2 * outStride;
// elem2 values might be out-of-range, if the data size we are
// sorting is not a power-of-2
if (values[elem2] != -1) {
sorted.data[out2] = keys[elem2];
indices.data[out2] = values[elem2];
}
}
bool THCudaTensor_sortImpl(THCState* state,
THCudaTensor* sorted,
THCudaTensor* indices,
THCudaTensor* input,
int dim, bool dir) {
long inElements = THCudaTensor_nElement(state, input);
long sliceSize = THCudaTensor_size(state, input, dim);
long sliceStride = THCudaTensor_stride(state, input, dim);
long slices = inElements / sliceSize;
long outSize = THCudaTensor_size(state, sorted, dim);
long outStride = THCudaTensor_stride(state, sorted, dim);
if (THCudaTensor_nDimension(state, input) > MAX_CUTORCH_DIMS) {
// Too many dimensions
return false;
}
if (THCudaTensor_nDimension(state, input) == 0) {
// Zero-dim tensor; do nothing
return true;
}
// The amount of shared memory and block size is based on
// 2^ceil(lg(n)); we choose that sorting implementation for a given
// size.
long ceilPowerOf2 = nextHighestPowerOf2(sliceSize);
// Only handle 1-2048 at the moment
if (ceilPowerOf2 > 2048) {
return false;
}
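// Each thread loads, sorts and stores two elements, so half as many
// threads as the (power-of-2) sort size are needed per block.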
const dim3 block(ceilPowerOf2 / 2);
// The grid is based on the number of independent slices that we
// have to sort; one block per slice
dim3 grid;
if (!THC_getGridFromTiles(slices, grid)) {
return false;
}
#define HANDLE_CASE(TYPE, A, SIZE) \
if (dir) { \
THCudaTensor_bitonicSortWithIndex<GTComp<float>, TYPE, A, SIZE> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>>( \
sortedInfo, indicesInfo, inputInfo, \
slices, (TYPE) sliceSize, (TYPE) sliceStride, \
(TYPE) outSize, (TYPE) outStride, \
GTComp<float>()); \
} else { \
THCudaTensor_bitonicSortWithIndex<LTComp<float>, TYPE, A, SIZE> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>>( \
sortedInfo, indicesInfo, inputInfo, \
slices, (TYPE) sliceSize, (TYPE) sliceStride, \
(TYPE) outSize, (TYPE) outStride, \
LTComp<float>()); \
}
#define HANDLE_SORT_CASE(TYPE, A) \
{ \
switch (ceilPowerOf2) { \
case 2048: \
HANDLE_CASE(TYPE, A, 2048); \
break; \
case 1024: \
HANDLE_CASE(TYPE, A, 1024); \
break; \
case 512: \
HANDLE_CASE(TYPE, A, 512); \
break; \
case 256: \
HANDLE_CASE(TYPE, A, 256); \
break; \
case 128: \
HANDLE_CASE(TYPE, A, 128); \
break; \
case 64: \
HANDLE_CASE(TYPE, A, 64); \
break; \
case 32: \
HANDLE_CASE(TYPE, A, 32); \
break; \
case 16: \
HANDLE_CASE(TYPE, A, 16); \
break; \
case 8: \
HANDLE_CASE(TYPE, A, 8); \
break; \
case 4: \
HANDLE_CASE(TYPE, A, 4); \
break; \
case 2: \
HANDLE_CASE(TYPE, A, 2); \
break; \
case 1: \
HANDLE_CASE(TYPE, A, 1); \
break; \
default: \
assert(false); \
} \
}
#define HANDLE_A_CASE(TYPE, A) \
{ \
if (inputInfo.isContiguous()) { \
HANDLE_SORT_CASE(TYPE, -2); \
} else { \
switch (A) { \
case 1: \
HANDLE_SORT_CASE(TYPE, 1); \
break; \
case 2: \
HANDLE_SORT_CASE(TYPE, 2); \
break; \
case 3: \
HANDLE_SORT_CASE(TYPE, 3); \
break; \
default: \
HANDLE_SORT_CASE(TYPE, -1); \
break; \
} \
} \
}
if (THC_canUse32BitIndexMath(state, input)) {
// In order to get to the right offset for the slice we are
// sorting, set `dim` size to 1 (the `dropDim` argument)
TensorInfo<unsigned int> sortedInfo(state, sorted, dim);
sortedInfo.collapseDims();
TensorInfo<unsigned int> indicesInfo(state, indices, dim);
indicesInfo.collapseDims();
TensorInfo<unsigned int> inputInfo(state, input, dim);
inputInfo.collapseDims();
HANDLE_A_CASE(unsigned int, inputInfo.dims);
} else {
// In order to get to the right offset for the slice we are
// sorting, set `dim` size to 1 (the `dropDim` argument)
TensorInfo<unsigned long> sortedInfo(state, sorted, dim);
sortedInfo.collapseDims();
TensorInfo<unsigned long> indicesInfo(state, indices, dim);
indicesInfo.collapseDims();
TensorInfo<unsigned long> inputInfo(state, input, dim);
inputInfo.collapseDims();
// long case is rare, just instantiate these versions
if (inputInfo.isContiguous()) {
HANDLE_SORT_CASE(unsigned long, -2);
} else {
HANDLE_SORT_CASE(unsigned long, -1);
}
}
#undef HANDLE_CASE
#undef HANDLE_SORT_CASE
#undef HANDLE_A_CASE
return true;
}
// For each slice of `out` (one slice per linear block index), fill the
// positions 0 .. (sliceSize - 1) along the slice (stride `sliceStride`)
// with the 1-based Torch indices 1 .. sliceSize.
__global__ void
THCudaTensor_fillSliceWithIndex(TensorInfo<unsigned long> out,
long totalSlices,
long sliceSize,
long sliceStride) {
long slice = getLinearBlockId<long>();
if (slice >= totalSlices) {
return;
}
const unsigned long offset =
IndexToOffset<unsigned long, -1>::get(slice, out);
for (long i = threadIdx.x; i < sliceSize; i += blockDim.x) {
// Torch indices are 1-based (hence the +1)
out.data[offset + i * sliceStride] = (float) i + 1;
}
}
bool shouldSortThrust(THCState* state, THCudaTensor* input, int dim) {
long totalElements = THCudaTensor_nElement(state, input);
long sliceSize = THCudaTensor_size(state, input, dim);
long numSlices = totalElements / sliceSize;
// Only bother deferring to Thrust if the sort slice is contiguous,
// the number of slices is small, and each slice is large
return ((THCudaTensor_stride(state, input, dim) == 1) &&
numSlices <= 16 &&
sliceSize > 2048);
}
void THCudaTensor_sortImplThrust(THCState* state,
THCudaTensor* sorted,
THCudaTensor* indices,
THCudaTensor* input,
int dim, bool dir) {
// Fill the indices as values that Thrust can use for key/value sorting
long totalElements = THCudaTensor_nElement(state, input);
long sliceSize = THCudaTensor_size(state, input, dim);
long sliceStride = THCudaTensor_stride(state, input, dim);
long numSlices = totalElements / sliceSize;
THArgCheck(THCudaTensor_stride(state, input, dim) == 1, 1,
"The dimension to be sorted must be contiguous.");
// Copy input to sorted, since we sort in place
if (sorted != input) {
THCudaTensor_copy(state, sorted, input);
}
TensorInfo<unsigned long> sortedInfo(state, sorted, dim);
sortedInfo.collapseDims();
TensorInfo<unsigned long> indicesInfo(state, indices, dim);
indicesInfo.collapseDims();
dim3 grid;
THC_getGridFromTiles(numSlices, grid);
THCudaTensor_fillSliceWithIndex<<<grid, min((long long)sliceSize, 1024LL),
0, THCState_getCurrentStream(state)>>>(
indicesInfo, numSlices, sliceSize, sliceStride);
THCudaCheck(cudaGetLastError());
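// Sort each slice independently with Thrust key/value sort: the keys are the
// data values in `sorted` and the carried values are the 1-based indices.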
for (long slice = 0; slice < numSlices; ++slice) {
unsigned long sortedStart =
IndexToOffset<unsigned long, -1>::get(slice, sortedInfo);
unsigned long indicesStart =
IndexToOffset<unsigned long, -1>::get(slice, indicesInfo);
thrust::device_ptr<float>
sortedSliceStart(THCudaTensor_data(state, sorted) +
sortedStart);
thrust::device_ptr<float>
sortedSliceEnd(THCudaTensor_data(state, sorted) +
sortedStart + sliceSize);
thrust::device_ptr<float>
indicesSliceStart(THCudaTensor_data(state, indices) +
indicesStart);
if (dir) {
thrust::sort_by_key(
#if CUDA_VERSION >= 7000
thrust::cuda::par.on(THCState_getCurrentStream(state)),
#endif
sortedSliceStart, sortedSliceEnd, indicesSliceStart,
thrust::greater<float>());
} else {
thrust::sort_by_key(
#if CUDA_VERSION >= 7000
thrust::cuda::par.on(THCState_getCurrentStream(state)),
#endif
sortedSliceStart, sortedSliceEnd, indicesSliceStart,
thrust::less<float>());
}
}
}
THC_API void THCudaTensor_sort(THCState* state,
THCudaTensor *sorted,
THCudaTensor *indices,
THCudaTensor *input,
int dim, int order) {
THAssert(THCudaTensor_checkGPU(state, 3, sorted, indices, input));
// Make sure sufficient output space is allocated
THCudaTensor_resizeAs(state, sorted, input);
THCudaTensor_resizeAs(state, indices, input);
// If we think Thrust will be more efficient, use that
if (shouldSortThrust(state, input, dim)) {
THCudaTensor_sortImplThrust(state, sorted, indices, input,
dim, (bool) order);
return;
}
// Otherwise, try to use our blockwide sort kernel per each reduction slice
if (THCudaTensor_sortImpl(state, sorted, indices, input,
dim, (bool) order)) {
return;
}
// Fall back to Thrust if our kernel can't handle the input
THCudaTensor_sortImplThrust(state, sorted, indices, input,
dim, (bool) order);
THCudaCheck(cudaGetLastError());
}
|
e3761d50944ed0a402972dcd2bb473d5a37831bf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include "util.hpp"
// a kernel that reverses a string of length n in place
__global__
void reverse_string_shared(char* str, int n) {
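// stage the string in shared memory so each thread can read its mirrored
// character after the barrier; limited to a single block, i.e. strings of
// at most 1024 characters (the size of buf)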
__shared__ char buf[1024];
int i = threadIdx.x;
if (i<n) {
buf[i] = str[i];
__syncthreads();
str[i] = buf[n-i-1];
}
}
__global__
void reverse_string_swap(char* str, int n) {
int i = threadIdx.x;
if (i<n/2) {
char tmp = str[i];
str[i] = str[n-i-1];
str[n-i-1] = tmp;
}
}
int main(int argc, char** argv) {
// check that the user has passed a string to reverse
if(argc<2) {
std::cout << "useage : ./string_reverse \"string to reverse\"\n" << std::endl;
exit(0);
}
// determine the length of the string, and copy it into a buffer
auto n = strlen(argv[1]);
auto string = malloc_managed<char>(n+1);
std::copy(argv[1], argv[1]+n, string);
string[n] = 0; // add null terminator
std::cout << "string to reverse:\n" << string << "\n";
// call the string reverse kernel
//reverse_string_shared<<<1, n>>>(string, n);
hipLaunchKernelGGL(( reverse_string_swap), dim3(1), dim3(n), 0, 0, string, n);
// print reversed string
hipDeviceSynchronize();
std::cout << "reversed string:\n" << string << "\n";
// free memory
hipFree(string);
return 0;
}
|
e3761d50944ed0a402972dcd2bb473d5a37831bf.cu
|
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include "util.hpp"
// a kernel that reverses a string of length n in place
__global__
void reverse_string_shared(char* str, int n) {
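// stage the string in shared memory so each thread can read its mirrored
// character after the barrier; limited to a single block, i.e. strings of
// at most 1024 characters (the size of buf)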
__shared__ char buf[1024];
int i = threadIdx.x;
if (i<n) {
buf[i] = str[i];
__syncthreads();
str[i] = buf[n-i-1];
}
}
__global__
void reverse_string_swap(char* str, int n) {
int i = threadIdx.x;
if (i<n/2) {
char tmp = str[i];
str[i] = str[n-i-1];
str[n-i-1] = tmp;
}
}
int main(int argc, char** argv) {
// check that the user has passed a string to reverse
if(argc<2) {
std::cout << "useage : ./string_reverse \"string to reverse\"\n" << std::endl;
exit(0);
}
// determine the length of the string, and copy it into a buffer
auto n = strlen(argv[1]);
auto string = malloc_managed<char>(n+1);
std::copy(argv[1], argv[1]+n, string);
string[n] = 0; // add null terminator
std::cout << "string to reverse:\n" << string << "\n";
// call the string reverse kernel
//reverse_string_shared<<<1, n>>>(string, n);
reverse_string_swap<<<1, n>>>(string, n);
// print reversed string
cudaDeviceSynchronize();
std::cout << "reversed string:\n" << string << "\n";
// free memory
cudaFree(string);
return 0;
}
|
252796a182a3e9fd0eb9328a61f499943e344d89.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda-coord.cu"
using namespace pCUDA_coord;
#include "links.cuh"
#include <gp/cuda-util-kernel.h>
__constant__ CPU_GPU_Common dc;
__global__ void time_step_gpu_links(float delta_t);
__global__ void time_step_gpu_balls(float delta_t);
__host__ hipError_t
cuda_setup(GPU_Info *gpu_info)
{
// Return attributes of CUDA functions. The code needs the
// maximum number of threads.
hipError_t e1 = hipSuccess;
gpu_info->GET_INFO(time_step_gpu_links);
gpu_info->GET_INFO(time_step_gpu_balls);
return e1;
}
void
data_cpu_to_gpu_common(CPU_GPU_Common *host_c)
{
CE( hipMemcpyToSymbol( dc, host_c, sizeof(*host_c) ) );
}
__host__ void
launch_time_step(float delta_t, int gsize, int blksize)
{
hipLaunchKernelGGL(( time_step_gpu_links), dim3(gsize),dim3(blksize), 0, 0, delta_t);
hipLaunchKernelGGL(( time_step_gpu_balls), dim3(gsize),dim3(blksize), 0, 0, delta_t);
}
__device__ bool
platform_collision_possible(pCoor pos)
{
// Assuming no motion in x or z axes.
//
return pos.x >= dc.platform_xmin && pos.x <= dc.platform_xmax
&& pos.z >= dc.platform_zmin && pos.z <= dc.platform_zmax;
}
__device__ void
pAtomic_Add(pVect4& d, pVect s)
{
atomicAdd(&d.x,s.x);
atomicAdd(&d.y,s.y);
atomicAdd(&d.z,s.z);
}
__global__ void
time_step_gpu_links(float delta_t)
{
const int tid = threadIdx.x + blockDim.x * blockIdx.x;
const int n_threads = blockDim.x * gridDim.x;
#define link(mem) dc.links.mem[li]
#define ball1(mem) dc.balls.mem[ball_1_idx]
#define ball2(mem) dc.balls.mem[ball_2_idx]
for ( int li=tid; li<dc.n_links; li += n_threads )
{
if ( !link(is_simulatable) ) continue;
// Spring Force from Neighbor Balls
//
const int ball_1_idx = link(ball1_idx);
const int ball_2_idx = link(ball2_idx);
pCoor ball1_pos = ball1(position);
pCoor ball2_pos = ball2(position);
// Find position and velocity of the point where the link touches
// the surface of ball 1 ...
//
pVect dir1 = ball1(omatrix) * link(cb1);
pCoor pos1 = ball1_pos + dir1;
pVect vel1 = ball1(velocity) + cross( ball1(omega), dir1 );
// ... and ball 2.
//
pVect dir2 = ball2(omatrix) * link(cb2);
pCoor pos2 = ball2_pos + dir2;
pVect vel2 = ball2(velocity) + cross( ball2(omega), dir2 );
// Construct a normalized (Unit) Vector from ball to neighbor
// based on link connection points and ball centers.
//
pNorm link_dir(pos1,pos2);
pNorm c_to_c(ball1_pos,ball2_pos);
const float link_length = link_dir.magnitude;
// Compute the speed of ball's end of link towards neighbor's end of link.
//
pVect delta_v = vel2 - vel1;
float delta_s = dot( delta_v, link_dir );
// Compute by how much the spring is stretched (positive value)
// or compressed (negative value).
//
const float spring_stretch = link_length - link(distance_relaxed);
// Determine whether spring is gaining energy (whether its length
// is getting further from its relaxed length).
//
const bool gaining_e = ( delta_s > 0.0f ) == ( spring_stretch > 0 );
// Use a smaller spring constant when spring is losing energy,
// a quick and dirty way of simulating energy loss due to spring
// friction.
//
const float spring_constant =
gaining_e ? dc.opt_spring_constant : dc.opt_spring_constant * 0.7f;
const float force_mag = spring_constant * spring_stretch;
pVect spring_force_12 = force_mag * link_dir;
// Apply forces affecting linear momentum.
//
pAtomic_Add( ball1(force), spring_force_12 );
pAtomic_Add( ball2(force), -spring_force_12 );
if ( ! link(is_surface_connection) ) continue;
pNorm dir1n(dir1);
pNorm dir2n(dir2);
// Apply torque.
//
pVect torque1 = cross(dir1n, spring_force_12);
pVect torque2 = cross(spring_force_12,dir2n);
pAtomic_Add( ball1(torque), torque1 );
pAtomic_Add( ball2(torque), torque2 );
}
}
__global__ void
time_step_gpu_balls(float delta_t)
{
const int tid = threadIdx.x + blockDim.x * blockIdx.x;
const int n_threads = blockDim.x * gridDim.x;
#define ball(mem) dc.balls.mem[bi]
///
/// Update Position of Each Ball
///
for ( int bi=tid; bi<dc.n_balls; bi += n_threads )
{
if ( ball(locked) )
{
ball(velocity) = pVect4(0);
ball(omega) = pVect4(0);
continue;
}
// Update Velocity
//
// This code assumes that force on ball is constant over time
// step. This is clearly wrong when balls are moving with
// respect to each other because the springs are changing
// length. This inaccuracy will make the simulation unstable
// when spring constant is large for the time step.
//
const float mass = ball(mass);
pCoor ball_position = ball(position);
pVect4 ball_velocity = ball(velocity);
pVect4 ball_force = ball(force);
ball(force) = pVect4(0);
ball_force += mass * dc.gravity_accel;
pVect delta_v = ( delta_t / mass ) * ball_force;
if ( platform_collision_possible(ball_position) && ball_position.y < 0 )
{
const float spring_constant_plat =
ball_velocity.y < 0 ? 100000 : 50000;
const float fric_coefficient = 0.1;
const float force_up = -ball_position.y * spring_constant_plat;
const float delta_v_up = force_up / mass * delta_t;
const float fric_force_mag = fric_coefficient * force_up;
pNorm surface_v(ball_velocity.x,0,ball_velocity.z);
const float delta_v_surf = fric_force_mag / mass * delta_t;
if ( delta_v_surf > surface_v.magnitude )
{
// Ignoring other forces?
delta_v = pVect(-ball_velocity.x,delta_v.y,-ball_velocity.z);
}
else
{
delta_v -= delta_v_surf * surface_v;
}
delta_v.y += delta_v_up;
}
ball_velocity += delta_v;
// Air Resistance
//
const float fs = powf(1+dc.opt_air_resistance,-delta_t);
ball_velocity *= fs;
ball(velocity) = ball_velocity;
// Update Position
//
// Assume that velocity is constant.
//
ball_position += ball_velocity * delta_t;
ball(position) = ball_position;
pVect4 ball_omega = ball(omega);
ball_omega += delta_t * ball(fdt_to_do) * ball(torque);
ball(omega) = ball_omega;
ball(torque) = pVect4(0);
pNorm axis(ball_omega);
// Update Orientation
//
// If ball isn't spinning fast skip expensive rotation.
//
if ( axis.mag_sq < 0.000001f ) continue;
pQuat orientation =
pQuat( axis, delta_t * axis.magnitude ) * ball(orientation);
ball(orientation) = orientation;
ball(omatrix) = pMatrix3x3p(orientation);
}
#undef ball
}
|
252796a182a3e9fd0eb9328a61f499943e344d89.cu
|
#include "cuda-coord.cu"
using namespace pCUDA_coord;
#include "links.cuh"
#include <gp/cuda-util-kernel.h>
__constant__ CPU_GPU_Common dc;
__global__ void time_step_gpu_links(float delta_t);
__global__ void time_step_gpu_balls(float delta_t);
__host__ cudaError_t
cuda_setup(GPU_Info *gpu_info)
{
// Return attributes of CUDA functions. The code needs the
// maximum number of threads.
cudaError_t e1 = cudaSuccess;
gpu_info->GET_INFO(time_step_gpu_links);
gpu_info->GET_INFO(time_step_gpu_balls);
return e1;
}
void
data_cpu_to_gpu_common(CPU_GPU_Common *host_c)
{
CE( cudaMemcpyToSymbol( dc, host_c, sizeof(*host_c) ) );
}
__host__ void
launch_time_step(float delta_t, int gsize, int blksize)
{
time_step_gpu_links<<<gsize,blksize>>>(delta_t);
time_step_gpu_balls<<<gsize,blksize>>>(delta_t);
}
__device__ bool
platform_collision_possible(pCoor pos)
{
// Assuming no motion in x or z axes.
//
return pos.x >= dc.platform_xmin && pos.x <= dc.platform_xmax
&& pos.z >= dc.platform_zmin && pos.z <= dc.platform_zmax;
}
__device__ void
pAtomic_Add(pVect4& d, pVect s)
{
atomicAdd(&d.x,s.x);
atomicAdd(&d.y,s.y);
atomicAdd(&d.z,s.z);
}
__global__ void
time_step_gpu_links(float delta_t)
{
const int tid = threadIdx.x + blockDim.x * blockIdx.x;
const int n_threads = blockDim.x * gridDim.x;
#define link(mem) dc.links.mem[li]
#define ball1(mem) dc.balls.mem[ball_1_idx]
#define ball2(mem) dc.balls.mem[ball_2_idx]
for ( int li=tid; li<dc.n_links; li += n_threads )
{
if ( !link(is_simulatable) ) continue;
// Spring Force from Neighbor Balls
//
const int ball_1_idx = link(ball1_idx);
const int ball_2_idx = link(ball2_idx);
pCoor ball1_pos = ball1(position);
pCoor ball2_pos = ball2(position);
// Find position and velocity of the point where the link touches
// the surface of ball 1 ...
//
pVect dir1 = ball1(omatrix) * link(cb1);
pCoor pos1 = ball1_pos + dir1;
pVect vel1 = ball1(velocity) + cross( ball1(omega), dir1 );
// ... and ball 2.
//
pVect dir2 = ball2(omatrix) * link(cb2);
pCoor pos2 = ball2_pos + dir2;
pVect vel2 = ball2(velocity) + cross( ball2(omega), dir2 );
// Construct a normalized (Unit) Vector from ball to neighbor
// based on link connection points and ball centers.
//
pNorm link_dir(pos1,pos2);
pNorm c_to_c(ball1_pos,ball2_pos);
const float link_length = link_dir.magnitude;
// Compute the speed of ball's end of link towards neighbor's end of link.
//
pVect delta_v = vel2 - vel1;
float delta_s = dot( delta_v, link_dir );
// Compute by how much the spring is stretched (positive value)
// or compressed (negative value).
//
const float spring_stretch = link_length - link(distance_relaxed);
// Determine whether spring is gaining energy (whether its length
// is getting further from its relaxed length).
//
const bool gaining_e = ( delta_s > 0.0f ) == ( spring_stretch > 0 );
// Use a smaller spring constant when spring is losing energy,
// a quick and dirty way of simulating energy loss due to spring
// friction.
//
const float spring_constant =
gaining_e ? dc.opt_spring_constant : dc.opt_spring_constant * 0.7f;
const float force_mag = spring_constant * spring_stretch;
pVect spring_force_12 = force_mag * link_dir;
// Apply forces affecting linear momentum.
//
pAtomic_Add( ball1(force), spring_force_12 );
pAtomic_Add( ball2(force), -spring_force_12 );
if ( ! link(is_surface_connection) ) continue;
pNorm dir1n(dir1);
pNorm dir2n(dir2);
// Apply torque.
//
pVect torque1 = cross(dir1n, spring_force_12);
pVect torque2 = cross(spring_force_12,dir2n);
pAtomic_Add( ball1(torque), torque1 );
pAtomic_Add( ball2(torque), torque2 );
}
}
__global__ void
time_step_gpu_balls(float delta_t)
{
const int tid = threadIdx.x + blockDim.x * blockIdx.x;
const int n_threads = blockDim.x * gridDim.x;
#define ball(mem) dc.balls.mem[bi]
///
/// Update Position of Each Ball
///
for ( int bi=tid; bi<dc.n_balls; bi += n_threads )
{
if ( ball(locked) )
{
ball(velocity) = pVect4(0);
ball(omega) = pVect4(0);
continue;
}
// Update Velocity
//
// This code assumes that force on ball is constant over time
// step. This is clearly wrong when balls are moving with
// respect to each other because the springs are changing
// length. This inaccuracy will make the simulation unstable
// when spring constant is large for the time step.
//
const float mass = ball(mass);
pCoor ball_position = ball(position);
pVect4 ball_velocity = ball(velocity);
pVect4 ball_force = ball(force);
ball(force) = pVect4(0);
ball_force += mass * dc.gravity_accel;
pVect delta_v = ( delta_t / mass ) * ball_force;
if ( platform_collision_possible(ball_position) && ball_position.y < 0 )
{
const float spring_constant_plat =
ball_velocity.y < 0 ? 100000 : 50000;
const float fric_coefficient = 0.1;
const float force_up = -ball_position.y * spring_constant_plat;
const float delta_v_up = force_up / mass * delta_t;
const float fric_force_mag = fric_coefficient * force_up;
pNorm surface_v(ball_velocity.x,0,ball_velocity.z);
const float delta_v_surf = fric_force_mag / mass * delta_t;
if ( delta_v_surf > surface_v.magnitude )
{
// Ignoring other forces?
delta_v = pVect(-ball_velocity.x,delta_v.y,-ball_velocity.z);
}
else
{
delta_v -= delta_v_surf * surface_v;
}
delta_v.y += delta_v_up;
}
ball_velocity += delta_v;
// Air Resistance
//
const float fs = powf(1+dc.opt_air_resistance,-delta_t);
ball_velocity *= fs;
ball(velocity) = ball_velocity;
// Update Position
//
// Assume that velocity is constant.
//
ball_position += ball_velocity * delta_t;
ball(position) = ball_position;
pVect4 ball_omega = ball(omega);
ball_omega += delta_t * ball(fdt_to_do) * ball(torque);
ball(omega) = ball_omega;
ball(torque) = pVect4(0);
pNorm axis(ball_omega);
// Update Orientation
//
// If ball isn't spinning fast skip expensive rotation.
//
if ( axis.mag_sq < 0.000001f ) continue;
pQuat orientation =
pQuat( axis, delta_t * axis.magnitude ) * ball(orientation);
ball(orientation) = orientation;
ball(omatrix) = pMatrix3x3p(orientation);
}
#undef ball
}
|
8b5c07c9472406fada04b89010f36601d20b1d30.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//************************************************************************
//Sub-Pixel Layer
//Written by ChaoFan, 2018.07.15
//For GPU, this layer supports both forward and backward computation
//************************************************************************
#include <cfloat>
#include <algorithm>
#include <vector>
#include "caffe/layers/pixel_shuffle_layer.hpp"
#include "caffe/util/gpu_util.cuh"
namespace caffe{
template <typename Dtype>
__global__ void PixelShuffleForward(const int nthreads,
const int upscale_factor, const int output_dim, Dtype* top_data,
const int num, const int channels, const int height, const int width, const Dtype* bottom_data){
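//Each thread maps one element of the (N, C*r*r, H, W) bottom blob to its
//position in the (N, output_dim, H*r, W*r) top blob, where r = upscale_factor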
CUDA_KERNEL_LOOP(index, nthreads){
int bot_channels = channels;
int bot_height = height;
int bot_width = width;
int bot_width_index = index % bot_width;
int bot_height_index = ((index - bot_width_index) % (bot_width * bot_height)) / bot_width;
int bot_channel_index = ((index - bot_width_index - bot_height_index * bot_width) % (bot_channels * bot_height * bot_width)) / (bot_height * bot_width);
int num_index = (index - bot_width_index - bot_height_index * bot_width - bot_channel_index * bot_height * bot_width) / (bot_channels * bot_height * bot_width);
int top_channel_index = bot_channel_index / (upscale_factor * upscale_factor);
int top_width_index = bot_width_index * upscale_factor + bot_channel_index % upscale_factor;
int top_height_index = bot_height_index * upscale_factor +
(bot_channel_index - top_channel_index * upscale_factor * upscale_factor) / upscale_factor;
int top_data_index = num_index * (output_dim * bot_height * upscale_factor * bot_width * upscale_factor) +
top_channel_index * (bot_height * upscale_factor * bot_width * upscale_factor) +
top_height_index * bot_width * upscale_factor + top_width_index;
top_data[top_data_index] = (index < num * bot_channels * bot_height * bot_width)?bottom_data[index]:0;
}
}
template<typename Dtype>
void PixelShuffleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top){
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0), top_data);
hipLaunchKernelGGL((PixelShuffleForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, upscale_factor_, output_dim_, top_data,
num_, channels_, height_, width_, bottom_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void PixelShuffleBackward(const int nthreads,
const int upscale_factor, const int output_dim, const Dtype* top_diff,
const int num, const int channels, const int height, const int width, Dtype* bottom_diff){
CUDA_KERNEL_LOOP(index, nthreads){
int top_width = width * upscale_factor;
int top_height = height * upscale_factor;
int top_channels = output_dim;
int top_width_index = index % top_width;
int top_height_index = ((index - top_width_index) % (top_width * top_height)) / (top_width);
int top_channel_index = ((index - top_width_index - top_height_index * top_width) % (top_channels * top_height * top_width)) / (top_width * top_height);
int num_index = (index - top_width_index - top_height_index * top_width -
top_channel_index * (top_height * top_width)) / (top_channels * top_height * top_width);
int bot_channels = channels;
int bot_height = height;
int bot_width = width;
int bot_channel_index = top_channel_index * upscale_factor * upscale_factor +
(top_width_index % upscale_factor) + (top_height_index % upscale_factor) * upscale_factor;
int bot_width_index = top_width_index / upscale_factor;
int bot_height_index = top_height_index / upscale_factor;
int bottom_diff_index = num_index * (bot_channels * bot_height * bot_width) + bot_channel_index * (bot_height * bot_width ) +
bot_height_index * bot_width + bot_width_index;
bottom_diff[bottom_diff_index] = (index < num * top_channels * top_height * top_width)?top_diff[index]:0;
}
}
template <typename Dtype>
void PixelShuffleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom){
if(!propagate_down[0]){ return; }
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
int count = top[0]->count();
caffe_gpu_set(count, Dtype(0), bottom_diff);
hipLaunchKernelGGL((PixelShuffleBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, upscale_factor_, output_dim_, top_diff,
num_, channels_, height_, width_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PixelShuffleLayer);
}
|
8b5c07c9472406fada04b89010f36601d20b1d30.cu
|
//************************************************************************
//Sub-Pixel Layer
//Written by ChaoFan, 2018.07.15
//For GPU, this layer supports both forward and backward computation
//************************************************************************
#include <cfloat>
#include <algorithm>
#include <vector>
#include "caffe/layers/pixel_shuffle_layer.hpp"
#include "caffe/util/gpu_util.cuh"
namespace caffe{
template <typename Dtype>
__global__ void PixelShuffleForward(const int nthreads,
const int upscale_factor, const int output_dim, Dtype* top_data,
const int num, const int channels, const int height, const int width, const Dtype* bottom_data){
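//Each thread maps one element of the (N, C*r*r, H, W) bottom blob to its
//position in the (N, output_dim, H*r, W*r) top blob, where r = upscale_factor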
CUDA_KERNEL_LOOP(index, nthreads){
int bot_channels = channels;
int bot_height = height;
int bot_width = width;
int bot_width_index = index % bot_width;
int bot_height_index = ((index - bot_width_index) % (bot_width * bot_height)) / bot_width;
int bot_channel_index = ((index - bot_width_index - bot_height_index * bot_width) % (bot_channels * bot_height * bot_width)) / (bot_height * bot_width);
int num_index = (index - bot_width_index - bot_height_index * bot_width - bot_channel_index * bot_height * bot_width) / (bot_channels * bot_height * bot_width);
int top_channel_index = bot_channel_index / (upscale_factor * upscale_factor);
int top_width_index = bot_width_index * upscale_factor + bot_channel_index % upscale_factor;
int top_height_index = bot_height_index * upscale_factor +
(bot_channel_index - top_channel_index * upscale_factor * upscale_factor) / upscale_factor;
int top_data_index = num_index * (output_dim * bot_height * upscale_factor * bot_width * upscale_factor) +
top_channel_index * (bot_height * upscale_factor * bot_width * upscale_factor) +
top_height_index * bot_width * upscale_factor + top_width_index;
top_data[top_data_index] = (index < num * bot_channels * bot_height * bot_width)?bottom_data[index]:0;
}
}
template<typename Dtype>
void PixelShuffleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top){
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0), top_data);
PixelShuffleForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, upscale_factor_, output_dim_, top_data,
num_, channels_, height_, width_, bottom_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void PixelShuffleBackward(const int nthreads,
const int upscale_factor, const int output_dim, const Dtype* top_diff,
const int num, const int channels, const int height, const int width, Dtype* bottom_diff){
CUDA_KERNEL_LOOP(index, nthreads){
int top_width = width * upscale_factor;
int top_height = height * upscale_factor;
int top_channels = output_dim;
int top_width_index = index % top_width;
int top_height_index = ((index - top_width_index) % (top_width * top_height)) / (top_width);
int top_channel_index = ((index - top_width_index - top_height_index * top_width) % (top_channels * top_height * top_width)) / (top_width * top_height);
int num_index = (index - top_width_index - top_height_index * top_width -
top_channel_index * (top_height * top_width)) / (top_channels * top_height * top_width);
int bot_channels = channels;
int bot_height = height;
int bot_width = width;
int bot_channel_index = top_channel_index * upscale_factor * upscale_factor +
(top_width_index % upscale_factor) + (top_height_index % upscale_factor) * upscale_factor;
int bot_width_index = top_width_index / upscale_factor;
int bot_height_index = top_height_index / upscale_factor;
int bottom_diff_index = num_index * (bot_channels * bot_height * bot_width) + bot_channel_index * (bot_height * bot_width ) +
bot_height_index * bot_width + bot_width_index;
bottom_diff[bottom_diff_index] = (index < num * top_channels * top_height * top_width)?top_diff[index]:0;
}
}
template <typename Dtype>
void PixelShuffleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom){
if(!propagate_down[0]){ return; }
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
int count = top[0]->count();
caffe_gpu_set(count, Dtype(0), bottom_diff);
PixelShuffleBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, upscale_factor_, output_dim_, top_diff,
num_, channels_, height_, width_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PixelShuffleLayer);
}
|
76192176db4db89e729716bfc97936cf4c2ffc81.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* ConvoyTracker.cu
*
* Created on: 06.06.2016
* Author: Sebastian Reinhart
*/
#include "ConvoyTracker.cuh"
#include <assert.h>
ConvoyTracker::ConvoyTracker() {
currentSpeed = 0;
currentYawRate = 0;
x = 0;
y = 0;
yaw = 0;
xOld = 0;
yOld = 0;
yawOld = 0;
ID = 0;
convoyID = 0;
currentHistoryOnDevice = false;
currentConvoyOnDevice = false;
convoySize = 0;
startIndexConvoys = 0;
endIndexConvoys = 0;
historySize = 0;
startIndexHistory = 0;
endIndexHistory = 0;
convoyCheckSize = 0;
intervalSize = 0;
hipError_t error;
//create cuda streams
hipStreamCreate(&stream2);
hipStreamCreate(&stream3);
hipStreamCreate(&stream4);
//allocate host memory and create device pointers
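//all buffers below are pinned, device-mapped host allocations
//(hipHostMallocMapped); hipHostGetDevicePointer yields the device-side
//alias, so kernels can access them without explicit copies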
error = hipHostMalloc((void**) &history, NUM_HIST*sizeof(History), hipHostMallocMapped);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
error = hipHostGetDevicePointer(&d_history_ptr, history, 0);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
error = hipHostMalloc((void**) &h_historyMatch, MAX_SEGMENTS*sizeof(int), hipHostMallocMapped);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
error = hipHostGetDevicePointer(&d_historyMatch_ptr, h_historyMatch, 0);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
error = hipHostMalloc((void**) &h_historyMatchSelf, sizeof(int), hipHostMallocMapped);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
error = hipHostGetDevicePointer(&d_historyMatchSelf_ptr, h_historyMatchSelf, 0);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
error = hipHostMalloc((void**) &h_intervalMap, MAX_SEGMENTS*sizeof(PointCellDevice), hipHostMallocMapped);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
error = hipHostGetDevicePointer(&d_intervalMap_ptr, h_intervalMap, 0);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
error = hipHostMalloc((void**) &h_convoyCheck, MAX_SEGMENTS*sizeof(PointCellDevice), hipHostMallocMapped);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
error = hipHostGetDevicePointer(&d_newVeh_ptr, h_convoyCheck, 0);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
error = hipHostMalloc((void**) &h_IDincluded, NUM_HIST*2*sizeof(int), hipHostMallocMapped);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
error = hipHostGetDevicePointer(&d_IDincluded_ptr, h_IDincluded, 0);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
error = hipHostMalloc((void**) &h_duplicate, NUM_HIST*sizeof(bool), hipHostMallocMapped);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
error = hipHostGetDevicePointer(&d_duplicate_ptr, h_duplicate, 0);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
error = hipHostMalloc((void**) &h_updateData, MAX_SEGMENTS*3*sizeof(float), hipHostMallocMapped);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
error = hipHostGetDevicePointer(&d_updataData_ptr, h_updateData, 0);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
error = hipHostMalloc((void**) &h_intvlIndex, MAX_SEGMENTS*sizeof(int), hipHostMallocMapped);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
error = hipHostGetDevicePointer(&d_intvlIndex_ptr, h_intvlIndex, 0);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
error = hipHostMalloc((void**) &h_vehicles, MAX_SEGMENTS*sizeof(PointCellDevice), hipHostMallocMapped);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
error = hipHostGetDevicePointer(&d_vehicles_ptr, h_vehicles, 0);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
error = hipHostMalloc((void**) &h_distance, MAX_SEGMENTS*MAX_SEGMENTS*sizeof(float), hipHostMallocMapped);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
error = hipHostGetDevicePointer(&d_distance_ptr, h_distance, 0);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
size_t sizeConv = NUM_CONV;
sizeConv *= sizeof(Convoy);
error = hipHostMalloc((void **) &convoys, sizeConv, hipHostMallocMapped);
if(error != hipSuccess)
{
printf(
"hipGetDeviceProperties returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
error = hipHostMalloc((void**) &xSubInterval, sizeof(float), hipHostMallocMapped);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
*xSubInterval = 0;
error = hipHostGetDevicePointer(&d_subIntvl_ptr, xSubInterval, 0);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
error = hipHostGetDevicePointer(&d_convoys_ptr, convoys, 0);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
}
ConvoyTracker::~ConvoyTracker() {
// free everything
hipHostFree(xSubInterval);
hipHostFree(convoys);
hipHostFree(history);
hipHostFree(h_historyMatch);
hipHostFree(h_convoyCheck);
hipHostFree(h_intervalMap);
hipHostFree(h_historyMatchSelf);
hipHostFree(h_IDincluded);
hipHostFree(h_vehicles);
hipHostFree(h_distance);
hipHostFree(h_duplicate);
hipHostFree(h_intvlIndex);
hipStreamDestroy(stream2);
hipStreamDestroy(stream3);
hipStreamDestroy(stream4);
}
/**
* returns the given number @param i as a string, zero-padded to four digits
*/
std::string getNextMeasureAsString(int i)
{
std::ostringstream number;
if(i<10)
{
number << "000" << i;
}
else if(i<100)
{
number << "00" << i;
}
else if(i<1000)
{
number << "0" << i;
}
else
{
number << i;
}
return number.str();
}
/*
* performs the ego motion compensation of the position with the given index in the given history
*/
__device__ void shiftRotateHistory(History* d_pc, float x, float y, float theta, int index)
{
//check whether index is within bounds for this history
if(((index < d_pc->endIndex) && (d_pc->endIndex > d_pc->startIndex)) || ((d_pc->endIndex < d_pc->startIndex) && (index != d_pc->endIndex)))
{
//shift compensation
d_pc->tracks[index].subIntvl += x;
int numIntervals = (int) ((d_pc->tracks[index].subIntvl) / INTERVALL_LENGTH);
d_pc->tracks[index].x -= numIntervals;
d_pc->tracks[index].subIntvl -= numIntervals;
//rotate compensation
float angleInRadians = theta*((float)M_PI)/180.0f;
float mat[2][2] = { { cosf(angleInRadians), -sinf(angleInRadians) },
{ sinf(angleInRadians), cosf(angleInRadians) } };
d_pc->tracks[index].y -= y;
d_pc->tracks[index].theta -= angleInRadians;
float xAbs = d_pc->tracks[index].x;
float yAbs = d_pc->tracks[index].y;
xAbs = (mat[0][0] * xAbs + mat[0][1] * yAbs) - xAbs;
yAbs = (mat[1][0] * xAbs + mat[1][1] * yAbs) - yAbs;
d_pc->tracks[index].y -= yAbs;
d_pc->tracks[index].subIntvl -= xAbs;
}
}
/*
* performs the ego motion compensation of the position with the given index in the given convoy
*/
__device__ void shiftRotateConvoy(Convoy* d_eml, float x, float y, float theta, int index)
{
//check whether index is within bounds for this convoy
if(((index < d_eml->endIndexTracks) && (d_eml->endIndexTracks > d_eml->startIndexTracks)) || ((d_eml->endIndexTracks < d_eml->startIndexTracks) && (index != d_eml->endIndexTracks)))
{
//shift compensation
d_eml->tracks[index].subIntvl += x;
int numIntervals = (int) ((d_eml->tracks[index].subIntvl) / INTERVALL_LENGTH);
d_eml->tracks[index].x -= numIntervals;
d_eml->tracks[index].subIntvl -= numIntervals;
//rotate compensation
float angleInRadians = theta*((float)M_PI)/180.0f;
float mat[2][2] = { { cosf(angleInRadians), -sinf(angleInRadians) },
{ sinf(angleInRadians), cosf(angleInRadians) } };
d_eml->tracks[index].y -= y;
d_eml->tracks[index].theta -= angleInRadians;
float xAbs = d_eml->tracks[index].x;
float yAbs = d_eml->tracks[index].y;
xAbs = (mat[0][0] * xAbs + mat[0][1] * yAbs) - xAbs;
yAbs = (mat[1][0] * xAbs + mat[1][1] * yAbs) - yAbs;
d_eml->tracks[index].y -= yAbs;
d_eml->tracks[index].subIntvl -= xAbs;
}
}
/*
* performs the ego motion compensation of the given pointcell
*/
__device__ void computeIntervalMap(PointCellDevice* d_interval, float xMotion, float yMotion, float angle, float* xSubInterval)
{
float angleInRadians = angle * ((float)M_PI) / 180.0f;
//shift compensation
*xSubInterval += xMotion;
int numIntervals = (int) (*xSubInterval / INTERVALL_LENGTH);
*xSubInterval -= numIntervals;
for (int i = 0; i < numIntervals; i++)
{
double x = d_interval->getX();
int interval = floor(x) + CARINTERVAL;
if(interval == 0)
{
//mark content to delete
d_interval->setX(-10000);
continue;
}
d_interval->setX(floor(x) - 0.5f);
}
int interval = floor(d_interval->getX());
//rotation
//1.Step correct directions of stored PCs
d_interval->setY(d_interval->getY() - yMotion);
d_interval->setTheta(d_interval->getTheta() - angleInRadians);
//2. compensate rotation
float xAbs = ( interval - CARINTERVAL + 0.5f) * INTERVALL_LENGTH
- *xSubInterval;
float yAbs = d_interval->getY();
float mat[2][2] = { { cosf(angleInRadians), -sinf(angleInRadians) },
{ sinf(angleInRadians), cosf(angleInRadians) } };
xAbs = (mat[0][0] * xAbs + mat[0][1] * yAbs) - xAbs;
yAbs = (mat[1][0] * xAbs + mat[1][1] * yAbs) - yAbs;
d_interval->setY(d_interval->getY() - yAbs);
if(xAbs > 0.5*INTERVALL_LENGTH)
{
d_interval->setX(interval + 1.5);
}
else if(xAbs < -0.5*INTERVALL_LENGTH)
{
d_interval->setX(interval - 0.5);
}
else
{
d_interval->subInvtl -= xAbs;
}
}
/*
* checks whether the given vehicle matches with the position at index in the given history
*/
__device__ bool findHistoryMatch(PointCellDevice* trackedVehicles, History* d_history, int index)
{
bool result = (d_history->ID != trackedVehicles->getID());
result = (result && (d_history->tracks[index].x - 0.5 <= trackedVehicles->getX()));
result = (result && (trackedVehicles->getX() <= d_history->tracks[index].x + 0.5));
result = (result && (d_history->tracks[index].y - 1.0 <= trackedVehicles->getY()));
result = (result && (trackedVehicles->getY() <= d_history->tracks[index].y + 1.0));
return result;
}
/*
* checks whether the own vehicles position matches with the position at index in the given history
*/
__device__ bool findHistoryMatchSelf(History* d_history, int index)
{
bool result = true;
result = (result && (d_history->tracks[index].x - 0.5 <= 0));
result = (result && (0 <= d_history->tracks[index].x + 0.5));
result = (result && (d_history->tracks[index].y - 1.0 <= 0));
result = (result && (0 <= d_history->tracks[index].y + 1.0));
return result;
}
/*
* performs kalman filter predict step for one matrix element defined by index of the given vehicle
*/
__device__ void predictDevice(PointCellDevice* vehicle, int index)
{
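// One thread per (i,j) element of the 5x5 matrices (25 threads per vehicle):
// save the current state, recompute F, then form P = F*P*F^T + Q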
int state = index%5;
//row
int i = index / 5 ;
//column
int j = state;
vehicle->data[state+5] = vehicle->data[state];
__syncthreads();
vehicle->computeF();
vehicle->computeCovarianceF();
float tmp = 0;
//Tmp = F*P
for(int k=0; k<5; k++)
{
tmp += vehicle->getF(i,k)*vehicle->getP(k,j);
}
vehicle->writeTmp(i,j, tmp);
__syncthreads();
//P = tmp*F_t
tmp = 0;
for(int k=0; k<5; k++)
{
tmp += vehicle->getTmp(i,k)*vehicle->getF(j,k);
}
vehicle->writeP(i,j, tmp);
__syncthreads();
//P = P+Q
tmp = vehicle->getP(i,j) + vehicle->getQ(i,j);
vehicle->writeP(i,j, tmp);
}
/*
* distribution of the data for each thread
*/
__global__ void compensateEgoMotionMap(PointCellDevice* d_interval, float* d_subIntvl, float x, float y, float angle)
{
computeIntervalMap(&(d_interval[threadIdx.x]), x, y, angle, d_subIntvl);
}
__global__ void compensateEgoMotionHistory(History* d_history, float x, float y, float angle)
{
shiftRotateHistory(&(d_history[blockIdx.x]), x, y, angle, threadIdx.x);
}
__global__ void compensateEgoMotionConvoy(Convoy* d_convoy, float x, float y, float angle)
{
shiftRotateConvoy(&(d_convoy[blockIdx.x]), x, y, angle, threadIdx.x);
}
/*
* tries to find a match in history for each tracked vehicles
*/
__global__ void findConvoyDevice(PointCellDevice* trackedVehicles, History* d_history, int* d_historyMatch)
{
//check whether thread is in bounds
if(((threadIdx.x < d_history[blockIdx.x].endIndex) && (d_history[blockIdx.x].endIndex > d_history[blockIdx.x].startIndex)) || ((d_history[blockIdx.x].endIndex < d_history[blockIdx.x].startIndex) && (threadIdx.x != d_history[blockIdx.x].endIndex)))
{
//check whether given car matches given history position
if(findHistoryMatch(&(trackedVehicles[blockIdx.y]),&(d_history[blockIdx.x]),threadIdx.x))
{
//write the smallest matched history id to memory
atomicMin(&(d_historyMatch[blockIdx.y]), d_history[blockIdx.x].ID);
}
}
}
/*
* tries to find a match in history for current vehicle position
*/
__global__ void findConvoyDeviceSelf(History* d_history, int* d_historyMatchSelf)
{
//check whether thread is in bounds
if(((threadIdx.x < d_history[blockIdx.x].endIndex) && (d_history[blockIdx.x].endIndex > d_history[blockIdx.x].startIndex)) || ((d_history[blockIdx.x].endIndex < d_history[blockIdx.x].startIndex) && (threadIdx.x != d_history[blockIdx.x].endIndex)))
{
//check whether the given history position matches (0,0)
if(findHistoryMatchSelf(&(d_history[blockIdx.x]),threadIdx.x))
{
//write the smallest matched history id to memory
atomicMin(d_historyMatchSelf, d_history[blockIdx.x].ID);
}
}
}
/*
* set memory for the findConvoy kernel
*/
__global__ void memSetHistoryMatch(int* d_historyMatch)
{
d_historyMatch[threadIdx.x] = INT_MAX;
}
/*
* Run Kalman-Filter Predict on Device with #vehicles as Blocks and 25 Threads per Block
*/
__global__ void predict(PointCellDevice* d_interval)
{
predictDevice(&(d_interval[blockIdx.x]), threadIdx.x);
}
/*
* Run Kalman-Filter Update on Device with 25 Threads
*/
__device__ void updateDevice(PointCellDevice* d_interval, int index, float velocity, float phi, float xNew, float yNew, float thetaNew)
{
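// Standard Kalman update, one thread per (i,j) element of the 5x5 matrices:
// S = H*P*H^T + R, K = P*H^T*S^-1, state += K*(z - state), P = (I - K*H)*P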
//row
int i = index / 5;
//column
int j = index % 5;
float tmp = 0;
//tmp = H*P
for(int k=0; k<5; k++)
{
tmp += d_interval->getH(i,k)*d_interval->getP(k,j);
}
d_interval->writeTmp(i,j, tmp);
__syncthreads();
//S = tmp*H_t
tmp = 0;
for(int k=0; k<5; k++)
{
tmp += d_interval->getTmp(i,k)*d_interval->getH(j,k);
}
d_interval->writeS(i,j, tmp);
__syncthreads();
//S = S+R
tmp = d_interval->getS(i,j) + d_interval->getR(i,j);
d_interval->writeS(i,j, tmp);
__syncthreads();
//tmp = P*H_t
tmp = 0;
for(int k=0; k<5; k++)
{
tmp += d_interval->getP(i,k)*d_interval->getH(j,k);
}
d_interval->writeTmp(i,j, tmp);
__syncthreads();
//invertS
if(threadIdx.x == 0)
{
d_interval->invertS();
}
__syncthreads();
//K = tmp*S_i
tmp = 0;
for(int k=0; k<5; k++)
{
tmp += d_interval->getTmp(i,k)*d_interval->getS(k,j);
}
d_interval->writeK(i,j, tmp);
__syncthreads();
//tmp = K*(newState-stateVector)
tmp = 0;
tmp += d_interval->getK(i,0)*(xNew-d_interval->getX());
tmp += d_interval->getK(i,1)*(yNew-d_interval->getY());
tmp += d_interval->getK(i,2)*(thetaNew-d_interval->getTheta());
tmp += d_interval->getK(i,3)*(velocity-d_interval->getVelocity());
tmp += d_interval->getK(i,4)*(phi-d_interval->getPhi());
d_interval->writeTmp(i,j, tmp);
__syncthreads();
//stateVector = stateVector + tmp
if(threadIdx.x == 0)
{
d_interval->setX(d_interval->getX() + d_interval->getTmp(0,0));
d_interval->setY(d_interval->getY() + d_interval->getTmp(1,0));
d_interval->setTheta(d_interval->getTheta() + d_interval->getTmp(2,0));
d_interval->setVelocity(d_interval->getVelocity() + d_interval->getTmp(3,0));
d_interval->setPhi(d_interval->getPhi() + d_interval->getTmp(4,0));
}
__syncthreads();
//tmp = K*H
tmp = 0;
for(int k=0; k<5; k++)
{
tmp += d_interval->getK(i,k)*d_interval->getH(k,j);
}
d_interval->writeTmp(i,j, tmp);
__syncthreads();
//tmp = I- tmp
tmp = d_interval->getI(i,j) - d_interval->getTmp(i,j);
d_interval->writeTmp(i,j, tmp);
__syncthreads();
//tmp2 = tmp*P
tmp = 0;
for(int k=0; k<5; k++)
{
tmp += d_interval->getTmp(i,k)*d_interval->getP(k,j);
}
d_interval->writeTmp2(i,j, tmp);
__syncthreads();
d_interval->writeP(i,j, d_interval->getTmp2(i,j));
}
__global__ void updateKernel(PointCellDevice* d_intvl, float* d_updateData, int* d_intvlIndex)
{
int index = d_intvlIndex[blockIdx.x];
float xNew = d_updateData[blockIdx.x*3];
float yNew = d_updateData[blockIdx.x*3+1];
float thetaNew = d_updateData[blockIdx.x*3+2];
/* if(threadIdx.x == 0)
{
printf("Update ID %d with x %f y %f theta %f, updateValues x %f y %f theta %f\n", d_intvl[index].getID(),d_intvl[index].getX(),d_intvl[index].getY(),d_intvl[index].getTheta(),xNew, yNew,thetaNew);
}*/
float x = d_intvl[index].data[5];
float y = d_intvl[index].data[6];
float theta = d_intvl[index].data[7];
float velocity = sqrtf((xNew - x) * (xNew - x) + (yNew - y)*(yNew - y)) / TIMESTAMP;
float phi = (thetaNew-theta) / TIMESTAMP;
if(threadIdx.x == 0)
{
d_intvl[index].setVelocity(velocity);
d_intvl[index].setPhi(phi);
}
updateDevice(&(d_intvl[index]),threadIdx.x,velocity, phi,xNew,yNew,thetaNew);
}
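/*
 * Illustrative launch sketch for the kernel above (not part of the build): one block
 * per matched vehicle, 25 threads per block, mirroring the call in associateAndUpdate.
 */
#if 0
hipLaunchKernelGGL((updateKernel), dim3(updateCounter), dim3(25), 0, 0,
                   d_intervalMap_ptr, d_updataData_ptr, d_intvlIndex_ptr);
hipDeviceSynchronize();
#endif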
/*
* marks for every convoy whether the given ids are included in it or not
*/
__global__ void findIDInConvoyDevice(Convoy* d_convoy, int* d_IDIncluded, int id1, int id2)
{
//check whether thread is in bounds
if(((threadIdx.x < d_convoy[blockIdx.x].endIndexID) && (d_convoy[blockIdx.x].endIndexID > d_convoy[blockIdx.x].startIndexID)) || ((d_convoy[blockIdx.x].endIndexID < d_convoy[blockIdx.x].startIndexID) && (threadIdx.x != d_convoy[blockIdx.x].endIndexID)))
{
int index = blockIdx.x*2;
//init memory
d_IDIncluded[index] = INT_MAX;
d_IDIncluded[index+1] = INT_MAX;
__syncthreads();
//check and write results
int result = (d_convoy[blockIdx.x].participatingVehicles[threadIdx.x] == id1);
if(result)
{
atomicMin(&(d_IDIncluded[index]), threadIdx.x);
}
result = (d_convoy[blockIdx.x].participatingVehicles[threadIdx.x] == id2);
if(result)
{
atomicMin(&(d_IDIncluded[index+1]), threadIdx.x);
}
//if current convoy is the ego convoy, mark it with INT_MIN
result = (d_convoy[blockIdx.x].participatingVehicles[threadIdx.x] == -1);
if(result)
{
atomicMin(&(d_IDIncluded[index+1]), INT_MIN);
atomicMin(&(d_IDIncluded[index]), INT_MIN);
}
}
}
/*
* checks for every convoy whether the given vehicle position is already included or not
*/
__global__ void checkConvoyForDuplicateDevice(Convoy* d_convoy, PointCellDevice* d_vehicle, bool* d_duplicate)
{
//check whether thread is in bounds
if(((threadIdx.x < d_convoy[blockIdx.x].endIndexTracks) && (d_convoy[blockIdx.x].endIndexTracks > d_convoy[blockIdx.x].startIndexTracks)) || ((d_convoy[blockIdx.x].endIndexTracks < d_convoy[blockIdx.x].startIndexTracks) && (threadIdx.x != d_convoy[blockIdx.x].endIndexTracks)))
{
d_duplicate[blockIdx.x] = true;
bool result = (d_convoy[blockIdx.x].tracks[threadIdx.x].x != (floor(d_vehicle->getX())+0.5));
if(!result)
{
d_duplicate[blockIdx.x] = d_duplicate[blockIdx.x] && result;
}
}
}
/*
* checks for every convoy whether own vehicle position is already included or not
*/
__global__ void checkConvoyForDuplicateDeviceSelf(Convoy* d_convoy, bool* d_duplicate)
{
//check whether thread is in bounds
if(((threadIdx.x < d_convoy[blockIdx.x].endIndexTracks) && (d_convoy[blockIdx.x].endIndexTracks > d_convoy[blockIdx.x].startIndexTracks)) || ((d_convoy[blockIdx.x].endIndexTracks < d_convoy[blockIdx.x].startIndexTracks) && (threadIdx.x != d_convoy[blockIdx.x].endIndexTracks)))
{
d_duplicate[blockIdx.x] = true;
bool result = (d_convoy[blockIdx.x].tracks[threadIdx.x].x != 0.5);
if(!result)
{
d_duplicate[blockIdx.x] = d_duplicate[blockIdx.x] && result;
}
}
}
/*
* checks for every updated vehicle whether its new position is already included in the corresponding history
*/
__global__ void checkHistoryForDuplicateDevice(History* d_history, PointCellDevice* d_intvl, int* d_intvlIndex, int* d_IDincluded, bool* d_duplicate)
{
//check whether thread is in bounds
if(((threadIdx.x < d_history[d_IDincluded[blockIdx.x]].endIndex) && (d_history[d_IDincluded[blockIdx.x]].endIndex > d_history[d_IDincluded[blockIdx.x]].startIndex)) || ((d_history[d_IDincluded[blockIdx.x]].endIndex < d_history[d_IDincluded[blockIdx.x]].startIndex) && (threadIdx.x != d_history[d_IDincluded[blockIdx.x]].endIndex)))
{
d_duplicate[blockIdx.x] = true;
int index = d_intvlIndex[blockIdx.x];
float intvl = floor(d_intvl[index].getX()) + 0.5f;
if(d_history[d_IDincluded[blockIdx.x]].tracks[threadIdx.x].x == intvl)
{
d_duplicate[blockIdx.x] = false;
}
}
}
/*
* tries to find the index of the history corresponding to each car
*/
__global__ void findHistoryWithIDDevice(History* d_history, PointCellDevice* d_intvl, int* d_intvlIndex, int* d_IDincluded)
{
int index = d_intvlIndex[threadIdx.x];
int ID = d_intvl[index].getID();
if(d_history[blockIdx.x].ID == ID)
{
//write index to memory
d_IDincluded[threadIdx.x] = blockIdx.x;
}
}
/*
* adds updated positions to the corresponding histories
*/
__global__ void addUpdatedPositionToHistoryDevice(History* d_history, PointCellDevice* d_intvl, int* d_intvlIndex, int* d_IDincluded, bool* d_duplicate)
{
int intvl = floor(d_intvl[d_intvlIndex[threadIdx.x]].getX());
d_intvl[d_intvlIndex[threadIdx.x]].setX(intvl+ 0.5);
int historyIndex = d_IDincluded[threadIdx.x];
//only add the position if it is not already contained in the history (d_duplicate is true in that case)
if(d_duplicate[threadIdx.x])
{
int index = d_history[historyIndex].endIndex;
d_history[historyIndex].tracks[index].subIntvl = 0.5;
d_history[historyIndex].tracks[index].x = d_intvl[d_intvlIndex[threadIdx.x]].getX();
d_history[historyIndex].tracks[index].y = d_intvl[d_intvlIndex[threadIdx.x]].getY();
d_history[historyIndex].tracks[index].theta = d_intvl[d_intvlIndex[threadIdx.x]].getTheta();
index = (index+1)%MAX_LENGTH_HIST_CONV;
d_history[historyIndex].endIndex = index;
//if number of position exceeds limit, delete oldest position
if(index == d_history[historyIndex].startIndex)
{
d_history[historyIndex].startIndex = (d_history[historyIndex].startIndex+1)%MAX_LENGTH_HIST_CONV;
}
}
}
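/*
 * Ring-buffer arithmetic used above, sketched with an assumed MAX_LENGTH_HIST_CONV of 10:
 * endIndex 9 wraps to (9+1)%10 == 0; if that equals startIndex, the oldest track is
 * discarded by advancing startIndex by one in the same modular fashion.
 */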
int main()
{
//uncomment the define in data.cuh to enable laser simulation
#ifdef CREATE_MEASURES
PGMReader pgmReader;
double speed = 4.0/3.0;
for(int i=0; i<NUM_MEASUREMENT; i++)
{
std::string number = getNextMeasureAsString(i);
pgmReader.simulateLaserRays(number);
//uncomment for automatic generation of EML files for a straight drive at 120 km/h
/* std::ofstream EMLMeasureFile;
std::ostringstream measurePath;
measurePath << "./Laserdata/EML" << number << ".txt";
EMLMeasureFile.open (measurePath.str().c_str());
EMLMeasureFile << ((double)i)*speed << " 0 0 120 0" << std::endl;
EMLMeasureFile.close();*/
}
#endif
int devID = 0;
hipError_t error;
hipDeviceProp_t deviceProp;
error = hipGetDevice(&devID);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
error = hipGetDeviceProperties(&deviceProp, devID);
if (deviceProp.computeMode == hipComputeModeProhibited) {
fprintf(stderr,
"Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (deviceProp.canMapHostMemory != 1){
fprintf(stderr, "Device cannot map memory!\n");
return 1;
}
if (error != hipSuccess) {
printf(
"hipGetDeviceProperties returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
} else {
#ifdef PRINT
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID,
deviceProp.name, deviceProp.major, deviceProp.minor);
#endif
}
hipSetDeviceFlags(hipDeviceMapHost);
hipEvent_t startEvent, stopEvent, start2Event, stop2Event;
hipEventCreate(&startEvent);
hipEventCreate(&stopEvent);
hipEventCreate(&start2Event);
hipEventCreate(&stop2Event);
float time = 0;
hipEventRecord(startEvent, 0);
ConvoyTracker tracker;
hipEventRecord(stopEvent, 0);
hipEventSynchronize(stopEvent);
hipEventElapsedTime(&time,startEvent,stopEvent);
#if SZENARIO == 6
std::vector<PointCellDevice> vehiclesSim;
error = hipHostMalloc((void**) &tracker.h_vehicleSim, MAX_SEGMENTS*sizeof(PointCellDevice), hipHostMallocMapped);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
error = hipHostGetDevicePointer(&tracker.d_vehicleSim_ptr, tracker.h_vehicleSim, 0);
if (error != hipSuccess) {
printf("hipGetDevice returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}
for(uint i=0;i <20; i++)
{
tracker.h_vehicleSim[i].initializeMemory();
if(i%2 == 0)
{
tracker.h_vehicleSim[i].setY(-3);
tracker.h_vehicleSim[i].setVelocity(38.9);
}
else
{
tracker.h_vehicleSim[i].setY(3);
tracker.h_vehicleSim[i].setVelocity(27.8);
}
tracker.h_vehicleSim[i].setX((i/2)*8);
tracker.h_vehicleSim[i].setTheta(0);
tracker.h_vehicleSim[i].setPhi(0);
}
#endif
hipEventRecord(startEvent, 0);
int vehicleCount = 0;
float compensateHistory[NUM_MEASUREMENT];
for(int i=0; i<NUM_MEASUREMENT; i++)
{
hipEventRecord(start2Event, 0);
std::vector<PointCellDevice*> trackedVehicles;
std::string number = getNextMeasureAsString(i);
tracker.readEMLData(number);
//1. Compensate own vehicle motion
double deltaX = tracker.getX() - tracker.getXOld();
double deltaY = tracker.getY() - tracker.getYOld();
double deltaYaw = tracker.getYaw() - tracker.getYawOld();
float angleInRadians = deltaYaw * ((float)M_PI) / 180.0f;
float mat[2][2] = { { cosf(angleInRadians), -sinf(angleInRadians) },
{ sinf(angleInRadians), cosf(angleInRadians) } };
//call ego compensation kernels according to the current sizes
if(tracker.historySize > 0)
{
hipLaunchKernelGGL(( compensateEgoMotionHistory), dim3(tracker.historySize), dim3(MAX_LENGTH_HIST_CONV),0, tracker.stream4, tracker.d_history_ptr, deltaX, deltaY, deltaYaw);
}
//read new laser data and extract vehicles
vehicleCount = tracker.reader.processLaserData(number,tracker.getCurrentSpeed(), tracker.getCurrentYawRate(), tracker.h_vehicles);
if(tracker.convoySize > 0)
{
hipLaunchKernelGGL(( compensateEgoMotionConvoy), dim3(tracker.convoySize), dim3(MAX_LENGTH_HIST_CONV),0, tracker.stream2, tracker.d_convoys_ptr, deltaX, deltaY, deltaYaw);
//apply the ego motion compensation to the highest value as well
for(uint k = 0; k < tracker.convoySize; k++)
{
tracker.convoys[k].highestValue.subIntvl += deltaX;
int numIntervals = (int) ((tracker.convoys[k].highestValue.subIntvl) / INTERVALL_LENGTH);
tracker.convoys[k].highestValue.x -= numIntervals;
tracker.convoys[k].highestValue.subIntvl -= numIntervals;
tracker.convoys[k].highestValue.y -= deltaY;
tracker.convoys[k].highestValue.theta -= angleInRadians;
float xAbs = tracker.convoys[k].highestValue.x;
float yAbs = tracker.convoys[k].highestValue.y;
xAbs = (mat[0][0] * xAbs + mat[0][1] * yAbs) - xAbs;
yAbs = (mat[1][0] * xAbs + mat[1][1] * yAbs) - yAbs;
tracker.convoys[k].highestValue.y -= yAbs;
tracker.convoys[k].highestValue.subIntvl -= xAbs;
}
}
//compensate ego motion of the interval map and perform the predict step
if(tracker.intervalSize > 0)
{
hipLaunchKernelGGL(( compensateEgoMotionMap), dim3(1),dim3(tracker.intervalSize),0,tracker.stream3, tracker.d_intervalMap_ptr, tracker.d_subIntvl_ptr, deltaX, deltaY, deltaYaw);
hipLaunchKernelGGL(( predict), dim3(tracker.intervalSize),dim3(25),0,tracker.stream3, tracker.d_intervalMap_ptr);
}
//search histories for match with ego position
if(tracker.historySize > 0)
{
*tracker.h_historyMatchSelf = INT_MAX;
hipLaunchKernelGGL(( findConvoyDeviceSelf), dim3(tracker.historySize), dim3(MAX_LENGTH_HIST_CONV), 0, 0, tracker.d_history_ptr, tracker.d_historyMatchSelf_ptr);
hipDeviceSynchronize();
if(*tracker.h_historyMatchSelf != INT_MAX)
{
tracker.findConvoySelf(*tracker.h_historyMatchSelf);
}
}
tracker.transformDataFromDevice();
hipStreamSynchronize(tracker.stream3);
//write the address of each point cell to the vector
for(uint j=0; j<tracker.intervalSize;j++)
{
trackedVehicles.push_back(&(tracker.h_intervalMap[j]));
}
//3. Associate and Update
#if SZENARIO == 6
tracker.associateAndUpdate(20, trackedVehicles);
#else
tracker.associateAndUpdate(vehicleCount, trackedVehicles);
#endif
hipEventRecord(stop2Event, 0);
hipEventSynchronize(stop2Event);
float time3;
hipEventElapsedTime(&time3, start2Event, stop2Event);
compensateHistory[i] = time3;
}
hipEventRecord(stopEvent, 0);
hipEventSynchronize(stopEvent);
float sumH = 0;
for(int i = 0; i< NUM_MEASUREMENT; i++)
{
sumH += compensateHistory[i];
}
sumH /= NUM_MEASUREMENT;
#ifdef PRINT
std::cout << "Duration of compensate History: " << sumH << std::endl;
#endif
float time2;
hipEventElapsedTime(&time2, startEvent, stopEvent);
#ifdef PRINT
std::cout << "Overall Time: " << time +time2<< std::endl;
#else
std::cout << time + time2 << std::endl;
#endif
#if SZENARIO == 6
hipHostFree(tracker.h_vehicleSim);
#endif
tracker.visualizeConvoys();
tracker.visualizeHistory();
return 0;
}
/**
* Reads the current pose (x, y, yaw), speed and yaw rate from the EML file into the class variables and appends the pose to the EML track
*/
void ConvoyTracker::readEMLData(std::string number)
{
std::ostringstream measurePath;
measurePath << EMLPATH << number << ".txt";
#ifdef PRINT
std::cout << measurePath.str() << std::endl;
#endif
std::ifstream input(measurePath.str().c_str());
std::string line;
std::string segment;
if(std::getline( input, line ))
{
std::stringstream ss;
ss << line;
int dataCnt = 1;
while(std::getline(ss, segment, ' '))
{
switch (dataCnt)
{
case 1:
{
//x in m
xOld = x;
x = atof(segment.c_str());
++dataCnt;
break;
}
case 2:
{
//y in m
yOld = y;
y = atof(segment.c_str());
++dataCnt;
break;
}
case 3:
{
//yaw in degrees
yawOld = yaw;
yaw = atof(segment.c_str());
++dataCnt;
break;
}
case 4:
{
//velocity in km/h
//Compute value in m/s
currentSpeed = atof(segment.c_str()) / 3.6;
++dataCnt;
break;
}
case 5:
{
//yaw rate in degrees/s
//Compute value in rad/s
currentYawRate = atof(segment.c_str()) * M_PI / 180.0;
break;
}
}
}
EMLPos curPos;
curPos.x = x;
curPos.y = y;
curPos.theta = yaw;
EML.push_back(curPos);
}
}
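/*
 * Example of the EML line format parsed above (matching the files generated in main()):
 * a line like "1.333 0 0 120 0" is read as x = 1.333 m, y = 0 m, yaw = 0 degrees,
 * speed = 120 km/h (stored as 33.3 m/s) and yaw rate = 0 degrees/s (stored as 0 rad/s).
 */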
double ConvoyTracker::getCurrentSpeed() const {
return currentSpeed;
}
void ConvoyTracker::setCurrentSpeed(double currentSpeed) {
this->currentSpeed = currentSpeed;
}
double ConvoyTracker::getCurrentYawRate() const {
return currentYawRate;
}
void ConvoyTracker::setCurrentYawRate(double currentYawRate) {
this->currentYawRate = currentYawRate;
}
double ConvoyTracker::getX() const {
return x;
}
void ConvoyTracker::setX(double x) {
this->x = x;
}
double ConvoyTracker::getXOld() const {
return xOld;
}
void ConvoyTracker::setXOld(double old) {
this->xOld = old;
}
double ConvoyTracker::getY() const {
return y;
}
void ConvoyTracker::setY(double y) {
this->y = y;
}
double ConvoyTracker::getYaw() const {
return yaw;
}
void ConvoyTracker::setYaw(double yaw) {
this->yaw = yaw;
}
double ConvoyTracker::getYawOld() const {
return yawOld;
}
void ConvoyTracker::setYawOld(double yawOld) {
this->yawOld = yawOld;
}
double ConvoyTracker::getYOld() const {
return yOld;
}
void ConvoyTracker::setYOld(double old) {
yOld = old;
}
/**
* Searches for corresponding vehicles using Global Nearest Neighbor algorithm and updates the results
* @param trackedVehicles: contains pointers to all vehicles stored in the interval map
* @param vehicleCount: number of observed vehicles extracted from the current laser file
*/
void ConvoyTracker::associateAndUpdate(int vehicleCount, std::vector<PointCellDevice*> trackedVehicles)
{
//initialize all history match entries to INT_MAX so that findConvoy cannot produce false detections
hipLaunchKernelGGL(( memSetHistoryMatch), dim3(1),dim3(MAX_SEGMENTS),0,stream2, d_historyMatch_ptr);
convoyCheckSize = 0;
int updateCounter = 0;
int indexCounter = trackedVehicles.size();
std::vector<int> indicesToAdd;
std::vector<PointCellDevice*> updateCheck;
for(uint i = 0; i<vehicleCount; i++)
{
//get values from observation
#if SZENARIO == 6
double x = h_vehicleSim[i].getX();
double y = h_vehicleSim[i].getY();
double theta = h_vehicleSim[i].getTheta();
#else
double x = h_vehicles[i].getX();
double y = h_vehicles[i].getY();
double theta = h_vehicles[i].getTheta();
#endif
double minDist = INT_MAX;
int minIndex = INT_MAX;
#ifdef PRINT
std::cout << "X: " << x << " Y: " << y << " Theta: " << theta <<std::endl;
#endif
for(uint j = 0; j<trackedVehicles.size(); j++)
{
//compute distance to stored vehicle
double x1 = trackedVehicles.at(j)->getX();
double y1 = trackedVehicles.at(j)->getY();
double theta1 = trackedVehicles.at(j)->getTheta();
#ifdef PRINT
std::cout << "X1: " << x1 << " Y1: " << y1<< " Theta1: " << theta1 <<std::endl;
#endif
double dist = sqrt((x - x1)*(x - x1) + (y - y1)*(y - y1) + (theta - theta1)*(theta - theta1));
//find vehicle with smallest distance
if(dist < minDist)
{
minDist = dist;
minIndex = j;
}
}
#ifdef PRINT
std::cout << "Min distance: " << minDist << std::endl;
#endif
if(minDist > ASSOCIATION_THRESHOLD)
{
//do not associate vehicles that are too far apart
//create a new track instead
++ID;
#if SZENARIO == 6
h_vehicleSim[i].setID(ID);
#else
h_vehicles[i].setID(ID);
#endif
indicesToAdd.push_back(i);
history[endIndexHistory].ID = ID;
history[endIndexHistory].tracks[0].subIntvl = 0.5f;
#if SZENARIO == 6
history[endIndexHistory].tracks[0].x = h_vehicleSim[i].getX();
history[endIndexHistory].tracks[0].y = h_vehicleSim[i].getY();
history[endIndexHistory].tracks[0].theta = h_vehicleSim[i].getTheta();
#else
history[endIndexHistory].tracks[0].x = h_vehicles[i].getX();
history[endIndexHistory].tracks[0].y = h_vehicles[i].getY();
history[endIndexHistory].tracks[0].theta = h_vehicles[i].getTheta();
#endif
history[endIndexHistory].startIndex = 0;
history[endIndexHistory].endIndex = 1;
int index = (endIndexHistory+1)%NUM_HIST;
if(index == startIndexHistory)
{
startIndexHistory = (startIndexHistory+1)%NUM_HIST;
}
else
{
++historySize;
}
endIndexHistory = index;
#ifdef PRINT
std::cout << "Added new Vehicle with ID " << ID << std::endl;
#endif
currentHistoryOnDevice = false;
#if SZENARIO == 6
h_convoyCheck[convoyCheckSize] = h_vehicleSim[i];
#else
h_convoyCheck[convoyCheckSize] = h_vehicles[i];
#endif
++convoyCheckSize;
}
else
{
//vehicle matched, mark for update
PointCellDevice* tmp = trackedVehicles.at(trackedVehicles.size() -1 );
PointCellDevice* update = trackedVehicles.at(minIndex);
#ifdef PRINT
std::cout << "Update ID " << update->getID() << std::endl;
#endif
trackedVehicles.at(minIndex) = tmp;
h_intvlIndex[minIndex] = h_intvlIndex[trackedVehicles.size()-1];
h_intvlIndex[trackedVehicles.size()-1] = minIndex;
//save update data for later use
#if SZENARIO == 6
h_updateData[updateCounter*3] = h_vehicleSim[i].getX();
h_updateData[updateCounter*3+1] = h_vehicleSim[i].getY();
h_updateData[updateCounter*3+2] = h_vehicleSim[i].getTheta();
#else
h_updateData[updateCounter*3] = h_vehicles[i].getX();
h_updateData[updateCounter*3+1] = h_vehicles[i].getY();
h_updateData[updateCounter*3+2] = h_vehicles[i].getTheta();
#endif
trackedVehicles.pop_back();
#ifdef PRINT
std::cout << "Updated vehicle with ID " << update->getID() << std::endl;
#endif
updateCheck.push_back(update);
++updateCounter;
}
}
//get the interval index for all updated pointers
for(int i=0; i<updateCounter; i++)
{
for(int j=0; j<intervalSize; j++)
{
if(updateCheck[i] == &h_intervalMap[j])
{
h_intvlIndex[i] = j;
break;
}
}
}
//Update all matched vehicles
if(updateCounter >0)
{
hipLaunchKernelGGL(( updateKernel), dim3(updateCounter),dim3(25), 0, 0, d_intervalMap_ptr, d_updataData_ptr, d_intvlIndex_ptr);
hipLaunchKernelGGL(( findHistoryWithIDDevice), dim3(historySize),dim3(updateCounter), 0, 0, d_history_ptr,d_intervalMap_ptr,d_intvlIndex_ptr,d_IDincluded_ptr);
hipLaunchKernelGGL(( checkHistoryForDuplicateDevice), dim3(updateCounter), dim3(MAX_LENGTH_HIST_CONV), 0, 0, d_history_ptr,d_intervalMap_ptr,d_intvlIndex_ptr,d_IDincluded_ptr,d_duplicate_ptr);
hipLaunchKernelGGL(( addUpdatedPositionToHistoryDevice), dim3(1),dim3(updateCounter), 0, 0, d_history_ptr, d_intervalMap_ptr,d_intvlIndex_ptr, d_IDincluded_ptr,d_duplicate_ptr);
hipDeviceSynchronize();
for(int i=0; i<updateCounter;i++)
{
h_convoyCheck[convoyCheckSize] = h_intervalMap[h_intvlIndex[i]];
++convoyCheckSize;
}
}
//delete all tracks that could not be matched
for(uint k = 0; k < trackedVehicles.size(); k++)
{
PointCellDevice* tmp = trackedVehicles.at(k);
for(uint m = 0; m < intervalSize; m++)
{
if(tmp == &h_intervalMap[m])
{
h_intervalMap[m] = h_intervalMap[--intervalSize];
break;
}
}
}
//add all observations that could not be matched
for(uint k = 0; k < indicesToAdd.size(); k++)
{
if(intervalSize < MAX_SEGMENTS)
{
#if SZENARIO == 6
h_intervalMap[intervalSize++] = h_vehicleSim[indicesToAdd.at(k)];
#else
h_intervalMap[intervalSize++] = h_vehicles[indicesToAdd.at(k)];
#endif
}
}
//find Convoy
if(historySize > 0 && convoyCheckSize >0)
{
dim3 grid(historySize, convoyCheckSize);
hipLaunchKernelGGL(( findConvoyDevice), dim3(grid), dim3(MAX_LENGTH_HIST_CONV), 0, 0, d_newVeh_ptr,d_history_ptr,d_historyMatch_ptr);
hipDeviceSynchronize();
for(uint i=0; i<convoyCheckSize;i++)
{
//look up whether vehicle i matched a history position
if(h_historyMatch[i] != INT_MAX)
{
//get vehicle i
PointCellDevice vehicle = h_convoyCheck[i];
float x = vehicle.getX();
int interval = floor(x);
//get history id
int id1 = h_historyMatch[i];
int id2 = vehicle.getID();
#ifdef PRINT
std::cout << "ID1 " << id1 << " ID2 " << id2 << std::endl;
#endif
bool convoyFound = false;
if(convoySize >0)
{ //find convoy and duplicates on device
hipLaunchKernelGGL(( findIDInConvoyDevice), dim3(convoySize), dim3(MAX_LENGTH_HIST_CONV),0,stream3, d_convoys_ptr, d_IDincluded_ptr,id1,id2);
hipLaunchKernelGGL(( checkConvoyForDuplicateDevice), dim3(convoySize), dim3(MAX_LENGTH_HIST_CONV),0,stream2, d_convoys_ptr, &(d_newVeh_ptr[i]),d_duplicate_ptr);
hipDeviceSynchronize();
}
for(uint j = startIndexConvoys; j != endIndexConvoys; j = (j+1)%NUM_CONV)
{
Convoy currentConvoy = convoys[j];
int it1 = h_IDincluded[j*2];
int it2 = h_IDincluded[j*2+1];
if(it1 == INT_MIN || it2 == INT_MIN)
{
continue;
}
if(it1 != INT_MAX && it2 != INT_MAX)
{
//convoy already exists with both IDS
//check if this x value is already contained
if(h_duplicate[j])
{
//x value is not contained
int index = (currentConvoy.endIndexTracks+1)%MAX_LENGTH_HIST_CONV;
convoys[j].tracks[currentConvoy.endIndexTracks].x = interval+0.5f;
convoys[j].tracks[currentConvoy.endIndexTracks].y = vehicle.getY();
convoys[j].tracks[currentConvoy.endIndexTracks].theta = vehicle.getTheta();
convoys[j].tracks[currentConvoy.endIndexTracks].subIntvl = 0.5f;
convoys[j].endIndexTracks = index;
if(index == convoys[j].startIndexTracks)
{
convoys[j].startIndexTracks = (convoys[j].startIndexTracks+1)%MAX_LENGTH_HIST_CONV;
}
if(interval+0.5 > convoys[j].highestValue.x)
{
convoys[j].highestValue.x = interval+0.5f;
convoys[j].highestValue.y = vehicle.getY();
convoys[j].highestValue.theta = vehicle.getTheta();
convoys[j].highestValue.subIntvl = 0.5f;
}
}
convoyFound = true;
#ifdef PRINT
std::cout << "existing Convoy with ID " << convoys[j].ID << " y " << vehicle.getY() << " startIndexTracks: " << convoys[j].startIndexTracks <<" EndindexTracks: "<< convoys[j].endIndexTracks<< std::endl;
#endif
break;
}
}
if(convoyFound)
{
continue;
}
for(uint j = startIndexConvoys; j != endIndexConvoys; j = (j+1)%NUM_CONV)
{
Convoy currentConvoy = convoys[j];
int it1 = h_IDincluded[j*2];
int it2 = h_IDincluded[j*2+1];
if(it1 == INT_MIN || it2 == INT_MIN)
{
continue;
}
if (it1 != INT_MAX)
{
int index = (currentConvoy.endIndexTracks+1)%MAX_LENGTH_HIST_CONV;
//check if this x value is already contained
if(h_duplicate[j])
{
convoys[j].tracks[currentConvoy.endIndexTracks].x = interval+0.5f;
convoys[j].tracks[currentConvoy.endIndexTracks].y = vehicle.getY();
convoys[j].tracks[currentConvoy.endIndexTracks].theta = vehicle.getTheta();
convoys[j].tracks[currentConvoy.endIndexTracks].subIntvl = 0.5f;
convoys[j].endIndexTracks = index;
if(index == convoys[j].startIndexTracks)
{
convoys[j].startIndexTracks = (convoys[j].startIndexTracks+1)%MAX_LENGTH_HIST_CONV;
}
if(interval+0.5 > convoys[j].highestValue.x)
{
convoys[j].highestValue.x = interval+0.5f;
convoys[j].highestValue.y = vehicle.getY();
convoys[j].highestValue.theta = vehicle.getTheta();
convoys[j].highestValue.subIntvl = 0.5f;
}
}
int IDindex = (currentConvoy.endIndexID+1)%MAX_LENGTH_HIST_CONV;
convoys[j].participatingVehicles[currentConvoy.endIndexID] = id2;
convoys[j].endIndexID = IDindex;
if(IDindex == convoys[j].startIndexID)
{
convoys[j].startIndexID = (convoys[j].startIndexID+1)%MAX_LENGTH_HIST_CONV;
}
convoyFound = true;
#ifdef PRINT
std::cout << "existing Convoy with ID " << convoys[j].ID << " y " << vehicle.getY() << " startIndexTracks: " << convoys[j].startIndexTracks <<" EndindexTracks: "<< convoys[j].endIndexTracks<< std::endl;
#endif
break;
}
else if (it2 != INT_MAX)
{
//only add the position to the convoy if it will become the highest value or if the difference in y is small enough
if(interval+0.5 < convoys[j].highestValue.x && !checkConvoyForY(vehicle.getY(),interval+0.5f,currentConvoy))
{
continue;
}
int index = (currentConvoy.endIndexTracks+1)%MAX_LENGTH_HIST_CONV;
//check if this x value is already contained
if(h_duplicate[j])
{
convoys[j].tracks[currentConvoy.endIndexTracks].x = interval+0.5f;
convoys[j].tracks[currentConvoy.endIndexTracks].y = vehicle.getY();
convoys[j].tracks[currentConvoy.endIndexTracks].theta = vehicle.getTheta();
convoys[j].tracks[currentConvoy.endIndexTracks].subIntvl = 0.5f;
convoys[j].endIndexTracks = index;
if(index == convoys[j].startIndexTracks)
{
convoys[j].startIndexTracks = (convoys[j].startIndexTracks+1)%MAX_LENGTH_HIST_CONV;
}
if(interval+0.5 > convoys[j].highestValue.x)
{
convoys[j].highestValue.x = interval+0.5f;
convoys[j].highestValue.y = vehicle.getY();
convoys[j].highestValue.theta = vehicle.getTheta();
convoys[j].highestValue.subIntvl = 0.5f;
}
}
int IDindex = (currentConvoy.endIndexID+1)%MAX_LENGTH_HIST_CONV;
convoys[j].participatingVehicles[currentConvoy.endIndexID] = id1;
convoys[j].endIndexID = IDindex;
if(IDindex == convoys[j].startIndexID)
{
convoys[j].startIndexID = (convoys[j].startIndexID+1)%MAX_LENGTH_HIST_CONV;
}
convoyFound = true;
#ifdef PRINT
std::cout << "existing Convoy with ID " << convoys[j].ID << " y " << vehicle.getY() << " startIndexTracks: " << convoys[j].startIndexTracks <<" EndindexTracks: "<< convoys[j].endIndexTracks<< std::endl;
#endif
break;
}
}
//if no convoy matches our needs, create a new one
if(!convoyFound)
{
int cIndex = endIndexConvoys;
convoys[cIndex].ID = convoyID++;
convoys[cIndex].participatingVehicles[0] = id1;
convoys[cIndex].participatingVehicles[1] = id2;
convoys[cIndex].startIndexID = 0;
convoys[cIndex].endIndexID = 2;
convoys[cIndex].startIndexTracks = 0;
convoys[cIndex].endIndexTracks = 1;
convoys[cIndex].tracks[0].x = interval+0.5f;
convoys[cIndex].tracks[0].y = vehicle.getY();
convoys[cIndex].tracks[0].theta = vehicle.getTheta();
convoys[cIndex].tracks[0].subIntvl = 0.5f;
endIndexConvoys = (endIndexConvoys+1)%NUM_CONV;
convoys[cIndex].highestValue.x = interval+0.5f;
convoys[cIndex].highestValue.y = vehicle.getY();
convoys[cIndex].highestValue.theta = vehicle.getTheta();
convoys[cIndex].highestValue.subIntvl = 0.5f;
if(convoySize == NUM_CONV)
{
startIndexConvoys = (startIndexConvoys+1)%NUM_CONV;
}
else
{
++convoySize;
}
#ifdef PRINT
std::cout << "new Convoy with ID " << convoyID-1 << " containing "<< id1 << " , " << id2 << std::endl;
#endif
}
currentConvoyOnDevice = false;
}
}
}
}
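/*
 * Minimal gating sketch for the association above (not part of the build, values are
 * hypothetical): an observation is matched to the nearest track only if the combined
 * (x, y, theta) distance stays within ASSOCIATION_THRESHOLD, otherwise a new track
 * with a fresh ID is created.
 */
#if 0
float ox = 10.2f, oy = -3.1f, otheta = 0.02f;   // hypothetical observation
float tx = 10.0f, ty = -3.0f, ttheta = 0.00f;   // hypothetical nearest track
float dist = sqrtf((ox - tx)*(ox - tx) + (oy - ty)*(oy - ty) + (otheta - ttheta)*(otheta - ttheta));
bool associate = (dist <= ASSOCIATION_THRESHOLD);
#endif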
/*
* calls the visualizer to draw the convoys
*/
void ConvoyTracker::visualizeConvoys()
{
visualizer.visualizeConvoys(EML, convoys, startIndexConvoys, endIndexConvoys);
}
/*
* calls the visualizer to draw the histories
*/
void ConvoyTracker::visualizeHistory()
{
visualizer.visualizeHistory(EML, history, startIndexHistory, endIndexHistory);
}
/*
* checks the transformed data from device, deletes entries if necessary
*/
void ConvoyTracker::transformDataFromDevice()
{
std::vector<int> toDelete;
int end;
//check history for delete
for (int i = startIndexHistory; i != endIndexHistory; i = (i+1)%NUM_HIST)
{
int endId = (history[i].endIndex-1)%MAX_LENGTH_HIST_CONV;
if(endId <0)
{
endId = MAX_LENGTH_HIST_CONV-1;
}
if(history[i].tracks[endId].x < -5)
{
//if yes, mark history to delete
#ifdef PRINT
std::cout << "Delete history with ID " << history[i].ID << std::endl;
#endif
toDelete.push_back(i);
}
}
//delete history
if(toDelete.size() > 0)
{
for(int i=toDelete.size()-1; i>=0; i--)
{
end = (endIndexHistory-1)%NUM_HIST;
if(end < 0)
{
end = NUM_HIST-1;
}
if(toDelete.at(i) != end)
{
history[toDelete.at(i)] = history[end];
}
endIndexHistory = end;
--historySize;
}
}
toDelete.clear();
//check convoys for delete
for (int i = startIndexConvoys; i != endIndexConvoys; i = (i + 1) % NUM_CONV)
{
end = (convoys[i].endIndexTracks-1) % MAX_LENGTH_HIST_CONV;
if(end < 0)
{
end = MAX_LENGTH_HIST_CONV-1;
}
if(convoys[i].highestValue.x < -5)
{
#ifdef PRINT
std::cout << "delete convoy with ID " << convoys[i].ID << std::endl;
#endif
toDelete.push_back(i);
}
}
//delete convoys
if(toDelete.size() > 0)
{
for(int i=toDelete.size()-1; i >=0; i--)
{
end = (endIndexConvoys-1) % NUM_CONV;
if(end < 0)
{
end = NUM_CONV-1;
}
convoys[toDelete.at(i)] = convoys[end];
endIndexConvoys = end;
--convoySize;
}
}
toDelete.clear();
//check interval map for delete
for(uint i=0; i<intervalSize;i++)
{
if(h_intervalMap[i].getX() < -100)
{
toDelete.push_back(i);
}
}
//delete point cells
if(toDelete.size() > 0)
{
for(int i=toDelete.size()-1; i>=0;i--)
{
h_intervalMap[toDelete.at(i)] = h_intervalMap[--intervalSize];
}
}
}
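/*
 * Deletion above is O(1) per entry: the removed slot is overwritten with the last
 * element of the respective ring (or of the interval map) and the size shrinks by one,
 * so the relative order of the remaining entries is not preserved.
 */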
/*
* Adds own position to convoy
*/
void ConvoyTracker::findConvoySelf(int ID)
{
double x = 0;
int interval = floor(x);
int id1 = -1;
int id2 = ID;
#ifdef PRINT
std::cout << "ID1 " << id1 << " ID2 " << id2 << std::endl;
#endif
bool convoyFound = false;
if(convoySize >0)
{
//findConvoy and duplicateCheck for all convoys
hipLaunchKernelGGL(( findIDInConvoyDevice), dim3(convoySize), dim3(MAX_LENGTH_HIST_CONV),0,stream3, d_convoys_ptr, d_IDincluded_ptr,id1,id2);
hipLaunchKernelGGL(( checkConvoyForDuplicateDeviceSelf), dim3(convoySize), dim3(MAX_LENGTH_HIST_CONV),0,stream2, d_convoys_ptr,d_duplicate_ptr);
hipDeviceSynchronize();
}
for(uint j = startIndexConvoys; j != endIndexConvoys; j = (j+1)%NUM_CONV)
{
Convoy currentConvoy = convoys[j];
int it1 = h_IDincluded[j*2];
int it2 = h_IDincluded[j*2+1];
if(it1 != INT_MAX && it2 != INT_MAX)
{
//convoy already exists with both IDS
//check if this x value is already contained
if(h_duplicate[j])
{
//x value is not contained
int index = (currentConvoy.endIndexTracks+1)%MAX_LENGTH_HIST_CONV;
convoys[j].tracks[currentConvoy.endIndexTracks].x = 0.5f;
convoys[j].tracks[currentConvoy.endIndexTracks].y = 0;
convoys[j].tracks[currentConvoy.endIndexTracks].theta = 0;
convoys[j].tracks[currentConvoy.endIndexTracks].subIntvl = 0.5f;
convoys[j].endIndexTracks = index;
if(index == convoys[j].startIndexTracks)
{
convoys[j].startIndexTracks = (convoys[j].startIndexTracks+1)%MAX_LENGTH_HIST_CONV;
}
if(interval+0.5 > convoys[j].highestValue.x)
{
convoys[j].highestValue.x = 0.5f;
convoys[j].highestValue.y = 0;
convoys[j].highestValue.theta = 0;
convoys[j].highestValue.subIntvl = 0.5f;
}
}
convoyFound = true;
#ifdef PRINT
std::cout << "existing Convoy with ID " << convoys[j].ID << std::endl;
#endif
break;
}
else if (it1 != INT_MAX)
{
//only self convoy exists
int index = (currentConvoy.endIndexTracks+1)%MAX_LENGTH_HIST_CONV;
//check if this x value is already contained
if(h_duplicate[j])
{
convoys[j].tracks[currentConvoy.endIndexTracks].x = 0.5f;
convoys[j].tracks[currentConvoy.endIndexTracks].y = 0;
convoys[j].tracks[currentConvoy.endIndexTracks].theta = 0;
convoys[j].tracks[currentConvoy.endIndexTracks].subIntvl = 0.5f;
convoys[j].endIndexTracks = index;
if(index == convoys[j].startIndexTracks)
{
convoys[j].startIndexTracks = (convoys[j].startIndexTracks+1)%MAX_LENGTH_HIST_CONV;
}
if(interval+0.5 > convoys[j].highestValue.x)
{
convoys[j].highestValue.x = 0.5f;
convoys[j].highestValue.y = 0;
convoys[j].highestValue.theta = 0;
convoys[j].highestValue.subIntvl = 0.5f;
}
}
int IDindex = (currentConvoy.endIndexID+1)%MAX_LENGTH_HIST_CONV;
convoys[j].participatingVehicles[currentConvoy.endIndexID] = id2;
convoys[j].endIndexID = IDindex;
if(IDindex == convoys[j].startIndexID)
{
convoys[j].startIndexID = (convoys[j].startIndexID+1)%MAX_LENGTH_HIST_CONV;
}
convoyFound = true;
#ifdef PRINT
std::cout << "existing Convoy with ID " << convoys[j].ID << std::endl;
#endif
break;
}
}
//if there was no match, create a new one
if(!convoyFound)
{
int cIndex = endIndexConvoys;
convoys[cIndex].ID = convoyID++;
convoys[cIndex].participatingVehicles[0] = id1;
convoys[cIndex].participatingVehicles[1] = id2;
convoys[cIndex].startIndexID = 0;
convoys[cIndex].endIndexID = 2;
convoys[cIndex].startIndexTracks = 0;
convoys[cIndex].endIndexTracks = 1;
convoys[cIndex].tracks[0].x = 0.5;
convoys[cIndex].tracks[0].y = 0;
convoys[cIndex].tracks[0].theta = 0;
convoys[cIndex].tracks[0].subIntvl = 0.5f;
endIndexConvoys = (endIndexConvoys+1)%NUM_CONV;
convoys[cIndex].highestValue.x = 0.5f;
convoys[cIndex].highestValue.y = 0;
convoys[cIndex].highestValue.theta = 0;
convoys[cIndex].highestValue.subIntvl = 0.5f;
if(convoySize == NUM_CONV)
{
startIndexConvoys = (startIndexConvoys+1)%NUM_CONV;
}
else
{
++convoySize;
}
#ifdef PRINT
std::cout << "new Convoy with ID " << convoyID-1 << " containing "<< id1 << " , " << id2 << std::endl;
#endif
}
}
/*
* checks whether the given y value is close enough to the track of convoy c that is nearest in x
* should prevent wrong additions in case of a lane change
*/
bool ConvoyTracker::checkConvoyForY(float y, float x, Convoy c)
{
double min = INT_MAX;
double dist;
int index = c.startIndexTracks;
for(int i=c.startIndexTracks; i != c.endIndexTracks; i = (i+1)%MAX_LENGTH_HIST_CONV)
{
dist = fabs(c.tracks[i].x - x);
if(dist < min)
{
min = dist;
index = i;
}
}
dist = fabs(c.tracks[index].y - y);
if(dist > CONVOY_THRESHOLD_Y)
{
return false;
}
return true;
}
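/*
 * Worked example for the check above, assuming CONVOY_THRESHOLD_Y were 1.0 m: if the
 * convoy track closest in x lies at y = -3.0 and the candidate vehicle reports
 * y = -0.5, the distance of 2.5 exceeds the threshold and the position is rejected,
 * which is exactly the lane-change case described in the comment.
 */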
|
76192176db4db89e729716bfc97936cf4c2ffc81.cu
|
/*
* ConvoyTracker.cu
*
* Created on: 06.06.2016
* Author: Sebastian Reinhart
*/
#include "ConvoyTracker.cuh"
#include <assert.h>
ConvoyTracker::ConvoyTracker() {
currentSpeed = 0;
currentYawRate = 0;
x = 0;
y = 0;
yaw = 0;
xOld = 0;
yOld = 0;
yawOld = 0;
ID = 0;
convoyID = 0;
currentHistoryOnDevice = false;
currentConvoyOnDevice = false;
convoySize = 0;
startIndexConvoys = 0;
endIndexConvoys = 0;
historySize = 0;
startIndexHistory = 0;
endIndexHistory = 0;
convoyCheckSize = 0;
intervalSize = 0;
cudaError_t error;
//create cuda streams
cudaStreamCreate(&stream2);
cudaStreamCreate(&stream3);
cudaStreamCreate(&stream4);
//allocate host memory and create device pointers
error = cudaHostAlloc((void**) &history, NUM_HIST*sizeof(History), cudaHostAllocMapped);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
error = cudaHostGetDevicePointer(&d_history_ptr, history, 0);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
error = cudaHostAlloc((void**) &h_historyMatch, MAX_SEGMENTS*sizeof(int), cudaHostAllocMapped);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
error = cudaHostGetDevicePointer(&d_historyMatch_ptr, h_historyMatch, 0);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
error = cudaHostAlloc((void**) &h_historyMatchSelf, sizeof(int), cudaHostAllocMapped);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
error = cudaHostGetDevicePointer(&d_historyMatchSelf_ptr, h_historyMatchSelf, 0);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
error = cudaHostAlloc((void**) &h_intervalMap, MAX_SEGMENTS*sizeof(PointCellDevice), cudaHostAllocMapped);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
error = cudaHostGetDevicePointer(&d_intervalMap_ptr, h_intervalMap, 0);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
error = cudaHostAlloc((void**) &h_convoyCheck, MAX_SEGMENTS*sizeof(PointCellDevice), cudaHostAllocMapped);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
error = cudaHostGetDevicePointer(&d_newVeh_ptr, h_convoyCheck, 0);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
error = cudaHostAlloc((void**) &h_IDincluded, NUM_HIST*2*sizeof(int), cudaHostAllocMapped);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
error = cudaHostGetDevicePointer(&d_IDincluded_ptr, h_IDincluded, 0);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
error = cudaHostAlloc((void**) &h_duplicate, NUM_HIST*sizeof(bool), cudaHostAllocMapped);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
error = cudaHostGetDevicePointer(&d_duplicate_ptr, h_duplicate, 0);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
error = cudaHostAlloc((void**) &h_updateData, MAX_SEGMENTS*3*sizeof(float), cudaHostAllocMapped);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
error = cudaHostGetDevicePointer(&d_updataData_ptr, h_updateData, 0);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
error = cudaHostAlloc((void**) &h_intvlIndex, MAX_SEGMENTS*sizeof(int), cudaHostAllocMapped);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
error = cudaHostGetDevicePointer(&d_intvlIndex_ptr, h_intvlIndex, 0);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
error = cudaHostAlloc((void**) &h_vehicles, MAX_SEGMENTS*sizeof(PointCellDevice), cudaHostAllocMapped);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
error = cudaHostGetDevicePointer(&d_vehicles_ptr, h_vehicles, 0);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
error = cudaHostAlloc((void**) &h_distance, MAX_SEGMENTS*MAX_SEGMENTS*sizeof(float), cudaHostAllocMapped);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
error = cudaHostGetDevicePointer(&d_distance_ptr, h_distance, 0);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
size_t sizeConv = NUM_CONV;
sizeConv *= sizeof(Convoy);
error = cudaHostAlloc((void **) &convoys, sizeConv, cudaHostAllocMapped);
if(error != cudaSuccess)
{
printf(
"cudaGetDeviceProperties returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
error = cudaHostAlloc((void**) &xSubInterval, sizeof(float), cudaHostAllocMapped);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
*xSubInterval = 0;
error = cudaHostGetDevicePointer(&d_subIntvl_ptr, xSubInterval, 0);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
error = cudaHostGetDevicePointer(&d_convoys_ptr, convoys, 0);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
}
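/*
 * Zero-copy pattern used for every buffer above (sketch, not part of the build): the
 * host allocation is pinned and mapped, and the matching device pointer lets kernels
 * read and write it directly without explicit cudaMemcpy calls for these buffers.
 * cudaSetDeviceFlags(cudaDeviceMapHost) must be called before the first allocation,
 * which main() does before constructing the tracker.
 */
#if 0
float* h_buf = NULL;
float* d_buf = NULL;
cudaHostAlloc((void**)&h_buf, 64 * sizeof(float), cudaHostAllocMapped);
cudaHostGetDevicePointer((void**)&d_buf, h_buf, 0);
#endif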
ConvoyTracker::~ConvoyTracker() {
// free everything
cudaFreeHost(xSubInterval);
cudaFreeHost(convoys);
cudaFreeHost(history);
cudaFreeHost(h_historyMatch);
cudaFreeHost(h_convoyCheck);
cudaFreeHost(h_intervalMap);
cudaFreeHost(h_historyMatchSelf);
cudaFreeHost(h_IDincluded);
cudaFreeHost(h_vehicles);
cudaFreeHost(h_distance);
cudaFreeHost(h_duplicate);
cudaFreeHost(h_intvlIndex);
cudaStreamDestroy(stream2);
cudaStreamDestroy(stream3);
cudaStreamDestroy(stream4);
}
/**
* returns the zero-padded, four-character string representation of the given number @param i
*/
std::string getNextMeasureAsString(int i)
{
std::ostringstream number;
if(i<10)
{
number << "000" << i;
}
else if(i<100)
{
number << "00" << i;
}
else if(i<1000)
{
number << "0" << i;
}
else
{
number << i;
}
return number.str();
}
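/*
 * Examples: getNextMeasureAsString(7) == "0007", getNextMeasureAsString(42) == "0042",
 * getNextMeasureAsString(123) == "0123", getNextMeasureAsString(10000) == "10000".
 */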
/*
* performs the ego motion compensation of the position with the given index in the given history
*/
__device__ void shiftRotateHistory(History* d_pc, float x, float y, float theta, int index)
{
//check whether the index is within the valid range of this history
if(((index < d_pc->endIndex) && (d_pc->endIndex > d_pc->startIndex)) || ((d_pc->endIndex < d_pc->startIndex) && (index != d_pc->endIndex)))
{
//shift compensation
d_pc->tracks[index].subIntvl += x;
int numIntervals = (int) ((d_pc->tracks[index].subIntvl) / INTERVALL_LENGTH);
d_pc->tracks[index].x -= numIntervals;
d_pc->tracks[index].subIntvl -= numIntervals;
//rotate compensation
float angleInRadians = theta*((float)M_PI)/180.0f;
float mat[2][2] = { { cosf(angleInRadians), -sinf(angleInRadians) },
{ sinf(angleInRadians), cosf(angleInRadians) } };
d_pc->tracks[index].y -= y;
d_pc->tracks[index].theta -= angleInRadians;
float xAbs = d_pc->tracks[index].x;
float yAbs = d_pc->tracks[index].y;
xAbs = (mat[0][0] * xAbs + mat[0][1] * yAbs) - xAbs;
yAbs = (mat[1][0] * xAbs + mat[1][1] * yAbs) - yAbs;
d_pc->tracks[index].y -= yAbs;
d_pc->tracks[index].subIntvl -= xAbs;
}
}
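/*
 * Compensation math above, in short: the ego displacement x is accumulated into the
 * sub-interval and converted into whole interval shifts, then the track point p is
 * rotated via the 2x2 matrix R and only the residual R*p - p is subtracted from y and
 * from the sub-interval, which for the small per-step angles amounts to rotating the
 * point by -deltaYaw while keeping the stored x on the interval grid.
 */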
/*
* performs the ego motion compensation of the position with the given index in the given convoy
*/
__device__ void shiftRotateConvoy(Convoy* d_eml, float x, float y, float theta, int index)
{
//check whether the index is within the valid range of this convoy
if(((index < d_eml->endIndexTracks) && (d_eml->endIndexTracks > d_eml->startIndexTracks)) || ((d_eml->endIndexTracks < d_eml->startIndexTracks) && (index != d_eml->endIndexTracks)))
{
//shift compensation
d_eml->tracks[index].subIntvl += x;
int numIntervals = (int) ((d_eml->tracks[index].subIntvl) / INTERVALL_LENGTH);
d_eml->tracks[index].x -= numIntervals;
d_eml->tracks[index].subIntvl -= numIntervals;
//rotate compensation
float angleInRadians = theta*((float)M_PI)/180.0f;
float mat[2][2] = { { cosf(angleInRadians), -sinf(angleInRadians) },
{ sinf(angleInRadians), cosf(angleInRadians) } };
d_eml->tracks[index].y -= y;
d_eml->tracks[index].theta -= angleInRadians;
float xAbs = d_eml->tracks[index].x;
float yAbs = d_eml->tracks[index].y;
xAbs = (mat[0][0] * xAbs + mat[0][1] * yAbs) - xAbs;
yAbs = (mat[1][0] * xAbs + mat[1][1] * yAbs) - yAbs;
d_eml->tracks[index].y -= yAbs;
d_eml->tracks[index].subIntvl -= xAbs;
}
}
/*
* performs the ego motion compensation of the given pointcell
*/
__device__ void computeIntervalMap(PointCellDevice* d_interval, float xMotion, float yMotion, float angle, float* xSubInterval)
{
float angleInRadians = angle * ((float)M_PI) / 180.0f;
//shift compensation
*xSubInterval += xMotion;
int numIntervals = (int) (*xSubInterval / INTERVALL_LENGTH);
*xSubInterval -= numIntervals;
for (int i = 0; i < numIntervals; i++)
{
double x = d_interval->getX();
int interval = floor(x) + CARINTERVAL;
if(interval == 0)
{
//mark content to delete
d_interval->setX(-10000);
continue;
}
d_interval->setX(floor(x) - 0.5f);
}
int interval = floor(d_interval->getX());
//rotation
//1.Step correct directions of stored PCs
d_interval->setY(d_interval->getY() - yMotion);
d_interval->setTheta(d_interval->getTheta() - angleInRadians);
//2. compensate rotation
float xAbs = ( interval - CARINTERVAL + 0.5f) * INTERVALL_LENGTH
- *xSubInterval;
float yAbs = d_interval->getY();
float mat[2][2] = { { cosf(angleInRadians), -sinf(angleInRadians) },
{ sinf(angleInRadians), cosf(angleInRadians) } };
xAbs = (mat[0][0] * xAbs + mat[0][1] * yAbs) - xAbs;
yAbs = (mat[1][0] * xAbs + mat[1][1] * yAbs) - yAbs;
d_interval->setY(d_interval->getY() - yAbs);
if(xAbs > 0.5*INTERVALL_LENGTH)
{
d_interval->setX(interval + 1.5);
}
else if(xAbs < -0.5*INTERVALL_LENGTH)
{
d_interval->setX(interval - 0.5);
}
else
{
d_interval->subInvtl -= xAbs;
}
}
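/*
 * Shift example for the function above, assuming INTERVALL_LENGTH were 1.0 m: if the
 * accumulated *xSubInterval reaches 2.3, the point cell is moved back by two whole
 * intervals (x decreases by 2 via setX(floor(x) - 0.5)) and 0.3 remains in
 * *xSubInterval for the next measurement cycle.
 */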
/*
* checks whether the given vehicle matches with the position at index in the given history
*/
__device__ bool findHistoryMatch(PointCellDevice* trackedVehicles, History* d_history, int index)
{
bool result = (d_history->ID != trackedVehicles->getID());
result = (result && (d_history->tracks[index].x - 0.5 <= trackedVehicles->getX()));
result = (result && (trackedVehicles->getX() <= d_history->tracks[index].x + 0.5));
result = (result && (d_history->tracks[index].y - 1.0 <= trackedVehicles->getY()));
result = (result && (trackedVehicles->getY() <= d_history->tracks[index].y + 1.0));
return result;
}
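/*
 * The gate above is a simple box test: the candidate must carry a different ID than the
 * history and lie within +/-0.5 in x and +/-1.0 in y of the stored track point.
 */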
/*
* checks whether the own vehicle's position matches the position at index in the given history
*/
__device__ bool findHistoryMatchSelf(History* d_history, int index)
{
bool result = true;
result = (result && (d_history->tracks[index].x - 0.5 <= 0));
result = (result && (0 <= d_history->tracks[index].x + 0.5));
result = (result && (d_history->tracks[index].y - 1.0 <= 0));
result = (result && (0 <= d_history->tracks[index].y + 1.0));
return result;
}
/*
* performs kalman filter predict step for one matrix element defined by index of the given vehicle
*/
__device__ void predictDevice(PointCellDevice* vehicle, int index)
{
int state = index%5;
//row
int i = index / 5 ;
//column
int j = state;
vehicle->data[state+5] = vehicle->data[state];
__syncthreads();
vehicle->computeF();
vehicle->computeCovarianceF();
float tmp = 0;
//Tmp = F*P
for(int k=0; k<5; k++)
{
tmp += vehicle->getF(i,k)*vehicle->getP(k,j);
}
vehicle->writeTmp(i,j, tmp);
__syncthreads();
//P = tmp*F_t
tmp = 0;
for(int k=0; k<5; k++)
{
tmp += vehicle->getTmp(i,k)*vehicle->getF(j,k);
}
vehicle->writeP(i,j, tmp);
__syncthreads();
//P = P+Q
tmp = vehicle->getP(i,j) + vehicle->getQ(i,j);
vehicle->writeP(i,j, tmp);
}
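/*
 * The products above implement the standard Kalman predict step, one matrix element per
 * thread: the state vector is advanced through the motion model (computeF,
 * computeCovarianceF) and the covariance is propagated as P = F * P * F_t + Q, computed
 * element-wise as Tmp = F*P, P = Tmp*F_t, P = P + Q.
 */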
/*
* thin wrapper kernels that distribute the ego motion compensation across threads, one point cell or track entry per thread
*/
__global__ void compensateEgoMotionMap(PointCellDevice* d_interval, float* d_subIntvl, float x, float y, float angle)
{
computeIntervalMap(&(d_interval[threadIdx.x]), x, y, angle, d_subIntvl);
}
__global__ void compensateEgoMotionHistory(History* d_history, float x, float y, float angle)
{
shiftRotateHistory(&(d_history[blockIdx.x]), x, y, angle, threadIdx.x);
}
__global__ void compensateEgoMotionConvoy(Convoy* d_convoy, float x, float y, float angle)
{
shiftRotateConvoy(&(d_convoy[blockIdx.x]), x, y, angle, threadIdx.x);
}
/*
* tries to find a match in history for each tracked vehicle
*/
__global__ void findConvoyDevice(PointCellDevice* trackedVehicles, History* d_history, int* d_historyMatch)
{
//check whether thread is in bounds
if(((threadIdx.x < d_history[blockIdx.x].endIndex) && (d_history[blockIdx.x].endIndex > d_history[blockIdx.x].startIndex)) || ((d_history[blockIdx.x].endIndex < d_history[blockIdx.x].startIndex) && (threadIdx.x != d_history[blockIdx.x].endIndex)))
{
//check whether given car matches given history position
if(findHistoryMatch(&(trackedVehicles[blockIdx.y]),&(d_history[blockIdx.x]),threadIdx.x))
{
//write the smallest matched history id to memory
atomicMin(&(d_historyMatch[blockIdx.y]), d_history[blockIdx.x].ID);
}
}
}
/*
* tries to find a match in history for current vehicle position
*/
__global__ void findConvoyDeviceSelf(History* d_history, int* d_historyMatchSelf)
{
//check whether thread is in bounds
if(((threadIdx.x < d_history[blockIdx.x].endIndex) && (d_history[blockIdx.x].endIndex > d_history[blockIdx.x].startIndex)) || ((d_history[blockIdx.x].endIndex < d_history[blockIdx.x].startIndex) && (threadIdx.x != d_history[blockIdx.x].endIndex)))
{
//check whether the given history position matches (0,0)
if(findHistoryMatchSelf(&(d_history[blockIdx.x]),threadIdx.x))
{
//write the smallest matched history id to memory
atomicMin(d_historyMatchSelf, d_history[blockIdx.x].ID);
}
}
}
/*
* set memory for the findConvoy kernel
*/
__global__ void memSetHistoryMatch(int* d_historyMatch)
{
d_historyMatch[threadIdx.x] = INT_MAX;
}
/*
* Run Kalman-Filter Predict on Device with #vehicles as Blocks and 25 Threads per Block
*/
__global__ void predict(PointCellDevice* d_interval)
{
predictDevice(&(d_interval[blockIdx.x]), threadIdx.x);
}
/*
* Run Kalman-Filter Update on Device with 25 Threads
*/
__device__ void updateDevice(PointCellDevice* d_interval, int index, float velocity, float phi, float xNew, float yNew, float thetaNew)
{
//row
int i = index / 5;
//column
int j = index % 5;
float tmp = 0;
//tmp = H*P
for(int k=0; k<5; k++)
{
tmp += d_interval->getH(i,k)*d_interval->getP(k,j);
}
d_interval->writeTmp(i,j, tmp);
__syncthreads();
//S = tmp*H_t
tmp = 0;
for(int k=0; k<5; k++)
{
tmp += d_interval->getTmp(i,k)*d_interval->getH(j,k);
}
d_interval->writeS(i,j, tmp);
__syncthreads();
//S = S+R
tmp = d_interval->getS(i,j) + d_interval->getR(i,j);
d_interval->writeS(i,j, tmp);
__syncthreads();
//tmp = P*H_t
tmp = 0;
for(int k=0; k<5; k++)
{
tmp += d_interval->getP(i,k)*d_interval->getH(j,k);
}
d_interval->writeTmp(i,j, tmp);
__syncthreads();
//invertS
if(threadIdx.x == 0)
{
d_interval->invertS();
}
__syncthreads();
//K = tmp*S_i
tmp = 0;
for(int k=0; k<5; k++)
{
tmp += d_interval->getTmp(i,k)*d_interval->getS(k,j);
}
d_interval->writeK(i,j, tmp);
__syncthreads();
//tmp = K*(newState-stateVector)
tmp = 0;
tmp += d_interval->getK(i,0)*(xNew-d_interval->getX());
tmp += d_interval->getK(i,1)*(yNew-d_interval->getY());
tmp += d_interval->getK(i,2)*(thetaNew-d_interval->getTheta());
tmp += d_interval->getK(i,3)*(velocity-d_interval->getVelocity());
tmp += d_interval->getK(i,4)*(phi-d_interval->getPhi());
d_interval->writeTmp(i,j, tmp);
__syncthreads();
//stateVector = stateVector + tmp
if(threadIdx.x == 0)
{
d_interval->setX(d_interval->getX() + d_interval->getTmp(0,0));
d_interval->setY(d_interval->getY() + d_interval->getTmp(1,0));
d_interval->setTheta(d_interval->getTheta() + d_interval->getTmp(2,0));
d_interval->setVelocity(d_interval->getVelocity() + d_interval->getTmp(3,0));
d_interval->setPhi(d_interval->getPhi() + d_interval->getTmp(4,0));
}
__syncthreads();
//tmp = K*H
tmp = 0;
for(int k=0; k<5; k++)
{
tmp += d_interval->getK(i,k)*d_interval->getH(k,j);
}
d_interval->writeTmp(i,j, tmp);
__syncthreads();
//tmp = I- tmp
tmp = d_interval->getI(i,j) - d_interval->getTmp(i,j);
d_interval->writeTmp(i,j, tmp);
__syncthreads();
//tmp2 = tmp*P
tmp = 0;
for(int k=0; k<5; k++)
{
tmp += d_interval->getTmp(i,k)*d_interval->getP(k,j);
}
d_interval->writeTmp2(i,j, tmp);
__syncthreads();
d_interval->writeP(i,j, d_interval->getTmp2(i,j));
}
__global__ void updateKernel(PointCellDevice* d_intvl, float* d_updateData, int* d_intvlIndex)
{
int index = d_intvlIndex[blockIdx.x];
float xNew = d_updateData[blockIdx.x*3];
float yNew = d_updateData[blockIdx.x*3+1];
float thetaNew = d_updateData[blockIdx.x*3+2];
/* if(threadIdx.x == 0)
{
printf("Update ID %d with x %f y %f theta %f, updateValues x %f y %f theta %f\n", d_intvl[index].getID(),d_intvl[index].getX(),d_intvl[index].getY(),d_intvl[index].getTheta(),xNew, yNew,thetaNew);
}*/
float x = d_intvl[index].data[5];
float y = d_intvl[index].data[6];
float theta = d_intvl[index].data[7];
float velocity = sqrtf((xNew - x) * (xNew - x) + (yNew - y)*(yNew - y)) / TIMESTAMP;
float phi = (thetaNew-theta) / TIMESTAMP;
if(threadIdx.x == 0)
{
d_intvl[index].setVelocity(velocity);
d_intvl[index].setPhi(phi);
}
updateDevice(&(d_intvl[index]),threadIdx.x,velocity, phi,xNew,yNew,thetaNew);
}
/*
* marks for every convoy whether the given ids are included in it or not
*/
__global__ void findIDInConvoyDevice(Convoy* d_convoy, int* d_IDIncluded, int id1, int id2)
{
//check whether thread is in bounds
if(((threadIdx.x < d_convoy[blockIdx.x].endIndexID) && (d_convoy[blockIdx.x].endIndexID > d_convoy[blockIdx.x].startIndexID)) || ((d_convoy[blockIdx.x].endIndexID < d_convoy[blockIdx.x].startIndexID) && (threadIdx.x != d_convoy[blockIdx.x].endIndexID)))
{
int index = blockIdx.x*2;
//init memory
d_IDIncluded[index] = INT_MAX;
d_IDIncluded[index+1] = INT_MAX;
__syncthreads();
//check and write results
int result = (d_convoy[blockIdx.x].participatingVehicles[threadIdx.x] == id1);
if(result)
{
atomicMin(&(d_IDIncluded[index]), threadIdx.x);
}
result = (d_convoy[blockIdx.x].participatingVehicles[threadIdx.x] == id2);
if(result)
{
atomicMin(&(d_IDIncluded[index+1]), threadIdx.x);
}
//if current convoy is the ego convoy, mark it with INT_MIN
result = (d_convoy[blockIdx.x].participatingVehicles[threadIdx.x] == -1);
if(result)
{
atomicMin(&(d_IDIncluded[index+1]), INT_MIN);
atomicMin(&(d_IDIncluded[index]), INT_MIN);
}
}
}
/*
* checks for every convoy whether the given vehicle position is already included or not
*/
__global__ void checkConvoyForDuplicateDevice(Convoy* d_convoy, PointCellDevice* d_vehicle, bool* d_duplicate)
{
//check whether thread is in bounds
if(((threadIdx.x < d_convoy[blockIdx.x].endIndexTracks) && (d_convoy[blockIdx.x].endIndexTracks > d_convoy[blockIdx.x].startIndexTracks)) || ((d_convoy[blockIdx.x].endIndexTracks < d_convoy[blockIdx.x].startIndexTracks) && (threadIdx.x != d_convoy[blockIdx.x].endIndexTracks)))
{
d_duplicate[blockIdx.x] = true;
bool result = (d_convoy[blockIdx.x].tracks[threadIdx.x].x != (floor(d_vehicle->getX())+0.5));
if(!result)
{
d_duplicate[blockIdx.x] = d_duplicate[blockIdx.x] && result;
}
}
}
/*
* checks for every convoy whether own vehicle position is already included or not
*/
__global__ void checkConvoyForDuplicateDeviceSelf(Convoy* d_convoy, bool* d_duplicate)
{
//check whether thread is in bounds
if(((threadIdx.x < d_convoy[blockIdx.x].endIndexTracks) && (d_convoy[blockIdx.x].endIndexTracks > d_convoy[blockIdx.x].startIndexTracks)) || ((d_convoy[blockIdx.x].endIndexTracks < d_convoy[blockIdx.x].startIndexTracks) && (threadIdx.x != d_convoy[blockIdx.x].endIndexTracks)))
{
d_duplicate[blockIdx.x] = true;
bool result = (d_convoy[blockIdx.x].tracks[threadIdx.x].x != 0.5);
if(!result)
{
d_duplicate[blockIdx.x] = d_duplicate[blockIdx.x] && result;
}
}
}
/*
* checks for every updated vehicle whether its new position is already included in the corresponding history
*/
__global__ void checkHistoryForDuplicateDevice(History* d_history, PointCellDevice* d_intvl, int* d_intvlIndex, int* d_IDincluded, bool* d_duplicate)
{
//check whether thread is in bounds
if(((threadIdx.x < d_history[d_IDincluded[blockIdx.x]].endIndex) && (d_history[d_IDincluded[blockIdx.x]].endIndex > d_history[d_IDincluded[blockIdx.x]].startIndex)) || ((d_history[d_IDincluded[blockIdx.x]].endIndex < d_history[d_IDincluded[blockIdx.x]].startIndex) && (threadIdx.x != d_history[d_IDincluded[blockIdx.x]].endIndex)))
{
d_duplicate[blockIdx.x] = true;
int index = d_intvlIndex[blockIdx.x];
float intvl = floor(d_intvl[index].getX()) + 0.5f;
if(d_history[d_IDincluded[blockIdx.x]].tracks[threadIdx.x].x == intvl)
{
d_duplicate[blockIdx.x] = false;
}
}
}
/*
* tries to find the index of the history corresponding to each car
*/
__global__ void findHistoryWithIDDevice(History* d_history, PointCellDevice* d_intvl, int* d_intvlIndex, int* d_IDincluded)
{
int index = d_intvlIndex[threadIdx.x];
int ID = d_intvl[index].getID();
if(d_history[blockIdx.x].ID == ID)
{
//write index to memory
d_IDincluded[threadIdx.x] = blockIdx.x;
}
}
/*
* adds updated positions to the corresponding histories
*/
__global__ void addUpdatedPositionToHistoryDevice(History* d_history, PointCellDevice* d_intvl, int* d_intvlIndex, int* d_IDincluded, bool* d_duplicate)
{
int intvl = floor(d_intvl[d_intvlIndex[threadIdx.x]].getX());
d_intvl[d_intvlIndex[threadIdx.x]].setX(intvl+ 0.5);
int historyIndex = d_IDincluded[threadIdx.x];
//only add the position if it is not already contained in the history (d_duplicate is true in that case)
if(d_duplicate[threadIdx.x])
{
int index = d_history[historyIndex].endIndex;
d_history[historyIndex].tracks[index].subIntvl = 0.5;
d_history[historyIndex].tracks[index].x = d_intvl[d_intvlIndex[threadIdx.x]].getX();
d_history[historyIndex].tracks[index].y = d_intvl[d_intvlIndex[threadIdx.x]].getY();
d_history[historyIndex].tracks[index].theta = d_intvl[d_intvlIndex[threadIdx.x]].getTheta();
index = (index+1)%MAX_LENGTH_HIST_CONV;
d_history[historyIndex].endIndex = index;
//if number of position exceeds limit, delete oldest position
if(index == d_history[historyIndex].startIndex)
{
d_history[historyIndex].startIndex = (d_history[historyIndex].startIndex+1)%MAX_LENGTH_HIST_CONV;
}
}
}
int main()
{
//uncomment the define in data.cuh to enable laser simulation
#ifdef CREATE_MEASURES
PGMReader pgmReader;
double speed = 4.0/3.0;
for(int i=0; i<NUM_MEASUREMENT; i++)
{
std::string number = getNextMeasureAsString(i);
pgmReader.simulateLaserRays(number);
//uncomment for automatic generation of EML files for a straight drive at 120 km/h
/* std::ofstream EMLMeasureFile;
std::ostringstream measurePath;
measurePath << "./Laserdata/EML" << number << ".txt";
EMLMeasureFile.open (measurePath.str().c_str());
EMLMeasureFile << ((double)i)*speed << " 0 0 120 0" << std::endl;
EMLMeasureFile.close();*/
}
#endif
int devID = 0;
cudaError_t error;
cudaDeviceProp deviceProp;
error = cudaGetDevice(&devID);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
error = cudaGetDeviceProperties(&deviceProp, devID);
if (deviceProp.computeMode == cudaComputeModeProhibited) {
fprintf(stderr,
"Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (deviceProp.canMapHostMemory != 1){
fprintf(stderr, "Device cannot map memory!\n");
return 1;
}
if (error != cudaSuccess) {
printf(
"cudaGetDeviceProperties returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
} else {
#ifdef PRINT
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID,
deviceProp.name, deviceProp.major, deviceProp.minor);
#endif
}
cudaSetDeviceFlags(cudaDeviceMapHost);
cudaEvent_t startEvent, stopEvent, start2Event, stop2Event;
cudaEventCreate(&startEvent);
cudaEventCreate(&stopEvent);
cudaEventCreate(&start2Event);
cudaEventCreate(&stop2Event);
float time = 0;
cudaEventRecord(startEvent, 0);
ConvoyTracker tracker;
cudaEventRecord(stopEvent, 0);
cudaEventSynchronize(stopEvent);
cudaEventElapsedTime(&time,startEvent,stopEvent);
#if SZENARIO == 6
std::vector<PointCellDevice> vehiclesSim;
error = cudaHostAlloc((void**) &tracker.h_vehicleSim, MAX_SEGMENTS*sizeof(PointCellDevice), cudaHostAllocMapped);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
error = cudaHostGetDevicePointer(&tracker.d_vehicleSim_ptr, tracker.h_vehicleSim, 0);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}
for(uint i=0;i <20; i++)
{
tracker.h_vehicleSim[i].initializeMemory();
if(i%2 == 0)
{
tracker.h_vehicleSim[i].setY(-3);
tracker.h_vehicleSim[i].setVelocity(38.9);
}
else
{
tracker.h_vehicleSim[i].setY(3);
tracker.h_vehicleSim[i].setVelocity(27.8);
}
tracker.h_vehicleSim[i].setX((i/2)*8);
tracker.h_vehicleSim[i].setTheta(0);
tracker.h_vehicleSim[i].setPhi(0);
}
#endif
cudaEventRecord(startEvent, 0);
int vehicleCount = 0;
float compensateHistory[NUM_MEASUREMENT];
for(int i=0; i<NUM_MEASUREMENT; i++)
{
cudaEventRecord(start2Event, 0);
std::vector<PointCellDevice*> trackedVehicles;
std::string number = getNextMeasureAsString(i);
tracker.readEMLData(number);
//1. Compensate own vehicle motion
double deltaX = tracker.getX() - tracker.getXOld();
double deltaY = tracker.getY() - tracker.getYOld();
double deltaYaw = tracker.getYaw() - tracker.getYawOld();
float angleInRadians = deltaYaw * ((float)M_PI) / 180.0f;
float mat[2][2] = { { cosf(angleInRadians), -sinf(angleInRadians) },
{ sinf(angleInRadians), cosf(angleInRadians) } };
//call the ego compensation kernels according to the current buffer sizes
if(tracker.historySize > 0)
{
compensateEgoMotionHistory<<<tracker.historySize, MAX_LENGTH_HIST_CONV,0, tracker.stream4>>>(tracker.d_history_ptr, deltaX, deltaY, deltaYaw);
}
//read new laser data and extract vehicles
vehicleCount = tracker.reader.processLaserData(number,tracker.getCurrentSpeed(), tracker.getCurrentYawRate(), tracker.h_vehicles);
if(tracker.convoySize > 0)
{
compensateEgoMotionConvoy<<<tracker.convoySize, MAX_LENGTH_HIST_CONV,0, tracker.stream2>>>(tracker.d_convoys_ptr, deltaX, deltaY, deltaYaw);
//apply the ego motion compensation to the highest value as well
for(uint k = 0; k < tracker.convoySize; k++)
{
tracker.convoys[k].highestValue.subIntvl += deltaX;
int numIntervals = (int) ((tracker.convoys[k].highestValue.subIntvl) / INTERVALL_LENGTH);
tracker.convoys[k].highestValue.x -= numIntervals;
tracker.convoys[k].highestValue.subIntvl -= numIntervals;
tracker.convoys[k].highestValue.y -= deltaY;
tracker.convoys[k].highestValue.theta -= angleInRadians;
float xAbs = tracker.convoys[k].highestValue.x;
float yAbs = tracker.convoys[k].highestValue.y;
xAbs = (mat[0][0] * xAbs + mat[0][1] * yAbs) - xAbs;
yAbs = (mat[1][0] * xAbs + mat[1][1] * yAbs) - yAbs;
tracker.convoys[k].highestValue.y -= yAbs;
tracker.convoys[k].highestValue.subIntvl -= xAbs;
}
}
//compensate ego motion of the interval map and perform the predict step
if(tracker.intervalSize > 0)
{
compensateEgoMotionMap<<<1,tracker.intervalSize,0,tracker.stream3>>>(tracker.d_intervalMap_ptr, tracker.d_subIntvl_ptr, deltaX, deltaY, deltaYaw);
predict<<<tracker.intervalSize,25,0,tracker.stream3>>>(tracker.d_intervalMap_ptr);
}
//search histories for match with ego position
if(tracker.historySize > 0)
{
*tracker.h_historyMatchSelf = INT_MAX;
findConvoyDeviceSelf<<<tracker.historySize, MAX_LENGTH_HIST_CONV>>>(tracker.d_history_ptr, tracker.d_historyMatchSelf_ptr);
cudaDeviceSynchronize();
if(*tracker.h_historyMatchSelf != INT_MAX)
{
tracker.findConvoySelf(*tracker.h_historyMatchSelf);
}
}
tracker.transformDataFromDevice();
cudaStreamSynchronize(tracker.stream3);
//write the address of each pointcell to the vector
for(uint j=0; j<tracker.intervalSize;j++)
{
trackedVehicles.push_back(&(tracker.h_intervalMap[j]));
}
//3. Associate and Update
#if SZENARIO == 6
tracker.associateAndUpdate(20, trackedVehicles);
#else
tracker.associateAndUpdate(vehicleCount, trackedVehicles);
#endif
cudaEventRecord(stop2Event, 0);
cudaEventSynchronize(stop2Event);
float time3;
cudaEventElapsedTime(&time3, start2Event, stop2Event);
compensateHistory[i] = time3;
}
cudaEventRecord(stopEvent, 0);
cudaEventSynchronize(stopEvent);
float sumH = 0;
for(int i = 0; i< NUM_MEASUREMENT; i++)
{
sumH += compensateHistory[i];
}
sumH /= NUM_MEASUREMENT;
#ifdef PRINT
std::cout << "Duration of compensate History: " << sumH << std::endl;
#endif
float time2;
cudaEventElapsedTime(&time2, startEvent, stopEvent);
#ifdef PRINT
std::cout << "Overall Time: " << time +time2<< std::endl;
#else
std::cout << time + time2 << std::endl;
#endif
#if SZENARIO == 6
cudaFreeHost(tracker.h_vehicleSim);
#endif
tracker.visualizeConvoys();
tracker.visualizeHistory();
return 0;
}
/**
* Stores current Speed and yaw rate from file to class variables
*/
void ConvoyTracker::readEMLData(std::string number)
{
std::ostringstream measurePath;
measurePath << EMLPATH << number << ".txt";
#ifdef PRINT
std::cout << measurePath.str() << std::endl;
#endif
std::ifstream input(measurePath.str().c_str());
std::string line;
std::string segment;
if(std::getline( input, line ))
{
std::stringstream ss;
ss << line;
int dataCnt = 1;
while(std::getline(ss, segment, ' '))
{
switch (dataCnt)
{
case 1:
{
//x in m
xOld = x;
x = atof(segment.c_str());
++dataCnt;
break;
}
case 2:
{
//y in m
yOld = y;
y = atof(segment.c_str());
++dataCnt;
break;
}
case 3:
{
//yaw in °
yawOld = yaw;
yaw = atof(segment.c_str());
++dataCnt;
break;
}
case 4:
{
//velocity in kmh
//Compute value in m/s
currentSpeed = atof(segment.c_str()) / 3.6;
++dataCnt;
break;
}
case 5:
{
//yaw rate in °/s
//Compute value in rad/s
currentYawRate = atof(segment.c_str()) * M_PI / 180.0;
break;
}
}
}
EMLPos curPos;
curPos.x = x;
curPos.y = y;
curPos.theta = yaw;
EML.push_back(curPos);
}
}
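/*
 * Sketch of the unit conversions applied in readEMLData above. According to the
 * case comments, one EML line has the layout
 * "x[m] y[m] yaw[deg] velocity[km/h] yawRate[deg/s]"; only the last two fields
 * need conversion. These helpers are illustrative and not called elsewhere.
 */
static inline double kmhToMetersPerSecond(double velocityKmh)
{
	//velocity in km/h -> m/s
	return velocityKmh / 3.6;
}
static inline double degPerSecToRadPerSec(double yawRateDegPerSec)
{
	//yaw rate in deg/s -> rad/s
	return yawRateDegPerSec * M_PI / 180.0;
}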
double ConvoyTracker::getCurrentSpeed() const {
return currentSpeed;
}
void ConvoyTracker::setCurrentSpeed(double currentSpeed) {
this->currentSpeed = currentSpeed;
}
double ConvoyTracker::getCurrentYawRate() const {
return currentYawRate;
}
void ConvoyTracker::setCurrentYawRate(double currentYawRate) {
this->currentYawRate = currentYawRate;
}
double ConvoyTracker::getX() const {
return x;
}
void ConvoyTracker::setX(double x) {
this->x = x;
}
double ConvoyTracker::getXOld() const {
return xOld;
}
void ConvoyTracker::setXOld(double old) {
this->xOld = old;
}
double ConvoyTracker::getY() const {
return y;
}
void ConvoyTracker::setY(double y) {
this->y = y;
}
double ConvoyTracker::getYaw() const {
return yaw;
}
void ConvoyTracker::setYaw(double yaw) {
this->yaw = yaw;
}
double ConvoyTracker::getYawOld() const {
return yawOld;
}
void ConvoyTracker::setYawOld(double yawOld) {
this->yawOld = yawOld;
}
double ConvoyTracker::getYOld() const {
return yOld;
}
void ConvoyTracker::setYOld(double old) {
yOld = old;
}
/**
* Searches for corresponding vehicles using a Global Nearest Neighbor association and updates the matched tracks
* @param vehicleCount: number of observed vehicles extracted from the current laser file
* @param trackedVehicles: contains pointers to all vehicles stored in the interval map
*/
void ConvoyTracker::associateAndUpdate(int vehicleCount, std::vector<PointCellDevice*> trackedVehicles)
{
//initialize all possible history match IDs to -1 to avoid false detections in findConvoy
memSetHistoryMatch<<<1,MAX_SEGMENTS,0,stream2>>>(d_historyMatch_ptr);
convoyCheckSize = 0;
int updateCounter = 0;
int indexCounter = trackedVehicles.size();
std::vector<int> indicesToAdd;
std::vector<PointCellDevice*> updateCheck;
for(uint i = 0; i<vehicleCount; i++)
{
//get values from observation
#if SZENARIO == 6
double x = h_vehicleSim[i].getX();
double y = h_vehicleSim[i].getY();
double theta = h_vehicleSim[i].getTheta();
#else
double x = h_vehicles[i].getX();
double y = h_vehicles[i].getY();
double theta = h_vehicles[i].getTheta();
#endif
double minDist = INT_MAX;
int minIndex = INT_MAX;
#ifdef PRINT
std::cout << "X: " << x << " Y: " << y << " Theta: " << theta <<std::endl;
#endif
for(uint j = 0; j<trackedVehicles.size(); j++)
{
//compute distance to stored vehicle
double x1 = trackedVehicles.at(j)->getX();
double y1 = trackedVehicles.at(j)->getY();
double theta1 = trackedVehicles.at(j)->getTheta();
#ifdef PRINT
std::cout << "X1: " << x1 << " Y1: " << y1<< " Theta1: " << theta1 <<std::endl;
#endif
double dist = sqrt((x - x1)*(x - x1) + (y - y1)*(y - y1) + (theta - theta1)*(theta - theta1));
//find vehicle with smallest distance
if(dist < minDist)
{
minDist = dist;
minIndex = j;
}
}
#ifdef PRINT
std::cout << "Min distance: " << minDist << std::endl;
#endif
if(minDist > ASSOCIATION_THRESHOLD)
{
//do not associate vehicles that are too far apart
//create new track instead
++ID;
#if SZENARIO == 6
h_vehicleSim[i].setID(ID);
#else
h_vehicles[i].setID(ID);
#endif
indicesToAdd.push_back(i);
history[endIndexHistory].ID = ID;
history[endIndexHistory].tracks[0].subIntvl = 0.5f;
#if SZENARIO == 6
history[endIndexHistory].tracks[0].x = h_vehicleSim[i].getX();
history[endIndexHistory].tracks[0].y = h_vehicleSim[i].getY();
history[endIndexHistory].tracks[0].theta = h_vehicleSim[i].getTheta();
#else
history[endIndexHistory].tracks[0].x = h_vehicles[i].getX();
history[endIndexHistory].tracks[0].y = h_vehicles[i].getY();
history[endIndexHistory].tracks[0].theta = h_vehicles[i].getTheta();
#endif
history[endIndexHistory].startIndex = 0;
history[endIndexHistory].endIndex = 1;
int index = (endIndexHistory+1)%NUM_HIST;
if(index == startIndexHistory)
{
startIndexHistory = (startIndexHistory+1)%NUM_HIST;
}
else
{
++historySize;
}
endIndexHistory = index;
#ifdef PRINT
std::cout << "Added new Vehicle with ID " << ID << std::endl;
#endif
currentHistoryOnDevice = false;
#if SZENARIO == 6
h_convoyCheck[convoyCheckSize] = h_vehicleSim[i];
#else
h_convoyCheck[convoyCheckSize] = h_vehicles[i];
#endif
++convoyCheckSize;
}
else
{
//vehicle matched, mark for update
PointCellDevice* tmp = trackedVehicles.at(trackedVehicles.size() -1 );
PointCellDevice* update = trackedVehicles.at(minIndex);
#ifdef PRINT
std::cout << "Update ID " << update->getID() << std::endl;
#endif
trackedVehicles.at(minIndex) = tmp;
h_intvlIndex[minIndex] = h_intvlIndex[trackedVehicles.size()-1];
h_intvlIndex[trackedVehicles.size()-1] = minIndex;
//save update data for later use
#if SZENARIO == 6
h_updateData[updateCounter*3] = h_vehicleSim[i].getX();
h_updateData[updateCounter*3+1] = h_vehicleSim[i].getY();
h_updateData[updateCounter*3+2] = h_vehicleSim[i].getTheta();
#else
h_updateData[updateCounter*3] = h_vehicles[i].getX();
h_updateData[updateCounter*3+1] = h_vehicles[i].getY();
h_updateData[updateCounter*3+2] = h_vehicles[i].getTheta();
#endif
trackedVehicles.pop_back();
#ifdef PRINT
std::cout << "Updated vehicle with ID " << update->getID() << std::endl;
#endif
updateCheck.push_back(update);
++updateCounter;
}
}
//get the interval index for all updated pointers
for(int i=0; i<updateCounter; i++)
{
for(int j=0; j<intervalSize; j++)
{
if(updateCheck[i] == &h_intervalMap[j])
{
h_intvlIndex[i] = j;
break;
}
}
}
//Update all matched vehicles
if(updateCounter >0)
{
updateKernel<<<updateCounter,25>>>(d_intervalMap_ptr, d_updataData_ptr, d_intvlIndex_ptr);
findHistoryWithIDDevice<<<historySize,updateCounter>>>(d_history_ptr,d_intervalMap_ptr,d_intvlIndex_ptr,d_IDincluded_ptr);
checkHistoryForDuplicateDevice<<<updateCounter, MAX_LENGTH_HIST_CONV>>>(d_history_ptr,d_intervalMap_ptr,d_intvlIndex_ptr,d_IDincluded_ptr,d_duplicate_ptr);
addUpdatedPositionToHistoryDevice<<<1,updateCounter>>>(d_history_ptr, d_intervalMap_ptr,d_intvlIndex_ptr, d_IDincluded_ptr,d_duplicate_ptr);
cudaDeviceSynchronize();
for(int i=0; i<updateCounter;i++)
{
h_convoyCheck[convoyCheckSize] = h_intervalMap[h_intvlIndex[i]];
++convoyCheckSize;
}
}
//delete all tracks that could not be matched
for(uint k = 0; k < trackedVehicles.size(); k++)
{
PointCellDevice* tmp = trackedVehicles.at(k);
for(uint m = 0; m < intervalSize; m++)
{
if(tmp == &h_intervalMap[m])
{
h_intervalMap[m] = h_intervalMap[--intervalSize];
break;
}
}
}
//add all observations that could not be matched
for(uint k = 0; k < indicesToAdd.size(); k++)
{
if(intervalSize < MAX_SEGMENTS)
{
#if SZENARIO == 6
h_intervalMap[intervalSize++] = h_vehicleSim[indicesToAdd.at(k)];
#else
h_intervalMap[intervalSize++] = h_vehicles[indicesToAdd.at(k)];
#endif
}
}
//find Convoy
if(historySize > 0 && convoyCheckSize >0)
{
dim3 grid(historySize, convoyCheckSize);
findConvoyDevice<<<grid, MAX_LENGTH_HIST_CONV>>>(d_newVeh_ptr,d_history_ptr,d_historyMatch_ptr);
cudaDeviceSynchronize();
for(uint i=0; i<convoyCheckSize;i++)
{
//look up if vehicle i matched history position
if(h_historyMatch[i] != INT_MAX)
{
//get vehicle i
PointCellDevice vehicle = h_convoyCheck[i];
float x = vehicle.getX();
int interval = floor(x);
//get history id
int id1 = h_historyMatch[i];
int id2 = vehicle.getID();
#ifdef PRINT
std::cout << "ID1 " << id1 << " ID2 " << id2 << std::endl;
#endif
bool convoyFound = false;
if(convoySize >0)
{ //find convoy and duplicates on device
findIDInConvoyDevice<<<convoySize, MAX_LENGTH_HIST_CONV,0,stream3>>>(d_convoys_ptr, d_IDincluded_ptr,id1,id2);
checkConvoyForDuplicateDevice<<<convoySize, MAX_LENGTH_HIST_CONV,0,stream2>>>(d_convoys_ptr, &(d_newVeh_ptr[i]),d_duplicate_ptr);
cudaDeviceSynchronize();
}
for(uint j = startIndexConvoys; j != endIndexConvoys; j = (j+1)%NUM_CONV)
{
Convoy currentConvoy = convoys[j];
int it1 = h_IDincluded[j*2];
int it2 = h_IDincluded[j*2+1];
if(it1 == INT_MIN || it2 == INT_MIN)
{
continue;
}
if(it1 != INT_MAX && it2 != INT_MAX)
{
//convoy already exists with both IDS
//check if this x value is already contained
if(h_duplicate[j])
{
//x value is not contained
int index = (currentConvoy.endIndexTracks+1)%MAX_LENGTH_HIST_CONV;
convoys[j].tracks[currentConvoy.endIndexTracks].x = interval+0.5f;
convoys[j].tracks[currentConvoy.endIndexTracks].y = vehicle.getY();
convoys[j].tracks[currentConvoy.endIndexTracks].theta = vehicle.getTheta();
convoys[j].tracks[currentConvoy.endIndexTracks].subIntvl = 0.5f;
convoys[j].endIndexTracks = index;
if(index == convoys[j].startIndexTracks)
{
convoys[j].startIndexTracks = (convoys[j].startIndexTracks+1)%MAX_LENGTH_HIST_CONV;
}
if(interval+0.5 > convoys[j].highestValue.x)
{
convoys[j].highestValue.x = interval+0.5f;
convoys[j].highestValue.y = vehicle.getY();
convoys[j].highestValue.theta = vehicle.getTheta();
convoys[j].highestValue.subIntvl = 0.5f;
}
}
convoyFound = true;
#ifdef PRINT
std::cout << "existing Convoy with ID " << convoys[j].ID << " y " << vehicle.getY() << " startIndexTracks: " << convoys[j].startIndexTracks <<" EndindexTracks: "<< convoys[j].endIndexTracks<< std::endl;
#endif
break;
}
}
if(convoyFound)
{
continue;
}
for(uint j = startIndexConvoys; j != endIndexConvoys; j = (j+1)%NUM_CONV)
{
Convoy currentConvoy = convoys[j];
int it1 = h_IDincluded[j*2];
int it2 = h_IDincluded[j*2+1];
if(it1 == INT_MIN || it2 == INT_MIN)
{
continue;
}
if (it1 != INT_MAX)
{
int index = (currentConvoy.endIndexTracks+1)%MAX_LENGTH_HIST_CONV;
//check if this x value is already contained
if(h_duplicate[j])
{
convoys[j].tracks[currentConvoy.endIndexTracks].x = interval+0.5f;
convoys[j].tracks[currentConvoy.endIndexTracks].y = vehicle.getY();
convoys[j].tracks[currentConvoy.endIndexTracks].theta = vehicle.getTheta();
convoys[j].tracks[currentConvoy.endIndexTracks].subIntvl = 0.5f;
convoys[j].endIndexTracks = index;
if(index == convoys[j].startIndexTracks)
{
convoys[j].startIndexTracks = (convoys[j].startIndexTracks+1)%MAX_LENGTH_HIST_CONV;
}
if(interval+0.5 > convoys[j].highestValue.x)
{
convoys[j].highestValue.x = interval+0.5f;
convoys[j].highestValue.y = vehicle.getY();
convoys[j].highestValue.theta = vehicle.getTheta();
convoys[j].highestValue.subIntvl = 0.5f;
}
}
int IDindex = (currentConvoy.endIndexID+1)%MAX_LENGTH_HIST_CONV;
convoys[j].participatingVehicles[currentConvoy.endIndexID] = id2;
convoys[j].endIndexID = IDindex;
if(IDindex == convoys[j].startIndexID)
{
convoys[j].startIndexID = (convoys[j].startIndexID+1)%MAX_LENGTH_HIST_CONV;
}
convoyFound = true;
#ifdef PRINT
std::cout << "existing Convoy with ID " << convoys[j].ID << " y " << vehicle.getY() << " startIndexTracks: " << convoys[j].startIndexTracks <<" EndindexTracks: "<< convoys[j].endIndexTracks<< std::endl;
#endif
break;
}
else if (it2 != INT_MAX)
{
//only add the position to the convoy if it will be the highest value or the difference in y is small enough
if(interval+0.5 < convoys[j].highestValue.x && !checkConvoyForY(vehicle.getY(),interval+0.5f,currentConvoy))
{
continue;
}
int index = (currentConvoy.endIndexTracks+1)%MAX_LENGTH_HIST_CONV;
//check if this x value is already contained
if(h_duplicate[j])
{
convoys[j].tracks[currentConvoy.endIndexTracks].x = interval+0.5f;
convoys[j].tracks[currentConvoy.endIndexTracks].y = vehicle.getY();
convoys[j].tracks[currentConvoy.endIndexTracks].theta = vehicle.getTheta();
convoys[j].tracks[currentConvoy.endIndexTracks].subIntvl = 0.5f;
convoys[j].endIndexTracks = index;
if(index == convoys[j].startIndexTracks)
{
convoys[j].startIndexTracks = (convoys[j].startIndexTracks+1)%MAX_LENGTH_HIST_CONV;
}
if(interval+0.5 > convoys[j].highestValue.x)
{
convoys[j].highestValue.x = interval+0.5f;
convoys[j].highestValue.y = vehicle.getY();
convoys[j].highestValue.theta = vehicle.getTheta();
convoys[j].highestValue.subIntvl = 0.5f;
}
}
int IDindex = (currentConvoy.endIndexID+1)%MAX_LENGTH_HIST_CONV;
convoys[j].participatingVehicles[currentConvoy.endIndexID] = id1;
convoys[j].endIndexID = IDindex;
if(IDindex == convoys[j].startIndexID)
{
convoys[j].startIndexID = (convoys[j].startIndexID+1)%MAX_LENGTH_HIST_CONV;
}
convoyFound = true;
#ifdef PRINT
std::cout << "existing Convoy with ID " << convoys[j].ID << " y " << vehicle.getY() << " startIndexTracks: " << convoys[j].startIndexTracks <<" EndindexTracks: "<< convoys[j].endIndexTracks<< std::endl;
#endif
break;
}
}
//if no convoy matches our needs, create a new one
if(!convoyFound)
{
int cIndex = endIndexConvoys;
convoys[cIndex].ID = convoyID++;
convoys[cIndex].participatingVehicles[0] = id1;
convoys[cIndex].participatingVehicles[1] = id2;
convoys[cIndex].startIndexID = 0;
convoys[cIndex].endIndexID = 2;
convoys[cIndex].startIndexTracks = 0;
convoys[cIndex].endIndexTracks = 1;
convoys[cIndex].tracks[0].x = interval+0.5f;
convoys[cIndex].tracks[0].y = vehicle.getY();
convoys[cIndex].tracks[0].theta = vehicle.getTheta();
convoys[cIndex].tracks[0].subIntvl = 0.5f;
endIndexConvoys = (endIndexConvoys+1)%NUM_CONV;
convoys[cIndex].highestValue.x = interval+0.5f;
convoys[cIndex].highestValue.y = vehicle.getY();
convoys[cIndex].highestValue.theta = vehicle.getTheta();
convoys[cIndex].highestValue.subIntvl = 0.5f;
if(convoySize == NUM_CONV)
{
startIndexConvoys = (startIndexConvoys+1)%NUM_CONV;
}
else
{
++convoySize;
}
#ifdef PRINT
std::cout << "new Convoy with ID " << convoyID-1 << " containing "<< id1 << " , " << id2 << std::endl;
#endif
}
currentConvoyOnDevice = false;
}
}
}
}
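/*
 * Minimal sketch of the association metric used in associateAndUpdate above:
 * a plain Euclidean distance over (x, y, theta). Observations that are farther
 * than ASSOCIATION_THRESHOLD away from every tracked vehicle start a new track
 * instead of updating an existing one. Illustrative only, not called elsewhere.
 */
static inline double associationDistance(PointCellDevice observation, PointCellDevice track)
{
	double dx = observation.getX() - track.getX();
	double dy = observation.getY() - track.getY();
	double dTheta = observation.getTheta() - track.getTheta();
	return sqrt(dx*dx + dy*dy + dTheta*dTheta);
}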
/*
* calls the visualizer to draw the convoys
*/
void ConvoyTracker::visualizeConvoys()
{
visualizer.visualizeConvoys(EML, convoys, startIndexConvoys, endIndexConvoys);
}
/*
* calls the visualizer to draw the histories
*/
void ConvoyTracker::visualizeHistory()
{
visualizer.visualizeHistory(EML, history, startIndexHistory, endIndexHistory);
}
/*
* checks the transformed data from device, deletes entries if necessary
*/
void ConvoyTracker::transformDataFromDevice()
{
std::vector<int> toDelete;
int end;
//check history for delete
for (int i = startIndexHistory; i != endIndexHistory; i = (i+1)%NUM_HIST)
{
int endId = (history[i].endIndex-1)%MAX_LENGTH_HIST_CONV;
if(endId <0)
{
endId = MAX_LENGTH_HIST_CONV-1;
}
if(history[i].tracks[endId].x < -5)
{
//newest position lies more than 5m behind the own vehicle, mark history for deletion
#ifdef PRINT
std::cout << "Delete history with ID " << history[i].ID << std::endl;
#endif
toDelete.push_back(i);
}
}
//delete history
if(toDelete.size() > 0)
{
for(int i=toDelete.size()-1; i>=0; i--)
{
end = (endIndexHistory-1)%NUM_HIST;
if(end < 0)
{
end = NUM_HIST-1;
}
if(toDelete.at(i) != end)
{
history[toDelete.at(i)] = history[end];
}
endIndexHistory = end;
--historySize;
}
}
toDelete.clear();
//check convoys for delete
for (int i = startIndexConvoys; i != endIndexConvoys; i = (i + 1) % NUM_CONV)
{
end = (convoys[i].endIndexTracks-1) % MAX_LENGTH_HIST_CONV;
if(end < 0)
{
end = MAX_LENGTH_HIST_CONV-1;
}
if(convoys[i].highestValue.x < -5)
{
#ifdef PRINT
std::cout << "delete convoy with ID " << convoys[i].ID << std::endl;
#endif
toDelete.push_back(i);
}
}
//delete convoys
if(toDelete.size() > 0)
{
for(int i=toDelete.size()-1; i >=0; i--)
{
end = (endIndexConvoys-1) % NUM_CONV;
if(end < 0)
{
end = NUM_CONV-1;
}
convoys[toDelete.at(i)] = convoys[end];
endIndexConvoys = end;
--convoySize;
}
}
toDelete.clear();
//check interval map for delete
for(uint i=0; i<intervalSize;i++)
{
if(h_intervalMap[i].getX() < -100)
{
toDelete.push_back(i);
}
}
//delete pointcells
if(toDelete.size() > 0)
{
for(int i=toDelete.size()-1; i>=0;i--)
{
h_intervalMap[toDelete.at(i)] = h_intervalMap[--intervalSize];
}
}
}
/*
* Adds own position to convoy
*/
void ConvoyTracker::findConvoySelf(int ID)
{
double x = 0;
int interval = floor(x);
int id1 = -1;
int id2 = ID;
#ifdef PRINT
std::cout << "ID1 " << id1 << " ID2 " << id2 << std::endl;
#endif
bool convoyFound = false;
if(convoySize >0)
{
//findConvoy and duplicateCheck for all convoys
findIDInConvoyDevice<<<convoySize, MAX_LENGTH_HIST_CONV,0,stream3>>>(d_convoys_ptr, d_IDincluded_ptr,id1,id2);
checkConvoyForDuplicateDeviceSelf<<<convoySize, MAX_LENGTH_HIST_CONV,0,stream2>>>(d_convoys_ptr,d_duplicate_ptr);
cudaDeviceSynchronize();
}
for(uint j = startIndexConvoys; j != endIndexConvoys; j = (j+1)%NUM_CONV)
{
Convoy currentConvoy = convoys[j];
int it1 = h_IDincluded[j*2];
int it2 = h_IDincluded[j*2+1];
if(it1 != INT_MAX && it2 != INT_MAX)
{
//convoy already exists with both IDS
//check if this x value is already contained
if(h_duplicate[j])
{
//x value is not contained
int index = (currentConvoy.endIndexTracks+1)%MAX_LENGTH_HIST_CONV;
convoys[j].tracks[currentConvoy.endIndexTracks].x = 0.5f;
convoys[j].tracks[currentConvoy.endIndexTracks].y = 0;
convoys[j].tracks[currentConvoy.endIndexTracks].theta = 0;
convoys[j].tracks[currentConvoy.endIndexTracks].subIntvl = 0.5f;
convoys[j].endIndexTracks = index;
if(index == convoys[j].startIndexTracks)
{
convoys[j].startIndexTracks = (convoys[j].startIndexTracks+1)%MAX_LENGTH_HIST_CONV;
}
if(interval+0.5 > convoys[j].highestValue.x)
{
convoys[j].highestValue.x = 0.5f;
convoys[j].highestValue.y = 0;
convoys[j].highestValue.theta = 0;
convoys[j].highestValue.subIntvl = 0.5f;
}
}
convoyFound = true;
#ifdef PRINT
std::cout << "existing Convoy with ID " << convoys[j].ID << std::endl;
#endif
break;
}
else if (it1 != INT_MAX)
{
//only the ego vehicle ID is already part of this convoy
int index = (currentConvoy.endIndexTracks+1)%MAX_LENGTH_HIST_CONV;
//check if this x value is already contained
if(h_duplicate[j])
{
convoys[j].tracks[currentConvoy.endIndexTracks].x = 0.5f;
convoys[j].tracks[currentConvoy.endIndexTracks].y = 0;
convoys[j].tracks[currentConvoy.endIndexTracks].theta = 0;
convoys[j].tracks[currentConvoy.endIndexTracks].subIntvl = 0.5f;
convoys[j].endIndexTracks = index;
if(index == convoys[j].startIndexTracks)
{
convoys[j].startIndexTracks = (convoys[j].startIndexTracks+1)%MAX_LENGTH_HIST_CONV;
}
if(interval+0.5 > convoys[j].highestValue.x)
{
convoys[j].highestValue.x = 0.5f;
convoys[j].highestValue.y = 0;
convoys[j].highestValue.theta = 0;
convoys[j].highestValue.subIntvl = 0.5f;
}
}
int IDindex = (currentConvoy.endIndexID+1)%MAX_LENGTH_HIST_CONV;
convoys[j].participatingVehicles[currentConvoy.endIndexID] = id2;
convoys[j].endIndexID = IDindex;
if(IDindex == convoys[j].startIndexID)
{
convoys[j].startIndexID = (convoys[j].startIndexID+1)%MAX_LENGTH_HIST_CONV;
}
convoyFound = true;
#ifdef PRINT
std::cout << "existing Convoy with ID " << convoys[j].ID << std::endl;
#endif
break;
}
}
//if there was no match, create new one
if(!convoyFound)
{
int cIndex = endIndexConvoys;
convoys[cIndex].ID = convoyID++;
convoys[cIndex].participatingVehicles[0] = id1;
convoys[cIndex].participatingVehicles[1] = id2;
convoys[cIndex].startIndexID = 0;
convoys[cIndex].endIndexID = 2;
convoys[cIndex].startIndexTracks = 0;
convoys[cIndex].endIndexTracks = 1;
convoys[cIndex].tracks[0].x = 0.5;
convoys[cIndex].tracks[0].y = 0;
convoys[cIndex].tracks[0].theta = 0;
convoys[cIndex].tracks[0].subIntvl = 0.5f;
endIndexConvoys = (endIndexConvoys+1)%NUM_CONV;
convoys[cIndex].highestValue.x = 0.5f;
convoys[cIndex].highestValue.y = 0;
convoys[cIndex].highestValue.theta = 0;
convoys[cIndex].highestValue.subIntvl = 0.5f;
if(convoySize == NUM_CONV)
{
startIndexConvoys = (startIndexConvoys+1)%NUM_CONV;
}
else
{
++convoySize;
}
#ifdef PRINT
std::cout << "new Convoy with ID " << convoyID-1 << " containing "<< id1 << " , " << id2 << std::endl;
#endif
}
}
/*
* checks whether the given y value is close enough to the convoy track whose x position is nearest to the given x
* this should prevent wrong additions in case of a lane change
*/
bool ConvoyTracker::checkConvoyForY(float y, float x, Convoy c)
{
double min = INT_MAX;
double dist;
int index = c.startIndexTracks;
for(int i=c.startIndexTracks; i != c.endIndexTracks; i = (i+1)%MAX_LENGTH_HIST_CONV)
{
dist = fabs(c.tracks[i].x - x);
if(dist < min)
{
min = dist;
index = i;
}
}
dist = fabs(c.tracks[index].y - y);
if(dist > CONVOY_THRESHOLD_Y)
{
return false;
}
return true;
}
|
6a478f79cb01cdc5c0b5399bb7b4ecc5cb512970.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "WTDense.cuh"
WTD::WTD(int argNumOfWordD, int argWordLength) {
numOfWordD = argNumOfWordD;
wordLength = argWordLength;
WTDenseLength = argNumOfWordD*K;
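//K comes from WTDense.cuh (presumably the number of topics), so the dense
//word-topic table holds one counter per (word, topic) pair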
/*WTDense = new int[WTDenseLength];
WTDenseCopy = new int[WTDenseLength];*/
hipHostMalloc((void**)&WTDense, WTDenseLength * sizeof(int));
hipHostMalloc((void**)&WTDenseCopy, WTDenseLength * sizeof(int));
memset(WTDenseCopy, 0, WTDenseLength * sizeof(int));
/*WTRowSumDense = new int[K];*/
}
void WTD::CPUMemSet() {
memset(WTDense, 0, WTDenseLength * sizeof(int));
memset(WTDenseCopy, 0, WTDenseLength * sizeof(int));
//memset(WTRowSumDense, 0, K * sizeof(int));
}
void WTD::GPUMemAllocate() {
hipMalloc((void**)&deviceWTDense, (WTDenseLength) * sizeof(int));
hipMalloc((void**)&deviceWTDenseCopy, (WTDenseLength) * sizeof(int));
//hipMalloc((void**)&deviceWTRowSumDense, (K) * sizeof(int));
WTMemory = (2*WTDenseLength + K ) / 1000000000.0 * sizeof(int);
printf("WT memory usage(Sparse Part):%f GB\n", WTMemory);
}
void WTD::GPUMemInit()
{
hipMemset(deviceWTDense, 0, (WTDenseLength) * sizeof(int));
hipMemset(deviceWTDenseCopy, 0, (WTDenseLength) * sizeof(int));
//hipMemset(deviceWTRowSumDense, 0, (K) * sizeof(int));
}
void WTD::GPUMemCopy(hipStream_t& stream)
{
hipMemcpyAsync(deviceWTDense, deviceWTDenseCopy, (WTDenseLength) * sizeof(int), hipMemcpyDeviceToDevice, stream);
}
void WTD::GPUMemset(hipStream_t& stream)
{
hipMemsetAsync(deviceWTDenseCopy, 0, (WTDenseLength) * sizeof(int), stream);
//hipMemcpyAsync(deviceWTDenseCopy, WTDenseCopy, (WTDenseLength) * sizeof(int), hipMemcpyHostToDevice,stream);
//hipMemset(deviceWTRowSumDense, 0, (K) * sizeof(int));
}
void WTD::WTDenGPU2CPU()
{
hipMemcpy(WTDense, deviceWTDense, (WTDenseLength) * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(WTDenseCopy, deviceWTDenseCopy, (WTDenseLength) * sizeof(int), hipMemcpyDeviceToHost);
}
void WTD::WTDenCPU2Disk(string argFilePrefix) {
ofstream WTDen((argFilePrefix + string("/WTDen.txt")).c_str(), ios::binary);
for (int i = 0; i < WTDenseLength; i++) {
WTDen << WTDense[i] << "\n";
}
WTDen.close();
ofstream WTDenCopy((argFilePrefix + string("/WTDenCopy.txt")).c_str(), ios::binary);
for (int i = 0; i < WTDenseLength; i++) {
WTDenCopy << WTDenseCopy[i] << "\n";
}
WTDenCopy.close();
}
|
6a478f79cb01cdc5c0b5399bb7b4ecc5cb512970.cu
|
#include "WTDense.cuh"
WTD::WTD(int argNumOfWordD, int argWordLength) {
numOfWordD = argNumOfWordD;
wordLength = argWordLength;
WTDenseLength = argNumOfWordD*K;
/*WTDense = new int[WTDenseLength];
WTDenseCopy = new int[WTDenseLength];*/
cudaMallocHost((void**)&WTDense, WTDenseLength * sizeof(int));
cudaMallocHost((void**)&WTDenseCopy, WTDenseLength * sizeof(int));
memset(WTDenseCopy, 0, WTDenseLength * sizeof(int));
/*WTRowSumDense = new int[K];*/
}
void WTD::CPUMemSet() {
memset(WTDense, 0, WTDenseLength * sizeof(int));
memset(WTDenseCopy, 0, WTDenseLength * sizeof(int));
//memset(WTRowSumDense, 0, K * sizeof(int));
}
void WTD::GPUMemAllocate() {
cudaMalloc((void**)&deviceWTDense, (WTDenseLength) * sizeof(int));
cudaMalloc((void**)&deviceWTDenseCopy, (WTDenseLength) * sizeof(int));
//cudaMalloc((void**)&deviceWTRowSumDense, (K) * sizeof(int));
WTMemory = (2*WTDenseLength + K ) / 1000000000.0 * sizeof(int);
printf("WT memory usage(Sparse Part):%f GB\n", WTMemory);
}
void WTD::GPUMemInit()
{
cudaMemset(deviceWTDense, 0, (WTDenseLength) * sizeof(int));
cudaMemset(deviceWTDenseCopy, 0, (WTDenseLength) * sizeof(int));
//cudaMemset(deviceWTRowSumDense, 0, (K) * sizeof(int));
}
void WTD::GPUMemCopy(cudaStream_t& stream)
{
cudaMemcpyAsync(deviceWTDense, deviceWTDenseCopy, (WTDenseLength) * sizeof(int), cudaMemcpyDeviceToDevice, stream);
}
void WTD::GPUMemset(cudaStream_t& stream)
{
cudaMemsetAsync(deviceWTDenseCopy, 0, (WTDenseLength) * sizeof(int), stream);
//cudaMemcpyAsync(deviceWTDenseCopy, WTDenseCopy, (WTDenseLength) * sizeof(int), cudaMemcpyHostToDevice,stream);
//cudaMemset(deviceWTRowSumDense, 0, (K) * sizeof(int));
}
void WTD::WTDenGPU2CPU()
{
cudaMemcpy(WTDense, deviceWTDense, (WTDenseLength) * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(WTDenseCopy, deviceWTDenseCopy, (WTDenseLength) * sizeof(int), cudaMemcpyDeviceToHost);
}
void WTD::WTDenCPU2Disk(string argFilePrefix) {
ofstream WTDen((argFilePrefix + string("/WTDen.txt")).c_str(), ios::binary);
for (int i = 0; i < WTDenseLength; i++) {
WTDen << WTDense[i] << "\n";
}
WTDen.close();
ofstream WTDenCopy((argFilePrefix + string("/WTDenCopy.txt")).c_str(), ios::binary);
for (int i = 0; i < WTDenseLength; i++) {
WTDenCopy << WTDenseCopy[i] << "\n";
}
WTDenCopy.close();
}
|
68d90654166346ff681c45a7577f2353a57df53c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "Image.h"
#include "PPM.h"
#include <iostream>
#include <cstdlib>
#include <time.h>
#include <hip/hip_runtime.h>
#include <chrono>
#include <math.h>
using namespace std;
using namespace std:: chrono;
#define TILE_WIDTH 16
#define maskCols 5
#define maskRows 5
#define w (TILE_WIDTH + maskCols -1)
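//shared-memory tile width: TILE_WIDTH output pixels plus a halo of maskCols-1
//border pixels required by the convolution mask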
__global__ void tilingKernelProcessing(float * InputImageData, const float *__restrict__ kernel,
float* outputImageData, int channels, int width, int height){
__shared__ float N_ds[w][w]; //block of image in shared memory
// allocation in shared memory of image blocks
int maskRadius = maskRows/2;
for (int k = 0; k <channels; k++) {
int dest = threadIdx.y * TILE_WIDTH + threadIdx.x;
int destY = dest/w; //row of shared memory
int destX = dest%w; //col of shared memory
int srcY = blockIdx.y *TILE_WIDTH + destY - maskRadius; // index to fetch data from input image
int srcX = blockIdx.x *TILE_WIDTH + destX - maskRadius; // index to fetch data from input image
int src = (srcY *width +srcX) * channels + k; // index of input image
if(srcY>= 0 && srcY < height && srcX>=0 && srcX < width)
N_ds[destY][destX] = InputImageData[src]; // copy element of image in shared memory
else
N_ds[destY][destX] = 0;
dest = threadIdx.y * TILE_WIDTH+ threadIdx.x + TILE_WIDTH * TILE_WIDTH;
destY = dest/w;
destX = dest%w;
srcY = blockIdx.y *TILE_WIDTH + destY - maskRadius;
srcX = blockIdx.x *TILE_WIDTH + destX - maskRadius;
src = (srcY *width +srcX) * channels + k;
if(destY < w){
if(srcY>= 0 && srcY < height && srcX>=0 && srcX < width)
N_ds[destY][destX] = InputImageData[src];
else
N_ds[destY][destX] = 0;
}
__syncthreads();
//compute kernel convolution
float accum = 0;
int y, x;
for (y= 0; y < maskCols; y++)
for(x = 0; x<maskRows; x++)
accum += N_ds[threadIdx.y + y][threadIdx.x + x] *kernel[y * maskCols + x];
y = blockIdx.y * TILE_WIDTH + threadIdx.y;
x = blockIdx.x * TILE_WIDTH + threadIdx.x;
if(y < height && x < width)
outputImageData[(y * width + x) * channels + k] = accum;
__syncthreads();
}
}
int main(){
int imageChannels;
int imageHeight;
int imageWidth;
Image_t* inputImage;
Image_t* outputImage;
float* hostInputImageData;
float* hostOutputImageData;
float* deviceInputImageData;
float* deviceOutputImageData;
float* deviceMaskData;
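//5x5 box-blur mask: all 25 weights equal 0.04 = 1/25, i.e. a plain averaging filter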
float hostMaskData[maskRows * maskCols]={
0.04, 0.04, 0.04, 0.04, 0.04,
0.04, 0.04, 0.04, 0.04, 0.04,
0.04, 0.04, 0.04, 0.04, 0.04,
0.04, 0.04, 0.04, 0.04, 0.04,
0.04, 0.04, 0.04, 0.04, 0.04
};
inputImage = PPM_import("./img/computer_programming.ppm");
imageWidth = Image_getWidth(inputImage);
imageHeight = Image_getHeight(inputImage);
imageChannels = Image_getChannels(inputImage);
outputImage = Image_new(imageWidth, imageHeight, imageChannels);
hostInputImageData = Image_getData(inputImage);
hostOutputImageData = Image_getData(outputImage);
hipDeviceReset();
hipMalloc((void **) &deviceInputImageData, imageWidth * imageHeight *
imageChannels * sizeof(float));
hipMalloc((void **) &deviceOutputImageData, imageWidth * imageHeight *
imageChannels * sizeof(float));
hipMalloc((void **) &deviceMaskData, maskRows * maskCols
* sizeof(float));
hipMemcpy(deviceInputImageData, hostInputImageData,
imageWidth * imageHeight * imageChannels * sizeof(float),
hipMemcpyHostToDevice);
hipMemcpy(deviceMaskData, hostMaskData,
maskRows * maskCols * sizeof(float),
hipMemcpyHostToDevice);
dim3 dimGrid(ceil((float) imageWidth/TILE_WIDTH),
ceil((float) imageHeight/TILE_WIDTH));
dim3 dimBlock(TILE_WIDTH,TILE_WIDTH,1);
cout <<"SHARED MEMORY KERNEL CONVOLUTION" << endl;
cout << "image dimensions: "<< imageWidth << "x" << imageHeight << endl;
cout << "start parallelizing" << endl;
cout << "elapsed in time: ";
high_resolution_clock::time_point start= high_resolution_clock::now();
hipLaunchKernelGGL(( tilingKernelProcessing), dim3(dimGrid),dim3(dimBlock), 0, 0, deviceInputImageData, deviceMaskData, deviceOutputImageData,
imageChannels, imageWidth, imageHeight);
high_resolution_clock::time_point end= high_resolution_clock::now();
chrono::duration<double> duration = end - start;
cout << duration.count()*1000 << endl;
cout << "----------------------------------" << endl;
hipMemcpy(hostOutputImageData, deviceOutputImageData, imageWidth * imageHeight *
imageChannels * sizeof(float), hipMemcpyDeviceToHost);
PPM_export("./output/result.ppm", outputImage);
hipMemset(deviceInputImageData,0,imageWidth * imageHeight *
imageChannels * sizeof(float));
hipMemset(deviceOutputImageData,0,imageWidth * imageHeight *
imageChannels * sizeof(float));
hipMemset(deviceMaskData,0,maskRows * maskCols
* sizeof(float));
hipFree(deviceInputImageData);
hipFree(deviceOutputImageData);
hipFree(deviceMaskData);
Image_delete(outputImage);
Image_delete(inputImage);
}
|
68d90654166346ff681c45a7577f2353a57df53c.cu
|
#include "Image.h"
#include "PPM.h"
#include <iostream>
#include <cstdlib>
#include <time.h>
#include <cuda_runtime.h>
#include <chrono>
#include <math.h>
using namespace std;
using namespace std:: chrono;
#define TILE_WIDTH 16
#define maskCols 5
#define maskRows 5
#define w (TILE_WIDTH + maskCols -1)
__global__ void tilingKernelProcessing(float * InputImageData, const float *__restrict__ kernel,
float* outputImageData, int channels, int width, int height){
__shared__ float N_ds[w][w]; //block of image in shared memory
// allocation in shared memory of image blocks
int maskRadius = maskRows/2;
for (int k = 0; k <channels; k++) {
int dest = threadIdx.y * TILE_WIDTH + threadIdx.x;
int destY = dest/w; //row of shared memory
int destX = dest%w; //col of shared memory
int srcY = blockIdx.y *TILE_WIDTH + destY - maskRadius; // index to fetch data from input image
int srcX = blockIdx.x *TILE_WIDTH + destX - maskRadius; // index to fetch data from input image
int src = (srcY *width +srcX) * channels + k; // index of input image
if(srcY>= 0 && srcY < height && srcX>=0 && srcX < width)
N_ds[destY][destX] = InputImageData[src]; // copy element of image in shared memory
else
N_ds[destY][destX] = 0;
dest = threadIdx.y * TILE_WIDTH+ threadIdx.x + TILE_WIDTH * TILE_WIDTH;
destY = dest/w;
destX = dest%w;
srcY = blockIdx.y *TILE_WIDTH + destY - maskRadius;
srcX = blockIdx.x *TILE_WIDTH + destX - maskRadius;
src = (srcY *width +srcX) * channels + k;
if(destY < w){
if(srcY>= 0 && srcY < height && srcX>=0 && srcX < width)
N_ds[destY][destX] = InputImageData[src];
else
N_ds[destY][destX] = 0;
}
__syncthreads();
//compute kernel convolution
float accum = 0;
int y, x;
for (y= 0; y < maskCols; y++)
for(x = 0; x<maskRows; x++)
accum += N_ds[threadIdx.y + y][threadIdx.x + x] *kernel[y * maskCols + x];
y = blockIdx.y * TILE_WIDTH + threadIdx.y;
x = blockIdx.x * TILE_WIDTH + threadIdx.x;
if(y < height && x < width)
outputImageData[(y * width + x) * channels + k] = accum;
__syncthreads();
}
}
int main(){
int imageChannels;
int imageHeight;
int imageWidth;
Image_t* inputImage;
Image_t* outputImage;
float* hostInputImageData;
float* hostOutputImageData;
float* deviceInputImageData;
float* deviceOutputImageData;
float* deviceMaskData;
float hostMaskData[maskRows * maskCols]={
0.04, 0.04, 0.04, 0.04, 0.04,
0.04, 0.04, 0.04, 0.04, 0.04,
0.04, 0.04, 0.04, 0.04, 0.04,
0.04, 0.04, 0.04, 0.04, 0.04,
0.04, 0.04, 0.04, 0.04, 0.04
};
inputImage = PPM_import("./img/computer_programming.ppm");
imageWidth = Image_getWidth(inputImage);
imageHeight = Image_getHeight(inputImage);
imageChannels = Image_getChannels(inputImage);
outputImage = Image_new(imageWidth, imageHeight, imageChannels);
hostInputImageData = Image_getData(inputImage);
hostOutputImageData = Image_getData(outputImage);
cudaDeviceReset();
cudaMalloc((void **) &deviceInputImageData, imageWidth * imageHeight *
imageChannels * sizeof(float));
cudaMalloc((void **) &deviceOutputImageData, imageWidth * imageHeight *
imageChannels * sizeof(float));
cudaMalloc((void **) &deviceMaskData, maskRows * maskCols
* sizeof(float));
cudaMemcpy(deviceInputImageData, hostInputImageData,
imageWidth * imageHeight * imageChannels * sizeof(float),
cudaMemcpyHostToDevice);
cudaMemcpy(deviceMaskData, hostMaskData,
maskRows * maskCols * sizeof(float),
cudaMemcpyHostToDevice);
dim3 dimGrid(ceil((float) imageWidth/TILE_WIDTH),
ceil((float) imageHeight/TILE_WIDTH));
dim3 dimBlock(TILE_WIDTH,TILE_WIDTH,1);
cout <<"SHARED MEMORY KERNEL CONVOLUTION" << endl;
cout << "image dimensions: "<< imageWidth << "x" << imageHeight << endl;
cout << "start parallelizing" << endl;
cout << "elapsed in time: ";
high_resolution_clock::time_point start= high_resolution_clock::now();
tilingKernelProcessing<<<dimGrid,dimBlock>>>(deviceInputImageData, deviceMaskData, deviceOutputImageData,
imageChannels, imageWidth, imageHeight);
high_resolution_clock::time_point end= high_resolution_clock::now();
chrono::duration<double> duration = end - start;
cout << duration.count()*1000 << endl;
cout << "----------------------------------" << endl;
cudaMemcpy(hostOutputImageData, deviceOutputImageData, imageWidth * imageHeight *
imageChannels * sizeof(float), cudaMemcpyDeviceToHost);
PPM_export("./output/result.ppm", outputImage);
cudaMemset(deviceInputImageData,0,imageWidth * imageHeight *
imageChannels * sizeof(float));
cudaMemset(deviceOutputImageData,0,imageWidth * imageHeight *
imageChannels * sizeof(float));
cudaMemset(deviceMaskData,0,maskRows * maskCols
* sizeof(float));
cudaFree(deviceInputImageData);
cudaFree(deviceOutputImageData);
cudaFree(deviceMaskData);
Image_delete(outputImage);
Image_delete(inputImage);
}
|
130df46179b74a49af016beb9b736879a2afa399.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <dmlc/filesystem.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <cmath>
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include "xgboost/c_api.h"
#include "../../../src/common/device_helpers.cuh"
#include "../../../src/common/hist_util.h"
#include "../helpers.h"
namespace xgboost {
namespace common {
void TestDeviceSketch(bool use_external_memory) {
// create the data
int nrows = 10001;
std::shared_ptr<xgboost::DMatrix> *dmat = nullptr;
size_t num_cols = 1;
dmlc::TemporaryDirectory tmpdir;
std::string file = tmpdir.path + "/big.libsvm";
if (use_external_memory) {
auto sp_dmat = CreateSparsePageDMatrix(nrows * 3, 128UL, file); // 3 entries/row
dmat = new std::shared_ptr<xgboost::DMatrix>(std::move(sp_dmat));
num_cols = 5;
} else {
std::vector<float> test_data(nrows);
auto count_iter = thrust::make_counting_iterator(0);
// fill in reverse order
std::copy(count_iter, count_iter + nrows, test_data.rbegin());
// create the DMatrix
DMatrixHandle dmat_handle;
XGDMatrixCreateFromMat(test_data.data(), nrows, 1, -1,
&dmat_handle);
dmat = static_cast<std::shared_ptr<xgboost::DMatrix> *>(dmat_handle);
}
tree::TrainParam p;
p.max_bin = 20;
int gpu_batch_nrows = 0;
// find quantiles on the CPU
HistogramCuts hmat_cpu;
hmat_cpu.Build((*dmat).get(), p.max_bin);
// find the cuts on the GPU
HistogramCuts hmat_gpu;
size_t row_stride = DeviceSketch(p, CreateEmptyGenericParam(0), gpu_batch_nrows,
dmat->get(), &hmat_gpu);
// compare the row stride with the one obtained from the dmatrix
size_t expected_row_stride = 0;
for (const auto &batch : dmat->get()->GetBatches<xgboost::SparsePage>()) {
const auto &offset_vec = batch.offset.ConstHostVector();
for (int i = 1; i <= offset_vec.size() -1; ++i) {
expected_row_stride = std::max(expected_row_stride, offset_vec[i] - offset_vec[i-1]);
}
}
ASSERT_EQ(expected_row_stride, row_stride);
// compare the cuts
double eps = 1e-2;
ASSERT_EQ(hmat_gpu.MinValues().size(), num_cols);
ASSERT_EQ(hmat_gpu.Ptrs().size(), num_cols + 1);
ASSERT_EQ(hmat_gpu.Values().size(), hmat_cpu.Values().size());
ASSERT_LT(fabs(hmat_cpu.MinValues()[0] - hmat_gpu.MinValues()[0]), eps * nrows);
for (int i = 0; i < hmat_gpu.Values().size(); ++i) {
ASSERT_LT(fabs(hmat_cpu.Values()[i] - hmat_gpu.Values()[i]), eps * nrows);
}
delete dmat;
}
TEST(gpu_hist_util, DeviceSketch) {
TestDeviceSketch(false);
}
TEST(gpu_hist_util, DeviceSketch_ExternalMemory) {
TestDeviceSketch(true);
}
} // namespace common
} // namespace xgboost
|
130df46179b74a49af016beb9b736879a2afa399.cu
|
#include <dmlc/filesystem.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <cmath>
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include "xgboost/c_api.h"
#include "../../../src/common/device_helpers.cuh"
#include "../../../src/common/hist_util.h"
#include "../helpers.h"
namespace xgboost {
namespace common {
void TestDeviceSketch(bool use_external_memory) {
// create the data
int nrows = 10001;
std::shared_ptr<xgboost::DMatrix> *dmat = nullptr;
size_t num_cols = 1;
dmlc::TemporaryDirectory tmpdir;
std::string file = tmpdir.path + "/big.libsvm";
if (use_external_memory) {
auto sp_dmat = CreateSparsePageDMatrix(nrows * 3, 128UL, file); // 3 entries/row
dmat = new std::shared_ptr<xgboost::DMatrix>(std::move(sp_dmat));
num_cols = 5;
} else {
std::vector<float> test_data(nrows);
auto count_iter = thrust::make_counting_iterator(0);
// fill in reverse order
std::copy(count_iter, count_iter + nrows, test_data.rbegin());
// create the DMatrix
DMatrixHandle dmat_handle;
XGDMatrixCreateFromMat(test_data.data(), nrows, 1, -1,
&dmat_handle);
dmat = static_cast<std::shared_ptr<xgboost::DMatrix> *>(dmat_handle);
}
tree::TrainParam p;
p.max_bin = 20;
int gpu_batch_nrows = 0;
// find quantiles on the CPU
HistogramCuts hmat_cpu;
hmat_cpu.Build((*dmat).get(), p.max_bin);
// find the cuts on the GPU
HistogramCuts hmat_gpu;
size_t row_stride = DeviceSketch(p, CreateEmptyGenericParam(0), gpu_batch_nrows,
dmat->get(), &hmat_gpu);
// compare the row stride with the one obtained from the dmatrix
size_t expected_row_stride = 0;
for (const auto &batch : dmat->get()->GetBatches<xgboost::SparsePage>()) {
const auto &offset_vec = batch.offset.ConstHostVector();
for (int i = 1; i <= offset_vec.size() -1; ++i) {
expected_row_stride = std::max(expected_row_stride, offset_vec[i] - offset_vec[i-1]);
}
}
ASSERT_EQ(expected_row_stride, row_stride);
// compare the cuts
double eps = 1e-2;
ASSERT_EQ(hmat_gpu.MinValues().size(), num_cols);
ASSERT_EQ(hmat_gpu.Ptrs().size(), num_cols + 1);
ASSERT_EQ(hmat_gpu.Values().size(), hmat_cpu.Values().size());
ASSERT_LT(fabs(hmat_cpu.MinValues()[0] - hmat_gpu.MinValues()[0]), eps * nrows);
for (int i = 0; i < hmat_gpu.Values().size(); ++i) {
ASSERT_LT(fabs(hmat_cpu.Values()[i] - hmat_gpu.Values()[i]), eps * nrows);
}
delete dmat;
}
TEST(gpu_hist_util, DeviceSketch) {
TestDeviceSketch(false);
}
TEST(gpu_hist_util, DeviceSketch_ExternalMemory) {
TestDeviceSketch(true);
}
} // namespace common
} // namespace xgboost
|
f0c848c0db73759197ebf879b8d547cd509eb271.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void returnQ1 ( const int dim, const int n, const float *p1, const float *p0, const float *s1, const float *s0, const float *zr, float *q ) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if ( i < n ) {
if ( p1[i] == INF || - 0.5 * ( s1[i] + p1[i] - s0[i] - p0[i] ) < -10. ) {
q[i] = 0.0;
} else if ( - 0.5 * ( s1[i] + p1[i] - s0[i] - p0[i] ) > 10. ) {
q[i] = 1.E10;
} else {
q[i] = expf ( - 0.5 * ( s1[i] + p1[i] - s0[i] - p0[i] ) ) * powf ( zr[i], dim - 1 );
}
}
}
|
f0c848c0db73759197ebf879b8d547cd509eb271.cu
|
#include "includes.h"
__global__ void returnQ1 ( const int dim, const int n, const float *p1, const float *p0, const float *s1, const float *s0, const float *zr, float *q ) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if ( i < n ) {
if ( p1[i] == INF || - 0.5 * ( s1[i] + p1[i] - s0[i] - p0[i] ) < -10. ) {
q[i] = 0.0;
} else if ( - 0.5 * ( s1[i] + p1[i] - s0[i] - p0[i] ) > 10. ) {
q[i] = 1.E10;
} else {
q[i] = expf ( - 0.5 * ( s1[i] + p1[i] - s0[i] - p0[i] ) ) * powf ( zr[i], dim - 1 );
}
}
}
|
e33fdb200e19fa2d5ddd40b8ac28800afb322268.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_utils.h>
#include <gtest/gtest.h>
#include <test_utils.h>
#include "ml_utils.h"
#include "randomforest/randomforest.h"
namespace ML {
using namespace MLCommon;
template <typename T> // template useless for now.
struct RfInputs {
int n_rows;
int n_cols;
int n_trees;
float max_features;
float rows_sample;
int n_inference_rows;
int max_depth;
int max_leaves;
bool bootstrap;
bool bootstrap_features;
int n_bins;
int split_algo;
int min_rows_per_node;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const RfInputs<T>& dims) {
return os;
}
template <typename T>
class RfTest : public ::testing::TestWithParam<RfInputs<T>> {
protected:
void basicTest() {
params = ::testing::TestWithParam<RfInputs<T>>::GetParam();
DecisionTree::DecisionTreeParams tree_params(
params.max_depth, params.max_leaves, params.max_features, params.n_bins,
params.split_algo, params.min_rows_per_node, params.bootstrap_features);
RF_params rf_params(params.bootstrap, params.bootstrap_features,
params.n_trees, params.rows_sample, tree_params);
//rf_params.print();
//--------------------------------------------------------
// Random Forest
//--------------------------------------------------------
int data_len = params.n_rows * params.n_cols;
allocate(data, data_len);
allocate(labels, params.n_rows);
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
// Populate data (assume Col major)
std::vector<T> data_h = {30.0, 1.0, 2.0, 0.0, 10.0, 20.0, 10.0, 40.0};
data_h.resize(data_len);
updateDevice(data, data_h.data(), data_len, stream);
// Populate labels
labels_h = {0, 1, 0, 4};
labels_h.resize(params.n_rows);
preprocess_labels(params.n_rows, labels_h, labels_map);
updateDevice(labels, labels_h.data(), params.n_rows, stream);
rf_classifier = new typename rfClassifier<T>::rfClassifier(rf_params);
cumlHandle handle;
handle.setStream(stream);
fit(handle, rf_classifier, data, params.n_rows, params.n_cols, labels,
labels_map.size());
CUDA_CHECK(hipStreamSynchronize(stream));
CUDA_CHECK(hipStreamDestroy(stream));
// Inference data: same as train, but row major
int inference_data_len = params.n_inference_rows * params.n_cols;
inference_data_h = {30.0, 10.0, 1.0, 20.0, 2.0, 10.0, 0.0, 40.0};
inference_data_h.resize(inference_data_len);
// Predict and compare against known labels
predicted_labels.resize(params.n_inference_rows);
RF_metrics tmp = score(handle, rf_classifier, inference_data_h.data(),
labels_h.data(), params.n_inference_rows,
params.n_cols, predicted_labels.data(), false);
accuracy = tmp.accuracy;
}
void SetUp() override { basicTest(); }
void TearDown() override {
accuracy = -1.0f; // reset accuracy
postprocess_labels(params.n_rows, labels_h, labels_map);
inference_data_h.clear();
labels_h.clear();
labels_map.clear();
predicted_labels.clear();
CUDA_CHECK(hipFree(labels));
CUDA_CHECK(hipFree(data));
delete rf_classifier;
}
protected:
RfInputs<T> params;
T* data;
int* labels;
std::vector<T> inference_data_h;
std::vector<int> labels_h;
std::map<int, int>
labels_map; //unique map of labels to int vals starting from 0
rfClassifier<T>* rf_classifier;
float accuracy = -1.0f; // overridden in each test SetUp and TearDown
std::vector<int> predicted_labels;
};
const std::vector<RfInputs<float>> inputsf2 = {
{4, 2, 1, 1.0f, 1.0f, 4, -1, -1, false, false, 4, SPLIT_ALGO::HIST,
2}, // single tree forest, bootstrap false, unlimited depth, 4 bins
{4, 2, 1, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST,
2}, // single tree forest, bootstrap false, depth of 8, 4 bins
{4, 2, 10, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST,
2}, //forest with 10 trees, all trees should produce identical predictions (no bootstrapping or column subsampling)
{4, 2, 10, 0.8f, 0.8f, 4, 8, -1, true, false, 3, SPLIT_ALGO::HIST,
2}, //forest with 10 trees, with bootstrap and column subsampling enabled, 3 bins
{4, 2, 10, 0.8f, 0.8f, 4, 8, -1, true, false, 3, SPLIT_ALGO::GLOBAL_QUANTILE,
2} //forest with 10 trees, with bootstrap and column subsampling enabled, 3 bins, different split algorithm
};
const std::vector<RfInputs<double>> inputsd2 = { // Same as inputsf2
{4, 2, 1, 1.0f, 1.0f, 4, -1, -1, false, false, 4, SPLIT_ALGO::HIST, 2},
{4, 2, 1, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2},
{4, 2, 10, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2},
{4, 2, 10, 0.8f, 0.8f, 4, 8, -1, true, false, 3, SPLIT_ALGO::HIST, 2},
{4, 2, 10, 0.8f, 0.8f, 4, 8, -1, true, false, 3, SPLIT_ALGO::GLOBAL_QUANTILE,
2}};
typedef RfTest<float> RfTestF;
TEST_P(RfTestF, Fit) {
//rf_classifier->print_rf_detailed(); // Prints all trees in the forest. Leaf nodes use the remapped values from labels_map.
if (!params.bootstrap && (params.max_features == 1.0f)) {
ASSERT_TRUE(accuracy == 1.0f);
} else {
ASSERT_TRUE(accuracy >= 0.75f); // Empirically derived accuracy range
}
}
typedef RfTest<double> RfTestD;
TEST_P(RfTestD, Fit) {
if (!params.bootstrap && (params.max_features == 1.0f)) {
ASSERT_TRUE(accuracy == 1.0f);
} else {
ASSERT_TRUE(accuracy >= 0.75f);
}
}
INSTANTIATE_TEST_CASE_P(RfTests, RfTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(RfTests, RfTestD, ::testing::ValuesIn(inputsd2));
} // end namespace ML
|
e33fdb200e19fa2d5ddd40b8ac28800afb322268.cu
|
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_utils.h>
#include <gtest/gtest.h>
#include <test_utils.h>
#include "ml_utils.h"
#include "randomforest/randomforest.h"
namespace ML {
using namespace MLCommon;
template <typename T> // template useless for now.
struct RfInputs {
int n_rows;
int n_cols;
int n_trees;
float max_features;
float rows_sample;
int n_inference_rows;
int max_depth;
int max_leaves;
bool bootstrap;
bool bootstrap_features;
int n_bins;
int split_algo;
int min_rows_per_node;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const RfInputs<T>& dims) {
return os;
}
template <typename T>
class RfTest : public ::testing::TestWithParam<RfInputs<T>> {
protected:
void basicTest() {
params = ::testing::TestWithParam<RfInputs<T>>::GetParam();
DecisionTree::DecisionTreeParams tree_params(
params.max_depth, params.max_leaves, params.max_features, params.n_bins,
params.split_algo, params.min_rows_per_node, params.bootstrap_features);
RF_params rf_params(params.bootstrap, params.bootstrap_features,
params.n_trees, params.rows_sample, tree_params);
//rf_params.print();
//--------------------------------------------------------
// Random Forest
//--------------------------------------------------------
int data_len = params.n_rows * params.n_cols;
allocate(data, data_len);
allocate(labels, params.n_rows);
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
// Populate data (assume Col major)
std::vector<T> data_h = {30.0, 1.0, 2.0, 0.0, 10.0, 20.0, 10.0, 40.0};
data_h.resize(data_len);
updateDevice(data, data_h.data(), data_len, stream);
// Populate labels
labels_h = {0, 1, 0, 4};
labels_h.resize(params.n_rows);
preprocess_labels(params.n_rows, labels_h, labels_map);
updateDevice(labels, labels_h.data(), params.n_rows, stream);
rf_classifier = new typename rfClassifier<T>::rfClassifier(rf_params);
cumlHandle handle;
handle.setStream(stream);
fit(handle, rf_classifier, data, params.n_rows, params.n_cols, labels,
labels_map.size());
CUDA_CHECK(cudaStreamSynchronize(stream));
CUDA_CHECK(cudaStreamDestroy(stream));
// Inference data: same as train, but row major
int inference_data_len = params.n_inference_rows * params.n_cols;
inference_data_h = {30.0, 10.0, 1.0, 20.0, 2.0, 10.0, 0.0, 40.0};
inference_data_h.resize(inference_data_len);
// Predict and compare against known labels
predicted_labels.resize(params.n_inference_rows);
RF_metrics tmp = score(handle, rf_classifier, inference_data_h.data(),
labels_h.data(), params.n_inference_rows,
params.n_cols, predicted_labels.data(), false);
accuracy = tmp.accuracy;
}
void SetUp() override { basicTest(); }
void TearDown() override {
accuracy = -1.0f; // reset accuracy
postprocess_labels(params.n_rows, labels_h, labels_map);
inference_data_h.clear();
labels_h.clear();
labels_map.clear();
predicted_labels.clear();
CUDA_CHECK(cudaFree(labels));
CUDA_CHECK(cudaFree(data));
delete rf_classifier;
}
protected:
RfInputs<T> params;
T* data;
int* labels;
std::vector<T> inference_data_h;
std::vector<int> labels_h;
std::map<int, int>
labels_map; //unique map of labels to int vals starting from 0
rfClassifier<T>* rf_classifier;
float accuracy = -1.0f; // overridden in each test SetUp and TearDown
std::vector<int> predicted_labels;
};
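// Each brace-initialized entry below maps positionally onto the RfInputs fields:
// {n_rows, n_cols, n_trees, max_features, rows_sample, n_inference_rows,
//  max_depth, max_leaves, bootstrap, bootstrap_features, n_bins, split_algo,
//  min_rows_per_node}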
const std::vector<RfInputs<float>> inputsf2 = {
{4, 2, 1, 1.0f, 1.0f, 4, -1, -1, false, false, 4, SPLIT_ALGO::HIST,
2}, // single tree forest, bootstrap false, unlimited depth, 4 bins
{4, 2, 1, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST,
2}, // single tree forest, bootstrap false, depth of 8, 4 bins
{4, 2, 10, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST,
2}, //forest with 10 trees, all trees should produce identical predictions (no bootstrapping or column subsampling)
{4, 2, 10, 0.8f, 0.8f, 4, 8, -1, true, false, 3, SPLIT_ALGO::HIST,
2}, //forest with 10 trees, with bootstrap and column subsampling enabled, 3 bins
{4, 2, 10, 0.8f, 0.8f, 4, 8, -1, true, false, 3, SPLIT_ALGO::GLOBAL_QUANTILE,
2} //forest with 10 trees, with bootstrap and column subsampling enabled, 3 bins, different split algorithm
};
const std::vector<RfInputs<double>> inputsd2 = { // Same as inputsf2
{4, 2, 1, 1.0f, 1.0f, 4, -1, -1, false, false, 4, SPLIT_ALGO::HIST, 2},
{4, 2, 1, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2},
{4, 2, 10, 1.0f, 1.0f, 4, 8, -1, false, false, 4, SPLIT_ALGO::HIST, 2},
{4, 2, 10, 0.8f, 0.8f, 4, 8, -1, true, false, 3, SPLIT_ALGO::HIST, 2},
{4, 2, 10, 0.8f, 0.8f, 4, 8, -1, true, false, 3, SPLIT_ALGO::GLOBAL_QUANTILE,
2}};
typedef RfTest<float> RfTestF;
TEST_P(RfTestF, Fit) {
//rf_classifier->print_rf_detailed(); // Prints all trees in the forest. Leaf nodes use the remapped values from labels_map.
if (!params.bootstrap && (params.max_features == 1.0f)) {
ASSERT_TRUE(accuracy == 1.0f);
} else {
ASSERT_TRUE(accuracy >= 0.75f); // Empirically derived accuracy range
}
}
typedef RfTest<double> RfTestD;
TEST_P(RfTestD, Fit) {
if (!params.bootstrap && (params.max_features == 1.0f)) {
ASSERT_TRUE(accuracy == 1.0f);
} else {
ASSERT_TRUE(accuracy >= 0.75f);
}
}
INSTANTIATE_TEST_CASE_P(RfTests, RfTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(RfTests, RfTestD, ::testing::ValuesIn(inputsd2));
} // end namespace ML
|
591e1d5267ff24182576888bd2c1e81f9f0eb1d4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <hip/hip_runtime.h>
#include "complex.h"
#include "kernels.h"
bool check (const char *cs, int n)
{
bool ok = true;
for (int i = 0; i < n; i++) {
if (cs[i] != 5) {
ok = false;
break;
}
}
return ok;
}
int main(int argc, char* argv[]) {
if (argc != 3) {
printf("Usage: %s <size> <repeat>\n", argv[0]);
return 1;
}
const int n = atoi(argv[1]);
const int repeat = atoi(argv[2]);
char* cs = (char*) malloc (n);
char* d_cs;
hipMalloc((void**)&d_cs, n);
dim3 grids ((n + 255)/256);
dim3 blocks (256);
// warmup
hipLaunchKernelGGL(complex_float, dim3(grids), dim3(blocks), 0, 0, d_cs, n);
hipLaunchKernelGGL(complex_double, dim3(grids), dim3(blocks), 0, 0, d_cs, n);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
// complex numbers in single precision
for (int i = 0; i < repeat; i++) {
hipLaunchKernelGGL(complex_float, dim3(grids), dim3(blocks), 0, 0, d_cs, n);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time (float) %f (s)\n", time * 1e-9f / repeat);
hipMemcpy(cs, d_cs, n, hipMemcpyDeviceToHost);
bool complex_float_check = check(cs, n);
start = std::chrono::steady_clock::now();
// complex numbers in double precision
for (int i = 0; i < repeat; i++) {
hipLaunchKernelGGL(complex_double, dim3(grids), dim3(blocks), 0, 0, d_cs, n);
}
hipDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time (double) %f (s)\n", time * 1e-9f / repeat);
hipMemcpy(cs, d_cs, n, hipMemcpyDeviceToHost);
bool complex_double_check = check(cs, n);
printf("%s\n", (complex_float_check && complex_double_check)
? "PASS" : "FAIL");
hipFree(d_cs);
free(cs);
return 0;
}
|
591e1d5267ff24182576888bd2c1e81f9f0eb1d4.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <cuda.h>
#include "complex.h"
#include "kernels.h"
bool check (const char *cs, int n)
{
bool ok = true;
for (int i = 0; i < n; i++) {
if (cs[i] != 5) {
ok = false;
break;
}
}
return ok;
}
int main(int argc, char* argv[]) {
if (argc != 3) {
printf("Usage: %s <size> <repeat>\n", argv[0]);
return 1;
}
const int n = atoi(argv[1]);
const int repeat = atoi(argv[2]);
char* cs = (char*) malloc (n);
char* d_cs;
cudaMalloc((void**)&d_cs, n);
dim3 grids ((n + 255)/256);
dim3 blocks (256);
// warmup
complex_float<<<grids, blocks>>>(d_cs, n);
complex_double<<<grids, blocks>>>(d_cs, n);
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
// complex numbers in single precision
for (int i = 0; i < repeat; i++) {
complex_float<<<grids, blocks>>>(d_cs, n);
}
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time (float) %f (s)\n", time * 1e-9f / repeat);
cudaMemcpy(cs, d_cs, n, cudaMemcpyDeviceToHost);
bool complex_float_check = check(cs, n);
start = std::chrono::steady_clock::now();
// complex numbers in double precision
for (int i = 0; i < repeat; i++) {
complex_double<<<grids, blocks>>>(d_cs, n);
}
cudaDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time (double) %f (s)\n", time * 1e-9f / repeat);
cudaMemcpy(cs, d_cs, n, cudaMemcpyDeviceToHost);
bool complex_double_check = check(cs, n);
printf("%s\n", (complex_float_check && complex_double_check)
? "PASS" : "FAIL");
cudaFree(d_cs);
free(cs);
return 0;
}
|
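kernels.h and complex.h are not part of this listing; the benchmark above only requires that complex_float and complex_double write the byte value 5 into every element of d_cs. The following is a minimal sketch of kernels that would satisfy that check, written against cuComplex.h rather than the project's own complex.h, so the arithmetic and types are assumptions, not the actual implementation.

#include <cuComplex.h>

// Exercise single-precision complex arithmetic; (1+2i) + (2+3i) = 3+5i,
// so storing the imaginary part yields the 5 that the host-side check expects.
__global__ void complex_float (char* cs, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    cuFloatComplex a = make_cuFloatComplex(1.f, 2.f);
    cuFloatComplex b = make_cuFloatComplex(2.f, 3.f);
    cs[i] = (char) cuCimagf(cuCaddf(a, b));  // 5
  }
}

// Same computation in double precision.
__global__ void complex_double (char* cs, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    cuDoubleComplex a = make_cuDoubleComplex(1.0, 2.0);
    cuDoubleComplex b = make_cuDoubleComplex(2.0, 3.0);
    cs[i] = (char) cuCimag(cuCadd(a, b));    // 5
  }
}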
127d74bcb59f08a746b67b873a57cf6af29b3d61.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "scatterKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const unsigned int *d_In = NULL;
hipMalloc(&d_In, XSIZE*YSIZE);
const unsigned int *d_FalseKeyAddresses = NULL;
hipMalloc(&d_FalseKeyAddresses, XSIZE*YSIZE);
unsigned int *d_Out = NULL;
hipMalloc(&d_Out, XSIZE*YSIZE);
const unsigned int totalFalses = 1;
size_t size = XSIZE*YSIZE;
unsigned int bitPos = 1;
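// Round iXSIZE and iYSIZE up to the next multiples of BLOCKX and BLOCKY so the grid launched below covers every element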
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(scatterKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, d_In, d_FalseKeyAddresses, d_Out, totalFalses, size, bitPos);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(scatterKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, d_In, d_FalseKeyAddresses, d_Out, totalFalses, size, bitPos);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(scatterKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, d_In, d_FalseKeyAddresses, d_Out, totalFalses, size, bitPos);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
127d74bcb59f08a746b67b873a57cf6af29b3d61.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "scatterKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const unsigned int *d_In = NULL;
cudaMalloc(&d_In, XSIZE*YSIZE);
const unsigned int *d_FalseKeyAddresses = NULL;
cudaMalloc(&d_FalseKeyAddresses, XSIZE*YSIZE);
unsigned int *d_Out = NULL;
cudaMalloc(&d_Out, XSIZE*YSIZE);
const unsigned int totalFalses = 1;
size_t size = XSIZE*YSIZE;
unsigned int bitPos = 1;
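// Round iXSIZE and iYSIZE up to the next multiples of BLOCKX and BLOCKY so the grid launched below covers every element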
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
scatterKernel<<<gridBlock,threadBlock>>>(d_In,d_FalseKeyAddresses,d_Out,totalFalses,size,bitPos);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
scatterKernel<<<gridBlock,threadBlock>>>(d_In,d_FalseKeyAddresses,d_Out,totalFalses,size,bitPos);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
scatterKernel<<<gridBlock,threadBlock>>>(d_In,d_FalseKeyAddresses,d_Out,totalFalses,size,bitPos);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
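scatterKernel.cu is included but not reproduced in this listing. Given the argument list above (input keys, per-key "false" addresses, output buffer, total number of false keys, element count and bit position), the kernel is most likely the scatter step of a radix-sort split; the sketch below shows that conventional implementation and is an assumption, not the benchmarked source.

__global__ void scatterKernel(const unsigned int* d_In,
                              const unsigned int* d_FalseKeyAddresses,
                              unsigned int* d_Out,
                              const unsigned int totalFalses,
                              size_t size,
                              unsigned int bitPos)
{
  // Flatten the 2D grid/block launch used by the benchmark into a linear index.
  size_t x = blockIdx.x * (size_t)blockDim.x + threadIdx.x;
  size_t y = blockIdx.y * (size_t)blockDim.y + threadIdx.y;
  size_t i = y * (size_t)gridDim.x * blockDim.x + x;
  if (i < size) {
    bool bitIsSet = (d_In[i] >> bitPos) & 1u;          // key's bit at bitPos
    unsigned int falseAddr = d_FalseKeyAddresses[i];   // exclusive scan of the "bit is 0" flags
    // Keys with a 0 bit are packed first; keys with a 1 bit follow after totalFalses.
    unsigned int dst = bitIsSet ? totalFalses + (unsigned int)i - falseAddr : falseAddr;
    d_Out[dst] = d_In[i];
  }
}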
3cb3859bfae506efb1833711a4bc9c4e269d6925.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <sstream>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h>
#include <ATen/native/hip/jit_utils.h>
#include <c10/core/ScalarType.h>
#include <c10/util/irange.h>
namespace at { namespace cuda { namespace jit {
const std::string jit_common_types = R"ESCAPE(
typedef long long int int64_t;
typedef unsigned int uint32_t;
typedef signed char int8_t;
typedef unsigned char uint8_t; // NOTE: this MUST be "unsigned char"! "char" is equivalent to "signed char"
typedef short int16_t;
static_assert(sizeof(int64_t) == 8, "expected size does not match");
static_assert(sizeof(uint32_t) == 4, "expected size does not match");
static_assert(sizeof(int8_t) == 1, "expected size does not match");
constexpr int num_threads = 128;
constexpr int thread_work_size = 4; // TODO: make template substitution once we decide where those vars live
constexpr int block_work_size = thread_work_size * num_threads;
//TODO use _assert_fail, because assert is disabled in non-debug builds
#define ERROR_UNSUPPORTED_CAST assert(false);
// NB: Order matters for this macro; it is relied upon in
// _promoteTypesLookup and the serialization format.
// Note, some types have ctype as void because we don't support them in codegen
#define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(_) \
_(uint8_t, Byte) /* 0 */ \
_(int8_t, Char) /* 1 */ \
_(int16_t, Short) /* 2 */ \
_(int, Int) /* 3 */ \
_(int64_t, Long) /* 4 */ \
_(at::Half, Half) /* 5 */ \
_(float, Float) /* 6 */ \
_(double, Double) /* 7 */ \
_(c10::complex<c10::Half>, ComplexHalf) /* 8 */ \
_(c10::complex<float>, ComplexFloat) /* 9 */ \
_(c10::complex<double>, ComplexDouble) /* 10 */ \
_(bool, Bool) /* 11 */ \
_(void, QInt8) /* 12 */ \
_(void, QUInt8) /* 13 */ \
_(void, QInt32) /* 14 */ \
_(at::BFloat16, BFloat16) /* 15 */ \
#define AT_FORALL_SCALAR_TYPES(_) \
_(uint8_t, Byte) \
_(int8_t, Char) \
_(int16_t, Short) \
_(int, Int) \
_(int64_t, Long) \
_(float, Float) \
_(at::Half, Half) \
_(at::BFloat16, BFloat16) \
_(double, Double) \
_(bool, Bool) \
_(c10::complex<float>, ComplexFloat) \
_(c10::complex<double>, ComplexDouble)
enum class ScalarType : int8_t {
#define DEFINE_ENUM(_1, n) n,
AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_ENUM)
#undef DEFINE_ENUM
Undefined,
NumOptions
};
template <typename T, int size>
struct Array {
T data[size];
__device__ T operator[](int i) const {
return data[i];
}
__device__ T& operator[](int i) {
return data[i];
}
Array() = default;
Array(const Array&) = default;
Array& operator=(const Array&) = default;
};
${half_string}
${bfloat16_string}
${complex_string}
)ESCAPE";
//we need to include the half, bfloat16 and complex strings in all kernels with half arguments and in all kernels with type casting,
//regardless of whether they have half arguments (because fetch_and_cast and cast_and_store loop over all types)
const std::string jiterator_half_support_literal = R"ESCAPE(
namespace at {
struct alignas(2) Half {
unsigned short x;
Half() = default;
inline __host__ __device__ Half(float value){
asm("{ cvt.rn.f16.f32 %0, %1;}\n" : "=h"(x) : "f"(value));
}
inline __host__ __device__ operator float() const{
float val;
asm("{ cvt.f32.f16 %0, %1;}\n" : "=f"(val) : "h"(x)); // do we need const cast here?
//asm("{ cvt.f32.f16 %0, %1;}\n" : "=f"(val) : "h"(__HALF_TO_CUS(x)));
return val;
}
};
}
)ESCAPE";
const std::string jiterator_bfloat16_support_literal = R"ESCAPE(
namespace at {
struct alignas(2) BFloat16 {
unsigned short x;
__device__ unsigned short __internal_float2bfloat16(
const float f,
unsigned int& sign,
unsigned int& remainder) {
unsigned int x;
x = __float_as_uint(f);
if ((x & 0x7fffffffU) > 0x7f800000U) {
sign = 0U;
remainder = 0U;
return static_cast<unsigned short>(0x7fffU);
}
sign = x >> 31;
remainder = x << 16;
return static_cast<unsigned short>(x >> 16);
}
BFloat16() = default;
inline __host__ __device__ BFloat16(float value){
#if __CUDA_ARCH__ >= 800
asm("{ cvt.rn.bf16.f32 %0, %1;}\n" : "=h"(x) : "f"(value));
)ESCAPE"
R"ESCAPE(
#else
unsigned int sign;
unsigned int remainder;
x = __internal_float2bfloat16(value, sign, remainder);
if ((remainder > 0x80000000U) ||
((remainder == 0x80000000U) && ((x & 0x1U) != 0U))) {
x++;
}
#endif
}
inline __host__ __device__ operator float() const{
float val;
asm("{ mov.b32 %0, {0,%1};}\n" : "=f"(val) : "h"(x)); //do we need const cast here?
return val;
}
};
}
)ESCAPE";
//copy-pasted from util/complex.h
const std::string jiterator_complex_support_literal = R"ESCAPE(
//a very limited complex class; the only things it currently allows are implicit conversion
//to complex, and a complex -> real conversion that is unused
namespace c10 {
template<typename T>
struct alignas(sizeof(T) * 2) complex {
using value_type = T;
T real_ = T(0);
T imag_ = T(0);
constexpr complex() = default;
inline __host__ __device__ constexpr complex(const T& re, const T& im = T())
: real_(re), imag_(im) {}
//FIXME I didn't find how complex -> real conversion is done in eager
//we are not going to use it, but it's needed for compilation
inline __host__ __device__ operator T() const{
return real_;
}
};
}
)ESCAPE";
const std::string jit_code_template = R"ESCAPE(
// Fetch a value with dynamic type src_type from ptr, and cast it to static type dest_t.
// For now, simplified version that does not handle complex and special casting to uint8
#define FETCH_AND_CAST_CASE(type, scalartype) case ScalarType::scalartype: return static_cast<dest_t>(*(const type *)ptr);
template<typename dest_t>
__device__ inline dest_t fetch_and_cast(const ScalarType src_type, const void *ptr) {
switch (src_type) {
AT_FORALL_SCALAR_TYPES(FETCH_AND_CAST_CASE)
default:
ERROR_UNSUPPORTED_CAST
}
return dest_t(0); // just to avoid compiler warning
}
// Cast a value with static type src_t into dynamic dest_type, and store it to ptr.
#define CAST_AND_STORE_CASE(type, scalartype) case ScalarType::scalartype: *(type *)ptr = static_cast<type>(value); return;
template<typename src_t>
__device__ inline void cast_and_store(const ScalarType dest_type, void *ptr, src_t value) {
switch (dest_type) {
AT_FORALL_SCALAR_TYPES(CAST_AND_STORE_CASE)
default:;
}
ERROR_UNSUPPORTED_CAST
}
struct LoadWithoutCast {
template <typename scalar_t>
__device__ scalar_t load(char* base_ptr, uint32_t offset, int arg=0) {
return *(reinterpret_cast<scalar_t*>(base_ptr) + offset);
}
};
template <int N>
struct LoadWithCast {
using array_t = Array<ScalarType, N==0? 1 : N>;
using size_array_t = Array<uint32_t, N==0? 1: N>;
array_t dtypes;
size_array_t element_sizes;
template <typename scalar_t>
__device__ scalar_t load(char* base_ptr, uint32_t offset, int arg) {
void* ptr = base_ptr + element_sizes[arg] * offset;
return fetch_and_cast<scalar_t>(dtypes[arg], ptr);
}
};
struct StoreWithoutCast {
template<typename scalar_t>
__device__ void store(scalar_t value, char *base_ptr, uint32_t offset) {
*(reinterpret_cast<scalar_t *>(base_ptr) + offset) = value;
}
};
struct StoreWithCast {
ScalarType dtype;
uint32_t element_size;
//StoreWithCast(at::ScalarType dtype): dtype(dtype), element_size(c10::elementSize(dtype)) {}
template<typename scalar_t>
__device__ void store(scalar_t value, char *base_ptr, uint32_t offset) {
void *ptr = base_ptr + element_size * offset;
cast_and_store<scalar_t>(dtype, ptr, value);
}
};
template <typename T>
struct DivMod {
T div;
T mod;
__device__ DivMod(T _div, T _mod) {
div = _div;
mod = _mod;
}
};
//<unsigned int>
struct IntDivider {
IntDivider() = default;
__device__ inline unsigned int div(unsigned int n) const {
unsigned int t = __umulhi(n, m1);
return (t + n) >> shift;
}
__device__ inline unsigned int mod(unsigned int n) const {
return n - div(n) * divisor;
}
__device__ inline DivMod<unsigned int> divmod(unsigned int n) const {
unsigned int q = div(n);
return DivMod<unsigned int>(q, n - q * divisor);
}
unsigned int divisor; // d above.
unsigned int m1; // Magic number: m' above.
unsigned int shift; // Shift amounts.
};
template <int NARGS>
struct TrivialOffsetCalculator {
// The offset for each argument. Wrapper around fixed-size array.
// The offsets are in # of elements, not in bytes.
Array<${index_type}, NARGS> get(${index_type} linear_idx) const {
Array<${index_type}, NARGS> offsets;
#pragma unroll
for (int arg = 0; arg < NARGS; arg++) {
offsets[arg] = linear_idx;
}
return offsets;
}
};
template<int NARGS>
struct OffsetCalculator {
OffsetCalculator() = default;
__device__ __forceinline__ Array<${index_type}, NARGS> get(${index_type} linear_idx) const {
Array<${index_type}, NARGS> offsets;
#pragma unroll
for (int arg = 0; arg < NARGS; ++arg) {
offsets[arg] = 0;
}
#pragma unroll
for (int dim = 0; dim < 25; ++dim) {
if (dim == dims) {
break;
}
auto divmod = sizes_[dim].divmod(linear_idx);
linear_idx = divmod.div;
#pragma unroll
for (int arg = 0; arg < NARGS; ++arg) {
offsets[arg] += divmod.mod * strides_[dim][arg];
}
//printf("offset calc thread dim size stride offset %d %d %d %d %d %d %d %d\n",
//threadIdx.x, dim, sizes_[dim].divisor, strides_[dim][0], offsets[0], linear_idx, divmod.div, divmod.mod);
}
return offsets;
}
int dims;
IntDivider sizes_[25];
// NOTE: this approach will not support nInputs == 0
${index_type} strides_[25][NARGS];
};
${functor}
// TODO: setup grid-stride loop
extern "C" __global__
void ${name}_kernel(
const int numel,
Array<char*, ${nInputs}+1> data, //[${nInputs}+1],
${offset_calculator}<${nInputs}> input_calculator,
${offset_calculator}<1> output_calculator,
${loader} l,
${storer} s,
${compute_type} scalar_val) {
${declare_load_arrays}
${declare_store_arrays}
int idx = blockIdx.x;
int remaining = numel - block_work_size * idx;
auto thread_idx = threadIdx.x;
#pragma unroll
for (int j = 0; j < thread_work_size; j++){
if (thread_idx >= remaining) {
break;
}
int linear_idx = thread_idx + block_work_size * idx;
auto input_offsets = input_calculator.get(linear_idx);
${load_inputs}
// printf(
// "thread %d a %f offsets %d\n", threadIdx.x, arg0[j], input_offsets[0]);
thread_idx += num_threads;
}
#pragma unroll
for (int j = 0; j < thread_work_size; j++) {
if ((threadIdx.x + j*num_threads) < remaining) {
out[j] = ${name}<${compute_type}>(${args});
}
}
thread_idx = threadIdx.x;
#pragma unroll
for (int j = 0; j < thread_work_size; j++){
if (thread_idx >= remaining) {
break;
}
//TODO maybe think about unifying offset calculators and reuse
//offsets computed in the load loop
int linear_idx = thread_idx + block_work_size * idx;
auto output_offsets = output_calculator.get(linear_idx);
//printf("output thread %d offset %d\n", threadIdx.x, output_offsets[0]);
//TODO handle multi-return functors
${store_outputs}
thread_idx += num_threads;
}
}
)ESCAPE";
const std::string jit_vectorized_code_template = R"ESCAPE(
template <typename scalar_t>
__device__ __inline__ scalar_t load(char* base_ptr, uint32_t offset) {
return *(reinterpret_cast<scalar_t*>(base_ptr) + offset);
}
template<typename scalar_t>
__device__ __inline__ void store(scalar_t value, char *base_ptr, uint32_t offset) {
*(reinterpret_cast<scalar_t *>(base_ptr) + offset) = value;
}
// aligned vector generates vectorized load/store on CUDA
template<typename scalar_t, int vec_size>
struct alignas(sizeof(scalar_t) * vec_size) aligned_vector {
scalar_t val[vec_size];
};
${functor}
// TODO: setup grid-stride loop
extern "C" __global__
void ${name}_vectorized${vec_size}_kernel(
const int N,
Array<char*, ${nInputs}+1> data,
${compute_type} scalar_val) //[${nInputs}+1],
{
constexpr int vec_size = ${vec_size};
int remaining = N - block_work_size * blockIdx.x;
auto thread_idx = threadIdx.x;
int idx = blockIdx.x;
${declare_load_arrays}
${declare_store_arrays}
if (remaining < block_work_size) {
#pragma unroll
for (int j = 0; j < thread_work_size; j++){
if (thread_idx >= remaining) {
break;
}
int linear_idx = thread_idx + block_work_size * idx;
${load_unrolled_inputs}
thread_idx += num_threads;
}
#pragma unroll
for (int j = 0; j < thread_work_size; j++) {
if ((threadIdx.x + j*num_threads) < remaining) {
out[j] = ${name}<${compute_type}>(${args});
}
}
thread_idx = threadIdx.x;
#pragma unroll
for (int j = 0; j < thread_work_size; j++) {
if (thread_idx >= remaining) {
break;
}
int linear_idx = thread_idx + block_work_size * idx;
store<${result_type}>(out[j], data[0], linear_idx);
thread_idx += num_threads;
}
} else {
static constexpr int loop_size = thread_work_size / vec_size;
//actual loading
using vec_t_input = aligned_vector<${scalar_type}, vec_size>;
${vector_pointers}
#pragma unroll
for (int i = 0; i<loop_size; i++){
vec_t_input v;
${load_vectorized_inputs}
thread_idx += num_threads;
}
#pragma unroll
for (int j = 0; j < thread_work_size; j++) {
out[j] = ${name}<${compute_type}>(${args});
}
using vec_t_output = aligned_vector<${result_type}, vec_size>;
vec_t_output * to_ = reinterpret_cast<vec_t_output *>(data[0]) + block_work_size / vec_size * idx;
int thread_idx = threadIdx.x;
#pragma unroll
for (int i = 0; i<loop_size; i++){
vec_t_output v;
#pragma unroll
for (int j=0; j<vec_size; j++){
v.val[j] = out[vec_size * i + j];
}
to_[thread_idx] = v;
thread_idx += num_threads;
}
}
}
)ESCAPE";
// The following is copied from fused_kernel.cpp
// TODO: refactor codegenOutputQuery into its own file
// that can be included by both files
// See NOTE [ USE OF NVRTC AND DRIVER API ]
const at::cuda::NVRTC& nvrtc() {
return at::globalContext().getNVRTC();
}
// query codegen output arch and target
// TODO refactor so this function is usable both from jit and from aten
void codegenOutputQuery(
const hipDeviceProp_t* const prop,
int& major,
int& minor,
bool& compile_to_sass) {
using CudaVersion = std::pair<int, int>;
CudaVersion nvrtc_version;
AT_CUDA_NVRTC_CHECK(
nvrtc().hiprtcVersion(&nvrtc_version.first, &nvrtc_version.second));
TORCH_CHECK(
nvrtc_version.first >= 6,
"NVRTC versions less than 6 are not supported. Is: ",
nvrtc_version.first);
// Version supported by device
// Usually any lower version works too but is less efficient
const CudaVersion dev_version = CudaVersion(prop->major, prop->minor);
// Maximum version supported by the driver, cap dev_version to this
CudaVersion max_dev_version;
if (nvrtc_version.first <= 7) { // 7 supports 2-5.x
max_dev_version = CudaVersion(5, 0);
} else if (nvrtc_version.first <= 8) { // 8 supports 2-6.x
max_dev_version = CudaVersion(6, 0);
} else if (nvrtc_version.first <= 9) { // 9 supports 3-7.2
max_dev_version = CudaVersion(7, 2);
} else if (nvrtc_version.first <= 10) { // 10 supports 3-7.5
max_dev_version = CudaVersion(7, 5);
} else if (nvrtc_version == CudaVersion(11, 0)) { // 11.0 supports 3-8.0
max_dev_version = CudaVersion(8, 0);
} else {
// If the driver version is unknown (i.e. newer than this code)
// assume the driver supports this device
max_dev_version = dev_version;
}
if (dev_version > max_dev_version) {
major = max_dev_version.first;
minor = max_dev_version.second;
// if we are clamping major/minor, sass is not compatible
compile_to_sass = false;
} else {
major = dev_version.first;
minor = dev_version.second;
compile_to_sass = true;
}
}
//TODO another copy paste from jit, refactor so it's usable from both
void __inline__ initializeCudaContext() {
// lazily construct a context if one does not exist yet;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
hipCtx_t pctx = nullptr;
AT_CUDA_DRIVER_CHECK(at::globalContext().getNVRTC().hipCtxGetCurrent(&pctx));
if (!pctx) {
std::unique_lock<std::mutex> cudaFreeMutexLock(
*(c10::hip::HIPCachingAllocator::getFreeMutex()));
hipFree(nullptr);
}
}
//FIXME - these are defined in Loops.cuh, but including Loops.cuh here would lead to circular includes: Loops.cuh -> CUDALoops.cuh -> jit_utils.h -> Loops.cuh
#define THREAD_WORK_SIZE 4
constexpr int thread_work_size = THREAD_WORK_SIZE;
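// Builds the complete source string for a jitted pointwise kernel: the user functor
// `func` (a device function template named `name`) and the type/layout choices below
// are substituted into jit_code_template, or into jit_vectorized_code_template when
// `vectorized` is true. nTensors counts the inputs plus the single output tensor.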
std::string generate_code(
int nTensors,
const std::string& func,
const std::string& name,
const std::string& f_inputs_type,
const std::string& compute_type,
const std::string& result_type,
bool contiguous,
bool dynamic_casting,
BinaryFuncVariant scalar_pos,
bool vectorized,
int vec_size) {
TemplateEnv env;
env.s("index_type", "unsigned int");
const int nInputs = nTensors - 1;
env.s("nInputs", std::to_string(nInputs));
env.s("scalar_type", f_inputs_type);
env.s("compute_type", compute_type);
env.s("functor", func);
env.s("name", name);
std::stringstream declare_load_arrays;
for (int i = 0; i < nInputs; i++) {
// TODO these arrays are potentially of the different types, use function
// traits to determine the types
declare_load_arrays << f_inputs_type << " arg" << std::to_string(i)
<< "[" << std::to_string(thread_work_size) << "];\n";
}
env.s("declare_load_arrays", declare_load_arrays.str());
std::stringstream declare_store_arrays;
declare_store_arrays << result_type << " out"
<< "[" << std::to_string(thread_work_size) << "];\n";
env.s("declare_store_arrays", declare_store_arrays.str());
const int nOutputs = 1; // FIXME
std::stringstream functor_args;
if (scalar_pos == BinaryFuncVariant::NoScalar) {
for (int i = 0; i < nInputs - 1; i++) {
functor_args << "arg" << std::to_string(i) << "[j], ";
}
functor_args << "arg" << std::to_string(nInputs - 1) << "[j]";
} else if (scalar_pos == BinaryFuncVariant::LhsScalar) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(nInputs == 1);
functor_args << "scalar_val, arg0[j]";
} else { //RhsScalar
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(nInputs == 1);
functor_args << "arg0[j], scalar_val";
}
env.s("args", functor_args.str());
if (f_inputs_type == "at::Half" || result_type == "at::Half" || dynamic_casting) {
env.s("half_string", jiterator_half_support_literal);
} else {
env.s("half_string", "");
}
if (f_inputs_type == "at::BFloat16" || result_type == "at::BFloat16" || dynamic_casting) {
env.s("bfloat16_string", jiterator_bfloat16_support_literal);
} else {
env.s("bfloat16_string", "");
}
if (dynamic_casting) {
env.s("complex_string", jiterator_complex_support_literal);
} else {
env.s("complex_string", "");
}
if (!vectorized) {
if (!dynamic_casting) {
env.s("loader", "LoadWithoutCast");
env.s("storer", "StoreWithoutCast");
} else {
env.s(
"loader", std::string("LoadWithCast<" + std::to_string(nInputs) + ">"));
env.s("storer", "StoreWithCast");
}
if (contiguous) {
env.s("offset_calculator", "TrivialOffsetCalculator");
} else {
env.s("offset_calculator", "OffsetCalculator");
}
std::stringstream load_inputs;
for (int i = 0; i < nInputs; i++) {
auto i_string = std::to_string(i);
load_inputs << "arg" << i_string << "[j] = l.load<" << f_inputs_type
<< ">(data[" << std::to_string(i + nOutputs)
<< "], input_offsets[" << i_string << "], " << i_string
<< ");\n";
}
env.s("load_inputs", load_inputs.str());
std::stringstream store_outputs;
store_outputs << "s.store<" << result_type
<< ">(out[j], data[0], output_offsets[0]);\n";
env.s("store_outputs", store_outputs.str());
static auto cuda_template = CodeTemplate(jit_common_types + jit_code_template);
return cuda_template.format(env);
}
// vectorized case
env.s("vec_size", std::to_string(vec_size));
env.s("result_type", result_type);
std::stringstream vector_pointers;
for (const auto i : c10::irange(nInputs)){
auto i_string = std::to_string(i);
vector_pointers << "vec_t_input * vec" << i_string <<
" = reinterpret_cast<vec_t_input *>(data[" << i_string << "+1])" <<
" + block_work_size / vec_size * idx;\n";
}
env.s("vector_pointers", vector_pointers.str());
std::stringstream load_vectorized_inputs;
for (const auto i : c10::irange(nInputs)) {
auto i_string = std::to_string(i);
load_vectorized_inputs << "v = vec" << i_string << "[thread_idx];\n";
load_vectorized_inputs << "#pragma unroll\n";
load_vectorized_inputs << "for (int j=0; j < vec_size; j++){\n";
load_vectorized_inputs << " arg" << i_string << "[vec_size * i + j] = v.val[j];\n";
load_vectorized_inputs << "}\n";
}
env.s("load_vectorized_inputs", load_vectorized_inputs.str());
std::stringstream load_unrolled_inputs;
for (const auto i: c10::irange(nInputs)){
auto i_string = std::to_string(i);
load_unrolled_inputs << "arg" << i_string << "[j] = load<" << f_inputs_type
<< ">(data[" << std::to_string(i + nOutputs) << "], linear_idx);\n";
}
env.s("load_unrolled_inputs", load_unrolled_inputs.str());
static auto cuda_template = CodeTemplate(jit_common_types + jit_vectorized_code_template);
return cuda_template.format(env);
}
// Compiles the kernel
NvrtcFunction jit_pwise_function(
const std::string& code,
const std::string& kernel_name) {
// Acquires device and NVRTC properties (for compile arch and occupancy calculations)
const hipDeviceProp_t* prop = at::cuda::getCurrentDeviceProperties();
int major = 0, minor = 0;
bool compile_to_sass = false;
codegenOutputQuery(prop, major, minor, compile_to_sass);
// Creates the NVRTC program
hiprtcProgram program;
const auto& nvrtc = at::globalContext().getNVRTC();
AT_CUDA_NVRTC_CHECK(nvrtc.hiprtcCreateProgram(
&program, code.c_str(), nullptr, 0, nullptr, nullptr));
// constructs nvrtc build arguments
#if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION < 11010
// compile to sass is not allowed prior to CUDA 11.1
compile_to_sass = false;
#endif
// CUDA 11.1 allows going directly to SASS (sm_) instead of PTX (compute_)
// which gives better backwards compatibility to work on older drivers,
// (since an older driver doesn't necessarily recognize PTX emitted by a new
// toolkit);
// Meanwhile, for forward compatibility (future device with
// `unsupported_arch==True`), since SASS are not necessarily compatible,
// we fall back to PTX instead.
const std::string compute = std::string("--gpu-architecture=") +
(compile_to_sass ? "sm_" : "compute_") + std::to_string(major) +
std::to_string(minor);
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
std::vector<const char*> args = {
"--std=c++14", compute.c_str(), "-default-device"};
#ifndef NDEBUG
// Add line info to generated kernels
args.push_back("-lineinfo");
#else
// Avoid excessive register usage from assertion
args.push_back("-DNDEBUG");
#endif
// compiles and validates result
initializeCudaContext();
const auto compilation_result =
nvrtc.hiprtcCompileProgram(program, args.size(), args.data());
if (compilation_result != HIPRTC_SUCCESS) {
size_t logsize;
AT_CUDA_NVRTC_CHECK(nvrtc.hiprtcGetProgramLogSize(program, &logsize));
std::vector<char> log(logsize);
AT_CUDA_NVRTC_CHECK(nvrtc.hiprtcGetProgramLog(program, log.data()));
std::stringstream cu;
cu << log.data();
throw std::runtime_error(cu.str() + code);
}
size_t ptx_size = 0;
std::vector<char> ptx;
#if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 11010
// compile_to_sass determines whether we are generating SASS or PTX, hence
// the different API.
const auto getSize = compile_to_sass
? at::globalContext().getNVRTC().hiprtcGetBitcodeSize
: at::globalContext().getNVRTC().hiprtcGetCodeSize;
const auto getFunc = compile_to_sass
? at::globalContext().getNVRTC().hiprtcGetBitcode
: at::globalContext().getNVRTC().hiprtcGetCode;
#else
const auto getSize = at::globalContext().getNVRTC().hiprtcGetCodeSize;
const auto getFunc = at::globalContext().getNVRTC().hiprtcGetCode;
#endif
AT_CUDA_NVRTC_CHECK(getSize(program, &ptx_size));
ptx.resize(ptx_size);
AT_CUDA_NVRTC_CHECK(getFunc(program, ptx.data()));
NvrtcFunction compiled_kernel_;
AT_CUDA_DRIVER_CHECK(nvrtc.hipModuleLoadData(&(compiled_kernel_.module), ptx.data()));
std::string name = kernel_name + "_kernel";
AT_CUDA_DRIVER_CHECK(
nvrtc.hipModuleGetFunction(&(compiled_kernel_.function), compiled_kernel_.module, name.c_str()));
// TODO: use guards to avoid leaking
AT_CUDA_NVRTC_CHECK(nvrtc.hiprtcDestroyProgram(&program));
return compiled_kernel_;
}
// TODO: may need/want to initialize CUDA context here (refactor into nvrtc call)
void launch_jitted_pwise_function(
NvrtcFunction function,
std::array<void*, 7>& args,
const int nBlocks,
const int kBlockSize) {
initializeCudaContext();
const auto& nvrtc = at::globalContext().getNVRTC();
// Launches kernel on current stream
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_CUDA_DRIVER_CHECK(nvrtc.hipModuleLaunchKernel(
function.function,
nBlocks,
1,
1,
kBlockSize,
1,
1,
0,
stream,
args.data(),
nullptr));
}
}}} // at::cuda::jit
|
3cb3859bfae506efb1833711a4bc9c4e269d6925.cu
|
#include <sstream>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <c10/cuda/CUDACachingAllocator.h>
#include <ATen/native/cuda/jit_utils.h>
#include <c10/core/ScalarType.h>
#include <c10/util/irange.h>
namespace at { namespace cuda { namespace jit {
const std::string jit_common_types = R"ESCAPE(
typedef long long int int64_t;
typedef unsigned int uint32_t;
typedef signed char int8_t;
typedef unsigned char uint8_t; // NOTE: this MUST be "unsigned char"! "char" is equivalent to "signed char"
typedef short int16_t;
static_assert(sizeof(int64_t) == 8, "expected size does not match");
static_assert(sizeof(uint32_t) == 4, "expected size does not match");
static_assert(sizeof(int8_t) == 1, "expected size does not match");
constexpr int num_threads = 128;
constexpr int thread_work_size = 4; // TODO: make template substitution once we decide where those vars live
constexpr int block_work_size = thread_work_size * num_threads;
//TODO use _assert_fail, because assert is disabled in non-debug builds
#define ERROR_UNSUPPORTED_CAST assert(false);
// NB: Order matters for this macro; it is relied upon in
// _promoteTypesLookup and the serialization format.
// Note, some types have ctype as void because we don't support them in codegen
#define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(_) \
_(uint8_t, Byte) /* 0 */ \
_(int8_t, Char) /* 1 */ \
_(int16_t, Short) /* 2 */ \
_(int, Int) /* 3 */ \
_(int64_t, Long) /* 4 */ \
_(at::Half, Half) /* 5 */ \
_(float, Float) /* 6 */ \
_(double, Double) /* 7 */ \
_(c10::complex<c10::Half>, ComplexHalf) /* 8 */ \
_(c10::complex<float>, ComplexFloat) /* 9 */ \
_(c10::complex<double>, ComplexDouble) /* 10 */ \
_(bool, Bool) /* 11 */ \
_(void, QInt8) /* 12 */ \
_(void, QUInt8) /* 13 */ \
_(void, QInt32) /* 14 */ \
_(at::BFloat16, BFloat16) /* 15 */ \
#define AT_FORALL_SCALAR_TYPES(_) \
_(uint8_t, Byte) \
_(int8_t, Char) \
_(int16_t, Short) \
_(int, Int) \
_(int64_t, Long) \
_(float, Float) \
_(at::Half, Half) \
_(at::BFloat16, BFloat16) \
_(double, Double) \
_(bool, Bool) \
_(c10::complex<float>, ComplexFloat) \
_(c10::complex<double>, ComplexDouble)
enum class ScalarType : int8_t {
#define DEFINE_ENUM(_1, n) n,
AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_ENUM)
#undef DEFINE_ENUM
Undefined,
NumOptions
};
template <typename T, int size>
struct Array {
T data[size];
__device__ T operator[](int i) const {
return data[i];
}
__device__ T& operator[](int i) {
return data[i];
}
Array() = default;
Array(const Array&) = default;
Array& operator=(const Array&) = default;
};
${half_string}
${bfloat16_string}
${complex_string}
)ESCAPE";
//we need to include the half, bfloat16 and complex strings in all kernels with half arguments and in all kernels with type casting,
//regardless of whether they have half arguments (because fetch_and_cast and cast_and_store loop over all types)
const std::string jiterator_half_support_literal = R"ESCAPE(
namespace at {
struct alignas(2) Half {
unsigned short x;
Half() = default;
inline __host__ __device__ Half(float value){
asm("{ cvt.rn.f16.f32 %0, %1;}\n" : "=h"(x) : "f"(value));
}
inline __host__ __device__ operator float() const{
float val;
asm("{ cvt.f32.f16 %0, %1;}\n" : "=f"(val) : "h"(x)); // do we need const cast here?
//asm("{ cvt.f32.f16 %0, %1;}\n" : "=f"(val) : "h"(__HALF_TO_CUS(x)));
return val;
}
};
}
)ESCAPE";
const std::string jiterator_bfloat16_support_literal = R"ESCAPE(
namespace at {
struct alignas(2) BFloat16 {
unsigned short x;
__device__ unsigned short __internal_float2bfloat16(
const float f,
unsigned int& sign,
unsigned int& remainder) {
unsigned int x;
x = __float_as_uint(f);
if ((x & 0x7fffffffU) > 0x7f800000U) {
sign = 0U;
remainder = 0U;
return static_cast<unsigned short>(0x7fffU);
}
sign = x >> 31;
remainder = x << 16;
return static_cast<unsigned short>(x >> 16);
}
BFloat16() = default;
inline __host__ __device__ BFloat16(float value){
#if __CUDA_ARCH__ >= 800
asm("{ cvt.rn.bf16.f32 %0, %1;}\n" : "=h"(x) : "f"(value));
)ESCAPE"
R"ESCAPE(
#else
unsigned int sign;
unsigned int remainder;
x = __internal_float2bfloat16(value, sign, remainder);
if ((remainder > 0x80000000U) ||
((remainder == 0x80000000U) && ((x & 0x1U) != 0U))) {
x++;
}
#endif
}
inline __host__ __device__ operator float() const{
float val;
asm("{ mov.b32 %0, {0,%1};}\n" : "=f"(val) : "h"(x)); //do we need const cast here?
return val;
}
};
}
)ESCAPE";
//copy-pasted from util/complex.h
const std::string jiterator_complex_support_literal = R"ESCAPE(
//a very limited complex class; the only things it currently allows are implicit conversion
//to complex, and a complex -> real conversion that is unused
namespace c10 {
template<typename T>
struct alignas(sizeof(T) * 2) complex {
using value_type = T;
T real_ = T(0);
T imag_ = T(0);
constexpr complex() = default;
inline __host__ __device__ constexpr complex(const T& re, const T& im = T())
: real_(re), imag_(im) {}
//FIXME I didn't find how complex -> real conversion is done in eager
//we are not going to use it, but it's needed for compilation
inline __host__ __device__ operator T() const{
return real_;
}
};
}
)ESCAPE";
const std::string jit_code_template = R"ESCAPE(
// Fetch a value with dynamic type src_type from ptr, and cast it to static type dest_t.
// For now, simplified version that does not handle complex and special casting to uint8
#define FETCH_AND_CAST_CASE(type, scalartype) case ScalarType::scalartype: return static_cast<dest_t>(*(const type *)ptr);
template<typename dest_t>
__device__ inline dest_t fetch_and_cast(const ScalarType src_type, const void *ptr) {
switch (src_type) {
AT_FORALL_SCALAR_TYPES(FETCH_AND_CAST_CASE)
default:
ERROR_UNSUPPORTED_CAST
}
return dest_t(0); // just to avoid compiler warning
}
// Cast a value with static type src_t into dynamic dest_type, and store it to ptr.
#define CAST_AND_STORE_CASE(type, scalartype) case ScalarType::scalartype: *(type *)ptr = static_cast<type>(value); return;
template<typename src_t>
__device__ inline void cast_and_store(const ScalarType dest_type, void *ptr, src_t value) {
switch (dest_type) {
AT_FORALL_SCALAR_TYPES(CAST_AND_STORE_CASE)
default:;
}
ERROR_UNSUPPORTED_CAST
}
struct LoadWithoutCast {
template <typename scalar_t>
__device__ scalar_t load(char* base_ptr, uint32_t offset, int arg=0) {
return *(reinterpret_cast<scalar_t*>(base_ptr) + offset);
}
};
template <int N>
struct LoadWithCast {
using array_t = Array<ScalarType, N==0? 1 : N>;
using size_array_t = Array<uint32_t, N==0? 1: N>;
array_t dtypes;
size_array_t element_sizes;
template <typename scalar_t>
__device__ scalar_t load(char* base_ptr, uint32_t offset, int arg) {
void* ptr = base_ptr + element_sizes[arg] * offset;
return fetch_and_cast<scalar_t>(dtypes[arg], ptr);
}
};
struct StoreWithoutCast {
template<typename scalar_t>
__device__ void store(scalar_t value, char *base_ptr, uint32_t offset) {
*(reinterpret_cast<scalar_t *>(base_ptr) + offset) = value;
}
};
struct StoreWithCast {
ScalarType dtype;
uint32_t element_size;
//StoreWithCast(at::ScalarType dtype): dtype(dtype), element_size(c10::elementSize(dtype)) {}
template<typename scalar_t>
__device__ void store(scalar_t value, char *base_ptr, uint32_t offset) {
void *ptr = base_ptr + element_size * offset;
cast_and_store<scalar_t>(dtype, ptr, value);
}
};
template <typename T>
struct DivMod {
T div;
T mod;
__device__ DivMod(T _div, T _mod) {
div = _div;
mod = _mod;
}
};
//<unsigned int>
struct IntDivider {
IntDivider() = default;
__device__ inline unsigned int div(unsigned int n) const {
unsigned int t = __umulhi(n, m1);
return (t + n) >> shift;
}
__device__ inline unsigned int mod(unsigned int n) const {
return n - div(n) * divisor;
}
__device__ inline DivMod<unsigned int> divmod(unsigned int n) const {
unsigned int q = div(n);
return DivMod<unsigned int>(q, n - q * divisor);
}
unsigned int divisor; // d above.
unsigned int m1; // Magic number: m' above.
unsigned int shift; // Shift amounts.
};
template <int NARGS>
struct TrivialOffsetCalculator {
// The offset for each argument. Wrapper around fixed-size array.
// The offsets are in # of elements, not in bytes.
Array<${index_type}, NARGS> get(${index_type} linear_idx) const {
Array<${index_type}, NARGS> offsets;
#pragma unroll
for (int arg = 0; arg < NARGS; arg++) {
offsets[arg] = linear_idx;
}
return offsets;
}
};
template<int NARGS>
struct OffsetCalculator {
OffsetCalculator() = default;
__device__ __forceinline__ Array<${index_type}, NARGS> get(${index_type} linear_idx) const {
Array<${index_type}, NARGS> offsets;
#pragma unroll
for (int arg = 0; arg < NARGS; ++arg) {
offsets[arg] = 0;
}
#pragma unroll
for (int dim = 0; dim < 25; ++dim) {
if (dim == dims) {
break;
}
auto divmod = sizes_[dim].divmod(linear_idx);
linear_idx = divmod.div;
#pragma unroll
for (int arg = 0; arg < NARGS; ++arg) {
offsets[arg] += divmod.mod * strides_[dim][arg];
}
//printf("offset calc thread dim size stride offset %d %d %d %d %d %d %d %d\n",
//threadIdx.x, dim, sizes_[dim].divisor, strides_[dim][0], offsets[0], linear_idx, divmod.div, divmod.mod);
}
return offsets;
}
int dims;
IntDivider sizes_[25];
// NOTE: this approach will not support nInputs == 0
${index_type} strides_[25][NARGS];
};
${functor}
// TODO: setup grid-stride loop
extern "C" __global__
void ${name}_kernel(
const int numel,
Array<char*, ${nInputs}+1> data, //[${nInputs}+1],
${offset_calculator}<${nInputs}> input_calculator,
${offset_calculator}<1> output_calculator,
${loader} l,
${storer} s,
${compute_type} scalar_val) {
${declare_load_arrays}
${declare_store_arrays}
int idx = blockIdx.x;
int remaining = numel - block_work_size * idx;
auto thread_idx = threadIdx.x;
#pragma unroll
for (int j = 0; j < thread_work_size; j++){
if (thread_idx >= remaining) {
break;
}
int linear_idx = thread_idx + block_work_size * idx;
auto input_offsets = input_calculator.get(linear_idx);
${load_inputs}
// printf(
// "thread %d a %f offsets %d\n", threadIdx.x, arg0[j], input_offsets[0]);
thread_idx += num_threads;
}
#pragma unroll
for (int j = 0; j < thread_work_size; j++) {
if ((threadIdx.x + j*num_threads) < remaining) {
out[j] = ${name}<${compute_type}>(${args});
}
}
thread_idx = threadIdx.x;
#pragma unroll
for (int j = 0; j < thread_work_size; j++){
if (thread_idx >= remaining) {
break;
}
//TODO maybe think about unifying offset calculators and reuse
//offsets computed in the load loop
int linear_idx = thread_idx + block_work_size * idx;
auto output_offsets = output_calculator.get(linear_idx);
//printf("output thread %d offset %d\n", threadIdx.x, output_offsets[0]);
//TODO handle multi-return functors
${store_outputs}
thread_idx += num_threads;
}
}
)ESCAPE";
const std::string jit_vectorized_code_template = R"ESCAPE(
template <typename scalar_t>
__device__ __inline__ scalar_t load(char* base_ptr, uint32_t offset) {
return *(reinterpret_cast<scalar_t*>(base_ptr) + offset);
}
template<typename scalar_t>
__device__ __inline__ void store(scalar_t value, char *base_ptr, uint32_t offset) {
*(reinterpret_cast<scalar_t *>(base_ptr) + offset) = value;
}
// aligned vector generates vectorized load/store on CUDA
template<typename scalar_t, int vec_size>
struct alignas(sizeof(scalar_t) * vec_size) aligned_vector {
scalar_t val[vec_size];
};
${functor}
// TODO: setup grid-stride loop
extern "C" __global__
void ${name}_vectorized${vec_size}_kernel(
const int N,
Array<char*, ${nInputs}+1> data,
${compute_type} scalar_val) //[${nInputs}+1],
{
constexpr int vec_size = ${vec_size};
int remaining = N - block_work_size * blockIdx.x;
auto thread_idx = threadIdx.x;
int idx = blockIdx.x;
${declare_load_arrays}
${declare_store_arrays}
if (remaining < block_work_size) {
#pragma unroll
for (int j = 0; j < thread_work_size; j++){
if (thread_idx >= remaining) {
break;
}
int linear_idx = thread_idx + block_work_size * idx;
${load_unrolled_inputs}
thread_idx += num_threads;
}
#pragma unroll
for (int j = 0; j < thread_work_size; j++) {
if ((threadIdx.x + j*num_threads) < remaining) {
out[j] = ${name}<${compute_type}>(${args});
}
}
thread_idx = threadIdx.x;
#pragma unroll
for (int j = 0; j < thread_work_size; j++) {
if (thread_idx >= remaining) {
break;
}
int linear_idx = thread_idx + block_work_size * idx;
store<${result_type}>(out[j], data[0], linear_idx);
thread_idx += num_threads;
}
} else {
static constexpr int loop_size = thread_work_size / vec_size;
//actual loading
using vec_t_input = aligned_vector<${scalar_type}, vec_size>;
${vector_pointers}
#pragma unroll
for (int i = 0; i<loop_size; i++){
vec_t_input v;
${load_vectorized_inputs}
thread_idx += num_threads;
}
#pragma unroll
for (int j = 0; j < thread_work_size; j++) {
out[j] = ${name}<${compute_type}>(${args});
}
using vec_t_output = aligned_vector<${result_type}, vec_size>;
vec_t_output * to_ = reinterpret_cast<vec_t_output *>(data[0]) + block_work_size / vec_size * idx;
int thread_idx = threadIdx.x;
#pragma unroll
for (int i = 0; i<loop_size; i++){
vec_t_output v;
#pragma unroll
for (int j=0; j<vec_size; j++){
v.val[j] = out[vec_size * i + j];
}
to_[thread_idx] = v;
thread_idx += num_threads;
}
}
}
)ESCAPE";
// The following is copied from fused_kernel.cpp
// TODO: refactor codegenOutputQuery into its own file
// that can be included by both files
// See NOTE [ USE OF NVRTC AND DRIVER API ]
const at::cuda::NVRTC& nvrtc() {
return at::globalContext().getNVRTC();
}
// query codegen output arch and target
// TODO refactor so this function is usable both from jit and from aten
void codegenOutputQuery(
const cudaDeviceProp* const prop,
int& major,
int& minor,
bool& compile_to_sass) {
using CudaVersion = std::pair<int, int>;
CudaVersion nvrtc_version;
AT_CUDA_NVRTC_CHECK(
nvrtc().nvrtcVersion(&nvrtc_version.first, &nvrtc_version.second));
TORCH_CHECK(
nvrtc_version.first >= 6,
"NVRTC versions less than 6 are not supported. Is: ",
nvrtc_version.first);
// Version supported by device
// Usually any lower version works too but is less efficient
const CudaVersion dev_version = CudaVersion(prop->major, prop->minor);
// Maximum version supported by the driver, cap dev_version to this
CudaVersion max_dev_version;
if (nvrtc_version.first <= 7) { // 7 supports 2-5.x
max_dev_version = CudaVersion(5, 0);
} else if (nvrtc_version.first <= 8) { // 8 supports 2-6.x
max_dev_version = CudaVersion(6, 0);
} else if (nvrtc_version.first <= 9) { // 9 supports 3-7.2
max_dev_version = CudaVersion(7, 2);
} else if (nvrtc_version.first <= 10) { // 10 supports 3-7.5
max_dev_version = CudaVersion(7, 5);
} else if (nvrtc_version == CudaVersion(11, 0)) { // 11.0 supports 3-8.0
max_dev_version = CudaVersion(8, 0);
} else {
// If the driver version is unknown (i.e. newer than this code)
// assume the driver supports this device
max_dev_version = dev_version;
}
if (dev_version > max_dev_version) {
major = max_dev_version.first;
minor = max_dev_version.second;
// if we are clamping major/minor, sass is not compatible
compile_to_sass = false;
} else {
major = dev_version.first;
minor = dev_version.second;
compile_to_sass = true;
}
}
//TODO another copy paste from jit, refactor so it's usable from both
void __inline__ initializeCudaContext() {
// lazily construct a context if one does not exist yet;
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
CUcontext pctx = nullptr;
AT_CUDA_DRIVER_CHECK(at::globalContext().getNVRTC().cuCtxGetCurrent(&pctx));
if (!pctx) {
std::unique_lock<std::mutex> cudaFreeMutexLock(
*(c10::cuda::CUDACachingAllocator::getFreeMutex()));
cudaFree(nullptr);
}
}
//FIXME - these are defined in Loops.cuh, but including Loops.cuh here would lead to circular includes: Loops.cuh -> CUDALoops.cuh -> jit_utils.h -> Loops.cuh
#define THREAD_WORK_SIZE 4
constexpr int thread_work_size = THREAD_WORK_SIZE;
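// Builds the complete source string for a jitted pointwise kernel: the user functor
// `func` (a device function template named `name`) and the type/layout choices below
// are substituted into jit_code_template, or into jit_vectorized_code_template when
// `vectorized` is true. nTensors counts the inputs plus the single output tensor.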
std::string generate_code(
int nTensors,
const std::string& func,
const std::string& name,
const std::string& f_inputs_type,
const std::string& compute_type,
const std::string& result_type,
bool contiguous,
bool dynamic_casting,
BinaryFuncVariant scalar_pos,
bool vectorized,
int vec_size) {
TemplateEnv env;
env.s("index_type", "unsigned int");
const int nInputs = nTensors - 1;
env.s("nInputs", std::to_string(nInputs));
env.s("scalar_type", f_inputs_type);
env.s("compute_type", compute_type);
env.s("functor", func);
env.s("name", name);
std::stringstream declare_load_arrays;
for (int i = 0; i < nInputs; i++) {
// TODO these arrays are potentially of the different types, use function
// traits to determine the types
declare_load_arrays << f_inputs_type << " arg" << std::to_string(i)
<< "[" << std::to_string(thread_work_size) << "];\n";
}
env.s("declare_load_arrays", declare_load_arrays.str());
std::stringstream declare_store_arrays;
declare_store_arrays << result_type << " out"
<< "[" << std::to_string(thread_work_size) << "];\n";
env.s("declare_store_arrays", declare_store_arrays.str());
const int nOutputs = 1; // FIXME
std::stringstream functor_args;
if (scalar_pos == BinaryFuncVariant::NoScalar) {
for (int i = 0; i < nInputs - 1; i++) {
functor_args << "arg" << std::to_string(i) << "[j], ";
}
functor_args << "arg" << std::to_string(nInputs - 1) << "[j]";
} else if (scalar_pos == BinaryFuncVariant::LhsScalar) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(nInputs == 1);
functor_args << "scalar_val, arg0[j]";
} else { //RhsScalar
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(nInputs == 1);
functor_args << "arg0[j], scalar_val";
}
env.s("args", functor_args.str());
if (f_inputs_type == "at::Half" || result_type == "at::Half" || dynamic_casting) {
env.s("half_string", jiterator_half_support_literal);
} else {
env.s("half_string", "");
}
if (f_inputs_type == "at::BFloat16" || result_type == "at::BFloat16" || dynamic_casting) {
env.s("bfloat16_string", jiterator_bfloat16_support_literal);
} else {
env.s("bfloat16_string", "");
}
if (dynamic_casting) {
env.s("complex_string", jiterator_complex_support_literal);
} else {
env.s("complex_string", "");
}
if (!vectorized) {
if (!dynamic_casting) {
env.s("loader", "LoadWithoutCast");
env.s("storer", "StoreWithoutCast");
} else {
env.s(
"loader", std::string("LoadWithCast<" + std::to_string(nInputs) + ">"));
env.s("storer", "StoreWithCast");
}
if (contiguous) {
env.s("offset_calculator", "TrivialOffsetCalculator");
} else {
env.s("offset_calculator", "OffsetCalculator");
}
std::stringstream load_inputs;
for (int i = 0; i < nInputs; i++) {
auto i_string = std::to_string(i);
load_inputs << "arg" << i_string << "[j] = l.load<" << f_inputs_type
<< ">(data[" << std::to_string(i + nOutputs)
<< "], input_offsets[" << i_string << "], " << i_string
<< ");\n";
}
env.s("load_inputs", load_inputs.str());
std::stringstream store_outputs;
store_outputs << "s.store<" << result_type
<< ">(out[j], data[0], output_offsets[0]);\n";
env.s("store_outputs", store_outputs.str());
static auto cuda_template = CodeTemplate(jit_common_types + jit_code_template);
return cuda_template.format(env);
}
// vectorized case
env.s("vec_size", std::to_string(vec_size));
env.s("result_type", result_type);
std::stringstream vector_pointers;
for (const auto i : c10::irange(nInputs)){
auto i_string = std::to_string(i);
vector_pointers << "vec_t_input * vec" << i_string <<
" = reinterpret_cast<vec_t_input *>(data[" << i_string << "+1])" <<
" + block_work_size / vec_size * idx;\n";
}
env.s("vector_pointers", vector_pointers.str());
std::stringstream load_vectorized_inputs;
for (const auto i : c10::irange(nInputs)) {
auto i_string = std::to_string(i);
load_vectorized_inputs << "v = vec" << i_string << "[thread_idx];\n";
load_vectorized_inputs << "#pragma unroll\n";
load_vectorized_inputs << "for (int j=0; j < vec_size; j++){\n";
load_vectorized_inputs << " arg" << i_string << "[vec_size * i + j] = v.val[j];\n";
load_vectorized_inputs << "}\n";
}
env.s("load_vectorized_inputs", load_vectorized_inputs.str());
std::stringstream load_unrolled_inputs;
for (const auto i: c10::irange(nInputs)){
auto i_string = std::to_string(i);
load_unrolled_inputs << "arg" << i_string << "[j] = load<" << f_inputs_type
<< ">(data[" << std::to_string(i + nOutputs) << "], linear_idx);\n";
}
env.s("load_unrolled_inputs", load_unrolled_inputs.str());
static auto cuda_template = CodeTemplate(jit_common_types + jit_vectorized_code_template);
return cuda_template.format(env);
}
// Compiles the kernel
NvrtcFunction jit_pwise_function(
const std::string& code,
const std::string& kernel_name) {
// Acquires device and NVRTC properties (for compile arch and occupancy calculations)
const cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
int major = 0, minor = 0;
bool compile_to_sass = false;
codegenOutputQuery(prop, major, minor, compile_to_sass);
// Creates the NVRTC program
nvrtcProgram program;
const auto& nvrtc = at::globalContext().getNVRTC();
AT_CUDA_NVRTC_CHECK(nvrtc.nvrtcCreateProgram(
&program, code.c_str(), nullptr, 0, nullptr, nullptr));
// constructs nvrtc build arguments
#if defined(CUDA_VERSION) && CUDA_VERSION < 11010
// compile to sass is not allowed prior to CUDA 11.1
compile_to_sass = false;
#endif
// CUDA 11.1 allows going directly to SASS (sm_) instead of PTX (compute_)
// which gives better backwards compatibility to work on older drivers,
// (since an older driver doesn't necessarily recognize PTX emitted by a new
// toolkit);
// Meanwhile, for forward compatibility (future device with
// `unsupported_arch==True`), since SASS are not necessarily compatible,
// we fall back to PTX instead.
const std::string compute = std::string("--gpu-architecture=") +
(compile_to_sass ? "sm_" : "compute_") + std::to_string(major) +
std::to_string(minor);
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
std::vector<const char*> args = {
"--std=c++14", compute.c_str(), "-default-device"};
#ifndef NDEBUG
// Add line info to generated kernels
args.push_back("-lineinfo");
#else
// Avoid excessive register usage from assertion
args.push_back("-DNDEBUG");
#endif
// compiles and validates result
initializeCudaContext();
const auto compilation_result =
nvrtc.nvrtcCompileProgram(program, args.size(), args.data());
if (compilation_result != NVRTC_SUCCESS) {
size_t logsize;
AT_CUDA_NVRTC_CHECK(nvrtc.nvrtcGetProgramLogSize(program, &logsize));
std::vector<char> log(logsize);
AT_CUDA_NVRTC_CHECK(nvrtc.nvrtcGetProgramLog(program, log.data()));
std::stringstream cu;
cu << log.data();
throw std::runtime_error(cu.str() + code);
}
size_t ptx_size = 0;
std::vector<char> ptx;
#if defined(CUDA_VERSION) && CUDA_VERSION >= 11010
// compile_to_sass determines whether we are generating SASS or PTX, hence
// the different API.
const auto getSize = compile_to_sass
? at::globalContext().getNVRTC().nvrtcGetCUBINSize
: at::globalContext().getNVRTC().nvrtcGetPTXSize;
const auto getFunc = compile_to_sass
? at::globalContext().getNVRTC().nvrtcGetCUBIN
: at::globalContext().getNVRTC().nvrtcGetPTX;
#else
const auto getSize = at::globalContext().getNVRTC().nvrtcGetPTXSize;
const auto getFunc = at::globalContext().getNVRTC().nvrtcGetPTX;
#endif
AT_CUDA_NVRTC_CHECK(getSize(program, &ptx_size));
ptx.resize(ptx_size);
AT_CUDA_NVRTC_CHECK(getFunc(program, ptx.data()));
NvrtcFunction compiled_kernel_;
AT_CUDA_DRIVER_CHECK(nvrtc.cuModuleLoadData(&(compiled_kernel_.module), ptx.data()));
std::string name = kernel_name + "_kernel";
AT_CUDA_DRIVER_CHECK(
nvrtc.cuModuleGetFunction(&(compiled_kernel_.function), compiled_kernel_.module, name.c_str()));
// TODO: use guards to avoid leaking
AT_CUDA_NVRTC_CHECK(nvrtc.nvrtcDestroyProgram(&program));
return compiled_kernel_;
}
// TODO: may need/want to initialize CUDA context here (refactor into nvrtc call)
void launch_jitted_pwise_function(
NvrtcFunction function,
std::array<void*, 7>& args,
const int nBlocks,
const int kBlockSize) {
initializeCudaContext();
const auto& nvrtc = at::globalContext().getNVRTC();
// Launches kernel on current stream
auto stream = at::cuda::getCurrentCUDAStream();
AT_CUDA_DRIVER_CHECK(nvrtc.cuLaunchKernel(
function.function,
nBlocks,
1,
1,
kBlockSize,
1,
1,
0,
stream,
args.data(),
nullptr));
}
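// Example (hypothetical) call sequence tying the two helpers together. The
// argument layout and names below are illustrative assumptions only; the real
// layout depends on the generated kernel's signature:
//
//   NvrtcFunction fn = jit_pwise_function(generated_code, "my_op"); // loads "my_op_kernel"
//   std::array<void*, 7> kernel_args = {&numel, &data_ptrs, &in_calc, &out_calc,
//                                       &loader, &storer, &scalar_val};
//   const int block_size = 128;
//   const int num_blocks = (numel + block_size - 1) / block_size;
//   launch_jitted_pwise_function(fn, kernel_args, num_blocks, block_size);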
}}} // at::cuda::jit
|
90fad78469fdaec41e55dce41d539f219086f942.hip
|
// !!! This is a file automatically generated by hipify!!!
// Optimized using shared memory and on chip memory
// nvcc hw09.cu -o hw09 -lglut -lm -lGLU -lGL
//To stop hit "control c" in the window you launched it from.
#include <GL/glut.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define N 32768
#define halfN N/2
#define BLOCK 256
#define XWindowSize 2500
#define YWindowSize 2500
#define DRAW 10
#define DAMP 1.0
#define DT 0.001
#define STOP_TIME 10.0
#define G 1.0
#define H 1.0
#define EYE 45.0
#define FAR 90.0
// Globals
float4 p[N];
float3 v[N], f[N];
float4 *pos;
float3 *vel, *force;
FILE *data_file, *data_file1, *data_file2;
dim3 block, grid;
int NumberOfGpus, Gpu0Access, Gpu1Access;
const bool UseMultipleGPU = 1;
void CUDAerrorCheck(const char *message){
hipError_t error;
error = hipGetLastError();
if(error != hipSuccess){
printf("\n CUDA ERROR: %s = %s\n", message, hipGetErrorString(error));
exit(0);
}
}
void set_initail_conditions(){
int i,j,k,num,particles_per_side;
float position_start, temp;
float initial_seperation;
temp = pow((float)N,1.0/3.0) + 0.99999;
particles_per_side = temp;
printf("\n cube root of N = %d \n", particles_per_side);
position_start = -(particles_per_side -1.0)/2.0;
initial_seperation = 2.0;
for(i=0; i<N; i++) p[i].w = 1.0;
num = 0;
for(i=0; i<particles_per_side; i++){
for(j=0; j<particles_per_side; j++){
for(k=0; k<particles_per_side; k++){
if(N <= num) break;
p[num].x = position_start + i*initial_seperation;
p[num].y = position_start + j*initial_seperation;
p[num].z = position_start + k*initial_seperation;
v[num].x = 0.0;
v[num].y = 0.0;
v[num].z = 0.0;
num++;
}
}
}
block.x = BLOCK;
block.y = 1;
block.z = 1;
grid.x = (N-1)/block.x + 1;
grid.y = 1;
grid.z = 1;
hipMalloc( (void**)&pos, N *sizeof(float4) );
hipMalloc( (void**)&vel, N *sizeof(float3) );
hipMalloc( (void**)&force, N *sizeof(float3) );
}
void draw_picture(){
int i;
glClear(GL_COLOR_BUFFER_BIT);
glClear(GL_DEPTH_BUFFER_BIT);
glColor3d(1.0,1.0,0.5);
for(i=0; i<N; i++){
glPushMatrix();
glTranslatef(p[i].x, p[i].y, p[i].z);
glutSolidSphere(0.1,20,20);
glPopMatrix();
}
glutSwapBuffers();
}
__device__ float3 getBodyBodyForce(float4 p0, float4 p1){
float3 f;
float dx = p1.x - p0.x;
float dy = p1.y - p0.y;
float dz = p1.z - p0.z;
float r2 = dx*dx + dy*dy + dz*dz;
float r = sqrt(r2);
float force = (G*p0.w*p1.w)/(r2) - (H*p0.w*p1.w)/(r2*r2);
f.x = force*dx/r;
f.y = force*dy/r;
f.z = force*dz/r;
return(f);
}
__global__ void getForcesCollisionDoubleGPU(float4 *pos, float3 *vel, float3 *force){
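 // Tiled O(N^2) force sum: each iteration of the outer loop stages one block-sized
 // tile of positions into shared memory (shPos); every thread then accumulates the
 // pairwise force between its own body (posMe) and all bodies in the staged tile.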
int j,ii;
float3 force_mag, forceSum;
float4 posMe;
__shared__ float4 shPos[BLOCK];
int id = threadIdx.x + blockDim.x*blockIdx.x;
forceSum.x = 0.0;
forceSum.y = 0.0;
forceSum.z = 0.0;
posMe.x = pos[id].x;
posMe.y = pos[id].y;
posMe.z = pos[id].z;
posMe.w = pos[id].w;
for(j=0; j<gridDim.x; j++){
shPos[threadIdx.x] = pos[threadIdx.x + blockDim.x*j];
__syncthreads();
#pragma unroll 32
for(int i=0; i<blockDim.x; i++){
ii = i + blockDim.x*j;
if(ii != id && ii < N){
force_mag = getBodyBodyForce(posMe, shPos[i]);
forceSum.x += force_mag.x;
forceSum.y += force_mag.y;
forceSum.z += force_mag.z;
}
   }
   // Make sure all threads have finished reading shPos before the next tile overwrites it.
   __syncthreads();
  }
if(id < N){
force[id].x = forceSum.x;
force[id].y = forceSum.y;
force[id].z = forceSum.z;
}
}
__global__ void moveBodiesCollisionDoubleGPU(float4 *pos, float3 *vel, float3 *force){
int id = threadIdx.x + blockDim.x*blockIdx.x;
if(id<N){
vel[id].x += ((force[id].x-DAMP*vel[id].x)/pos[id].w)*DT;
vel[id].y += ((force[id].y-DAMP*vel[id].y)/pos[id].w)*DT;
vel[id].z += ((force[id].z-DAMP*vel[id].z)/pos[id].w)*DT;
pos[id].x += vel[id].x*DT;
pos[id].y += vel[id].y*DT;
pos[id].z += vel[id].z*DT;
}
}
void getNumberOfGPUs(){
hipGetDeviceCount(&NumberOfGpus);
printf("\n***** You have %d GPUs available\n", NumberOfGpus);
}
void checkPeerToPeerAccess(){
if(1 < NumberOfGpus && UseMultipleGPU == 1){
hipDeviceCanAccessPeer(&Gpu0Access,0,1);
hipDeviceCanAccessPeer(&Gpu1Access,1,0);
printf("\n***** You will be using %d GPUs\n", NumberOfGpus);
if(Gpu0Access == 0) printf("\nTSU Error: Device0 can not do peer to peer\n");
if(Gpu1Access == 0) printf("\nTSU Error: Device1 can not do peer to peer\n");
hipDeviceEnablePeerAccess(1,0);
}
}
void n_body(){
float dt;
int tdraw = 0;
float time = 0.0;
float elapsedTime;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
dt = DT;
 float4 *pos_gpu0, *pos_gpu1;
 float3 *vel_gpu0, *vel_gpu1;
 pos_gpu0 = pos;
 pos_gpu1 = pos+N/2;
 vel_gpu0 = vel;
 vel_gpu1 = vel+N/2;
 hipSetDevice(0);
 // Lower half of the host arrays p and v goes to device 0's slice.
 hipMemcpy(pos_gpu0, p, N/2*sizeof(float4), hipMemcpyHostToDevice);
 //CUDAerrorCheck("gpu position copy...\n");
 hipMemcpy(vel_gpu0, v, N/2*sizeof(float3), hipMemcpyHostToDevice);
 //CUDAerrorCheck("gpu velocity copy...\n");
 hipSetDevice(1);
 // Upper half of the host arrays goes to device 1's slice.
 hipMemcpy(pos_gpu1, p+N/2, (N-N/2)*sizeof(float4), hipMemcpyHostToDevice);
 //CUDAerrorCheck("gpu position copy...\n");
 hipMemcpy(vel_gpu1, v+N/2, (N-N/2)*sizeof(float3), hipMemcpyHostToDevice);
//CUDAerrorCheck("gpu velocity copy...\n");
block.x = BLOCK;
block.y = 1;
block.z = 1;
grid.x = (N-1)/block.x + 1;
grid.y = 1;
grid.z = 1;
while(time < STOP_TIME){
hipSetDevice(0);
  hipLaunchKernelGGL(( getForcesCollisionDoubleGPU), dim3(grid), dim3(block), 0, 0, pos_gpu0, vel_gpu0, force);
  //CUDAerrorCheck("gpu0 force kernel...\n");
  hipLaunchKernelGGL(( moveBodiesCollisionDoubleGPU), dim3(grid), dim3(block), 0, 0, pos_gpu0, vel_gpu0, force);
  //CUDAerrorCheck("gpu0 move kernel...\n");
  hipSetDevice(1);
  hipLaunchKernelGGL(( getForcesCollisionDoubleGPU), dim3(grid), dim3(block), 0, 0, pos_gpu1, vel_gpu1, force);
  //CUDAerrorCheck("gpu1 force kernel...\n");
  hipLaunchKernelGGL(( moveBodiesCollisionDoubleGPU), dim3(grid), dim3(block), 0, 0, pos_gpu1, vel_gpu1, force);
  //CUDAerrorCheck("gpu1 move kernel...\n");
  hipDeviceSynchronize();
  hipSetDevice(0);
  hipMemcpyPeerAsync(pos_gpu1,1,pos_gpu0,0,(N/2)*sizeof(float4));
  hipMemcpyPeerAsync(vel_gpu1,1,vel_gpu0,0,(N/2)*sizeof(float3));
hipDeviceSynchronize();
//To kill the draw comment out the next 7 lines.
/*if(tdraw == DRAW) {
hipMemcpy(p, p_GPU, N *sizeof(float4), hipMemcpyDeviceToHost);
draw_picture();
tdraw = 0;
}
tdraw++;*/
time += dt;
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("\n\nGPU time = %3.1f milliseconds\n", elapsedTime);
//hipMemcpy( p, p_GPU, N *sizeof(float4), hipMemcpyDeviceToHost );
}
void control(){
set_initail_conditions();
draw_picture();
n_body();
draw_picture();
printf("\n DONE \n");
while(1);
}
void Display(void){
gluLookAt(EYE, EYE, EYE, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
control();
}
void reshape(int w, int h){
glViewport(0, 0, (GLsizei) w, (GLsizei) h);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glFrustum(-0.2, 0.2, -0.2, 0.2, 0.2, FAR);
glMatrixMode(GL_MODELVIEW);
}
int main(int argc, char** argv){
glutInit(&argc,argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_DEPTH | GLUT_RGB);
glutInitWindowSize(XWindowSize,YWindowSize);
glutInitWindowPosition(0,0);
glutCreateWindow("2 Body 3D");
GLfloat light_position[] = {1.0, 1.0, 1.0, 0.0};
GLfloat light_ambient[] = {0.0, 0.0, 0.0, 1.0};
GLfloat light_diffuse[] = {1.0, 1.0, 1.0, 1.0};
GLfloat light_specular[] = {1.0, 1.0, 1.0, 1.0};
GLfloat lmodel_ambient[] = {0.2, 0.2, 0.2, 1.0};
GLfloat mat_specular[] = {1.0, 1.0, 1.0, 1.0};
GLfloat mat_shininess[] = {10.0};
glClearColor(0.0, 0.0, 0.0, 0.0);
glShadeModel(GL_SMOOTH);
glColorMaterial(GL_FRONT, GL_AMBIENT_AND_DIFFUSE);
glLightfv(GL_LIGHT0, GL_POSITION, light_position);
glLightfv(GL_LIGHT0, GL_AMBIENT, light_ambient);
glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse);
glLightfv(GL_LIGHT0, GL_SPECULAR, light_specular);
glLightModelfv(GL_LIGHT_MODEL_AMBIENT, lmodel_ambient);
glMaterialfv(GL_FRONT, GL_SPECULAR, mat_specular);
glMaterialfv(GL_FRONT, GL_SHININESS, mat_shininess);
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glEnable(GL_COLOR_MATERIAL);
glEnable(GL_DEPTH_TEST);
glutDisplayFunc(Display);
glutReshapeFunc(reshape);
glutMainLoop();
return 0;
}
|
90fad78469fdaec41e55dce41d539f219086f942.cu
|
// Optimized using shared memory and on chip memory
// nvcc hw09.cu -o hw09 -lglut -lm -lGLU -lGL
//To stop hit "control c" in the window you launched it from.
#include <GL/glut.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define N 32768
#define halfN N/2
#define BLOCK 256
#define XWindowSize 2500
#define YWindowSize 2500
#define DRAW 10
#define DAMP 1.0
#define DT 0.001
#define STOP_TIME 10.0
#define G 1.0
#define H 1.0
#define EYE 45.0
#define FAR 90.0
// Globals
float4 p[N];
float3 v[N], f[N];
float4 *pos;
float3 *vel, *force;
FILE *data_file, *data_file1, *data_file2;
dim3 block, grid;
int NumberOfGpus, Gpu0Access, Gpu1Access;
const bool UseMultipleGPU = 1;
void CUDAerrorCheck(const char *message){
cudaError_t error;
error = cudaGetLastError();
if(error != cudaSuccess){
printf("\n CUDA ERROR: %s = %s\n", message, cudaGetErrorString(error));
exit(0);
}
}
void set_initail_conditions(){
int i,j,k,num,particles_per_side;
float position_start, temp;
float initial_seperation;
temp = pow((float)N,1.0/3.0) + 0.99999;
particles_per_side = temp;
printf("\n cube root of N = %d \n", particles_per_side);
position_start = -(particles_per_side -1.0)/2.0;
initial_seperation = 2.0;
for(i=0; i<N; i++) p[i].w = 1.0;
num = 0;
for(i=0; i<particles_per_side; i++){
for(j=0; j<particles_per_side; j++){
for(k=0; k<particles_per_side; k++){
if(N <= num) break;
p[num].x = position_start + i*initial_seperation;
p[num].y = position_start + j*initial_seperation;
p[num].z = position_start + k*initial_seperation;
v[num].x = 0.0;
v[num].y = 0.0;
v[num].z = 0.0;
num++;
}
}
}
block.x = BLOCK;
block.y = 1;
block.z = 1;
grid.x = (N-1)/block.x + 1;
grid.y = 1;
grid.z = 1;
cudaMalloc( (void**)&pos, N *sizeof(float4) );
cudaMalloc( (void**)&vel, N *sizeof(float3) );
cudaMalloc( (void**)&force, N *sizeof(float3) );
}
void draw_picture(){
int i;
glClear(GL_COLOR_BUFFER_BIT);
glClear(GL_DEPTH_BUFFER_BIT);
glColor3d(1.0,1.0,0.5);
for(i=0; i<N; i++){
glPushMatrix();
glTranslatef(p[i].x, p[i].y, p[i].z);
glutSolidSphere(0.1,20,20);
glPopMatrix();
}
glutSwapBuffers();
}
__device__ float3 getBodyBodyForce(float4 p0, float4 p1){
float3 f;
float dx = p1.x - p0.x;
float dy = p1.y - p0.y;
float dz = p1.z - p0.z;
float r2 = dx*dx + dy*dy + dz*dz;
float r = sqrt(r2);
float force = (G*p0.w*p1.w)/(r2) - (H*p0.w*p1.w)/(r2*r2);
f.x = force*dx/r;
f.y = force*dy/r;
f.z = force*dz/r;
return(f);
}
__global__ void getForcesCollisionDoubleGPU(float4 *pos, float3 *vel, float3 *force){
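 // Tiled O(N^2) force sum: each iteration of the outer loop stages one block-sized
 // tile of positions into shared memory (shPos); every thread then accumulates the
 // pairwise force between its own body (posMe) and all bodies in the staged tile.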
int j,ii;
float3 force_mag, forceSum;
float4 posMe;
__shared__ float4 shPos[BLOCK];
int id = threadIdx.x + blockDim.x*blockIdx.x;
forceSum.x = 0.0;
forceSum.y = 0.0;
forceSum.z = 0.0;
posMe.x = pos[id].x;
posMe.y = pos[id].y;
posMe.z = pos[id].z;
posMe.w = pos[id].w;
for(j=0; j<gridDim.x; j++){
shPos[threadIdx.x] = pos[threadIdx.x + blockDim.x*j];
__syncthreads();
#pragma unroll 32
for(int i=0; i<blockDim.x; i++){
ii = i + blockDim.x*j;
if(ii != id && ii < N){
force_mag = getBodyBodyForce(posMe, shPos[i]);
forceSum.x += force_mag.x;
forceSum.y += force_mag.y;
forceSum.z += force_mag.z;
}
  }
  // Make sure all threads have finished reading shPos before the next tile overwrites it.
  __syncthreads();
 }
if(id < N){
force[id].x = forceSum.x;
force[id].y = forceSum.y;
force[id].z = forceSum.z;
}
}
__global__ void moveBodiesCollisionDoubleGPU(float4 *pos, float3 *vel, float3 *force){
int id = threadIdx.x + blockDim.x*blockIdx.x;
if(id<N){
vel[id].x += ((force[id].x-DAMP*vel[id].x)/pos[id].w)*DT;
vel[id].y += ((force[id].y-DAMP*vel[id].y)/pos[id].w)*DT;
vel[id].z += ((force[id].z-DAMP*vel[id].z)/pos[id].w)*DT;
pos[id].x += vel[id].x*DT;
pos[id].y += vel[id].y*DT;
pos[id].z += vel[id].z*DT;
}
}
void getNumberOfGPUs(){
cudaGetDeviceCount(&NumberOfGpus);
printf("\n***** You have %d GPUs available\n", NumberOfGpus);
}
void checkPeerToPeerAccess(){
if(1 < NumberOfGpus && UseMultipleGPU == 1){
cudaDeviceCanAccessPeer(&Gpu0Access,0,1);
cudaDeviceCanAccessPeer(&Gpu1Access,1,0);
printf("\n***** You will be using %d GPUs\n", NumberOfGpus);
if(Gpu0Access == 0) printf("\nTSU Error: Device0 can not do peer to peer\n");
if(Gpu1Access == 0) printf("\nTSU Error: Device1 can not do peer to peer\n");
cudaDeviceEnablePeerAccess(1,0);
}
}
void n_body(){
float dt;
int tdraw = 0;
float time = 0.0;
float elapsedTime;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
dt = DT;
 float4 *pos_gpu0, *pos_gpu1;
 float3 *vel_gpu0, *vel_gpu1;
 pos_gpu0 = pos;
 pos_gpu1 = pos+N/2;
 vel_gpu0 = vel;
 vel_gpu1 = vel+N/2;
 cudaSetDevice(0);
 // Lower half of the host arrays p and v goes to device 0's slice.
 cudaMemcpy(pos_gpu0, p, N/2*sizeof(float4), cudaMemcpyHostToDevice);
 //CUDAerrorCheck("gpu position copy...\n");
 cudaMemcpy(vel_gpu0, v, N/2*sizeof(float3), cudaMemcpyHostToDevice);
 //CUDAerrorCheck("gpu velocity copy...\n");
 cudaSetDevice(1);
 // Upper half of the host arrays goes to device 1's slice.
 cudaMemcpy(pos_gpu1, p+N/2, (N-N/2)*sizeof(float4), cudaMemcpyHostToDevice);
 //CUDAerrorCheck("gpu position copy...\n");
 cudaMemcpy(vel_gpu1, v+N/2, (N-N/2)*sizeof(float3), cudaMemcpyHostToDevice);
//CUDAerrorCheck("gpu velocity copy...\n");
block.x = BLOCK;
block.y = 1;
block.z = 1;
grid.x = (N-1)/block.x + 1;
grid.y = 1;
grid.z = 1;
while(time < STOP_TIME){
cudaSetDevice(0);
  getForcesCollisionDoubleGPU<<<grid, block>>>(pos_gpu0, vel_gpu0, force);
  //CUDAerrorCheck("gpu0 force kernel...\n");
  moveBodiesCollisionDoubleGPU<<<grid, block>>>(pos_gpu0, vel_gpu0, force);
  //CUDAerrorCheck("gpu0 move kernel...\n");
  cudaSetDevice(1);
  getForcesCollisionDoubleGPU<<<grid, block>>>(pos_gpu1, vel_gpu1, force);
  //CUDAerrorCheck("gpu1 force kernel...\n");
  moveBodiesCollisionDoubleGPU<<<grid, block>>>(pos_gpu1, vel_gpu1, force);
  //CUDAerrorCheck("gpu1 move kernel...\n");
  cudaDeviceSynchronize();
  cudaSetDevice(0);
  cudaMemcpyPeerAsync(pos_gpu1,1,pos_gpu0,0,(N/2)*sizeof(float4));
  cudaMemcpyPeerAsync(vel_gpu1,1,vel_gpu0,0,(N/2)*sizeof(float3));
cudaDeviceSynchronize();
//To kill the draw comment out the next 7 lines.
/*if(tdraw == DRAW) {
cudaMemcpy(p, p_GPU, N *sizeof(float4), cudaMemcpyDeviceToHost);
draw_picture();
tdraw = 0;
}
tdraw++;*/
time += dt;
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("\n\nGPU time = %3.1f milliseconds\n", elapsedTime);
//cudaMemcpy( p, p_GPU, N *sizeof(float4), cudaMemcpyDeviceToHost );
}
void control(){
set_initail_conditions();
draw_picture();
n_body();
draw_picture();
printf("\n DONE \n");
while(1);
}
void Display(void){
gluLookAt(EYE, EYE, EYE, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
control();
}
void reshape(int w, int h){
glViewport(0, 0, (GLsizei) w, (GLsizei) h);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glFrustum(-0.2, 0.2, -0.2, 0.2, 0.2, FAR);
glMatrixMode(GL_MODELVIEW);
}
int main(int argc, char** argv){
glutInit(&argc,argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_DEPTH | GLUT_RGB);
glutInitWindowSize(XWindowSize,YWindowSize);
glutInitWindowPosition(0,0);
glutCreateWindow("2 Body 3D");
GLfloat light_position[] = {1.0, 1.0, 1.0, 0.0};
GLfloat light_ambient[] = {0.0, 0.0, 0.0, 1.0};
GLfloat light_diffuse[] = {1.0, 1.0, 1.0, 1.0};
GLfloat light_specular[] = {1.0, 1.0, 1.0, 1.0};
GLfloat lmodel_ambient[] = {0.2, 0.2, 0.2, 1.0};
GLfloat mat_specular[] = {1.0, 1.0, 1.0, 1.0};
GLfloat mat_shininess[] = {10.0};
glClearColor(0.0, 0.0, 0.0, 0.0);
glShadeModel(GL_SMOOTH);
glColorMaterial(GL_FRONT, GL_AMBIENT_AND_DIFFUSE);
glLightfv(GL_LIGHT0, GL_POSITION, light_position);
glLightfv(GL_LIGHT0, GL_AMBIENT, light_ambient);
glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse);
glLightfv(GL_LIGHT0, GL_SPECULAR, light_specular);
glLightModelfv(GL_LIGHT_MODEL_AMBIENT, lmodel_ambient);
glMaterialfv(GL_FRONT, GL_SPECULAR, mat_specular);
glMaterialfv(GL_FRONT, GL_SHININESS, mat_shininess);
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glEnable(GL_COLOR_MATERIAL);
glEnable(GL_DEPTH_TEST);
glutDisplayFunc(Display);
glutReshapeFunc(reshape);
glutMainLoop();
return 0;
}
|
4ce6683537d3d919ea9c8b8aa607a02481c2363b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "consts.h"
#include <hiprand/hiprand_kernel.h>
#include <cmath>
extern "C" {
__global__
void Init(hiprandState_t *states) {
int thid = blockIdx.x*blockDim.x+threadIdx.x;
hiprand_init(clock64(), thid, 0, &states[thid]);
__syncthreads();
}
}
extern "C" {
__device__
void randInt(float& toRand, int interval, hiprandState_t* state) {
int tmp = hiprand(state);
if(tmp<0) tmp = -tmp;
toRand = (tmp)%interval;
}
}
extern "C" {
__device__
void randFloat(float& toRand, float l, float r, hiprandState_t* state) {
int rand_max = 32767;
float randomVarb;
randInt(randomVarb, rand_max, state);
toRand = ((r - l) * (randomVarb / rand_max)) + l;
}
}
struct Bird {
public:
float x, y, fit, acc, speed;
bool active, moveUp;
__device__
Bird() {
x = INITIAL_POSITION_BIRD_X;
y = (float) SCREEN_H/2;
fit = 0;
speed = 0;
acc = 550;
active = true;
moveUp = false;
}
__device__
void backInitialValues() {
x = INITIAL_POSITION_BIRD_X;
y = (float) SCREEN_H/2;
fit = 0;
speed = 0;
acc = 550;
active = true;
moveUp = false;
}
};
struct Tree {
public:
float x, h1, h2;
bool scored_tree;
__device__
Tree() {}
Tree(int x) : x(x){
}
__device__
void heights(hiprandState_t* state) {
h1 = 0.0;
randInt(h1, RANGE_RAND_DIST_TREES, state);
h1 = h1 + MIN_HEIGHT_FIRST_TREE;
h2 = h1 + DIFF_BETWEEN_H;
scored_tree = false;
}
};
struct List {
public:
Tree trees[6];
int size;
__device__
List() {
size = 0;
}
__device__
void addTree(Tree t) {
trees[size].x = t.x;
trees[size].h1 = t.h1;
trees[size].h2 = t.h2;
trees[size++].scored_tree = t.scored_tree;
}
__device__
void removeFromStart() {
for(int i=1; i<size; i++) {
trees[i-1].x = trees[i].x;
trees[i-1].h1 = trees[i].h1;
trees[i-1].h2 = trees[i].h2;
trees[i-1].scored_tree = trees[i].scored_tree;
}
size--;
}
};
extern "C" {
__device__
void createTree(List& list, float i, hiprandState_t* state) {
{
int thid = blockIdx.x*blockDim.x+threadIdx.x;
Tree t;
t.x = i;
t.heights(state);
list.addTree(t);
}
}
}
extern "C" {
__device__
 void treeToRemove(List& list, Bird& b, int& gameScore)
{
if(!list.trees[0].scored_tree && b.x>(list.trees[0].x+WIDTH_TREE))
{
list.trees[0].scored_tree = true;
gameScore++;
}
}
}
extern "C" {
__device__
void update(Bird& b) {
float t = 1.0/FPS;
if(b.moveUp) {
b.y += b.speed*t;
b.speed = -180;
} else {
b.y += b.speed*t;
b.speed += b.acc*t;
}
}
}
extern "C" {
__device__
void sigmoid(float* weights, float hor, float hei, float speed, float* res) {
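 // Small fully connected network evaluated per bird: the three inputs (horizontal
 // distance to the gap, height difference, vertical speed) are squashed by a
 // logistic function, propagated through one hidden layer, and the single output
 // activation is written to *res. Each thread indexes its own weight slice at
 // offset thid*WEIGHTS_SIZE.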
int thid = blockIdx.x*blockDim.x+threadIdx.x;
float values[wx];
int offset = thid*WEIGHTS_SIZE;
for(int i=0; i<wx; i++) values[i] = 0;
values[0] = 1/(1 + exp(-hor));
values[1] = 1/(1 + exp(-hei));
values[2] = 1/(1 + exp(-speed));
for(int i=0; i<inpVals; i++) {
for(int j=0; j<wy; j++) {
values[j+inpVals] += (weights[offset + i*6 + j]*values[i]);
}
}
for(int i=inpVals; i<wx-1; i++) {
values[i] = 1/(1 + exp(-values[i]));
values[wx-1] += weights[offset+i*wy+0]*values[i];
}
values[wx-1] = 1/(1 + exp(-values[wx-1]));
*res = values[wx-1];
}
}
extern "C" {
__device__
void intersects(Bird& b, float CircX, float CircY, bool& r) {
float DeltaX = CircX - max(b.x, min(CircX, b.x + GIL_SIZE));
float DeltaY = CircY - max(b.y, min(CircY, b.y + GIL_SIZE));
r = (DeltaX*DeltaX + DeltaY*DeltaY) < (CIRC_RAD*CIRC_RAD);
}
}
extern "C" {
__device__
void collisionCheck(List& list, Bird& b) {
bool res;
int siz = list.size;
for(int i=0; i<siz; i++) {
float wspX = b.x + GIL_SIZE;
float wspY = b.y + GIL_SIZE;
if(list.trees[i].x<wspX && list.trees[i].x+WIDTH_TREE>b.x && list.trees[i].h1>b.y) {
b.active = false;
return;
}
if(list.trees[i].x<wspX && list.trees[i].x+WIDTH_TREE>b.x && list.trees[i].h2<wspY) {
b.active = false;
return;
}
float middleX = list.trees[i].x+WIDTH_TREE/2;
float middleY = list.trees[i].h1+(2*CIRC_RAD)/3;
intersects(b, middleX, middleY, res);
if(res) {
b.active = false;
return;
}
middleY = list.trees[i].h2-(2*CIRC_RAD)/3;
intersects(b, middleX, middleY, res);
if(res) {
b.active = false;
return;
}
}
b.active = true;
}
}
extern "C" {
__device__
void droppedTooLow(Bird& bird) {
if(bird.y>=SCREEN_H) bird.active = false;
else bird.active = true;
}
}
extern "C" {
__global__
void TrainBirds(float* weights, float* biases, float* fitness, hiprandState_t* states) {
int thid = blockIdx.x*blockDim.x+threadIdx.x;
int gameScore = 0;
List list;
createTree(list, FIRST_TREE, &states[thid]);
createTree(list, SECOND_TREE, &states[thid]);
createTree(list, THIRD_TREE, &states[thid]);
Bird bird;
while(1) {
treeToRemove(list, bird, gameScore);
for(int i=0; i<list.size; i++)
list.trees[i].x -= MOVE_PIXS_ON_SEC/FPS;
if(list.trees[0].x + WIDTH_TREE < 0) {
list.removeFromStart();
}
if(SCREEN_W - list.trees[list.size-1].x >= DIST_BET_TWO) {
createTree(list, list.trees[list.size-1].x + DIST_BET_TWO, &states[thid]);
}
if(bird.active) {
int idx = ((list.trees[0].x + WIDTH_TREE) > bird.x) ? 0 : 1;
float horizontal = list.trees[idx].x + WIDTH_TREE - bird.x;
float heighDiff = ((list.trees[idx].h1 + list.trees[idx].h2)/2) - bird.y;
float sp = bird.speed;
float val = 0.0;
sigmoid(weights, horizontal, heighDiff, sp, &val);
if(val>biases[thid]) bird.moveUp = true;
else bird.moveUp = false;
update(bird);
collisionCheck(list, bird);
}
if(bird.active) {
droppedTooLow(bird);
}
if(!bird.active) {
int idx = list.trees[0].x+WIDTH_TREE > bird.x ? 0 : 1;
float heighDiff = ((list.trees[idx].h1 + list.trees[idx].h2)/2) - bird.y;
if(heighDiff<0) heighDiff = -heighDiff;
bird.fit += -heighDiff;
break;
} else {
float end = (MOVE_PIXS_ON_SEC/FPS)*250;
bird.fit += MOVE_PIXS_ON_SEC/FPS;
if(bird.fit > end) break;
}
}
fitness[thid] = bird.fit;
__syncthreads();
}
}
extern "C" {
__global__
void Mutation(float* weights, float* biases, hiprandState_t *states) {
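 // Mutation step of the genetic search: birds are grouped into chunks of
 // UNITY_IN_TRAINING/CROSSOVER_BIRDS; the first bird of each chunk (main_id) is
 // left untouched, and every other bird re-draws its weights uniformly within
 // +/-EPS of that parent's weights and its bias within +/-0.05 of the parent's bias.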
int thid = blockDim.x*blockIdx.x + threadIdx.x;
int offset = thid*WEIGHTS_SIZE;
int divider = UNITY_IN_TRAINING/CROSSOVER_BIRDS;
int block = thid/divider;
int main_id = block*divider;
if((thid%divider)>0) {
for(int i=0; i<wx; i++) {
for(int j=0; j<wy; j++) {
float tmp = 0;
float prev = weights[main_id*WEIGHTS_SIZE + i*wy + j];
randFloat(tmp, prev-EPS, prev+EPS, &states[thid]);
weights[offset + i*wy + j] = tmp;
}
}
float temp;
randFloat(temp, biases[main_id]-0.05, biases[main_id]+0.05, &states[thid]);
biases[thid] = temp;
}
__syncthreads();
}
}
|
4ce6683537d3d919ea9c8b8aa607a02481c2363b.cu
|
#include "consts.h"
#include <curand_kernel.h>
#include <cmath>
extern "C" {
__global__
void Init(curandState *states) {
int thid = blockIdx.x*blockDim.x+threadIdx.x;
curand_init(clock64(), thid, 0, &states[thid]);
__syncthreads();
}
}
extern "C" {
__device__
void randInt(float& toRand, int interval, curandState* state) {
int tmp = curand(state);
if(tmp<0) tmp = -tmp;
toRand = (tmp)%interval;
}
}
extern "C" {
__device__
void randFloat(float& toRand, float l, float r, curandState* state) {
int rand_max = 32767;
float randomVarb;
randInt(randomVarb, rand_max, state);
toRand = ((r - l) * (randomVarb / rand_max)) + l;
}
}
struct Bird {
public:
float x, y, fit, acc, speed;
bool active, moveUp;
__device__
Bird() {
x = INITIAL_POSITION_BIRD_X;
y = (float) SCREEN_H/2;
fit = 0;
speed = 0;
acc = 550;
active = true;
moveUp = false;
}
__device__
void backInitialValues() {
x = INITIAL_POSITION_BIRD_X;
y = (float) SCREEN_H/2;
fit = 0;
speed = 0;
acc = 550;
active = true;
moveUp = false;
}
};
struct Tree {
public:
float x, h1, h2;
bool scored_tree;
__device__
Tree() {}
Tree(int x) : x(x){
}
__device__
void heights(curandState* state) {
h1 = 0.0;
randInt(h1, RANGE_RAND_DIST_TREES, state);
h1 = h1 + MIN_HEIGHT_FIRST_TREE;
h2 = h1 + DIFF_BETWEEN_H;
scored_tree = false;
}
};
struct List {
public:
Tree trees[6];
int size;
__device__
List() {
size = 0;
}
__device__
void addTree(Tree t) {
trees[size].x = t.x;
trees[size].h1 = t.h1;
trees[size].h2 = t.h2;
trees[size++].scored_tree = t.scored_tree;
}
__device__
void removeFromStart() {
for(int i=1; i<size; i++) {
trees[i-1].x = trees[i].x;
trees[i-1].h1 = trees[i].h1;
trees[i-1].h2 = trees[i].h2;
trees[i-1].scored_tree = trees[i].scored_tree;
}
size--;
}
};
extern "C" {
__device__
void createTree(List& list, float i, curandState* state) {
{
int thid = blockIdx.x*blockDim.x+threadIdx.x;
Tree t;
t.x = i;
t.heights(state);
list.addTree(t);
}
}
}
extern "C" {
__device__
 void treeToRemove(List& list, Bird& b, int& gameScore)
{
if(!list.trees[0].scored_tree && b.x>(list.trees[0].x+WIDTH_TREE))
{
list.trees[0].scored_tree = true;
gameScore++;
}
}
}
extern "C" {
__device__
void update(Bird& b) {
float t = 1.0/FPS;
if(b.moveUp) {
b.y += b.speed*t;
b.speed = -180;
} else {
b.y += b.speed*t;
b.speed += b.acc*t;
}
}
}
extern "C" {
__device__
void sigmoid(float* weights, float hor, float hei, float speed, float* res) {
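 // Small fully connected network evaluated per bird: the three inputs (horizontal
 // distance to the gap, height difference, vertical speed) are squashed by a
 // logistic function, propagated through one hidden layer, and the single output
 // activation is written to *res. Each thread indexes its own weight slice at
 // offset thid*WEIGHTS_SIZE.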
int thid = blockIdx.x*blockDim.x+threadIdx.x;
float values[wx];
int offset = thid*WEIGHTS_SIZE;
for(int i=0; i<wx; i++) values[i] = 0;
values[0] = 1/(1 + exp(-hor));
values[1] = 1/(1 + exp(-hei));
values[2] = 1/(1 + exp(-speed));
for(int i=0; i<inpVals; i++) {
for(int j=0; j<wy; j++) {
values[j+inpVals] += (weights[offset + i*6 + j]*values[i]);
}
}
for(int i=inpVals; i<wx-1; i++) {
values[i] = 1/(1 + exp(-values[i]));
values[wx-1] += weights[offset+i*wy+0]*values[i];
}
values[wx-1] = 1/(1 + exp(-values[wx-1]));
*res = values[wx-1];
}
}
extern "C" {
__device__
void intersects(Bird& b, float CircX, float CircY, bool& r) {
float DeltaX = CircX - max(b.x, min(CircX, b.x + GIL_SIZE));
float DeltaY = CircY - max(b.y, min(CircY, b.y + GIL_SIZE));
r = (DeltaX*DeltaX + DeltaY*DeltaY) < (CIRC_RAD*CIRC_RAD);
}
}
extern "C" {
__device__
void collisionCheck(List& list, Bird& b) {
bool res;
int siz = list.size;
for(int i=0; i<siz; i++) {
float wspX = b.x + GIL_SIZE;
float wspY = b.y + GIL_SIZE;
if(list.trees[i].x<wspX && list.trees[i].x+WIDTH_TREE>b.x && list.trees[i].h1>b.y) {
b.active = false;
return;
}
if(list.trees[i].x<wspX && list.trees[i].x+WIDTH_TREE>b.x && list.trees[i].h2<wspY) {
b.active = false;
return;
}
float middleX = list.trees[i].x+WIDTH_TREE/2;
float middleY = list.trees[i].h1+(2*CIRC_RAD)/3;
intersects(b, middleX, middleY, res);
if(res) {
b.active = false;
return;
}
middleY = list.trees[i].h2-(2*CIRC_RAD)/3;
intersects(b, middleX, middleY, res);
if(res) {
b.active = false;
return;
}
}
b.active = true;
}
}
extern "C" {
__device__
void droppedTooLow(Bird& bird) {
if(bird.y>=SCREEN_H) bird.active = false;
else bird.active = true;
}
}
extern "C" {
__global__
void TrainBirds(float* weights, float* biases, float* fitness, curandState* states) {
int thid = blockIdx.x*blockDim.x+threadIdx.x;
int gameScore = 0;
List list;
createTree(list, FIRST_TREE, &states[thid]);
createTree(list, SECOND_TREE, &states[thid]);
createTree(list, THIRD_TREE, &states[thid]);
Bird bird;
while(1) {
treeToRemove(list, bird, gameScore);
for(int i=0; i<list.size; i++)
list.trees[i].x -= MOVE_PIXS_ON_SEC/FPS;
if(list.trees[0].x + WIDTH_TREE < 0) {
list.removeFromStart();
}
if(SCREEN_W - list.trees[list.size-1].x >= DIST_BET_TWO) {
createTree(list, list.trees[list.size-1].x + DIST_BET_TWO, &states[thid]);
}
if(bird.active) {
int idx = ((list.trees[0].x + WIDTH_TREE) > bird.x) ? 0 : 1;
float horizontal = list.trees[idx].x + WIDTH_TREE - bird.x;
float heighDiff = ((list.trees[idx].h1 + list.trees[idx].h2)/2) - bird.y;
float sp = bird.speed;
float val = 0.0;
sigmoid(weights, horizontal, heighDiff, sp, &val);
if(val>biases[thid]) bird.moveUp = true;
else bird.moveUp = false;
update(bird);
collisionCheck(list, bird);
}
if(bird.active) {
droppedTooLow(bird);
}
if(!bird.active) {
int idx = list.trees[0].x+WIDTH_TREE > bird.x ? 0 : 1;
float heighDiff = ((list.trees[idx].h1 + list.trees[idx].h2)/2) - bird.y;
if(heighDiff<0) heighDiff = -heighDiff;
bird.fit += -heighDiff;
break;
} else {
float end = (MOVE_PIXS_ON_SEC/FPS)*250;
bird.fit += MOVE_PIXS_ON_SEC/FPS;
if(bird.fit > end) break;
}
}
fitness[thid] = bird.fit;
__syncthreads();
}
}
extern "C" {
__global__
void Mutation(float* weights, float* biases, curandState *states) {
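 // Mutation step of the genetic search: birds are grouped into chunks of
 // UNITY_IN_TRAINING/CROSSOVER_BIRDS; the first bird of each chunk (main_id) is
 // left untouched, and every other bird re-draws its weights uniformly within
 // +/-EPS of that parent's weights and its bias within +/-0.05 of the parent's bias.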
int thid = blockDim.x*blockIdx.x + threadIdx.x;
int offset = thid*WEIGHTS_SIZE;
int divider = UNITY_IN_TRAINING/CROSSOVER_BIRDS;
int block = thid/divider;
int main_id = block*divider;
if((thid%divider)>0) {
for(int i=0; i<wx; i++) {
for(int j=0; j<wy; j++) {
float tmp = 0;
float prev = weights[main_id*WEIGHTS_SIZE + i*wy + j];
randFloat(tmp, prev-EPS, prev+EPS, &states[thid]);
weights[offset + i*wy + j] = tmp;
}
}
float temp;
randFloat(temp, biases[main_id]-0.05, biases[main_id]+0.05, &states[thid]);
biases[thid] = temp;
}
__syncthreads();
}
}
|
2f2df220ed0fbc38131c826a670b0b223fe90cd6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "lodepng.h"
#include <stdio.h>
#include <time.h>
__global__ void rectifyParallel(unsigned char* original_img, unsigned char* new_img, unsigned int num_threads, unsigned int img_size)
{
 // a single block is launched: each thread strides through the whole image in steps of num_threads
for (int i = threadIdx.x; i < img_size; i += num_threads) {
if (original_img[i] < 127) {
new_img[i] = 127;
}
else {
new_img[i] = original_img[i];
}
}
}
void rectifySequential(unsigned char* original_img, unsigned char* new_img, unsigned int img_size) {
// iterate through all elements of og_img
for (int i = 0; i < img_size; i++) {
if (original_img[i] < 127) {
new_img[i] = 127;
}
else {
new_img[i] = original_img[i];
}
}
}
int main(int argc, char *argv[]) {
// ~~~~~~~~~~~~~~~~~~~~~~~
// step 1: parse arguments
// ~~~~~~~~~~~~~~~~~~~~~~~
if (argc != 4) {
printf("Error: Input arguments are of format:\n./rectify <input filename> <output filename> <# threads>");
return -1;
}
unsigned int num_threads = atoi(argv[3]);
if (num_threads < 1) {
printf("Error: '%u' is an invalid number of threads.\nNumber of threads must be greater than zero.", num_threads);
return -1;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// step 2: read in input image from file
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
unsigned char* original_img;
unsigned char* new_img;
unsigned int img_width, img_height;
int error = lodepng_decode32_file(&original_img, &img_width, &img_height, argv[1]);
if (error) {
printf("Error %d: %s\n", error, lodepng_error_text(error));
return -1;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// step 3: make variables available to both CPU and GPU
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
unsigned char* original_img_cuda;
unsigned char* new_img_cuda;
// allocate for CPU
unsigned int img_size = img_width * img_height * 4 * sizeof(unsigned char);
new_img = (unsigned char*)malloc(img_size);
// allocate for GPU
hipMalloc((void**)&original_img_cuda, img_size);
hipMalloc((void**)&new_img_cuda, img_size);
hipMemcpy(original_img_cuda, original_img, img_size, hipMemcpyHostToDevice);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// step 4: call parallelized rectify function, record performance
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// start timing GPU
clock_t startGPU = clock();
hipLaunchKernelGGL(( rectifyParallel), dim3(1), dim3(num_threads), 0, 0, original_img_cuda, new_img_cuda, num_threads, img_size);
hipDeviceSynchronize();
// record performance
printf("Parallel: %u\n", clock() - startGPU);
hipMemcpy(new_img, new_img_cuda, img_size, hipMemcpyDeviceToHost);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// step 5: write output image from parallelized rectify function to file
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
error = lodepng_encode32_file(argv[2], new_img, img_width, img_height);
if (error) {
printf("Error %d: %s\n", error, lodepng_error_text(error));
return -1;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// step 6: call sequential rectify function, record performance
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// start timing CPU
clock_t startCPU = clock();
rectifySequential(original_img, new_img, img_size);
// record performance
printf("Sequential: %u\n", clock() - startCPU);
// ~~~~~~~~~~~~~~~~~~~~~
// step 7: free at last!
// ~~~~~~~~~~~~~~~~~~~~~
free(original_img);
free(new_img);
hipFree(original_img_cuda);
hipFree(new_img_cuda);
return 0;
}
|
2f2df220ed0fbc38131c826a670b0b223fe90cd6.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "lodepng.h"
#include <stdio.h>
#include <time.h>
__global__ void rectifyParallel(unsigned char* original_img, unsigned char* new_img, unsigned int num_threads, unsigned int img_size)
{
 // a single block is launched: each thread strides through the whole image in steps of num_threads
for (int i = threadIdx.x; i < img_size; i += num_threads) {
if (original_img[i] < 127) {
new_img[i] = 127;
}
else {
new_img[i] = original_img[i];
}
}
}
void rectifySequential(unsigned char* original_img, unsigned char* new_img, unsigned int img_size) {
// iterate through all elements of og_img
for (int i = 0; i < img_size; i++) {
if (original_img[i] < 127) {
new_img[i] = 127;
}
else {
new_img[i] = original_img[i];
}
}
}
int main(int argc, char *argv[]) {
// ~~~~~~~~~~~~~~~~~~~~~~~
// step 1: parse arguments
// ~~~~~~~~~~~~~~~~~~~~~~~
if (argc != 4) {
printf("Error: Input arguments are of format:\n./rectify <input filename> <output filename> <# threads>");
return -1;
}
unsigned int num_threads = atoi(argv[3]);
if (num_threads < 1) {
printf("Error: '%u' is an invalid number of threads.\nNumber of threads must be greater than zero.", num_threads);
return -1;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// step 2: read in input image from file
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
unsigned char* original_img;
unsigned char* new_img;
unsigned int img_width, img_height;
int error = lodepng_decode32_file(&original_img, &img_width, &img_height, argv[1]);
if (error) {
printf("Error %d: %s\n", error, lodepng_error_text(error));
return -1;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// step 3: make variables available to both CPU and GPU
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
unsigned char* original_img_cuda;
unsigned char* new_img_cuda;
// allocate for CPU
unsigned int img_size = img_width * img_height * 4 * sizeof(unsigned char);
new_img = (unsigned char*)malloc(img_size);
// allocate for GPU
cudaMalloc((void**)&original_img_cuda, img_size);
cudaMalloc((void**)&new_img_cuda, img_size);
cudaMemcpy(original_img_cuda, original_img, img_size, cudaMemcpyHostToDevice);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// step 4: call parallelized rectify function, record performance
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// start timing GPU
clock_t startGPU = clock();
rectifyParallel<<<1, num_threads>>>(original_img_cuda, new_img_cuda, num_threads, img_size);
cudaDeviceSynchronize();
// record performance
printf("Parallel: %u\n", clock() - startGPU);
cudaMemcpy(new_img, new_img_cuda, img_size, cudaMemcpyDeviceToHost);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// step 5: write output image from parallelized rectify function to file
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
error = lodepng_encode32_file(argv[2], new_img, img_width, img_height);
if (error) {
printf("Error %d: %s\n", error, lodepng_error_text(error));
return -1;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// step 6: call sequential rectify function, record performance
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// start timing CPU
clock_t startCPU = clock();
rectifySequential(original_img, new_img, img_size);
// record performance
printf("Sequential: %u\n", clock() - startCPU);
// ~~~~~~~~~~~~~~~~~~~~~
// step 7: free at last!
// ~~~~~~~~~~~~~~~~~~~~~
free(original_img);
free(new_img);
cudaFree(original_img_cuda);
cudaFree(new_img_cuda);
return 0;
}
|
f17763c59d5da3d2d10dd07e42d113b1b58f8e6e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "complex.h"
void __global__ gpuDFT(unsigned npts, float * pts, unsigned nq, float * qvals, cucomplex_t * ft) {
 /* lucky we have exactly 4000 particles. 4000 X 3 should fit on most modern GPUs.
* If more particles are needed in future, we need to do some memory maneuvering gymnastics.
*/
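 // One thread per scattering vector: thread i accumulates the direct DFT sum
 // over all npts particle positions, ft[i] = sum_j exp(-I * q_i . r_j),
 // which is O(nq * npts) work overall.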
const cucomplex_t NEG_I = make_cuFloatComplex(0.f, -1.f);
// compute dft
unsigned i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < nq) {
ft[i] = make_cuFloatComplex(0.f, 0.f);
for (unsigned j = 0; j < npts; j++) {
float q_r = 0;
for (unsigned k = 0; k < 3; k++)
q_r += qvals[3 * i + k] * pts[3 * j + k];
ft[i] = ft[i] + Cexpf(NEG_I * q_r);
}
}
}
void cudft(unsigned npts, float * pts, unsigned nq, float * qvals,
complex_t * output) {
// allocate memory on device
float * dpts, * dqvals;
hipMalloc((void **) &dpts, sizeof(float) * npts * 3);
hipMalloc((void **) &dqvals, sizeof(float) * nq * 3);
// copy arrays to device memory
hipMemcpy(dpts, pts, sizeof(float) * 3 * npts, hipMemcpyHostToDevice);
hipMemcpy(dqvals, qvals, sizeof(float) * 3 * nq, hipMemcpyHostToDevice);
// allocate memory for output
cucomplex_t * dft = NULL;
hipMalloc((void **) &dft, sizeof(cucomplex_t) * nq);
// device parameters
unsigned threads = 256;
unsigned blocks = nq / threads;
if (nq % threads != 0) blocks++;
hipLaunchKernelGGL(( gpuDFT), dim3(blocks), dim3(threads) , 0, 0, npts, dpts, nq, dqvals, dft);
// copy results back to host
hipMemcpy(output, dft, sizeof(complex_t) * nq, hipMemcpyDeviceToHost);
// free memory
hipFree(dpts);
hipFree(dqvals);
hipFree(dft);
}
|
f17763c59d5da3d2d10dd07e42d113b1b58f8e6e.cu
|
#include <cuda_runtime.h>
#include "complex.h"
void __global__ gpuDFT(unsigned npts, float * pts, unsigned nq, float * qvals, cucomplex_t * ft) {
 /* lucky we have exactly 4000 particles. 4000 X 3 should fit on most modern GPUs.
* If more particles are needed in future, we need to do some memory maneuvering gymnastics.
*/
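 // One thread per scattering vector: thread i accumulates the direct DFT sum
 // over all npts particle positions, ft[i] = sum_j exp(-I * q_i . r_j),
 // which is O(nq * npts) work overall.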
const cucomplex_t NEG_I = make_cuFloatComplex(0.f, -1.f);
// compute dft
unsigned i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < nq) {
ft[i] = make_cuFloatComplex(0.f, 0.f);
for (unsigned j = 0; j < npts; j++) {
float q_r = 0;
for (unsigned k = 0; k < 3; k++)
q_r += qvals[3 * i + k] * pts[3 * j + k];
ft[i] = ft[i] + Cexpf(NEG_I * q_r);
}
}
}
void cudft(unsigned npts, float * pts, unsigned nq, float * qvals,
complex_t * output) {
// allocate memory on device
float * dpts, * dqvals;
cudaMalloc((void **) &dpts, sizeof(float) * npts * 3);
cudaMalloc((void **) &dqvals, sizeof(float) * nq * 3);
// copy arrays to device memory
cudaMemcpy(dpts, pts, sizeof(float) * 3 * npts, cudaMemcpyHostToDevice);
cudaMemcpy(dqvals, qvals, sizeof(float) * 3 * nq, cudaMemcpyHostToDevice);
// allocate memory for output
cucomplex_t * dft = NULL;
cudaMalloc((void **) &dft, sizeof(cucomplex_t) * nq);
// device parameters
unsigned threads = 256;
unsigned blocks = nq / threads;
if (nq % threads != 0) blocks++;
gpuDFT<<< blocks, threads >>> (npts, dpts, nq, dqvals, dft);
// copy results back to host
cudaMemcpy(output, dft, sizeof(complex_t) * nq, cudaMemcpyDeviceToHost);
// free memory
cudaFree(dpts);
cudaFree(dqvals);
cudaFree(dft);
}
|
16abaa2aacd8d5c0e3f27b3c445e9cee6b5c535a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// SPDX-FileCopyrightText: 2020 CERN
// SPDX-License-Identifier: Apache-2.0
/**
* @file test_track_block.cu
* @brief Unit test for the BlockData concurrent container.
* @author Andrei Gheata ([email protected])
*/
#include <iostream>
#include <cassert>
#include <AdePT/BlockData.h>
struct MyTrack {
int index{0};
double pos[3]{0};
double dir[3]{0};
bool flag1;
bool flag2;
};
// Kernel function to process the next free track in a block
__global__ void testTrackBlock(adept::BlockData<MyTrack> *block)
{
auto track = block->NextElement();
if (!track) return;
int id = blockIdx.x * blockDim.x + threadIdx.x;
track->index = id;
}
// Kernel function to process the next free track in a block
__global__ void releaseTrack(adept::BlockData<MyTrack> *block)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
block->ReleaseElement(id);
}
///______________________________________________________________________________________
int main(void)
{
using Block_t = adept::BlockData<MyTrack>;
const char *result[2] = {"FAILED", "OK"};
// Track capacity of the block
constexpr int capacity = 1 << 20;
 // Define the kernel granularity: 10K blocks of 32 threads each
constexpr dim3 nblocks(10000), nthreads(32);
// Allocate a block of tracks with capacity larger than the total number of spawned threads
// Note that if we want to allocate several consecutive block in a buffer, we have to use
// Block_t::SizeOfAlignAware rather than SizeOfInstance to get the space needed per block
bool testOK = true;
bool success = true;
// Test simple allocation/de-allocation on host
std::cout << " host allocation MakeInstance ... ";
auto h_block = Block_t::MakeInstance(1024);
testOK &= h_block != nullptr;
std::cout << result[testOK] << "\n";
success &= testOK;
// Test using the slots on the block (more than existing)
std::cout << " host NextElement ... ";
testOK = true;
size_t checksum1 = 0;
for (auto i = 0; i < 2048; ++i) {
auto track = h_block->NextElement();
if (i >= 1024) testOK &= track == nullptr;
// Assign current index to the current track
if (track) {
track->index = i;
checksum1 += i;
}
}
testOK = h_block->GetNused() == 1024;
std::cout << result[testOK] << "\n";
success &= testOK;
// Create another block into adopted memory on host
testOK = true;
char *buff_host = new char[Block_t::SizeOfInstance(2048)];
std::cout << " host MakeCopyAt ... ";
// Test copying a block into another
auto h_block2 = Block_t::MakeCopyAt(*h_block, buff_host);
size_t checksum2 = 0;
for (auto i = 0; i < 1024; ++i) {
auto track = h_block2->NextElement();
assert(track);
checksum2 += track->index;
}
testOK = checksum1 == checksum2;
std::cout << result[testOK] << "\n";
success &= testOK;
 // Release some elements and validate
testOK = true;
std::cout << " host ReleaseElement ... ";
for (auto i = 0; i < 10; ++i)
h_block2->ReleaseElement(i);
testOK &= h_block2->GetNused() == (1024 - 10);
testOK &= h_block2->GetNholes() == 10;
std::cout << result[testOK] << "\n";
success &= testOK;
// Release allocated blocks
Block_t::ReleaseInstance(h_block); // mandatory, frees memory for blocks allocated with MakeInstance
Block_t::ReleaseInstance(h_block2); // will not do anything since block adopted memory
delete[] buff_host; // Only this will actually free the memory
// Create a large block on the device
testOK = true;
std::cout << " host MakeInstanceAt ... ";
size_t blocksize = Block_t::SizeOfInstance(capacity);
char *buffer = nullptr;
hipMallocManaged(&buffer, blocksize);
auto block = Block_t::MakeInstanceAt(capacity, buffer);
testOK &= block != nullptr;
std::cout << result[testOK] << "\n";
success &= testOK;
std::cout << " device NextElement ... ";
testOK = true;
// Allow memory to reach the device
hipDeviceSynchronize();
// Launch a kernel processing tracks
hipLaunchKernelGGL(( testTrackBlock), dim3(nblocks), dim3(nthreads), 0, 0, block); ///< note that we are passing a host block type allocated on device
///< memory - works because the layout is the same
// Allow all warps to finish
hipDeviceSynchronize();
// The number of used tracks should be equal to the number of spawned threads
testOK &= block->GetNused() == nblocks.x * nthreads.x;
std::cout << result[testOK] << "\n";
success &= testOK;
 // Compute the sum of assigned track indices, which has to match the sum from 0 to nblocks*nthreads-1
// (the execution order is arbitrary, but all thread indices must be distributed)
unsigned long long counter1 = 0, counter2 = 0;
testOK = true;
std::cout << " device concurrency checksum ... ";
for (auto i = 0; i < nblocks.x * nthreads.x; ++i) {
counter1 += i;
counter2 += (*block)[i].index;
}
testOK &= counter1 == counter2;
std::cout << result[testOK] << "\n";
success &= testOK;
// Now release 32K tracks
testOK = true;
std::cout << " device ReleaseElement ... ";
hipLaunchKernelGGL(( releaseTrack), dim3(1000), dim3(32), 0, 0, block);
hipDeviceSynchronize();
testOK &= block->GetNused() == nblocks.x * nthreads.x - 32000;
testOK &= block->GetNholes() == 32000;
// Now allocate in the holes
hipLaunchKernelGGL(( testTrackBlock), dim3(10), dim3(32), 0, 0, block);
hipDeviceSynchronize();
testOK &= block->GetNholes() == (32000 - 320);
std::cout << result[testOK] << "\n";
hipFree(buffer);
if (!success) return 1;
return 0;
}
|
16abaa2aacd8d5c0e3f27b3c445e9cee6b5c535a.cu
|
// SPDX-FileCopyrightText: 2020 CERN
// SPDX-License-Identifier: Apache-2.0
/**
* @file test_track_block.cu
* @brief Unit test for the BlockData concurrent container.
* @author Andrei Gheata ([email protected])
*/
#include <iostream>
#include <cassert>
#include <AdePT/BlockData.h>
struct MyTrack {
int index{0};
double pos[3]{0};
double dir[3]{0};
bool flag1;
bool flag2;
};
// Kernel function to process the next free track in a block
__global__ void testTrackBlock(adept::BlockData<MyTrack> *block)
{
auto track = block->NextElement();
if (!track) return;
int id = blockIdx.x * blockDim.x + threadIdx.x;
track->index = id;
}
// Kernel function to process the next free track in a block
__global__ void releaseTrack(adept::BlockData<MyTrack> *block)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
block->ReleaseElement(id);
}
///______________________________________________________________________________________
int main(void)
{
using Block_t = adept::BlockData<MyTrack>;
const char *result[2] = {"FAILED", "OK"};
// Track capacity of the block
constexpr int capacity = 1 << 20;
 // Define the kernel granularity: 10K blocks of 32 threads each
constexpr dim3 nblocks(10000), nthreads(32);
// Allocate a block of tracks with capacity larger than the total number of spawned threads
// Note that if we want to allocate several consecutive block in a buffer, we have to use
// Block_t::SizeOfAlignAware rather than SizeOfInstance to get the space needed per block
bool testOK = true;
bool success = true;
// Test simple allocation/de-allocation on host
std::cout << " host allocation MakeInstance ... ";
auto h_block = Block_t::MakeInstance(1024);
testOK &= h_block != nullptr;
std::cout << result[testOK] << "\n";
success &= testOK;
// Test using the slots on the block (more than existing)
std::cout << " host NextElement ... ";
testOK = true;
size_t checksum1 = 0;
for (auto i = 0; i < 2048; ++i) {
auto track = h_block->NextElement();
if (i >= 1024) testOK &= track == nullptr;
// Assign current index to the current track
if (track) {
track->index = i;
checksum1 += i;
}
}
testOK = h_block->GetNused() == 1024;
std::cout << result[testOK] << "\n";
success &= testOK;
// Create another block into adopted memory on host
testOK = true;
char *buff_host = new char[Block_t::SizeOfInstance(2048)];
std::cout << " host MakeCopyAt ... ";
// Test copying a block into another
auto h_block2 = Block_t::MakeCopyAt(*h_block, buff_host);
size_t checksum2 = 0;
for (auto i = 0; i < 1024; ++i) {
auto track = h_block2->NextElement();
assert(track);
checksum2 += track->index;
}
testOK = checksum1 == checksum2;
std::cout << result[testOK] << "\n";
success &= testOK;
 // Release some elements and validate
testOK = true;
std::cout << " host ReleaseElement ... ";
for (auto i = 0; i < 10; ++i)
h_block2->ReleaseElement(i);
testOK &= h_block2->GetNused() == (1024 - 10);
testOK &= h_block2->GetNholes() == 10;
std::cout << result[testOK] << "\n";
success &= testOK;
// Release allocated blocks
Block_t::ReleaseInstance(h_block); // mandatory, frees memory for blocks allocated with MakeInstance
Block_t::ReleaseInstance(h_block2); // will not do anything since block adopted memory
delete[] buff_host; // Only this will actually free the memory
// Create a large block on the device
testOK = true;
std::cout << " host MakeInstanceAt ... ";
size_t blocksize = Block_t::SizeOfInstance(capacity);
char *buffer = nullptr;
cudaMallocManaged(&buffer, blocksize);
auto block = Block_t::MakeInstanceAt(capacity, buffer);
testOK &= block != nullptr;
std::cout << result[testOK] << "\n";
success &= testOK;
std::cout << " device NextElement ... ";
testOK = true;
// Allow memory to reach the device
cudaDeviceSynchronize();
// Launch a kernel processing tracks
testTrackBlock<<<nblocks, nthreads>>>(block); ///< note that we are passing a host block type allocated on device
///< memory - works because the layout is the same
// Allow all warps to finish
cudaDeviceSynchronize();
// The number of used tracks should be equal to the number of spawned threads
testOK &= block->GetNused() == nblocks.x * nthreads.x;
std::cout << result[testOK] << "\n";
success &= testOK;
 // Compute the sum of assigned track indices, which has to match the sum from 0 to nblocks*nthreads-1
// (the execution order is arbitrary, but all thread indices must be distributed)
unsigned long long counter1 = 0, counter2 = 0;
testOK = true;
std::cout << " device concurrency checksum ... ";
for (auto i = 0; i < nblocks.x * nthreads.x; ++i) {
counter1 += i;
counter2 += (*block)[i].index;
}
testOK &= counter1 == counter2;
std::cout << result[testOK] << "\n";
success &= testOK;
// Now release 32K tracks
testOK = true;
std::cout << " device ReleaseElement ... ";
releaseTrack<<<1000, 32>>>(block);
cudaDeviceSynchronize();
testOK &= block->GetNused() == nblocks.x * nthreads.x - 32000;
testOK &= block->GetNholes() == 32000;
// Now allocate in the holes
testTrackBlock<<<10, 32>>>(block);
cudaDeviceSynchronize();
testOK &= block->GetNholes() == (32000 - 320);
std::cout << result[testOK] << "\n";
cudaFree(buffer);
if (!success) return 1;
return 0;
}
|
4d998cacc81e7221dd0a24f4bfb21f4524fe666f.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* 3DConvolution.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include "polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
#define GPU_DEVICE 0
/* Problem size */
#define NI 256
#define NJ 256
#define NK 256
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 1024
#define DIM_THREAD_BLOCK_Y 1
#define DIM_BLOCK NJ*NI/4 // 256*256/4 = 16384
#define DIM_BLOCK_VECTOR DIM_BLOCK/2 //8192
#define NUM NI*NJ*NK
#define NUM_VECTOR NUM/2
#define NUM_SM 80
#define NUM_SM_COMPUTE 77
#define NUM_SM_HtoD 2
#define OFFSET NUM_SM_HtoD * DIM_THREAD_BLOCK_X
#define NUM_SM_DtoH 1
#define IN_CHUNK_SIZE 32
 #define IN_CHUNK NJ*NI/8/NUM_SM_HtoD/IN_CHUNK_SIZE // 256*256/8/2/32 = 128
 #define IN_CHUNK_OFFSET OFFSET*IN_CHUNK_SIZE // 2048*32 = 65536
#define OUT_CHUNK_SIZE 32
/* Can switch DATA_TYPE between float and double */
typedef double DATA_TYPE;
#define DUMMY_N 1000
__device__ void dummy_comp()
{
double sum = 0.0;
for (int i = 0; i < DUMMY_N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
void conv3D(DATA_TYPE* A, DATA_TYPE* B)
{
int i, j, k;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +2; c21 = +5; c31 = -8;
c12 = -3; c22 = +6; c32 = -9;
c13 = +4; c23 = +7; c33 = +10;
for (i = 1; i < NI - 1; ++i) // 0
{
for (j = 1; j < NJ - 1; ++j) // 1
{
for (k = 1; k < NK -1; ++k) // 2
{
//printf("i:%d\nj:%d\nk:%d\n", i, j, k);
B[i*(NK * NJ) + j*NK + k] = c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c21 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c23 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c31 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c33 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c12 * A[(i + 0)*(NK * NJ) + (j - 1)*NK + (k + 0)] + c22 * A[(i + 0)*(NK * NJ) + (j + 0)*NK + (k + 0)]
+ c32 * A[(i + 0)*(NK * NJ) + (j + 1)*NK + (k + 0)] + c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k + 1)]
+ c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k + 1)] + c21 * A[(i - 1)*(NK * NJ) + (j + 0)*NK + (k + 1)]
+ c23 * A[(i + 1)*(NK * NJ) + (j + 0)*NK + (k + 1)] + c31 * A[(i - 1)*(NK * NJ) + (j + 1)*NK + (k + 1)]
+ c33 * A[(i + 1)*(NK * NJ) + (j + 1)*NK + (k + 1)];
}
}
}
}
__device__ int flag_global_read(volatile int * flag, int rid)
{
return(flag[rid]);
}
void init(DATA_TYPE* A)
{
int i, j, k;
for (i = 0; i < NI; ++i)
{
for (j = 0; j < NJ; ++j)
{
for (k = 0; k < NK; ++k)
{
A[i*(NK * NJ) + j*NK + k] = i % 12 + 2 * (j % 7) + 3 * (k % 13);
}
}
}
}
void compareResults(DATA_TYPE* B, DATA_TYPE* B_outputFromGpu)
{
int i, j, k, fail;
fail = 0;
// Compare result from cpu and gpu...
for (i = 1; i < NI - 1; ++i) // 0
{
for (j = 1; j < NJ - 1; ++j) // 1
{
for (k = 1; k < NK - 1; ++k) // 2
{
if (percentDiff(B[i*(NK * NJ) + j*NK + k], B_outputFromGpu[i*(NK * NJ) + j*NK + k]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
//printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
hipSetDevice( 0 );
}
__global__ void convolution3D_kernel(DATA_TYPE *A_host, DATA_TYPE *B_host, DATA_TYPE *A, DATA_TYPE *B, int *inflag, int* outflag)
{
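 // Persistent-block pipeline: the first NUM_SM_HtoD blocks stream the input from
 // the host-visible buffer A_host into A (publishing progress through inflag),
 // the next NUM_SM_COMPUTE blocks spin on inflag and apply the 3D stencil into B
 // (bumping outflag per finished region), and the remaining block copies finished
 // parts of B back to B_host once outflag shows both contributing compute blocks
 // are done. The host buffers are assumed to be accessible from the device
 // (e.g. mapped/pinned or managed memory).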
if (blockIdx.x < NUM_SM_HtoD ){ //copy kernel HtoD
const int idx = threadIdx.x;
const int bidx = blockIdx.x;
int chunk_offset;
for (int i = 0; i<IN_CHUNK;i++ ){
chunk_offset=i*IN_CHUNK_OFFSET;
for (int k = (chunk_offset+bidx*1024+idx);k < ( chunk_offset+IN_CHUNK_OFFSET ) ; k+= OFFSET ){
reinterpret_cast<double2*>(A)[k] = reinterpret_cast<double2*>(A_host)[k];
}
__syncthreads();
__threadfence();
if ( idx < IN_CHUNK_SIZE ){
atomicOr(&inflag[ (IN_CHUNK_SIZE*2*i) + idx*2+ bidx],1);
}
}
}else if (blockIdx.x < (NUM_SM_COMPUTE+NUM_SM_HtoD )){ //compute
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +2; c21 = +5; c31 = -8;
c12 = -3; c22 = +6; c32 = -9;
c13 = +4; c23 = +7; c33 = +10;
for (int bid= blockIdx.x+62 ; bid < (DIM_BLOCK-64); bid+=NUM_SM_COMPUTE){
int i = bid /64 ;
int j = (bid % 64) *4 + (threadIdx.x /256);
int k = threadIdx.x % 256;
int fid = bid >> 1;
if(threadIdx.x==0) //spin ....wait for data ready
{
while( ( atomicAnd(&inflag[fid],1) == 0 ))
//while (flag_global_read(inflag,i+1 )==0)
{
//dummy_comp();
}
}
if(threadIdx.x==1) //spin ....wait for data ready
{
while( ( atomicAnd(&inflag[fid+1],1) == 0 ))
//while (flag_global_read(inflag,i+1 )==0)
{
//dummy_comp();
}
}
if(threadIdx.x==2) //spin ....wait for data ready
{
while( ( atomicAnd(&inflag[fid+31],1) == 0 ))
//while (flag_global_read(inflag,i+1 )==0)
{
//dummy_comp();
}
}
if(threadIdx.x==3) //spin ....wait for data ready
{
while( ( atomicAnd(&inflag[fid+32],1) == 0 ))
//while (flag_global_read(inflag,i+1 )==0)
{
//dummy_comp();
}
}
if(threadIdx.x==4) //spin ....wait for data ready
{
if(fid < 8159 ){
while( ( atomicAnd(&inflag[fid+33],1) == 0 ))
//while (flag_global_read(inflag,i+1 )==0)
{
//dummy_comp();
}
}
}
/*
if(threadIdx.x==5) //spin ....wait for data ready
{
while( ( atomicAnd(&inflag[fid-1],1) == 0 ))
//while (flag_global_read(inflag,i+1 )==0)
{
//dummy_comp();
}
}
if(threadIdx.x==6) //spin ....wait for data ready
{
while( ( atomicAnd(&inflag[fid-1],1) == 0 ))
//while (flag_global_read(inflag,i+1 )==0)
{
//dummy_comp();
}
}
if(threadIdx.x==7) //spin ....wait for data ready
{
while( ( atomicAnd(&inflag[fid-1],1) == 0 ))
//while (flag_global_read(inflag,i+1 )==0)
{
//dummy_comp();
}
}
if(threadIdx.x==8) //spin ....wait for data ready
{
while( ( atomicAnd(&inflag[fid-1],1) == 0 ))
//while (flag_global_read(inflag,i+1 )==0)
{
//dummy_comp();
}
}
*/
__syncthreads();
if ((i < (NI-1)) && (j < (NJ-1)) && (k < (NK-1)) && (i > 0) && (j > 0) && (k > 0))
{
B[i*(NK * NJ) + j*NK + k] = c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c21 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c23 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c31 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c33 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c12 * A[(i + 0)*(NK * NJ) + (j - 1)*NK + (k + 0)] + c22 * A[(i + 0)*(NK * NJ) + (j + 0)*NK + (k + 0)]
+ c32 * A[(i + 0)*(NK * NJ) + (j + 1)*NK + (k + 0)] + c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k + 1)]
+ c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k + 1)] + c21 * A[(i - 1)*(NK * NJ) + (j + 0)*NK + (k + 1)]
+ c23 * A[(i + 1)*(NK * NJ) + (j + 0)*NK + (k + 1)] + c31 * A[(i - 1)*(NK * NJ) + (j + 1)*NK + (k + 1)]
+ c33 * A[(i + 1)*(NK * NJ) + (j + 1)*NK + (k + 1)];
}
__syncthreads();
__threadfence();
if(threadIdx.x==0)
{
atomicAdd(&outflag[fid],1);
}
}
}else{ //copy kernel DtoH
const int idx = threadIdx.x;
const int bidx = blockIdx.x-(NUM_SM_COMPUTE+NUM_SM_HtoD);
int rid = 0;
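/* Drain B back to the host in 1024-double2 pieces, skipping the first and last
   i-plane (32768 double2 each), which the stencil never writes. outflag[rid]
   reaches 2 only after both compute blocks writing into chunk rid have finished. */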
for (int i = 32768+bidx*1024+idx; i< NUM_VECTOR-32768; i+= (1024*NUM_SM_DtoH) ){
rid = i>>10;
while( flag_global_read(outflag,rid) != 2 )
// while ( atomicAnd(&outflag[rid],3) == 0 )
{
//dummy_comp();
}
reinterpret_cast<double2*>(B_host)[i] = reinterpret_cast<double2*>(B)[i];
}
}
}
void convolution3DCuda(DATA_TYPE* A, DATA_TYPE* B_outputFromGpu)
{
//double t_start, t_end;
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
int *inflag,*outflag;
hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
hipMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
hipMalloc((void **)&inflag, sizeof(int) * DIM_BLOCK_VECTOR);
hipMalloc((void **)&outflag, sizeof(int) * DIM_BLOCK_VECTOR);
//initial flags
hipMemset(inflag, 0, sizeof(int) * DIM_BLOCK_VECTOR);
hipMemset(outflag, 0, sizeof(int) * DIM_BLOCK_VECTOR);
hipEvent_t start,stop;
float elapsedTimeInMs = 0.0f;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
//hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NJ * NK, hipMemcpyHostToDevice);
//dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
//dim3 grid((size_t)(ceil( ((float)NK) / ((float)block.x) )), (size_t)(ceil( ((float)NJ) / ((float)block.y) )));
//dim3 grid((size_t)(NUM_SM) , (size_t) (1) );
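/* One persistent kernel performs host-to-device staging, the stencil computation
   and the device-to-host drain concurrently, so the explicit memcpy calls above
   stay disabled. */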
hipLaunchKernelGGL(( convolution3D_kernel), dim3(NUM_SM), dim3(1024) , 0, 0, A,B_outputFromGpu,A_gpu, B_gpu,inflag,outflag);
//hipMemcpy(B_outputFromGpu, B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTimeInMs, start, stop);
fprintf(stdout,"GPU RunTime= %.2f Ms \n", elapsedTimeInMs);
hipFree(A_gpu);
hipFree(B_gpu);
}
int main(int argc, char *argv[])
{
//double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* B_outputFromGpu;
//A = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
//B = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
//B_outputFromGpu = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
hipHostMalloc((void **)&A, sizeof(DATA_TYPE) * NI * NJ * NK, hipHostMallocPortable);
hipHostMalloc((void **)&B, sizeof(DATA_TYPE) * NI * NJ * NK, hipHostMallocPortable);
hipHostMalloc((void **)&B_outputFromGpu, sizeof(DATA_TYPE) * NI * NJ *NK, hipHostMallocPortable);
init(A);
GPU_argv_init();
convolution3DCuda(A, B_outputFromGpu);
conv3D(A,B);
compareResults(B, B_outputFromGpu);
hipHostFree(A);
hipHostFree(B);
hipHostFree(B_outputFromGpu);
return 0;
}
|
4d998cacc81e7221dd0a24f4bfb21f4524fe666f.cu
|
/**
* 3DConvolution.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <cuda.h>
#include "polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
#define GPU_DEVICE 0
/* Problem size */
#define NI 256
#define NJ 256
#define NK 256
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 1024
#define DIM_THREAD_BLOCK_Y 1
#define DIM_BLOCK NJ*NI/4 // 256*256/4 = 16384
#define DIM_BLOCK_VECTOR DIM_BLOCK/2 //8192
#define NUM NI*NJ*NK
#define NUM_VECTOR NUM/2
#define NUM_SM 80
#define NUM_SM_COMPUTE 77
#define NUM_SM_HtoD 2
#define OFFSET NUM_SM_HtoD * DIM_THREAD_BLOCK_X
#define NUM_SM_DtoH 1
#define IN_CHUNK_SIZE 32
#define IN_CHUNK NJ*NI/8/NUM_SM_HtoD/IN_CHUNK_SIZE // 256*256/8/2/32 = 128
#define IN_CHUNK_OFFSET OFFSET*IN_CHUNK_SIZE // 2048*32 = 65536
#define OUT_CHUNK_SIZE 32
/* Can switch DATA_TYPE between float and double */
typedef double DATA_TYPE;
#define DUMMY_N 1000
__device__ void dummy_comp()
{
double sum = 0.0;
for (int i = 0; i < DUMMY_N; i++)
{
sum = sum + tan(0.1) * tan(0.1);
}
}
void conv3D(DATA_TYPE* A, DATA_TYPE* B)
{
int i, j, k;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +2; c21 = +5; c31 = -8;
c12 = -3; c22 = +6; c32 = -9;
c13 = +4; c23 = +7; c33 = +10;
for (i = 1; i < NI - 1; ++i) // 0
{
for (j = 1; j < NJ - 1; ++j) // 1
{
for (k = 1; k < NK -1; ++k) // 2
{
//printf("i:%d\nj:%d\nk:%d\n", i, j, k);
B[i*(NK * NJ) + j*NK + k] = c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c21 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c23 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c31 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c33 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c12 * A[(i + 0)*(NK * NJ) + (j - 1)*NK + (k + 0)] + c22 * A[(i + 0)*(NK * NJ) + (j + 0)*NK + (k + 0)]
+ c32 * A[(i + 0)*(NK * NJ) + (j + 1)*NK + (k + 0)] + c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k + 1)]
+ c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k + 1)] + c21 * A[(i - 1)*(NK * NJ) + (j + 0)*NK + (k + 1)]
+ c23 * A[(i + 1)*(NK * NJ) + (j + 0)*NK + (k + 1)] + c31 * A[(i - 1)*(NK * NJ) + (j + 1)*NK + (k + 1)]
+ c33 * A[(i + 1)*(NK * NJ) + (j + 1)*NK + (k + 1)];
}
}
}
}
__device__ int flag_global_read(volatile int * flag, int rid)
{
return(flag[rid]);
}
void init(DATA_TYPE* A)
{
int i, j, k;
for (i = 0; i < NI; ++i)
{
for (j = 0; j < NJ; ++j)
{
for (k = 0; k < NK; ++k)
{
A[i*(NK * NJ) + j*NK + k] = i % 12 + 2 * (j % 7) + 3 * (k % 13);
}
}
}
}
void compareResults(DATA_TYPE* B, DATA_TYPE* B_outputFromGpu)
{
int i, j, k, fail;
fail = 0;
// Compare result from cpu and gpu...
for (i = 1; i < NI - 1; ++i) // 0
{
for (j = 1; j < NJ - 1; ++j) // 1
{
for (k = 1; k < NK - 1; ++k) // 2
{
if (percentDiff(B[i*(NK * NJ) + j*NK + k], B_outputFromGpu[i*(NK * NJ) + j*NK + k]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
//printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
cudaSetDevice( GPU_DEVICE );
}
__global__ void convolution3D_kernel(DATA_TYPE *A_host, DATA_TYPE *B_host, DATA_TYPE *A, DATA_TYPE *B, int *inflag, int* outflag)
{
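/* Persistent-kernel pipeline: blockIdx.x selects a role. The first NUM_SM_HtoD
   blocks stream A from pinned host memory (A_host) into the device buffer A, the
   next NUM_SM_COMPUTE blocks run the 3D stencil as soon as their input chunks are
   flagged ready in inflag, and the remaining block(s) drain finished chunks of B
   back to B_host, gated by outflag. */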
if (blockIdx.x < NUM_SM_HtoD ){ //copy kernel HtoD
const int idx = threadIdx.x;
const int bidx = blockIdx.x;
int chunk_offset;
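/* Stream A to the device in IN_CHUNK_OFFSET-sized pieces of vectorised double2
   loads; after the fence, the first IN_CHUNK_SIZE threads publish the matching
   inflag entries (one flag per 1024 double2) so compute blocks can start. */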
for (int i = 0; i<IN_CHUNK;i++ ){
chunk_offset=i*IN_CHUNK_OFFSET;
for (int k = (chunk_offset+bidx*1024+idx);k < ( chunk_offset+IN_CHUNK_OFFSET ) ; k+= OFFSET ){
reinterpret_cast<double2*>(A)[k] = reinterpret_cast<double2*>(A_host)[k];
}
__syncthreads();
__threadfence();
if ( idx < IN_CHUNK_SIZE ){
atomicOr(&inflag[ (IN_CHUNK_SIZE*2*i) + idx*2+ bidx],1);
}
}
}else if (blockIdx.x < (NUM_SM_COMPUTE+NUM_SM_HtoD )){ //compute
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +2; c21 = +5; c31 = -8;
c12 = -3; c22 = +6; c32 = -9;
c13 = +4; c23 = +7; c33 = +10;
for (int bid= blockIdx.x+62 ; bid < (DIM_BLOCK-64); bid+=NUM_SM_COMPUTE){
int i = bid /64 ;
int j = (bid % 64) *4 + (threadIdx.x /256);
int k = threadIdx.x % 256;
int fid = bid >> 1;
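/* bid/64 selects the i-plane (64 blocks per 256x256 plane); the block's 1024
   threads cover 4 consecutive j-rows x 256 k-values. fid = bid/2 is the
   inflag/outflag chunk holding those rows; the spin loops below wait for this
   chunk and the forward neighbour chunks (fid+1, fid+31..fid+33) that contain
   the halo rows and the i+1 plane read by the stencil (earlier chunks are
   assumed to have been staged already). */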
if(threadIdx.x==0) //spin ....wait for data ready
{
while( ( atomicAnd(&inflag[fid],1) == 0 ))
//while (flag_global_read(inflag,i+1 )==0)
{
//dummy_comp();
}
}
if(threadIdx.x==1) //spin ....wait for data ready
{
while( ( atomicAnd(&inflag[fid+1],1) == 0 ))
//while (flag_global_read(inflag,i+1 )==0)
{
//dummy_comp();
}
}
if(threadIdx.x==2) //spin ....wait for data ready
{
while( ( atomicAnd(&inflag[fid+31],1) == 0 ))
//while (flag_global_read(inflag,i+1 )==0)
{
//dummy_comp();
}
}
if(threadIdx.x==3) //spin ....wait for data ready
{
while( ( atomicAnd(&inflag[fid+32],1) == 0 ))
//while (flag_global_read(inflag,i+1 )==0)
{
//dummy_comp();
}
}
if(threadIdx.x==4) //spin ....wait for data ready
{
if(fid < 8159 ){
while( ( atomicAnd(&inflag[fid+33],1) == 0 ))
//while (flag_global_read(inflag,i+1 )==0)
{
//dummy_comp();
}
}
}
/*
if(threadIdx.x==5) //spin ....wait for data ready
{
while( ( atomicAnd(&inflag[fid-1],1) == 0 ))
//while (flag_global_read(inflag,i+1 )==0)
{
//dummy_comp();
}
}
if(threadIdx.x==6) //spin ....wait for data ready
{
while( ( atomicAnd(&inflag[fid-1],1) == 0 ))
//while (flag_global_read(inflag,i+1 )==0)
{
//dummy_comp();
}
}
if(threadIdx.x==7) //spin ....wait for data ready
{
while( ( atomicAnd(&inflag[fid-1],1) == 0 ))
//while (flag_global_read(inflag,i+1 )==0)
{
//dummy_comp();
}
}
if(threadIdx.x==8) //spin ....wait for data ready
{
while( ( atomicAnd(&inflag[fid-1],1) == 0 ))
//while (flag_global_read(inflag,i+1 )==0)
{
//dummy_comp();
}
}
*/
__syncthreads();
if ((i < (NI-1)) && (j < (NJ-1)) && (k < (NK-1)) && (i > 0) && (j > 0) && (k > 0))
{
B[i*(NK * NJ) + j*NK + k] = c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c21 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c23 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c31 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c33 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c12 * A[(i + 0)*(NK * NJ) + (j - 1)*NK + (k + 0)] + c22 * A[(i + 0)*(NK * NJ) + (j + 0)*NK + (k + 0)]
+ c32 * A[(i + 0)*(NK * NJ) + (j + 1)*NK + (k + 0)] + c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k + 1)]
+ c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k + 1)] + c21 * A[(i - 1)*(NK * NJ) + (j + 0)*NK + (k + 1)]
+ c23 * A[(i + 1)*(NK * NJ) + (j + 0)*NK + (k + 1)] + c31 * A[(i - 1)*(NK * NJ) + (j + 1)*NK + (k + 1)]
+ c33 * A[(i + 1)*(NK * NJ) + (j + 1)*NK + (k + 1)];
}
__syncthreads();
__threadfence();
if(threadIdx.x==0)
{
atomicAdd(&outflag[fid],1);
}
}
}else{ //copy kernel DtoH
const int idx = threadIdx.x;
const int bidx = blockIdx.x-(NUM_SM_COMPUTE+NUM_SM_HtoD);
int rid = 0;
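/* Drain B back to the host in 1024-double2 pieces, skipping the first and last
   i-plane (32768 double2 each), which the stencil never writes. outflag[rid]
   reaches 2 only after both compute blocks writing into chunk rid have finished. */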
for (int i = 32768+bidx*1024+idx; i< NUM_VECTOR-32768; i+= (1024*NUM_SM_DtoH) ){
rid = i>>10;
while( flag_global_read(outflag,rid) != 2 )
// while ( atomicAnd(&outflag[rid],3) == 0 )
{
//dummy_comp();
}
reinterpret_cast<double2*>(B_host)[i] = reinterpret_cast<double2*>(B)[i];
}
}
}
void convolution3DCuda(DATA_TYPE* A, DATA_TYPE* B_outputFromGpu)
{
//double t_start, t_end;
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
int *inflag,*outflag;
cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
cudaMalloc((void **)&inflag, sizeof(int) * DIM_BLOCK_VECTOR);
cudaMalloc((void **)&outflag, sizeof(int) * DIM_BLOCK_VECTOR);
//initial flags
cudaMemset(inflag, 0, sizeof(int) * DIM_BLOCK_VECTOR);
cudaMemset(outflag, 0, sizeof(int) * DIM_BLOCK_VECTOR);
cudaEvent_t start,stop;
float elapsedTimeInMs = 0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
//cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NJ * NK, cudaMemcpyHostToDevice);
//dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
//dim3 grid((size_t)(ceil( ((float)NK) / ((float)block.x) )), (size_t)(ceil( ((float)NJ) / ((float)block.y) )));
//dim3 grid((size_t)(NUM_SM) , (size_t) (1) );
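/* One persistent kernel performs host-to-device staging, the stencil computation
   and the device-to-host drain concurrently, so the explicit memcpy calls above
   stay disabled. */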
convolution3D_kernel<<< NUM_SM, 1024 >>>(A,B_outputFromGpu,A_gpu, B_gpu,inflag,outflag);
//cudaMemcpy(B_outputFromGpu, B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize(); /* cudaThreadSynchronize() is deprecated */
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTimeInMs, start, stop);
fprintf(stdout,"GPU RunTime= %.2f Ms \n", elapsedTimeInMs);
cudaFree(A_gpu);
cudaFree(B_gpu);
}
int main(int argc, char *argv[])
{
//double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* B_outputFromGpu;
//A = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
//B = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
//B_outputFromGpu = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
cudaHostAlloc((void **)&A, sizeof(DATA_TYPE) * NI * NJ * NK, cudaHostAllocPortable);
cudaHostAlloc((void **)&B, sizeof(DATA_TYPE) * NI * NJ * NK, cudaHostAllocPortable);
cudaHostAlloc((void **)&B_outputFromGpu, sizeof(DATA_TYPE) * NI * NJ *NK, cudaHostAllocPortable);
init(A);
GPU_argv_init();
convolution3DCuda(A, B_outputFromGpu);
conv3D(A,B);
compareResults(B, B_outputFromGpu);
cudaFreeHost(A);
cudaFreeHost(B);
cudaFreeHost(B_outputFromGpu);
return 0;
}
|
df8003ff6fb15654a29f8523383f76597f8e7df6.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cugraph/algorithms.hpp>
#include <cugraph/graph_view.hpp>
#include <cugraph/prims/reduce_op.cuh>
#include <cugraph/prims/update_frontier_v_push_if_out_nbr.cuh>
#include <cugraph/prims/vertex_frontier.cuh>
#include <cugraph/utilities/error.hpp>
#include <cugraph/vertex_partition_device_view.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <raft/handle.hpp>
#include <thrust/fill.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/optional.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
#include <limits>
#include <type_traits>
namespace cugraph {
namespace detail {
template <typename GraphViewType, typename PredecessorIterator>
void bfs(raft::handle_t const& handle,
GraphViewType const& push_graph_view,
typename GraphViewType::vertex_type* distances,
PredecessorIterator predecessor_first,
typename GraphViewType::vertex_type source_vertex,
bool direction_optimizing,
typename GraphViewType::vertex_type depth_limit,
bool do_expensive_check)
{
using vertex_t = typename GraphViewType::vertex_type;
static_assert(std::is_integral<vertex_t>::value,
"GraphViewType::vertex_type should be integral.");
static_assert(!GraphViewType::is_adj_matrix_transposed,
"GraphViewType should support the push model.");
auto const num_vertices = push_graph_view.get_number_of_vertices();
if (num_vertices == 0) { return; }
// 1. check input arguments
CUGRAPH_EXPECTS(
push_graph_view.is_symmetric() || !direction_optimizing,
"Invalid input argument: input graph should be symmetric for direction optimizing BFS.");
CUGRAPH_EXPECTS(push_graph_view.is_valid_vertex(source_vertex),
"Invalid input argument: source vertex out-of-range.");
if (do_expensive_check) {
// nothing to do
}
// 2. initialize distances and predecessors
auto constexpr invalid_distance = std::numeric_limits<vertex_t>::max();
auto constexpr invalid_vertex = invalid_vertex_id<vertex_t>::value;
auto val_first = thrust::make_zip_iterator(thrust::make_tuple(distances, predecessor_first));
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
thrust::make_counting_iterator(push_graph_view.get_local_vertex_first()),
thrust::make_counting_iterator(push_graph_view.get_local_vertex_last()),
val_first,
[source_vertex] __device__(auto val) {
auto distance = invalid_distance;
if (val == source_vertex) { distance = vertex_t{0}; }
return thrust::make_tuple(distance, invalid_vertex);
});
// 3. initialize BFS frontier
enum class Bucket { cur, next, num_buckets };
VertexFrontier<vertex_t,
void,
GraphViewType::is_multi_gpu,
static_cast<size_t>(Bucket::num_buckets)>
vertex_frontier(handle);
if (push_graph_view.is_local_vertex_nocheck(source_vertex)) {
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).insert(source_vertex);
}
// 4. BFS iteration
vertex_t depth{0};
while (true) {
if (direction_optimizing) {
CUGRAPH_FAIL("unimplemented.");
} else {
auto vertex_partition = vertex_partition_device_view_t<vertex_t, GraphViewType::is_multi_gpu>(
push_graph_view.get_vertex_partition_view());
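// Expand the current frontier along outgoing edges: the edge op below pushes a
// neighbour only if its distance is still unset, reduce_op::any keeps a single
// predecessor, and the vertex op records depth + 1 plus the predecessor and moves
// the vertex into the next bucket.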
update_frontier_v_push_if_out_nbr(
handle,
push_graph_view,
vertex_frontier,
static_cast<size_t>(Bucket::cur),
std::vector<size_t>{static_cast<size_t>(Bucket::next)},
thrust::make_constant_iterator(0) /* dummy */,
thrust::make_constant_iterator(0) /* dummy */,
[vertex_partition, distances] __device__(
vertex_t src, vertex_t dst, auto src_val, auto dst_val) {
auto push = true;
if (vertex_partition.is_local_vertex_nocheck(dst)) {
auto distance =
*(distances + vertex_partition.get_local_vertex_offset_from_vertex_nocheck(dst));
if (distance != invalid_distance) { push = false; }
}
return push ? thrust::optional<vertex_t>{src} : thrust::nullopt;
},
reduce_op::any<vertex_t>(),
distances,
thrust::make_zip_iterator(thrust::make_tuple(distances, predecessor_first)),
[depth] __device__(auto v, auto v_val, auto pushed_val) {
return (v_val == invalid_distance)
? thrust::optional<
thrust::tuple<size_t, thrust::tuple<vertex_t, vertex_t>>>{thrust::make_tuple(
static_cast<size_t>(Bucket::next),
thrust::make_tuple(depth + 1, pushed_val))}
: thrust::nullopt;
});
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).clear();
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).shrink_to_fit();
vertex_frontier.swap_buckets(static_cast<size_t>(Bucket::cur),
static_cast<size_t>(Bucket::next));
if (vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).aggregate_size() == 0) {
break;
}
}
depth++;
if (depth >= depth_limit) { break; }
}
CUDA_TRY(hipStreamSynchronize(
handle.get_stream())); // this is necessary as vertex_frontier will become out-of-scope once
// this function returns (FIXME: should I stream sync in VertexFrontier
// destructor?)
}
} // namespace detail
template <typename vertex_t, typename edge_t, typename weight_t, bool multi_gpu>
void bfs(raft::handle_t const& handle,
graph_view_t<vertex_t, edge_t, weight_t, false, multi_gpu> const& graph_view,
vertex_t* distances,
vertex_t* predecessors,
vertex_t source_vertex,
bool direction_optimizing,
vertex_t depth_limit,
bool do_expensive_check)
{
if (predecessors != nullptr) {
detail::bfs(handle,
graph_view,
distances,
predecessors,
source_vertex,
direction_optimizing,
depth_limit,
do_expensive_check);
} else {
detail::bfs(handle,
graph_view,
distances,
thrust::make_discard_iterator(),
source_vertex,
direction_optimizing,
depth_limit,
do_expensive_check);
}
}
// explicit instantiation
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int32_t, float, false, true> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int32_t, double, false, true> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int64_t, float, false, true> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int64_t, double, false, true> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int64_t, int64_t, float, false, true> const& graph_view,
int64_t* distances,
int64_t* predecessors,
int64_t source_vertex,
bool direction_optimizing,
int64_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int64_t, int64_t, double, false, true> const& graph_view,
int64_t* distances,
int64_t* predecessors,
int64_t source_vertex,
bool direction_optimizing,
int64_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int32_t, float, false, false> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int32_t, double, false, false> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int64_t, float, false, false> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int64_t, double, false, false> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int64_t, int64_t, float, false, false> const& graph_view,
int64_t* distances,
int64_t* predecessors,
int64_t source_vertex,
bool direction_optimizing,
int64_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int64_t, int64_t, double, false, false> const& graph_view,
int64_t* distances,
int64_t* predecessors,
int64_t source_vertex,
bool direction_optimizing,
int64_t depth_limit,
bool do_expensive_check);
} // namespace cugraph
|
df8003ff6fb15654a29f8523383f76597f8e7df6.cu
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cugraph/algorithms.hpp>
#include <cugraph/graph_view.hpp>
#include <cugraph/prims/reduce_op.cuh>
#include <cugraph/prims/update_frontier_v_push_if_out_nbr.cuh>
#include <cugraph/prims/vertex_frontier.cuh>
#include <cugraph/utilities/error.hpp>
#include <cugraph/vertex_partition_device_view.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <raft/handle.hpp>
#include <thrust/fill.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/optional.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
#include <limits>
#include <type_traits>
namespace cugraph {
namespace detail {
template <typename GraphViewType, typename PredecessorIterator>
void bfs(raft::handle_t const& handle,
GraphViewType const& push_graph_view,
typename GraphViewType::vertex_type* distances,
PredecessorIterator predecessor_first,
typename GraphViewType::vertex_type source_vertex,
bool direction_optimizing,
typename GraphViewType::vertex_type depth_limit,
bool do_expensive_check)
{
using vertex_t = typename GraphViewType::vertex_type;
static_assert(std::is_integral<vertex_t>::value,
"GraphViewType::vertex_type should be integral.");
static_assert(!GraphViewType::is_adj_matrix_transposed,
"GraphViewType should support the push model.");
auto const num_vertices = push_graph_view.get_number_of_vertices();
if (num_vertices == 0) { return; }
// 1. check input arguments
CUGRAPH_EXPECTS(
push_graph_view.is_symmetric() || !direction_optimizing,
"Invalid input argument: input graph should be symmetric for direction optimizing BFS.");
CUGRAPH_EXPECTS(push_graph_view.is_valid_vertex(source_vertex),
"Invalid input argument: source vertex out-of-range.");
if (do_expensive_check) {
// nothing to do
}
// 2. initialize distances and predecessors
auto constexpr invalid_distance = std::numeric_limits<vertex_t>::max();
auto constexpr invalid_vertex = invalid_vertex_id<vertex_t>::value;
auto val_first = thrust::make_zip_iterator(thrust::make_tuple(distances, predecessor_first));
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
thrust::make_counting_iterator(push_graph_view.get_local_vertex_first()),
thrust::make_counting_iterator(push_graph_view.get_local_vertex_last()),
val_first,
[source_vertex] __device__(auto val) {
auto distance = invalid_distance;
if (val == source_vertex) { distance = vertex_t{0}; }
return thrust::make_tuple(distance, invalid_vertex);
});
// 3. initialize BFS frontier
enum class Bucket { cur, next, num_buckets };
VertexFrontier<vertex_t,
void,
GraphViewType::is_multi_gpu,
static_cast<size_t>(Bucket::num_buckets)>
vertex_frontier(handle);
if (push_graph_view.is_local_vertex_nocheck(source_vertex)) {
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).insert(source_vertex);
}
// 4. BFS iteration
vertex_t depth{0};
while (true) {
if (direction_optimizing) {
CUGRAPH_FAIL("unimplemented.");
} else {
auto vertex_partition = vertex_partition_device_view_t<vertex_t, GraphViewType::is_multi_gpu>(
push_graph_view.get_vertex_partition_view());
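// Expand the current frontier along outgoing edges: the edge op below pushes a
// neighbour only if its distance is still unset, reduce_op::any keeps a single
// predecessor, and the vertex op records depth + 1 plus the predecessor and moves
// the vertex into the next bucket.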
update_frontier_v_push_if_out_nbr(
handle,
push_graph_view,
vertex_frontier,
static_cast<size_t>(Bucket::cur),
std::vector<size_t>{static_cast<size_t>(Bucket::next)},
thrust::make_constant_iterator(0) /* dummy */,
thrust::make_constant_iterator(0) /* dummy */,
[vertex_partition, distances] __device__(
vertex_t src, vertex_t dst, auto src_val, auto dst_val) {
auto push = true;
if (vertex_partition.is_local_vertex_nocheck(dst)) {
auto distance =
*(distances + vertex_partition.get_local_vertex_offset_from_vertex_nocheck(dst));
if (distance != invalid_distance) { push = false; }
}
return push ? thrust::optional<vertex_t>{src} : thrust::nullopt;
},
reduce_op::any<vertex_t>(),
distances,
thrust::make_zip_iterator(thrust::make_tuple(distances, predecessor_first)),
[depth] __device__(auto v, auto v_val, auto pushed_val) {
return (v_val == invalid_distance)
? thrust::optional<
thrust::tuple<size_t, thrust::tuple<vertex_t, vertex_t>>>{thrust::make_tuple(
static_cast<size_t>(Bucket::next),
thrust::make_tuple(depth + 1, pushed_val))}
: thrust::nullopt;
});
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).clear();
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).shrink_to_fit();
vertex_frontier.swap_buckets(static_cast<size_t>(Bucket::cur),
static_cast<size_t>(Bucket::next));
if (vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).aggregate_size() == 0) {
break;
}
}
depth++;
if (depth >= depth_limit) { break; }
}
CUDA_TRY(cudaStreamSynchronize(
handle.get_stream())); // this is necessary as vertex_frontier will become out-of-scope once
// this function returns (FIXME: should I stream sync in VertexFrontier
// destructor?)
}
} // namespace detail
template <typename vertex_t, typename edge_t, typename weight_t, bool multi_gpu>
void bfs(raft::handle_t const& handle,
graph_view_t<vertex_t, edge_t, weight_t, false, multi_gpu> const& graph_view,
vertex_t* distances,
vertex_t* predecessors,
vertex_t source_vertex,
bool direction_optimizing,
vertex_t depth_limit,
bool do_expensive_check)
{
if (predecessors != nullptr) {
detail::bfs(handle,
graph_view,
distances,
predecessors,
source_vertex,
direction_optimizing,
depth_limit,
do_expensive_check);
} else {
detail::bfs(handle,
graph_view,
distances,
thrust::make_discard_iterator(),
source_vertex,
direction_optimizing,
depth_limit,
do_expensive_check);
}
}
// explicit instantiation
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int32_t, float, false, true> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int32_t, double, false, true> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int64_t, float, false, true> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int64_t, double, false, true> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int64_t, int64_t, float, false, true> const& graph_view,
int64_t* distances,
int64_t* predecessors,
int64_t source_vertex,
bool direction_optimizing,
int64_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int64_t, int64_t, double, false, true> const& graph_view,
int64_t* distances,
int64_t* predecessors,
int64_t source_vertex,
bool direction_optimizing,
int64_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int32_t, float, false, false> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int32_t, double, false, false> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int64_t, float, false, false> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int32_t, int64_t, double, false, false> const& graph_view,
int32_t* distances,
int32_t* predecessors,
int32_t source_vertex,
bool direction_optimizing,
int32_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int64_t, int64_t, float, false, false> const& graph_view,
int64_t* distances,
int64_t* predecessors,
int64_t source_vertex,
bool direction_optimizing,
int64_t depth_limit,
bool do_expensive_check);
template void bfs(raft::handle_t const& handle,
graph_view_t<int64_t, int64_t, double, false, false> const& graph_view,
int64_t* distances,
int64_t* predecessors,
int64_t source_vertex,
bool direction_optimizing,
int64_t depth_limit,
bool do_expensive_check);
} // namespace cugraph
|
7e8c0d41df90c238ecc75dfe579bb478346339ac.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../SoftBodyMesh3D.h"
namespace ar3d {
__global__ void ComputeMassMatrixKernel(dim3 size, const int4* indices, const real3* positions, int numFree, real mass, real* massVector)
{
CUMAT_KERNEL_1D_LOOP(i, size)
int4 idx = indices[i];
real3 a = positions[idx.x];
real3 b = positions[idx.y];
real3 c = positions[idx.z];
real3 d = positions[idx.w];
real area = SoftBodyMesh3D::tetSize(a, b, c, d);
assert(area > 0);
//printf("Element %d: mass %f\n", int(i), float(area));
real m = mass * area / 4;
if (idx.x < numFree) atomicAddReal(massVector + idx.x, m);
if (idx.y < numFree) atomicAddReal(massVector + idx.y, m);
if (idx.z < numFree) atomicAddReal(massVector + idx.z, m);
if (idx.w < numFree) atomicAddReal(massVector + idx.w, m);
CUMAT_KERNEL_1D_LOOP_END
}
void SoftBodyMesh3D::computeMassMatrix(const Input& input, const Settings& settings, VectorX& lumpedMass)
{
cuMat::Context& ctx = cuMat::Context::current();
cuMat::KernelLaunchConfig cfg = ctx.createLaunchConfig1D(input.numElements_, ComputeMassMatrixKernel);
const int4* indices = input.indices_.data();
const real3* positions = input.referencePositions_.data();
real* massVector = lumpedMass.data();
#if 1
hipLaunchKernelGGL(( ComputeMassMatrixKernel) , dim3(cfg.block_count), dim3(cfg.thread_per_block), 0, ctx.stream() ,
cfg.virtual_size, indices, positions, input.numFreeNodes_, settings.mass_, massVector);
#else
lumpedMass.inplace() = VectorX::Constant(lumpedMass.rows(), settings.mass_);
#endif
CUMAT_CHECK_ERROR();
}
}
|
7e8c0d41df90c238ecc75dfe579bb478346339ac.cu
|
#include "../SoftBodyMesh3D.h"
namespace ar3d {
__global__ void ComputeMassMatrixKernel(dim3 size, const int4* indices, const real3* positions, int numFree, real mass, real* massVector)
{
CUMAT_KERNEL_1D_LOOP(i, size)
int4 idx = indices[i];
real3 a = positions[idx.x];
real3 b = positions[idx.y];
real3 c = positions[idx.z];
real3 d = positions[idx.w];
real area = SoftBodyMesh3D::tetSize(a, b, c, d);
assert(area > 0);
//printf("Element %d: mass %f\n", int(i), float(area));
real m = mass * area / 4;
if (idx.x < numFree) atomicAddReal(massVector + idx.x, m);
if (idx.y < numFree) atomicAddReal(massVector + idx.y, m);
if (idx.z < numFree) atomicAddReal(massVector + idx.z, m);
if (idx.w < numFree) atomicAddReal(massVector + idx.w, m);
CUMAT_KERNEL_1D_LOOP_END
}
void SoftBodyMesh3D::computeMassMatrix(const Input& input, const Settings& settings, VectorX& lumpedMass)
{
cuMat::Context& ctx = cuMat::Context::current();
cuMat::KernelLaunchConfig cfg = ctx.createLaunchConfig1D(input.numElements_, ComputeMassMatrixKernel);
const int4* indices = input.indices_.data();
const real3* positions = input.referencePositions_.data();
real* massVector = lumpedMass.data();
#if 1
ComputeMassMatrixKernel <<<cfg.block_count, cfg.thread_per_block, 0, ctx.stream() >>> (
cfg.virtual_size, indices, positions, input.numFreeNodes_, settings.mass_, massVector);
#else
lumpedMass.inplace() = VectorX::Constant(lumpedMass.rows(), settings.mass_);
#endif
CUMAT_CHECK_ERROR();
}
}
|
5407c1a70ee8544df7462fe2e775048a36c89427.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define SPHERES 20
#define rnd( x ) (x * rand() / RAND_MAX)
#define INF 2e10f
#define DIM 2048
struct Sphere {
float r, b, g;
float radius;
float x, y, z;
};
/* Added __global__ qualifier so the kernel function runs on the GPU */
__global__ void kernel(const Sphere* s, unsigned char* ptr)
{
/* Each thread combines its thread/block indices with the block dimensions to obtain the unique pixel (x, y) it processes */
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= DIM || y >= DIM) return;
int offset = x + y * DIM;
float ox = (x - DIM / 2);
float oy = (y - DIM / 2);
float r = 0, g = 0, b = 0;
float maxz = -INF;
for (int i = 0; i<SPHERES; i++) {
float n;
float t;
/* Moved the 'hit' function from the 'Sphere' structure of the existing code */
float dx = ox - s[i].x;
float dy = oy - s[i].y;
if (dx*dx + dy*dy < s[i].radius*s[i].radius) {
float dz = sqrtf(s[i].radius*s[i].radius - dx*dx - dy*dy);
n = dz / sqrtf(s[i].radius * s[i].radius);
t = dz + s[i].z;
}
else t = -INF;
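/* Keep the frontmost hit (largest t); n = dz / radius acts as a simple shading
   factor that scales the sphere colour. */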
if (t > maxz) {
float fscale = n;
r = s[i].r * fscale;
g = s[i].g * fscale;
b = s[i].b * fscale;
maxz = t;
}
}
ptr[offset * 4 + 0] = (int)(r * 255);
ptr[offset * 4 + 1] = (int)(g * 255);
ptr[offset * 4 + 2] = (int)(b * 255);
ptr[offset * 4 + 3] = 255;
}
void ppm_write(unsigned char* bitmap, int xdim, int ydim, FILE* fp)
{
int i, x, y;
fprintf(fp, "P3\n");
fprintf(fp, "%d %d\n", xdim, ydim);
fprintf(fp, "255\n");
for (y = 0; y<ydim; y++) {
for (x = 0; x<xdim; x++) {
i = x + y*xdim;
fprintf(fp, "%d %d %d ", bitmap[4 * i], bitmap[4 * i + 1], bitmap[4 * i + 2]);
}
fprintf(fp, "\n");
}
}
int main(int argc, char* argv[])
{
int x, y;
unsigned char* bitmap;
hipEvent_t start, stop; // for time measurement
float timeDiff;
/* time variables event create */
hipEventCreate(&start);
hipEventCreate(&stop);
srand(time(NULL));
if (argc != 2) {
printf("> a.out [filename.ppm]\n");
exit(0);
}
FILE* fp = fopen(argv[1], "w");
Sphere *temp_s = (Sphere*)malloc(sizeof(Sphere) * SPHERES);
for (int i = 0; i<SPHERES; i++) {
temp_s[i].r = rnd(1.0f);
temp_s[i].g = rnd(1.0f);
temp_s[i].b = rnd(1.0f);
temp_s[i].x = rnd(2000.0f) - 1000;
temp_s[i].y = rnd(2000.0f) - 1000;
temp_s[i].z = rnd(2000.0f) - 1000;
temp_s[i].radius = rnd(200.0f) + 40;
}
bitmap = (unsigned char*)malloc(sizeof(unsigned char)*DIM*DIM * 4);
/* device_s and device_bitmap is to be assigned to device */
Sphere *device_s;
unsigned char* device_bitmap;
/* Allocate space on GPU to copy the temp_s */
hipMalloc((void**)&device_s, sizeof(Sphere)*SPHERES);
hipMalloc((void**)&device_bitmap, sizeof(unsigned char)*DIM*DIM * 4);
/* Copy temp_s to device_s to run the function in GPU */
hipMemcpy(device_s, temp_s, sizeof(Sphere)*SPHERES, hipMemcpyHostToDevice);
/* Start the recording */
hipEventRecord(start, 0);
/* 768 thread per block */
dim3 dimBlock(32, 24);
/* (Dimension/blockDimension) block per grid */
dim3 dimGrid(DIM / dimBlock.x, DIM / dimBlock.y);
hipLaunchKernelGGL(kernel, dimGrid, dimBlock, 0, 0, device_s, device_bitmap);
hipDeviceSynchronize();
/* End the recording */
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
/* Copy the result stored in the device back to the host */
hipMemcpy(bitmap, device_bitmap, sizeof(unsigned char)*DIM*DIM * 4, hipMemcpyDeviceToHost);
/* Execution time checking */
hipEventElapsedTime(&timeDiff, start, stop);
printf("CUDA ray tracing: %f sec \n", timeDiff / CLOCKS_PER_SEC);
ppm_write(bitmap, DIM, DIM, fp);
printf("[%s] was generated. \n", argv[1]);
fclose(fp);
free(bitmap);
free(temp_s);
hipFree(device_s);
hipFree(device_bitmap);
return 0;
}
|
5407c1a70ee8544df7462fe2e775048a36c89427.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define SPHERES 20
#define rnd( x ) (x * rand() / RAND_MAX)
#define INF 2e10f
#define DIM 2048
struct Sphere {
float r, b, g;
float radius;
float x, y, z;
};
/* Added __global__ qualifier so the kernel function runs on the GPU */
__global__ void kernel(const Sphere* s, unsigned char* ptr)
{
/* Each thread combines its thread/block indices with the block dimensions to obtain the unique pixel (x, y) it processes */
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= DIM || y >= DIM) return;
int offset = x + y * DIM;
float ox = (x - DIM / 2);
float oy = (y - DIM / 2);
float r = 0, g = 0, b = 0;
float maxz = -INF;
for (int i = 0; i<SPHERES; i++) {
float n;
float t;
/* Moved the 'hit' function from the 'Sphere' structure of the existing code */
float dx = ox - s[i].x;
float dy = oy - s[i].y;
if (dx*dx + dy*dy < s[i].radius*s[i].radius) {
float dz = sqrtf(s[i].radius*s[i].radius - dx*dx - dy*dy);
n = dz / sqrtf(s[i].radius * s[i].radius);
t = dz + s[i].z;
}
else t = -INF;
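/* Keep the frontmost hit (largest t); n = dz / radius acts as a simple shading
   factor that scales the sphere colour. */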
if (t > maxz) {
float fscale = n;
r = s[i].r * fscale;
g = s[i].g * fscale;
b = s[i].b * fscale;
maxz = t;
}
}
ptr[offset * 4 + 0] = (int)(r * 255);
ptr[offset * 4 + 1] = (int)(g * 255);
ptr[offset * 4 + 2] = (int)(b * 255);
ptr[offset * 4 + 3] = 255;
}
void ppm_write(unsigned char* bitmap, int xdim, int ydim, FILE* fp)
{
int i, x, y;
fprintf(fp, "P3\n");
fprintf(fp, "%d %d\n", xdim, ydim);
fprintf(fp, "255\n");
for (y = 0; y<ydim; y++) {
for (x = 0; x<xdim; x++) {
i = x + y*xdim;
fprintf(fp, "%d %d %d ", bitmap[4 * i], bitmap[4 * i + 1], bitmap[4 * i + 2]);
}
fprintf(fp, "\n");
}
}
int main(int argc, char* argv[])
{
int x, y;
unsigned char* bitmap;
cudaEvent_t start, stop; // for time measurement
float timeDiff;
/* time variables event create */
cudaEventCreate(&start);
cudaEventCreate(&stop);
srand(time(NULL));
if (argc != 2) {
printf("> a.out [filename.ppm]\n");
exit(0);
}
FILE* fp = fopen(argv[1], "w");
Sphere *temp_s = (Sphere*)malloc(sizeof(Sphere) * SPHERES);
for (int i = 0; i<SPHERES; i++) {
temp_s[i].r = rnd(1.0f);
temp_s[i].g = rnd(1.0f);
temp_s[i].b = rnd(1.0f);
temp_s[i].x = rnd(2000.0f) - 1000;
temp_s[i].y = rnd(2000.0f) - 1000;
temp_s[i].z = rnd(2000.0f) - 1000;
temp_s[i].radius = rnd(200.0f) + 40;
}
bitmap = (unsigned char*)malloc(sizeof(unsigned char)*DIM*DIM * 4);
/* device_s and device_bitmap is to be assigned to device */
Sphere *device_s;
unsigned char* device_bitmap;
/* Allocate space on GPU to copy the temp_s */
cudaMalloc((void**)&device_s, sizeof(Sphere)*SPHERES);
cudaMalloc((void**)&device_bitmap, sizeof(unsigned char)*DIM*DIM * 4);
/* Copy temp_s to device_s to run the function in GPU */
cudaMemcpy(device_s, temp_s, sizeof(Sphere)*SPHERES, cudaMemcpyHostToDevice);
/* Start the recording */
cudaEventRecord(start, 0);
/* 768 thread per block */
dim3 dimBlock(32, 24);
/* (Dimension/blockDimension) block per grid */
dim3 dimGrid(DIM / dimBlock.x, DIM / dimBlock.y);
kernel<<<dimGrid, dimBlock>>>(device_s, device_bitmap);
cudaDeviceSynchronize();
/* End the recording */
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
/* Copy the result stored in the device back to the host */
cudaMemcpy(bitmap, device_bitmap, sizeof(unsigned char)*DIM*DIM * 4, cudaMemcpyDeviceToHost);
/* Execution time checking */
cudaEventElapsedTime(&timeDiff, start, stop);
printf("CUDA ray tracing: %f sec \n", timeDiff / CLOCKS_PER_SEC);
ppm_write(bitmap, DIM, DIM, fp);
printf("[%s] was generated. \n", argv[1]);
fclose(fp);
free(bitmap);
free(temp_s);
cudaFree(device_s);
cudaFree(device_bitmap);
return 0;
}
|
ba28bcfb7921704c4996badd69a6dc09cbcf2882.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
This file contains routines for Parallel vector operations.
*/
#define PETSC_SKIP_SPINLOCK
#define PETSC_SKIP_CXX_COMPLEX_FIX
#include <petscconf.h>
#include <../src/vec/vec/impls/mpi/pvecimpl.h> /*I "petscvec.h" I*/
#include <../src/vec/vec/impls/seq/seqcuda/cudavecimpl.h>
PetscErrorCode VecDestroy_MPICUDA(Vec v)
{
PetscErrorCode ierr;
hipError_t err;
PetscFunctionBegin;
if (v->spptr) {
if (((Vec_CUDA*)v->spptr)->GPUarray_allocated) {
err = hipFree(((Vec_CUDA*)v->spptr)->GPUarray_allocated);CHKERRCUDA(err);
((Vec_CUDA*)v->spptr)->GPUarray_allocated = NULL;
}
err = hipStreamDestroy(((Vec_CUDA*)v->spptr)->stream);CHKERRCUDA(err);
ierr = PetscFree(v->spptr);CHKERRQ(ierr);
}
ierr = VecDestroy_MPI(v);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode VecNorm_MPICUDA(Vec xin,NormType type,PetscReal *z)
{
PetscReal sum,work = 0.0;
PetscErrorCode ierr;
PetscFunctionBegin;
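/* Local norms are computed on the GPU by the SeqCUDA kernels; the per-rank
   results are then combined with an MPI reduction. */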
if (type == NORM_2 || type == NORM_FROBENIUS) {
ierr = VecNorm_SeqCUDA(xin,NORM_2,&work);CHKERRQ(ierr);
work *= work;
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
*z = PetscSqrtReal(sum);
} else if (type == NORM_1) {
/* Find the local part */
ierr = VecNorm_SeqCUDA(xin,NORM_1,&work);CHKERRQ(ierr);
/* Find the global sum */
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
} else if (type == NORM_INFINITY) {
/* Find the local max */
ierr = VecNorm_SeqCUDA(xin,NORM_INFINITY,&work);CHKERRQ(ierr);
/* Find the global max */
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
} else if (type == NORM_1_AND_2) {
PetscReal temp[2];
ierr = VecNorm_SeqCUDA(xin,NORM_1,temp);CHKERRQ(ierr);
ierr = VecNorm_SeqCUDA(xin,NORM_2,temp+1);CHKERRQ(ierr);
temp[1] = temp[1]*temp[1];
ierr = MPIU_Allreduce(temp,z,2,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
z[1] = PetscSqrtReal(z[1]);
}
PetscFunctionReturn(0);
}
PetscErrorCode VecDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z)
{
PetscScalar sum,work;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
*z = sum;
PetscFunctionReturn(0);
}
PetscErrorCode VecTDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z)
{
PetscScalar sum,work;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecTDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
*z = sum;
PetscFunctionReturn(0);
}
PetscErrorCode VecMDot_MPICUDA(Vec xin,PetscInt nv,const Vec y[],PetscScalar *z)
{
PetscScalar awork[128],*work = awork;
PetscErrorCode ierr;
PetscFunctionBegin;
if (nv > 128) {
ierr = PetscMalloc1(nv,&work);CHKERRQ(ierr);
}
ierr = VecMDot_SeqCUDA(xin,nv,y,work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(work,z,nv,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
if (nv > 128) {
ierr = PetscFree(work);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*MC
VECMPICUDA - VECMPICUDA = "mpicuda" - The basic parallel vector, modified to use CUDA
Options Database Keys:
. -vec_type mpicuda - sets the vector type to VECMPICUDA during a call to VecSetFromOptions()
Level: beginner
.seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VECMPI, VecType, VecCreateMPI()
M*/
PetscErrorCode VecDuplicate_MPICUDA(Vec win,Vec *v)
{
PetscErrorCode ierr;
Vec_MPI *vw,*w = (Vec_MPI*)win->data;
PetscScalar *array;
PetscFunctionBegin;
ierr = VecCreate(PetscObjectComm((PetscObject)win),v);CHKERRQ(ierr);
ierr = PetscLayoutReference(win->map,&(*v)->map);CHKERRQ(ierr);
ierr = VecCreate_MPICUDA_Private(*v,PETSC_TRUE,w->nghost,0);CHKERRQ(ierr);
vw = (Vec_MPI*)(*v)->data;
ierr = PetscMemcpy((*v)->ops,win->ops,sizeof(struct _VecOps));CHKERRQ(ierr);
/* save local representation of the parallel vector (and scatter) if it exists */
if (w->localrep) {
ierr = VecGetArray(*v,&array);CHKERRQ(ierr);
ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,1,win->map->n+w->nghost,array,&vw->localrep);CHKERRQ(ierr);
ierr = PetscMemcpy(vw->localrep->ops,w->localrep->ops,sizeof(struct _VecOps));CHKERRQ(ierr);
ierr = VecRestoreArray(*v,&array);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)*v,(PetscObject)vw->localrep);CHKERRQ(ierr);
vw->localupdate = w->localupdate;
if (vw->localupdate) {
ierr = PetscObjectReference((PetscObject)vw->localupdate);CHKERRQ(ierr);
}
}
/* New vector should inherit stashing property of parent */
(*v)->stash.donotstash = win->stash.donotstash;
(*v)->stash.ignorenegidx = win->stash.ignorenegidx;
/* change type_name appropriately */
ierr = VecCUDAAllocateCheck(*v);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)(*v),VECMPICUDA);CHKERRQ(ierr);
ierr = PetscObjectListDuplicate(((PetscObject)win)->olist,&((PetscObject)(*v))->olist);CHKERRQ(ierr);
ierr = PetscFunctionListDuplicate(((PetscObject)win)->qlist,&((PetscObject)(*v))->qlist);CHKERRQ(ierr);
(*v)->map->bs = PetscAbs(win->map->bs);
(*v)->bstash.bs = win->bstash.bs;
PetscFunctionReturn(0);
}
PetscErrorCode VecDotNorm2_MPICUDA(Vec s,Vec t,PetscScalar *dp,PetscScalar *nm)
{
PetscErrorCode ierr;
PetscScalar work[2],sum[2];
PetscFunctionBegin;
ierr = VecDotNorm2_SeqCUDA(s,t,work,work+1);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,2,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)s));CHKERRQ(ierr);
*dp = sum[0];
*nm = sum[1];
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_MPICUDA(Vec vv)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscLayoutSetUp(vv->map);CHKERRQ(ierr);
ierr = VecCUDAAllocateCheck(vv);CHKERRCUDA(ierr);
vv->valid_GPU_array = PETSC_OFFLOAD_GPU;
ierr = VecCreate_MPICUDA_Private(vv,PETSC_FALSE,0,((Vec_CUDA*)vv->spptr)->GPUarray_allocated);CHKERRQ(ierr);
ierr = VecSet(vv,0.0);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_CUDA(Vec v)
{
PetscErrorCode ierr;
PetscMPIInt size;
PetscFunctionBegin;
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)v),&size);CHKERRQ(ierr);
if (size == 1) {
ierr = VecSetType(v,VECSEQCUDA);CHKERRQ(ierr);
} else {
ierr = VecSetType(v,VECMPICUDA);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*@C
VecCreateMPICUDAWithArray - Creates a parallel, array-style vector,
where the user provides the GPU array space to store the vector values.
Collective
Input Parameters:
+ comm - the MPI communicator to use
. bs - block size, same meaning as VecSetBlockSize()
. n - local vector length, cannot be PETSC_DECIDE
. N - global vector length (or PETSC_DECIDE to have calculated)
- array - the user provided GPU array to store the vector values
Output Parameter:
. vv - the vector
Notes:
Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the
same type as an existing vector.
If the user-provided array is NULL, then VecCUDAPlaceArray() can be used
at a later stage to SET the array for storing the vector values.
PETSc does NOT free the array when the vector is destroyed via VecDestroy().
The user should not free the array until the vector is destroyed.
Level: intermediate
.seealso: VecCreateSeqCUDAWithArray(), VecCreateMPIWithArray(), VecCreateSeqWithArray(),
VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(),
VecCreateMPI(), VecCreateGhostWithArray(), VecPlaceArray()
@*/
PetscErrorCode VecCreateMPICUDAWithArray(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar array[],Vec *vv)
{
PetscErrorCode ierr;
PetscFunctionBegin;
if (n == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size of vector");
ierr = PetscSplitOwnership(comm,&n,&N);CHKERRQ(ierr);
ierr = VecCreate(comm,vv);CHKERRQ(ierr);
ierr = VecSetSizes(*vv,n,N);CHKERRQ(ierr);
ierr = VecSetBlockSize(*vv,bs);CHKERRQ(ierr);
ierr = VecCreate_MPICUDA_Private(*vv,PETSC_FALSE,0,array);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
extern "C" PetscErrorCode VecGetArrayWrite_SeqCUDA(Vec,PetscScalar**);
PetscErrorCode VecPinToCPU_MPICUDA(Vec V,PetscBool pin)
{
PetscErrorCode ierr;
PetscFunctionBegin;
V->pinnedtocpu = pin;
if (pin) {
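/* Pinning to the CPU copies the data back from the GPU, marks the host copy as
   valid and swaps the operation table to the host Seq/MPI implementations;
   unpinning restores the CUDA implementations below. */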
ierr = VecCUDACopyFromGPU(V);CHKERRQ(ierr);
V->valid_GPU_array = PETSC_OFFLOAD_CPU; /* since the CPU code will likely change values in the vector */
V->ops->dotnorm2 = NULL;
V->ops->waxpy = VecWAXPY_Seq;
V->ops->dot = VecDot_MPI;
V->ops->mdot = VecMDot_MPI;
V->ops->tdot = VecTDot_MPI;
V->ops->norm = VecNorm_MPI;
V->ops->scale = VecScale_Seq;
V->ops->copy = VecCopy_Seq;
V->ops->set = VecSet_Seq;
V->ops->swap = VecSwap_Seq;
V->ops->axpy = VecAXPY_Seq;
V->ops->axpby = VecAXPBY_Seq;
V->ops->maxpy = VecMAXPY_Seq;
V->ops->aypx = VecAYPX_Seq;
V->ops->axpbypcz = VecAXPBYPCZ_Seq;
V->ops->pointwisemult = VecPointwiseMult_Seq;
V->ops->setrandom = VecSetRandom_Seq;
V->ops->placearray = VecPlaceArray_Seq;
V->ops->replacearray = VecReplaceArray_Seq;
V->ops->resetarray = VecResetArray_Seq;
V->ops->dot_local = VecDot_Seq;
V->ops->tdot_local = VecTDot_Seq;
V->ops->norm_local = VecNorm_Seq;
V->ops->mdot_local = VecMDot_Seq;
V->ops->pointwisedivide = VecPointwiseDivide_Seq;
V->ops->getlocalvector = NULL;
V->ops->restorelocalvector = NULL;
V->ops->getlocalvectorread = NULL;
V->ops->restorelocalvectorread = NULL;
V->ops->getarraywrite = NULL;
} else {
V->ops->dotnorm2 = VecDotNorm2_MPICUDA;
V->ops->waxpy = VecWAXPY_SeqCUDA;
V->ops->duplicate = VecDuplicate_MPICUDA;
V->ops->dot = VecDot_MPICUDA;
V->ops->mdot = VecMDot_MPICUDA;
V->ops->tdot = VecTDot_MPICUDA;
V->ops->norm = VecNorm_MPICUDA;
V->ops->scale = VecScale_SeqCUDA;
V->ops->copy = VecCopy_SeqCUDA;
V->ops->set = VecSet_SeqCUDA;
V->ops->swap = VecSwap_SeqCUDA;
V->ops->axpy = VecAXPY_SeqCUDA;
V->ops->axpby = VecAXPBY_SeqCUDA;
V->ops->maxpy = VecMAXPY_SeqCUDA;
V->ops->aypx = VecAYPX_SeqCUDA;
V->ops->axpbypcz = VecAXPBYPCZ_SeqCUDA;
V->ops->pointwisemult = VecPointwiseMult_SeqCUDA;
V->ops->setrandom = VecSetRandom_SeqCUDA;
V->ops->placearray = VecPlaceArray_SeqCUDA;
V->ops->replacearray = VecReplaceArray_SeqCUDA;
V->ops->resetarray = VecResetArray_SeqCUDA;
V->ops->dot_local = VecDot_SeqCUDA;
V->ops->tdot_local = VecTDot_SeqCUDA;
V->ops->norm_local = VecNorm_SeqCUDA;
V->ops->mdot_local = VecMDot_SeqCUDA;
V->ops->destroy = VecDestroy_MPICUDA;
V->ops->pointwisedivide = VecPointwiseDivide_SeqCUDA;
V->ops->getlocalvector = VecGetLocalVector_SeqCUDA;
V->ops->restorelocalvector = VecRestoreLocalVector_SeqCUDA;
V->ops->getlocalvectorread = VecGetLocalVector_SeqCUDA;
V->ops->restorelocalvectorread = VecRestoreLocalVector_SeqCUDA;
V->ops->getarraywrite = VecGetArrayWrite_SeqCUDA;
}
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_MPICUDA_Private(Vec vv,PetscBool alloc,PetscInt nghost,const PetscScalar array[])
{
PetscErrorCode ierr;
hipError_t err;
Vec_CUDA *veccuda;
PetscFunctionBegin;
ierr = VecCreate_MPI_Private(vv,PETSC_FALSE,0,0);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)vv,VECMPICUDA);CHKERRQ(ierr);
ierr = VecPinToCPU_MPICUDA(vv,PETSC_FALSE);CHKERRQ(ierr);
vv->ops->pintocpu = VecPinToCPU_MPICUDA;
/* Later, functions check for the Vec_CUDA structure existence, so do not create it without array */
if (alloc && !array) {
ierr = VecCUDAAllocateCheck(vv);CHKERRQ(ierr);
ierr = VecSet(vv,0.0);CHKERRQ(ierr);
}
if (array) {
if (!vv->spptr) {
/* Cannot use PetscNew() here because spptr is void* */
ierr = PetscMalloc(sizeof(Vec_CUDA),&vv->spptr);CHKERRQ(ierr);
veccuda = (Vec_CUDA*)vv->spptr;
err = hipStreamCreate(&veccuda->stream);CHKERRCUDA(err);
veccuda->GPUarray_allocated = 0;
veccuda->hostDataRegisteredAsPageLocked = PETSC_FALSE;
vv->valid_GPU_array = PETSC_OFFLOAD_UNALLOCATED;
}
veccuda = (Vec_CUDA*)vv->spptr;
veccuda->GPUarray = (PetscScalar*)array;
}
PetscFunctionReturn(0);
}
|
ba28bcfb7921704c4996badd69a6dc09cbcf2882.cu
|
/*
This file contains routines for Parallel vector operations.
*/
#define PETSC_SKIP_SPINLOCK
#define PETSC_SKIP_CXX_COMPLEX_FIX
#include <petscconf.h>
#include <../src/vec/vec/impls/mpi/pvecimpl.h> /*I "petscvec.h" I*/
#include <../src/vec/vec/impls/seq/seqcuda/cudavecimpl.h>
PetscErrorCode VecDestroy_MPICUDA(Vec v)
{
PetscErrorCode ierr;
cudaError_t err;
PetscFunctionBegin;
if (v->spptr) {
if (((Vec_CUDA*)v->spptr)->GPUarray_allocated) {
err = cudaFree(((Vec_CUDA*)v->spptr)->GPUarray_allocated);CHKERRCUDA(err);
((Vec_CUDA*)v->spptr)->GPUarray_allocated = NULL;
}
err = cudaStreamDestroy(((Vec_CUDA*)v->spptr)->stream);CHKERRCUDA(err);
ierr = PetscFree(v->spptr);CHKERRQ(ierr);
}
ierr = VecDestroy_MPI(v);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode VecNorm_MPICUDA(Vec xin,NormType type,PetscReal *z)
{
PetscReal sum,work = 0.0;
PetscErrorCode ierr;
PetscFunctionBegin;
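/* Local norms are computed on the GPU by the SeqCUDA kernels; the per-rank
   results are then combined with an MPI reduction. */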
if (type == NORM_2 || type == NORM_FROBENIUS) {
ierr = VecNorm_SeqCUDA(xin,NORM_2,&work);CHKERRQ(ierr);
work *= work;
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
*z = PetscSqrtReal(sum);
} else if (type == NORM_1) {
/* Find the local part */
ierr = VecNorm_SeqCUDA(xin,NORM_1,&work);CHKERRQ(ierr);
/* Find the global sum */
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
} else if (type == NORM_INFINITY) {
/* Find the local max */
ierr = VecNorm_SeqCUDA(xin,NORM_INFINITY,&work);CHKERRQ(ierr);
/* Find the global max */
ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
} else if (type == NORM_1_AND_2) {
PetscReal temp[2];
ierr = VecNorm_SeqCUDA(xin,NORM_1,temp);CHKERRQ(ierr);
ierr = VecNorm_SeqCUDA(xin,NORM_2,temp+1);CHKERRQ(ierr);
temp[1] = temp[1]*temp[1];
ierr = MPIU_Allreduce(temp,z,2,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
z[1] = PetscSqrtReal(z[1]);
}
PetscFunctionReturn(0);
}
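/*
For reference: the global norms above are assembled from the per-rank
VecNorm_SeqCUDA results, e.g. for NORM_2
  ||x||_2 = sqrt( sum_p ||x_p||_2^2 ),
where x_p is the part of x owned by rank p; NORM_1 sums the local 1-norms and
NORM_INFINITY takes the maximum of the local max-norms over all ranks.
*/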
PetscErrorCode VecDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z)
{
PetscScalar sum,work;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
*z = sum;
PetscFunctionReturn(0);
}
PetscErrorCode VecTDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z)
{
PetscScalar sum,work;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = VecTDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
*z = sum;
PetscFunctionReturn(0);
}
PetscErrorCode VecMDot_MPICUDA(Vec xin,PetscInt nv,const Vec y[],PetscScalar *z)
{
PetscScalar awork[128],*work = awork;
PetscErrorCode ierr;
PetscFunctionBegin;
if (nv > 128) {
ierr = PetscMalloc1(nv,&work);CHKERRQ(ierr);
}
ierr = VecMDot_SeqCUDA(xin,nv,y,work);CHKERRQ(ierr);
ierr = MPIU_Allreduce(work,z,nv,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRQ(ierr);
if (nv > 128) {
ierr = PetscFree(work);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*MC
VECMPICUDA - VECMPICUDA = "mpicuda" - The basic parallel vector, modified to use CUDA
Options Database Keys:
. -vec_type mpicuda - sets the vector type to VECMPICUDA during a call to VecSetFromOptions()
Level: beginner
.seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VECMPI, VecType, VecCreateMPI()
M*/
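/*
Illustrative usage sketch (hypothetical variable names, error handling abbreviated):
letting the options database select this type at runtime.

  Vec x;
  PetscErrorCode ierr;
  ierr = VecCreate(PETSC_COMM_WORLD,&x);CHKERRQ(ierr);
  ierr = VecSetSizes(x,PETSC_DECIDE,128);CHKERRQ(ierr);
  ierr = VecSetFromOptions(x);CHKERRQ(ierr);   // run with -vec_type mpicuda
  ierr = VecSet(x,1.0);CHKERRQ(ierr);
  ierr = VecDestroy(&x);CHKERRQ(ierr);
*/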
PetscErrorCode VecDuplicate_MPICUDA(Vec win,Vec *v)
{
PetscErrorCode ierr;
Vec_MPI *vw,*w = (Vec_MPI*)win->data;
PetscScalar *array;
PetscFunctionBegin;
ierr = VecCreate(PetscObjectComm((PetscObject)win),v);CHKERRQ(ierr);
ierr = PetscLayoutReference(win->map,&(*v)->map);CHKERRQ(ierr);
ierr = VecCreate_MPICUDA_Private(*v,PETSC_TRUE,w->nghost,0);CHKERRQ(ierr);
vw = (Vec_MPI*)(*v)->data;
ierr = PetscMemcpy((*v)->ops,win->ops,sizeof(struct _VecOps));CHKERRQ(ierr);
/* save local representation of the parallel vector (and scatter) if it exists */
if (w->localrep) {
ierr = VecGetArray(*v,&array);CHKERRQ(ierr);
ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,1,win->map->n+w->nghost,array,&vw->localrep);CHKERRQ(ierr);
ierr = PetscMemcpy(vw->localrep->ops,w->localrep->ops,sizeof(struct _VecOps));CHKERRQ(ierr);
ierr = VecRestoreArray(*v,&array);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)*v,(PetscObject)vw->localrep);CHKERRQ(ierr);
vw->localupdate = w->localupdate;
if (vw->localupdate) {
ierr = PetscObjectReference((PetscObject)vw->localupdate);CHKERRQ(ierr);
}
}
/* New vector should inherit stashing property of parent */
(*v)->stash.donotstash = win->stash.donotstash;
(*v)->stash.ignorenegidx = win->stash.ignorenegidx;
/* change type_name appropriately */
ierr = VecCUDAAllocateCheck(*v);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)(*v),VECMPICUDA);CHKERRQ(ierr);
ierr = PetscObjectListDuplicate(((PetscObject)win)->olist,&((PetscObject)(*v))->olist);CHKERRQ(ierr);
ierr = PetscFunctionListDuplicate(((PetscObject)win)->qlist,&((PetscObject)(*v))->qlist);CHKERRQ(ierr);
(*v)->map->bs = PetscAbs(win->map->bs);
(*v)->bstash.bs = win->bstash.bs;
PetscFunctionReturn(0);
}
PetscErrorCode VecDotNorm2_MPICUDA(Vec s,Vec t,PetscScalar *dp,PetscScalar *nm)
{
PetscErrorCode ierr;
PetscScalar work[2],sum[2];
PetscFunctionBegin;
ierr = VecDotNorm2_SeqCUDA(s,t,work,work+1);CHKERRQ(ierr);
ierr = MPIU_Allreduce(&work,&sum,2,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)s));CHKERRQ(ierr);
*dp = sum[0];
*nm = sum[1];
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_MPICUDA(Vec vv)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = PetscLayoutSetUp(vv->map);CHKERRQ(ierr);
ierr = VecCUDAAllocateCheck(vv);CHKERRCUDA(ierr);
vv->valid_GPU_array = PETSC_OFFLOAD_GPU;
ierr = VecCreate_MPICUDA_Private(vv,PETSC_FALSE,0,((Vec_CUDA*)vv->spptr)->GPUarray_allocated);CHKERRQ(ierr);
ierr = VecSet(vv,0.0);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_CUDA(Vec v)
{
PetscErrorCode ierr;
PetscMPIInt size;
PetscFunctionBegin;
ierr = MPI_Comm_size(PetscObjectComm((PetscObject)v),&size);CHKERRQ(ierr);
if (size == 1) {
ierr = VecSetType(v,VECSEQCUDA);CHKERRQ(ierr);
} else {
ierr = VecSetType(v,VECMPICUDA);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*@C
VecCreateMPICUDAWithArray - Creates a parallel, array-style vector,
where the user provides the GPU array space to store the vector values.
Collective
Input Parameters:
+ comm - the MPI communicator to use
. bs - block size, same meaning as VecSetBlockSize()
. n - local vector length, cannot be PETSC_DECIDE
. N - global vector length (or PETSC_DECIDE to have calculated)
- array - the user provided GPU array to store the vector values
Output Parameter:
. vv - the vector
Notes:
Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the
same type as an existing vector.
If the user-provided array is NULL, then VecCUDAPlaceArray() can be used
at a later stage to SET the array for storing the vector values.
PETSc does NOT free the array when the vector is destroyed via VecDestroy().
The user should not free the array until the vector is destroyed.
Level: intermediate
.seealso: VecCreateSeqCUDAWithArray(), VecCreateMPIWithArray(), VecCreateSeqWithArray(),
VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(),
VecCreateMPI(), VecCreateGhostWithArray(), VecPlaceArray()
@*/
PetscErrorCode VecCreateMPICUDAWithArray(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar array[],Vec *vv)
{
PetscErrorCode ierr;
PetscFunctionBegin;
if (n == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size of vector");
ierr = PetscSplitOwnership(comm,&n,&N);CHKERRQ(ierr);
ierr = VecCreate(comm,vv);CHKERRQ(ierr);
ierr = VecSetSizes(*vv,n,N);CHKERRQ(ierr);
ierr = VecSetBlockSize(*vv,bs);CHKERRQ(ierr);
ierr = VecCreate_MPICUDA_Private(*vv,PETSC_FALSE,0,array);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
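/*
Illustrative usage sketch (hypothetical variable names): wrapping a user-managed
device buffer of n local entries. As noted above, the buffer is not freed by
VecDestroy() and must outlive the vector.

  PetscScalar *d_array;
  Vec x;
  PetscInt n = 128;                 // local length; cannot be PETSC_DECIDE
  cudaError_t cerr;
  PetscErrorCode ierr;
  cerr = cudaMalloc((void**)&d_array,n*sizeof(PetscScalar));CHKERRCUDA(cerr);
  ierr = VecCreateMPICUDAWithArray(PETSC_COMM_WORLD,1,n,PETSC_DECIDE,d_array,&x);CHKERRQ(ierr);
  // ... use x ...
  ierr = VecDestroy(&x);CHKERRQ(ierr);
  cerr = cudaFree(d_array);CHKERRCUDA(cerr);
*/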
extern "C" PetscErrorCode VecGetArrayWrite_SeqCUDA(Vec,PetscScalar**);
PetscErrorCode VecPinToCPU_MPICUDA(Vec V,PetscBool pin)
{
PetscErrorCode ierr;
PetscFunctionBegin;
V->pinnedtocpu = pin;
if (pin) {
ierr = VecCUDACopyFromGPU(V);CHKERRQ(ierr);
V->valid_GPU_array = PETSC_OFFLOAD_CPU; /* since the CPU code will likely change values in the vector */
V->ops->dotnorm2 = NULL;
V->ops->waxpy = VecWAXPY_Seq;
V->ops->dot = VecDot_MPI;
V->ops->mdot = VecMDot_MPI;
V->ops->tdot = VecTDot_MPI;
V->ops->norm = VecNorm_MPI;
V->ops->scale = VecScale_Seq;
V->ops->copy = VecCopy_Seq;
V->ops->set = VecSet_Seq;
V->ops->swap = VecSwap_Seq;
V->ops->axpy = VecAXPY_Seq;
V->ops->axpby = VecAXPBY_Seq;
V->ops->maxpy = VecMAXPY_Seq;
V->ops->aypx = VecAYPX_Seq;
V->ops->axpbypcz = VecAXPBYPCZ_Seq;
V->ops->pointwisemult = VecPointwiseMult_Seq;
V->ops->setrandom = VecSetRandom_Seq;
V->ops->placearray = VecPlaceArray_Seq;
V->ops->replacearray = VecReplaceArray_Seq;
V->ops->resetarray = VecResetArray_Seq;
V->ops->dot_local = VecDot_Seq;
V->ops->tdot_local = VecTDot_Seq;
V->ops->norm_local = VecNorm_Seq;
V->ops->mdot_local = VecMDot_Seq;
V->ops->pointwisedivide = VecPointwiseDivide_Seq;
V->ops->getlocalvector = NULL;
V->ops->restorelocalvector = NULL;
V->ops->getlocalvectorread = NULL;
V->ops->restorelocalvectorread = NULL;
V->ops->getarraywrite = NULL;
} else {
V->ops->dotnorm2 = VecDotNorm2_MPICUDA;
V->ops->waxpy = VecWAXPY_SeqCUDA;
V->ops->duplicate = VecDuplicate_MPICUDA;
V->ops->dot = VecDot_MPICUDA;
V->ops->mdot = VecMDot_MPICUDA;
V->ops->tdot = VecTDot_MPICUDA;
V->ops->norm = VecNorm_MPICUDA;
V->ops->scale = VecScale_SeqCUDA;
V->ops->copy = VecCopy_SeqCUDA;
V->ops->set = VecSet_SeqCUDA;
V->ops->swap = VecSwap_SeqCUDA;
V->ops->axpy = VecAXPY_SeqCUDA;
V->ops->axpby = VecAXPBY_SeqCUDA;
V->ops->maxpy = VecMAXPY_SeqCUDA;
V->ops->aypx = VecAYPX_SeqCUDA;
V->ops->axpbypcz = VecAXPBYPCZ_SeqCUDA;
V->ops->pointwisemult = VecPointwiseMult_SeqCUDA;
V->ops->setrandom = VecSetRandom_SeqCUDA;
V->ops->placearray = VecPlaceArray_SeqCUDA;
V->ops->replacearray = VecReplaceArray_SeqCUDA;
V->ops->resetarray = VecResetArray_SeqCUDA;
V->ops->dot_local = VecDot_SeqCUDA;
V->ops->tdot_local = VecTDot_SeqCUDA;
V->ops->norm_local = VecNorm_SeqCUDA;
V->ops->mdot_local = VecMDot_SeqCUDA;
V->ops->destroy = VecDestroy_MPICUDA;
V->ops->pointwisedivide = VecPointwiseDivide_SeqCUDA;
V->ops->getlocalvector = VecGetLocalVector_SeqCUDA;
V->ops->restorelocalvector = VecRestoreLocalVector_SeqCUDA;
V->ops->getlocalvectorread = VecGetLocalVector_SeqCUDA;
V->ops->restorelocalvectorread = VecRestoreLocalVector_SeqCUDA;
V->ops->getarraywrite = VecGetArrayWrite_SeqCUDA;
}
PetscFunctionReturn(0);
}
PetscErrorCode VecCreate_MPICUDA_Private(Vec vv,PetscBool alloc,PetscInt nghost,const PetscScalar array[])
{
PetscErrorCode ierr;
cudaError_t err;
Vec_CUDA *veccuda;
PetscFunctionBegin;
ierr = VecCreate_MPI_Private(vv,PETSC_FALSE,0,0);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)vv,VECMPICUDA);CHKERRQ(ierr);
ierr = VecPinToCPU_MPICUDA(vv,PETSC_FALSE);CHKERRQ(ierr);
vv->ops->pintocpu = VecPinToCPU_MPICUDA;
/* Later, functions check for the Vec_CUDA structure existence, so do not create it without array */
if (alloc && !array) {
ierr = VecCUDAAllocateCheck(vv);CHKERRQ(ierr);
ierr = VecSet(vv,0.0);CHKERRQ(ierr);
}
if (array) {
if (!vv->spptr) {
/* Cannot use PetscNew() here because spptr is void* */
ierr = PetscMalloc(sizeof(Vec_CUDA),&vv->spptr);CHKERRQ(ierr);
veccuda = (Vec_CUDA*)vv->spptr;
err = cudaStreamCreate(&veccuda->stream);CHKERRCUDA(err);
veccuda->GPUarray_allocated = 0;
veccuda->hostDataRegisteredAsPageLocked = PETSC_FALSE;
vv->valid_GPU_array = PETSC_OFFLOAD_UNALLOCATED;
}
veccuda = (Vec_CUDA*)vv->spptr;
veccuda->GPUarray = (PetscScalar*)array;
}
PetscFunctionReturn(0);
}
|
672a5695ad46a68b913f90194f4822c88eb6ab37.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define IMUL(a, b) __mul24(a, b)
#define iDivUp(a,b) ((a)+(b)-1)/(b)
#define CONV1_THREAD_SIZE 256
#define CONVN_THREAD_SIZE1 16
#define CONVN_THREAD_SIZE2 31 //31 is faster than 32 because shared memory is too full
// 28 space-time orientations of V1 simple cells
#define nrFilters 28
// 8 directions
#define nrDirs 8
// each of these is a unit vector (e.g. d_v1popDirs[0..2][0] => (-0.6559)^2+(0.7246)^2+(0.2113)^2 == 1)
// all vectors lie on the surface of a dome (hemisphere) in 3D Fourier space
// x, y, t
__constant__ float d_v1popDirs[3][nrFilters] = {
{ 0.7246,-0.9718, 0.7496,-0.5837,-0.0810, 0.9439, 0.3203,-0.8712,-0.1593,-0.5142, 0.9304, 0.3737,-0.8031,-0.8126, 0.6004,-0.5738, 0.0024, 0.5969, 0.1436, 0.7757,-0.4004,-0.5108, 0.2375,-0.2221,-0.5140, 0.5194,-0.0870, 0.3838},
{-0.6559,-0.1019, 0.6240,-0.7797, 0.9692,-0.2312,-0.9151, 0.4207,-0.9533, 0.8175, 0.2398, 0.8810,-0.4430, 0.0588,-0.5384, 0.5644, 0.7931, 0.5142,-0.7680,-0.0669,-0.6670,-0.2747, 0.5034, 0.5042, 0.1580, 0.1332,-0.5159,-0.3549},
{ 0.2113, 0.2126, 0.2210, 0.2266, 0.2327, 0.2359, 0.2451, 0.2529, 0.2567, 0.2593, 0.2772, 0.2902, 0.3984, 0.5799, 0.5913, 0.5935, 0.6091, 0.6160, 0.6241, 0.6275, 0.6283, 0.8146, 0.8308, 0.8345, 0.8431, 0.8441, 0.8522, 0.8525}
};
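// Worked check of the unit-length claim for the first column of d_v1popDirs:
// 0.7246^2 + (-0.6559)^2 + 0.2113^2 = 0.52505 + 0.43020 + 0.04465 = 0.99990, i.e. 1 up to rounding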
// \TODO use dynamic shared memory to allocate interpolation weights from filters onto directions
//extern __shared__ float motionProjDyn[];
__constant__ float motionProj[3][nrFilters][nrDirs] = {
// 1.5 px/fr
{{0.002719, 0.011644, -0.002266, -0.094267, -0.088188, -0.021185, -0.097296, -0.081224},
{-0.023337, -0.106719, -0.077625, 0.007519, 0.015789, -0.006119, -0.100403, -0.080257},
{0.002680, -0.081351, -0.101069, -0.017226, -0.080847, -0.101749, -0.007590, 0.013499},
{-0.105574, -0.075236, 0.004742, 0.012976, -0.014051, -0.107587, -0.074961, -0.019999},
{-0.101953, -0.078081, -0.011287, -0.098204, -0.084890, 0.000210, 0.010038, -0.012016},
{0.013383, 0.006850, -0.065943, -0.111274, -0.019242, -0.057148, -0.114513, -0.024744},
{-0.061140, 0.005103, 0.009873, -0.040867, -0.119077, -0.040599, -0.024648, -0.118630},
{-0.044083, -0.024613, -0.117431, -0.058263, 0.008179, 0.011178, -0.041624, -0.123260},
{-0.117735, -0.024345, 0.008927, -0.001303, -0.081491, -0.104572, -0.007043, -0.067123},
{-0.112299, -0.007929, -0.052488, -0.116908, -0.030257, 0.009875, 0.000031, -0.080050},
{0.005659, -0.038535, -0.118830, -0.041564, -0.003097, -0.109397, -0.074442, -0.001213},
{-0.058675, -0.117962, -0.014533, -0.012484, -0.117591, -0.062163, 0.000398, 0.002208},
{0.018759, -0.114775, -0.072420, -0.015796, -0.024858, -0.096423, -0.092574, 0.055902},
{0.186206, 0.066666, -0.093901, -0.083400, -0.050103, -0.073462, -0.098693, 0.033705},
{-0.080118, -0.054913, -0.083459, -0.089374, 0.073523, 0.196686, 0.042820, -0.099633},
{0.054765, 0.196021, 0.053990, -0.096637, -0.079803, -0.052328, -0.079579, -0.097683},
{-0.092917, 0.065871, 0.209082, 0.069072, -0.091233, -0.083159, -0.057583, -0.084435},
{-0.080172, -0.094335, 0.051800, 0.212371, 0.093687, -0.080371, -0.086343, -0.058694},
{-0.098998, -0.072622, -0.059005, -0.090925, -0.063341, 0.128669, 0.208913, 0.022446},
{-0.060042, -0.079222, -0.091489, 0.056837, 0.222364, 0.105811, -0.074728, -0.087321},
{0.017240, -0.093537, -0.070377, -0.061221, -0.093369, -0.052045, 0.145020, 0.205317},
{0.286286, 0.081896, -0.036612, -0.052630, -0.051398, -0.018586, 0.133449, 0.321045},
{-0.022913, 0.104241, 0.296008, 0.312626, 0.130534, -0.012370, -0.046058, -0.046925},
{0.125102, 0.308023, 0.301202, 0.114413, -0.017407, -0.044594, -0.042965, -0.011909},
{0.326466, 0.292408, 0.103527, -0.017697, -0.041094, -0.038607, 0.005764, 0.158558},
{-0.041630, -0.020604, 0.094234, 0.286191, 0.333461, 0.167671, 0.008501, -0.038692},
{0.090092, -0.015961, -0.038161, -0.032698, 0.027051, 0.193491, 0.340642, 0.274792},
{-0.027770, -0.037982, -0.026487, 0.060731, 0.246749, 0.348872, 0.225464, 0.046336}
},
// 0.125 px/fr
{{-0.000000, 0.897296, 0.353176, 0.000000, 0.000000, 1.209524, 0.285543, 0.265591},
{1.029417, 0.000000, 0.000000, 0.000000, 0.620836, 0.000000, 0.188835, 0.246830},
{-0.108047, -0.000000, -0.000000, 0.929848, -0.197093, -0.000000, 0.000000, 0.508013},
{0.000000, 0.000000, 0.000000, 0.367456, 0.197863, 0.000000, -0.000000, 0.859015},
{0.000000, 0.000000, 1.229100, 0.000000, 0.000000, 0.000000, 0.738794, 0.271190},
{0.000000, -0.410008, -0.247282, -0.086121, 0.462063, -0.271767, -0.182609, -0.182525},
{0.000000, -0.263183, -0.099207, -0.088605, 0.000000, -0.000000, 0.174004, -0.096171},
{0.000000, 0.278772, -0.000000, -0.140555, -0.146193, -0.000000, -0.000000, -0.109096},
{-0.201618, -0.000000, 0.000000, -0.193351, -0.268166, -0.162138, 0.555250, -0.276805},
{-0.151171, 0.360803, -0.466397, -0.178297, -0.186825, -0.000000, -0.475992, -0.326441},
{-0.000000, -0.000000, -0.000000, -0.277033, 0.374329, -0.000000, -0.210372, -0.264749},
{-0.000000, -0.000000, 0.000000, 0.000000, -0.000000, -0.000000, -0.180506, -0.239941},
{-0.395916, -0.000000, -0.195059, -0.224185, -0.413778, -0.191141, -0.156726, -0.000000},
{-0.147002, -0.285356, -0.156458, -0.103351, -0.243213, -0.128499, -0.195833, -0.280861},
{-0.189982, -0.737936, -0.455772, -0.300128, -0.382581, -0.523640, -0.524815, -0.397732},
{0.000000, 0.000000, 0.287464, 0.000000, 0.200886, 0.000000, 0.338290, 0.285218},
{-0.101822, -0.298001, -0.479286, -0.185336, -0.174942, -0.190061, -0.451103, -0.143887},
{0.000000, -0.000000, -0.000000, -0.190980, -0.000000, -0.000000, -0.000000, -0.230796},
{0.000000, 0.293190, 0.000000, 0.000000, 0.172343, 0.000000, 0.000000, 0.210156},
{-0.000000, 0.430281, 0.305841, 0.200276, 0.000000, 0.000000, 0.363526, 0.321661},
{0.000000, -0.000000, -0.108791, -0.143990, 0.000000, 0.000000, -0.145709, -0.197730},
{0.000000, 0.204758, 0.000000, 0.000000, 0.200794, 0.000000, 0.271457, 0.000000},
{-0.000000, 0.332910, 0.286988, 0.000000, 0.155198, 0.000000, 0.329061, 0.300256},
{-0.000000, -0.165435, -0.000000, -0.092666, -0.128557, -0.000000, -0.000000, -0.269069},
{-0.097398, -0.000000, -0.208955, -0.130879, -0.082892, -0.000000, -0.212524, -0.000000},
{-0.105448, -0.491387, -0.410388, -0.190047, -0.237196, -0.307983, -0.477275, -0.285832},
{-0.218714, -0.380534, -0.261717, -0.160753, -0.338830, -0.255540, -0.277978, -0.161782},
{0.000000, 0.364896, 0.000000, 0.000000, 0.240844, 0.000000, 0.297409, 0.000000}
},
// 9 px/fr
{{-4.864834, -4.867060, -4.823441, -4.740868, -4.646694, -4.603636, -4.662763, -4.786739},
{-3.428012, -3.488151, -3.550758, -3.560310, -3.517467, -3.463406, -3.420058, -3.404072},
{-0.000000, -0.000000, -0.000000, -0.000000, -0.000000, -0.000000, -0.000000, -0.000000},
{-1.957444, -2.017401, -2.055229, -2.057289, -2.021035, -1.947560, -1.893333, -1.904727},
{-3.979133, -3.925736, -3.877434, -3.860755, -3.871451, -3.888292, -3.926100, -3.978706},
{1.948717, 1.963352, 2.010421, 2.063527, 2.077270, 2.045093, 1.995961, 1.960698},
{1.629890, 1.580667, 1.557382, 1.570485, 1.611004, 1.649102, 1.673758, 1.672242},
{1.784991, 1.784529, 1.721898, 1.625747, 1.555419, 1.550628, 1.617398, 1.718844},
{2.012012, 1.975361, 1.945907, 1.935350, 1.941052, 1.955430, 1.983538, 2.016023},
{3.419318, 3.451937, 3.429333, 3.357931, 3.269283, 3.215087, 3.236822, 3.329197},
{1.741699, 1.776702, 1.808409, 1.802087, 1.766176, 1.738879, 1.725429, 1.724178},
{1.588804, 1.642456, 1.666208, 1.648262, 1.603281, 1.552858, 1.526549, 1.541457},
{3.138541, 3.164963, 3.161345, 3.130037, 3.093148, 3.071225, 3.073344, 3.100179},
{0.000000, 0.000000, 1.099349, 1.180536, 1.181763, 1.126990, 0.000000, 0.000000},
{5.539593, 5.543994, 5.485430, 5.362374, 5.208172, 5.140021, 5.240953, 5.428928},
{-4.056137, -4.117032, -4.056287, -3.905148, -3.762848, -3.703982, -3.756792, -3.904550},
{2.270790, 2.128664, 2.040068, 2.067253, 2.168129, 2.257849, 2.320046, 2.343929},
{-0.000000, -0.000000, -0.000000, -0.000000, -0.000000, -0.000000, -0.000000, -0.000000},
{-3.781555, -3.698213, -3.649388, -3.646792, -3.687281, -3.747849, -3.813056, -3.839367},
{-4.309134, -4.343614, -4.415685, -4.477585, -4.459957, -4.384120, -4.318594, -4.299292},
{-0.000000, -0.000000, -0.000000, -0.000000, -0.000000, -0.000000, -0.000000, -0.000000},
{-3.010623, -3.110226, -3.151800, -3.137590, -3.097021, -3.040314, -2.977231, -2.948042},
{-2.954503, -2.839443, -2.696440, -2.662777, -2.755236, -2.858274, -2.933426, -2.978990},
{1.209452, 1.377843, 1.404586, 1.263931, 1.109909, 1.029849, 0.000000, 0.000000},
{2.089420, 2.032800, 1.842197, 1.695787, 1.641550, 1.658555, 1.762882, 1.948551},
{4.438072, 4.492991, 4.604519, 4.721339, 4.695153, 4.525393, 4.405185, 4.402908},
{4.205318, 4.047975, 3.943128, 3.896789, 3.932025, 4.088749, 4.284825, 4.331347},
{-3.438845, -3.446991, -3.378377, -3.223788, -3.010716, -2.916123, -3.067748, -3.308302}
}
};
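// The three blocks above hold interpolation weights at speeds 1.5, 0.125 and 9 px/fr.
// As the comments here and in the device-function section below suggest, the
// direction-selective response is presumably a linear combination of the form
//   R_dir[speed][d] = sum over f of motionProj[speed][f][d] * R_filter[f]
// (an illustrative reading, not a definitive statement of the algorithm).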
// this corresponds to the number of spatiotemporal scales used
// same as pars.nScales in original S&H matlab code
// use only 1 spatiotemporal scale
//#define nrScales_ 3
// this filter is used to blur and downsample a 3D matrix
// same as filt in original S&H matlab code (see function blurDn3.m)
// blurring and downsampling is only used if nrScales_>1, that is, if we're processing at more than one
// spatiotemporal scale
#define scalingFiltSize 5
__constant__ float d_scalingFilt[scalingFiltSize] = {0.0884, 0.3536, 0.5303, 0.3536, 0.0884};
// d_v1GaussFilt defines the 1D receptive field size of a V1 unit, which is then used for all three dimensions (X,Y and T)
// this guy can be reproduced in matlab with g=normpdf(-4:4,0,1.25);
// same as in original S&H matlab code
#define v1GaussFiltSize 9
__constant__ float d_v1GaussFilt[v1GaussFiltSize] = {0.0007, 0.0155, 0.0903, 0.2345, 0.3179, 0.2345, 0.0903, 0.0155, 0.0007};
// d_complexV1Filt is the spatial filter for complex cells; it averages over "simple" V1 cells
// all simple cells must have the same space-time orientation and phase
// this guy can be reproduced in matlab with g=normpdf(-5:5,0,1.6);
// same as in original S&H matlab code
#define complexV1FiltSize 11
__constant__ float d_complexV1Filt[complexV1FiltSize] = {0.0019, 0.0110, 0.0430, 0.1142, 0.2052, 0.2495, 0.2052, 0.1142, 0.0430, 0.0110, 0.0019};
// d_normV1filt is the spatial filter used for complex cell normalization
// this guy can be reproduced in matlab with: g=normpdf(-10:10,0,3.35);
// same as in original S&H matlab code
//#define normV1filtSize 21
//__constant__ float d_normV1filt[normV1filtSize] = {0.0013, 0.0031, 0.0067, 0.0132, 0.0237, 0.0389, 0.0584, 0.0800, 0.1001, 0.1146, 0.1199, 0.1146, 0.1001, 0.0800, 0.0584, 0.0389, 0.0237, 0.0132, 0.0067, 0.0031, 0.0013};
//float* normV1filt;
// use a slightly bigger filter: g=normpdf(-12:12,0,5.0)/sum(g);
#define normV1filtSize 25
__constant__ float d_normV1filt[normV1filtSize]={0.0045,0.0072,0.0109,0.0160,0.0225,0.0303,0.0393,0.0490,0.0587,0.0675,0.0746,0.0792,0.0808,0.0792,0.0746,0.0675,0.0587,0.0490,0.0393,0.0303,0.0225,0.0160,0.0109,0.0072,0.0045};
// difference operator for taking the first-order derivative
#define diff1filtSize 3
__constant__ float d_diff1filt[diff1filtSize] = {-1/2.0, 0, 1/2.0};
// difference operator for taking the second-order derivative
#define diff2filtSize 3
__constant__ float d_diff2filt[diff2filtSize] = {1, -2, 1};
// difference operator for taking the third-order derivative
// the grayscale values of our input stimuli will be convolved with d_scalingFilt in 3D
#define diff3filtSize 5
__constant__ float d_diff3filt[diff3filtSize] = {-1/2.0, 1, 0, -1, 1/2.0};
// number of time steps to be considered in computation
// in the easiest case (only 1 spatiotemporal scale), it should be the same as v1GaussFiltSize
#define nrT 9
/// **************************************************************************************************************** ///
/// DEVICE FUNCTIONS
/// **************************************************************************************************************** ///
// parallel averaging
// convolve idata with filt and store output in odata
// consider edge effects
// linearly combines filter responses with input activity to get direction-selective output activity
// parallel full-wave rectification
// compute the mean on the array's third dimension
// this is used to compute the mean of all 28 filter responses at a given location/scale (used in the complex cell
// normalization step)
// population normalization of complex cell responses
// parallel multiplying by a scale factor
__global__ void dev_sub(float *i1data, float *i2data, float* odata, int len) {
const int tid = IMUL(blockDim.x, blockIdx.x) + threadIdx.x;
const int threadN = IMUL(blockDim.x, gridDim.x);
for(int i = tid; i < len; i += threadN) {
odata[i] = i1data[i] - i2data[i];
}
}
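// Illustrative launch sketch (hypothetical device buffers d_a, d_b, d_out of length len):
// dev_sub uses a grid-stride loop, so any grid covering len elements works, e.g.
//
//   int threads = CONV1_THREAD_SIZE;
//   int blocks = iDivUp(len, threads);
//   hipLaunchKernelGGL(dev_sub, dim3(blocks), dim3(threads), 0, 0, d_a, d_b, d_out, len);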
|
672a5695ad46a68b913f90194f4822c88eb6ab37.cu
|
#include "includes.h"
#define IMUL(a, b) __mul24(a, b)
#define iDivUp(a,b) ((a)+(b)-1)/(b)
#define CONV1_THREAD_SIZE 256
#define CONVN_THREAD_SIZE1 16
#define CONVN_THREAD_SIZE2 31 //31 is faster than 32 because shared memory is too full
// 28 space-time orientations of V1 simple cells
#define nrFilters 28
// 8 directions
#define nrDirs 8
// each of these is a unit vector (e.g. d_v1popDirs[0..2][0] => (-0.6559)^2+(0.7246)^2+(0.2113)^2 == 1)
// all vectors lie on the surface of a dome (hemisphere) in 3D Fourier space
// x, y, t
__constant__ float d_v1popDirs[3][nrFilters] = {
{ 0.7246,-0.9718, 0.7496,-0.5837,-0.0810, 0.9439, 0.3203,-0.8712,-0.1593,-0.5142, 0.9304, 0.3737,-0.8031,-0.8126, 0.6004,-0.5738, 0.0024, 0.5969, 0.1436, 0.7757,-0.4004,-0.5108, 0.2375,-0.2221,-0.5140, 0.5194,-0.0870, 0.3838},
{-0.6559,-0.1019, 0.6240,-0.7797, 0.9692,-0.2312,-0.9151, 0.4207,-0.9533, 0.8175, 0.2398, 0.8810,-0.4430, 0.0588,-0.5384, 0.5644, 0.7931, 0.5142,-0.7680,-0.0669,-0.6670,-0.2747, 0.5034, 0.5042, 0.1580, 0.1332,-0.5159,-0.3549},
{ 0.2113, 0.2126, 0.2210, 0.2266, 0.2327, 0.2359, 0.2451, 0.2529, 0.2567, 0.2593, 0.2772, 0.2902, 0.3984, 0.5799, 0.5913, 0.5935, 0.6091, 0.6160, 0.6241, 0.6275, 0.6283, 0.8146, 0.8308, 0.8345, 0.8431, 0.8441, 0.8522, 0.8525}
};
// \TODO use dynamic shared memory to allocate interpolation weights from filters onto directions
//extern __shared__ float motionProjDyn[];
__constant__ float motionProj[3][nrFilters][nrDirs] = {
// 1.5 px/fr
{{0.002719, 0.011644, -0.002266, -0.094267, -0.088188, -0.021185, -0.097296, -0.081224},
{-0.023337, -0.106719, -0.077625, 0.007519, 0.015789, -0.006119, -0.100403, -0.080257},
{0.002680, -0.081351, -0.101069, -0.017226, -0.080847, -0.101749, -0.007590, 0.013499},
{-0.105574, -0.075236, 0.004742, 0.012976, -0.014051, -0.107587, -0.074961, -0.019999},
{-0.101953, -0.078081, -0.011287, -0.098204, -0.084890, 0.000210, 0.010038, -0.012016},
{0.013383, 0.006850, -0.065943, -0.111274, -0.019242, -0.057148, -0.114513, -0.024744},
{-0.061140, 0.005103, 0.009873, -0.040867, -0.119077, -0.040599, -0.024648, -0.118630},
{-0.044083, -0.024613, -0.117431, -0.058263, 0.008179, 0.011178, -0.041624, -0.123260},
{-0.117735, -0.024345, 0.008927, -0.001303, -0.081491, -0.104572, -0.007043, -0.067123},
{-0.112299, -0.007929, -0.052488, -0.116908, -0.030257, 0.009875, 0.000031, -0.080050},
{0.005659, -0.038535, -0.118830, -0.041564, -0.003097, -0.109397, -0.074442, -0.001213},
{-0.058675, -0.117962, -0.014533, -0.012484, -0.117591, -0.062163, 0.000398, 0.002208},
{0.018759, -0.114775, -0.072420, -0.015796, -0.024858, -0.096423, -0.092574, 0.055902},
{0.186206, 0.066666, -0.093901, -0.083400, -0.050103, -0.073462, -0.098693, 0.033705},
{-0.080118, -0.054913, -0.083459, -0.089374, 0.073523, 0.196686, 0.042820, -0.099633},
{0.054765, 0.196021, 0.053990, -0.096637, -0.079803, -0.052328, -0.079579, -0.097683},
{-0.092917, 0.065871, 0.209082, 0.069072, -0.091233, -0.083159, -0.057583, -0.084435},
{-0.080172, -0.094335, 0.051800, 0.212371, 0.093687, -0.080371, -0.086343, -0.058694},
{-0.098998, -0.072622, -0.059005, -0.090925, -0.063341, 0.128669, 0.208913, 0.022446},
{-0.060042, -0.079222, -0.091489, 0.056837, 0.222364, 0.105811, -0.074728, -0.087321},
{0.017240, -0.093537, -0.070377, -0.061221, -0.093369, -0.052045, 0.145020, 0.205317},
{0.286286, 0.081896, -0.036612, -0.052630, -0.051398, -0.018586, 0.133449, 0.321045},
{-0.022913, 0.104241, 0.296008, 0.312626, 0.130534, -0.012370, -0.046058, -0.046925},
{0.125102, 0.308023, 0.301202, 0.114413, -0.017407, -0.044594, -0.042965, -0.011909},
{0.326466, 0.292408, 0.103527, -0.017697, -0.041094, -0.038607, 0.005764, 0.158558},
{-0.041630, -0.020604, 0.094234, 0.286191, 0.333461, 0.167671, 0.008501, -0.038692},
{0.090092, -0.015961, -0.038161, -0.032698, 0.027051, 0.193491, 0.340642, 0.274792},
{-0.027770, -0.037982, -0.026487, 0.060731, 0.246749, 0.348872, 0.225464, 0.046336}
},
// 0.125 px/fr
{{-0.000000, 0.897296, 0.353176, 0.000000, 0.000000, 1.209524, 0.285543, 0.265591},
{1.029417, 0.000000, 0.000000, 0.000000, 0.620836, 0.000000, 0.188835, 0.246830},
{-0.108047, -0.000000, -0.000000, 0.929848, -0.197093, -0.000000, 0.000000, 0.508013},
{0.000000, 0.000000, 0.000000, 0.367456, 0.197863, 0.000000, -0.000000, 0.859015},
{0.000000, 0.000000, 1.229100, 0.000000, 0.000000, 0.000000, 0.738794, 0.271190},
{0.000000, -0.410008, -0.247282, -0.086121, 0.462063, -0.271767, -0.182609, -0.182525},
{0.000000, -0.263183, -0.099207, -0.088605, 0.000000, -0.000000, 0.174004, -0.096171},
{0.000000, 0.278772, -0.000000, -0.140555, -0.146193, -0.000000, -0.000000, -0.109096},
{-0.201618, -0.000000, 0.000000, -0.193351, -0.268166, -0.162138, 0.555250, -0.276805},
{-0.151171, 0.360803, -0.466397, -0.178297, -0.186825, -0.000000, -0.475992, -0.326441},
{-0.000000, -0.000000, -0.000000, -0.277033, 0.374329, -0.000000, -0.210372, -0.264749},
{-0.000000, -0.000000, 0.000000, 0.000000, -0.000000, -0.000000, -0.180506, -0.239941},
{-0.395916, -0.000000, -0.195059, -0.224185, -0.413778, -0.191141, -0.156726, -0.000000},
{-0.147002, -0.285356, -0.156458, -0.103351, -0.243213, -0.128499, -0.195833, -0.280861},
{-0.189982, -0.737936, -0.455772, -0.300128, -0.382581, -0.523640, -0.524815, -0.397732},
{0.000000, 0.000000, 0.287464, 0.000000, 0.200886, 0.000000, 0.338290, 0.285218},
{-0.101822, -0.298001, -0.479286, -0.185336, -0.174942, -0.190061, -0.451103, -0.143887},
{0.000000, -0.000000, -0.000000, -0.190980, -0.000000, -0.000000, -0.000000, -0.230796},
{0.000000, 0.293190, 0.000000, 0.000000, 0.172343, 0.000000, 0.000000, 0.210156},
{-0.000000, 0.430281, 0.305841, 0.200276, 0.000000, 0.000000, 0.363526, 0.321661},
{0.000000, -0.000000, -0.108791, -0.143990, 0.000000, 0.000000, -0.145709, -0.197730},
{0.000000, 0.204758, 0.000000, 0.000000, 0.200794, 0.000000, 0.271457, 0.000000},
{-0.000000, 0.332910, 0.286988, 0.000000, 0.155198, 0.000000, 0.329061, 0.300256},
{-0.000000, -0.165435, -0.000000, -0.092666, -0.128557, -0.000000, -0.000000, -0.269069},
{-0.097398, -0.000000, -0.208955, -0.130879, -0.082892, -0.000000, -0.212524, -0.000000},
{-0.105448, -0.491387, -0.410388, -0.190047, -0.237196, -0.307983, -0.477275, -0.285832},
{-0.218714, -0.380534, -0.261717, -0.160753, -0.338830, -0.255540, -0.277978, -0.161782},
{0.000000, 0.364896, 0.000000, 0.000000, 0.240844, 0.000000, 0.297409, 0.000000}
},
// 9 px/fr
{{-4.864834, -4.867060, -4.823441, -4.740868, -4.646694, -4.603636, -4.662763, -4.786739},
{-3.428012, -3.488151, -3.550758, -3.560310, -3.517467, -3.463406, -3.420058, -3.404072},
{-0.000000, -0.000000, -0.000000, -0.000000, -0.000000, -0.000000, -0.000000, -0.000000},
{-1.957444, -2.017401, -2.055229, -2.057289, -2.021035, -1.947560, -1.893333, -1.904727},
{-3.979133, -3.925736, -3.877434, -3.860755, -3.871451, -3.888292, -3.926100, -3.978706},
{1.948717, 1.963352, 2.010421, 2.063527, 2.077270, 2.045093, 1.995961, 1.960698},
{1.629890, 1.580667, 1.557382, 1.570485, 1.611004, 1.649102, 1.673758, 1.672242},
{1.784991, 1.784529, 1.721898, 1.625747, 1.555419, 1.550628, 1.617398, 1.718844},
{2.012012, 1.975361, 1.945907, 1.935350, 1.941052, 1.955430, 1.983538, 2.016023},
{3.419318, 3.451937, 3.429333, 3.357931, 3.269283, 3.215087, 3.236822, 3.329197},
{1.741699, 1.776702, 1.808409, 1.802087, 1.766176, 1.738879, 1.725429, 1.724178},
{1.588804, 1.642456, 1.666208, 1.648262, 1.603281, 1.552858, 1.526549, 1.541457},
{3.138541, 3.164963, 3.161345, 3.130037, 3.093148, 3.071225, 3.073344, 3.100179},
{0.000000, 0.000000, 1.099349, 1.180536, 1.181763, 1.126990, 0.000000, 0.000000},
{5.539593, 5.543994, 5.485430, 5.362374, 5.208172, 5.140021, 5.240953, 5.428928},
{-4.056137, -4.117032, -4.056287, -3.905148, -3.762848, -3.703982, -3.756792, -3.904550},
{2.270790, 2.128664, 2.040068, 2.067253, 2.168129, 2.257849, 2.320046, 2.343929},
{-0.000000, -0.000000, -0.000000, -0.000000, -0.000000, -0.000000, -0.000000, -0.000000},
{-3.781555, -3.698213, -3.649388, -3.646792, -3.687281, -3.747849, -3.813056, -3.839367},
{-4.309134, -4.343614, -4.415685, -4.477585, -4.459957, -4.384120, -4.318594, -4.299292},
{-0.000000, -0.000000, -0.000000, -0.000000, -0.000000, -0.000000, -0.000000, -0.000000},
{-3.010623, -3.110226, -3.151800, -3.137590, -3.097021, -3.040314, -2.977231, -2.948042},
{-2.954503, -2.839443, -2.696440, -2.662777, -2.755236, -2.858274, -2.933426, -2.978990},
{1.209452, 1.377843, 1.404586, 1.263931, 1.109909, 1.029849, 0.000000, 0.000000},
{2.089420, 2.032800, 1.842197, 1.695787, 1.641550, 1.658555, 1.762882, 1.948551},
{4.438072, 4.492991, 4.604519, 4.721339, 4.695153, 4.525393, 4.405185, 4.402908},
{4.205318, 4.047975, 3.943128, 3.896789, 3.932025, 4.088749, 4.284825, 4.331347},
{-3.438845, -3.446991, -3.378377, -3.223788, -3.010716, -2.916123, -3.067748, -3.308302}
}
};
// this corresponds to the number of spatiotemporal scales used
// same as pars.nScales in original S&H matlab code
// use only 1 spatiotemporal scale
//#define nrScales_ 3
// this filter is used to blur and downsample a 3D matrix
// same as filt in original S&H matlab code (see function blurDn3.m)
// blurring and downsampling is only used if nrScales_>1, that is, if we're processing at more than one
// spatiotemporal scale
#define scalingFiltSize 5
__constant__ float d_scalingFilt[scalingFiltSize] = {0.0884, 0.3536, 0.5303, 0.3536, 0.0884};
// d_v1GaussFilt defines the 1D receptive field size of a V1 unit, which is then used for all three dimensions (X,Y and T)
// this guy can be reproduced in matlab with g=normpdf(-4:4,0,1.25);
// same as in original S&H matlab code
#define v1GaussFiltSize 9
__constant__ float d_v1GaussFilt[v1GaussFiltSize] = {0.0007, 0.0155, 0.0903, 0.2345, 0.3179, 0.2345, 0.0903, 0.0155, 0.0007};
// d_complexV1Filt is the spatial filter for complex cells; it averages over "simple" V1 cells
// all simple cells must have the same space-time orientation and phase
// this guy can be reproduced in matlab with g=normpdf(-5:5,0,1.6);
// same as in original S&H matlab code
#define complexV1FiltSize 11
__constant__ float d_complexV1Filt[complexV1FiltSize] = {0.0019, 0.0110, 0.0430, 0.1142, 0.2052, 0.2495, 0.2052, 0.1142, 0.0430, 0.0110, 0.0019};
// d_normV1filt is the spatial filter used for complex cell normalization
// this guy can be reproduced in matlab with: g=normpdf(-10:10,0,3.35);
// same as in original S&H matlab code
//#define normV1filtSize 21
//__constant__ float d_normV1filt[normV1filtSize] = {0.0013, 0.0031, 0.0067, 0.0132, 0.0237, 0.0389, 0.0584, 0.0800, 0.1001, 0.1146, 0.1199, 0.1146, 0.1001, 0.0800, 0.0584, 0.0389, 0.0237, 0.0132, 0.0067, 0.0031, 0.0013};
//float* normV1filt;
// use a slightly bigger filter: g=normpdf(-12:12,0,5.0)/sum(g);
#define normV1filtSize 25
__constant__ float d_normV1filt[normV1filtSize]={0.0045,0.0072,0.0109,0.0160,0.0225,0.0303,0.0393,0.0490,0.0587,0.0675,0.0746,0.0792,0.0808,0.0792,0.0746,0.0675,0.0587,0.0490,0.0393,0.0303,0.0225,0.0160,0.0109,0.0072,0.0045};
// difference operator for taking the first-order derivative
#define diff1filtSize 3
__constant__ float d_diff1filt[diff1filtSize] = {-1/2.0, 0, 1/2.0};
// difference operator for taking the second-order derivative
#define diff2filtSize 3
__constant__ float d_diff2filt[diff2filtSize] = {1, -2, 1};
// difference operator for taking the third-order derivative
// the grayscale values of our input stimuli will be convolved with d_scalingFilt in 3D
#define diff3filtSize 5
__constant__ float d_diff3filt[diff3filtSize] = {-1/2.0, 1, 0, -1, 1/2.0};
// number of time steps to be considered in computation
// in the easiest case (only 1 spatiotemporal scale), it should be the same as v1GaussFiltSize
#define nrT 9
/// **************************************************************************************************************** ///
/// DEVICE FUNCTIONS
/// **************************************************************************************************************** ///
// parallel averaging
// convolve idata with filt and store output in odata
// consider edge effects
// linearly combines filter responses with input activity to get direction-selective output activity
// parallel full-wave rectification
// compute the mean on the array's third dimension
// this is used to compute the mean of all 28 filter responses at a given location/scale (used in the complex cell
// normalization step)
// population normalization of complex cell responses
// parallel multiplying by a scale factor
__global__ void dev_sub(float *i1data, float *i2data, float* odata, int len) {
const int tid = IMUL(blockDim.x, blockIdx.x) + threadIdx.x;
const int threadN = IMUL(blockDim.x, gridDim.x);
for(int i = tid; i < len; i += threadN) {
odata[i] = i1data[i] - i2data[i];
}
}
|
f6da5e70504a6f393b686c2bb5cdf50aaeee4eaa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cudaconv2.cuh>
#define LO16(x) ((x) & 0x0000FFFF)
#define HI16(x) ((x) >> 16)
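// Worked example of the 16-bit packing used below: with filterSize = 5 and
// filter-pixel index 13, the row is 13/5 = 2 and the column 13%5 = 3, stored as
// (2 << 16) + 3 = 0x00020003, so HI16() recovers the row and LO16() the column
// without a division in the inner loop.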
/*
* Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X filters
* threadIdx.x determines filter
* threadIdx.y determines pixel in filter
*
* blockIdx.x determines filter batch of B_X, module batch of partialSum
* blockIdx.y determines pixel batch of B_Y * pixelsPerThread
*
* Number of filters must be divisible by B_X
* Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false.
*
* images: (numColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters)
*
* B_Y * B_X should be divisible by preloadCases.
* preloadCases one of 16, 32.
* B_X one of 4, 8, 16, 32
* B_Y arbitrary (satisfying divisibility constraints)
* numModules must be divisible by partialSum
*
* After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)...
* so the compiler is messing up here somehow. It's unable to optimize that case away.
*/
template <int B_Y, int B_X, int pixelsPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds>
__global__ void conv_weight_acts_c(float* images, float* hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int partialSum,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[pixelsPerThread * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels
__shared__ float shHidActs[B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidActs
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int filterBlocksPerModule = numFilters / B_X;
const int outputModuleIdx = blockIdx.x / filterBlocksPerModule;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = B_X * (blockIdx.x % filterBlocksPerModule);
// const int moduleStride = (imgSize - filterSize + 1) / numModulesX;
const int numModules = numModulesY * numModulesX;
const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread;
images += loadX;
hidActs += moduleIdx * numImages
+ blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += (outputModuleIdx * numFilters) * filterPixels * numColors
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
float* shImgLoad = &shImages[loadY][loadX];
float* shHidActLoad = &shHidActs[loadY][loadX];
float prod[numColors][pixelsPerThread];
memset(prod, 0, sizeof(prod));
__shared__ int pxDivs[B_Y*pixelsPerThread];
if (tidx < B_Y * pixelsPerThread) {
pxDivs[tidx] = (((blockPixelOffset + tidx) / filterSize) << 16) + ((blockPixelOffset + tidx) % filterSize);
}
__syncthreads();
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
const int imgLoadModPosY = paddingStart + (m / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
if (loadY < B_Y * pixelsPerThread) {
/*
* As long as B_Y * B_X is divisible by preloadCases this will loop the right
* number of times.
*
* This will load some imgGrads from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
// #pragma unroll
for (int y = 0; y < B_Y * pixelsPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y * pixelsPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelsPerThread) {
const int pxIdx = loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) {
const int pxY = imgLoadModPosY + HI16(pxDivs[pxIdx]); // pixel x,y coords in image
const int pxX = imgLoadModPosX + LO16(pxDivs[pxIdx]);
if (pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) {
const int pixIdx = (pxY * imgSizeX + pxX) * imgStride;
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImgLoad[(y + c * pixelsPerThread * B_Y) * preloadCases] = images[caseIdx + c * imgPixels * imgStride + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImgLoad[(y + c * pixelsPerThread * B_Y) * preloadCases] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImgLoad[(y + c * pixelsPerThread * B_Y) * preloadCases] = 0;
}
}
}
}
}
if (loadY < B_X && (!checkCaseBounds || caseIdx + loadX < numImages)) {
#pragma unroll
for (int y = 0; y < B_X; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if (B_X % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X) {
shHidActLoad[y * (preloadCases + 1)] = hidActs[caseIdx + y * numImages * numModules];
}
}
}
__syncthreads();
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
#pragma unroll
for (int i = 0; i < preloadCases; i++) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
prod[c][p] += shImages[threadIdx.y + p * B_Y + c * pixelsPerThread * B_Y][i] * shHidActs[threadIdx.x][i];
}
}
}
__syncthreads();
}
hidActs += numImages;
}
if (scale) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters] + scaleOutputs * prod[c][p];
}
}
}
} else {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters] = scaleOutputs * prod[c][p];
}
}
}
}
}
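/*
Example parameterization satisfying the constraints above (illustrative; these are
the values _weightActs() below selects when numImgColors == 1 and numFilters % 32 == 0):
B_X = 32, B_Y = 4, pixelsPerThread = 8, preloadCases = 32, numColors = 1.
Then B_Y * B_X = 128 is divisible by preloadCases, each block covers 32 filters and
4 * 8 = 32 filter pixels, and shImages holds 8*4*1 = 32 rows of 32 preloaded cases.
*/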
/*
* Each block computes weight gradients for B_Y pixels and B_X * filtersPerThread filters
* threadIdx.x determines filter
* threadIdx.y determines pixel in filter
*
* blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum
* blockIdx.y determines pixel, color batch of B_Y * colorsPerThread
* In essence, blockIdx.y.x = 0...numFilterColors / colorsPerThread
* blockIdx.y.y = 0...DIVUP(numPixels, B_Y)
* ============
* CONSTRAINTS:
* ============
* numFilters/numGroups must be divisible by B_X * filtersPerThread
* numImgColors/numGroups must be divisible by colorsPerThread
* numFilters must be divisible by numGroups
* numImgColors must be divisible by numGroups
* Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false.
*
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
*
* B_Y * B_X should be divisible by preloadCases.
* preloadCases one of 16, 32.
* B_X one of 4, 8, 16, 32
* B_Y arbitrary (satisfying divisibility constraints)
*
* This routine is especially fast when numFilters >= 32. That's when it should be used.
*/
template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale, bool checkCaseBounds>
__global__ void conv_weight_acts_mc_mf(float* images, float* hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int partialSum,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels
__shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X * filtersPerThread);
const int outputModuleIdx = blockIdx.x / numFilterBlocks;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = (blockIdx.y / (numFilterColors/colorsPerThread)) * B_Y;
const int filterColorIdx = (blockIdx.y % (numFilterColors/colorsPerThread)) * colorsPerThread;
const int imgColorIdx = filterColorIdx + blockGroupIdx * numFilterColors;
images += imgColorIdx * imgPixels * imgStride + loadX;
hidActs += moduleIdx * numImages
+ blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += outputModuleIdx * numFilters * filterPixels * numFilterColors
+ filterColorIdx * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float prod[colorsPerThread][filtersPerThread];
memset(prod, 0, sizeof(prod));
// This avoids doing a division in an inner loop
__shared__ int pxDivs[B_Y];
if (tidx < B_Y) {
pxDivs[tidx] = (((blockPixelOffset + tidx) / filterSize) << 16) + (blockPixelOffset + tidx) % filterSize;
}
__syncthreads();
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
const int imgLoadModPosY = paddingStart + (m / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
if (loadY < B_Y) {
/*
* As long as B_Y * B_X is divisible by preloadCases this will loop the right
* number of times.
*
* This will load some images from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
// #pragma unroll
for (int y = 0; y < B_Y; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if (B_Y % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y) {
const int pxIdx = loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) {
const int pxY = imgLoadModPosY + HI16(pxDivs[pxIdx]);//pxIdx / filterSize; // pixel x,y coords in image
const int pxX = imgLoadModPosX + LO16(pxDivs[pxIdx]);
if (pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) {
const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; // pixel idx in image
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shImgLoad[(y + c * B_Y) * preloadCases] = images[caseIdx + c * imgPixels * imgStride + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shImgLoad[(y + c * B_Y) * preloadCases] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shImgLoad[(y + c * B_Y) * preloadCases] = 0;
}
}
}
}
}
if (loadY < B_X * filtersPerThread && (!checkCaseBounds || caseIdx + loadX < numImages)) {
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) {
shHidActLoad[y * (preloadCases + 1)] = hidActs[caseIdx + y * numImages * numModules];
}
}
}
__syncthreads();
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < preloadCases; i++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][f] += shImages[threadIdx.y + c * B_Y][i] * shHidActs[threadIdx.x + f * B_X][i];
}
}
}
__syncthreads();
}
hidActs += numImages;
}
if (blockPixelOffset + threadIdx.y < filterPixels) {
if (scale) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][f];
}
}
} else {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][f];
}
}
}
}
}
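/*
Shared-memory footprint example for conv_weight_acts_mc_mf above (illustrative),
using the values _weightActs() below picks when numFiltersPerGroup % 64 == 0 and
numFilterColors % 8 == 0 (B_X = 32, B_Y = 4, filtersPerThread = 2, colorsPerThread = 8,
preloadCases = 32):
  shImages : (8*4) x 32 floats = 1024 floats = 4096 bytes
  shHidActs: (2*32) x (32+1) floats = 2112 floats = 8448 bytes
i.e. about 12.25 KB of statically allocated shared memory per block.
*/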
/*
* Each block computes weight gradients for B_Y pixels and B_X * filtersPerThread filters
* threadIdx.x determines filter
* threadIdx.y determines pixel in filter
*
* blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum
* blockIdx.y determines pixel, color batch of B_Y * colorsPerThread
* In essence, blockIdx.y.x = 0...numFilterColors / colorsPerThread
* blockIdx.y.y = 0...DIVUP(numPixels, B_Y)
* ============
* CONSTRAINTS:
* ============
* numFilters/numGroups must be divisible by B_X * filtersPerThread
* numFilterColors must be divisible by colorsPerThread
* numFilters must be divisible by numGroups
* numImgColors must be divisible by numFilterColors
* Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false.
*
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModules, numFilterColors, filterPixels, numFilters)
* colorIndices: (numGroups, numFilterColors)
*
* B_Y * B_X should be divisible by preloadCases.
* preloadCases one of 16, 32.
* B_X one of 4, 8, 16, 32
* B_Y arbitrary (satisfying divisibility constraints)
*
* This routine is especially fast when numFilters >= 32. That's when it should be used.
*/
template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale, bool checkCaseBounds>
__global__ void conv_weight_acts_mc_mf_rand(float* images, float* hidActs, float* targets, int* colorIndices,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numFilterColors, const int numGroups, const int partialSum,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels
__shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts
__shared__ int shColors[colorsPerThread];
// This avoids doing a division in an inner loop
__shared__ int pxDivs[B_Y];
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X * filtersPerThread);
const int outputModuleIdx = blockIdx.x / numFilterBlocks;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int blockPixelOffset = (blockIdx.y / (numFilterColors/colorsPerThread)) * B_Y;
const int filterColorIdx = (blockIdx.y % (numFilterColors/colorsPerThread)) * colorsPerThread;
// const int imgColorIdx = filterColorIdx + blockGroupIdx * numFilterColors;
images += loadX;
hidActs += moduleIdx * numImages
+ blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += outputModuleIdx * numFilters * filterPixels * numFilterColors
+ filterColorIdx * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float prod[colorsPerThread][filtersPerThread];
memset(prod, 0, sizeof(prod));
if (tidx < B_Y) {
pxDivs[tidx] = ((blockPixelOffset + tidx) / filterSize << 16) + ((blockPixelOffset + tidx) % filterSize);
}
if (tidx < colorsPerThread) {
shColors[tidx] = colorIndices[blockGroupIdx * numFilterColors + filterColorIdx + tidx] * imgPixels * imgStride;
}
__syncthreads();
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
const int imgLoadModPosY = paddingStart + (m / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
if (loadY < B_Y) {
/*
* As long as B_Y * B_X is divisible by preloadCases this will loop the right
* number of times.
*
* This will load some images from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
// #pragma unroll
for (int y = 0; y < B_Y; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if (B_Y % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y) {
const int pxIdx = loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) {
const int pxY = imgLoadModPosY + HI16(pxDivs[pxIdx]);//pxIdx / filterSize; // pixel x,y coords in image
const int pxX = imgLoadModPosX + LO16(pxDivs[pxIdx]);
if (pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) {
const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; // pixel idx in image
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shImgLoad[(y + c * B_Y) * preloadCases] = images[caseIdx + shColors[c] + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shImgLoad[(y + c * B_Y) * preloadCases] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shImgLoad[(y + c * B_Y) * preloadCases] = 0;
}
}
}
}
}
if (loadY < B_X * filtersPerThread && (!checkCaseBounds || caseIdx + loadX < numImages)) {
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) {
shHidActLoad[y * (preloadCases + 1)] = hidActs[caseIdx + y * numImages * numModules];
}
}
}
__syncthreads();
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < preloadCases; i++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][f] += shImages[threadIdx.y + c * B_Y][i] * shHidActs[threadIdx.x + f * B_X][i];
}
}
}
__syncthreads();
}
hidActs += numImages;
}
if (blockPixelOffset + threadIdx.y < filterPixels) {
if (scale) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][f];
}
}
} else {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][f];
}
}
}
}
}
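/*
 * Illustrative note (not part of the original source): the sparse kernel above resolves
 * filter colors through an indirection table. For color c handled by this block,
 *
 *     imgColor  = colorIndices[blockGroupIdx * numFilterColors + filterColorIdx + c];
 *     rowOffset = imgColor * imgPixels * imgStride;   // cached in shColors[c]
 *     value     = images[caseIdx + rowOffset + pixIdx];
 *
 * so shColors simply caches, per block, the row offsets of the image colors that this
 * group's filters are randomly connected to.
 */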
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModules, numImages)
*
 * targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
*
* TODO: you can get a slight speed boost for local non-convolutional units by writing special
* routines for partialSum = 1. But I dunno if the code duplication is worth it...
*
* Note: all of these convolution routines are optimized for the case when
* the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
* to make them work fast.
*/
void _weightActs(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors,
int numGroups, int partialSum, float scaleTargets, float scaleOutput) {
int numFilterColors = numImgColors / numGroups;
int imgStride = images.getStride();
int numImages = images.getNumCols();
int imgPixels = images.getNumRows() / numImgColors;
int imgSizeX = imgPixels / imgSizeY;
int numModules = numModulesY * numModulesX;
int numFilters = hidActs.getNumRows() / numModules;
int numFiltersPerGroup = numFilters / numGroups;
assert(numImgColors % numGroups == 0);
assert(numFilters % (16*numGroups) == 0);
assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 4 == 0)));
assert(numGroups == 1 || numFilterColors % 4 == 0);
assert(imgSizeY * imgSizeX == imgPixels);
assert(images.getNumRows() == imgPixels * numImgColors);
int filterPixels = filterSize * filterSize;
partialSum = partialSum == 0 ? numModules : partialSum;
assert(numModules % partialSum == 0);
assert(hidActs.getNumCols() == numImages);
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0);
assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
assert(moduleStride <= filterSize);
assert(numModules * numFilters == hidActs.getNumRows());
assert(!images.isTrans());
assert(!hidActs.isTrans());
assert(hidActs.isContiguous());
assert(!targets.isTrans());
assert(targets.isContiguous());
int preloadCases = 32;
dim3 blocks, threads;
int bx, by;
int pixelsPerThread, filtersPerThread, colorsPerThread;
// Worth playing with these parameters to find best values for your problem.
    // These values work relatively well, but they are not optimal for all problems.
if (numFilterColors > 3) {
filtersPerThread = numFiltersPerGroup % 32 == 0 ? 2 : 1;
colorsPerThread = numFilterColors % 8 == 0 ? 8 : 4;
by = numFiltersPerGroup % 64 == 0 ? 4 : 8;
bx = numFiltersPerGroup % 64 == 0 ? 32 : 16;
blocks = dim3((numModules/partialSum)*(numFilters/(bx*filtersPerThread)), DIVUP(filterPixels, by) * (numFilterColors / colorsPerThread));
} else {
assert(numGroups == 1); // Just for sanity
pixelsPerThread = numFilters % 32 == 0 ? (numImgColors == 1 ? 8 : 5) : (numImgColors == 1 ? 5 : 2);
by = numFilters % 32 == 0 ? 4 : 8; // by == 4 seems to work best
bx = numFilters % 32 == 0 ? 32 : 16;
blocks = dim3((numModules/partialSum)*(numFilters/bx), DIVUP(filterPixels, by*pixelsPerThread));
}
assert((by * bx) % preloadCases == 0);
threads = dim3(bx, by);
bool checkCaseBounds = numImages % 32 != 0;
if (scaleTargets == 0) {
targets.resize((numModules/partialSum) * numFilterColors*filterPixels, numFilters);
} else {
assert(targets.getNumRows() == (numModules/partialSum) * numFilterColors*filterPixels);
assert(targets.getNumCols() == numFilters);
}
if (numFilterColors > 3) {
if (scaleTargets == 0) { // do not scale
if (numFiltersPerGroup % 64 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,8,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<4,32,2,8,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,8,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<4,32,2,8,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,4,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<4,32,2,4,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,4,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<4,32,2,4,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFiltersPerGroup % 32 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,8,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,2,8,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,8,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,2,8,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,4,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,2,4,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,4,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,2,4,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,8,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,1,8,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,8,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,1,8,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,4,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,1,4,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,4,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,1,4,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
}
} else {
if (numFiltersPerGroup % 64 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,8,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<4,32,2,8,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,8,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<4,32,2,8,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,4,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<4,32,2,4,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,4,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<4,32,2,4,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFiltersPerGroup % 32 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,8,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,2,8,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,8,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,2,8,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,4,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,2,4,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,4,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,2,4,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,8,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,1,8,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,8,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,1,8,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,4,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,1,4,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,4,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,1,4,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
}
}
} else { // numColors in 1,2,3
if (scaleTargets == 0) { // do not scale
if (numFilterColors == 1) {
if (checkCaseBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c<4,32,8,32,1, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<4,32,8,32,1,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_c<8,16,5,32,1, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<8,16,5,32,1,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c<4,32,8,32,1, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<4,32,8,32,1,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_c<8,16,5,32,1, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<8,16,5,32,1,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors == 2) {
if (checkCaseBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,2, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<4,32,5,32,2,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,2, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<8,16,2,32,2,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,2, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<4,32,5,32,2,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,2, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<8,16,2,32,2,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors == 3) {
if (checkCaseBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,3, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<4,32,5,32,3,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,3, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<8,16,2,32,3,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,3, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<4,32,5,32,3,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,3, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<8,16,2,32,3,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
}
}
} else { // do scale
if (numFilterColors == 1) {
if (checkCaseBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c<4,32,8,32,1, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<4,32,8,32,1,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_c<8,16,5,32,1, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<8,16,5,32,1,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c<4,32,8,32,1, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<4,32,8,32,1,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_c<8,16,5,32,1, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<8,16,5,32,1,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors == 2) {
if (checkCaseBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,2, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<4,32,5,32,2,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,2, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<8,16,2,32,2,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,2, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<4,32,5,32,2,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,2, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<8,16,2,32,2,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors == 3) {
if (checkCaseBounds) {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,3, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<4,32,5,32,3,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,3, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<8,16,2,32,3,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
} else {
if (numFilters % 32 == 0) {
hipFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,3, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<4,32,5,32,3,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,3, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_c<8,16,2,32,3,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
}
}
}
}
cutilCheckMsg("weightActs: kernel execution failed");
}
void convWeightActs(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups, int partialSum) {
_weightActs(images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, partialSum, 0, 1);
}
void convWeightActs(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups, int partialSum,
float scaleTargets, float scaleOutput) {
_weightActs(images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
void localWeightActs(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
_weightActs(images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, 1, 0, 1);
}
void localWeightActs(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride,
int numImgColors, int numGroups, float scaleTargets, float scaleOutput) {
_weightActs(images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, 1,
scaleTargets, scaleOutput);
}
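/*
 * Minimal usage sketch (illustrative only; not part of the original library). The
 * wrappers above simply forward to _weightActs with the appropriate partialSum and
 * scaling defaults:
 *
 *     // images:  (numImgColors * imgSizeY * imgSizeX) x numImages, with the given stride
 *     // hidActs: (numFilters * numModulesY * numModulesX) x numImages
 *     // targets: resized to ((numModulesY*numModulesX/partialSum) * numFilterColors * filterPixels) x numFilters
 *     convWeightActs(images, hidActs, targets,
 *                    imgSizeY, numModulesY, numModulesX, filterSize,
 *                    paddingStart, moduleStride, numImgColors, numGroups, partialSum);
 */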
/*
* images: (numImgColors, imgPixels, numImages), with stride given
* hidActs: (numFilters, numModules, numImages)
*
* targets: (numModules/partialSum, numFilterColors, filterPixels, numFilters)
* colorIndices: (numGroups, numFilterColors)
*
* Note: all of these convolution routines are optimized for the case when
* the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
* to make them work fast.
*/
void _weightActsSparse(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride,
int numImgColors, int numFilterColors, int numGroups, int partialSum,
float scaleTargets, float scaleOutput) {
int imgStride = images.getStride();
int numImages = images.getNumCols();
int imgPixels = images.getNumRows() / numImgColors;
int imgSizeX = imgPixels / imgSizeY;
int numModules = numModulesY * numModulesX;
int numFilters = hidActs.getNumRows() / numModules;
int numFiltersPerGroup = numFilters / numGroups;
assert(numGroups > 1);
assert(numImgColors % numFilterColors == 0);
assert((numFilterColors * numGroups) % numImgColors == 0);
assert(numFilters % (16*numGroups) == 0);
assert(numFilterColors % 4 == 0);
assert(imgSizeY * imgSizeX == imgPixels);
assert(images.getNumRows() == imgPixels * numImgColors);
int filterPixels = filterSize * filterSize;
partialSum = partialSum == 0 ? numModules : partialSum;
assert(numModules % partialSum == 0);
assert(hidActs.getNumCols() == numImages);
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0);
assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
assert(moduleStride <= filterSize);
assert(numModules * numFilters == hidActs.getNumRows());
assert(!images.isTrans());
assert(!hidActs.isTrans());
assert(hidActs.isContiguous());
assert(!targets.isTrans());
assert(targets.isContiguous());
int preloadCases = 32;
dim3 blocks, threads;
int bx, by;
int filtersPerThread, colorsPerThread;
filtersPerThread = numFiltersPerGroup % 32 == 0 ? 2 : 1;
colorsPerThread = numFilterColors % 8 == 0 ? 8 : 4;
by = numFiltersPerGroup % 64 == 0 ? 4 : 8;
bx = numFiltersPerGroup % 64 == 0 ? 32 : 16;
blocks = dim3((numModules/partialSum)*(numFilters/(bx*filtersPerThread)), DIVUP(filterPixels, by) * (numFilterColors / colorsPerThread));
assert((by * bx) % preloadCases == 0);
threads = dim3(bx, by);
bool checkCaseBounds = numImages % 32 != 0;
if (scaleTargets == 0) {
targets.resize((numModules/partialSum) * numFilterColors*filterPixels, numFilters);
} else {
assert(targets.getNumRows() == (numModules/partialSum) * numFilterColors*filterPixels);
assert(targets.getNumCols() == numFilters);
}
if (scaleTargets == 0) { // do not scale
if (numFiltersPerGroup % 64 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,8,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<4,32,2,8,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,8,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<4,32,2,8,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,4,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<4,32,2,4,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,4,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<4,32,2,4,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFiltersPerGroup % 32 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,8,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,2,8,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,8,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,2,8,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,4,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,2,4,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,4,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,2,4,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,8,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,1,8,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,8,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,1,8,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,4,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,1,4,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,4,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,1,4,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
}
} else {
if (numFiltersPerGroup % 64 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,8,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<4,32,2,8,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,8,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<4,32,2,8,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,4,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<4,32,2,4,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,4,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<4,32,2,4,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFiltersPerGroup % 32 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,8,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,2,8,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,8,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,2,8,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,4,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,2,4,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,4,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,2,4,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,8,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,1,8,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,8,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,1,8,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,4,32, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,1,4,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
                    hipFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,4,32, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_weight_acts_mc_mf_rand<8,16,1,4,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
}
}
cutilCheckMsg("weightActsSparse: kernel execution failed");
}
void convWeightActsSparse(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride,
int numImgColors, int numFilterColors, int numGroups) {
_weightActsSparse(images, hidActs, targets, dColorIndices, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart,
                      moduleStride, numImgColors, numFilterColors, numGroups, 0, 0, 1);
}
void convWeightActsSparse(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors,
int numGroups, int partialSum, float scaleTargets, float scaleOutput) {
_weightActsSparse(images, hidActs, targets, dColorIndices, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart,
moduleStride, numImgColors, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
void localWeightActsSparse(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride,
int numImgColors, int numFilterColors, int numGroups) {
_weightActsSparse(images, hidActs, targets, dColorIndices, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart,
                      moduleStride, numImgColors, numFilterColors, numGroups, 1, 0, 1);
}
void localWeightActsSparse(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors,
int numGroups, float scaleTargets, float scaleOutput) {
_weightActsSparse(images, hidActs, targets, dColorIndices, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart,
moduleStride, numImgColors, numFilterColors, numGroups, 1, scaleTargets, scaleOutput);
}
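/*
 * Minimal usage sketch for the sparse (randomly connected) variants (illustrative only;
 * not part of the original library). dColorIndices is laid out as
 * (numGroups, numFilterColors) and lists, for each group, the image colors its filters
 * connect to:
 *
 *     convWeightActsSparse(images, hidActs, targets, dColorIndices,
 *                          imgSizeY, numModulesY, numModulesX, filterSize,
 *                          paddingStart, moduleStride,
 *                          numImgColors, numFilterColors, numGroups);
 */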
|
f6da5e70504a6f393b686c2bb5cdf50aaeee4eaa.cu
|
/*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cudaconv2.cuh>
#define LO16(x) ((x) & 0x0000FFFF)
#define HI16(x) ((x) >> 16)
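// Illustrative helper (not part of the original source): the kernels in this file pack a
// filter pixel's (row, col) coordinates into a single int so that the inner load loops
// can unpack them with HI16/LO16 instead of an integer division and modulo.
// Assumes filterSize < 65536 so each half fits in 16 bits.
static __host__ __device__ inline int packFilterPixel(int pxIdxInFilter, int filterSize) {
    // row in the high 16 bits, column in the low 16 bits
    return ((pxIdxInFilter / filterSize) << 16) | (pxIdxInFilter % filterSize);
}
// HI16(packFilterPixel(p, fs)) == p / fs and LO16(packFilterPixel(p, fs)) == p % fs.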
/*
* Each block computes weight gradients for B_Y * pixelsPerThread pixels and B_X filters
* threadIdx.x determines filter
* threadIdx.y determines pixel in filter
*
* blockIdx.x determines filter batch of B_X, module batch of partialSum
* blockIdx.y determines pixel batch of B_Y * pixelsPerThread
*
* Number of filters must be divisible by B_X
* Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false.
*
* images: (numColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numColors, filterPixels, numFilters)
*
* B_Y * B_X should be divisible by preloadCases.
* preloadCases one of 16, 32.
* B_X one of 4, 8, 16, 32
* B_Y arbitrary (satisfying divisibility constraints)
* numModules must be divisible by partialSum
*
* After adding pixelsPerThread, register usage went from 20 to 23 (when pixelsPerThread = 1)...
* so the compiler is messing up here somehow. It's unable to optimize that case away.
*/
template <int B_Y, int B_X, int pixelsPerThread, int preloadCases, int numColors, bool scale, bool checkCaseBounds>
__global__ void conv_weight_acts_c(float* images, float* hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int partialSum,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[pixelsPerThread * B_Y * numColors][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels
__shared__ float shHidActs[B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidActs
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int filterBlocksPerModule = numFilters / B_X;
const int outputModuleIdx = blockIdx.x / filterBlocksPerModule;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = B_X * (blockIdx.x % filterBlocksPerModule);
// const int moduleStride = (imgSize - filterSize + 1) / numModulesX;
const int numModules = numModulesY * numModulesX;
const int blockPixelOffset = blockIdx.y * B_Y * pixelsPerThread;
images += loadX;
hidActs += moduleIdx * numImages
+ blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += (outputModuleIdx * numFilters) * filterPixels * numColors
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
float* shImgLoad = &shImages[loadY][loadX];
float* shHidActLoad = &shHidActs[loadY][loadX];
float prod[numColors][pixelsPerThread];
memset(prod, 0, sizeof(prod));
__shared__ int pxDivs[B_Y*pixelsPerThread];
if (tidx < B_Y * pixelsPerThread) {
pxDivs[tidx] = (((blockPixelOffset + tidx) / filterSize) << 16) + ((blockPixelOffset + tidx) % filterSize);
}
__syncthreads();
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
const int imgLoadModPosY = paddingStart + (m / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
if (loadY < B_Y * pixelsPerThread) {
/*
* As long as B_Y * B_X is divisible by preloadCases this will loop the right
* number of times.
*
                 * This will load some images from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
// #pragma unroll
for (int y = 0; y < B_Y * pixelsPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_Y * pixelsPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y * pixelsPerThread) {
const int pxIdx = loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) {
const int pxY = imgLoadModPosY + HI16(pxDivs[pxIdx]); // pixel x,y coords in image
const int pxX = imgLoadModPosX + LO16(pxDivs[pxIdx]);
if (pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) {
const int pixIdx = (pxY * imgSizeX + pxX) * imgStride;
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImgLoad[(y + c * pixelsPerThread * B_Y) * preloadCases] = images[caseIdx + c * imgPixels * imgStride + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImgLoad[(y + c * pixelsPerThread * B_Y) * preloadCases] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImgLoad[(y + c * pixelsPerThread * B_Y) * preloadCases] = 0;
}
}
}
}
}
if (loadY < B_X && (!checkCaseBounds || caseIdx + loadX < numImages)) {
#pragma unroll
for (int y = 0; y < B_X; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if (B_X % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X) {
shHidActLoad[y * (preloadCases + 1)] = hidActs[caseIdx + y * numImages * numModules];
}
}
}
__syncthreads();
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
#pragma unroll
for (int i = 0; i < preloadCases; i++) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
prod[c][p] += shImages[threadIdx.y + p * B_Y + c * pixelsPerThread * B_Y][i] * shHidActs[threadIdx.x][i];
}
}
}
__syncthreads();
}
hidActs += numImages;
}
if (scale) {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters] = scaleTargets * targets[p * B_Y * numFilters + c * filterPixels * numFilters] + scaleOutputs * prod[c][p];
}
}
}
} else {
#pragma unroll
for (int p = 0; p < pixelsPerThread; p++) {
if (blockPixelOffset + p * B_Y + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
targets[p * B_Y * numFilters + c * filterPixels * numFilters] = scaleOutputs * prod[c][p];
}
}
}
}
}
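/*
 * Launch-shape sketch for conv_weight_acts_c (illustrative; it mirrors the grid that the
 * host-side _weightActs routine computes for this kernel). Each block covers one batch of
 * B_X filters within one partialSum-sized module batch and B_Y * pixelsPerThread filter
 * pixels:
 *
 *     dim3 threads(B_X, B_Y);
 *     dim3 blocks((numModules / partialSum) * (numFilters / B_X),
 *                 DIVUP(filterPixels, B_Y * pixelsPerThread));
 */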
/*
* Each block computes weight gradients for B_Y pixels and B_X * filtersPerThread filters
* threadIdx.x determines filter
* threadIdx.y determines pixel in filter
*
* blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum
* blockIdx.y determines pixel, color batch of B_Y * colorsPerThread
* In essence, blockIdx.y.x = 0...numFilterColors / colorsPerThread
* blockIdx.y.y = 0...DIVUP(numPixels, B_Y)
* ============
* CONSTRAINTS:
* ============
* numFilters/numGroups must be divisible by B_X * filtersPerThread
* numImgColors/numGroups must be divisible by colorsPerThread
* numFilters must be divisible by numGroups
* numImgColors must be divisible by numGroups
* Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false.
*
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
* targets: (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
*
* B_Y * B_X should be divisible by preloadCases.
* preloadCases one of 16, 32.
* B_X one of 4, 8, 16, 32
* B_Y arbitrary (satisfying divisibility constraints)
*
* This routine is especially fast when numFilters >= 32. That's when it should be used.
*/
template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale, bool checkCaseBounds>
__global__ void conv_weight_acts_mc_mf(float* images, float* hidActs, float* targets,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int partialSum,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels
__shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X * filtersPerThread);
const int outputModuleIdx = blockIdx.x / numFilterBlocks;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = (blockIdx.y / (numFilterColors/colorsPerThread)) * B_Y;
const int filterColorIdx = (blockIdx.y % (numFilterColors/colorsPerThread)) * colorsPerThread;
const int imgColorIdx = filterColorIdx + blockGroupIdx * numFilterColors;
images += imgColorIdx * imgPixels * imgStride + loadX;
hidActs += moduleIdx * numImages
+ blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += outputModuleIdx * numFilters * filterPixels * numFilterColors
+ filterColorIdx * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float prod[colorsPerThread][filtersPerThread];
memset(prod, 0, sizeof(prod));
// This avoids doing a division in an inner loop
__shared__ int pxDivs[B_Y];
if (tidx < B_Y) {
pxDivs[tidx] = (((blockPixelOffset + tidx) / filterSize) << 16) + (blockPixelOffset + tidx) % filterSize;
}
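// Example of the packing above (illustrative numbers, not from the source): for filterSize = 5
// and blockPixelOffset + tidx = 13, the filter row 13 / 5 = 2 goes into the high 16 bits and the
// filter column 13 % 5 = 3 into the low 16 bits, so HI16()/LO16() in the load loop recover the
// coordinates without repeating the division for every preloaded case.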
__syncthreads();
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
const int imgLoadModPosY = paddingStart + (m / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
if (loadY < B_Y) {
/*
* As long as B_Y * B_X is divisible by preloadCases this will loop the right
* number of times.
*
* This will load some images from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
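// Example (numbers match the 64-filter configuration picked in _weightActs below): with B_Y = 4,
// B_X = 32 and preloadCases = 32, the stride (B_X * B_Y) / preloadCases is 4 == B_Y, so this
// loop body runs once per thread and the divisibility guard below is true at compile time.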
// #pragma unroll
for (int y = 0; y < B_Y; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if (B_Y % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y) {
const int pxIdx = loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) {
const int pxY = imgLoadModPosY + HI16(pxDivs[pxIdx]);//pxIdx / filterSize; // pixel x,y coords in image
const int pxX = imgLoadModPosX + LO16(pxDivs[pxIdx]);
if (pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) {
const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; // pixel idx in image
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shImgLoad[(y + c * B_Y) * preloadCases] = images[caseIdx + c * imgPixels * imgStride + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shImgLoad[(y + c * B_Y) * preloadCases] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shImgLoad[(y + c * B_Y) * preloadCases] = 0;
}
}
}
}
}
if (loadY < B_X * filtersPerThread && (!checkCaseBounds || caseIdx + loadX < numImages)) {
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) {
shHidActLoad[y * (preloadCases + 1)] = hidActs[caseIdx + y * numImages * numModules];
}
}
}
__syncthreads();
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < preloadCases; i++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][f] += shImages[threadIdx.y + c * B_Y][i] * shHidActs[threadIdx.x + f * B_X][i];
}
}
}
__syncthreads();
}
hidActs += numImages;
}
if (blockPixelOffset + threadIdx.y < filterPixels) {
if (scale) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][f];
}
}
} else {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][f];
}
}
}
}
}
/*
* Each block computes weight gradients for B_Y pixels and B_X * filtersPerThread filters
* threadIdx.x determines filter
* threadIdx.y determines pixel in filter
*
* blockIdx.x determines filter batch of B_X * filtersPerThread, module batch of partialSum
* blockIdx.y determines pixel, color batch of B_Y * colorsPerThread
* In essence, blockIdx.y.x = 0...numFilterColors / colorsPerThread
* blockIdx.y.y = 0...DIVUP(numPixels, B_Y)
* ============
* CONSTRAINTS:
* ============
* numFilters/numGroups must be divisible by B_X * filtersPerThread
* numFilterColors must be divisible by colorsPerThread
* numFilters must be divisible by numGroups
* numImgColors must be divisible by numFilterColors
* Number of images (cases) should be divisible by preloadCases if checkCaseBounds is false.
*
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
*
 * targets:      (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
* colorIndices: (numGroups, numFilterColors)
*
* B_Y * B_X should be divisible by preloadCases.
* preloadCases one of 16, 32.
* B_X one of 4, 8, 16, 32
* B_Y arbitrary (satisfying divisibility constraints)
*
* This routine is especially fast when numFilters >= 32. That's when it should be used.
*/
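/*
 * Illustration of the colorIndices lookup (hypothetical values, only to show the layout): with
 * numGroups = 2 and numFilterColors = 4, colorIndices holds 2 * 4 entries, e.g.
 * { 0, 3, 5, 6,   1, 2, 4, 7 }. A block in group 1 that works on filter colors 0..3 loads
 * colorIndices[1*4 + 0 .. 1*4 + 3] = {1, 2, 4, 7} into shColors (pre-scaled by
 * imgPixels * imgStride below), so its image reads gather from those scattered color planes.
 */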
template <int B_Y, int B_X, int filtersPerThread, int colorsPerThread, int preloadCases, bool scale, bool checkCaseBounds>
__global__ void conv_weight_acts_mc_mf_rand(float* images, float* hidActs, float* targets, int* colorIndices,
const int numImages, const int numFilters,
const int numModulesY, const int numModulesX,
const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numFilterColors, const int numGroups, const int partialSum,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shImages[colorsPerThread * B_Y][preloadCases]; // preload preloadCases cases of B_Y * pixelsPerThread pixels
__shared__ float shHidActs[filtersPerThread * B_X][preloadCases + 1]; // preload preloadCases cases of B_X hidacts
__shared__ int shColors[colorsPerThread];
// This avoids doing a division in an inner loop
__shared__ int pxDivs[B_Y];
const int tidx = B_X * threadIdx.y + threadIdx.x;
const int loadY = tidx / preloadCases, loadX = tidx % preloadCases;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (B_X * filtersPerThread);
const int outputModuleIdx = blockIdx.x / numFilterBlocks;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = filtersPerThread * B_X * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int blockPixelOffset = (blockIdx.y / (numFilterColors/colorsPerThread)) * B_Y;
const int filterColorIdx = (blockIdx.y % (numFilterColors/colorsPerThread)) * colorsPerThread;
// const int imgColorIdx = filterColorIdx + blockGroupIdx * numFilterColors;
images += loadX;
hidActs += moduleIdx * numImages
+ blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += outputModuleIdx * numFilters * filterPixels * numFilterColors
+ filterColorIdx * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float prod[colorsPerThread][filtersPerThread];
memset(prod, 0, sizeof(prod));
if (tidx < B_Y) {
pxDivs[tidx] = ((blockPixelOffset + tidx) / filterSize << 16) + ((blockPixelOffset + tidx) % filterSize);
}
if (tidx < colorsPerThread) {
shColors[tidx] = colorIndices[blockGroupIdx * numFilterColors + filterColorIdx + tidx] * imgPixels * imgStride;
}
__syncthreads();
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
const int imgLoadModPosY = paddingStart + (m / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
for (int caseIdx = 0; caseIdx < numImages; caseIdx += preloadCases) {
if (loadY < B_Y) {
/*
 * As long as B_Y * B_X is divisible by preloadCases this will loop the right
* number of times.
*
* This will load some images from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
// #pragma unroll
for (int y = 0; y < B_Y; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if (B_Y % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_Y) {
const int pxIdx = loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!checkCaseBounds || caseIdx + loadX < numImages)) {
const int pxY = imgLoadModPosY + HI16(pxDivs[pxIdx]);//pxIdx / filterSize; // pixel x,y coords in image
const int pxX = imgLoadModPosX + LO16(pxDivs[pxIdx]);
if (pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) {
const int pixIdx = (pxY * imgSizeX + pxX) * imgStride; // pixel idx in image
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shImgLoad[(y + c * B_Y) * preloadCases] = images[caseIdx + shColors[c] + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shImgLoad[(y + c * B_Y) * preloadCases] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shImgLoad[(y + c * B_Y) * preloadCases] = 0;
}
}
}
}
}
if (loadY < B_X * filtersPerThread && (!checkCaseBounds || caseIdx + loadX < numImages)) {
#pragma unroll
for (int y = 0; y < B_X * filtersPerThread; y += (B_X * B_Y) / preloadCases) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((B_X * filtersPerThread) % (B_X * B_Y / preloadCases) == 0 || y + loadY < B_X * filtersPerThread) {
shHidActLoad[y * (preloadCases + 1)] = hidActs[caseIdx + y * numImages * numModules];
}
}
}
__syncthreads();
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < preloadCases; i++) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
prod[c][f] += shImages[threadIdx.y + c * B_Y][i] * shHidActs[threadIdx.x + f * B_X][i];
}
}
}
__syncthreads();
}
hidActs += numImages;
}
if (blockPixelOffset + threadIdx.y < filterPixels) {
if (scale) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * filterPixels * numFilters + f * B_X] = scaleTargets * targets[c * filterPixels * numFilters + f * B_X] + scaleOutputs * prod[c][f];
}
}
} else {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * filterPixels * numFilters + f * B_X] = scaleOutputs * prod[c][f];
}
}
}
}
}
/*
* images: (numImgColors, imgSizeY, imgSizeX, numImages), with stride given
* hidActs: (numFilters, numModules, numImages)
*
 * targets:     (numModulesY*numModulesX/partialSum, numFilterColors, filterPixels, numFilters)
*
* TODO: you can get a slight speed boost for local non-convolutional units by writing special
* routines for partialSum = 1. But I dunno if the code duplication is worth it...
*
* Note: all of these convolution routines are optimized for the case when
* the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
* to make them work fast.
*/
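/*
 * Worked example of the partialSum bookkeeping (illustrative numbers, not from the source):
 * with numModulesY = numModulesX = 6 (numModules = 36), numFilterColors = 16, filterSize = 5
 * and partialSum = 4, targets is resized to
 *   (numModules/partialSum) * numFilterColors * filterPixels = 9 * 16 * 25 = 3600
 * rows by numFilters columns, and the caller is left to sum the 9 partial gradients per weight.
 * Passing partialSum = 0 is shorthand for partialSum = numModules, i.e. the kernel already
 * produces fully summed convolutional gradients.
 */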
void _weightActs(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors,
int numGroups, int partialSum, float scaleTargets, float scaleOutput) {
int numFilterColors = numImgColors / numGroups;
int imgStride = images.getStride();
int numImages = images.getNumCols();
int imgPixels = images.getNumRows() / numImgColors;
int imgSizeX = imgPixels / imgSizeY;
int numModules = numModulesY * numModulesX;
int numFilters = hidActs.getNumRows() / numModules;
int numFiltersPerGroup = numFilters / numGroups;
assert(numImgColors % numGroups == 0);
assert(numFilters % (16*numGroups) == 0);
assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 4 == 0)));
assert(numGroups == 1 || numFilterColors % 4 == 0);
assert(imgSizeY * imgSizeX == imgPixels);
assert(images.getNumRows() == imgPixels * numImgColors);
int filterPixels = filterSize * filterSize;
partialSum = partialSum == 0 ? numModules : partialSum;
assert(numModules % partialSum == 0);
assert(hidActs.getNumCols() == numImages);
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0);
assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
assert(moduleStride <= filterSize);
assert(numModules * numFilters == hidActs.getNumRows());
assert(!images.isTrans());
assert(!hidActs.isTrans());
assert(hidActs.isContiguous());
assert(!targets.isTrans());
assert(targets.isContiguous());
int preloadCases = 32;
dim3 blocks, threads;
int bx, by;
int pixelsPerThread, filtersPerThread, colorsPerThread;
// Worth playing with these parameters to find best values for your problem.
// These values work relatively well, but not optimal for all problems.
if (numFilterColors > 3) {
filtersPerThread = numFiltersPerGroup % 32 == 0 ? 2 : 1;
colorsPerThread = numFilterColors % 8 == 0 ? 8 : 4;
by = numFiltersPerGroup % 64 == 0 ? 4 : 8;
bx = numFiltersPerGroup % 64 == 0 ? 32 : 16;
blocks = dim3((numModules/partialSum)*(numFilters/(bx*filtersPerThread)), DIVUP(filterPixels, by) * (numFilterColors / colorsPerThread));
} else {
assert(numGroups == 1); // Just for sanity
pixelsPerThread = numFilters % 32 == 0 ? (numImgColors == 1 ? 8 : 5) : (numImgColors == 1 ? 5 : 2);
by = numFilters % 32 == 0 ? 4 : 8; // by == 4 seems to work best
bx = numFilters % 32 == 0 ? 32 : 16;
blocks = dim3((numModules/partialSum)*(numFilters/bx), DIVUP(filterPixels, by*pixelsPerThread));
}
assert((by * bx) % preloadCases == 0);
threads = dim3(bx, by);
bool checkCaseBounds = numImages % 32 != 0;
if (scaleTargets == 0) {
targets.resize((numModules/partialSum) * numFilterColors*filterPixels, numFilters);
} else {
assert(targets.getNumRows() == (numModules/partialSum) * numFilterColors*filterPixels);
assert(targets.getNumCols() == numFilters);
}
if (numFilterColors > 3) {
if (scaleTargets == 0) { // do not scale
if (numFiltersPerGroup % 64 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,8,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<4,32,2,8,32,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,8,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<4,32,2,8,32,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,4,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<4,32,2,4,32,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,4,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<4,32,2,4,32,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFiltersPerGroup % 32 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,8,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,2,8,32,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,8,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,2,8,32,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,4,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,2,4,32,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,4,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,2,4,32,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,8,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,1,8,32,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,8,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,1,8,32,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,4,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,1,4,32,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,4,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,1,4,32,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
}
} else {
if (numFiltersPerGroup % 64 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,8,32, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<4,32,2,8,32,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,8,32, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<4,32,2,8,32,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,4,32, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<4,32,2,4,32,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,4,32, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<4,32,2,4,32,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFiltersPerGroup % 32 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,8,32, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,2,8,32,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,8,32, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,2,8,32,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,4,32, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,2,4,32,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,4,32, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,2,4,32,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,8,32, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,1,8,32,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,8,32, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,1,8,32,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,4,32, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,1,4,32,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,4,32, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf<8,16,1,4,32,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
}
}
} else { // numColors in 1,2,3
if (scaleTargets == 0) { // do not scale
if (numFilterColors == 1) {
if (checkCaseBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,8,32,1, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_c<4,32,8,32,1,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,5,32,1, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_c<8,16,5,32,1,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,8,32,1, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_c<4,32,8,32,1,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,5,32,1, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_c<8,16,5,32,1,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors == 2) {
if (checkCaseBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,2, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_c<4,32,5,32,2,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,2, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_c<8,16,2,32,2,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,2, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_c<4,32,5,32,2,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,2, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_c<8,16,2,32,2,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors == 3) {
if (checkCaseBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,3, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_c<4,32,5,32,3,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,3, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_c<8,16,2,32,3,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,3, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_c<4,32,5,32,3,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,3, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_c<8,16,2,32,3,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
}
}
} else { // do scale
if (numFilterColors == 1) {
if (checkCaseBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,8,32,1, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_c<4,32,8,32,1,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,5,32,1, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_c<8,16,5,32,1,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,8,32,1, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_c<4,32,8,32,1,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,5,32,1, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_c<8,16,5,32,1,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors == 2) {
if (checkCaseBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,2, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_c<4,32,5,32,2,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,2, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_c<8,16,2,32,2,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,2, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_c<4,32,5,32,2,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,2, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_c<8,16,2,32,2,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFilterColors == 3) {
if (checkCaseBounds) {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,3, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_c<4,32,5,32,3,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,3, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_c<8,16,2,32,3,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
} else {
if (numFilters % 32 == 0) {
cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,3, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_c<4,32,5,32,3,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,3, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_c<8,16,2,32,3,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(),
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput);
}
}
}
}
}
cutilCheckMsg("weightActs: kernel execution failed");
}
void convWeightActs(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups, int partialSum) {
_weightActs(images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, partialSum, 0, 1);
}
void convWeightActs(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups, int partialSum,
float scaleTargets, float scaleOutput) {
_weightActs(images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
void localWeightActs(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
_weightActs(images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, 1, 0, 1);
}
void localWeightActs(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride,
int numImgColors, int numGroups, float scaleTargets, float scaleOutput) {
_weightActs(images, hidActs, targets, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, 1,
scaleTargets, scaleOutput);
}
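/*
 * Usage sketch (the matrix names are hypothetical; shapes are as documented above):
 *
 *   NVMatrix imgs;   // (numImgColors * imgPixels, numImages)
 *   NVMatrix acts;   // (numFilters * numModules, numImages)
 *   NVMatrix grads;  // resized by the call when scaleTargets == 0
 *   // convolutional weight gradients, summed over batches of 4 modules at a time
 *   convWeightActs(imgs, acts, grads, imgSizeY, numModulesY, numModulesX, filterSize,
 *                  paddingStart, moduleStride, numImgColors, numGroups, 4);
 *   // locally-connected (untied) weights keep one gradient per module, hence partialSum = 1
 *   localWeightActs(imgs, acts, grads, imgSizeY, numModulesY, numModulesX, filterSize,
 *                   paddingStart, moduleStride, numImgColors, numGroups);
 */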
/*
* images: (numImgColors, imgPixels, numImages), with stride given
* hidActs: (numFilters, numModules, numImages)
*
* targets: (numModules/partialSum, numFilterColors, filterPixels, numFilters)
* colorIndices: (numGroups, numFilterColors)
*
* Note: all of these convolution routines are optimized for the case when
* the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
* to make them work fast.
*/
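/*
 * dColorIndices is a device array of numGroups * numFilterColors ints. A minimal host-side
 * sketch for building it (an assumption for illustration, not code from this project; plain
 * CUDA allocation is used):
 *
 *   std::vector<int> h(numGroups * numFilterColors);
 *   // fill h[g * numFilterColors + c] with the image color read by group g, slot c
 *   int* dColorIndices;
 *   cudaMalloc(&dColorIndices, h.size() * sizeof(int));
 *   cudaMemcpy(dColorIndices, h.data(), h.size() * sizeof(int), cudaMemcpyHostToDevice);
 */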
void _weightActsSparse(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride,
int numImgColors, int numFilterColors, int numGroups, int partialSum,
float scaleTargets, float scaleOutput) {
int imgStride = images.getStride();
int numImages = images.getNumCols();
int imgPixels = images.getNumRows() / numImgColors;
int imgSizeX = imgPixels / imgSizeY;
int numModules = numModulesY * numModulesX;
int numFilters = hidActs.getNumRows() / numModules;
int numFiltersPerGroup = numFilters / numGroups;
assert(numGroups > 1);
assert(numImgColors % numFilterColors == 0);
assert((numFilterColors * numGroups) % numImgColors == 0);
assert(numFilters % (16*numGroups) == 0);
assert(numFilterColors % 4 == 0);
assert(imgSizeY * imgSizeX == imgPixels);
assert(images.getNumRows() == imgPixels * numImgColors);
int filterPixels = filterSize * filterSize;
partialSum = partialSum == 0 ? numModules : partialSum;
assert(numModules % partialSum == 0);
assert(hidActs.getNumCols() == numImages);
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0);
assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
assert(moduleStride <= filterSize);
assert(numModules * numFilters == hidActs.getNumRows());
assert(!images.isTrans());
assert(!hidActs.isTrans());
assert(hidActs.isContiguous());
assert(!targets.isTrans());
assert(targets.isContiguous());
int preloadCases = 32;
dim3 blocks, threads;
int bx, by;
int filtersPerThread, colorsPerThread;
filtersPerThread = numFiltersPerGroup % 32 == 0 ? 2 : 1;
colorsPerThread = numFilterColors % 8 == 0 ? 8 : 4;
by = numFiltersPerGroup % 64 == 0 ? 4 : 8;
bx = numFiltersPerGroup % 64 == 0 ? 32 : 16;
blocks = dim3((numModules/partialSum)*(numFilters/(bx*filtersPerThread)), DIVUP(filterPixels, by) * (numFilterColors / colorsPerThread));
assert((by * bx) % preloadCases == 0);
threads = dim3(bx, by);
bool checkCaseBounds = numImages % 32 != 0;
if (scaleTargets == 0) {
targets.resize((numModules/partialSum) * numFilterColors*filterPixels, numFilters);
} else {
assert(targets.getNumRows() == (numModules/partialSum) * numFilterColors*filterPixels);
assert(targets.getNumCols() == numFilters);
}
if (scaleTargets == 0) { // do not scale
if (numFiltersPerGroup % 64 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,8,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<4,32,2,8,32,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,8,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<4,32,2,8,32,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,4,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<4,32,2,4,32,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,4,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<4,32,2,4,32,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFiltersPerGroup % 32 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,8,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,2,8,32,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,8,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,2,8,32,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,4,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,2,4,32,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,4,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,2,4,32,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,8,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,1,8,32,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,8,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,1,8,32,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,4,32, false, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,1,4,32,false, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,4,32, false, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,1,4,32,false, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
}
} else {
if (numFiltersPerGroup % 64 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,8,32, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<4,32,2,8,32,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,8,32, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<4,32,2,8,32,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,4,32, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<4,32,2,4,32,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<4,32,2,4,32, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<4,32,2,4,32,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else if (numFiltersPerGroup % 32 == 0) {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,8,32, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,2,8,32,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,8,32, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,2,8,32,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,4,32, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,2,4,32,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,2,4,32, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,2,4,32,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
} else {
if (numFilterColors % 8 == 0) {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,8,32, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,1,8,32,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,8,32, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,1,8,32,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
} else {
if (checkCaseBounds) {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,4,32, true, true>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,1,4,32,true, true><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_weight_acts_mc_mf_rand<8,16,1,4,32, true, false>, cudaFuncCachePreferShared);
conv_weight_acts_mc_mf_rand<8,16,1,4,32,true, false><<<blocks, threads>>>(images.getDevData(), hidActs.getDevData(), targets.getDevData(), dColorIndices,
numImages, numFilters, numModulesY, numModulesX, imgSizeY, imgSizeX, filterSize,
paddingStart, moduleStride, imgStride, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
}
}
}
cutilCheckMsg("weightActsSparse: kernel execution failed");
}
void convWeightActsSparse(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride,
int numImgColors, int numFilterColors, int numGroups) {
_weightActsSparse(images, hidActs, targets, dColorIndices, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart,
moduleStride, numImgColors, numFilterColors, numGroups, 0, 0, 1); // partialSum = 0 (full sum), scaleTargets = 0, scaleOutput = 1
}
void convWeightActsSparse(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors,
int numGroups, int partialSum, float scaleTargets, float scaleOutput) {
_weightActsSparse(images, hidActs, targets, dColorIndices, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart,
moduleStride, numImgColors, numFilterColors, numGroups, partialSum, scaleTargets, scaleOutput);
}
void localWeightActsSparse(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride,
int numImgColors, int numFilterColors, int numGroups) {
_weightActsSparse(images, hidActs, targets, dColorIndices, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart,
moduleStride, numImgColors, numFilterColors, numGroups, 1, 0, 1); // partialSum = 1 (local, untied weights), scaleTargets = 0, scaleOutput = 1
}
void localWeightActsSparse(NVMatrix& images, NVMatrix& hidActs, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numFilterColors,
int numGroups, float scaleTargets, float scaleOutput) {
_weightActsSparse(images, hidActs, targets, dColorIndices, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart,
moduleStride, numImgColors, numFilterColors, numGroups, 1, scaleTargets, scaleOutput);
}
|
6c4360047493b07dfedfbb8aab7c4ed0b75149eb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "SteerForPursueCUDA.cuh"
#include "../AgentGroupData.cuh"
using namespace OpenSteer;
// Kernel function prototype.
extern "C"
{
__global__ void SteerForPursueCUDAKernel( float4 const* pdPosition,
float4 const* pdDirection,
float const* pdSpeed,
float3 const targetPosition,
float3 const targetForward,
float3 const targetVelocity,
float const targetSpeed,
float4 * pdSteering,
size_t const numAgents,
float const maxPredictionTime,
float const fWeight,
uint * pdAppliedKernels,
uint const doNotApplyWith
);
}
SteerForPursueCUDA::SteerForPursueCUDA( AgentGroup * pAgentGroup,
float3 const& targetPosition,
float3 const& targetDirection,
float const& targetSpeed,
const float fMaxPredictionTime,
float const fWeight,
uint const doNotApplyWith
)
: AbstractCUDAKernel( pAgentGroup, fWeight, doNotApplyWith ),
m_targetPosition( targetPosition ),
m_targetDirection( targetDirection ),
m_targetSpeed( targetSpeed ),
m_fMaxPredictionTime( fMaxPredictionTime )
{
m_targetVelocity = float3_scalar_multiply( m_targetDirection, m_targetSpeed );
}
void SteerForPursueCUDA::init(void)
{ }
void SteerForPursueCUDA::run(void)
{
dim3 grid = gridDim();
dim3 block = blockDim();
// Gather the required device pointers.
float4 * pdSteering = m_pAgentGroupData->pdSteering();
float4 const* pdPosition = m_pAgentGroupData->pdPosition();
float4 const* pdDirection = m_pAgentGroupData->pdDirection();
float const* pdSpeed = m_pAgentGroupData->pdSpeed();
uint * pdAppliedKernels = m_pAgentGroupData->pdAppliedKernels();
uint const& numAgents = getNumAgents();
hipLaunchKernelGGL(( SteerForPursueCUDAKernel), dim3(grid), dim3(block) , 0, 0, pdPosition,
pdDirection,
pdSpeed,
m_targetPosition,
m_targetDirection,
m_targetVelocity,
m_targetSpeed,
pdSteering,
numAgents,
m_fMaxPredictionTime,
m_fWeight,
pdAppliedKernels,
m_doNotApplyWith
);
cutilCheckMsg( "SteerForPursueCUDAKernel failed." );
//CUDA_SAFE_CALL( hipDeviceSynchronize() );
}
void SteerForPursueCUDA::close(void)
{
// Device data has changed. Instruct the AgentGroup it needs to synchronize the host.
m_pAgentGroup->SetSyncHost();
}
|
6c4360047493b07dfedfbb8aab7c4ed0b75149eb.cu
|
#include "SteerForPursueCUDA.cuh"
#include "../AgentGroupData.cuh"
using namespace OpenSteer;
// Kernel function prototype.
extern "C"
{
__global__ void SteerForPursueCUDAKernel( float4 const* pdPosition,
float4 const* pdDirection,
float const* pdSpeed,
float3 const targetPosition,
float3 const targetForward,
float3 const targetVelocity,
float const targetSpeed,
float4 * pdSteering,
size_t const numAgents,
float const maxPredictionTime,
float const fWeight,
uint * pdAppliedKernels,
uint const doNotApplyWith
);
}
SteerForPursueCUDA::SteerForPursueCUDA( AgentGroup * pAgentGroup,
float3 const& targetPosition,
float3 const& targetDirection,
float const& targetSpeed,
const float fMaxPredictionTime,
float const fWeight,
uint const doNotApplyWith
)
: AbstractCUDAKernel( pAgentGroup, fWeight, doNotApplyWith ),
m_targetPosition( targetPosition ),
m_targetDirection( targetDirection ),
m_targetSpeed( targetSpeed ),
m_fMaxPredictionTime( fMaxPredictionTime )
{
m_targetVelocity = float3_scalar_multiply( m_targetDirection, m_targetSpeed );
}
void SteerForPursueCUDA::init(void)
{ }
void SteerForPursueCUDA::run(void)
{
dim3 grid = gridDim();
dim3 block = blockDim();
// Gather the required device pointers.
float4 * pdSteering = m_pAgentGroupData->pdSteering();
float4 const* pdPosition = m_pAgentGroupData->pdPosition();
float4 const* pdDirection = m_pAgentGroupData->pdDirection();
float const* pdSpeed = m_pAgentGroupData->pdSpeed();
uint * pdAppliedKernels = m_pAgentGroupData->pdAppliedKernels();
uint const& numAgents = getNumAgents();
SteerForPursueCUDAKernel<<< grid, block >>>( pdPosition,
pdDirection,
pdSpeed,
m_targetPosition,
m_targetDirection,
m_targetVelocity,
m_targetSpeed,
pdSteering,
numAgents,
m_fMaxPredictionTime,
m_fWeight,
pdAppliedKernels,
m_doNotApplyWith
);
cutilCheckMsg( "SteerForPursueCUDAKernel failed." );
//CUDA_SAFE_CALL( cudaThreadSynchronize() );
}
void SteerForPursueCUDA::close(void)
{
// Device data has changed. Instruct the AgentGroup it needs to synchronize the host.
m_pAgentGroup->SetSyncHost();
}
|
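The body of SteerForPursueCUDAKernel lives in SteerForPursueCUDA.cuh and is not reproduced in this pair, so the following device helper is only a hedged sketch of the usual OpenSteer-style pursuit prediction (estimate an interception time, then seek the target's predicted position); the name and signature are illustrative, not taken from the project:
#include <cuda_runtime.h>
__device__ float3 predictPursuitPoint(float3 agentPos, float agentSpeed,
                                      float3 targetPos, float3 targetVel,
                                      float maxPredictionTime)
{
    // Rough time-to-reach estimate at the agent's current speed.
    float3 offset = make_float3(targetPos.x - agentPos.x,
                                targetPos.y - agentPos.y,
                                targetPos.z - agentPos.z);
    float dist = sqrtf(offset.x * offset.x + offset.y * offset.y + offset.z * offset.z);
    float t = fminf(dist / fmaxf(agentSpeed, 1e-6f), maxPredictionTime);
    // The steering kernel would then seek this predicted position.
    return make_float3(targetPos.x + targetVel.x * t,
                       targetPos.y + targetVel.y * t,
                       targetPos.z + targetVel.z * t);
}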
2035434d3fc288f03292addf4073079b170379cb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void shmem_reduce_kernel(float * d_out, const float * d_in)
{
// sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
extern __shared__ float sdata[];
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
// load shared mem from global mem
sdata[tid] = d_in[myId];
__syncthreads(); // make sure entire block is loaded!
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads(); // make sure all adds at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
d_out[blockIdx.x] = sdata[0];
}
}
|
2035434d3fc288f03292addf4073079b170379cb.cu
|
#include "includes.h"
__global__ void shmem_reduce_kernel(float * d_out, const float * d_in)
{
// sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
extern __shared__ float sdata[];
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
// load shared mem from global mem
sdata[tid] = d_in[myId];
__syncthreads(); // make sure entire block is loaded!
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads(); // make sure all adds at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
d_out[blockIdx.x] = sdata[0];
}
}
|
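shmem_reduce_kernel produces one partial sum per block, and its shared array is sized by the launch (the third launch argument the kernel comment mentions). A hedged host-side sketch of the usual two-pass call sequence, assuming the element count is an exact multiple of the block size and the block counts are powers of two no larger than 1024:
// Reduces n floats in d_in to a single float in d_out, using d_intermediate
// (one float per block) as scratch; assumes shmem_reduce_kernel above is visible.
void reduce(float *d_out, float *d_intermediate, float *d_in, int n)
{
    const int threads = 1024;
    int blocks = n / threads;   // assumes n is an exact multiple of threads
    shmem_reduce_kernel<<<blocks, threads, threads * sizeof(float)>>>(d_intermediate, d_in);
    // Second pass: one block reduces the per-block partial sums.
    shmem_reduce_kernel<<<1, blocks, blocks * sizeof(float)>>>(d_out, d_intermediate);
}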
6381b5f562b9b5ca603c2aa5acfb15f36778bac9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
//Sriram Madhivanan
//GPU kernels
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
#include "parallelHeader.h"
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
// single run and no overflow
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
__global__ void compress(unsigned char *d_inputFileData, unsigned int *d_compressedDataOffset, struct huffmanDictionary *d_huffmanDictionary, unsigned char *d_byteCompressedData, unsigned int d_inputFileLength, unsigned int constMemoryFlag){
__shared__ struct huffmanDictionary table;
memcpy(&table, d_huffmanDictionary, sizeof(struct huffmanDictionary));
unsigned int inputFileLength = d_inputFileLength;
unsigned int i, j, k;
unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
// when shared memory is sufficient
if(constMemoryFlag == 0){
for(i = pos; i < inputFileLength; i += blockDim.x){
for(k = 0; k < table.bitSequenceLength[d_inputFileData[i]]; k++){
d_byteCompressedData[d_compressedDataOffset[i]+k] = table.bitSequence[d_inputFileData[i]][k];
}
}
}
// use constant memory and shared memory
else{
for(i = pos; i < inputFileLength; i += blockDim.x){
for(k = 0; k < table.bitSequenceLength[d_inputFileData[i]]; k++){
if(k < 191)
d_byteCompressedData[d_compressedDataOffset[i]+k] = table.bitSequence[d_inputFileData[i]][k];
else
d_byteCompressedData[d_compressedDataOffset[i]+k] = d_bitSequenceConstMemory[d_inputFileData[i]][k];
}
}
}
__syncthreads();
for(i = pos * 8; i < d_compressedDataOffset[inputFileLength]; i += blockDim.x * 8){
for(j = 0; j < 8; j++){
if(d_byteCompressedData[i + j] == 0){
d_inputFileData[i / 8] = d_inputFileData[i / 8] << 1;
}
else{
d_inputFileData[i / 8] = (d_inputFileData[i / 8] << 1) | 1;
}
}
}
}
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
// single run with overflow
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
__global__ void compress(unsigned char *d_inputFileData, unsigned int *d_compressedDataOffset, struct huffmanDictionary *d_huffmanDictionary, unsigned char *d_byteCompressedData, unsigned char *d_temp_overflow, unsigned int d_inputFileLength, unsigned int constMemoryFlag, unsigned int overflowPosition){
__shared__ struct huffmanDictionary table;
memcpy(&table, d_huffmanDictionary, sizeof(struct huffmanDictionary));
unsigned int inputFileLength = d_inputFileLength;
unsigned int i, j, k;
unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int offset_overflow;
// when shared memory is sufficient
if(constMemoryFlag == 0){
for(i = pos; i < overflowPosition; i += blockDim.x){
for(k = 0; k < table.bitSequenceLength[d_inputFileData[i]]; k++){
d_byteCompressedData[d_compressedDataOffset[i]+k] = table.bitSequence[d_inputFileData[i]][k];
}
}
for(i = overflowPosition + pos; i < inputFileLength - 1; i += blockDim.x){
for(k = 0; k < table.bitSequenceLength[d_inputFileData[i + 1]]; k++){
d_temp_overflow[d_compressedDataOffset[i + 1] + k] = table.bitSequence[d_inputFileData[i + 1]][k];
}
}
if(pos == 0){
memcpy(&d_temp_overflow[d_compressedDataOffset[(overflowPosition + 1)] - table.bitSequenceLength[d_inputFileData[overflowPosition]]],
&table.bitSequence[d_inputFileData[overflowPosition]], table.bitSequenceLength[d_inputFileData[overflowPosition]]);
}
}
// use constant memory and shared memory
else{
for(i = pos; i < overflowPosition; i += blockDim.x){
for(k = 0; k < table.bitSequenceLength[d_inputFileData[i]]; k++){
if(k < 191)
d_byteCompressedData[d_compressedDataOffset[i]+k] = table.bitSequence[d_inputFileData[i]][k];
else
d_byteCompressedData[d_compressedDataOffset[i]+k] = d_bitSequenceConstMemory[d_inputFileData[i]][k];
}
}
for(i = overflowPosition + pos; i < inputFileLength - 1; i += blockDim.x){
for(k = 0; k < table.bitSequenceLength[d_inputFileData[i + 1]]; k++){
if(k < 191)
d_temp_overflow[d_compressedDataOffset[i + 1]+k] = table.bitSequence[d_inputFileData[i + 1]][k];
else
d_temp_overflow[d_compressedDataOffset[i + 1]+k] = d_bitSequenceConstMemory[d_inputFileData[i + 1]][k];
}
}
if(pos == 0){
memcpy(&d_temp_overflow[d_compressedDataOffset[(overflowPosition + 1)] - table.bitSequenceLength[d_inputFileData[overflowPosition]]],
&d_bitSequenceConstMemory[d_inputFileData[overflowPosition]], table.bitSequenceLength[d_inputFileData[overflowPosition]]);
}
}
__syncthreads();
for(i = pos * 8; i < d_compressedDataOffset[overflowPosition]; i += blockDim.x * 8){
for(j = 0; j < 8; j++){
if(d_byteCompressedData[i + j] == 0){
d_inputFileData[i / 8] = d_inputFileData[i / 8] << 1;
}
else{
d_inputFileData[i / 8] = (d_inputFileData[i / 8] << 1) | 1;
}
}
}
offset_overflow = d_compressedDataOffset[overflowPosition] / 8;
for(i = pos * 8; i < d_compressedDataOffset[inputFileLength]; i += blockDim.x * 8){
for(j = 0; j < 8; j++){
if(d_temp_overflow[i + j] == 0){
d_inputFileData[(i / 8) + offset_overflow] = d_inputFileData[(i / 8) + offset_overflow] << 1;
}
else{
d_inputFileData[(i / 8) + offset_overflow] = (d_inputFileData[(i / 8) + offset_overflow] << 1) | 1;
}
}
}
}
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
// multiple run and no overflow
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
__global__ void compress(unsigned char *d_inputFileData, unsigned int *d_compressedDataOffset, struct huffmanDictionary *d_huffmanDictionary, unsigned char *d_byteCompressedData, unsigned int d_lowerPosition, unsigned int constMemoryFlag, unsigned int d_upperPosition){
__shared__ struct huffmanDictionary table;
memcpy(&table, d_huffmanDictionary, sizeof(struct huffmanDictionary));
//unsigned int inputFileLength = d_inputFileLength;
unsigned int i, j, k;
unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
// when shared memory is sufficient
if(constMemoryFlag == 0){
for(i = pos + d_lowerPosition; i < d_upperPosition; i += blockDim.x){
for(k = 0; k < table.bitSequenceLength[d_inputFileData[i]]; k++){
d_byteCompressedData[d_compressedDataOffset[i]+k] = table.bitSequence[d_inputFileData[i]][k];
}
}
if(pos == 0 && d_lowerPosition != 0){
memcpy(&d_byteCompressedData[d_compressedDataOffset[(d_lowerPosition)] - table.bitSequenceLength[d_inputFileData[d_lowerPosition - 1]]],
&table.bitSequence[d_inputFileData[d_lowerPosition - 1]], table.bitSequenceLength[d_inputFileData[d_lowerPosition - 1]]);
}
}
// use constant memory and shared memory
else{
for(i = pos + d_lowerPosition; i < d_upperPosition; i += blockDim.x){
for(k = 0; k < table.bitSequenceLength[d_inputFileData[i]]; k++){
if(k < 191)
d_byteCompressedData[d_compressedDataOffset[i]+k] = table.bitSequence[d_inputFileData[i]][k];
else
d_byteCompressedData[d_compressedDataOffset[i]+k] = d_bitSequenceConstMemory[d_inputFileData[i]][k];
}
}
if(pos == 0 && d_lowerPosition != 0){
memcpy(&d_byteCompressedData[d_compressedDataOffset[(d_lowerPosition)] - table.bitSequenceLength[d_inputFileData[d_lowerPosition - 1]]],
&d_bitSequenceConstMemory[d_inputFileData[d_lowerPosition - 1]], table.bitSequenceLength[d_inputFileData[d_lowerPosition - 1]]);
}
}
__syncthreads();
for(i = pos * 8; i < d_compressedDataOffset[d_upperPosition]; i += blockDim.x * 8){
for(j = 0; j < 8; j++){
if(d_byteCompressedData[i + j] == 0){
d_inputFileData[(i / 8)] = d_inputFileData[(i / 8)] << 1;
}
else{
d_inputFileData[(i / 8)] = (d_inputFileData[i / 8] << 1) | 1;
}
}
}
}
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
// multiple run and with overflow
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
__global__ void compress(unsigned char *d_inputFileData, unsigned int *d_compressedDataOffset, struct huffmanDictionary *d_huffmanDictionary, unsigned char *d_byteCompressedData, unsigned char *d_temp_overflow, unsigned int d_lowerPosition, unsigned int constMemoryFlag, unsigned int d_upperPosition, unsigned int overflowPosition){
__shared__ struct huffmanDictionary table;
memcpy(&table, d_huffmanDictionary, sizeof(struct huffmanDictionary));
//unsigned int inputFileLength = d_inputFileLength;
unsigned int i, j, k;
unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int offset_overflow;
// when shared memory is sufficient
if(constMemoryFlag == 0){
for(i = pos + d_lowerPosition; i < overflowPosition; i += blockDim.x){
for(k = 0; k < table.bitSequenceLength[d_inputFileData[i]]; k++){
d_byteCompressedData[d_compressedDataOffset[i]+k] = table.bitSequence[d_inputFileData[i]][k];
}
}
for(i = overflowPosition + pos; i < d_upperPosition - 1; i += blockDim.x){
for(k = 0; k < table.bitSequenceLength[d_inputFileData[i + 1]]; k++){
d_temp_overflow[d_compressedDataOffset[i + 1] + k] = table.bitSequence[d_inputFileData[i + 1]][k];
}
}
if(pos == 0){
memcpy(&d_temp_overflow[d_compressedDataOffset[(overflowPosition + 1)] - table.bitSequenceLength[d_inputFileData[overflowPosition]]],
&table.bitSequence[d_inputFileData[overflowPosition]], table.bitSequenceLength[d_inputFileData[overflowPosition]]);
}
if(pos == 0 && d_lowerPosition != 0){
memcpy(&d_byteCompressedData[d_compressedDataOffset[(d_lowerPosition)] - table.bitSequenceLength[d_inputFileData[d_lowerPosition - 1]]],
&table.bitSequence[d_inputFileData[d_lowerPosition - 1]], table.bitSequenceLength[d_inputFileData[d_lowerPosition - 1]]);
}
}
// use constant memory and shared memory
else{
for(i = pos + d_lowerPosition; i < overflowPosition; i += blockDim.x){
for(k = 0; k < table.bitSequenceLength[d_inputFileData[i]]; k++){
if(k < 191)
d_byteCompressedData[d_compressedDataOffset[i]+k] = table.bitSequence[d_inputFileData[i]][k];
else
d_byteCompressedData[d_compressedDataOffset[i]+k] = d_bitSequenceConstMemory[d_inputFileData[i]][k];
}
}
for(i = overflowPosition + pos; i < d_upperPosition - 1; i += blockDim.x){
for(k = 0; k < table.bitSequenceLength[d_inputFileData[i + 1]]; k++){
if(k < 191)
d_temp_overflow[d_compressedDataOffset[i + 1]+k] = table.bitSequence[d_inputFileData[i + 1]][k];
else
d_temp_overflow[d_compressedDataOffset[i + 1]+k] = d_bitSequenceConstMemory[d_inputFileData[i + 1]][k];
}
}
if(pos == 0){
memcpy(&d_temp_overflow[d_compressedDataOffset[(overflowPosition + 1)] - table.bitSequenceLength[d_inputFileData[overflowPosition]]],
&d_bitSequenceConstMemory[d_inputFileData[overflowPosition]], table.bitSequenceLength[d_inputFileData[overflowPosition]]);
}
if(pos == 0 && d_lowerPosition != 0){
memcpy(&d_byteCompressedData[d_compressedDataOffset[(d_lowerPosition)] - table.bitSequenceLength[d_inputFileData[d_lowerPosition - 1]]],
&d_bitSequenceConstMemory[d_inputFileData[d_lowerPosition - 1]], table.bitSequenceLength[d_inputFileData[d_lowerPosition - 1]]);
}
}
__syncthreads();
for(i = pos * 8; i < d_compressedDataOffset[overflowPosition]; i += blockDim.x * 8){
for(j = 0; j < 8; j++){
if(d_byteCompressedData[i + j] == 0){
d_inputFileData[(i / 8)] = d_inputFileData[(i / 8)] << 1;
}
else{
d_inputFileData[(i / 8)] = (d_inputFileData[i / 8] << 1) | 1;
}
}
}
offset_overflow = d_compressedDataOffset[overflowPosition] / 8;
for(i = pos * 8; i < d_compressedDataOffset[d_upperPosition]; i += blockDim.x * 8){
for(j = 0; j < 8; j++){
if(d_temp_overflow[i + j] == 0){
d_inputFileData[(i / 8) + offset_overflow] = d_inputFileData[(i / 8) + offset_overflow] << 1;
}
else{
d_inputFileData[(i / 8) + offset_overflow] = (d_inputFileData[(i / 8) + offset_overflow] << 1) | 1;
}
}
}
}
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
|
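The four compress overloads above stride only by blockDim.x, so they are written to run in a single block; which overload gets launched, and with what block size, is decided by host code that is not part of this listing. A hedged launch sketch for the "single run and no overflow" variant (the helper name and the 1024-thread block are assumptions):
void launchCompressSingleRun(unsigned char *d_inputFileData,
                             unsigned int *d_compressedDataOffset,
                             struct huffmanDictionary *d_huffmanDictionary,
                             unsigned char *d_byteCompressedData,
                             unsigned int inputFileLength,
                             unsigned int constMemoryFlag)
{
    // One block; every thread copies the dictionary into shared memory and then
    // encodes a strided subset of the input bytes.
    compress<<<1, 1024>>>(d_inputFileData, d_compressedDataOffset, d_huffmanDictionary,
                          d_byteCompressedData, inputFileLength, constMemoryFlag);
}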
6381b5f562b9b5ca603c2aa5acfb15f36778bac9.cu
|
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
//Sriram Madhivanan
//GPU kernels
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
#include "parallelHeader.h"
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
// single run and no overflow
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
__global__ void compress(unsigned char *d_inputFileData, unsigned int *d_compressedDataOffset, struct huffmanDictionary *d_huffmanDictionary, unsigned char *d_byteCompressedData, unsigned int d_inputFileLength, unsigned int constMemoryFlag){
__shared__ struct huffmanDictionary table;
memcpy(&table, d_huffmanDictionary, sizeof(struct huffmanDictionary));
unsigned int inputFileLength = d_inputFileLength;
unsigned int i, j, k;
unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
// when shared memory is sufficient
if(constMemoryFlag == 0){
for(i = pos; i < inputFileLength; i += blockDim.x){
for(k = 0; k < table.bitSequenceLength[d_inputFileData[i]]; k++){
d_byteCompressedData[d_compressedDataOffset[i]+k] = table.bitSequence[d_inputFileData[i]][k];
}
}
}
// use constant memory and shared memory
else{
for(i = pos; i < inputFileLength; i += blockDim.x){
for(k = 0; k < table.bitSequenceLength[d_inputFileData[i]]; k++){
if(k < 191)
d_byteCompressedData[d_compressedDataOffset[i]+k] = table.bitSequence[d_inputFileData[i]][k];
else
d_byteCompressedData[d_compressedDataOffset[i]+k] = d_bitSequenceConstMemory[d_inputFileData[i]][k];
}
}
}
__syncthreads();
for(i = pos * 8; i < d_compressedDataOffset[inputFileLength]; i += blockDim.x * 8){
for(j = 0; j < 8; j++){
if(d_byteCompressedData[i + j] == 0){
d_inputFileData[i / 8] = d_inputFileData[i / 8] << 1;
}
else{
d_inputFileData[i / 8] = (d_inputFileData[i / 8] << 1) | 1;
}
}
}
}
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
// single run with overflow
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
__global__ void compress(unsigned char *d_inputFileData, unsigned int *d_compressedDataOffset, struct huffmanDictionary *d_huffmanDictionary, unsigned char *d_byteCompressedData, unsigned char *d_temp_overflow, unsigned int d_inputFileLength, unsigned int constMemoryFlag, unsigned int overflowPosition){
__shared__ struct huffmanDictionary table;
memcpy(&table, d_huffmanDictionary, sizeof(struct huffmanDictionary));
unsigned int inputFileLength = d_inputFileLength;
unsigned int i, j, k;
unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int offset_overflow;
// when shared memory is sufficient
if(constMemoryFlag == 0){
for(i = pos; i < overflowPosition; i += blockDim.x){
for(k = 0; k < table.bitSequenceLength[d_inputFileData[i]]; k++){
d_byteCompressedData[d_compressedDataOffset[i]+k] = table.bitSequence[d_inputFileData[i]][k];
}
}
for(i = overflowPosition + pos; i < inputFileLength - 1; i += blockDim.x){
for(k = 0; k < table.bitSequenceLength[d_inputFileData[i + 1]]; k++){
d_temp_overflow[d_compressedDataOffset[i + 1] + k] = table.bitSequence[d_inputFileData[i + 1]][k];
}
}
if(pos == 0){
memcpy(&d_temp_overflow[d_compressedDataOffset[(overflowPosition + 1)] - table.bitSequenceLength[d_inputFileData[overflowPosition]]],
&table.bitSequence[d_inputFileData[overflowPosition]], table.bitSequenceLength[d_inputFileData[overflowPosition]]);
}
}
// use constant memory and shared memory
else{
for(i = pos; i < overflowPosition; i += blockDim.x){
for(k = 0; k < table.bitSequenceLength[d_inputFileData[i]]; k++){
if(k < 191)
d_byteCompressedData[d_compressedDataOffset[i]+k] = table.bitSequence[d_inputFileData[i]][k];
else
d_byteCompressedData[d_compressedDataOffset[i]+k] = d_bitSequenceConstMemory[d_inputFileData[i]][k];
}
}
for(i = overflowPosition + pos; i < inputFileLength - 1; i += blockDim.x){
for(k = 0; k < table.bitSequenceLength[d_inputFileData[i + 1]]; k++){
if(k < 191)
d_temp_overflow[d_compressedDataOffset[i + 1]+k] = table.bitSequence[d_inputFileData[i + 1]][k];
else
d_temp_overflow[d_compressedDataOffset[i + 1]+k] = d_bitSequenceConstMemory[d_inputFileData[i + 1]][k];
}
}
if(pos == 0){
memcpy(&d_temp_overflow[d_compressedDataOffset[(overflowPosition + 1)] - table.bitSequenceLength[d_inputFileData[overflowPosition]]],
&d_bitSequenceConstMemory[d_inputFileData[overflowPosition]], table.bitSequenceLength[d_inputFileData[overflowPosition]]);
}
}
__syncthreads();
for(i = pos * 8; i < d_compressedDataOffset[overflowPosition]; i += blockDim.x * 8){
for(j = 0; j < 8; j++){
if(d_byteCompressedData[i + j] == 0){
d_inputFileData[i / 8] = d_inputFileData[i / 8] << 1;
}
else{
d_inputFileData[i / 8] = (d_inputFileData[i / 8] << 1) | 1;
}
}
}
offset_overflow = d_compressedDataOffset[overflowPosition] / 8;
for(i = pos * 8; i < d_compressedDataOffset[inputFileLength]; i += blockDim.x * 8){
for(j = 0; j < 8; j++){
if(d_temp_overflow[i + j] == 0){
d_inputFileData[(i / 8) + offset_overflow] = d_inputFileData[(i / 8) + offset_overflow] << 1;
}
else{
d_inputFileData[(i / 8) + offset_overflow] = (d_inputFileData[(i / 8) + offset_overflow] << 1) | 1;
}
}
}
}
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
// multiple run and no overflow
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
__global__ void compress(unsigned char *d_inputFileData, unsigned int *d_compressedDataOffset, struct huffmanDictionary *d_huffmanDictionary, unsigned char *d_byteCompressedData, unsigned int d_lowerPosition, unsigned int constMemoryFlag, unsigned int d_upperPosition){
__shared__ struct huffmanDictionary table;
memcpy(&table, d_huffmanDictionary, sizeof(struct huffmanDictionary));
//unsigned int inputFileLength = d_inputFileLength;
unsigned int i, j, k;
unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
// when shared memory is sufficient
if(constMemoryFlag == 0){
for(i = pos + d_lowerPosition; i < d_upperPosition; i += blockDim.x){
for(k = 0; k < table.bitSequenceLength[d_inputFileData[i]]; k++){
d_byteCompressedData[d_compressedDataOffset[i]+k] = table.bitSequence[d_inputFileData[i]][k];
}
}
if(pos == 0 && d_lowerPosition != 0){
memcpy(&d_byteCompressedData[d_compressedDataOffset[(d_lowerPosition)] - table.bitSequenceLength[d_inputFileData[d_lowerPosition - 1]]],
&table.bitSequence[d_inputFileData[d_lowerPosition - 1]], table.bitSequenceLength[d_inputFileData[d_lowerPosition - 1]]);
}
}
// use constant memory and shared memory
else{
for(i = pos + d_lowerPosition; i < d_upperPosition; i += blockDim.x){
for(k = 0; k < table.bitSequenceLength[d_inputFileData[i]]; k++){
if(k < 191)
d_byteCompressedData[d_compressedDataOffset[i]+k] = table.bitSequence[d_inputFileData[i]][k];
else
d_byteCompressedData[d_compressedDataOffset[i]+k] = d_bitSequenceConstMemory[d_inputFileData[i]][k];
}
}
if(pos == 0 && d_lowerPosition != 0){
memcpy(&d_byteCompressedData[d_compressedDataOffset[(d_lowerPosition)] - table.bitSequenceLength[d_inputFileData[d_lowerPosition - 1]]],
&d_bitSequenceConstMemory[d_inputFileData[d_lowerPosition - 1]], table.bitSequenceLength[d_inputFileData[d_lowerPosition - 1]]);
}
}
__syncthreads();
for(i = pos * 8; i < d_compressedDataOffset[d_upperPosition]; i += blockDim.x * 8){
for(j = 0; j < 8; j++){
if(d_byteCompressedData[i + j] == 0){
d_inputFileData[(i / 8)] = d_inputFileData[(i / 8)] << 1;
}
else{
d_inputFileData[(i / 8)] = (d_inputFileData[i / 8] << 1) | 1;
}
}
}
}
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
// multiple run and with overflow
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
__global__ void compress(unsigned char *d_inputFileData, unsigned int *d_compressedDataOffset, struct huffmanDictionary *d_huffmanDictionary, unsigned char *d_byteCompressedData, unsigned char *d_temp_overflow, unsigned int d_lowerPosition, unsigned int constMemoryFlag, unsigned int d_upperPosition, unsigned int overflowPosition){
__shared__ struct huffmanDictionary table;
memcpy(&table, d_huffmanDictionary, sizeof(struct huffmanDictionary));
//unsigned int inputFileLength = d_inputFileLength;
unsigned int i, j, k;
unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int offset_overflow;
// when shared memory is sufficient
if(constMemoryFlag == 0){
for(i = pos + d_lowerPosition; i < overflowPosition; i += blockDim.x){
for(k = 0; k < table.bitSequenceLength[d_inputFileData[i]]; k++){
d_byteCompressedData[d_compressedDataOffset[i]+k] = table.bitSequence[d_inputFileData[i]][k];
}
}
for(i = overflowPosition + pos; i < d_upperPosition - 1; i += blockDim.x){
for(k = 0; k < table.bitSequenceLength[d_inputFileData[i + 1]]; k++){
d_temp_overflow[d_compressedDataOffset[i + 1] + k] = table.bitSequence[d_inputFileData[i + 1]][k];
}
}
if(pos == 0){
memcpy(&d_temp_overflow[d_compressedDataOffset[(overflowPosition + 1)] - table.bitSequenceLength[d_inputFileData[overflowPosition]]],
&table.bitSequence[d_inputFileData[overflowPosition]], table.bitSequenceLength[d_inputFileData[overflowPosition]]);
}
if(pos == 0 && d_lowerPosition != 0){
memcpy(&d_byteCompressedData[d_compressedDataOffset[(d_lowerPosition)] - table.bitSequenceLength[d_inputFileData[d_lowerPosition - 1]]],
&table.bitSequence[d_inputFileData[d_lowerPosition - 1]], table.bitSequenceLength[d_inputFileData[d_lowerPosition - 1]]);
}
}
// use constant memory and shared memory
else{
for(i = pos + d_lowerPosition; i < overflowPosition; i += blockDim.x){
for(k = 0; k < table.bitSequenceLength[d_inputFileData[i]]; k++){
if(k < 191)
d_byteCompressedData[d_compressedDataOffset[i]+k] = table.bitSequence[d_inputFileData[i]][k];
else
d_byteCompressedData[d_compressedDataOffset[i]+k] = d_bitSequenceConstMemory[d_inputFileData[i]][k];
}
}
for(i = overflowPosition + pos; i < d_upperPosition - 1; i += blockDim.x){
for(k = 0; k < table.bitSequenceLength[d_inputFileData[i + 1]]; k++){
if(k < 191)
d_temp_overflow[d_compressedDataOffset[i + 1]+k] = table.bitSequence[d_inputFileData[i + 1]][k];
else
d_temp_overflow[d_compressedDataOffset[i + 1]+k] = d_bitSequenceConstMemory[d_inputFileData[i + 1]][k];
}
}
if(pos == 0){
memcpy(&d_temp_overflow[d_compressedDataOffset[(overflowPosition + 1)] - table.bitSequenceLength[d_inputFileData[overflowPosition]]],
&d_bitSequenceConstMemory[d_inputFileData[overflowPosition]], table.bitSequenceLength[d_inputFileData[overflowPosition]]);
}
if(pos == 0 && d_lowerPosition != 0){
memcpy(&d_byteCompressedData[d_compressedDataOffset[(d_lowerPosition)] - table.bitSequenceLength[d_inputFileData[d_lowerPosition - 1]]],
&d_bitSequenceConstMemory[d_inputFileData[d_lowerPosition - 1]], table.bitSequenceLength[d_inputFileData[d_lowerPosition - 1]]);
}
}
__syncthreads();
for(i = pos * 8; i < d_compressedDataOffset[overflowPosition]; i += blockDim.x * 8){
for(j = 0; j < 8; j++){
if(d_byteCompressedData[i + j] == 0){
d_inputFileData[(i / 8)] = d_inputFileData[(i / 8)] << 1;
}
else{
d_inputFileData[(i / 8)] = (d_inputFileData[i / 8] << 1) | 1;
}
}
}
offset_overflow = d_compressedDataOffset[overflowPosition] / 8;
for(i = pos * 8; i < d_compressedDataOffset[d_upperPosition]; i += blockDim.x * 8){
for(j = 0; j < 8; j++){
if(d_temp_overflow[i + j] == 0){
d_inputFileData[(i / 8) + offset_overflow] = d_inputFileData[(i / 8) + offset_overflow] << 1;
}
else{
d_inputFileData[(i / 8) + offset_overflow] = (d_inputFileData[(i / 8) + offset_overflow] << 1) | 1;
}
}
}
}
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/*---------------------------------------------------------------------------------------------------------------------------------------------*/
|
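The kernels consume d_compressedDataOffset as an exclusive prefix sum, in bits, of the Huffman code lengths of the input bytes (d_compressedDataOffset[inputFileLength] is the total bit count that later gets packed 8 bits per byte). A hedged host-side sketch of how such an array could be built; this helper is illustrative and not taken from parallelHeader.h:
void buildCompressedDataOffset(unsigned int *compressedDataOffset,        // inputFileLength + 1 entries
                               const unsigned char *inputFileData,
                               unsigned int inputFileLength,
                               const unsigned char *bitSequenceLength)    // 256 code lengths
{
    compressedDataOffset[0] = 0;
    for (unsigned int i = 0; i < inputFileLength; i++)
        compressedDataOffset[i + 1] = compressedDataOffset[i]
                                    + bitSequenceLength[inputFileData[i]];
}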
fb47a18235a7d393c41bf4f7c6a6749378b17c5a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
double get_walltime()
{
struct timeval tp;
gettimeofday(&tp, NULL);
return (double) (tp.tv_sec + tp.tv_usec*1e-6);
}
void Write(double* buffer, int np, char* output){
FILE *f;
f=fopen(output,"w");
for(int i=0;i<np;++i){
fprintf(f,"%f \n",buffer[i]);
}
fclose(f);
}
void force_repulsion(int np, const double *pos, double L, double krepulsion,
double *forces)
{
int i, j;
double posi[4];
double rvec[4];
double s2, s, f;
// initialize forces to zero
for (i=0; i<3*np; i++)
forces[i] = 0.;
// loop over all pairs
for (i=0; i<np; i++)
{
posi[0] = pos[3*i ];
posi[1] = pos[3*i+1];
posi[2] = pos[3*i+2];
for (j=i+1; j<np; j++)
{
// compute minimum image difference
rvec[0] = remainder(posi[0] - pos[3*j ], L);
rvec[1] = remainder(posi[1] - pos[3*j+1], L);
rvec[2] = remainder(posi[2] - pos[3*j+2], L);
s2 = rvec[0]*rvec[0] + rvec[1]*rvec[1] + rvec[2]*rvec[2];
if (s2 < 4)
{
s = sqrt(s2);
rvec[0] /= s;
rvec[1] /= s;
rvec[2] /= s;
f = krepulsion*(2.-s);
forces[3*i ] += f*rvec[0];
forces[3*i+1] += f*rvec[1];
forces[3*i+2] += f*rvec[2];
forces[3*j ] += -f*rvec[0];
forces[3*j+1] += -f*rvec[1];
forces[3*j+2] += -f*rvec[2];
}
}
}
}
__global__ void gpu_find_repulsion(int np, double*pos, double L, double krepulsion, double* forces){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<np){
int j;
double posi[4];
double rvec[4];
double s2, s, f;
posi[0] = pos[3*i ];
posi[1] = pos[3*i+1];
posi[2] = pos[3*i+2];
for (j=i+1; j<np; j++){
// compute minimum image difference
rvec[0] = remainder(posi[0] - pos[3*j ], L);
rvec[1] = remainder(posi[1] - pos[3*j+1], L);
rvec[2] = remainder(posi[2] - pos[3*j+2], L);
s2 = rvec[0]*rvec[0] + rvec[1]*rvec[1] + rvec[2]*rvec[2];
if (s2 < 4){
s = sqrt(s2);
rvec[0] /= s;
rvec[1] /= s;
rvec[2] /= s;
f = krepulsion*(2.-s);
forces[3*i ] += f*rvec[0];
forces[3*i+1] += f*rvec[1];
forces[3*i+2] += f*rvec[2];
forces[3*j ] += -f*rvec[0];
forces[3*j+1] += -f*rvec[1];
forces[3*j+2] += -f*rvec[2];
}
}
}
}
int main(int argc, char *argv[])
{
int i;
int np = 100; // default number of particles
double phi = 0.3; // volume fraction
double krepulsion = 125.; // force constant
double *pos;
double *forces;
double time0, time1;
if (argc > 1)
np = atoi(argv[1]);
// compute simulation box width
double L = pow(4./3.*3.1415926536*np/phi, 1./3.);
// generate random particle positions inside simulation box
forces = (double *) malloc(3*np*sizeof(double));
pos = (double *) malloc(3*np*sizeof(double));
for (i=0; i<3*np; i++)
pos[i] = rand()/(double)RAND_MAX*L;
time0 = get_walltime();
force_repulsion(np, pos, L, krepulsion, forces);
time1 = get_walltime();
//print performance and write to file
printf("number of particles: %d\n", np);
printf("elapsed time of cpu program: %f seconds\n", time1-time0);
Write(forces,3*np,"cpu_output");
//reinitialization of forces
for(int i=0;i<np*3;++i) forces[i]=0.;
//gpu program
double *gpu_pos;
double *gpu_forces;
int bytes=3*np*sizeof(double);
hipEvent_t start, stop;
float time;
hipMalloc((void**)&gpu_pos,bytes);
hipMalloc((void**)&gpu_forces,bytes);
hipMemcpy(gpu_pos, pos, bytes, hipMemcpyHostToDevice);
hipMemcpy(gpu_forces, forces, bytes, hipMemcpyHostToDevice);
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( gpu_find_repulsion), dim3((3*np+1023)/1024),dim3(1024), 0, 0, np, gpu_pos, L, krepulsion, gpu_forces);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
hipMemcpy(forces, gpu_forces, bytes, hipMemcpyDeviceToHost);
printf("number of particles: %d\n", np);
printf("elapsed time of gpu program: %f seconds\n", time/1000);
Write(forces,3*np,"gpu_output");
printf("speed up of gpu is %f \n",(time1-time0)/(time/1000));
hipFree(gpu_pos);
hipFree(gpu_forces);
free(forces);
free(pos);
return 0;
}
|
fb47a18235a7d393c41bf4f7c6a6749378b17c5a.cu
|
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
double get_walltime()
{
struct timeval tp;
gettimeofday(&tp, NULL);
return (double) (tp.tv_sec + tp.tv_usec*1e-6);
}
void Write(double* buffer, int np, char* output){
FILE *f;
f=fopen(output,"w");
for(int i=0;i<np;++i){
fprintf(f,"%f \n",buffer[i]);
}
fclose(f);
}
void force_repulsion(int np, const double *pos, double L, double krepulsion,
double *forces)
{
int i, j;
double posi[4];
double rvec[4];
double s2, s, f;
// initialize forces to zero
for (i=0; i<3*np; i++)
forces[i] = 0.;
// loop over all pairs
for (i=0; i<np; i++)
{
posi[0] = pos[3*i ];
posi[1] = pos[3*i+1];
posi[2] = pos[3*i+2];
for (j=i+1; j<np; j++)
{
// compute minimum image difference
rvec[0] = remainder(posi[0] - pos[3*j ], L);
rvec[1] = remainder(posi[1] - pos[3*j+1], L);
rvec[2] = remainder(posi[2] - pos[3*j+2], L);
s2 = rvec[0]*rvec[0] + rvec[1]*rvec[1] + rvec[2]*rvec[2];
if (s2 < 4)
{
s = sqrt(s2);
rvec[0] /= s;
rvec[1] /= s;
rvec[2] /= s;
f = krepulsion*(2.-s);
forces[3*i ] += f*rvec[0];
forces[3*i+1] += f*rvec[1];
forces[3*i+2] += f*rvec[2];
forces[3*j ] += -f*rvec[0];
forces[3*j+1] += -f*rvec[1];
forces[3*j+2] += -f*rvec[2];
}
}
}
}
__global__ void gpu_find_repulsion(int np, double*pos, double L, double krepulsion, double* forces){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<np){
int j;
double posi[4];
double rvec[4];
double s2, s, f;
posi[0] = pos[3*i ];
posi[1] = pos[3*i+1];
posi[2] = pos[3*i+2];
for (j=i+1; j<np; j++){
// compute minimum image difference
rvec[0] = remainder(posi[0] - pos[3*j ], L);
rvec[1] = remainder(posi[1] - pos[3*j+1], L);
rvec[2] = remainder(posi[2] - pos[3*j+2], L);
s2 = rvec[0]*rvec[0] + rvec[1]*rvec[1] + rvec[2]*rvec[2];
if (s2 < 4){
s = sqrt(s2);
rvec[0] /= s;
rvec[1] /= s;
rvec[2] /= s;
f = krepulsion*(2.-s);
forces[3*i ] += f*rvec[0];
forces[3*i+1] += f*rvec[1];
forces[3*i+2] += f*rvec[2];
forces[3*j ] += -f*rvec[0];
forces[3*j+1] += -f*rvec[1];
forces[3*j+2] += -f*rvec[2];
}
}
}
}
int main(int argc, char *argv[])
{
int i;
int np = 100; // default number of particles
double phi = 0.3; // volume fraction
double krepulsion = 125.; // force constant
double *pos;
double *forces;
double time0, time1;
if (argc > 1)
np = atoi(argv[1]);
// compute simulation box width
double L = pow(4./3.*3.1415926536*np/phi, 1./3.);
// generate random particle positions inside simulation box
forces = (double *) malloc(3*np*sizeof(double));
pos = (double *) malloc(3*np*sizeof(double));
for (i=0; i<3*np; i++)
pos[i] = rand()/(double)RAND_MAX*L;
time0 = get_walltime();
force_repulsion(np, pos, L, krepulsion, forces);
time1 = get_walltime();
//print performance and write to file
printf("number of particles: %d\n", np);
printf("elapsed time of cpu program: %f seconds\n", time1-time0);
Write(forces,3*np,"cpu_output");
//reinitialization of forces
for(int i=0;i<np*3;++i) forces[i]=0.;
//gpu program
double *gpu_pos;
double *gpu_forces;
int bytes=3*np*sizeof(double);
cudaEvent_t start, stop;
float time;
cudaMalloc((void**)&gpu_pos,bytes);
cudaMalloc((void**)&gpu_forces,bytes);
cudaMemcpy(gpu_pos, pos, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_forces, forces, bytes, cudaMemcpyHostToDevice);
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
gpu_find_repulsion<<<(3*np+1023)/1024,1024>>>(np, gpu_pos, L, krepulsion, gpu_forces);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaMemcpy(forces, gpu_forces, bytes, cudaMemcpyDeviceToHost);
printf("number of particles: %d\n", np);
printf("elapsed time of gpu program: %f seconds\n", time/1000);
Write(forces,3*np,"gpu_output");
printf("speed up of gpu is %f \n",(time1-time0)/(time/1000));
cudaFree(gpu_pos);
cudaFree(gpu_forces);
free(forces);
free(pos);
return 0;
}
|
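In gpu_find_repulsion, thread i also accumulates into forces[3*j], forces[3*j+1], forces[3*j+2] for every j > i, so different threads can update the same elements concurrently and updates can be lost. A hedged race-free variant using atomicAdd (double-precision atomicAdd requires compute capability 6.0 or newer); this is a sketch, not part of the original program:
__global__ void gpu_find_repulsion_atomic(int np, const double *pos, double L,
                                          double krepulsion, double *forces)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= np) return;
    double p0 = pos[3*i], p1 = pos[3*i+1], p2 = pos[3*i+2];
    for (int j = i + 1; j < np; j++) {
        // minimum image difference
        double r0 = remainder(p0 - pos[3*j    ], L);
        double r1 = remainder(p1 - pos[3*j + 1], L);
        double r2 = remainder(p2 - pos[3*j + 2], L);
        double s2 = r0*r0 + r1*r1 + r2*r2;
        if (s2 < 4) {
            double s = sqrt(s2);
            double f = krepulsion * (2. - s) / s;   // the 1/s folds in the direction normalization
            atomicAdd(&forces[3*i    ],  f * r0);
            atomicAdd(&forces[3*i + 1],  f * r1);
            atomicAdd(&forces[3*i + 2],  f * r2);
            atomicAdd(&forces[3*j    ], -f * r0);
            atomicAdd(&forces[3*j + 1], -f * r1);
            atomicAdd(&forces[3*j + 2], -f * r2);
        }
    }
}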
3e9249df6575839ad900e4dc39b61faba76aa961.hip
|
// !!! This is a file automatically generated by hipify!!!
/* GEMM is a General Matrix Multiply - a subroutine in the Basic Linear Algebra Subprograms library*/
/* Includes, system */
#include <stdio.h>
#include <stdlib.h>
//#include <string.h>
//#include <cstring>
#include <sys/time.h>
/* Includes, cuda */
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include "helper_cuda.h"
#include "gpuGEMM.cu"
#include "gpuArrayGemm.cu"
//using namespace std;
#define BLOCK_SIZE 16
void print1darray(double* a, int m)
{
for (int i=0; i<m; i++)
printf("%g ", a[i]);
printf("\n");
}
void print2darray(double* a, int m, int n)
{
for (int i=0; i<m; i++) {
for (int j=0; j<n; j++)
printf("%g ", a[j*m+i]);
printf("\n");
}
printf("\n");
}
void print3darray(double* a, int m, int n, int p)
{
for (int k=0; k<p; k++) {
for (int i=0; i<m; i++) {
for (int j=0; j<n; j++)
printf("%g ", a[k*n*m+j*m+i]);
printf("\n");
}
printf("\n");
}
printf("\n");
}
void printArray2D(double* a, int m, int n)
{
int N = m*n;
double *b = (double*) malloc (sizeof (double)*N);
hipMemcpy(b, a, N*sizeof(double), hipMemcpyDeviceToHost);
print2darray(b, m, n);
free(b);
}
/* ======================================================= */
/* CUDA implementation of dGEMM without using shared memory */
/* ======================================================= */
__global__ void cuda_dgemm(int M, int N, int K,
double alpha,
const double *A,
const double *B,
double beta,
double *C) {
int row = blockDim.y * blockIdx.y + threadIdx.y;
int col = blockDim.x * blockIdx.x + threadIdx.x;
if (row >= M || col >= N) return;
double prod = 0;
for (int k = 0; k < K; ++k){
prod += A[k * M + row] * B[col * K + k];
}
C[col*M + row] = alpha * prod + beta * C[col*M + row];
}
/* ======================================================= */
/* CUDA implementation of dGEMM using shared memory */
/* ======================================================= */
__global__ void cuda_dgemm_shmem(int n,
double alpha,
const double *B,
const double *A,
double beta,
double *C) {
// Block index
int block_col = blockIdx.x;
int block_row = blockIdx.y;
// Thread index
int thread_col = threadIdx.x;
int thread_row = threadIdx.y;
//printf("row = %d col = %d n= %d\n", block_col, block_row, n);
//int row = blockDim.y * blockIdx.y + threadIdx.y;
//int col = blockDim.x * blockIdx.x + threadIdx.x;
int aBegin = n * blockDim.x * block_row;
int aEnd = aBegin + n-1;
int bBegin = blockDim.x * block_col;
int bStep = n * blockDim.x;
double Csub = 0;
for (int a=aBegin, b=bBegin, istep=0;
a <= aEnd; a+= blockDim.x, b+=bStep, ++istep){
__shared__ double As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double Bs[BLOCK_SIZE][BLOCK_SIZE];
if ((istep*blockDim.x+thread_col < n) && (block_row*blockDim.x+ thread_row < n))
As[thread_row][thread_col] = A[a + n * thread_row + thread_col];
else
As[thread_row][thread_col] = 0;
if ((block_col*blockDim.x+thread_col < n) && (istep*blockDim.x + thread_row < n))
Bs[thread_row][thread_col] = B[b + n * thread_row + thread_col];
else
Bs[thread_row][thread_col] = 0;
__syncthreads();
// calculate the cell
for (int k = 0; k < blockDim.x; ++k)
Csub += As[thread_row][k] * Bs[k][thread_col];
__syncthreads();
}
// Write the block sub-matrix to global memory;
// each thread writes one element
int c = n * blockDim.x * block_row + blockDim.x * block_col;
if ((block_col*blockDim.x+thread_col < n) && (block_row*blockDim.x+ thread_row < n))
C[c + n * thread_row + thread_col] = alpha * Csub + beta * C[c +n * thread_row + thread_col];
}
/* ======================================================= */
/* Simple host implementation of a simple version of dgemm */
/* ======================================================= */
static void simple_dgemm(int M, int N, int K, double alpha, const double *A, const double *B,
double beta, double *C) {
int i, j, k;
for (i = 0; i < M; ++i) {
for (j = 0; j < N; ++j){
double prod = 0;
for (k = 0; k < K; ++k){
prod += A[k * M + i] * B[j * K + k];
}
C[j * M + i] = alpha * prod + beta * C[j * M + i];
}
}
}
/* ======================================================= */
/* Simple host implementation of a 1<->2 index permutation */
/* ======================================================= */
static void simple_permute12(int M, int N, int K, double *A)
{
//double B[M*N*K];
double *B = (double *)malloc(M*N*K * sizeof(double) );
int i, j, k;
for (k = 0; k < K; ++k)
for (j = 0; j < N; ++j)
for (i = 0; i < M; ++i)
B[i + M*j + M*N*k] = A[j + N*i + N*M*k];
k = M*N*K;
for (i=0; i<k; i++)
A[i] = B[i];
free(B);
}
/* ======================= */
/* dgemm from BLAS library */
/* ======================= */
extern "C"{
extern void dgemm_(char *, char * ,
int *, int *, int *,
double *, double *, int *,
double *, int *,
double *, double *, int *); };
/* ==== */
/* Main */
/* ==== */
int main(int argc, char **argv)
{
hipblasStatus_t status;
double *h_A, *h_B, *h_C, *h_D, *h_E, *h_F, *h_A1, *h_A2, *h_A3;
double *d_A, *d_B, *d_C, *d_D, *d_E, *d_F, *d_A1, *d_A2, *d_A3;
double *h_Cts, *d_Cts, *d_Ctm, *h_Fts, *d_Fts, *d_Ftm;
int i, K, M1, N1, M2, N2, M3, N3, S1, S2, S3, SA, SB, SC, SD, SE, SF, SI;
int *index1, *index2;
hipblasHandle_t handle;
struct timeval tv1, tv2;
status = hipblasCreate(&handle);
double alpha = 1.0f;
double beta = 0.0f;
K = 1024*16;
M1 = 8; M2 = 8; M3 = 8;
N1 = 8; N2 = 8; N3 = 8;
S1 = M1*N1;
S2 = M2*N2;
S3 = M3*N3;
SA = S1*S2;
SB = N1*N2*K;
SC = M1*M2*K;
SD = S1*S2*S3;
SE = N1*N2*N3*K;
SF = M1*M2*M3*K;
SI = M2*N1*K;
h_A1 = (double *)malloc(S1 * sizeof(double) );
h_A2 = (double *)malloc(S2 * sizeof(double) );
h_A3 = (double *)malloc(S3 * sizeof(double) );
h_A = (double *)malloc(SA * sizeof(double) );
h_B = (double *)malloc(SB * sizeof(double) );
h_C = (double *)malloc(SC * sizeof(double) );
h_D = (double *)malloc(SD * sizeof(double) );
h_E = (double *)malloc(SE * sizeof(double) );
h_F = (double *)malloc(SF * sizeof(double) );
h_Cts = (double *)malloc(SC * sizeof(double) );
h_Fts = (double *)malloc(SF * sizeof(double) );
for (i = 0; i < S1; i++)
h_A1[i] = rand() / (double)RAND_MAX;
for (i = 0; i < S2; i++)
h_A2[i] = rand() / (double)RAND_MAX;
for (i = 0; i < S3; i++)
h_A3[i] = rand() / (double)RAND_MAX;
for (i = 0; i < SA; i++)
h_A[i] = rand() / (double)RAND_MAX;
for (i = 0; i < SB; i++)
h_B[i] = rand() / (double)RAND_MAX;
for (i = 0; i < SC; i++)
h_C[i] = rand() / (double)RAND_MAX;
for (i = 0; i < SD; i++)
h_D[i] = rand() / (double)RAND_MAX;
for (i = 0; i < SE; i++)
h_E[i] = rand() / (double)RAND_MAX;
for (i = 0; i < SF; i++)
h_F[i] = rand() / (double)RAND_MAX;
hipMalloc((void **)&d_A1, S1 * sizeof(double));
hipMalloc((void **)&d_A2, S2 * sizeof(double));
hipMalloc((void **)&d_A3, S3 * sizeof(double));
hipMalloc((void **)&d_A, SA * sizeof(double));
hipMalloc((void **)&d_B, SB * sizeof(double));
hipMalloc((void **)&d_C, SC * sizeof(double));
hipMalloc((void **)&d_D, SD * sizeof(double));
hipMalloc((void **)&d_E, SE * sizeof(double));
hipMalloc((void **)&d_F, SF * sizeof(double));
hipMalloc((void **)&d_Cts, SC * sizeof(double));
hipMalloc((void **)&d_Ctm, SC * sizeof(double));
hipMalloc((void **)&d_Fts, SF * sizeof(double));
hipMalloc((void **)&d_Ftm, SF * sizeof(double));
hipMalloc((void **)&index1, SI * sizeof(int));
hipMalloc((void **)&index2, SC * sizeof(int));
gpuIndexPermute12(index1, M2, N1, K);
gpuIndexPermute12(index2, M1, M2, K);
status = hipblasSetVector(S1, sizeof(h_A1[0]), h_A1, 1, d_A1, 1);
status = hipblasSetVector(S2, sizeof(h_A2[0]), h_A2, 1, d_A2, 1);
status = hipblasSetVector(S3, sizeof(h_A3[0]), h_A3, 1, d_A3, 1);
status = hipblasSetVector(SA, sizeof(h_A[0]), h_A, 1, d_A, 1);
status = hipblasSetVector(SB, sizeof(h_B[0]), h_B, 1, d_B, 1);
status = hipblasSetVector(SC, sizeof(h_C[0]), h_C, 1, d_C, 1);
status = hipblasSetVector(SD, sizeof(h_D[0]), h_D, 1, d_D, 1);
status = hipblasSetVector(SE, sizeof(h_E[0]), h_E, 1, d_E, 1);
status = hipblasSetVector(SF, sizeof(h_F[0]), h_F, 1, d_F, 1);
gpuKron(d_A, d_A1, d_A2, M1, N1, M2, N2);
gpuKron(d_D, d_A, d_A3, M1*M2, N1*N2, M3, N3);
/*
print2darray(h_A1, M1, N1);
printArray2D(d_A1, M1, N1);
print2darray(h_A2, M2, N2);
printArray2D(d_A2, M2, N2);
print2darray(h_A3, M3, N3);
printArray2D(d_A3, M3, N3);
printArray2D(d_A, M1*M2, N1*N2);
printArray2D(d_D, M1*M2*M3, N1*N2*N3);
*/
hipDeviceSynchronize();
gettimeofday(&tv1, NULL);
status = hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, M1*M2, K, N1*N2, &alpha, d_A, M1*M2, d_B, N1*N2, &beta, d_C, M1*M2);
//status = hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, M2, N1*K, N2, &alpha, d_A2, M2, d_B, N2, &beta, d_Ctm, M2);
//printArray2D(d_Ctm, M2, N1);
//gpuPermute12(d_C, d_Ctm, M2, N1, K);
//printArray2D(d_C, N1, M2);
//status = hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, M1, M2*K, N1, &alpha, d_A1, M1, d_C, N1, &beta, d_Ctm, M1);
//printArray2D(d_Ctm, M1, M2);
//gpuPermute12(d_C, d_Ctm, M1, M2, K);
//printArray2D(d_C, M2, M1);
hipDeviceSynchronize();
gettimeofday(&tv2, NULL);
printf("\t 2D hipblasDgemm execution time (in millisec): %.2f\n",
(double)(tv2.tv_usec-tv1.tv_usec)/1000 +
(double)(tv2.tv_sec -tv1.tv_sec )*1000);
hipDeviceSynchronize();
gettimeofday(&tv1, NULL);
//gpuGEMM2DTensor<8,8>(d_Cts, d_A1, d_A2, d_B, d_Ctm, M1, M2, K);
//gpuTensorGEMM2D<8,8>(d_Cts, d_A1, d_A2, d_B, d_Ctm, index1, index2, M1, M2, K);
gpuTensorGEMM2D<8,8>(d_Cts, d_A1, d_A2, d_B, d_Ctm, index1, M1, M2, K);
hipDeviceSynchronize();
gettimeofday(&tv2, NULL);
printf("\t 2D tensor execution time (in millisec): %.2f\n",
(double)(tv2.tv_usec-tv1.tv_usec)/1000 +
(double)(tv2.tv_sec -tv1.tv_sec )*1000);
/*
printArray2D(d_A1, M1, N1);
printArray2D(d_A2, M2, N2);
printArray2D(d_A, M1*M2, N1*N2);
printArray2D(d_B, N1, N2);
printArray2D(d_C, M1, M2);
printArray2D(d_Cts, M1, M2);
*/
status = hipblasGetVector(SC, sizeof(h_C[0]), d_C, 1, h_C, 1);
status = hipblasGetVector(SC, sizeof(h_C[0]), d_Cts, 1, h_Cts, 1);
double e=0.0;
for (int i=0; i<M1*M2*K; i++)
if (fabs(h_C[i]-h_Cts[i])>e)
e = fabs(h_C[i]-h_Cts[i]);
printf("Maximum error: %g\n", e);
hipDeviceSynchronize();
gettimeofday(&tv1, NULL);
status = hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, M1*M2*M3, K, N1*N2*N3, &alpha, d_D, M1*M2*M3, d_E, N1*N2*N3, &beta, d_F, M1*M2*M3);
hipDeviceSynchronize();
gettimeofday(&tv2, NULL);
printf("\t 3D hipblasDgemm execution time (in millisec): %.2f\n",
(double)(tv2.tv_usec-tv1.tv_usec)/1000 +
(double)(tv2.tv_sec -tv1.tv_sec )*1000);
hipDeviceSynchronize();
gettimeofday(&tv1, NULL);
//gpuGEMM3DTensor<8,8,8>(d_Fts, d_A1, d_A2, d_A3, d_E, d_Ftm, M1, M2, M3, K);
//gpuTensorGEMM3D(T *C, T *A1, T *A2, T *A3, T *B, T* Ctmp, int *index, int M1, int M2, int M3, int K);
gpuTensorGEMM3D<8,8,8>(d_Fts, d_A1, d_A2, d_A3, d_E, d_Ftm, index1, M1, M2, M3, K);
hipDeviceSynchronize();
gettimeofday(&tv2, NULL);
printf("\t 3D tensor execution time (in millisec): %.2f\n",
(double)(tv2.tv_usec-tv1.tv_usec)/1000 +
(double)(tv2.tv_sec -tv1.tv_sec )*1000);
status = hipblasGetVector(SF, sizeof(h_F[0]), d_F, 1, h_F, 1);
status = hipblasGetVector(SF, sizeof(h_F[0]), d_Fts, 1, h_Fts, 1);
e=0.0;
for (int i=0; i<M1*M2*M3*K; i++)
if (fabs(h_F[i]-h_Fts[i])>e)
e = fabs(h_F[i]-h_Fts[i]);
printf("Maximum error: %g\n", e);
hipFree(d_A1);
hipFree(d_A2);
hipFree(d_A3);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
hipFree(d_D);
hipFree(d_E);
hipFree(d_F);
hipFree(d_Cts);
hipFree(d_Ctm);
free(h_A1);
free(h_A2);
free(h_A3);
free(h_A);
free(h_B);
free(h_C);
free(h_D);
free(h_E);
free(h_F);
free(h_Cts);
/* Shutdown */
status = hipblasDestroy(handle);
return(0);
}
|
3e9249df6575839ad900e4dc39b61faba76aa961.cu
|
/* GEMM is a General Matrix Multiply - a subroutine in the Basic Linear Algebra Subprograms library*/
/* Includes, system */
#include <stdio.h>
#include <stdlib.h>
//#include <string.h>
//#include <cstring>
#include <sys/time.h>
/* Includes, cuda */
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include "helper_cuda.h"
#include "gpuGEMM.cu"
#include "gpuArrayGemm.cu"
//using namespace std;
#define BLOCK_SIZE 16
void print1darray(double* a, int m)
{
for (int i=0; i<m; i++)
printf("%g ", a[i]);
printf("\n");
}
void print2darray(double* a, int m, int n)
{
for (int i=0; i<m; i++) {
for (int j=0; j<n; j++)
printf("%g ", a[j*m+i]);
printf("\n");
}
printf("\n");
}
void print3darray(double* a, int m, int n, int p)
{
for (int k=0; k<p; k++) {
for (int i=0; i<m; i++) {
for (int j=0; j<n; j++)
printf("%g ", a[k*n*m+j*m+i]);
printf("\n");
}
printf("\n");
}
printf("\n");
}
void printArray2D(double* a, int m, int n)
{
int N = m*n;
double *b = (double*) malloc (sizeof (double)*N);
cudaMemcpy(b, a, N*sizeof(double), cudaMemcpyDeviceToHost);
print2darray(b, m, n);
free(b);
}
/* ======================================================= */
/* CUDA implementation of dGEMM without using shared memory */
/* ======================================================= */
__global__ void cuda_dgemm(int M, int N, int K,
double alpha,
const double *A,
const double *B,
double beta,
double *C) {
int row = blockDim.y * blockIdx.y + threadIdx.y;
int col = blockDim.x * blockIdx.x + threadIdx.x;
if (row >= M || col >= N) return;
double prod = 0;
for (int k = 0; k < K; ++k){
prod += A[k * M + row] * B[col * K + k];
}
C[col*M + row] = alpha * prod + beta * C[col*M + row];
}
/* ======================================================= */
/* CUDA implementation of dGEMM using shared memory */
/* ======================================================= */
__global__ void cuda_dgemm_shmem(int n,
double alpha,
const double *B,
const double *A,
double beta,
double *C) {
// Block index
int block_col = blockIdx.x;
int block_row = blockIdx.y;
// Thread index
int thread_col = threadIdx.x;
int thread_row = threadIdx.y;
//printf("row = %d col = %d n= %d\n", block_col, block_row, n);
//int row = blockDim.y * blockIdx.y + threadIdx.y;
//int col = blockDim.x * blockIdx.x + threadIdx.x;
int aBegin = n * blockDim.x * block_row;
int aEnd = aBegin + n-1;
int bBegin = blockDim.x * block_col;
int bStep = n * blockDim.x;
double Csub = 0;
for (int a=aBegin, b=bBegin, istep=0;
a <= aEnd; a+= blockDim.x, b+=bStep, ++istep){
__shared__ double As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double Bs[BLOCK_SIZE][BLOCK_SIZE];
if ((istep*blockDim.x+thread_col < n) && (block_row*blockDim.x+ thread_row < n))
As[thread_row][thread_col] = A[a + n * thread_row + thread_col];
else
As[thread_row][thread_col] = 0;
if ((block_col*blockDim.x+thread_col < n) && (istep*blockDim.x + thread_row < n))
Bs[thread_row][thread_col] = B[b + n * thread_row + thread_col];
else
Bs[thread_row][thread_col] = 0;
__syncthreads();
// calculate the cell
for (int k = 0; k < blockDim.x; ++k)
Csub += As[thread_row][k] * Bs[k][thread_col];
__syncthreads();
}
// Write the block sub-matrix to global memory;
// each thread writes one element
int c = n * blockDim.x * block_row + blockDim.x * block_col;
if ((block_col*blockDim.x+thread_col < n) && (block_row*blockDim.x+ thread_row < n))
C[c + n * thread_row + thread_col] = alpha * Csub + beta * C[c +n * thread_row + thread_col];
}
/* ======================================================= */
/* Simple host implementation of a simple version of dgemm */
/* ======================================================= */
static void simple_dgemm(int M, int N, int K, double alpha, const double *A, const double *B,
double beta, double *C) {
int i, j, k;
for (i = 0; i < M; ++i) {
for (j = 0; j < N; ++j){
double prod = 0;
for (k = 0; k < K; ++k){
prod += A[k * M + i] * B[j * K + k];
}
C[j * M + i] = alpha * prod + beta * C[j * M + i];
}
}
}
/* ======================================================= */
/* Simple host implementation of a 1<->2 index permutation */
/* ======================================================= */
static void simple_permute12(int M, int N, int K, double *A)
{
//double B[M*N*K];
double *B = (double *)malloc(M*N*K * sizeof(double) );
int i, j, k;
for (k = 0; k < K; ++k)
for (j = 0; j < N; ++j)
for (i = 0; i < M; ++i)
B[i + M*j + M*N*k] = A[j + N*i + N*M*k];
k = M*N*K;
for (i=0; i<k; i++)
A[i] = B[i];
free(B);
}
/* ======================= */
/* dgemm from BLAS library */
/* ======================= */
extern "C"{
extern void dgemm_(char *, char * ,
int *, int *, int *,
double *, double *, int *,
double *, int *,
double *, double *, int *); };
/* ==== */
/* Main */
/* ==== */
int main(int argc, char **argv)
{
cublasStatus_t status;
double *h_A, *h_B, *h_C, *h_D, *h_E, *h_F, *h_A1, *h_A2, *h_A3;
double *d_A, *d_B, *d_C, *d_D, *d_E, *d_F, *d_A1, *d_A2, *d_A3;
double *h_Cts, *d_Cts, *d_Ctm, *h_Fts, *d_Fts, *d_Ftm;
int i, K, M1, N1, M2, N2, M3, N3, S1, S2, S3, SA, SB, SC, SD, SE, SF, SI;
int *index1, *index2;
cublasHandle_t handle;
struct timeval tv1, tv2;
status = cublasCreate(&handle);
double alpha = 1.0f;
double beta = 0.0f;
K = 1024*16;
M1 = 8; M2 = 8; M3 = 8;
N1 = 8; N2 = 8; N3 = 8;
S1 = M1*N1;
S2 = M2*N2;
S3 = M3*N3;
SA = S1*S2;
SB = N1*N2*K;
SC = M1*M2*K;
SD = S1*S2*S3;
SE = N1*N2*N3*K;
SF = M1*M2*M3*K;
SI = M2*N1*K;
h_A1 = (double *)malloc(S1 * sizeof(double) );
h_A2 = (double *)malloc(S2 * sizeof(double) );
h_A3 = (double *)malloc(S3 * sizeof(double) );
h_A = (double *)malloc(SA * sizeof(double) );
h_B = (double *)malloc(SB * sizeof(double) );
h_C = (double *)malloc(SC * sizeof(double) );
h_D = (double *)malloc(SD * sizeof(double) );
h_E = (double *)malloc(SE * sizeof(double) );
h_F = (double *)malloc(SF * sizeof(double) );
h_Cts = (double *)malloc(SC * sizeof(double) );
h_Fts = (double *)malloc(SF * sizeof(double) );
for (i = 0; i < S1; i++)
h_A1[i] = rand() / (double)RAND_MAX;
for (i = 0; i < S2; i++)
h_A2[i] = rand() / (double)RAND_MAX;
for (i = 0; i < S3; i++)
h_A3[i] = rand() / (double)RAND_MAX;
for (i = 0; i < SA; i++)
h_A[i] = rand() / (double)RAND_MAX;
for (i = 0; i < SB; i++)
h_B[i] = rand() / (double)RAND_MAX;
for (i = 0; i < SC; i++)
h_C[i] = rand() / (double)RAND_MAX;
for (i = 0; i < SD; i++)
h_D[i] = rand() / (double)RAND_MAX;
for (i = 0; i < SE; i++)
h_E[i] = rand() / (double)RAND_MAX;
for (i = 0; i < SF; i++)
h_F[i] = rand() / (double)RAND_MAX;
cudaMalloc((void **)&d_A1, S1 * sizeof(double));
cudaMalloc((void **)&d_A2, S2 * sizeof(double));
cudaMalloc((void **)&d_A3, S3 * sizeof(double));
cudaMalloc((void **)&d_A, SA * sizeof(double));
cudaMalloc((void **)&d_B, SB * sizeof(double));
cudaMalloc((void **)&d_C, SC * sizeof(double));
cudaMalloc((void **)&d_D, SD * sizeof(double));
cudaMalloc((void **)&d_E, SE * sizeof(double));
cudaMalloc((void **)&d_F, SF * sizeof(double));
cudaMalloc((void **)&d_Cts, SC * sizeof(double));
cudaMalloc((void **)&d_Ctm, SC * sizeof(double));
cudaMalloc((void **)&d_Fts, SF * sizeof(double));
cudaMalloc((void **)&d_Ftm, SF * sizeof(double));
cudaMalloc((void **)&index1, SI * sizeof(int));
cudaMalloc((void **)&index2, SC * sizeof(int));
gpuIndexPermute12(index1, M2, N1, K);
gpuIndexPermute12(index2, M1, M2, K);
status = cublasSetVector(S1, sizeof(h_A1[0]), h_A1, 1, d_A1, 1);
status = cublasSetVector(S2, sizeof(h_A2[0]), h_A2, 1, d_A2, 1);
status = cublasSetVector(S3, sizeof(h_A3[0]), h_A3, 1, d_A3, 1);
status = cublasSetVector(SA, sizeof(h_A[0]), h_A, 1, d_A, 1);
status = cublasSetVector(SB, sizeof(h_B[0]), h_B, 1, d_B, 1);
status = cublasSetVector(SC, sizeof(h_C[0]), h_C, 1, d_C, 1);
status = cublasSetVector(SD, sizeof(h_D[0]), h_D, 1, d_D, 1);
status = cublasSetVector(SE, sizeof(h_E[0]), h_E, 1, d_E, 1);
status = cublasSetVector(SF, sizeof(h_F[0]), h_F, 1, d_F, 1);
gpuKron(d_A, d_A1, d_A2, M1, N1, M2, N2);
gpuKron(d_D, d_A, d_A3, M1*M2, N1*N2, M3, N3);
/*
print2darray(h_A1, M1, N1);
printArray2D(d_A1, M1, N1);
print2darray(h_A2, M2, N2);
printArray2D(d_A2, M2, N2);
print2darray(h_A3, M3, N3);
printArray2D(d_A3, M3, N3);
printArray2D(d_A, M1*M2, N1*N2);
printArray2D(d_D, M1*M2*M3, N1*N2*N3);
*/
cudaDeviceSynchronize();
gettimeofday(&tv1, NULL);
status = cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, M1*M2, K, N1*N2, &alpha, d_A, M1*M2, d_B, N1*N2, &beta, d_C, M1*M2);
//status = cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, M2, N1*K, N2, &alpha, d_A2, M2, d_B, N2, &beta, d_Ctm, M2);
//printArray2D(d_Ctm, M2, N1);
//gpuPermute12(d_C, d_Ctm, M2, N1, K);
//printArray2D(d_C, N1, M2);
//status = cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, M1, M2*K, N1, &alpha, d_A1, M1, d_C, N1, &beta, d_Ctm, M1);
//printArray2D(d_Ctm, M1, M2);
//gpuPermute12(d_C, d_Ctm, M1, M2, K);
//printArray2D(d_C, M2, M1);
cudaDeviceSynchronize();
gettimeofday(&tv2, NULL);
printf("\t 2D cublasDgemm execution time (in millisec): %.2f\n",
(double)(tv2.tv_usec-tv1.tv_usec)/1000 +
(double)(tv2.tv_sec -tv1.tv_sec )*1000);
cudaDeviceSynchronize();
gettimeofday(&tv1, NULL);
//gpuGEMM2DTensor<8,8>(d_Cts, d_A1, d_A2, d_B, d_Ctm, M1, M2, K);
//gpuTensorGEMM2D<8,8>(d_Cts, d_A1, d_A2, d_B, d_Ctm, index1, index2, M1, M2, K);
gpuTensorGEMM2D<8,8>(d_Cts, d_A1, d_A2, d_B, d_Ctm, index1, M1, M2, K);
cudaDeviceSynchronize();
gettimeofday(&tv2, NULL);
printf("\t 2D tensor execution time (in millisec): %.2f\n",
(double)(tv2.tv_usec-tv1.tv_usec)/1000 +
(double)(tv2.tv_sec -tv1.tv_sec )*1000);
/*
printArray2D(d_A1, M1, N1);
printArray2D(d_A2, M2, N2);
printArray2D(d_A, M1*M2, N1*N2);
printArray2D(d_B, N1, N2);
printArray2D(d_C, M1, M2);
printArray2D(d_Cts, M1, M2);
*/
status = cublasGetVector(SC, sizeof(h_C[0]), d_C, 1, h_C, 1);
status = cublasGetVector(SC, sizeof(h_C[0]), d_Cts, 1, h_Cts, 1);
double e=0.0;
for (int i=0; i<M1*M2*K; i++)
if (fabs(h_C[i]-h_Cts[i])>e)
e = fabs(h_C[i]-h_Cts[i]);
printf("Maximum error: %g\n", e);
cudaDeviceSynchronize();
gettimeofday(&tv1, NULL);
status = cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, M1*M2*M3, K, N1*N2*N3, &alpha, d_D, M1*M2*M3, d_E, N1*N2*N3, &beta, d_F, M1*M2*M3);
cudaDeviceSynchronize();
gettimeofday(&tv2, NULL);
printf("\t 3D cublasDgemm execution time (in millisec): %.2f\n",
(double)(tv2.tv_usec-tv1.tv_usec)/1000 +
(double)(tv2.tv_sec -tv1.tv_sec )*1000);
cudaDeviceSynchronize();
gettimeofday(&tv1, NULL);
//gpuGEMM3DTensor<8,8,8>(d_Fts, d_A1, d_A2, d_A3, d_E, d_Ftm, M1, M2, M3, K);
//gpuTensorGEMM3D(T *C, T *A1, T *A2, T *A3, T *B, T* Ctmp, int *index, int M1, int M2, int M3, int K);
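// 3D tensor path with three factor matrices; <8,8,8> presumably selects the
// tile size for each factor, mirroring the 2D case above.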
gpuTensorGEMM3D<8,8,8>(d_Fts, d_A1, d_A2, d_A3, d_E, d_Ftm, index1, M1, M2, M3, K);
cudaDeviceSynchronize();
gettimeofday(&tv2, NULL);
printf("\t 3D tensor execution time (in millisec): %.2f\n",
(double)(tv2.tv_usec-tv1.tv_usec)/1000 +
(double)(tv2.tv_sec -tv1.tv_sec )*1000);
status = cublasGetVector(SF, sizeof(h_F[0]), d_F, 1, h_F, 1);
status = cublasGetVector(SF, sizeof(h_F[0]), d_Fts, 1, h_Fts, 1);
e=0.0;
for (int i=0; i<M1*M2*M3*K; i++)
if (fabs(h_F[i]-h_Fts[i])>e)
e = fabs(h_F[i]-h_Fts[i]);
printf("Maximum error: %g\n", e);
cudaFree(d_A1);
cudaFree(d_A2);
cudaFree(d_A3);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaFree(d_D);
cudaFree(d_E);
cudaFree(d_F);
cudaFree(d_Cts);
cudaFree(d_Ctm);
cudaFree(d_Fts);
cudaFree(d_Ftm);
cudaFree(index1);
cudaFree(index2);
free(h_A1);
free(h_A2);
free(h_A3);
free(h_A);
free(h_B);
free(h_C);
free(h_D);
free(h_E);
free(h_F);
free(h_Cts);
free(h_Fts);
/* Shutdown */
status = cublasDestroy(handle);
return(0);
}
|
8fa25cd833874b205e4236652936ab3ddb8337c9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include "../def.h"
#include "../smpl.h"
namespace smpl {
namespace device {
__global__ void PoseBlend1(float *theta,
float *poseRotation, float *restPoseRotation) {
int j = threadIdx.x;
int ind = j * 3;
float norm = std::sqrt(
theta[ind] * theta[ind] + theta[ind + 1] * theta[ind + 1] + theta[ind + 2] * theta[ind + 2]);
float sin = std::sin(norm);
float cos = std::cos(norm);
theta[ind] /= norm;
theta[ind + 1] /= norm;
theta[ind + 2] /= norm; // axes
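// Rodrigues' formula: with unit axis a and angle norm, R = I + sin(norm)*K
// + (1 - cos(norm))*K*K, where K is the skew-symmetric matrix of a built
// below. The division assumes norm > 0; a zero rotation would need a small
// epsilon guard (not present here).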
float skew[9];
skew[0] = 0;
skew[1] = -1 * theta[ind + 2];
skew[2] = theta[ind + 1];
skew[3] = theta[ind + 2];
skew[4] = 0;
skew[5] = -1 * theta[ind];
skew[6] = -1 * theta[ind + 1];
skew[7] = theta[ind];
skew[8] = 0;
ind = ind * 3;
for (int p = 0; p < 9; p++)
poseRotation[ind + p] = 0;
poseRotation[ind] = 1;
poseRotation[ind + 4] = 1;
poseRotation[ind + 8] = 1;
for (int k1 = 0; k1 < 3; k1++)
for (int k2 = 0; k2 < 3; k2++) {
int k = k1 * 3 + k2;
poseRotation[ind + k] += skew[k] * sin;
float num = 0;
for (int l = 0; l < 3; l++)
num += skew[k1 * 3 + l] * skew[l * 3 + k2];
poseRotation[ind + k] += (1 - cos) * num;// (N, 24, 3, 3)
}
for (int k = 0; k < 9; k++)
restPoseRotation[ind + k] = 0;
restPoseRotation[ind] = 1;
restPoseRotation[ind + 4] = 1;
restPoseRotation[ind + 8] = 1;
}
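// PoseBlend2 accumulates the pose blend shape per vertex coordinate:
// sum over the 207 = 23*9 rotation entries (root joint excluded, hence the
// +9 offset) of (poseRotation - restPoseRotation) * poseBlendBasis.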
__global__ void
PoseBlend2(float *poseRotation, float *poseBlendBasis, float *restPoseRotation,
float *poseBlendShape) {
int j = blockIdx.x;
int k = threadIdx.x;
int ind = j * 3 + k;
poseBlendShape[ind] = 0;
for (int l = 0; l < 207; l++)
poseBlendShape[ind] += (poseRotation[l + 9] - restPoseRotation[l + 9]) *
poseBlendBasis[ind * 207 + l];
}
__global__ void ShapeBlend(float *beta, float *shapeBlendBasis, int shapebasisdim,
float *shapeBlendShape) {
int j = blockIdx.x;
int k = threadIdx.x;
int ind = j * 3 + k;
shapeBlendShape[ind] = 0;
for (int l = 0; l < shapebasisdim; l++)
shapeBlendShape[ind] += beta[l] * shapeBlendBasis[ind * shapebasisdim + l];// (6890, 3)
}
}
std::tuple<float *, float *, float *> SMPL::poseBlendShape(float *theta) {
float *d_theta, *d_poseRotation, *d_restPoseRotation, *d_poseBlendShape;
hipMalloc((void **) &d_theta, JOINT_NUM * 3 * sizeof(float));
hipMalloc((void **) &d_poseRotation, JOINT_NUM * 9 * sizeof(float));
hipMalloc((void **) &d_restPoseRotation, JOINT_NUM * 9 * sizeof(float));
hipMalloc((void **) &d_poseBlendShape, VERTEX_NUM * 3 * sizeof(float));
hipMemcpy(d_theta, theta, JOINT_NUM * 3 * sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( device::PoseBlend1), dim3(1),dim3(JOINT_NUM), 0, 0, d_theta, d_poseRotation, d_restPoseRotation);
hipLaunchKernelGGL(( device::PoseBlend2), dim3(VERTEX_NUM),dim3(3), 0, 0, d_poseRotation, d_poseBlendBasis, d_restPoseRotation, d_poseBlendShape);
hipFree(d_theta);
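// Ownership note: the returned device buffers (d_poseRotation,
// d_restPoseRotation, d_poseBlendShape) are not freed here; the caller is
// expected to release them once downstream stages are done with them.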
return {d_poseRotation, d_restPoseRotation, d_poseBlendShape};
}
float *SMPL::shapeBlendShape(float *beta) {
float *d_beta, *d_shapeBlendShape;
hipMalloc((void **) &d_beta, SHAPE_BASIS_DIM * sizeof(float));
hipMalloc((void **) &d_shapeBlendShape, VERTEX_NUM * 3 * sizeof(float));
hipMemcpy(d_beta, beta, SHAPE_BASIS_DIM * sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( device::ShapeBlend), dim3(VERTEX_NUM),dim3(3), 0, 0, d_beta, d_shapeBlendBasis, SHAPE_BASIS_DIM, d_shapeBlendShape);
hipFree(d_beta);
return d_shapeBlendShape;
}
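// Minimal usage sketch (hypothetical; assumes an SMPL instance `model` and
// host arrays theta[JOINT_NUM * 3] and beta[SHAPE_BASIS_DIM]):
// auto [dR, dRrest, dPose] = model.poseBlendShape(theta);
// float *dShape = model.shapeBlendShape(beta);
// ... downstream kernels consume these device buffers ...
// hipFree(dR); hipFree(dRrest); hipFree(dPose); hipFree(dShape);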
}
|
8fa25cd833874b205e4236652936ab3ddb8337c9.cu
|
#include <cmath>
#include "../def.h"
#include "../smpl.h"
namespace smpl {
namespace device {
__global__ void PoseBlend1(float *theta,
float *poseRotation, float *restPoseRotation) {
int j = threadIdx.x;
int ind = j * 3;
float norm = std::sqrt(
theta[ind] * theta[ind] + theta[ind + 1] * theta[ind + 1] + theta[ind + 2] * theta[ind + 2]);
float sin = std::sin(norm);
float cos = std::cos(norm);
theta[ind] /= norm;
theta[ind + 1] /= norm;
theta[ind + 2] /= norm; // axes
float skew[9];
skew[0] = 0;
skew[1] = -1 * theta[ind + 2];
skew[2] = theta[ind + 1];
skew[3] = theta[ind + 2];
skew[4] = 0;
skew[5] = -1 * theta[ind];
skew[6] = -1 * theta[ind + 1];
skew[7] = theta[ind];
skew[8] = 0;
ind = ind * 3;
for (int p = 0; p < 9; p++)
poseRotation[ind + p] = 0;
poseRotation[ind] = 1;
poseRotation[ind + 4] = 1;
poseRotation[ind + 8] = 1;
for (int k1 = 0; k1 < 3; k1++)
for (int k2 = 0; k2 < 3; k2++) {
int k = k1 * 3 + k2;
poseRotation[ind + k] += skew[k] * sin;
float num = 0;
for (int l = 0; l < 3; l++)
num += skew[k1 * 3 + l] * skew[l * 3 + k2];
poseRotation[ind + k] += (1 - cos) * num;// (N, 24, 3, 3)
}
for (int k = 0; k < 9; k++)
restPoseRotation[ind + k] = 0;
restPoseRotation[ind] = 1;
restPoseRotation[ind + 4] = 1;
restPoseRotation[ind + 8] = 1;
}
__global__ void
PoseBlend2(float *poseRotation, float *poseBlendBasis, float *restPoseRotation,
float *poseBlendShape) {
int j = blockIdx.x;
int k = threadIdx.x;
int ind = j * 3 + k;
poseBlendShape[ind] = 0;
for (int l = 0; l < 207; l++)
poseBlendShape[ind] += (poseRotation[l + 9] - restPoseRotation[l + 9]) *
poseBlendBasis[ind * 207 + l];
}
__global__ void ShapeBlend(float *beta, float *shapeBlendBasis, int shapebasisdim,
float *shapeBlendShape) {
int j = blockIdx.x;
int k = threadIdx.x;
int ind = j * 3 + k;
shapeBlendShape[ind] = 0;
for (int l = 0; l < shapebasisdim; l++)
shapeBlendShape[ind] += beta[l] * shapeBlendBasis[ind * shapebasisdim + l];// (6890, 3)
}
}
std::tuple<float *, float *, float *> SMPL::poseBlendShape(float *theta) {
float *d_theta, *d_poseRotation, *d_restPoseRotation, *d_poseBlendShape;
cudaMalloc((void **) &d_theta, JOINT_NUM * 3 * sizeof(float));
cudaMalloc((void **) &d_poseRotation, JOINT_NUM * 9 * sizeof(float));
cudaMalloc((void **) &d_restPoseRotation, JOINT_NUM * 9 * sizeof(float));
cudaMalloc((void **) &d_poseBlendShape, VERTEX_NUM * 3 * sizeof(float));
cudaMemcpy(d_theta, theta, JOINT_NUM * 3 * sizeof(float), cudaMemcpyHostToDevice);
device::PoseBlend1<<<1,JOINT_NUM>>>(d_theta, d_poseRotation, d_restPoseRotation);
device::PoseBlend2<<<VERTEX_NUM,3>>>(d_poseRotation, d_poseBlendBasis, d_restPoseRotation, d_poseBlendShape);
cudaFree(d_theta);
return {d_poseRotation, d_restPoseRotation, d_poseBlendShape};
}
float *SMPL::shapeBlendShape(float *beta) {
float *d_beta, *d_shapeBlendShape;
cudaMalloc((void **) &d_beta, SHAPE_BASIS_DIM * sizeof(float));
cudaMalloc((void **) &d_shapeBlendShape, VERTEX_NUM * 3 * sizeof(float));
cudaMemcpy(d_beta, beta, SHAPE_BASIS_DIM * sizeof(float), cudaMemcpyHostToDevice);
device::ShapeBlend<<<VERTEX_NUM,3>>>(d_beta, d_shapeBlendBasis, SHAPE_BASIS_DIM, d_shapeBlendShape);
cudaFree(d_beta);
return d_shapeBlendShape;
}
}
|