hip_filename (string, length 5–84) | hip_content (string, length 79–9.69M) | cuda_filename (string, length 4–83) | cuda_content (string, length 19–9.69M)
---|---|---|---|
523855a215dda133e4dafdb8e3916a97db4bf02d.hip | // !!! This is a file automatically generated by hipify!!!
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <fcntl.h>
#include <hip/hip_runtime.h>
#include "string.h"
#define DEFAULT_THRESHOLD 4000
#define DEFAULT_FILENAME "ansel3.ppm"
__global__ void sobel(unsigned int *ingoing, int *outgoing, int xsize, int ysize, int threshold) {
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
if ((x > 0) && (x < ysize - 1) &&(y > 0) && (y < xsize - 1)) {
int sum_x = ingoing[(x + 1) + ((y - 1) * ysize)] - ingoing[(x - 1) + ((y - 1) * ysize)]
+ (2 * ingoing[(x + 1) + ( y * ysize)]) - (2 * ingoing[(x - 1) + ( y * ysize)])
+ ingoing[(x + 1) + ((y + 1) * ysize)] - ingoing[(x - 1) + ((y + 1) * ysize)];
int sum_y = ingoing[(x + 1) + ((y + 1) * ysize)] - ingoing[(x + 1) + ((y - 1) * ysize)]
+ (2 * ingoing[ x + ((y + 1) * ysize)]) - (2 * ingoing[ x + ((y - 1) * ysize)])
+ ingoing[(x - 1) + ((y + 1) * ysize)] - ingoing[(x - 1) + ((y - 1) * ysize)];
int magnitude = (sum_x * sum_x) + (sum_y * sum_y);
int i = x + (y * ysize);
if (magnitude > threshold)
outgoing[i] = 255;
}
}
unsigned int *read_ppm(char *filename, int *xsize, int *ysize, int *maxval) {
if (!filename || filename[0] == '\0') {
fprintf(stderr, "read_ppm but no file name\n");
return NULL;
}
FILE *fp;
fprintf(stderr, "read_ppm(%s)\n", filename);
fp = fopen(filename, "rb");
if (!fp) {
fprintf(stderr, "read_ppm() ERROR file '%s' cannot be opened for reading\n", filename);
return NULL;
}
char chars[1024];
int num = fread(chars, sizeof(char), 1000, fp);
if (chars[0] != 'P' || chars[1] != '6') {
fprintf(stderr, "Texture::Texture() ERROR file '%s' does not start with \"P6\" I am expecting a binary PPM file\n", filename);
return NULL;
}
unsigned int width, height, maxvalue;
char *ptr = chars + 3; // P 6 newline
if (*ptr == '#') { // comment line!
ptr = 1 + strstr(ptr, "\n");
}
num = sscanf(ptr, "%d\n%d\n%d", &width, &height, &maxvalue);
fprintf(stderr, "read %d things: width %d, height %d, maxval %d\n", num, width, height, maxvalue);
*xsize = width;
*ysize = height;
*maxval = maxvalue;
unsigned int *pic = (unsigned int *)malloc(width * height * sizeof(unsigned int));
if (!pic) {
fprintf(stderr, "read_ppm() unable to allocate %d x %d unsigned ints for the picture\n", width, height);
return NULL; // fail but return
}
// allocate buffer to read the rest of the file into
int bufsize = 3 * width * height * sizeof(unsigned char);
if ((*maxval) > 255) {
bufsize *= 2;
}
unsigned char *buf = (unsigned char *)malloc(bufsize);
if (!buf) {
fprintf(stderr, "read_ppm() unable to allocate %d bytes of read buffer\n", bufsize);
return NULL; // fail but return
}
// TODO really read
char duh[80];
char *line = chars;
// find the start of the pixel data. no doubt stupid
sprintf(duh, "%d\0", *xsize);
line = strstr(line, duh);
//fprintf(stderr, "%s found at offset %d\n", duh, line-chars);
line += strlen(duh) + 1;
sprintf(duh, "%d\0", *ysize);
line = strstr(line, duh);
//fprintf(stderr, "%s found at offset %d\n", duh, line-chars);
line += strlen(duh) + 1;
sprintf(duh, "%d\0", *maxval);
line = strstr(line, duh);
fprintf(stderr, "%s found at offset %d\n", duh, line - chars);
line += strlen(duh) + 1;
long offset = line - chars;
//lseek(fd, offset, SEEK_SET); // move to the correct offset
fseek(fp, offset, SEEK_SET); // move to the correct offset
//long numread = read(fd, buf, bufsize);
long numread = fread(buf, sizeof(char), bufsize, fp);
fprintf(stderr, "Texture %s read %ld of %ld bytes\n", filename, numread, bufsize);
fclose(fp);
int pixels = (*xsize) * (*ysize);
int i;
for (i=0; i<pixels; i++) {
pic[i] = (int) buf[3*i]; // red channel
}
return pic; // success
}
void write_ppm( char *filename, int xsize, int ysize, int maxval, int *pic) {
FILE *fp;
fp = fopen(filename, "w");
if (!fp) {
fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n");
exit(-1);
}
fprintf(fp, "P6\n");
fprintf(fp,"%d %d\n%d\n", xsize, ysize, maxval);
int numpix = xsize * ysize;
int i;
for (i=0; i<numpix; i++) {
unsigned char uc = (unsigned char) pic[i];
fprintf(fp, "%c%c%c", uc, uc, uc);
}
fclose(fp);
}
int main(int argc, char **argv) {
char *filename;
filename = strdup(DEFAULT_FILENAME);
int threshold;
threshold = DEFAULT_THRESHOLD;
if (argc > 1) {
if (argc == 3) {
filename = strdup(argv[1]);
threshold = atoi(argv[2]);
}
if (argc == 2) {
threshold = atoi(argv[1]);
}
}
int xsize, ysize, maxval;
unsigned int *pic = read_ppm(filename, &ysize, &xsize, &maxval);
int size = xsize * ysize;
dim3 BLOCK(32, 32);
dim3 GRID((int)ceil((float)ysize / 32), (int)ceil((float)xsize / 32));
unsigned int *h_ingoing;
int *h_outgoing;
h_ingoing = pic;
h_outgoing = (int *)calloc(size, sizeof *h_outgoing);
unsigned int *d_ingoing;
int *d_outgoing;
hipMalloc(&d_ingoing, size * sizeof *d_ingoing);
hipMalloc(&d_outgoing, size * sizeof *d_outgoing);
hipMemcpy(d_ingoing, h_ingoing, size * sizeof *h_ingoing, hipMemcpyHostToDevice);
hipMemcpy(d_outgoing, h_outgoing, size * sizeof *h_outgoing, hipMemcpyHostToDevice);
float time;
hipEvent_t begin, end;
hipEventCreate(&begin);
hipEventCreate(&end);
hipEventRecord(begin, 0);
hipLaunchKernelGGL(( sobel), dim3(GRID), dim3(BLOCK), 0, 0, d_ingoing, d_outgoing, xsize, ysize, threshold);
hipEventRecord(end, 0);
hipEventSynchronize(end);
hipEventElapsedTime(&time, begin, end);
hipMemcpy(h_outgoing, d_outgoing, size * sizeof *h_outgoing, hipMemcpyDeviceToHost);
printf("%f\n", time);
write_ppm("result.ppm", ysize, xsize, 255, h_outgoing);
}
| 523855a215dda133e4dafdb8e3916a97db4bf02d.cu | #include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <fcntl.h>
#include <cuda.h>
#include "string.h"
#define DEFAULT_THRESHOLD 4000
#define DEFAULT_FILENAME "ansel3.ppm"
__global__ void sobel(unsigned int *ingoing, int *outgoing, int xsize, int ysize, int threshold) {
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
if ((x > 0) && (x < ysize - 1) &&(y > 0) && (y < xsize - 1)) {
int sum_x = ingoing[(x + 1) + ((y - 1) * ysize)] - ingoing[(x - 1) + ((y - 1) * ysize)]
+ (2 * ingoing[(x + 1) + ( y * ysize)]) - (2 * ingoing[(x - 1) + ( y * ysize)])
+ ingoing[(x + 1) + ((y + 1) * ysize)] - ingoing[(x - 1) + ((y + 1) * ysize)];
int sum_y = ingoing[(x + 1) + ((y + 1) * ysize)] - ingoing[(x + 1) + ((y - 1) * ysize)]
+ (2 * ingoing[ x + ((y + 1) * ysize)]) - (2 * ingoing[ x + ((y - 1) * ysize)])
+ ingoing[(x - 1) + ((y + 1) * ysize)] - ingoing[(x - 1) + ((y - 1) * ysize)];
int magnitude = (sum_x * sum_x) + (sum_y * sum_y);
int i = x + (y * ysize);
if (magnitude > threshold)
outgoing[i] = 255;
}
}
unsigned int *read_ppm(char *filename, int *xsize, int *ysize, int *maxval) {
if (!filename || filename[0] == '\0') {
fprintf(stderr, "read_ppm but no file name\n");
return NULL;
}
FILE *fp;
fprintf(stderr, "read_ppm(%s)\n", filename);
fp = fopen(filename, "rb");
if (!fp) {
fprintf(stderr, "read_ppm() ERROR file '%s' cannot be opened for reading\n", filename);
return NULL;
}
char chars[1024];
int num = fread(chars, sizeof(char), 1000, fp);
if (chars[0] != 'P' || chars[1] != '6') {
fprintf(stderr, "Texture::Texture() ERROR file '%s' does not start with \"P6\" I am expecting a binary PPM file\n", filename);
return NULL;
}
unsigned int width, height, maxvalue;
char *ptr = chars + 3; // P 6 newline
if (*ptr == '#') { // comment line!
ptr = 1 + strstr(ptr, "\n");
}
num = sscanf(ptr, "%d\n%d\n%d", &width, &height, &maxvalue);
fprintf(stderr, "read %d things: width %d, height %d, maxval %d\n", num, width, height, maxvalue);
*xsize = width;
*ysize = height;
*maxval = maxvalue;
unsigned int *pic = (unsigned int *)malloc(width * height * sizeof(unsigned int));
if (!pic) {
fprintf(stderr, "read_ppm() unable to allocate %d x %d unsigned ints for the picture\n", width, height);
return NULL; // fail but return
}
// allocate buffer to read the rest of the file into
int bufsize = 3 * width * height * sizeof(unsigned char);
if ((*maxval) > 255) {
bufsize *= 2;
}
unsigned char *buf = (unsigned char *)malloc(bufsize);
if (!buf) {
fprintf(stderr, "read_ppm() unable to allocate %d bytes of read buffer\n", bufsize);
return NULL; // fail but return
}
// TODO really read
char duh[80];
char *line = chars;
// find the start of the pixel data. no doubt stupid
sprintf(duh, "%d\0", *xsize);
line = strstr(line, duh);
//fprintf(stderr, "%s found at offset %d\n", duh, line-chars);
line += strlen(duh) + 1;
sprintf(duh, "%d\0", *ysize);
line = strstr(line, duh);
//fprintf(stderr, "%s found at offset %d\n", duh, line-chars);
line += strlen(duh) + 1;
sprintf(duh, "%d\0", *maxval);
line = strstr(line, duh);
fprintf(stderr, "%s found at offset %d\n", duh, line - chars);
line += strlen(duh) + 1;
long offset = line - chars;
//lseek(fd, offset, SEEK_SET); // move to the correct offset
fseek(fp, offset, SEEK_SET); // move to the correct offset
//long numread = read(fd, buf, bufsize);
long numread = fread(buf, sizeof(char), bufsize, fp);
fprintf(stderr, "Texture %s read %ld of %ld bytes\n", filename, numread, bufsize);
fclose(fp);
int pixels = (*xsize) * (*ysize);
int i;
for (i=0; i<pixels; i++) {
pic[i] = (int) buf[3*i]; // red channel
}
return pic; // success
}
void write_ppm( char *filename, int xsize, int ysize, int maxval, int *pic) {
FILE *fp;
fp = fopen(filename, "w");
if (!fp) {
fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n");
exit(-1);
}
fprintf(fp, "P6\n");
fprintf(fp,"%d %d\n%d\n", xsize, ysize, maxval);
int numpix = xsize * ysize;
int i;
for (i=0; i<numpix; i++) {
unsigned char uc = (unsigned char) pic[i];
fprintf(fp, "%c%c%c", uc, uc, uc);
}
fclose(fp);
}
int main(int argc, char **argv) {
char *filename;
filename = strdup(DEFAULT_FILENAME);
int threshold;
threshold = DEFAULT_THRESHOLD;
if (argc > 1) {
if (argc == 3) {
filename = strdup(argv[1]);
threshold = atoi(argv[2]);
}
if (argc == 2) {
threshold = atoi(argv[1]);
}
}
int xsize, ysize, maxval;
unsigned int *pic = read_ppm(filename, &ysize, &xsize, &maxval);
int size = xsize * ysize;
dim3 BLOCK(32, 32);
dim3 GRID((int)ceil((float)ysize / 32), (int)ceil((float)xsize / 32));
unsigned int *h_ingoing;
int *h_outgoing;
h_ingoing = pic;
h_outgoing = (int *)calloc(size, sizeof *h_outgoing);
unsigned int *d_ingoing;
int *d_outgoing;
cudaMalloc(&d_ingoing, size * sizeof *d_ingoing);
cudaMalloc(&d_outgoing, size * sizeof *d_outgoing);
cudaMemcpy(d_ingoing, h_ingoing, size * sizeof *h_ingoing, cudaMemcpyHostToDevice);
cudaMemcpy(d_outgoing, h_outgoing, size * sizeof *h_outgoing, cudaMemcpyHostToDevice);
float time;
cudaEvent_t begin, end;
cudaEventCreate(&begin);
cudaEventCreate(&end);
cudaEventRecord(begin, 0);
sobel<<<GRID, BLOCK>>>(d_ingoing, d_outgoing, xsize, ysize, threshold);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&time, begin, end);
cudaMemcpy(h_outgoing, d_outgoing, size * sizeof *h_outgoing, cudaMemcpyDeviceToHost);
printf("%f\n", time);
write_ppm("result.ppm", ysize, xsize, 255, h_outgoing);
}
|
4392a8aa3eac59a6c030e5fccc1f1a55c734db63.hip | // !!! This is a file automatically generated by hipify!!!
/*
============================================================================
Name : cuda_lock.cu
Author : vuongp
Version :
Copyright : Your copyright notice
Description : CUDA thread-wide lock. This code works at the moment, but
there is no guarantee that it will work on all GPU architectures.
============================================================================
*/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", \
hipGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
__device__ int mLock = 0;
__global__ void func(unsigned int *comm) {
bool blocked = true;
while(blocked) {
if(0 == atomicCAS(&mLock, 0, 1)) {
printf("Block Id = %d, Thread Id = %d acquired lock\n", blockIdx.x, threadIdx.x);
*comm += 1;
printf("Block Id = %d, Thread Id = %d, comm = %u\n", blockIdx.x, threadIdx.x, *comm);
atomicExch(&mLock, 0);
printf("Block Id = %d, Thread Id = %d released lock\n", blockIdx.x, threadIdx.x);
blocked = false;
}
}
}
int main(void)
{
unsigned int *d_comm;
gpuErrchk(hipMalloc(&d_comm, sizeof(unsigned int)));
gpuErrchk(hipMemset(d_comm, 0, sizeof(unsigned int)));
hipLaunchKernelGGL(( func), dim3(10), dim3(64), 0, 0, d_comm);
gpuErrchk(hipDeviceSynchronize());
gpuErrchk(hipPeekAtLastError());
return 0;
}
| 4392a8aa3eac59a6c030e5fccc1f1a55c734db63.cu | /*
============================================================================
Name : cuda_lock.cu
Author : vuongp
Version :
Copyright : Your copyright notice
Description : CUDA thread-wide lock. This code works at the moment, but
there is no guarantee that it will work on all GPU architectures.
============================================================================
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", \
cudaGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
__device__ int mLock = 0;
__global__ void func(unsigned int *comm) {
bool blocked = true;
while(blocked) {
if(0 == atomicCAS(&mLock, 0, 1)) {
printf("Block Id = %d, Thread Id = %d acquired lock\n", blockIdx.x, threadIdx.x);
*comm += 1;
printf("Block Id = %d, Thread Id = %d, comm = %u\n", blockIdx.x, threadIdx.x, *comm);
atomicExch(&mLock, 0);
printf("Block Id = %d, Thread Id = %d released lock\n", blockIdx.x, threadIdx.x);
blocked = false;
}
}
}
int main(void)
{
unsigned int *d_comm;
gpuErrchk(cudaMalloc(&d_comm, sizeof(unsigned int)));
gpuErrchk(cudaMemset(d_comm, 0, sizeof(unsigned int)));
func<<<10, 64>>>(d_comm);
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaPeekAtLastError());
return 0;
}
|
dbe5baf2d3744b7e9b4a3eb9196e0780d21abd24.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2019 BlazingDB, Inc.
* Copyright 2019 Felipe Aramburu <[email protected]>
* Copyright 2018 Rommel Quintanilla <[email protected]>
* Copyright 2019 William Scott Malpica <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/cudf.h>
#include <cudf/functions.h>
#include <cudf/types.h>
#include <bitmask/bit_mask.cuh>
#include <utilities/cudf_utils.h>
#include <utilities/column_utils.hpp>
#include <tests/utilities/cudf_test_utils.cuh>
#include <tests/utilities/cudf_test_fixtures.h>
#include <tests/utilities/nvcategory_utils.cuh>
#include <nvstrings/NVCategory.h>
#include <nvstrings/NVStrings.h>
#include <rmm/rmm.h>
#include <iostream>
#include <random>
#include <cstring>
namespace cudf {
namespace test {
std::string random_string(size_t len, std::string const &allowed_chars) {
std::mt19937_64 gen { std::random_device()() };
std::uniform_int_distribution<size_t> dist { 0, allowed_chars.length()-1 };
std::string ret;
std::generate_n(std::back_inserter(ret), len, [&] { return allowed_chars[dist(gen)]; });
return ret;
}
gdf_column * create_nv_category_column(gdf_size_type num_rows, bool repeat_strings){
const char ** string_host_data = new const char *[num_rows];
for(gdf_size_type row_index = 0; row_index < num_rows; row_index++){
string_host_data[row_index] = new char[(num_rows + 25) / 26]; //allows string to grow depending on numbe of rows
std::string temp_string = "";
int num_chars = repeat_strings ? 1 : (row_index / 26) + 1;
char repeat_char = (26 - (row_index % 26)) + 65; //chars are Z,Y ...C,B,A,ZZ,YY,.....BBB,AAA.....
for(int char_index = 0; char_index < num_chars; char_index++){
temp_string.push_back(repeat_char);
}
temp_string.push_back(0);
std::memcpy((void *) string_host_data[row_index],temp_string.c_str(),temp_string.size());
}
NVCategory* category = NVCategory::create_from_array(string_host_data, num_rows);
gdf_column * column = new gdf_column{};
int * data;
RMM_ALLOC(&data, num_rows * sizeof(gdf_nvstring_category) , 0);
category->get_values( (int *)data, true );
bit_mask::bit_mask_t * valid;
bit_mask::create_bit_mask(&valid, num_rows,1);
gdf_error err = gdf_column_view(column,
(void *) data,
(gdf_valid_type *)valid,
num_rows,
GDF_STRING_CATEGORY);
column->dtype_info.category = category;
return column;
}
gdf_column * create_nv_category_column_strings(const char ** string_host_data, gdf_size_type num_rows){
NVCategory* category = NVCategory::create_from_array(string_host_data, num_rows);
gdf_column * column = new gdf_column{};
int * data;
RMM_ALLOC(&data, num_rows * sizeof(gdf_nvstring_category) , 0);
category->get_values( (int *)data, true );
bit_mask::bit_mask_t * valid;
bit_mask::create_bit_mask(&valid, num_rows,1);
gdf_error err = gdf_column_view(column,
(void *) data,
(gdf_valid_type *)valid,
num_rows,
GDF_STRING_CATEGORY);
column->dtype_info.category = category;
return column;
}
const char ** generate_string_data(gdf_size_type num_rows, size_t length, bool print){
const char ** string_host_data = new const char *[num_rows];
for(gdf_size_type row_index = 0; row_index < num_rows; row_index++){
string_host_data[row_index] = new char[length+1];
std::string rand_string = cudf::test::random_string(length);
rand_string.push_back(0);
if(print)
std::cout<<rand_string<<"\t";
std::memcpy((void *) string_host_data[row_index],rand_string.c_str(),rand_string.size());
}
if(print)
std::cout<<std::endl;
return string_host_data;
}
std::tuple<std::vector<std::string>, std::vector<gdf_valid_type>> nvcategory_column_to_host(gdf_column * column){
if (column->dtype == GDF_STRING_CATEGORY && column->dtype_info.category != nullptr && column->size > 0) {
NVStrings* tptr = static_cast<NVCategory*>(column->dtype_info.category)->to_strings();
unsigned int count = tptr->size();
if( count==0 )
return std::make_tuple(std::vector<std::string>(), std::vector<gdf_valid_type>());
std::vector<char*> list(count);
char** plist = list.data();
std::vector<int> lens(count);
size_t totalmem = tptr->byte_count(lens.data(),false);
std::vector<char> buffer(totalmem+count,0); // null terminates each string
char* pbuffer = buffer.data();
size_t offset = 0;
for( unsigned int idx=0; idx < count; ++idx )
{
plist[idx] = pbuffer + offset;
offset += lens[idx]+1; // account for null-terminator; also nulls are -1
}
tptr->to_host(plist,0,count);
// TODO: workaround for custrings issue #330. Remove once fix is merged
// workaround just resets the nullptr entries back to their proper offsets
// so that the std::vector constructor below can succeed.
offset = 0;
for( unsigned int idx=0; idx < count; ++idx )
{
plist[idx] = pbuffer + offset;
offset += lens[idx]+1; // account for null-terminator; also nulls are -1
}
NVStrings::destroy(tptr);
std::vector<std::string> host_strings_vector(plist, plist + column->size);
std::vector<gdf_valid_type> host_bitmask(gdf_valid_allocation_size(column->size));
if (cudf::is_nullable(*column)) {
CUDA_TRY(hipMemcpy(host_bitmask.data(),
column->valid,
host_bitmask.size()*sizeof(gdf_valid_type),
hipMemcpyDeviceToHost));
}
return std::make_tuple(host_strings_vector, host_bitmask);
} else {
return std::make_tuple(std::vector<std::string>(), std::vector<gdf_valid_type>());
}
}
} // namespace test
} // namespace cudf
| dbe5baf2d3744b7e9b4a3eb9196e0780d21abd24.cu | /*
* Copyright 2019 BlazingDB, Inc.
* Copyright 2019 Felipe Aramburu <[email protected]>
* Copyright 2018 Rommel Quintanilla <[email protected]>
* Copyright 2019 William Scott Malpica <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/cudf.h>
#include <cudf/functions.h>
#include <cudf/types.h>
#include <bitmask/bit_mask.cuh>
#include <utilities/cudf_utils.h>
#include <utilities/column_utils.hpp>
#include <tests/utilities/cudf_test_utils.cuh>
#include <tests/utilities/cudf_test_fixtures.h>
#include <tests/utilities/nvcategory_utils.cuh>
#include <nvstrings/NVCategory.h>
#include <nvstrings/NVStrings.h>
#include <rmm/rmm.h>
#include <iostream>
#include <random>
#include <cstring>
namespace cudf {
namespace test {
std::string random_string(size_t len, std::string const &allowed_chars) {
std::mt19937_64 gen { std::random_device()() };
std::uniform_int_distribution<size_t> dist { 0, allowed_chars.length()-1 };
std::string ret;
std::generate_n(std::back_inserter(ret), len, [&] { return allowed_chars[dist(gen)]; });
return ret;
}
gdf_column * create_nv_category_column(gdf_size_type num_rows, bool repeat_strings){
const char ** string_host_data = new const char *[num_rows];
for(gdf_size_type row_index = 0; row_index < num_rows; row_index++){
string_host_data[row_index] = new char[(num_rows + 25) / 26]; //allows string to grow depending on numbe of rows
std::string temp_string = "";
int num_chars = repeat_strings ? 1 : (row_index / 26) + 1;
char repeat_char = (26 - (row_index % 26)) + 65; //chars are Z,Y ...C,B,A,ZZ,YY,.....BBB,AAA.....
for(int char_index = 0; char_index < num_chars; char_index++){
temp_string.push_back(repeat_char);
}
temp_string.push_back(0);
std::memcpy((void *) string_host_data[row_index],temp_string.c_str(),temp_string.size());
}
NVCategory* category = NVCategory::create_from_array(string_host_data, num_rows);
gdf_column * column = new gdf_column{};
int * data;
RMM_ALLOC(&data, num_rows * sizeof(gdf_nvstring_category) , 0);
category->get_values( (int *)data, true );
bit_mask::bit_mask_t * valid;
bit_mask::create_bit_mask(&valid, num_rows,1);
gdf_error err = gdf_column_view(column,
(void *) data,
(gdf_valid_type *)valid,
num_rows,
GDF_STRING_CATEGORY);
column->dtype_info.category = category;
return column;
}
gdf_column * create_nv_category_column_strings(const char ** string_host_data, gdf_size_type num_rows){
NVCategory* category = NVCategory::create_from_array(string_host_data, num_rows);
gdf_column * column = new gdf_column{};
int * data;
RMM_ALLOC(&data, num_rows * sizeof(gdf_nvstring_category) , 0);
category->get_values( (int *)data, true );
bit_mask::bit_mask_t * valid;
bit_mask::create_bit_mask(&valid, num_rows,1);
gdf_error err = gdf_column_view(column,
(void *) data,
(gdf_valid_type *)valid,
num_rows,
GDF_STRING_CATEGORY);
column->dtype_info.category = category;
return column;
}
const char ** generate_string_data(gdf_size_type num_rows, size_t length, bool print){
const char ** string_host_data = new const char *[num_rows];
for(gdf_size_type row_index = 0; row_index < num_rows; row_index++){
string_host_data[row_index] = new char[length+1];
std::string rand_string = cudf::test::random_string(length);
rand_string.push_back(0);
if(print)
std::cout<<rand_string<<"\t";
std::memcpy((void *) string_host_data[row_index],rand_string.c_str(),rand_string.size());
}
if(print)
std::cout<<std::endl;
return string_host_data;
}
std::tuple<std::vector<std::string>, std::vector<gdf_valid_type>> nvcategory_column_to_host(gdf_column * column){
if (column->dtype == GDF_STRING_CATEGORY && column->dtype_info.category != nullptr && column->size > 0) {
NVStrings* tptr = static_cast<NVCategory*>(column->dtype_info.category)->to_strings();
unsigned int count = tptr->size();
if( count==0 )
return std::make_tuple(std::vector<std::string>(), std::vector<gdf_valid_type>());
std::vector<char*> list(count);
char** plist = list.data();
std::vector<int> lens(count);
size_t totalmem = tptr->byte_count(lens.data(),false);
std::vector<char> buffer(totalmem+count,0); // null terminates each string
char* pbuffer = buffer.data();
size_t offset = 0;
for( unsigned int idx=0; idx < count; ++idx )
{
plist[idx] = pbuffer + offset;
offset += lens[idx]+1; // account for null-terminator; also nulls are -1
}
tptr->to_host(plist,0,count);
// TODO: workaround for custrings issue #330. Remove once fix is merged
// workaround just resets the nullptr entries back to their proper offsets
// so that the std::vector constructor below can succeed.
offset = 0;
for( unsigned int idx=0; idx < count; ++idx )
{
plist[idx] = pbuffer + offset;
offset += lens[idx]+1; // account for null-terminator; also nulls are -1
}
NVStrings::destroy(tptr);
std::vector<std::string> host_strings_vector(plist, plist + column->size);
std::vector<gdf_valid_type> host_bitmask(gdf_valid_allocation_size(column->size));
if (cudf::is_nullable(*column)) {
CUDA_TRY(cudaMemcpy(host_bitmask.data(),
column->valid,
host_bitmask.size()*sizeof(gdf_valid_type),
cudaMemcpyDeviceToHost));
}
return std::make_tuple(host_strings_vector, host_bitmask);
} else {
return std::make_tuple(std::vector<std::string>(), std::vector<gdf_valid_type>());
}
}
} // namespace test
} // namespace cudf
|
ec013b639c24aa725fd6a9691ac235aba3a5e000.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cunn_CriterionFilter_updateGradInput_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *gradInput = NULL;
hipMalloc(&gradInput, XSIZE*YSIZE);
float *target = NULL;
hipMalloc(&target, XSIZE*YSIZE);
float *ignored_label = NULL;
hipMalloc(&ignored_label, XSIZE*YSIZE);
int batch_size = XSIZE*YSIZE;
int n_classes = 1;
int map_nelem = 1;
int blocks_per_sample = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
cunn_CriterionFilter_updateGradInput_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, gradInput,target,ignored_label,batch_size,n_classes,map_nelem,blocks_per_sample);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
cunn_CriterionFilter_updateGradInput_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, gradInput,target,ignored_label,batch_size,n_classes,map_nelem,blocks_per_sample);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
cunn_CriterionFilter_updateGradInput_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, gradInput,target,ignored_label,batch_size,n_classes,map_nelem,blocks_per_sample);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ec013b639c24aa725fd6a9691ac235aba3a5e000.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cunn_CriterionFilter_updateGradInput_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *gradInput = NULL;
cudaMalloc(&gradInput, XSIZE*YSIZE);
float *target = NULL;
cudaMalloc(&target, XSIZE*YSIZE);
float *ignored_label = NULL;
cudaMalloc(&ignored_label, XSIZE*YSIZE);
int batch_size = XSIZE*YSIZE;
int n_classes = 1;
int map_nelem = 1;
int blocks_per_sample = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cunn_CriterionFilter_updateGradInput_kernel<<<gridBlock,threadBlock>>>(gradInput,target,ignored_label,batch_size,n_classes,map_nelem,blocks_per_sample);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cunn_CriterionFilter_updateGradInput_kernel<<<gridBlock,threadBlock>>>(gradInput,target,ignored_label,batch_size,n_classes,map_nelem,blocks_per_sample);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cunn_CriterionFilter_updateGradInput_kernel<<<gridBlock,threadBlock>>>(gradInput,target,ignored_label,batch_size,n_classes,map_nelem,blocks_per_sample);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
d863158da9caab0ee9550eb0fe36a787d7dce752.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define _size 512
__global__ void mul(int *a, int *b, int *c)
{
c[threadIdx.x + blockIdx.x*blockDim.x] = a[threadIdx.x + blockIdx.x*blockDim.x]*b[threadIdx.x + blockIdx.x*blockDim.x];
}
int main()
{
int SIZE = _size*sizeof(int);
int *a,*b,*c;
int *d_a,*d_b,*d_c;
a = (int *)malloc(SIZE); // explicit cast required when compiled as C++/CUDA
b = (int *)malloc(SIZE);
c = (int *)malloc(SIZE);
hipMalloc((void **)&d_a,SIZE);
hipMalloc((void **)&d_b,SIZE);
hipMalloc((void **)&d_c,SIZE);
printf("Enter value of A :\n");
for(int i=0;i<_size;i++)
{
a[i]=i*2;
//printf("a[%d]\t",i);
//scanf("%d",&a[i]);
}
for(int i=0;i<_size;i++)
{
b[i]=i*2+12;
//printf("b[%d]\t",i);
//scanf("%d",&b[i]);
}
for(int i=0;i<_size;i++)
{
printf("a[%d]: %d\tb[%d]: %d\t",i,a[i],i,b[i]);
}
printf("\n");
hipMemcpy(d_a,a,SIZE,hipMemcpyHostToDevice);
hipMemcpy(d_b,b,SIZE,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( mul), dim3(2),dim3(256), 0, 0, d_a,d_b,d_c);
hipDeviceSynchronize();
hipMemcpy(c,d_c,SIZE,hipMemcpyDeviceToHost);
printf("\n");
for(int i=0;i<_size;i++)
{
printf("c[%d]: %d\t",i,c[i]);
}
return 0;
}
| d863158da9caab0ee9550eb0fe36a787d7dce752.cu | #include <stdio.h>
#define _size 512
__global__ void mul(int *a, int *b, int *c)
{
c[threadIdx.x + blockIdx.x*blockDim.x] = a[threadIdx.x + blockIdx.x*blockDim.x]*b[threadIdx.x + blockIdx.x*blockDim.x];
}
int main()
{
int SIZE = _size*sizeof(int);
int *a,*b,*c;
int *d_a,*d_b,*d_c;
a = (int *)malloc(SIZE); // explicit cast required when compiled as C++/CUDA
b = (int *)malloc(SIZE);
c = (int *)malloc(SIZE);
cudaMalloc((void **)&d_a,SIZE);
cudaMalloc((void **)&d_b,SIZE);
cudaMalloc((void **)&d_c,SIZE);
printf("Enter value of A :\n");
for(int i=0;i<_size;i++)
{
a[i]=i*2;
//printf("a[%d]\t",i);
//scanf("%d",&a[i]);
}
for(int i=0;i<_size;i++)
{
b[i]=i*2+12;
//printf("b[%d]\t",i);
//scanf("%d",&b[i]);
}
for(int i=0;i<_size;i++)
{
printf("a[%d]: %d\tb[%d]: %d\t",i,a[i],i,b[i]);
}
printf("\n");
cudaMemcpy(d_a,a,SIZE,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,b,SIZE,cudaMemcpyHostToDevice);
mul<<<2,256>>>(d_a,d_b,d_c);
cudaThreadSynchronize();
cudaMemcpy(c,d_c,SIZE,cudaMemcpyDeviceToHost);
printf("\n");
for(int i=0;i<_size;i++)
{
printf("c[%d]: %d\t",i,c[i]);
}
return 0;
}
|
0d6714afd939fb27295d7fce5d0f52bc461b62a9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void initialConditions(int n, double* x, double* y, double* z, double* vx, double* vy, double* vz, double* mass){
/* TODO */
} | 0d6714afd939fb27295d7fce5d0f52bc461b62a9.cu | #include "includes.h"
__global__ void initialConditions(int n, double* x, double* y, double* z, double* vx, double* vy, double* vz, double* mass){
/* TODO */
} |
57f5868dfd42afeaf8021408f0fd7ac312b1e5b5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//===---- reduction.cu - NVPTX OpenMP reduction implementation ---- CUDA
//-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of reduction with KMPC interface.
//
//===----------------------------------------------------------------------===//
#include <complex.h>
#include <stdio.h>
#include "common/omptarget.h"
#include "target_impl.h"
EXTERN
void __kmpc_nvptx_end_reduce(int32_t global_tid) {}
EXTERN
void __kmpc_nvptx_end_reduce_nowait(int32_t global_tid) {}
EXTERN int32_t __kmpc_shuffle_int32(int32_t val, int16_t delta, int16_t size) {
return __kmpc_impl_shfl_down_sync(__kmpc_impl_all_lanes, val, delta, size);
}
EXTERN int64_t __kmpc_shuffle_int64(int64_t val, int16_t delta, int16_t size) {
uint32_t lo, hi;
__kmpc_impl_unpack(val, lo, hi);
hi = __kmpc_impl_shfl_down_sync(__kmpc_impl_all_lanes, hi, delta, size);
lo = __kmpc_impl_shfl_down_sync(__kmpc_impl_all_lanes, lo, delta, size);
return __kmpc_impl_pack(lo, hi);
}
INLINE static void gpu_regular_warp_reduce(void *reduce_data,
kmp_ShuffleReductFctPtr shflFct) {
for (uint32_t mask = WARPSIZE / 2; mask > 0; mask /= 2) {
shflFct(reduce_data, /*LaneId - not used= */ 0,
/*Offset = */ mask, /*AlgoVersion=*/0);
}
}
INLINE static void gpu_irregular_warp_reduce(void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
uint32_t size, uint32_t tid) {
uint32_t curr_size;
uint32_t mask;
curr_size = size;
mask = curr_size / 2;
while (mask > 0) {
shflFct(reduce_data, /*LaneId = */ tid, /*Offset=*/mask, /*AlgoVersion=*/1);
curr_size = (curr_size + 1) / 2;
mask = curr_size / 2;
}
}
INLINE static uint32_t
gpu_irregular_simd_reduce(void *reduce_data, kmp_ShuffleReductFctPtr shflFct) {
uint32_t size, remote_id, physical_lane_id;
physical_lane_id = GetThreadIdInBlock() % WARPSIZE;
__kmpc_impl_lanemask_t lanemask_lt = __kmpc_impl_lanemask_lt();
__kmpc_impl_lanemask_t Liveness = __kmpc_impl_activemask();
uint32_t logical_lane_id = __kmpc_impl_popc(Liveness & lanemask_lt) * 2;
__kmpc_impl_lanemask_t lanemask_gt = __kmpc_impl_lanemask_gt();
do {
Liveness = __kmpc_impl_activemask();
remote_id = __kmpc_impl_ffs(Liveness & lanemask_gt);
size = __kmpc_impl_popc(Liveness);
logical_lane_id /= 2;
shflFct(reduce_data, /*LaneId =*/logical_lane_id,
/*Offset=*/remote_id - 1 - physical_lane_id, /*AlgoVersion=*/2);
} while (logical_lane_id % 2 == 0 && size > 1);
return (logical_lane_id == 0);
}
EXTERN
int32_t __kmpc_nvptx_simd_reduce_nowait(int32_t global_tid, int32_t num_vars,
size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct) {
__kmpc_impl_lanemask_t Liveness = __kmpc_impl_activemask();
if (Liveness == __kmpc_impl_all_lanes) {
gpu_regular_warp_reduce(reduce_data, shflFct);
return GetThreadIdInBlock() % WARPSIZE ==
0; // Result on lane 0 of the simd warp.
} else {
return gpu_irregular_simd_reduce(
reduce_data, shflFct); // Result on the first active lane.
}
}
INLINE
static int32_t nvptx_parallel_reduce_nowait(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
bool isSPMDExecutionMode, bool isRuntimeUninitialized) {
uint32_t BlockThreadId = GetLogicalThreadIdInBlock(isSPMDExecutionMode);
uint32_t NumThreads = GetNumberOfOmpThreads(isSPMDExecutionMode);
if (NumThreads == 1)
return 1;
/*
* This reduce function handles reduction within a team. It handles
* parallel regions in both L1 and L2 parallelism levels. It also
* supports Generic, SPMD, and NoOMP modes.
*
* 1. Reduce within a warp.
* 2. Warp master copies value to warp 0 via shared memory.
* 3. Warp 0 reduces to a single value.
* 4. The reduced value is available in the thread that returns 1.
*/
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE;
uint32_t WarpId = BlockThreadId / WARPSIZE;
// Volta execution model:
// For the Generic execution mode a parallel region either has 1 thread and
// beyond that, always a multiple of 32. For the SPMD execution mode we may
// have any number of threads.
if ((NumThreads % WARPSIZE == 0) || (WarpId < WarpsNeeded - 1))
gpu_regular_warp_reduce(reduce_data, shflFct);
else if (NumThreads > 1) // Only SPMD execution mode comes thru this case.
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/NumThreads % WARPSIZE,
/*LaneId=*/GetThreadIdInBlock() % WARPSIZE);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
//
// Only L1 parallel region can enter this if condition.
if (NumThreads > WARPSIZE) {
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
BlockThreadId);
}
return BlockThreadId == 0;
#else
__kmpc_impl_lanemask_t Liveness = __kmpc_impl_activemask();
if (Liveness == __kmpc_impl_all_lanes) // Full warp
gpu_regular_warp_reduce(reduce_data, shflFct);
else if (!(Liveness & (Liveness + 1))) // Partial warp but contiguous lanes
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/__kmpc_impl_popc(Liveness),
/*LaneId=*/GetThreadIdInBlock() % WARPSIZE);
else if (!isRuntimeUninitialized) // Dispersed lanes. Only threads in L2
// parallel region may enter here; return
// early.
return gpu_irregular_simd_reduce(reduce_data, shflFct);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
//
// Only L1 parallel region can enter this if condition.
if (NumThreads > WARPSIZE) {
uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE;
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
uint32_t WarpId = BlockThreadId / WARPSIZE;
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
BlockThreadId);
return BlockThreadId == 0;
} else if (isRuntimeUninitialized /* Never an L2 parallel region without the OMP runtime */) {
return BlockThreadId == 0;
}
// Get the OMP thread Id. This is different from BlockThreadId in the case of
// an L2 parallel region.
return global_tid == 0;
#endif // __CUDA_ARCH__ >= 700
}
EXTERN __attribute__((deprecated)) int32_t __kmpc_nvptx_parallel_reduce_nowait(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct,
isSPMDMode(), isRuntimeUninitialized());
}
EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait_v2(
kmp_Ident *loc, int32_t global_tid, int32_t num_vars, size_t reduce_size,
void *reduce_data, kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(
global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct,
checkSPMDMode(loc), checkRuntimeUninitialized(loc));
}
EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait_simple_spmd(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(
global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct,
/*isSPMDExecutionMode=*/true, /*isRuntimeUninitialized=*/true);
}
EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait_simple_generic(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(
global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct,
/*isSPMDExecutionMode=*/false, /*isRuntimeUninitialized=*/true);
}
INLINE
static int32_t nvptx_teams_reduce_nowait(int32_t global_tid, int32_t num_vars,
size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct,
kmp_LoadReduceFctPtr ldFct,
bool isSPMDExecutionMode) {
uint32_t ThreadId = GetLogicalThreadIdInBlock(isSPMDExecutionMode);
// In non-generic mode all workers participate in the teams reduction.
// In generic mode only the team master participates in the teams
// reduction because the workers are waiting for parallel work.
uint32_t NumThreads =
isSPMDExecutionMode ? GetNumberOfOmpThreads(/*isSPMDExecutionMode=*/true)
: /*Master thread only*/ 1;
uint32_t TeamId = GetBlockIdInKernel();
uint32_t NumTeams = GetNumberOfBlocksInKernel();
SHARED volatile bool IsLastTeam;
// Team masters of all teams write to the scratchpad.
if (ThreadId == 0) {
unsigned int *timestamp = GetTeamsReductionTimestamp();
char *scratchpad = GetTeamsReductionScratchpad();
scratchFct(reduce_data, scratchpad, TeamId, NumTeams);
__kmpc_impl_threadfence();
// atomicInc increments 'timestamp' and has a range [0, NumTeams-1].
// It resets 'timestamp' back to 0 once the last team increments
// this counter.
unsigned val = atomicInc(timestamp, NumTeams - 1);
IsLastTeam = val == NumTeams - 1;
}
// We have to wait on L1 barrier because in GENERIC mode the workers
// are waiting on barrier 0 for work.
//
// If we guard this barrier as follows it leads to deadlock, probably
// because of a compiler bug: if (!IsGenericMode()) __syncthreads();
uint16_t SyncWarps = (NumThreads + WARPSIZE - 1) / WARPSIZE;
__kmpc_impl_named_sync(L1_BARRIER, SyncWarps * WARPSIZE);
// If this team is not the last, quit.
if (/* Volatile read by all threads */ !IsLastTeam)
return 0;
//
// Last team processing.
//
// Threads in excess of #teams do not participate in reduction of the
// scratchpad values.
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
uint32_t ActiveThreads = NumThreads;
if (NumTeams < NumThreads) {
ActiveThreads =
(NumTeams < WARPSIZE) ? 1 : NumTeams & ~((uint16_t)WARPSIZE - 1);
}
if (ThreadId >= ActiveThreads)
return 0;
// Load from scratchpad and reduce.
char *scratchpad = GetTeamsReductionScratchpad();
ldFct(reduce_data, scratchpad, ThreadId, NumTeams, /*Load only*/ 0);
for (uint32_t i = ActiveThreads + ThreadId; i < NumTeams; i += ActiveThreads)
ldFct(reduce_data, scratchpad, i, NumTeams, /*Load and reduce*/ 1);
uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE;
uint32_t WarpId = ThreadId / WARPSIZE;
// Reduce across warps to the warp master.
if ((ActiveThreads % WARPSIZE == 0) ||
(WarpId < WarpsNeeded - 1)) // Full warp
gpu_regular_warp_reduce(reduce_data, shflFct);
else if (ActiveThreads > 1) // Partial warp but contiguous lanes
// Only SPMD execution mode comes thru this case.
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/ActiveThreads % WARPSIZE,
/*LaneId=*/ThreadId % WARPSIZE);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
if (ActiveThreads > WARPSIZE) {
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId);
}
#else
if (ThreadId >= NumTeams)
return 0;
// Load from scratchpad and reduce.
char *scratchpad = GetTeamsReductionScratchpad();
ldFct(reduce_data, scratchpad, ThreadId, NumTeams, /*Load only*/ 0);
for (uint32_t i = NumThreads + ThreadId; i < NumTeams; i += NumThreads)
ldFct(reduce_data, scratchpad, i, NumTeams, /*Load and reduce*/ 1);
// Reduce across warps to the warp master.
__kmpc_impl_lanemask_t Liveness = __kmpc_impl_activemask();
if (Liveness == __kmpc_impl_all_lanes) // Full warp
gpu_regular_warp_reduce(reduce_data, shflFct);
else // Partial warp but contiguous lanes
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/__kmpc_impl_popc(Liveness),
/*LaneId=*/ThreadId % WARPSIZE);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
uint32_t ActiveThreads = NumTeams < NumThreads ? NumTeams : NumThreads;
if (ActiveThreads > WARPSIZE) {
uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE;
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
uint32_t WarpId = ThreadId / WARPSIZE;
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId);
}
#endif // __CUDA_ARCH__ >= 700
return ThreadId == 0;
}
EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait(int32_t global_tid, int32_t num_vars,
size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct,
kmp_LoadReduceFctPtr ldFct) {
return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct, scratchFct,
ldFct, isSPMDMode());
}
EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait_simple_spmd(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) {
return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct, scratchFct,
ldFct, /*isSPMDExecutionMode=*/true);
}
EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait_simple_generic(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) {
return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct, scratchFct,
ldFct, /*isSPMDExecutionMode=*/false);
}
EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_simple(kmp_Ident *loc,
int32_t global_tid,
kmp_CriticalName *crit) {
if (checkSPMDMode(loc) && GetThreadIdInBlock() != 0)
return 0;
// The master thread of the team actually does the reduction.
while (atomicCAS((uint32_t *)crit, 0, 1))
;
return 1;
}
EXTERN void
__kmpc_nvptx_teams_end_reduce_nowait_simple(kmp_Ident *loc, int32_t global_tid,
kmp_CriticalName *crit) {
__kmpc_impl_threadfence_system();
(void)atomicExch((uint32_t *)crit, 0);
}
INLINE static bool isMaster(kmp_Ident *loc, uint32_t ThreadId) {
return checkGenericMode(loc) || IsTeamMaster(ThreadId);
}
INLINE static uint32_t roundToWarpsize(uint32_t s) {
if (s < WARPSIZE)
return 1;
return (s & ~(unsigned)(WARPSIZE - 1));
}
DEVICE static volatile uint32_t IterCnt = 0;
DEVICE static volatile uint32_t Cnt = 0;
EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_v2(
kmp_Ident *loc, int32_t global_tid, void *global_buffer,
int32_t num_of_records, void *reduce_data, kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct, kmp_ListGlobalFctPtr lgcpyFct,
kmp_ListGlobalFctPtr lgredFct, kmp_ListGlobalFctPtr glcpyFct,
kmp_ListGlobalFctPtr glredFct) {
// Terminate all threads in non-SPMD mode except for the master thread.
if (checkGenericMode(loc) && GetThreadIdInBlock() != GetMasterThreadID())
return 0;
uint32_t ThreadId = GetLogicalThreadIdInBlock(checkSPMDMode(loc));
// In non-generic mode all workers participate in the teams reduction.
// In generic mode only the team master participates in the teams
// reduction because the workers are waiting for parallel work.
uint32_t NumThreads =
checkSPMDMode(loc) ? GetNumberOfOmpThreads(/*isSPMDExecutionMode=*/true)
: /*Master thread only*/ 1;
uint32_t TeamId = GetBlockIdInKernel();
uint32_t NumTeams = GetNumberOfBlocksInKernel();
SHARED unsigned Bound;
SHARED unsigned ChunkTeamCount;
// Block progress for teams greater than the current upper
// limit. We always only allow a number of teams less or equal
// to the number of slots in the buffer.
bool IsMaster = isMaster(loc, ThreadId);
while (IsMaster) {
// Atomic read
Bound = atomicAdd((uint32_t *)&IterCnt, 0);
if (TeamId < Bound + num_of_records)
break;
}
if (IsMaster) {
int ModBockId = TeamId % num_of_records;
if (TeamId < num_of_records)
lgcpyFct(global_buffer, ModBockId, reduce_data);
else
lgredFct(global_buffer, ModBockId, reduce_data);
__kmpc_impl_threadfence_system();
// Increment team counter.
// This counter is incremented by all teams in the current
// BUFFER_SIZE chunk.
ChunkTeamCount = atomicInc((uint32_t *)&Cnt, num_of_records - 1);
}
// Synchronize
if (checkSPMDMode(loc))
__kmpc_barrier(loc, global_tid);
// reduce_data is global or shared so before being reduced within the
// warp we need to bring it in local memory:
// local_reduce_data = reduce_data[i]
//
// Example for 3 reduction variables a, b, c (of potentially different
// types):
//
// buffer layout (struct of arrays):
// a, a, ..., a, b, b, ... b, c, c, ... c
// |__________|
// num_of_records
//
// local_data_reduce layout (struct):
// a, b, c
//
// Each thread will have a local struct containing the values to be
// reduced:
// 1. do reduction within each warp.
// 2. do reduction across warps.
// 3. write the final result to the main reduction variable
// by returning 1 in the thread holding the reduction result.
// Check if this is the very last team.
unsigned NumRecs = min(NumTeams, num_of_records);
if (ChunkTeamCount == NumTeams - Bound - 1) {
//
// Last team processing.
//
if (ThreadId >= NumRecs)
return 0;
NumThreads = roundToWarpsize(min(NumThreads, NumRecs));
if (ThreadId >= NumThreads)
return 0;
// Load from buffer and reduce.
glcpyFct(global_buffer, ThreadId, reduce_data);
for (uint32_t i = NumThreads + ThreadId; i < NumRecs; i += NumThreads)
glredFct(global_buffer, i, reduce_data);
// Reduce across warps to the warp master.
if (NumThreads > 1) {
gpu_regular_warp_reduce(reduce_data, shflFct);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
uint32_t ActiveThreads = min(NumRecs, NumThreads);
if (ActiveThreads > WARPSIZE) {
uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE;
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
uint32_t WarpId = ThreadId / WARPSIZE;
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
ThreadId);
}
}
if (IsMaster) {
Cnt = 0;
IterCnt = 0;
return 1;
}
return 0;
}
if (IsMaster && ChunkTeamCount == num_of_records - 1) {
// Allow SIZE number of teams to proceed writing their
// intermediate results to the global buffer.
atomicAdd((uint32_t *)&IterCnt, num_of_records);
}
return 0;
}
| 57f5868dfd42afeaf8021408f0fd7ac312b1e5b5.cu | //===---- reduction.cu - NVPTX OpenMP reduction implementation ---- CUDA
//-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of reduction with KMPC interface.
//
//===----------------------------------------------------------------------===//
#include <complex.h>
#include <stdio.h>
#include "common/omptarget.h"
#include "target_impl.h"
EXTERN
void __kmpc_nvptx_end_reduce(int32_t global_tid) {}
EXTERN
void __kmpc_nvptx_end_reduce_nowait(int32_t global_tid) {}
EXTERN int32_t __kmpc_shuffle_int32(int32_t val, int16_t delta, int16_t size) {
return __kmpc_impl_shfl_down_sync(__kmpc_impl_all_lanes, val, delta, size);
}
EXTERN int64_t __kmpc_shuffle_int64(int64_t val, int16_t delta, int16_t size) {
uint32_t lo, hi;
__kmpc_impl_unpack(val, lo, hi);
hi = __kmpc_impl_shfl_down_sync(__kmpc_impl_all_lanes, hi, delta, size);
lo = __kmpc_impl_shfl_down_sync(__kmpc_impl_all_lanes, lo, delta, size);
return __kmpc_impl_pack(lo, hi);
}
INLINE static void gpu_regular_warp_reduce(void *reduce_data,
kmp_ShuffleReductFctPtr shflFct) {
for (uint32_t mask = WARPSIZE / 2; mask > 0; mask /= 2) {
shflFct(reduce_data, /*LaneId - not used= */ 0,
/*Offset = */ mask, /*AlgoVersion=*/0);
}
}
INLINE static void gpu_irregular_warp_reduce(void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
uint32_t size, uint32_t tid) {
uint32_t curr_size;
uint32_t mask;
curr_size = size;
mask = curr_size / 2;
while (mask > 0) {
shflFct(reduce_data, /*LaneId = */ tid, /*Offset=*/mask, /*AlgoVersion=*/1);
curr_size = (curr_size + 1) / 2;
mask = curr_size / 2;
}
}
INLINE static uint32_t
gpu_irregular_simd_reduce(void *reduce_data, kmp_ShuffleReductFctPtr shflFct) {
uint32_t size, remote_id, physical_lane_id;
physical_lane_id = GetThreadIdInBlock() % WARPSIZE;
__kmpc_impl_lanemask_t lanemask_lt = __kmpc_impl_lanemask_lt();
__kmpc_impl_lanemask_t Liveness = __kmpc_impl_activemask();
uint32_t logical_lane_id = __kmpc_impl_popc(Liveness & lanemask_lt) * 2;
__kmpc_impl_lanemask_t lanemask_gt = __kmpc_impl_lanemask_gt();
do {
Liveness = __kmpc_impl_activemask();
remote_id = __kmpc_impl_ffs(Liveness & lanemask_gt);
size = __kmpc_impl_popc(Liveness);
logical_lane_id /= 2;
shflFct(reduce_data, /*LaneId =*/logical_lane_id,
/*Offset=*/remote_id - 1 - physical_lane_id, /*AlgoVersion=*/2);
} while (logical_lane_id % 2 == 0 && size > 1);
return (logical_lane_id == 0);
}
EXTERN
int32_t __kmpc_nvptx_simd_reduce_nowait(int32_t global_tid, int32_t num_vars,
size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct) {
__kmpc_impl_lanemask_t Liveness = __kmpc_impl_activemask();
if (Liveness == __kmpc_impl_all_lanes) {
gpu_regular_warp_reduce(reduce_data, shflFct);
return GetThreadIdInBlock() % WARPSIZE ==
0; // Result on lane 0 of the simd warp.
} else {
return gpu_irregular_simd_reduce(
reduce_data, shflFct); // Result on the first active lane.
}
}
INLINE
static int32_t nvptx_parallel_reduce_nowait(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
bool isSPMDExecutionMode, bool isRuntimeUninitialized) {
uint32_t BlockThreadId = GetLogicalThreadIdInBlock(isSPMDExecutionMode);
uint32_t NumThreads = GetNumberOfOmpThreads(isSPMDExecutionMode);
if (NumThreads == 1)
return 1;
/*
* This reduce function handles reduction within a team. It handles
* parallel regions in both L1 and L2 parallelism levels. It also
* supports Generic, SPMD, and NoOMP modes.
*
* 1. Reduce within a warp.
* 2. Warp master copies value to warp 0 via shared memory.
* 3. Warp 0 reduces to a single value.
* 4. The reduced value is available in the thread that returns 1.
*/
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE;
uint32_t WarpId = BlockThreadId / WARPSIZE;
// Volta execution model:
// For the Generic execution mode a parallel region either has 1 thread and
// beyond that, always a multiple of 32. For the SPMD execution mode we may
// have any number of threads.
if ((NumThreads % WARPSIZE == 0) || (WarpId < WarpsNeeded - 1))
gpu_regular_warp_reduce(reduce_data, shflFct);
else if (NumThreads > 1) // Only SPMD execution mode comes thru this case.
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/NumThreads % WARPSIZE,
/*LaneId=*/GetThreadIdInBlock() % WARPSIZE);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
//
// Only L1 parallel region can enter this if condition.
if (NumThreads > WARPSIZE) {
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
BlockThreadId);
}
return BlockThreadId == 0;
#else
__kmpc_impl_lanemask_t Liveness = __kmpc_impl_activemask();
if (Liveness == __kmpc_impl_all_lanes) // Full warp
gpu_regular_warp_reduce(reduce_data, shflFct);
else if (!(Liveness & (Liveness + 1))) // Partial warp but contiguous lanes
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/__kmpc_impl_popc(Liveness),
/*LaneId=*/GetThreadIdInBlock() % WARPSIZE);
else if (!isRuntimeUninitialized) // Dispersed lanes. Only threads in L2
// parallel region may enter here; return
// early.
return gpu_irregular_simd_reduce(reduce_data, shflFct);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
//
// Only L1 parallel region can enter this if condition.
if (NumThreads > WARPSIZE) {
uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE;
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
uint32_t WarpId = BlockThreadId / WARPSIZE;
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
BlockThreadId);
return BlockThreadId == 0;
} else if (isRuntimeUninitialized /* Never an L2 parallel region without the OMP runtime */) {
return BlockThreadId == 0;
}
// Get the OMP thread Id. This is different from BlockThreadId in the case of
// an L2 parallel region.
return global_tid == 0;
#endif // __CUDA_ARCH__ >= 700
}
EXTERN __attribute__((deprecated)) int32_t __kmpc_nvptx_parallel_reduce_nowait(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct,
isSPMDMode(), isRuntimeUninitialized());
}
EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait_v2(
kmp_Ident *loc, int32_t global_tid, int32_t num_vars, size_t reduce_size,
void *reduce_data, kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(
global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct,
checkSPMDMode(loc), checkRuntimeUninitialized(loc));
}
EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait_simple_spmd(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(
global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct,
/*isSPMDExecutionMode=*/true, /*isRuntimeUninitialized=*/true);
}
EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait_simple_generic(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(
global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct,
/*isSPMDExecutionMode=*/false, /*isRuntimeUninitialized=*/true);
}
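// Cross-team reduction via a global scratchpad: every team master copies its
// team-local result into the scratchpad, an atomically incremented timestamp
// identifies the last team to arrive, and only that team loads the scratchpad
// back and reduces it to the final value (signalled by returning 1).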
INLINE
static int32_t nvptx_teams_reduce_nowait(int32_t global_tid, int32_t num_vars,
size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct,
kmp_LoadReduceFctPtr ldFct,
bool isSPMDExecutionMode) {
uint32_t ThreadId = GetLogicalThreadIdInBlock(isSPMDExecutionMode);
// In non-generic mode all workers participate in the teams reduction.
// In generic mode only the team master participates in the teams
// reduction because the workers are waiting for parallel work.
uint32_t NumThreads =
isSPMDExecutionMode ? GetNumberOfOmpThreads(/*isSPMDExecutionMode=*/true)
: /*Master thread only*/ 1;
uint32_t TeamId = GetBlockIdInKernel();
uint32_t NumTeams = GetNumberOfBlocksInKernel();
SHARED volatile bool IsLastTeam;
// Team masters of all teams write to the scratchpad.
if (ThreadId == 0) {
unsigned int *timestamp = GetTeamsReductionTimestamp();
char *scratchpad = GetTeamsReductionScratchpad();
scratchFct(reduce_data, scratchpad, TeamId, NumTeams);
__kmpc_impl_threadfence();
// atomicInc increments 'timestamp' and has a range [0, NumTeams-1].
// It resets 'timestamp' back to 0 once the last team increments
// this counter.
unsigned val = atomicInc(timestamp, NumTeams - 1);
IsLastTeam = val == NumTeams - 1;
}
// We have to wait on L1 barrier because in GENERIC mode the workers
// are waiting on barrier 0 for work.
//
// If we guard this barrier as follows it leads to deadlock, probably
// because of a compiler bug: if (!IsGenericMode()) __syncthreads();
uint16_t SyncWarps = (NumThreads + WARPSIZE - 1) / WARPSIZE;
__kmpc_impl_named_sync(L1_BARRIER, SyncWarps * WARPSIZE);
// If this team is not the last, quit.
if (/* Volatile read by all threads */ !IsLastTeam)
return 0;
//
// Last team processing.
//
// Threads in excess of #teams do not participate in reduction of the
// scratchpad values.
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
uint32_t ActiveThreads = NumThreads;
if (NumTeams < NumThreads) {
ActiveThreads =
(NumTeams < WARPSIZE) ? 1 : NumTeams & ~((uint16_t)WARPSIZE - 1);
}
if (ThreadId >= ActiveThreads)
return 0;
// Load from scratchpad and reduce.
char *scratchpad = GetTeamsReductionScratchpad();
ldFct(reduce_data, scratchpad, ThreadId, NumTeams, /*Load only*/ 0);
for (uint32_t i = ActiveThreads + ThreadId; i < NumTeams; i += ActiveThreads)
ldFct(reduce_data, scratchpad, i, NumTeams, /*Load and reduce*/ 1);
uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE;
uint32_t WarpId = ThreadId / WARPSIZE;
// Reduce across warps to the warp master.
if ((ActiveThreads % WARPSIZE == 0) ||
(WarpId < WarpsNeeded - 1)) // Full warp
gpu_regular_warp_reduce(reduce_data, shflFct);
else if (ActiveThreads > 1) // Partial warp but contiguous lanes
// Only SPMD execution mode comes thru this case.
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/ActiveThreads % WARPSIZE,
/*LaneId=*/ThreadId % WARPSIZE);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
if (ActiveThreads > WARPSIZE) {
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId);
}
#else
if (ThreadId >= NumTeams)
return 0;
// Load from scratchpad and reduce.
char *scratchpad = GetTeamsReductionScratchpad();
ldFct(reduce_data, scratchpad, ThreadId, NumTeams, /*Load only*/ 0);
for (uint32_t i = NumThreads + ThreadId; i < NumTeams; i += NumThreads)
ldFct(reduce_data, scratchpad, i, NumTeams, /*Load and reduce*/ 1);
// Reduce across warps to the warp master.
__kmpc_impl_lanemask_t Liveness = __kmpc_impl_activemask();
if (Liveness == __kmpc_impl_all_lanes) // Full warp
gpu_regular_warp_reduce(reduce_data, shflFct);
else // Partial warp but contiguous lanes
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/__kmpc_impl_popc(Liveness),
/*LaneId=*/ThreadId % WARPSIZE);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
uint32_t ActiveThreads = NumTeams < NumThreads ? NumTeams : NumThreads;
if (ActiveThreads > WARPSIZE) {
uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE;
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
uint32_t WarpId = ThreadId / WARPSIZE;
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId);
}
#endif // __CUDA_ARCH__ >= 700
return ThreadId == 0;
}
EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait(int32_t global_tid, int32_t num_vars,
size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct,
kmp_LoadReduceFctPtr ldFct) {
return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct, scratchFct,
ldFct, isSPMDMode());
}
EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait_simple_spmd(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) {
return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct, scratchFct,
ldFct, /*isSPMDExecutionMode=*/true);
}
EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait_simple_generic(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) {
return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct, scratchFct,
ldFct, /*isSPMDExecutionMode=*/false);
}
EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_simple(kmp_Ident *loc,
int32_t global_tid,
kmp_CriticalName *crit) {
if (checkSPMDMode(loc) && GetThreadIdInBlock() != 0)
return 0;
// The master thread of the team actually does the reduction.
while (atomicCAS((uint32_t *)crit, 0, 1))
;
return 1;
}
EXTERN void
__kmpc_nvptx_teams_end_reduce_nowait_simple(kmp_Ident *loc, int32_t global_tid,
kmp_CriticalName *crit) {
__kmpc_impl_threadfence_system();
(void)atomicExch((uint32_t *)crit, 0);
}
INLINE static bool isMaster(kmp_Ident *loc, uint32_t ThreadId) {
return checkGenericMode(loc) || IsTeamMaster(ThreadId);
}
INLINE static uint32_t roundToWarpsize(uint32_t s) {
if (s < WARPSIZE)
return 1;
return (s & ~(unsigned)(WARPSIZE - 1));
}
DEVICE static volatile uint32_t IterCnt = 0;
DEVICE static volatile uint32_t Cnt = 0;
EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_v2(
kmp_Ident *loc, int32_t global_tid, void *global_buffer,
int32_t num_of_records, void *reduce_data, kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct, kmp_ListGlobalFctPtr lgcpyFct,
kmp_ListGlobalFctPtr lgredFct, kmp_ListGlobalFctPtr glcpyFct,
kmp_ListGlobalFctPtr glredFct) {
// Terminate all threads in non-SPMD mode except for the master thread.
if (checkGenericMode(loc) && GetThreadIdInBlock() != GetMasterThreadID())
return 0;
uint32_t ThreadId = GetLogicalThreadIdInBlock(checkSPMDMode(loc));
// In non-generic mode all workers participate in the teams reduction.
// In generic mode only the team master participates in the teams
// reduction because the workers are waiting for parallel work.
uint32_t NumThreads =
checkSPMDMode(loc) ? GetNumberOfOmpThreads(/*isSPMDExecutionMode=*/true)
: /*Master thread only*/ 1;
uint32_t TeamId = GetBlockIdInKernel();
uint32_t NumTeams = GetNumberOfBlocksInKernel();
SHARED unsigned Bound;
SHARED unsigned ChunkTeamCount;
// Block progress for teams greater than the current upper
// limit. We always only allow a number of teams less or equal
// to the number of slots in the buffer.
bool IsMaster = isMaster(loc, ThreadId);
while (IsMaster) {
// Atomic read
Bound = atomicAdd((uint32_t *)&IterCnt, 0);
if (TeamId < Bound + num_of_records)
break;
}
if (IsMaster) {
    int ModBlockId = TeamId % num_of_records;
    if (TeamId < num_of_records)
      lgcpyFct(global_buffer, ModBlockId, reduce_data);
    else
      lgredFct(global_buffer, ModBlockId, reduce_data);
__kmpc_impl_threadfence_system();
// Increment team counter.
// This counter is incremented by all teams in the current
// BUFFER_SIZE chunk.
ChunkTeamCount = atomicInc((uint32_t *)&Cnt, num_of_records - 1);
}
// Synchronize
if (checkSPMDMode(loc))
__kmpc_barrier(loc, global_tid);
// reduce_data is global or shared so before being reduced within the
// warp we need to bring it in local memory:
// local_reduce_data = reduce_data[i]
//
// Example for 3 reduction variables a, b, c (of potentially different
// types):
//
// buffer layout (struct of arrays):
// a, a, ..., a, b, b, ... b, c, c, ... c
// |__________|
// num_of_records
//
// local_data_reduce layout (struct):
// a, b, c
//
// Each thread will have a local struct containing the values to be
// reduced:
// 1. do reduction within each warp.
// 2. do reduction across warps.
// 3. write the final result to the main reduction variable
// by returning 1 in the thread holding the reduction result.
// Check if this is the very last team.
unsigned NumRecs = min(NumTeams, num_of_records);
if (ChunkTeamCount == NumTeams - Bound - 1) {
//
// Last team processing.
//
if (ThreadId >= NumRecs)
return 0;
NumThreads = roundToWarpsize(min(NumThreads, NumRecs));
if (ThreadId >= NumThreads)
return 0;
// Load from buffer and reduce.
glcpyFct(global_buffer, ThreadId, reduce_data);
for (uint32_t i = NumThreads + ThreadId; i < NumRecs; i += NumThreads)
glredFct(global_buffer, i, reduce_data);
// Reduce across warps to the warp master.
if (NumThreads > 1) {
gpu_regular_warp_reduce(reduce_data, shflFct);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
uint32_t ActiveThreads = min(NumRecs, NumThreads);
if (ActiveThreads > WARPSIZE) {
uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE;
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
uint32_t WarpId = ThreadId / WARPSIZE;
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
ThreadId);
}
}
if (IsMaster) {
Cnt = 0;
IterCnt = 0;
return 1;
}
return 0;
}
if (IsMaster && ChunkTeamCount == num_of_records - 1) {
// Allow SIZE number of teams to proceed writing their
// intermediate results to the global buffer.
atomicAdd((uint32_t *)&IterCnt, num_of_records);
}
return 0;
}
|
ee198d2c3682ca9d79ffeb73cd1ec2d6a1434330.hip | // !!! This is a file automatically generated by hipify!!!
/* getgpuinfo.cu -- Prints information of the installed CUDA GPU-card(s). */
/* A. Goude 2011-04-12 */
#include "hip/hip_runtime.h"
#include "mex.h"
/*------------------------------------------------------------------------*/
void cudasafe(hipError_t error,char* message)
/* Function-call wrapper. */
{
if (error != hipSuccess) {
mexPrintf("ERROR: %s : %i\n",message,error);
exit(-1);
}
}
/*------------------------------------------------------------------------*/
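/* MEX gateway: loops over all installed GPU devices and prints their
   properties (memory sizes, launch limits, compute capability, PCI ids)
   to the MATLAB console. */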
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
hipDeviceProp_t info;
int count,i;
cudasafe(hipGetDeviceCount(&count),"hipGetDeviceCount");
for(i = 0; i < count; i++) {
cudasafe(hipGetDeviceProperties(&info,i),"hipGetDeviceProperties");
mexPrintf("\nDevice #%d\n",i);
mexPrintf("---------\n");
mexPrintf("Device name:\t\t%s\n",info.name);
mexPrintf("totalGlobalMem:\t\t%d bytes\n",info.totalGlobalMem);
mexPrintf("sharedMemPerBlock:\t%d bytes\n",info.sharedMemPerBlock);
mexPrintf("regsPerBlock:\t\t%d\n",info.regsPerBlock);
mexPrintf("warpSize:\t\t%d threads\n",info.warpSize);
mexPrintf("memPitch:\t\t%d bytes\n",info.memPitch);
mexPrintf("maxThreadsPerBlock:\t%d\n",info.maxThreadsPerBlock);
mexPrintf("maxThreadsDim:\t\t[%d %d %d]\n",
info.maxThreadsDim[0],
info.maxThreadsDim[1],
info.maxThreadsDim[2]);
mexPrintf("maxGridSize:\t\t[%d %d %d]\n",
info.maxGridSize[0],
info.maxGridSize[1],
info.maxGridSize[2]);
mexPrintf("totalConstMem:\t\t%d bytes\n\n",info.totalConstMem);
mexPrintf("Compute Capability:\t%d.%d\n",info.major,info.minor);
mexPrintf("clockRate:\t\t%d kHz\n",info.clockRate);
mexPrintf("textureAlignment:\t%d\n",info.textureAlignment);
mexPrintf("deviceOverlap:\t\t%d\n",info.deviceOverlap);
mexPrintf("multiProcessorCount:\t%d\n",info.multiProcessorCount);
if (info.kernelExecTimeoutEnabled)
mexPrintf("kernelExecTimeout:\tEnabled\n");
else
mexPrintf("kernelExecTimeout:\tDisabled\n");
if (info.integrated)
mexPrintf("integrated:\t\tmotherboard GPU\n");
else
mexPrintf("integrated:\t\tcomponent\n");
mexPrintf("canMapHostMemory:\t%d\n",info.canMapHostMemory);
switch (info.computeMode) {
case hipComputeModeDefault:
mexPrintf("computeMode:\t\tcudaComputeModeDefault\n"); break;
case hipComputeModeExclusive:
mexPrintf("computeMode:\t\tcudaComputeModeExclusive\n"); break;
case hipComputeModeProhibited:
mexPrintf("computeMode:\t\tcudaComputeModeProhibited\n"); break;
default:
mexPrintf("computeMode:\t\tUNKNOWN\n"); break;
}
mexPrintf("maxTexture1D:\t\t%d\n",info.maxTexture1D);
mexPrintf("maxTexture2D:\t\t[%d %d]\n\n",
info.maxTexture2D[0],
info.maxTexture2D[1]);
mexPrintf("maxTexture3D:\t\t[%d %d %d]\n",
info.maxTexture3D[0],
info.maxTexture3D[1],
info.maxTexture3D[2]);
/*
mexPrintf("maxTexture2DArray:\t[%d %d %d]\n",
info.maxTexture2DArray[0],
info.maxTexture2DArray[1],
info.maxTexture2DArray[2]);
*/
mexPrintf("concurrentKernels:\t%d\n",info.concurrentKernels);
mexPrintf("ECCEnabled:\t\t%d\n",info.ECCEnabled);
mexPrintf("pciBusID:\t\t%d\n",info.pciBusID);
mexPrintf("pciDeviceID:\t\t%d\n",info.pciDeviceID);
mexPrintf("tccDriver:\t\t%d\n",info.tccDriver);
}
}
/*------------------------------------------------------------------------*/
| ee198d2c3682ca9d79ffeb73cd1ec2d6a1434330.cu | /* getgpuinfo.cu -- Prints information of the installed CUDA GPU-card(s). */
/* A. Goude 2011-04-12 */
#include "cuda.h"
#include "mex.h"
/*------------------------------------------------------------------------*/
void cudasafe(cudaError_t error,char* message)
/* Function-call wrapper. */
{
if (error != cudaSuccess) {
mexPrintf("ERROR: %s : %i\n",message,error);
exit(-1);
}
}
/*------------------------------------------------------------------------*/
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
cudaDeviceProp info;
int count,i;
cudasafe(cudaGetDeviceCount(&count),"cudaGetDeviceCount");
for(i = 0; i < count; i++) {
cudasafe(cudaGetDeviceProperties(&info,i),"cudaGetDeviceProperties");
mexPrintf("\nDevice #%d\n",i);
mexPrintf("---------\n");
mexPrintf("Device name:\t\t%s\n",info.name);
mexPrintf("totalGlobalMem:\t\t%d bytes\n",info.totalGlobalMem);
mexPrintf("sharedMemPerBlock:\t%d bytes\n",info.sharedMemPerBlock);
mexPrintf("regsPerBlock:\t\t%d\n",info.regsPerBlock);
mexPrintf("warpSize:\t\t%d threads\n",info.warpSize);
mexPrintf("memPitch:\t\t%d bytes\n",info.memPitch);
mexPrintf("maxThreadsPerBlock:\t%d\n",info.maxThreadsPerBlock);
mexPrintf("maxThreadsDim:\t\t[%d %d %d]\n",
info.maxThreadsDim[0],
info.maxThreadsDim[1],
info.maxThreadsDim[2]);
mexPrintf("maxGridSize:\t\t[%d %d %d]\n",
info.maxGridSize[0],
info.maxGridSize[1],
info.maxGridSize[2]);
mexPrintf("totalConstMem:\t\t%d bytes\n\n",info.totalConstMem);
mexPrintf("Compute Capability:\t%d.%d\n",info.major,info.minor);
mexPrintf("clockRate:\t\t%d kHz\n",info.clockRate);
mexPrintf("textureAlignment:\t%d\n",info.textureAlignment);
mexPrintf("deviceOverlap:\t\t%d\n",info.deviceOverlap);
mexPrintf("multiProcessorCount:\t%d\n",info.multiProcessorCount);
if (info.kernelExecTimeoutEnabled)
mexPrintf("kernelExecTimeout:\tEnabled\n");
else
mexPrintf("kernelExecTimeout:\tDisabled\n");
if (info.integrated)
mexPrintf("integrated:\t\tmotherboard GPU\n");
else
mexPrintf("integrated:\t\tcomponent\n");
mexPrintf("canMapHostMemory:\t%d\n",info.canMapHostMemory);
switch (info.computeMode) {
case cudaComputeModeDefault:
mexPrintf("computeMode:\t\tcudaComputeModeDefault\n"); break;
case cudaComputeModeExclusive:
mexPrintf("computeMode:\t\tcudaComputeModeExclusive\n"); break;
case cudaComputeModeProhibited:
mexPrintf("computeMode:\t\tcudaComputeModeProhibited\n"); break;
default:
mexPrintf("computeMode:\t\tUNKNOWN\n"); break;
}
mexPrintf("maxTexture1D:\t\t%d\n",info.maxTexture1D);
mexPrintf("maxTexture2D:\t\t[%d %d]\n\n",
info.maxTexture2D[0],
info.maxTexture2D[1]);
mexPrintf("maxTexture3D:\t\t[%d %d %d]\n",
info.maxTexture3D[0],
info.maxTexture3D[1],
info.maxTexture3D[2]);
/*
mexPrintf("maxTexture2DArray:\t[%d %d %d]\n",
info.maxTexture2DArray[0],
info.maxTexture2DArray[1],
info.maxTexture2DArray[2]);
*/
mexPrintf("concurrentKernels:\t%d\n",info.concurrentKernels);
mexPrintf("ECCEnabled:\t\t%d\n",info.ECCEnabled);
mexPrintf("pciBusID:\t\t%d\n",info.pciBusID);
mexPrintf("pciDeviceID:\t\t%d\n",info.pciDeviceID);
mexPrintf("tccDriver:\t\t%d\n",info.tccDriver);
}
}
/*------------------------------------------------------------------------*/
|
065390453131465303e0f39fef3a33685dc4c1ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
void addWithCuda(int *c, const int *a, const int *b, size_t size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = blockIdx.x;
c[i] = a[i] + b[i];
}
int maindddd()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
addWithCuda(c, a, b, arraySize);
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",c[0],c[1],c[2],c[3],c[4]);
hipDeviceReset();
return 0;
}
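// Helper: copies a and b to the device, launches one single-thread addKernel per
// element on its own stream (five streams in this example), then copies the
// element-wise sums back into c.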
void addWithCuda(int *c, const int *a, const int *b, size_t size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipSetDevice(0);
hipMalloc((void**)&dev_c, size * sizeof(int));
hipMalloc((void**)&dev_a, size * sizeof(int));
hipMalloc((void**)&dev_b, size * sizeof(int));
hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
hipStream_t stream[5];
for(int i = 0;i<5;i++)
{
hipStreamCreate(&stream[i]);
}
for(int i = 0;i<5;i++)
hipLaunchKernelGGL(( addKernel), dim3(1),dim3(1),0,stream[i], dev_c+i, dev_a+i, dev_b+i);
hipDeviceSynchronize();
hipDeviceSynchronize();
hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
for(int i=0;i<5;i++)
{
hipStreamDestroy(stream[i]);
}
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
}
| 065390453131465303e0f39fef3a33685dc4c1ac.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
void addWithCuda(int *c, const int *a, const int *b, size_t size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = blockIdx.x;
c[i] = a[i] + b[i];
}
int maindddd()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
addWithCuda(c, a, b, arraySize);
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",c[0],c[1],c[2],c[3],c[4]);
cudaThreadExit();
return 0;
}
void addWithCuda(int *c, const int *a, const int *b, size_t size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaSetDevice(0);
cudaMalloc((void**)&dev_c, size * sizeof(int));
cudaMalloc((void**)&dev_a, size * sizeof(int));
cudaMalloc((void**)&dev_b, size * sizeof(int));
cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
cudaStream_t stream[5];
for(int i = 0;i<5;i++)
{
cudaStreamCreate(&stream[i]);
}
for(int i = 0;i<5;i++)
addKernel<<<1,1,0,stream[i]>>>(dev_c+i, dev_a+i, dev_b+i);
cudaDeviceSynchronize();
cudaThreadSynchronize();
cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
for(int i=0;i<5;i++)
{
cudaStreamDestroy(stream[i]);
}
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
}
|
d171a843da897efbe769b0a8241bbbda8eb6e396.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
const int SWEEPS = 100;
void printArray(int* arr, int size) {
for (int i = 0; i < size; i++) {
printf("%d ", arr[i]);
}
printf("\n");
}
int getSize(FILE* fptr) {
int size = 0;
int temp;
while (fscanf(fptr, "%d", &temp) == 1) {
size++;
}
return size;
}
void getArray(FILE* fptr, int* arr) {
int i = 0;
while (fscanf(fptr, "%d", &arr[i]) == 1) {
i++;
}
}
// CUDA kernel for performing Ising Annealing in parallel
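// Each thread owns one vertex: it accumulates the local field H from its
// neighbours' spins and coupling weights J (adjacency stored CSR-like in
// d_flat/d_places), then sets its spin to the energy-minimising orientation,
// choosing randomly on a tie. Up to N randomly picked spins are additionally
// flipped to escape local minima; the host shrinks N by 10% per sweep, which
// forms the annealing schedule.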
__global__ void isingAnnealingStep(int* d_flat, int* d_places, int* d_state, int* count, int vertices, int N){
*count = N;
int vertex = blockDim.x*blockIdx.x + threadIdx.x;
if(vertex < vertices){
int sigmaI = d_state[vertex];
int H = 0;
for (int i = d_places[vertex]; i < d_places[vertex + 1]; i += 2) {
int sigmaJ = d_state[d_flat[i]];
int J = d_flat[i + 1];
H -= (J * sigmaI * sigmaJ);
}
__syncthreads();
hiprandState_t random;
hiprand_init(0, 0, 0, &random);
int s = H / sigmaI;
if (s > 0) {
d_state[vertex] = -1;
}
else if (s < 0) {
d_state[vertex] = 1;
}
else {
d_state[vertex] = 1 - 2 * (hiprand(&random) % 2);
}
__syncthreads();
hiprand_init(0, 0, 0, &random);
if(*count > 0){
int index = hiprand(&random) % vertices;
if (d_state[index] == 1) {
d_state[index] = -1;
}
else {
d_state[index] = 1;
}
            (*count)--;
}
}
}
// utility function to compute Hamiltonian given a vertex
// ***not used in the parallel implementation***
int computeHamiltonian(int* flat, int* places, int* state, int vertex) {
int sigmaI = state[vertex];
int H = 0;
for (int i = places[vertex]; i < places[vertex + 1]; i += 2) {
int sigmaJ = state[flat[i]];
int J = flat[i + 1];
H -= (J * sigmaI * sigmaJ);
}
return H;
}
int main(){
FILE* fptr;
// load array $flat
fptr = fopen("flat.txt", "r");
if (fptr == NULL)
{
printf("Error!");
return -1;
}
// get number of integers for dynamic memory allocation on host
int fsize = getSize(fptr);
printf("Number of integers present in flat.txt: %d\n", fsize);
// read values in text file to array $flat
int* flat = (int*)malloc(sizeof(int) * fsize);
fseek(fptr, 0, SEEK_SET);
getArray(fptr, flat);
//printArray(flat, fsize);
fclose(fptr);
/**********/
// load array $places
fptr = fopen("places.txt", "r");
if (fptr == NULL)
{
printf("Error!");
return -1;
}
// get number of integers for dynamic memory allocation on host
int psize = getSize(fptr);
printf("Number of integers present in places.txt: %d\n", psize);
// read values in text file to array $places
int* places = (int*)malloc(sizeof(int) * psize);
fseek(fptr, 0, SEEK_SET);
getArray(fptr, places);
//printArray(places, psize);
fclose(fptr);
/**********/
int vertices = psize - 1;
int* state = (int*)malloc(sizeof(int) * vertices);
printf("Number of vertices: %d\n", vertices);
// initialize states randomly
for (int i = 0; i < vertices; ++i) {
state[i] = 1 - 2 * (rand() % 2);
}
double initial_energy = 0;
for (int i = 0; i < vertices; i++) {
int H = computeHamiltonian(flat, places, state, i);
initial_energy += H;
}
printf("Initial energy: %f\n", initial_energy / 2);
// allocate memory on device
int* d_flat;
int* d_places;
int* d_state;
int* count;
hipMalloc((void**)&count, sizeof(int));
hipMalloc((void**)&d_flat, fsize * sizeof(int));
hipMalloc((void**)&d_places, psize*sizeof(int));
hipMalloc((void**)&d_state, vertices*sizeof(int));
hipMemcpy(d_flat, flat, fsize*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_places, places, psize*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_state, state, vertices*sizeof(int), hipMemcpyHostToDevice);
int threads = 1024;
int blocks = (vertices / 1024) + 1;
int N = vertices*0.75;
clock_t begin = clock();
for (int i = 0; i < SWEEPS; i++) {
hipLaunchKernelGGL(( isingAnnealingStep), dim3(blocks), dim3(threads), 0, 0, d_flat, d_places, d_state, count, vertices, N);
//hipDeviceSynchronize();
N *= 0.9;
}
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
hipMemcpy(flat, d_flat, fsize*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(places, d_places, psize*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(state, d_state, vertices*sizeof(int), hipMemcpyDeviceToHost);
double final_energy = 0;
for (int i = 0; i < vertices; i++) {
int H = computeHamiltonian(flat, places, state, i);
final_energy += H;
}
printf("Final energy: %f\n", final_energy / 2);
printf("Time taken for parallel Ising Annealing: %f seconds\n", time_spent);
// store results in txt files
// $time_spent $initial_energy $final_energy
printf("Storing parallel code's results in results.txt...\n");
fptr = fopen("results.txt", "a");
fprintf(fptr, "Parallel\t%d\t%f\t%f\t%f\n", vertices, time_spent, initial_energy, final_energy);
fclose(fptr);
printf("Finished writing to results.txt\n");
// free dynamic memory on host and device
free(flat);
free(places);
free(state);
hipFree(d_flat);
hipFree(d_places);
hipFree(d_state);
return 0;
} | d171a843da897efbe769b0a8241bbbda8eb6e396.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
const int SWEEPS = 100;
void printArray(int* arr, int size) {
for (int i = 0; i < size; i++) {
printf("%d ", arr[i]);
}
printf("\n");
}
int getSize(FILE* fptr) {
int size = 0;
int temp;
while (fscanf(fptr, "%d", &temp) == 1) {
size++;
}
return size;
}
void getArray(FILE* fptr, int* arr) {
int i = 0;
while (fscanf(fptr, "%d", &arr[i]) == 1) {
i++;
}
}
// CUDA kernel for performing Ising Annealing in parallel
__global__ void isingAnnealingStep(int* d_flat, int* d_places, int* d_state, int* count, int vertices, int N){
*count = N;
int vertex = blockDim.x*blockIdx.x + threadIdx.x;
if(vertex < vertices){
int sigmaI = d_state[vertex];
int H = 0;
for (int i = d_places[vertex]; i < d_places[vertex + 1]; i += 2) {
int sigmaJ = d_state[d_flat[i]];
int J = d_flat[i + 1];
H -= (J * sigmaI * sigmaJ);
}
__syncthreads();
curandState_t random;
curand_init(0, 0, 0, &random);
int s = H / sigmaI;
if (s > 0) {
d_state[vertex] = -1;
}
else if (s < 0) {
d_state[vertex] = 1;
}
else {
d_state[vertex] = 1 - 2 * (curand(&random) % 2);
}
__syncthreads();
curand_init(0, 0, 0, &random);
if(*count > 0){
int index = curand(&random) % vertices;
if (d_state[index] == 1) {
d_state[index] = -1;
}
else {
d_state[index] = 1;
}
            (*count)--;
}
}
}
// utility function to compute Hamiltonian given a vertex
// ***not used in the parallel implementation***
int computeHamiltonian(int* flat, int* places, int* state, int vertex) {
int sigmaI = state[vertex];
int H = 0;
for (int i = places[vertex]; i < places[vertex + 1]; i += 2) {
int sigmaJ = state[flat[i]];
int J = flat[i + 1];
H -= (J * sigmaI * sigmaJ);
}
return H;
}
int main(){
FILE* fptr;
// load array $flat
fptr = fopen("flat.txt", "r");
if (fptr == NULL)
{
printf("Error!");
return -1;
}
// get number of integers for dynamic memory allocation on host
int fsize = getSize(fptr);
printf("Number of integers present in flat.txt: %d\n", fsize);
// read values in text file to array $flat
int* flat = (int*)malloc(sizeof(int) * fsize);
fseek(fptr, 0, SEEK_SET);
getArray(fptr, flat);
//printArray(flat, fsize);
fclose(fptr);
/**********/
// load array $places
fptr = fopen("places.txt", "r");
if (fptr == NULL)
{
printf("Error!");
return -1;
}
// get number of integers for dynamic memory allocation on host
int psize = getSize(fptr);
printf("Number of integers present in places.txt: %d\n", psize);
// read values in text file to array $places
int* places = (int*)malloc(sizeof(int) * psize);
fseek(fptr, 0, SEEK_SET);
getArray(fptr, places);
//printArray(places, psize);
fclose(fptr);
/**********/
int vertices = psize - 1;
int* state = (int*)malloc(sizeof(int) * vertices);
printf("Number of vertices: %d\n", vertices);
// initialize states randomly
for (int i = 0; i < vertices; ++i) {
state[i] = 1 - 2 * (rand() % 2);
}
double initial_energy = 0;
for (int i = 0; i < vertices; i++) {
int H = computeHamiltonian(flat, places, state, i);
initial_energy += H;
}
printf("Initial energy: %f\n", initial_energy / 2);
// allocate memory on device
int* d_flat;
int* d_places;
int* d_state;
int* count;
cudaMalloc((void**)&count, sizeof(int));
cudaMalloc((void**)&d_flat, fsize * sizeof(int));
cudaMalloc((void**)&d_places, psize*sizeof(int));
cudaMalloc((void**)&d_state, vertices*sizeof(int));
cudaMemcpy(d_flat, flat, fsize*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_places, places, psize*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_state, state, vertices*sizeof(int), cudaMemcpyHostToDevice);
int threads = 1024;
int blocks = (vertices / 1024) + 1;
int N = vertices*0.75;
clock_t begin = clock();
for (int i = 0; i < SWEEPS; i++) {
isingAnnealingStep<<<blocks, threads>>>(d_flat, d_places, d_state, count, vertices, N);
//cudaDeviceSynchronize();
N *= 0.9;
}
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
cudaMemcpy(flat, d_flat, fsize*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(places, d_places, psize*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(state, d_state, vertices*sizeof(int), cudaMemcpyDeviceToHost);
double final_energy = 0;
for (int i = 0; i < vertices; i++) {
int H = computeHamiltonian(flat, places, state, i);
final_energy += H;
}
printf("Final energy: %f\n", final_energy / 2);
printf("Time taken for parallel Ising Annealing: %f seconds\n", time_spent);
// store results in txt files
// $time_spent $initial_energy $final_energy
printf("Storing parallel code's results in results.txt...\n");
fptr = fopen("results.txt", "a");
fprintf(fptr, "Parallel\t%d\t%f\t%f\t%f\n", vertices, time_spent, initial_energy, final_energy);
fclose(fptr);
printf("Finished writing to results.txt\n");
// free dynamic memory on host and device
free(flat);
free(places);
free(state);
cudaFree(d_flat);
cudaFree(d_places);
cudaFree(d_state);
return 0;
} |
8956dab766fba744647eca84019e4b355e8a92f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "raytracer.cuh"
__device__ float vector_distance(Vector3d v1, Vector3d v2){
    return pow(pow(v1(0) - v2(0), 2) + pow(v1(1) - v2(1), 2) + pow(v1(2) - v2(2), 2), 0.5f);
}
__device__ float vertex_vector_distance(vertex v1, Vector3d v2){
    return pow(pow(v1.x - v2(0), 2) + pow(v1.y - v2(1), 2) + pow(v1.z - v2(2), 2), 0.5f);
}
__device__ Vector3d vertexToVector(vertex v){
return Vector3d(v.x, v.y, v.z);
}
// For each face, we must check for intersection. We store the closest intersection
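// The test builds the plane through the triangle's three vertices, intersects the
// ray with that plane, and accepts the hit only if the point lies on the inner side
// of all three edges (cross-product sign tests against the face normal). For the
// closest hit, the shading normal is interpolated from the per-vertex normals via
// barycentric coordinates.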
__device__ bool intersect(object *obj, ray r, vertex *pointHit, vertex *normalHit){
bool intersection = false;
float minDistance = FLT_MAX;
for(int i = 0; i < obj->faces.size(); i++){
//Calculate the plane that the vertexes of the face lie on
Vector3d A = vertexToVector(*(obj->vertexes[obj->faces[i]->v1]));
Vector3d B = vertexToVector(*(obj->vertexes[obj->faces[i]->v2]));
Vector3d C = vertexToVector(*(obj->vertexes[obj->faces[i]->v3]));
Vector3d BA = A - B;
Vector3d CA = A - C;
Vector3d normal = BA.cross(CA);
float d = normal.dot(A);
// Check that the direction of the camera is not parallel to the plane
if(normal.dot(r.direction) != 0){
float t = (d - normal.dot(r.origin)) / normal.dot(r.direction);
// Calculate the point of intersection with the plane, and then use
// barycentric coordinates to see if we are in the face itself
Vector3d q = r.origin + t * r.direction;
if(((B - A).cross(q - A).dot(normal)) >= 0){
if(((C - B).cross(q - B).dot(normal)) >= 0){
if(((A - C).cross(q - C).dot(normal)) >= 0){
intersection = true;
// If we do have an intersection that is closer than previous intersections
// , calculate the interpolated normal so
// that we can calculate the lighting
if(vector_distance(q, r.origin) < minDistance){
minDistance = vector_distance(q, r.origin);
pointHit->x = q(0);
pointHit->y = q(1);
pointHit->z = q(2);
float bary_constant = ((B - A).cross(C - A)).dot(normal);
float alpha = ((C - B).cross(q - B)).dot(normal) / bary_constant;
float beta = ((A - C).cross(q - C)).dot(normal) / bary_constant;
float gamma = ((B - A).cross(q - A)).dot(normal) / bary_constant;
vertex n1 = *(obj->normal_vertexes[obj->faces[i]->n1]);
vertex n2 = *(obj->normal_vertexes[obj->faces[i]->n2]);
vertex n3 = *(obj->normal_vertexes[obj->faces[i]->n3]);
Vector3d point1 = alpha * vertexToVector(n1);
Vector3d point2 = beta * vertexToVector(n2);
Vector3d point3 = gamma * vertexToVector(n3);
Vector3d interpolated_normal = point1 + point2 + point3;
interpolated_normal.normalize();
normalHit->x = interpolated_normal(0);
normalHit->y = interpolated_normal(1);
normalHit->z = interpolated_normal(2);
}
}
}
}
}
}
return intersection;
}
__device__ Vector3d component_wise_product(Vector3d a, Vector3d b){
float comp1 = a(0) * b(0);
float comp2 = a(1) * b(1);
float comp3 = a(2) * b(2);
Vector3d vec(comp1, comp2, comp3);
return vec;
}
//Calculates lighting
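// Blinn-Phong style shading: per light, the contribution is attenuated by
// 1 / (1 + k * d^2); the diffuse term uses max(0, n.l), the specular term uses
// max(0, n.h)^shine with the halfway vector h. The sums are modulated by the
// material colours, added to the ambient term and clamped so no channel exceeds 1.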
__device__ color lighting(vertex p, vertex normal, material m, vector<light*> lights,
vertex c){
Vector3d diffuse(m.diffuse.r, m.diffuse.g, m.diffuse.b);
Vector3d ambient(m.ambient.r, m.ambient.g, m.ambient.b);
Vector3d specular(m.specular.r, m.specular.g, m.specular.b);
float shine = m.shine.p;
Vector3d diffuse_sum(0, 0, 0);
Vector3d specular_sum(0, 0, 0);
Vector3d position(p.x, p.y, p.z);
Vector3d camera_pos(c.x, c.y, c.z);
Vector3d n(normal.x, normal.y, normal.z);
Vector3d direction = camera_pos - position;
direction.normalize();
for(int i = 0; i < lights.size(); i++){
Vector3d light_pos(lights[i]->x, lights[i]->y ,lights[i]->z);
Vector3d light_color(lights[i]->r, lights[i]->g, lights[i]->b);
Vector3d light_direction(light_pos - position);
float distance = sqrt(light_direction(0) * light_direction(0) +
light_direction(1) * light_direction(1) +
light_direction(2) * light_direction(2));
float attenuation = 1 / (1 + lights[i]->k * pow(distance, 2));
light_color *= attenuation;
light_direction.normalize();
Vector3d light_diffuse;
if(n.dot(light_direction) < 0){
light_diffuse << 0, 0, 0;
}
else{
light_diffuse = light_color * n.dot(light_direction);
}
diffuse_sum += light_diffuse;
Vector3d light_specular;
Vector3d normalized_direction = direction + light_direction;
normalized_direction.normalize();
if(n.dot(normalized_direction) < 0){
light_specular << 0, 0, 0;
}
else{
light_specular = light_color * pow(n.dot(normalized_direction), shine);
}
specular_sum += light_specular;
}
Vector3d col = ambient + component_wise_product(diffuse_sum, diffuse) +
component_wise_product(specular_sum, specular);
if(col(0) > 1){
col(0) = 1;
}
if(col(1) > 1){
col(1) = 1;
}
if(col(2) > 1){
col(2) = 1;
}
color final_color;
final_color.r = col(0) * 255;
final_color.g = col(1) * 255;
final_color.b = col(2) * 255;
return final_color;
}
__global__
void cudaRaytraceKernel(const object *objects, const light *lights, const camera *c,
color *pixels, const Vector3d e1, const Vector3d e2, const Vector3d e3, float xres, float yres){
uint idx = threadIdx.x + blockIdx.x * blockDim.x;
// While we are still dealing with pixels inside of our block
while(idx < xres * yres){
// Calculate the direction of the ray (origin is always position of camera)
// The original direction has been presumed to be (0, 0, -1), and this
// has been adjusted by the camera orientation outside of the kernel
ray r;
r.origin(0) = camera_pos(0);
r.origin(1) = camera_pos(1);
r.origin(2) = camera_pos(2);
int curr_x = idx % xres;
int curr_y = idx / xres;
r.direction = c->near * e1 + (curr_x - xres / 2) * 2 * c->right / xres * e2 + e3 * (-curr_y + yres / 2) * 2 * c->top / yres;
r.direction.normalize();
// Store the closest pointHit, as well as the normal hit, and the object hit
vertex pointHit;
vertex normalHit;
object *closestObj = NULL;
// For every object, we will attempt see if the ray and the object intersect.
// This is done by for every face of every object, we will attempt to see if
// the ray and the face intersect
for(int i = 0; i < objects.size(); i++){
if(intersect(objects[i], r, &pointHit, &normalHit)){
if(vertex_vector_distance(pointHit, r.origin) < minDistance){
closestObj = objects[i];
minDistance = vertex_vector_distance(pointHit, r.origin);
}
}
}
if(closestObj != NULL){
vector<light*> lightsAtPixel;
for(int i = 0; i < lights.size(); i++){
ray shadowRay;
bool shadow;
shadowRay.origin(0) = pointHit.x;
shadowRay.origin(1) = pointHit.y;
shadowRay.origin(2) = pointHit.z;
shadowRay.direction(0) = lights[i]->x - pointHit.x;
shadowRay.direction(1) = lights[i]->y - pointHit.y;
shadowRay.direction(2) = lights[i]->z - pointHit.z;
shadowRay.direction.normalize();
vertex point;
vertex normal;
for(int j = 0; j < objects.size(); j++){
if(intersect(objects[j], shadowRay, &point, &normal)){
if(vector_distance(vertexToVector(point), vertexToVector(pointHit)) < .1){
shadow = true;
}
break;
}
}
if(!shadow){
lightsAtPixel.push_back(lights[i]);
}
}
}
idx += blockDim.x * gridDim.x;
}
}
void cudaCallRaytraceKernel(const int blocks, const int threadsPerBlock, const object *objects, const light *lights, const camera *c,
color *pixels, const Vector3d e1, const Vector3d e2, const Vector3d e3, float xres, float yres){
    hipLaunchKernelGGL(cudaRaytraceKernel, dim3(blocks), dim3(threadsPerBlock), 0, 0, objects, lights, c, pixels,
                       e1, e2, e3, xres, yres);
}
| 8956dab766fba744647eca84019e4b355e8a92f5.cu | #include "raytracer.cuh"
__device__ float vector_distance(Vector3d v1, Vector3d v2){
    return pow(pow(v1(0) - v2(0), 2) + pow(v1(1) - v2(1), 2) + pow(v1(2) - v2(2), 2), 0.5f);
}
__device__ float vertex_vector_distance(vertex v1, Vector3d v2){
    return pow(pow(v1.x - v2(0), 2) + pow(v1.y - v2(1), 2) + pow(v1.z - v2(2), 2), 0.5f);
}
__device__ Vector3d vertexToVector(vertex v){
return Vector3d(v.x, v.y, v.z);
}
// For each face, we must check for intersection. We store the closest intersection
__device__ bool intersect(object *obj, ray r, vertex *pointHit, vertex *normalHit){
bool intersection = false;
float minDistance = FLT_MAX;
for(int i = 0; i < obj->faces.size(); i++){
//Calculate the plane that the vertexes of the face lie on
Vector3d A = vertexToVector(*(obj->vertexes[obj->faces[i]->v1]));
Vector3d B = vertexToVector(*(obj->vertexes[obj->faces[i]->v2]));
Vector3d C = vertexToVector(*(obj->vertexes[obj->faces[i]->v3]));
Vector3d BA = A - B;
Vector3d CA = A - C;
Vector3d normal = BA.cross(CA);
float d = normal.dot(A);
// Check that the direction of the camera is not parallel to the plane
if(normal.dot(r.direction) != 0){
float t = (d - normal.dot(r.origin)) / normal.dot(r.direction);
// Calculate the point of intersection with the plane, and then use
// barycentric coordinates to see if we are in the face itself
Vector3d q = r.origin + t * r.direction;
if(((B - A).cross(q - A).dot(normal)) >= 0){
if(((C - B).cross(q - B).dot(normal)) >= 0){
if(((A - C).cross(q - C).dot(normal)) >= 0){
intersection = true;
// If we do have an intersection that is closer than previous intersections
// , calculate the interpolated normal so
// that we can calculate the lighting
if(vector_distance(q, r.origin) < minDistance){
minDistance = vector_distance(q, r.origin);
pointHit->x = q(0);
pointHit->y = q(1);
pointHit->z = q(2);
float bary_constant = ((B - A).cross(C - A)).dot(normal);
float alpha = ((C - B).cross(q - B)).dot(normal) / bary_constant;
float beta = ((A - C).cross(q - C)).dot(normal) / bary_constant;
float gamma = ((B - A).cross(q - A)).dot(normal) / bary_constant;
vertex n1 = *(obj->normal_vertexes[obj->faces[i]->n1]);
vertex n2 = *(obj->normal_vertexes[obj->faces[i]->n2]);
vertex n3 = *(obj->normal_vertexes[obj->faces[i]->n3]);
Vector3d point1 = alpha * vertexToVector(n1);
Vector3d point2 = beta * vertexToVector(n2);
Vector3d point3 = gamma * vertexToVector(n3);
Vector3d interpolated_normal = point1 + point2 + point3;
interpolated_normal.normalize();
normalHit->x = interpolated_normal(0);
normalHit->y = interpolated_normal(1);
normalHit->z = interpolated_normal(2);
}
}
}
}
}
}
return intersection;
}
__device__ Vector3d component_wise_product(Vector3d a, Vector3d b){
float comp1 = a(0) * b(0);
float comp2 = a(1) * b(1);
float comp3 = a(2) * b(2);
Vector3d vec(comp1, comp2, comp3);
return vec;
}
//Calculates lighting
__device__ color lighting(vertex p, vertex normal, material m, vector<light*> lights,
vertex c){
Vector3d diffuse(m.diffuse.r, m.diffuse.g, m.diffuse.b);
Vector3d ambient(m.ambient.r, m.ambient.g, m.ambient.b);
Vector3d specular(m.specular.r, m.specular.g, m.specular.b);
float shine = m.shine.p;
Vector3d diffuse_sum(0, 0, 0);
Vector3d specular_sum(0, 0, 0);
Vector3d position(p.x, p.y, p.z);
Vector3d camera_pos(c.x, c.y, c.z);
Vector3d n(normal.x, normal.y, normal.z);
Vector3d direction = camera_pos - position;
direction.normalize();
for(int i = 0; i < lights.size(); i++){
Vector3d light_pos(lights[i]->x, lights[i]->y ,lights[i]->z);
Vector3d light_color(lights[i]->r, lights[i]->g, lights[i]->b);
Vector3d light_direction(light_pos - position);
float distance = sqrt(light_direction(0) * light_direction(0) +
light_direction(1) * light_direction(1) +
light_direction(2) * light_direction(2));
float attenuation = 1 / (1 + lights[i]->k * pow(distance, 2));
light_color *= attenuation;
light_direction.normalize();
Vector3d light_diffuse;
if(n.dot(light_direction) < 0){
light_diffuse << 0, 0, 0;
}
else{
light_diffuse = light_color * n.dot(light_direction);
}
diffuse_sum += light_diffuse;
Vector3d light_specular;
Vector3d normalized_direction = direction + light_direction;
normalized_direction.normalize();
if(n.dot(normalized_direction) < 0){
light_specular << 0, 0, 0;
}
else{
light_specular = light_color * pow(n.dot(normalized_direction), shine);
}
specular_sum += light_specular;
}
Vector3d col = ambient + component_wise_product(diffuse_sum, diffuse) +
component_wise_product(specular_sum, specular);
if(col(0) > 1){
col(0) = 1;
}
if(col(1) > 1){
col(1) = 1;
}
if(col(2) > 1){
col(2) = 1;
}
color final_color;
final_color.r = col(0) * 255;
final_color.g = col(1) * 255;
final_color.b = col(2) * 255;
return final_color;
}
__global__
void cudaRaytraceKernel(const object *objects, const light *lights, const camera *c,
color *pixels, const Vector3d e1, const Vector3d e2, const Vector3d e3, float xres, float yres){
uint idx = threadIdx.x + blockIdx.x * blockDim.x;
// While we are still dealing with pixels inside of our block
while(idx < xres * yres){
// Calculate the direction of the ray (origin is always position of camera)
// The original direction has been presumed to be (0, 0, -1), and this
// has been adjusted by the camera orientation outside of the kernel
ray r;
r.origin(0) = camera_pos(0);
r.origin(1) = camera_pos(1);
r.origin(2) = camera_pos(2);
int curr_x = idx % xres;
int curr_y = idx / xres;
r.direction = c->near * e1 + (curr_x - xres / 2) * 2 * c->right / xres * e2 + e3 * (-curr_y + yres / 2) * 2 * c->top / yres;
r.direction.normalize();
// Store the closest pointHit, as well as the normal hit, and the object hit
vertex pointHit;
vertex normalHit;
object *closestObj = NULL;
// For every object, we will attempt see if the ray and the object intersect.
// This is done by for every face of every object, we will attempt to see if
// the ray and the face intersect
for(int i = 0; i < objects.size(); i++){
if(intersect(objects[i], r, &pointHit, &normalHit)){
if(vertex_vector_distance(pointHit, r.origin) < minDistance){
closestObj = objects[i];
minDistance = vertex_vector_distance(pointHit, r.origin);
}
}
}
if(closestObj != NULL){
vector<light*> lightsAtPixel;
for(int i = 0; i < lights.size(); i++){
ray shadowRay;
bool shadow;
shadowRay.origin(0) = pointHit.x;
shadowRay.origin(1) = pointHit.y;
shadowRay.origin(2) = pointHit.z;
shadowRay.direction(0) = lights[i]->x - pointHit.x;
shadowRay.direction(1) = lights[i]->y - pointHit.y;
shadowRay.direction(2) = lights[i]->z - pointHit.z;
shadowRay.direction.normalize();
vertex point;
vertex normal;
for(int j = 0; j < objects.size(); j++){
if(intersect(objects[j], shadowRay, &point, &normal)){
if(vector_distance(vertexToVector(point), vertexToVector(pointHit)) < .1){
shadow = true;
}
break;
}
}
if(!shadow){
lightsAtPixel.push_back(lights[i]);
}
}
}
idx += blockDim.x * gridDim.x;
}
}
void cudaCallRaytraceKernel(const int blocks, const int threadsPerBlock, const object *objects, const light *lights, const camera *c,
color *pixels, const Vector3d e1, const Vector3d e2, const Vector3d e3, float xres, float yres){
    cudaRaytraceKernel<<<blocks, threadsPerBlock>>>(objects, lights, c, pixels,
                                                    e1, e2, e3, xres, yres);
}
|
GPUart_Impl.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Copyright (c) 2017-2018 Christoph A. Hartmann, Ulrich Margull and Technische Hochschule Ingolstadt (THI)
//
//Permission is hereby granted, free of charge, to any person obtaining a copy of this
//software and associated documentation files (the "Software"), to deal in the Software
//without restriction, including without limitation the rights to use, copy, modify,
//merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
//permit persons to whom the Software is furnished to do so, subject to the following
//conditions:
//
//The above copyright notice and this permission notice shall be included in all copies
//or substantial portions of the Software.
//
//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
//INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
//PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
//HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
//OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
//SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
/*
* File: GPUart_Impl.cu
* Created by: Christoph Hartmann
* Institute: Technische Hochschule Ingolstadt
* Date: 07.04.2017 */
/********************************************************
* ___ ___ ___ ___ 3 ___ *
* | | | | | |\ /| | | | | | *
* |___ | | |___| | \/ | | | |___ | | *
* | | | |\ | | | | | | | *
* | |___| | \ | | |___| ___| | |___ *
* *
*********************************************************/
/*! @file GPUart_Impl.cu
*
* @brief Implementation of the management component of the GPUart Implemenation layer.
*
* This file concentrates all GPGPU related memory declarations and allocations, memory transfers
* operations, kernel launches, kernel initialisations, and GPU related implementation details.
*
*
* @author Christoph Hartmann
* @date Created on: 7 Apr 2017
*/
/************************************************************************************************/
/* Includes */
/************************************************************************************************/
//include header of Implementation layer
#include "GPUart_Impl.cuh"
#include "GPUart_Impl.h"
//include interfaces to other GPUart layer
#include "GPUart_Impl_Abstr_IF.h"
#include "GPUart_Impl_Sched_IF.h"
//include kernel libraries
#include "GPUart_Sobel.cuh"
#include "GPUart_MatrMul.cuh"
/************************************************************************************************/
/* Compiler Switches */
/************************************************************************************************/
/*! @brief Use zero copy memory (requires integrated GPU)
*
* This MUST be defined so far, since memory transfers over PCIe are currently not implemented completely.
*
* @see http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#zero-copy-memory
* @see https://software.intel.com/en-us/articles/getting-the-most-from-opencl-12-how-to-increase-performance-by-minimizing-buffer-copies-on-intel-processor-graphics
*/
#define S_USE_ZERO_COPY_FOR_GLOBAL_APPLICATION_MEMORY
/************************************************************************************************/
/* Constants */
/************************************************************************************************/
/*!
* @brief The length of the Event Queue, shared between GPU and CPU, used for kernel launch events.
*
* @see perKer_eventQueueCntHost_u32_host
* @see perKer_eventQueueCntDevice_u32_host
* @see perKer_eventQueue_s32_host
*/
#define C_PERSISTENT_KERNEL_EVENT_QUEUE_LENGTH (10) //Length of event queue
/*!
* @brief Event ID to indicate a termination request for the persistent kernel
*
* @see perKer_eventQueueCntHost_u32_host
* @see perKer_eventQueueCntHost_u32_g
*/
#define C_PERSISTENT_KERNEL_TERMINATE (-1) //Event ID to terminate persistent kernel
/************************************************************************************************/
/* Typedef */
/************************************************************************************************/
/*!
* @brief Typedef for command queues (streams) to abstract GPGPU-API
*
* Command queues are required to improve the concurrency of memory and kernel operatation on the GPU.
*
* @see https://developer.download.nvidia.com/CUDA/training/StreamsAndConcurrencyWebinar.pdf
* @see https://www.khronos.org/registry/OpenCL/sdk/1.0/docs/man/xhtml/clCreateCommandQueue.html
*/
typedef hipStream_t command_queue_s;
/*!
* @brief Typedef for a struct which combines global memory pointers, their related host pointers,
* and the size of the memory buffer.
*
*/
typedef struct
{
void ** mem_ptr;
void ** host_ptr;
size_t mem_size;
}device_global_memory_s;
/*!
* @brief Typedef for a struct which combines constant memory pointers and the size of the related memory buffer.
*
*/
typedef struct
{
void ** mem_ptr;
size_t mem_size;
}device_constant_memory_s;
/************************************************************************************************/
/* General Variables */
/************************************************************************************************/
/*!
* @brief The command queue (stream) for memory operations
*/
static command_queue_s memory_command_queue_s;
/*!
* @brief The command queue (stream) for the persistent kernel
*/
static command_queue_s persistent_kernel_command_queue_s;
/*! @var perKer_isRunning_u32_host
* @brief A status flag, which represents the running status of the persistent kernel (host pointer).
* @see perKer_isRunning_u32_g
*/
/*! @var perKer_isRunning_u32_g
* @brief A status flag, which represents the running status of the persistent kernel (device pointer).
* @see perKer_isRunning_u32_host
*/
volatile uint32 *perKer_isRunning_u32_host;
uint32 *perKer_isRunning_u32_g;
/*! @var perKer_eventQueueCntHost_u32_host
* @brief The index of the tail of the event queue for kernel launches written by the host (host pointer).
* @see perKer_eventQueueCntDevice_u32_g
*/
/*! @var perKer_eventQueueCntHost_u32_g
* @brief The index of the tail of the event queue for kernel launches written by the host (device pointer).
* @see perKer_eventQueueCntHost_u32_host
*/
volatile uint32 *perKer_eventQueueCntHost_u32_host;
uint32 *perKer_eventQueueCntHost_u32_g;
/*! @var perKer_eventQueueCntDevice_u32_host
* @brief The index of the head of the event queue for kernel launches written by the device (host pointer).
* @see perKer_eventQueueCntDevice_u32_g
*/
/*! @var perKer_eventQueueCntDevice_u32_g
* @brief The index of the head of the event queue for kernel launches written by the device (device pointer).
* @see perKer_eventQueueCntDevice_u32_host
*/
volatile uint32 *perKer_eventQueueCntDevice_u32_host;
uint32 *perKer_eventQueueCntDevice_u32_g;
/*! @var perKer_eventQueue_s32_host
* @brief The event queue for kernel launch requests, written by the CPU and red by the GPU (host pointer).
*
* To request a kernel launch, write the kernel's ID (#kernel_task_id_e) into the tail of the queue.
* Write #C_PERSISTENT_KERNEL_TERMINATE to terminate the persistent kernel #GPUart_Persistent_Kernel.
* @see perKer_eventQueue_s32_g
*/
/*! @var perKer_eventQueue_s32_g
* @brief The event queue for kernel launch requests, written by the CPU and red by the GPU (device pointer).
*
* To request a kernel launch, write the kernel's ID (#kernel_task_id_e) into the tail of the queue.
* Write #C_PERSISTENT_KERNEL_TERMINATE to terminate the persistent kernel #GPUart_Persistent_Kernel.
* @see perKer_eventQueue_s32_host
*/
volatile sint32 *perKer_eventQueue_s32_host;
sint32 *perKer_eventQueue_s32_g;
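/* Illustrative sketch (assuming the scheduler advances the tail with a simple
 * modulo wrap): a host-side launch request writes the kernel ID and bumps the
 * tail index, e.g.
 *   perKer_eventQueue_s32_host[*perKer_eventQueueCntHost_u32_host] = E_KTID_SOBEL1;
 *   *perKer_eventQueueCntHost_u32_host =
 *       (*perKer_eventQueueCntHost_u32_host + 1u) % C_PERSISTENT_KERNEL_EVENT_QUEUE_LENGTH;
 * The persistent kernel compares this tail against its own head counter
 * (perKer_eventQueueCntDevice_u32_g) to detect pending launch requests. */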
/*! @var perKer_kernelTasksRunningStates_u32_host
* @brief A status flag, which represents the running status of each kernel (host pointer).
* @see perKer_kernelTasksRunningStates_u32_g
*/
/*! @var perKer_kernelTasksRunningStates_u32_g
* @brief A status flag, which represents the running status of each kernel (device pointer).
* @see perKer_kernelTasksRunningStates_u32_host
*/
volatile uint32 *perKer_kernelTasksRunningStates_u32_host;
uint32 *perKer_kernelTasksRunningStates_u32_g;
/*!
* @brief The allowed job cost per kernel
*
 * This value is equal to m * µ, whereby m is the number of Streaming Multiprocessors of the GPU
 * #gpuS_nrOfMultiprocessor_u32 and µ is the resource factor #C_GPUS_RESOURCE_FACTOR.
*
* @see kernel_task_id_e
* @see C_GPUS_RESOURCE_FACTOR
* @see gpuS_nrOfMultiprocessor_u32
* @see kernel_job_costs
*/
uint32 max_costs_per_kernel = 0;
/************************************************************************************************/
/* Kernel Task Variables - E_KTID_SOBEL1 */
/************************************************************************************************/
sint32 * sob1_matrix_in_s32_g, * sob1_matrix_in_s32_host;
sint32 * sob1_matrix_out_s32_g, * sob1_matrix_out_s32_host;
/* Synchronization variables */
uint32 * sync_SOB1_flags_in_u32_g;
uint32 * sync_SOB1_flags_out_u32_g;
/* Preemption related variables*/
sint32 * preempt_SOB1_flag_g;
volatile sint32 *preempt_SOB1_flag_host;
sint32 * preempt_SOB1_flag_internal_g;
sint32 * preempt_SOB1_sm_g;
volatile sint32 *preempt_SOB1_sm_host;
/* Buffer variables */
uint32 * sob1_buffer_loop_counter_u32_g;
/************************************************************************************************/
/* Kernel Task Variables - E_KTID_SOBEL2 */
/************************************************************************************************/
sint32 * sob2_matrix_in_s32_g, * sob2_matrix_in_s32_host;
sint32 * sob2_matrix_out_s32_g, * sob2_matrix_out_s32_host;
/* Synchronization variables */
uint32 * sync_SOB2_flags_in_u32_g;
uint32 * sync_SOB2_flags_out_u32_g;
/* Preemption related variables*/
sint32 * preempt_SOB2_flag_g;
volatile sint32 *preempt_SOB2_flag_host;
sint32 * preempt_SOB2_flag_internal_g;
sint32 * preempt_SOB2_sm_g;
volatile sint32 *preempt_SOB2_sm_host;
/* Buffer variables */
uint32 * sob2_buffer_loop_counter_u32_g;
/************************************************************************************************/
/* Kernel Task Variables - E_KTID_MM */
/************************************************************************************************/
float32 * mm_matrix_A_f32_g, * mm_matrix_A_f32_host;
float32 * mm_matrix_B_f32_g, * mm_matrix_B_f32_host;
float32 * mm_matrix_C_f32_g, * mm_matrix_C_f32_host;
/* Synchronization variables */
uint32 * sync_MM_flags_in_u32_g;
uint32 * sync_MM_flags_out_u32_g;
/* Preemption related variables*/
sint32 * preempt_MM_flag_g;
volatile sint32 *preempt_MM_flag_host;
sint32 * preempt_MM_sm_g;
volatile sint32 *preempt_MM_sm_host;
/* Buffer variables */
uint32 * mm_buffer_blockY_g;
uint32 * mm_buffer_blockX_g;
uint32 * mm_buffer_M_g;
/************************************************************************************************/
/* Constant Variable Table */
/************************************************************************************************/
/*!
* @brief The constant memory table
*
* All constant memory buffers which must be written during runtime must be defined here.
 * The i'th element represents the i'th constant memory buffer, defined by #device_constant_memory_id_e
 * in GPUart_Config.h. Each element must be defined in the following style: { (void **)& CONSTANT_BUFFER_NAME,
* SIZE_IN_BYTES }.
*
* @see device_constant_memory_id_e
*/
static device_constant_memory_s constant_memory_list_a[E_CM_TOTAL_NR_OF_CONST_MEM_VARIABLES] =
{
//{ (void **)& VARIABLE_NAME, SIZE IN BYTES }
};
/************************************************************************************************/
/* Global Variable Table */
/************************************************************************************************/
/*!
* @brief The global memory table
*
 * All global memory buffers which must be written or read during runtime must be defined here.
 * The i'th element represents the i'th global memory buffer, defined by #device_global_memory_id_e
 * in GPUart_Config.h. Each element must be defined in the following style: { (void **)&
* GLOBAL_MEMORY_BUFFER_POINTER_DEVICE, GLOBAL_MEMORY_BUFFER_POINTER_HOST, SIZE_IN_BYTES }.
*
* @see device_global_memory_id_e
*/
static device_global_memory_s global_memory_list_a[E_GM_TOTAL_NR_OF_GLOB_MEM_VARIABLES] =
{
/* Sobel1 */
{ (void **)&sob1_matrix_in_s32_g, (void **)&sob1_matrix_in_s32_host, C_SOB1_MATRIX_SIZE * sizeof(sint32) }, //E_GM_ID_SOB1_MATRIX_IN
{ (void **)&sob1_matrix_out_s32_g, (void **)&sob1_matrix_out_s32_host, C_SOB1_MATRIX_SIZE * sizeof(sint32) }, //E_GM_ID_SOB1_MATRIX_OUT
/* Sobel2 */
{ (void **)&sob2_matrix_in_s32_g, (void **)&sob2_matrix_in_s32_host, C_SOB2_MATRIX_SIZE * sizeof(sint32) }, //E_GM_ID_SOB2_MATRIX_IN
{ (void **)&sob2_matrix_out_s32_g, (void **)&sob2_matrix_out_s32_host, C_SOB2_MATRIX_SIZE * sizeof(sint32) }, //E_GM_ID_SOB2_MATRIX_OUT
/* MatrMul */
{ (void **)&mm_matrix_A_f32_g, (void **)&mm_matrix_A_f32_host, C_MM_MATRIX_TOTAL_SIZE * sizeof(sint32) }, //E_GM_ID_MM_MATRIX_A
{ (void **)&mm_matrix_B_f32_g, (void **)&mm_matrix_B_f32_host, C_MM_MATRIX_TOTAL_SIZE * sizeof(sint32) }, //E_GM_ID_MM_MATRIX_B
{ (void **)&mm_matrix_C_f32_g, (void **)&mm_matrix_C_f32_host, C_MM_MATRIX_TOTAL_SIZE * sizeof(sint32) } //E_GM_ID_MM_MATRIX_C
};
/************************************************************************************************/
/* Preemption Flag Table */
/************************************************************************************************/
/*!
* @brief The preemption flag table
*
* All preemption flags must be included by this table.
* The i'th element represents the i'th kernel, according to the enum #kernel_task_id_e
 * in GPUart_Config.h. Each element must be defined in the following style: (volatile sint32**)&
* NAME_OF_PREEMPTION_FLAG_POINTER. If a kernel does not implement a preemption flag, because it
* is non-preemptive, insert a NULL.
*
* @see kernel_task_id_e
*/
static volatile sint32** device_preemption_flags_a[E_KTID_NUMBER_OF_KERNEL_TASKS] =
{
(volatile sint32**) &preempt_SOB1_flag_host, //E_KTID_SOBEL1
(volatile sint32**) &preempt_SOB2_flag_host, //E_KTID_SOBEL2
(volatile sint32**) &preempt_MM_flag_host //E_KTID_MM
};
/************************************************************************************************/
/* Preemption Enabled Parameter Table */
/************************************************************************************************/
/*!
* @brief The preemption enabled table
*
* The i'th element represents the i'th kernel, according to the enum #kernel_task_id_e
 * in GPUart_Config.h. Each element must be defined in the following style: #C_TRUE if the related kernel
* is preemptive; #C_FALSE if the related kernel is non-preemptive.
*
* @see kernel_task_id_e
*/
const static sint32 preemption_enabled_a[E_KTID_NUMBER_OF_KERNEL_TASKS] =
{
C_TRUE, //E_KTID_SOBEL1
C_TRUE, //E_KTID_SOBEL2
C_TRUE //E_KTID_MM
};
/************************************************************************************************/
/* Kernel State Machine Table */
/************************************************************************************************/
/*!
* @brief The kernel state machine table
*
* The i'th element represents the i'th kernel, according to the enum #kernel_task_id_e
 * in GPUart_Config.h. Each element must be defined in the following style: &NAME_OF_STATE_MACHINE_POINTER.
* Use NULL if the related kernel is non-preemptive.
*
* @see kernel_task_id_e
*/
static volatile sint32** device_kernel_task_SM_a[E_KTID_NUMBER_OF_KERNEL_TASKS] =
{
&preempt_SOB1_sm_host, //E_KTID_SOBEL1
&preempt_SOB2_sm_host, //E_KTID_SOBEL2
&preempt_MM_sm_host //E_KTID_MM
};
/*!
* @brief The number of state machines table
*
* The i'th element represents the i'th kernel, according to the enum kernel_task_id_e
 * in GPUart_Config.h. Each element must be defined in the following style: NUMBER_OF_SM_IN_KERNEL.
* If a kernel preempts grid-synchronous then use the value 1u. If a kernel preempts thread-block
* synchronous then use the number of thread blocks of this kernel. If a kernel is non-preemptive
* then use 0u.
*
* @see kernel_task_id_e
*/
static uint32 nb_of_StateMachines_in_kernel_a[E_KTID_NUMBER_OF_KERNEL_TASKS] =
{
1u, //E_KTID_SOBEL1 -> Grid-wide preemption
1u, //E_KTID_SOBEL2 -> Grid-wide preemption
C_MM_NUMBER_OF_BLOCKS //E_KTID_MM -> Thread block-wide preemption
};
/************************************************************************************************/
/* Kernel Cost Table */
/************************************************************************************************/
/*!
* @brief The job cost table
*
* The i'th element represents the i'th kernel, according to the enum kernel_task_id_e
* in GPUart_Config.h. Each element represents the job costs of the related kernel.
 * If a thread block of a kernel requires more than 1/µ of the available registers, shared memory,
 * thread residency slots, or thread block residency slots of a Streaming Multiprocessor,
 * then set the corresponding value to m * µ, whereby µ is the resource factor and m is the GPU's
 * number of Streaming Multiprocessors. If a thread block of a kernel requires less than 1/µ of each
 * resource type, then set the corresponding value to the kernel's number of thread blocks.
*
* @see kernel_task_id_e
* @see C_GPUS_RESOURCE_FACTOR
* @see gpuS_nrOfMultiprocessor_u32
* @see max_costs_per_kernel
*/
static uint8 kernel_job_costs[E_KTID_NUMBER_OF_KERNEL_TASKS] =
{
C_SOB1_NUMBER_OF_BLOCKS, //E_KTID_SOBEL1
C_SOB2_NUMBER_OF_BLOCKS, //E_KTID_SOBEL2
C_MM_NUMBER_OF_BLOCKS //E_KTID_MM
};
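/* Worked example (illustrative numbers, not measured on real hardware): for a GPU with
 * m = 2 Streaming Multiprocessors and a resource factor of 4, max_costs_per_kernel becomes 8.
 * A kernel with 6 lightweight thread blocks is assigned cost 6, whereas a kernel whose thread
 * blocks each occupy more than 1/4 of an SM's resources is assigned the full cost of 8. */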
/*!
* @brief The device ID of the used GPU
*
* @see http://docs.nvidia.com/cuda/cuda-c-programming-guide
* @see https://www.khronos.org/registry/OpenCL/sdk/1.0/docs/man/xhtml/clGetDeviceIDs.html
*/
static uint8 gpuI_deviceID_u8 = 0;
/************************************************************************************************/
/* Persistent Kernel */
/************************************************************************************************/
/*!
* @brief The persistent kernel (GPU Daemon) which is used to reduce kernel launch latencies.
*
* The kernel arguments must include all global memory buffers of all kernels in this system, since
 * this kernel is used to launch GPGPU kernels on demand. The persistent kernel reduces kernel launch
* latencies by bypassing the GPGPU driver stack when launching kernels.
*
* @see Mrozek et al. GPU Daemon: Road to zero cost submission, in Proceedings of the 4th International
* Workshop on OpenCL, Vienna, Austria, 2016 -> https://dl.acm.org/citation.cfm?id=2909450
*/
__global__ void GPUart_Persistent_Kernel
(
//Persistent Kernel Management Data
uint32* __restrict__ perKer_isRunning_u32_g,
uint32* __restrict__ perKer_eventQueueCntDevice_u32_g,
volatile uint32 * __restrict__ perKer_eventQueueCntHost_u32_g,
volatile sint32 * __restrict__ perKer_eventQueue_s32_g,
volatile uint32* __restrict__ perKer_kernelTasksRunningStates_u32_g,
//SOBEL1 Variables
sint32 * __restrict__ sob1_matrix_in_s32_g,
sint32 * __restrict__ sob1_matrix_out_s32_g,
//SOBEL2 Variables
sint32 * __restrict__ sob2_matrix_in_s32_g,
sint32 * __restrict__ sob2_matrix_out_s32_g,
//MM Variables
float32 * __restrict__ mm_matrix_A_f32_g,
float32 * __restrict__ mm_matrix_B_f32_g,
float32 * __restrict__ mm_matrix_C_f32_g,
/* Synchronization variables */
//SOBEL1
uint32 * __restrict__ sync_SOB1_flags_in_u32_g,
uint32 * __restrict__ sync_SOB1_flags_out_u32_g,
//SOBEL2
uint32 * __restrict__ sync_SOB2_flags_in_u32_g,
uint32 * __restrict__ sync_SOB2_flags_out_u32_g,
//MM
uint32 * __restrict__ sync_MM_flags_in_u32_g,
uint32 * __restrict__ sync_MM_flags_out_u32_g,
/* Preemption variables */
//SOB1
sint32 * __restrict__ preempt_SOB1_flag_g,
sint32 * __restrict__ preempt_SOB1_flag_internal_g,
sint32 * __restrict__ preempt_SOB1_sm_g,
//SOB2
sint32 * __restrict__ preempt_SOB2_flag_g,
sint32 * __restrict__ preempt_SOB2_flag_internal_g,
sint32 * __restrict__ preempt_SOB2_sm_g,
//MM
sint32 * __restrict__ preempt_MM_flag_g,
sint32 * __restrict__ preempt_MM_sm_g,
/* Buffer variables */
//SOB1
uint32 * __restrict__ sob1_buffer_loop_counter_u32_g,
//SOB2
uint32 * __restrict__ sob2_buffer_loop_counter_u32_g,
//MM
uint32 * __restrict__ mm_buffer_blockY_g,
uint32 * __restrict__ mm_buffer_blockX_g,
uint32 * __restrict__ mm_buffer_M_g
)
{
hipStream_t stream_kernel_SOB1;
hipStream_t stream_kernel_SOB2;
hipStream_t stream_kernel_MM;
hipStreamCreateWithFlags(&stream_kernel_SOB1, hipStreamNonBlocking);
hipStreamCreateWithFlags(&stream_kernel_SOB2, hipStreamNonBlocking);
hipStreamCreateWithFlags(&stream_kernel_MM, hipStreamNonBlocking);
while(C_TRUE)
{
//Check if host has issued a new event to queue
if(*perKer_eventQueueCntDevice_u32_g != *perKer_eventQueueCntHost_u32_g)
{
//Calculate position of next available event in queue
*perKer_eventQueueCntDevice_u32_g = (*perKer_eventQueueCntDevice_u32_g + 1)
% C_PERSISTENT_KERNEL_EVENT_QUEUE_LENGTH;
//Interpret new event
switch(perKer_eventQueue_s32_g[*perKer_eventQueueCntDevice_u32_g])
{
case C_PERSISTENT_KERNEL_TERMINATE: //Terminate persistent Kernel
*perKer_isRunning_u32_g = C_FALSE;
return;
case E_KTID_SOBEL1:
__syncthreads();
hipLaunchKernelGGL(( Sobel_Kernel), dim3(C_SOB1_NUMBER_OF_BLOCKS), dim3(C_SOB1_LOCAL_WORK_SIZE), 0, stream_kernel_SOB1,
sob1_matrix_in_s32_g,
sob1_matrix_out_s32_g,
C_SOB1_HEIGHT,
C_SOB1_WIDTH,
//Preemption status variables
preempt_SOB1_flag_g,
preempt_SOB1_flag_internal_g,
preempt_SOB1_sm_g,
//Buffer variables
sob1_buffer_loop_counter_u32_g,
//Synchronization variables
sync_SOB1_flags_in_u32_g,
sync_SOB1_flags_out_u32_g,
/* Running status flag */
&perKer_kernelTasksRunningStates_u32_g[E_KTID_SOBEL1]
);
__syncthreads();
break;
case E_KTID_SOBEL2:
__syncthreads();
hipLaunchKernelGGL(( Sobel_Kernel), dim3(C_SOB2_NUMBER_OF_BLOCKS), dim3(C_SOB2_LOCAL_WORK_SIZE), 0, stream_kernel_SOB2,
sob2_matrix_in_s32_g,
sob2_matrix_out_s32_g,
C_SOB2_HEIGHT,
C_SOB2_WIDTH,
//Preemption status variables
preempt_SOB2_flag_g,
preempt_SOB2_flag_internal_g,
preempt_SOB2_sm_g,
//Buffer variables
sob2_buffer_loop_counter_u32_g,
//Synchronization variables
sync_SOB2_flags_in_u32_g,
sync_SOB2_flags_out_u32_g,
/* Running status flag */
&perKer_kernelTasksRunningStates_u32_g[E_KTID_SOBEL2]
);
__syncthreads();
break;
case E_KTID_MM:
__syncthreads();
dim3 dimGridMM(C_MM_NUMBER_OF_BLOCKS_X, C_MM_NUMBER_OF_BLOCKS_Y);
dim3 dimBlockMM(C_MM_LOCAL_WORK_SIZE_X, C_MM_LOCAL_WORK_SIZE_Y);
hipLaunchKernelGGL(( MatrMul_Kernel), dim3(dimGridMM), dim3(dimBlockMM), 0, stream_kernel_MM,
//Functional Data
mm_matrix_A_f32_g,
mm_matrix_B_f32_g,
mm_matrix_C_f32_g,
//Preemption Buffer
mm_buffer_blockY_g,
mm_buffer_blockX_g,
mm_buffer_M_g,
//Preemption Managment
preempt_MM_flag_g,
preempt_MM_sm_g,
//Synchronization Flags
sync_MM_flags_in_u32_g,
sync_MM_flags_out_u32_g,
//Running status flag
&perKer_kernelTasksRunningStates_u32_g[E_KTID_MM]
);
__syncthreads();
break;
}
__threadfence_system();
}
}
}
/************************************************************************************************/
/* General function definition */
/************************************************************************************************/
/*! @brief Copy data from host memory to device memory.
*
* Device memory may be shared physical memory or discrete device memory. The device driver
* API call may depend on the type of device memory (global or texture memory).
*
* @param[in] void * variable_p -> The host variable to be copied
* @param[in] device_global_memory_id_e id_p -> The ID of the global memory variable
*
 * @return GPUART_SUCCESS if the memory copy operation has completed successfully.
* @return GPUART_ERROR_INVALID_ARGUMENT if id_p is an invalid ID.
*/
GPUart_Retval gpuI_memcpyHost2Device(void * variable_p, device_global_memory_id_e id_p)
{
GPUart_Retval retval = GPUART_SUCCESS;
device_global_memory_s device_memory;
if((id_p >= E_GM_TOTAL_NR_OF_GLOB_MEM_VARIABLES)||(variable_p == NULL))
{
retval = GPUART_ERROR_INVALID_ARGUMENT;
}
else
{
device_memory = global_memory_list_a[id_p];
#ifdef S_USE_ZERO_COPY_FOR_GLOBAL_APPLICATION_MEMORY
memcpy(*device_memory.host_ptr, variable_p, device_memory.mem_size);
#else
CUDA_CHECK_RETURN(hipMemcpyAsync(*device_memory.mem_ptr, variable_p, device_memory.mem_size,
hipMemcpyHostToDevice, memory_command_queue_s));
#endif
}
return retval;
}
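/* Illustrative usage sketch (the local buffer name is a placeholder, not defined in this file):
 *
 *   sint32 sobel1_input[C_SOB1_MATRIX_SIZE];
 *   //... fill sobel1_input ...
 *   if(gpuI_memcpyHost2Device(sobel1_input, E_GM_ID_SOB1_MATRIX_IN) != GPUART_SUCCESS)
 *   {
 *       //invalid ID or NULL pointer
 *   }
 */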
/*! @brief Copy data from device memory to host memory.
*
* Device memory may be shared physical memory or discrete device memory. The device driver
* API call may depend on the type of device memory (global or texture memory).
*
* @param[out] void * variable_p -> The host variable to be written
* @param[in] device_global_memory_id_e id_p -> The ID of the global memory variable
*
 * @return GPUART_SUCCESS if the memory copy operation has completed successfully.
* @return GPUART_ERROR_INVALID_ARGUMENT if id_p is an invalid ID.
*/
GPUart_Retval gpuI_memcpyDevice2Host(void * variable_p, device_global_memory_id_e id_p)
{
GPUart_Retval retval = GPUART_SUCCESS;
device_global_memory_s device_memory;
if((id_p >= E_GM_TOTAL_NR_OF_GLOB_MEM_VARIABLES)||(variable_p == NULL))
{
retval = GPUART_ERROR_INVALID_ARGUMENT;
}
else
{
device_memory = global_memory_list_a[id_p];
#ifdef S_USE_ZERO_COPY_FOR_GLOBAL_APPLICATION_MEMORY
memcpy(variable_p, *device_memory.host_ptr, device_memory.mem_size);
#else
CUDA_CHECK_RETURN(hipMemcpyAsync(variable_p, *device_memory.mem_ptr, device_memory.mem_size,
hipMemcpyDeviceToHost, memory_command_queue_s));
#endif
}
return retval;
}
/*************************************************************************************************
Function: gpuI_memcpyConstantMemory
Description: Copies data from host memory to constant device memory. The copy is only possible
if persistent GPUart kernel is not running, since a constant memory variable is
immutable during kernel execution and its value is inherited from parent to child
kernel.
*/
/*! @brief Copy data from host memory to constant device memory.
*
* The copy is only possible if persistent GPUart kernel #GPUart_Persistent_Kernel
 * is not running, since constant memory data is immutable during kernel execution
* and its value is inherited from parent to child kernel.
*
* @param[in] void * variable_p -> The host variable to be copied
* @param[in] device_constant_memory_id_e id_p -> The ID of the constant memory buffer
*
 * @return GPUART_SUCCESS if the memory copy operation has completed successfully.
* @return GPUART_ERROR_INVALID_ARGUMENT if id_p is an invalid ID.
*/
GPUart_Retval gpuI_memcpyConstantMemory(void * variable_p, device_constant_memory_id_e id_p)
{
GPUart_Retval retval = GPUART_SUCCESS;
device_constant_memory_s device_memory;
if((id_p >= E_CM_TOTAL_NR_OF_CONST_MEM_VARIABLES) || (variable_p == NULL))
{
retval = GPUART_ERROR_INVALID_ARGUMENT;
}
else
{
if(*perKer_isRunning_u32_host == C_TRUE)
{
retval = GPUART_ERROR_PESISTENT_KERNEL_IS_RUNNING;
}
else
{
device_memory = constant_memory_list_a[id_p];
CUDA_CHECK_RETURN(hipMemcpyToSymbolAsync(*device_memory.mem_ptr, variable_p, device_memory.mem_size, 0,
hipMemcpyHostToDevice, memory_command_queue_s));
CUDA_CHECK_RETURN(hipStreamSynchronize(memory_command_queue_s));
}
}
return retval;
}
/*!
* @brief Request the launch of a GPGPU kernel.
*
* @param kernel_task_id_e task_id_e -> The ID of the kernel to be launched.
*
 * @return GPUART_SUCCESS if the kernel launch request has been issued successfully.
 * @return GPUART_ERROR_NOT_READY if the kernel is already active.
*/
GPUart_Retval gpuI_runJob(kernel_task_id_e task_id_e)
{
GPUart_Retval retval = GPUART_SUCCESS;
uint32 eventQueueCntHost_u32_l;
uint32 kernelStatus = ((volatile uint32 *)perKer_kernelTasksRunningStates_u32_host)[task_id_e];
if((kernelStatus == C_KERNEL_SUSPENDED)||
(kernelStatus == C_KERNEL_READY)||
(kernelStatus == C_KERNEL_INIT))
{
perKer_kernelTasksRunningStates_u32_host[task_id_e] = C_KERNEL_ACTIVE;
//Reset Preemption flag
if(device_preemption_flags_a[task_id_e] != NULL)
{
// printf("-> Resetting preemption flag for kernel %d", task_id_e);
**device_preemption_flags_a[task_id_e] = C_FALSE;
}
//Reset state machine
if((kernelStatus == C_KERNEL_READY)||(kernelStatus == C_KERNEL_INIT))
{
//Do not reset Kernel SM if kernel has been preempted
if(device_kernel_task_SM_a[task_id_e] != NULL)
{
//**device_kernel_task_SM_a[task_id_e] = 0; --> Old. Now, all SMs of a kernel are set to zero
memset((void *)*device_kernel_task_SM_a[task_id_e], 0, nb_of_StateMachines_in_kernel_a[task_id_e] * sizeof(sint32));
}
}
//Calculate next position in persistent kernel event queue
eventQueueCntHost_u32_l = (perKer_eventQueueCntHost_u32_host[0] + 1)
% C_PERSISTENT_KERNEL_EVENT_QUEUE_LENGTH;
//Set kernel call event
perKer_eventQueue_s32_host[eventQueueCntHost_u32_l] = task_id_e;
//Make new event visible
*perKer_eventQueueCntHost_u32_host = eventQueueCntHost_u32_l;
if((eventQueueCntHost_u32_l == UINT32_MAX )||(eventQueueCntHost_u32_l > C_PERSISTENT_KERNEL_EVENT_QUEUE_LENGTH))
{
printf("\nERROR: Host event queue counter out of range");
}
}
else
{
retval = GPUART_ERROR_NOT_READY;
}
return retval;
}
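/* Illustrative usage sketch (the polling loop is an assumption of the caller, not defined here):
 *
 *   if(gpuI_runJob(E_KTID_MM) == GPUART_SUCCESS)
 *   {
 *       while(gpuI_queryKernelTerminatedSuccessful(E_KTID_MM) == C_FALSE)
 *       {
 *           //kernel is still running, suspended, or pending
 *       }
 *   }
 */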
/*************************************************************************************************
Function: gpuI_preemptJob
Description: Issue preemption of a specific kernel task
*/
GPUart_Retval gpuI_preemptJob(kernel_task_id_e task_id_p)
{
GPUart_Retval retval = GPUART_SUCCESS;
//Check if kernel task is preemptive
if(preemption_enabled_a[task_id_p] == C_TRUE)
{
//Set preemption flag
**device_preemption_flags_a[task_id_p] = C_TRUE;
}
else
{
//Kernel task is not preemptive -> no operation
retval = GPUART_ERROR_NO_OPERTATION;
}
return retval;
}
/*************************************************************************************************
Function: gpuI_queryKernelIsRunning
Description: Query kernel running status.
             Returns C_TRUE if the kernel task is still running.
             Returns C_FALSE if the kernel task is not running.
*/
uint32 gpuI_queryKernelIsRunning(kernel_task_id_e task_id_e)
{
uint32 retval = C_TRUE;
//Query stream whether there is a running operation
if((perKer_kernelTasksRunningStates_u32_host[task_id_e] == C_KERNEL_TERMINATED_SUCESSFUL)||
(perKer_kernelTasksRunningStates_u32_host[task_id_e] == C_KERNEL_SUSPENDED)||
(perKer_kernelTasksRunningStates_u32_host[task_id_e] == C_KERNEL_INIT))
{
//Kernel task is not running -> success
retval = C_FALSE;
}
else
{
//Kernel is still running
retval = C_TRUE;
}
return retval;
}
/*************************************************************************************************
Function: gpuI_queryKernelTerminatedSuccessful
Description: Query whether a kernel task has terminated successfully.
             Returns C_TRUE if the kernel task has terminated successfully.
             Returns C_FALSE otherwise.
*/
uint32 gpuI_queryKernelTerminatedSuccessful(kernel_task_id_e task_id_e)
{
uint32 retval = C_TRUE;
//Query stream whether there is a running operation
if(perKer_kernelTasksRunningStates_u32_host[task_id_e] == C_KERNEL_TERMINATED_SUCESSFUL)
{
//Kernel task is not running -> success
}
else
{
//Kernel is still running
retval = C_FALSE;
}
return retval;
}
/*************************************************************************************************
Function: gpuI_queryKernelPreempted
Description: Query whether a kernel task has been preempted.
             Returns C_TRUE if the kernel task is suspended (preempted).
             Returns C_FALSE otherwise.
*/
uint32 gpuI_queryKernelPreempted(kernel_task_id_e task_id_e)
{
uint32 retval = C_TRUE;
//Query stream whether there is a running operation
if(perKer_kernelTasksRunningStates_u32_host[task_id_e] == C_KERNEL_SUSPENDED)
{
//Kernel task is not running -> success
}
else
{
//Kernel is still running
retval = C_FALSE;
}
return retval;
}
/*************************************************************************************************
Function: gpuI_getJobCosts
Description: Returns the job costs of the corresponding kernel, capped at the maximum
             allowed costs per kernel (max_costs_per_kernel).
*/
uint32 gpuI_getJobCosts(kernel_task_id_e task_id_e)
{
uint32 retval = kernel_job_costs[task_id_e];
if(retval > max_costs_per_kernel)
{
retval = max_costs_per_kernel;
}
return retval;
}
/*************************************************************************************************
Function: gpuI_SetKernelStatusReady
Description: Sets the internal status of the corresponding kernel to ready. This function is
called after a new job has been enqueued.
*/
GPUart_Retval gpuI_SetKernelStatusReady(kernel_task_id_e task_id_e)
{
GPUart_Retval retval = GPUART_SUCCESS;
perKer_kernelTasksRunningStates_u32_host[task_id_e] = C_KERNEL_READY;
return retval;
}
/*************************************************************************************************
Function: gpuI_get_NrOfMultiprocessors
Description: Determines the scheduling budget, i.e. the GPU's number of Streaming
             Multiprocessors multiplied by the resource factor.
*/
GPUart_Retval gpuI_get_NrOfMultiprocessors(uint32* nrOfMultprocessors, uint32 resourceFactor)
{
GPUart_Retval retval = GPUART_SUCCESS;
hipDeviceProp_t deviceProp_s;
CUDA_CHECK_RETURN(hipGetDeviceProperties(&deviceProp_s, gpuI_deviceID_u8));
*nrOfMultprocessors = deviceProp_s.multiProcessorCount * resourceFactor;
max_costs_per_kernel = deviceProp_s.multiProcessorCount * resourceFactor;
printf("\nNumber of multiprocessors on the device: %d", *nrOfMultprocessors);
if(*nrOfMultprocessors == 0)
{
retval = GPUART_NO_SUCCESS;
}
return retval;
}
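/* Illustrative usage sketch (the variable name is a placeholder; with 2 SMs and a resource
 * factor of 4 the reported scheduling budget would be 8):
 *
 *   uint32 budget_u32 = 0u;
 *   (void)gpuI_get_NrOfMultiprocessors(&budget_u32, C_GPUS_RESOURCE_FACTOR);
 *   //budget_u32 == multiProcessorCount * C_GPUS_RESOURCE_FACTOR
 */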
/*************************************************************************************************
Function: gpuI_init()
Description: Initializes GPGPU Runtime, thus it initializes command_queues, device variables
and host variables.
*/
GPUart_Retval gpuI_init(void)
{
GPUart_Retval retval = GPUART_SUCCESS;
int deviceCount_u32 = 0;
CUDA_CHECK_RETURN(hipDeviceReset());
CUDA_CHECK_RETURN(hipGetDeviceCount(&deviceCount_u32));
for (int i = 0; i < deviceCount_u32; i++) {
hipDeviceProp_t prop;
CUDA_CHECK_RETURN(hipGetDeviceProperties(&prop, i));
if(prop.integrated)
{
printf("\nDevice %d with shared physical memory selected", i);
printf("\nMax Block Size: %d", prop.maxThreadsPerBlock);
printf("\nRegs per SM: %d", prop.regsPerMultiprocessor);
printf("\nShared memory per block: %lu", prop.sharedMemPerBlock);
gpuI_deviceID_u8 = i;
break;
}
}
CUDA_CHECK_RETURN(hipSetDevice(gpuI_deviceID_u8));
/* Initialize device configurations */
CUDA_CHECK_RETURN(hipSetDeviceFlags(hipDeviceMapHost));
CUDA_CHECK_RETURN(hipDeviceSynchronize());
/* Initialize command queues */
CUDA_CHECK_RETURN( hipStreamCreate(&memory_command_queue_s) );
CUDA_CHECK_RETURN( hipStreamCreate(&persistent_kernel_command_queue_s) );
/* Device only variables */
/* Sobel1 ***********************************************************************************************************/
/* Initialize synchronization flags*/
CUDA_CHECK_RETURN( hipMalloc( (void **)&sync_SOB1_flags_in_u32_g, C_SOB1_NUMBER_OF_BLOCKS * sizeof(uint32)) );
CUDA_CHECK_RETURN( hipMalloc( (void **)&sync_SOB1_flags_out_u32_g, C_SOB1_NUMBER_OF_BLOCKS * sizeof(uint32)) );
/* Initialize preemption managment variables*/
CUDA_CHECK_RETURN( hipHostMalloc( (void **)&preempt_SOB1_flag_host, sizeof(sint32)) );
CUDA_CHECK_RETURN( hipHostGetDevicePointer( (void **)&preempt_SOB1_flag_g, (void *)preempt_SOB1_flag_host, 0) );
CUDA_CHECK_RETURN( hipMalloc( (void **)&preempt_SOB1_flag_internal_g, sizeof(sint32)) );
CUDA_CHECK_RETURN( hipHostMalloc( (void **)&preempt_SOB1_sm_host, sizeof(sint32)) );
CUDA_CHECK_RETURN( hipHostGetDevicePointer( (void **)&preempt_SOB1_sm_g, (void *)preempt_SOB1_sm_host, 0) );
/* Initialize preemption buffer*/
CUDA_CHECK_RETURN( hipMalloc( (void **)&sob1_buffer_loop_counter_u32_g, C_SOB1_GLOBAL_WORK_SIZE * sizeof(uint32)) );
/* Sobel2 ***********************************************************************************************************/
/* Initialize synchronization flags*/
CUDA_CHECK_RETURN( hipMalloc( (void **)&sync_SOB2_flags_in_u32_g, C_SOB2_NUMBER_OF_BLOCKS * sizeof(uint32)) );
CUDA_CHECK_RETURN( hipMalloc( (void **)&sync_SOB2_flags_out_u32_g, C_SOB2_NUMBER_OF_BLOCKS * sizeof(uint32)) );
/* Initialize preemption managment variables*/
CUDA_CHECK_RETURN( hipHostMalloc( (void **)&preempt_SOB2_flag_host, sizeof(sint32)) );
CUDA_CHECK_RETURN( hipHostGetDevicePointer( (void **)&preempt_SOB2_flag_g, (void *)preempt_SOB2_flag_host, 0) );
CUDA_CHECK_RETURN( hipMalloc( (void **)&preempt_SOB2_flag_internal_g, sizeof(sint32)) );
CUDA_CHECK_RETURN( hipHostMalloc( (void **)&preempt_SOB2_sm_host, sizeof(sint32)) );
CUDA_CHECK_RETURN( hipHostGetDevicePointer( (void **)&preempt_SOB2_sm_g, (void *)preempt_SOB2_sm_host, 0) );
/* Initialize preemption buffer*/
CUDA_CHECK_RETURN( hipMalloc( (void **)&sob2_buffer_loop_counter_u32_g, C_SOB2_GLOBAL_WORK_SIZE * sizeof(uint32)) );
/* MatrMul *********************************************************************************************************/
/* Initialize synchronization flags*/
CUDA_CHECK_RETURN( hipMalloc( (void **)&sync_MM_flags_in_u32_g, C_MM_NUMBER_OF_BLOCKS * sizeof(uint32)) );
CUDA_CHECK_RETURN( hipMalloc( (void **)&sync_MM_flags_out_u32_g, C_MM_NUMBER_OF_BLOCKS * sizeof(uint32)) );
/* Initialize preemption managment variables*/
CUDA_CHECK_RETURN( hipHostMalloc( (void **)&preempt_MM_flag_host, sizeof(sint32)) );
CUDA_CHECK_RETURN( hipHostGetDevicePointer( (void **)&preempt_MM_flag_g, (void *)preempt_MM_flag_host, 0) );
CUDA_CHECK_RETURN( hipHostMalloc( (void **)&preempt_MM_sm_host, C_MM_NUMBER_OF_BLOCKS * sizeof(sint32)) );
CUDA_CHECK_RETURN( hipHostGetDevicePointer( (void **)&preempt_MM_sm_g, (void *)preempt_MM_sm_host, 0) );
/* Initialize preemption buffer*/
CUDA_CHECK_RETURN( hipMalloc( (void **)&mm_buffer_blockY_g, C_MM_NUMBER_OF_BLOCKS * sizeof(uint32)) );
CUDA_CHECK_RETURN( hipMalloc( (void **)&mm_buffer_blockX_g, C_MM_NUMBER_OF_BLOCKS * sizeof(uint32)) );
CUDA_CHECK_RETURN( hipMalloc( (void **)&mm_buffer_M_g, C_MM_NUMBER_OF_BLOCKS * sizeof(uint32)) );
/* Initialize persistent kernel management variables */
CUDA_CHECK_RETURN( hipHostMalloc( (void **) &perKer_isRunning_u32_host, sizeof(uint32)) );
CUDA_CHECK_RETURN( hipHostGetDevicePointer( (void **)&perKer_isRunning_u32_g, (void *)perKer_isRunning_u32_host, 0) );
CUDA_CHECK_RETURN( hipHostMalloc( (void **) &perKer_eventQueueCntDevice_u32_host, sizeof(uint32)) );
CUDA_CHECK_RETURN( hipHostGetDevicePointer( (void **)&perKer_eventQueueCntDevice_u32_g, (void *)perKer_eventQueueCntDevice_u32_host, 0) );
CUDA_CHECK_RETURN( hipHostMalloc( (void **) &perKer_eventQueueCntHost_u32_host, sizeof(uint32)) );
CUDA_CHECK_RETURN( hipHostGetDevicePointer( (void **)&perKer_eventQueueCntHost_u32_g, (void *)perKer_eventQueueCntHost_u32_host, 0) );
CUDA_CHECK_RETURN( hipHostMalloc( (void **) &perKer_eventQueue_s32_host, C_PERSISTENT_KERNEL_EVENT_QUEUE_LENGTH * sizeof(sint32)) );
CUDA_CHECK_RETURN( hipHostGetDevicePointer( (void **)&perKer_eventQueue_s32_g, (void *)perKer_eventQueue_s32_host, 0) );
CUDA_CHECK_RETURN( hipHostMalloc( (void **) &perKer_kernelTasksRunningStates_u32_host, E_KTID_NUMBER_OF_KERNEL_TASKS * sizeof(uint32)) );
CUDA_CHECK_RETURN( hipHostGetDevicePointer( (void **)&perKer_kernelTasksRunningStates_u32_g, (void *)perKer_kernelTasksRunningStates_u32_host, 0) );
/* Initialize global device application variables */
for(int i = 0; i < E_GM_TOTAL_NR_OF_GLOB_MEM_VARIABLES; i++ )
{
#ifdef S_USE_ZERO_COPY_FOR_GLOBAL_APPLICATION_MEMORY
CUDA_CHECK_RETURN( hipHostMalloc( (void **)global_memory_list_a[i].host_ptr, global_memory_list_a[i].mem_size) );
CUDA_CHECK_RETURN( hipHostGetDevicePointer( (void **)global_memory_list_a[i].mem_ptr, (void *) *global_memory_list_a[i].host_ptr, 0) );
#else
CUDA_CHECK_RETURN( hipMalloc((void **)global_memory_list_a[i].mem_ptr, global_memory_list_a[i].mem_size) );
#endif
}
//Initialize status variables
*perKer_isRunning_u32_host = 0;
*perKer_eventQueueCntDevice_u32_host = 0;
*perKer_eventQueueCntHost_u32_host = 0;
for(int i = 0; i < E_KTID_NUMBER_OF_KERNEL_TASKS; i++)
{
perKer_kernelTasksRunningStates_u32_host[i] = C_KERNEL_INIT;
if(device_preemption_flags_a[i] != NULL)
{
**device_preemption_flags_a[i] = C_FALSE;
}
if(device_kernel_task_SM_a[i] != NULL)
{
			//Reset all state machines of this kernel to their initial state
			memset((void *)*device_kernel_task_SM_a[i], 0, nb_of_StateMachines_in_kernel_a[i] * sizeof(sint32));
}
}
return retval;
}
//TODO: Once the persistent kernel has been started, a flag should be set which rejects writes to constant variables
/*************************************************************************************************
Function: gpuI_start()
Description: Start execution of persistent GPUart kernel.
*/
GPUart_Retval gpuI_start(void)
{
GPUart_Retval retval = GPUART_SUCCESS;
*perKer_isRunning_u32_host = C_TRUE; //After setting this flag constant memory writes are disabled
CUDA_CHECK_RETURN(hipDeviceSynchronize());
hipLaunchKernelGGL(( GPUart_Persistent_Kernel) , dim3(1), dim3(1), 0, persistent_kernel_command_queue_s,
perKer_isRunning_u32_g,
perKer_eventQueueCntDevice_u32_g,
perKer_eventQueueCntHost_u32_g,
perKer_eventQueue_s32_g,
perKer_kernelTasksRunningStates_u32_g,
//Sobel1 variables
sob1_matrix_in_s32_g,
sob1_matrix_out_s32_g,
//Sobel2 variables
sob2_matrix_in_s32_g,
sob2_matrix_out_s32_g,
//MM variables
mm_matrix_A_f32_g,
mm_matrix_B_f32_g,
mm_matrix_C_f32_g,
//Synchronization variables
sync_SOB1_flags_in_u32_g,
sync_SOB1_flags_out_u32_g,
sync_SOB2_flags_in_u32_g,
sync_SOB2_flags_out_u32_g,
sync_MM_flags_in_u32_g,
sync_MM_flags_out_u32_g,
//Preemption variables
preempt_SOB1_flag_g,
preempt_SOB1_flag_internal_g,
preempt_SOB1_sm_g,
preempt_SOB2_flag_g,
preempt_SOB2_flag_internal_g,
preempt_SOB2_sm_g,
preempt_MM_flag_g,
preempt_MM_sm_g,
//Buffer variables
//SOB1
sob1_buffer_loop_counter_u32_g,
//SOB2
sob2_buffer_loop_counter_u32_g,
//MM
mm_buffer_blockY_g,
mm_buffer_blockX_g,
mm_buffer_M_g
);
printf(".. started");
fflush(stdout);
return retval;
}
/*************************************************************************************************
Function: gpuI_stop()
Description: Stop execution of persistent GPUart kernel.
*/
GPUart_Retval gpuI_stop(void)
{
GPUart_Retval retval = GPUART_SUCCESS;
uint32 eventQueueCntHost_u32_l;
printf("\nSTOP PERSISTENT KERNEL");
//Calculate next position in persistent kernel event queue
eventQueueCntHost_u32_l = (*perKer_eventQueueCntHost_u32_host + 1) % C_PERSISTENT_KERNEL_EVENT_QUEUE_LENGTH;
//Set termination event
perKer_eventQueue_s32_host[eventQueueCntHost_u32_l] = C_PERSISTENT_KERNEL_TERMINATE;
//Make new event visible
*perKer_eventQueueCntHost_u32_host = eventQueueCntHost_u32_l;
return retval;
}
/*************************************************************************************************
Function: gpuI_destroy()
Description: Terminates GPUart.
Free dedicated or shared device memory. Destroy command_queues.
*/
GPUart_Retval gpuI_destroy(void)
{
GPUart_Retval retval = GPUART_SUCCESS;
CUDA_CHECK_RETURN(hipDeviceSynchronize());
/* Free global device variables */
for(int i = 0; i < (int)E_GM_TOTAL_NR_OF_GLOB_MEM_VARIABLES; i++ )
{
#ifdef S_USE_ZERO_COPY_FOR_GLOBAL_APPLICATION_MEMORY
CUDA_CHECK_RETURN( hipHostFree(*global_memory_list_a[i].host_ptr) );
#else
CUDA_CHECK_RETURN( hipFree(*global_memory_list_a[i].mem_ptr) );
#endif
}
/* Destroy device only variables */
/* Destroy persistent kernel variables */
CUDA_CHECK_RETURN(hipHostFree((void *)perKer_isRunning_u32_host));
CUDA_CHECK_RETURN(hipHostFree((void *)perKer_eventQueueCntDevice_u32_host));
CUDA_CHECK_RETURN(hipHostFree((void *)perKer_eventQueueCntHost_u32_host));
CUDA_CHECK_RETURN(hipHostFree((void *)perKer_eventQueue_s32_host));
CUDA_CHECK_RETURN(hipHostFree((void *)perKer_kernelTasksRunningStates_u32_host));
/* Destroy command queues */
CUDA_CHECK_RETURN( hipStreamDestroy(memory_command_queue_s) );
CUDA_CHECK_RETURN( hipStreamDestroy(persistent_kernel_command_queue_s) );
CUDA_CHECK_RETURN( hipDeviceReset());
return retval;
}
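/* Illustrative lifecycle sketch (ordering assumed from the functions above, not prescribed by
 * the original file):
 *
 *   gpuI_init();                                       //allocate buffers, create streams
 *   gpuI_start();                                      //launch the persistent kernel
 *   //... request and query jobs via gpuI_runJob() / gpuI_queryKernel*() ...
 *   gpuI_stop();                                       //enqueue the termination event
 *   while(*perKer_isRunning_u32_host == C_TRUE) { }    //wait until the daemon has returned
 *   gpuI_destroy();                                    //free memory, destroy streams
 */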
GPUart_Impl.cu
//Copyright (c) 2017-2018 Christoph A. Hartmann, Ulrich Margull and Technische Hochschule Ingolstadt (THI)
//
//Permission is hereby granted, free of charge, to any person obtaining a copy of this
//software and associated documentation files (the "Software"), to deal in the Software
//without restriction, including without limitation the rights to use, copy, modify,
//merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
//permit persons to whom the Software is furnished to do so, subject to the following
//conditions:
//
//The above copyright notice and this permission notice shall be included in all copies
//or substantial portions of the Software.
//
//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
//INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
//PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
//HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
//OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
//SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
/*
* File: GPUart_Impl.cu
* Created by: Christoph Hartmann
* Institute: Technische Hochschule Ingolstadt
* Date: 07.04.2017 */
/********************************************************
* ___ ___ ___ ___ 3 ___ *
* | | | | | |\ /| | | | | | *
* |___ | | |___| | \/ | | | |___ | | *
* | | | |\ | | | | | | | *
* | |___| | \ | | |___| ___| | |___ *
* *
*********************************************************/
/*! @file GPUart_Impl.cu
*
* @brief Implementation of the management component of the GPUart Implemenation layer.
*
* This file concentrates all GPGPU related memory declarations and allocations, memory transfers
* operations, kernel launches, kernel initialisations, and GPU related implementation details.
*
*
* @author Christoph Hartmann
* @date Created on: 7 Apr 2017
*/
/************************************************************************************************/
/* Includes */
/************************************************************************************************/
//include header of Implementation layer
#include "GPUart_Impl.cuh"
#include "GPUart_Impl.h"
//include interfaces to other GPUart layer
#include "GPUart_Impl_Abstr_IF.h"
#include "GPUart_Impl_Sched_IF.h"
//include kernel libraries
#include "GPUart_Sobel.cuh"
#include "GPUart_MatrMul.cuh"
/************************************************************************************************/
/* Compiler Switches */
/************************************************************************************************/
/*! @brief Use zero copy memory (requires integrated GPU)
*
* This MUST be defined so far, since memory transfers over PCIe are currently not implemented completely.
*
* @see http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#zero-copy-memory
* @see https://software.intel.com/en-us/articles/getting-the-most-from-opencl-12-how-to-increase-performance-by-minimizing-buffer-copies-on-intel-processor-graphics
*/
#define S_USE_ZERO_COPY_FOR_GLOBAL_APPLICATION_MEMORY
/************************************************************************************************/
/* Constants */
/************************************************************************************************/
/*!
* @brief The length of the Event Queue, shared between GPU and CPU, used for kernel launch events.
*
* @see perKer_eventQueueCntHost_u32_host
* @see perKer_eventQueueCntDevice_u32_host
* @see perKer_eventQueue_s32_host
*/
#define C_PERSISTENT_KERNEL_EVENT_QUEUE_LENGTH (10) //Length of event queue
/*!
* @brief Event ID to indicate a termination request for the persistent kernel
*
* @see perKer_eventQueueCntHost_u32_host
* @see perKer_eventQueueCntHost_u32_g
*/
#define C_PERSISTENT_KERNEL_TERMINATE (-1) //Event ID to terminate persistent kernel
/************************************************************************************************/
/* Typedef */
/************************************************************************************************/
/*!
* @brief Typedef for command queues (streams) to abstract GPGPU-API
*
* Command queues are required to improve the concurrency of memory and kernel operatation on the GPU.
*
* @see https://developer.download.nvidia.com/CUDA/training/StreamsAndConcurrencyWebinar.pdf
* @see https://www.khronos.org/registry/OpenCL/sdk/1.0/docs/man/xhtml/clCreateCommandQueue.html
*/
typedef cudaStream_t command_queue_s;
/*!
* @brief Typedef for a struct which combines global memory pointers, their related host pointers,
* and the size of the memory buffer.
*
*/
typedef struct
{
void ** mem_ptr;
void ** host_ptr;
size_t mem_size;
}device_global_memory_s;
/*!
* @brief Typedef for a struct which combines constant memory pointers and the size of the related memory buffer.
*
*/
typedef struct
{
void ** mem_ptr;
size_t mem_size;
}device_constant_memory_s;
/************************************************************************************************/
/* General Variables */
/************************************************************************************************/
/*!
* @brief The command queue (stream) for memory operations
*/
static command_queue_s memory_command_queue_s;
/*!
* @brief The command queue (stream) for the persistent kernel
*/
static command_queue_s persistent_kernel_command_queue_s;
/*! @var perKer_isRunning_u32_host
* @brief A status flag, which represents the running status of the persistent kernel (host pointer).
* @see perKer_isRunning_u32_g
*/
/*! @var perKer_isRunning_u32_g
* @brief A status flag, which represents the running status of the persistent kernel (device pointer).
* @see perKer_isRunning_u32_host
*/
volatile uint32 *perKer_isRunning_u32_host;
uint32 *perKer_isRunning_u32_g;
/*! @var perKer_eventQueueCntHost_u32_host
* @brief The index of the tail of the event queue for kernel launches written by the host (host pointer).
* @see perKer_eventQueueCntDevice_u32_g
*/
/*! @var perKer_eventQueueCntHost_u32_g
* @brief The index of the tail of the event queue for kernel launches written by the host (device pointer).
* @see perKer_eventQueueCntHost_u32_host
*/
volatile uint32 *perKer_eventQueueCntHost_u32_host;
uint32 *perKer_eventQueueCntHost_u32_g;
/*! @var perKer_eventQueueCntDevice_u32_host
* @brief The index of the head of the event queue for kernel launches written by the device (host pointer).
* @see perKer_eventQueueCntDevice_u32_g
*/
/*! @var perKer_eventQueueCntDevice_u32_g
* @brief The index of the head of the event queue for kernel launches written by the device (device pointer).
* @see perKer_eventQueueCntDevice_u32_host
*/
volatile uint32 *perKer_eventQueueCntDevice_u32_host;
uint32 *perKer_eventQueueCntDevice_u32_g;
/*! @var perKer_eventQueue_s32_host
 * @brief The event queue for kernel launch requests, written by the CPU and read by the GPU (host pointer).
*
* To request a kernel launch, write the kernel's ID (#kernel_task_id_e) into the tail of the queue.
* Write #C_PERSISTENT_KERNEL_TERMINATE to terminate the persistent kernel #GPUart_Persistent_Kernel.
* @see perKer_eventQueue_s32_g
*/
/*! @var perKer_eventQueue_s32_g
* @brief The event queue for kernel launch requests, written by the CPU and red by the GPU (device pointer).
*
* To request a kernel launch, write the kernel's ID (#kernel_task_id_e) into the tail of the queue.
* Write #C_PERSISTENT_KERNEL_TERMINATE to terminate the persistent kernel #GPUart_Persistent_Kernel.
* @see perKer_eventQueue_s32_host
*/
volatile sint32 *perKer_eventQueue_s32_host;
sint32 *perKer_eventQueue_s32_g;
/*! @var perKer_kernelTasksRunningStates_u32_host
* @brief A status flag, which represents the running status of each kernel (host pointer).
* @see perKer_kernelTasksRunningStates_u32_g
*/
/*! @var perKer_kernelTasksRunningStates_u32_g
* @brief A status flag, which represents the running status of each kernel (device pointer).
* @see perKer_kernelTasksRunningStates_u32_host
*/
volatile uint32 *perKer_kernelTasksRunningStates_u32_host;
uint32 *perKer_kernelTasksRunningStates_u32_g;
/*!
* @brief The allowed job cost per kernel
*
* This value is equal to m * µ, whereby m is the number of Streaming Multiprocessors of the GPU
* #gpuS_nrOfMultiprocessor_u32 and µ is the resource factor #C_GPUS_RESOURCE_FACTOR.
*
* @see kernel_task_id_e
* @see C_GPUS_RESOURCE_FACTOR
* @see gpuS_nrOfMultiprocessor_u32
* @see kernel_job_costs
*/
uint32 max_costs_per_kernel = 0;
/************************************************************************************************/
/* Kernel Task Variables - E_KTID_SOBEL1 */
/************************************************************************************************/
sint32 * sob1_matrix_in_s32_g, * sob1_matrix_in_s32_host;
sint32 * sob1_matrix_out_s32_g, * sob1_matrix_out_s32_host;
/* Synchronization variables */
uint32 * sync_SOB1_flags_in_u32_g;
uint32 * sync_SOB1_flags_out_u32_g;
/* Preemption related variables*/
sint32 * preempt_SOB1_flag_g;
volatile sint32 *preempt_SOB1_flag_host;
sint32 * preempt_SOB1_flag_internal_g;
sint32 * preempt_SOB1_sm_g;
volatile sint32 *preempt_SOB1_sm_host;
/* Buffer variables */
uint32 * sob1_buffer_loop_counter_u32_g;
/************************************************************************************************/
/* Kernel Task Variables - E_KTID_SOBEL2 */
/************************************************************************************************/
sint32 * sob2_matrix_in_s32_g, * sob2_matrix_in_s32_host;
sint32 * sob2_matrix_out_s32_g, * sob2_matrix_out_s32_host;
/* Synchronization variables */
uint32 * sync_SOB2_flags_in_u32_g;
uint32 * sync_SOB2_flags_out_u32_g;
/* Preemption related variables*/
sint32 * preempt_SOB2_flag_g;
volatile sint32 *preempt_SOB2_flag_host;
sint32 * preempt_SOB2_flag_internal_g;
sint32 * preempt_SOB2_sm_g;
volatile sint32 *preempt_SOB2_sm_host;
/* Buffer variables */
uint32 * sob2_buffer_loop_counter_u32_g;
/************************************************************************************************/
/* Kernel Task Variables - E_KTID_MM */
/************************************************************************************************/
float32 * mm_matrix_A_f32_g, * mm_matrix_A_f32_host;
float32 * mm_matrix_B_f32_g, * mm_matrix_B_f32_host;
float32 * mm_matrix_C_f32_g, * mm_matrix_C_f32_host;
/* Synchronization variables */
uint32 * sync_MM_flags_in_u32_g;
uint32 * sync_MM_flags_out_u32_g;
/* Preemption related variables*/
sint32 * preempt_MM_flag_g;
volatile sint32 *preempt_MM_flag_host;
sint32 * preempt_MM_sm_g;
volatile sint32 *preempt_MM_sm_host;
/* Buffer variables */
uint32 * mm_buffer_blockY_g;
uint32 * mm_buffer_blockX_g;
uint32 * mm_buffer_M_g;
/************************************************************************************************/
/* Constant Variable Table */
/************************************************************************************************/
/*!
* @brief The constant memory table
*
* All constant memory buffers which must be written during runtime must be defined here.
 * The i'th element represents the i'th constant memory buffer, defined by #device_constant_memory_id_e
 * in GPUart_Config.h. Each element must be defined in the following style: { (void **)& CONSTANT_BUFFER_NAME,
* SIZE_IN_BYTES }.
*
* @see device_constant_memory_id_e
*/
static device_constant_memory_s constant_memory_list_a[E_CM_TOTAL_NR_OF_CONST_MEM_VARIABLES] =
{
//{ (void **)& VARIABLE_NAME, SIZE IN BYTES }
};
/************************************************************************************************/
/* Global Variable Table */
/************************************************************************************************/
/*!
* @brief The global memory table
*
 * All global memory buffers which must be written or read during runtime must be defined here.
 * The i'th element represents the i'th global memory buffer, defined by #device_global_memory_id_e
 * in GPUart_Config.h. Each element must be defined in the following style: { (void **)&
* GLOBAL_MEMORY_BUFFER_POINTER_DEVICE, GLOBAL_MEMORY_BUFFER_POINTER_HOST, SIZE_IN_BYTES }.
*
* @see device_global_memory_id_e
*/
static device_global_memory_s global_memory_list_a[E_GM_TOTAL_NR_OF_GLOB_MEM_VARIABLES] =
{
/* Sobel1 */
{ (void **)&sob1_matrix_in_s32_g, (void **)&sob1_matrix_in_s32_host, C_SOB1_MATRIX_SIZE * sizeof(sint32) }, //E_GM_ID_SOB1_MATRIX_IN
{ (void **)&sob1_matrix_out_s32_g, (void **)&sob1_matrix_out_s32_host, C_SOB1_MATRIX_SIZE * sizeof(sint32) }, //E_GM_ID_SOB1_MATRIX_OUT
/* Sobel2 */
{ (void **)&sob2_matrix_in_s32_g, (void **)&sob2_matrix_in_s32_host, C_SOB2_MATRIX_SIZE * sizeof(sint32) }, //E_GM_ID_SOB2_MATRIX_IN
{ (void **)&sob2_matrix_out_s32_g, (void **)&sob2_matrix_out_s32_host, C_SOB2_MATRIX_SIZE * sizeof(sint32) }, //E_GM_ID_SOB2_MATRIX_OUT
/* MatrMul */
{ (void **)&mm_matrix_A_f32_g, (void **)&mm_matrix_A_f32_host, C_MM_MATRIX_TOTAL_SIZE * sizeof(sint32) }, //E_GM_ID_MM_MATRIX_A
{ (void **)&mm_matrix_B_f32_g, (void **)&mm_matrix_B_f32_host, C_MM_MATRIX_TOTAL_SIZE * sizeof(sint32) }, //E_GM_ID_MM_MATRIX_B
{ (void **)&mm_matrix_C_f32_g, (void **)&mm_matrix_C_f32_host, C_MM_MATRIX_TOTAL_SIZE * sizeof(sint32) } //E_GM_ID_MM_MATRIX_C
};
/************************************************************************************************/
/* Preemption Flag Table */
/************************************************************************************************/
/*!
* @brief The preemption flag table
*
* All preemption flags must be included by this table.
* The i'th element represents the i'th kernel, according to the enum #kernel_task_id_e
 * in GPUart_Config.h. Each element must be defined in the following style: (volatile sint32**)&
* NAME_OF_PREEMPTION_FLAG_POINTER. If a kernel does not implement a preemption flag, because it
* is non-preemptive, insert a NULL.
*
* @see kernel_task_id_e
*/
static volatile sint32** device_preemption_flags_a[E_KTID_NUMBER_OF_KERNEL_TASKS] =
{
(volatile sint32**) &preempt_SOB1_flag_host, //E_KTID_SOBEL1
(volatile sint32**) &preempt_SOB2_flag_host, //E_KTID_SOBEL2
(volatile sint32**) &preempt_MM_flag_host //E_KTID_MM
};
/************************************************************************************************/
/* Preemption Enabled Parameter Table */
/************************************************************************************************/
/*!
* @brief The preemption enabled table
*
* The i'th element represents the i'th kernel, according to the enum #kernel_task_id_e
 * in GPUart_Config.h. Each element must be defined in the following style: #C_TRUE if the related kernel
* is preemptive; #C_FALSE if the related kernel is non-preemptive.
*
* @see kernel_task_id_e
*/
const static sint32 preemption_enabled_a[E_KTID_NUMBER_OF_KERNEL_TASKS] =
{
C_TRUE, //E_KTID_SOBEL1
C_TRUE, //E_KTID_SOBEL2
C_TRUE //E_KTID_MM
};
/************************************************************************************************/
/* Kernel State Machine Table */
/************************************************************************************************/
/*!
* @brief The kernel state machine table
*
* The i'th element represents the i'th kernel, according to the enum #kernel_task_id_e
 * in GPUart_Config.h. Each element must be defined in the following style: &NAME_OF_STATE_MACHINE_POINTER.
* Use NULL if the related kernel is non-preemptive.
*
* @see kernel_task_id_e
*/
static volatile sint32** device_kernel_task_SM_a[E_KTID_NUMBER_OF_KERNEL_TASKS] =
{
&preempt_SOB1_sm_host, //E_KTID_SOBEL1
&preempt_SOB2_sm_host, //E_KTID_SOBEL2
&preempt_MM_sm_host //E_KTID_MM
};
/*!
* @brief The number of state machines table
*
* The i'th element represents the i'th kernel, according to the enum kernel_task_id_e
 * in GPUart_Config.h. Each element must be defined in the following style: NUMBER_OF_SM_IN_KERNEL.
* If a kernel preempts grid-synchronous then use the value 1u. If a kernel preempts thread-block
* synchronous then use the number of thread blocks of this kernel. If a kernel is non-preemptive
* then use 0u.
*
* @see kernel_task_id_e
*/
static uint32 nb_of_StateMachines_in_kernel_a[E_KTID_NUMBER_OF_KERNEL_TASKS] =
{
1u, //E_KTID_SOBEL1 -> Grid-wide preemption
1u, //E_KTID_SOBEL2 -> Grid-wide preemption
C_MM_NUMBER_OF_BLOCKS //E_KTID_MM -> Thread block-wide preemption
};
/************************************************************************************************/
/* Kernel Cost Table */
/************************************************************************************************/
/*!
* @brief The job cost table
*
* The i'th element represents the i'th kernel, according to the enum kernel_task_id_e
* in GPUart_Config.h. Each element represents the job costs of the related kernel.
 * If a thread block of a kernel requires more than 1/µ of the available registers, shared memory,
 * thread residency slots, or thread block residency slots of a Streaming Multiprocessor,
 * then set the corresponding value to m * µ, whereby µ is the resource factor and m is the GPU's
 * number of Streaming Multiprocessors. If a thread block of a kernel requires less than 1/µ of each
 * resource type, then set the corresponding value to the kernel's number of thread blocks.
*
* @see kernel_task_id_e
* @see C_GPUS_RESOURCE_FACTOR
* @see gpuS_nrOfMultiprocessor_u32
* @see max_costs_per_kernel
*/
static uint8 kernel_job_costs[E_KTID_NUMBER_OF_KERNEL_TASKS] =
{
C_SOB1_NUMBER_OF_BLOCKS, //E_KTID_SOBEL1
C_SOB2_NUMBER_OF_BLOCKS, //E_KTID_SOBEL2
C_MM_NUMBER_OF_BLOCKS //E_KTID_MM
};
/*!
* @brief The device ID of the used GPU
*
* @see http://docs.nvidia.com/cuda/cuda-c-programming-guide
* @see https://www.khronos.org/registry/OpenCL/sdk/1.0/docs/man/xhtml/clGetDeviceIDs.html
*/
static uint8 gpuI_deviceID_u8 = 0;
/************************************************************************************************/
/* Persistent Kernel */
/************************************************************************************************/
/*!
* @brief The persistent kernel (GPU Daemon) which is used to reduce kernel launch latencies.
*
* The kernel arguments must include all global memory buffers of all kernels in this system, since
 * this kernel is used to launch GPGPU kernels on demand. The persistent kernel reduces kernel launch
* latencies by bypassing the GPGPU driver stack when launching kernels.
*
* @see Mrozek et al. GPU Daemon: Road to zero cost submission, in Proceedings of the 4th International
* Workshop on OpenCL, Vienna, Austria, 2016 -> https://dl.acm.org/citation.cfm?id=2909450
*/
__global__ void GPUart_Persistent_Kernel
(
//Persistent Kernel Management Data
uint32* __restrict__ perKer_isRunning_u32_g,
uint32* __restrict__ perKer_eventQueueCntDevice_u32_g,
volatile uint32 * __restrict__ perKer_eventQueueCntHost_u32_g,
volatile sint32 * __restrict__ perKer_eventQueue_s32_g,
volatile uint32* __restrict__ perKer_kernelTasksRunningStates_u32_g,
//SOBEL1 Variables
sint32 * __restrict__ sob1_matrix_in_s32_g,
sint32 * __restrict__ sob1_matrix_out_s32_g,
//SOBEL2 Variables
sint32 * __restrict__ sob2_matrix_in_s32_g,
sint32 * __restrict__ sob2_matrix_out_s32_g,
//MM Variables
float32 * __restrict__ mm_matrix_A_f32_g,
float32 * __restrict__ mm_matrix_B_f32_g,
float32 * __restrict__ mm_matrix_C_f32_g,
/* Synchronization variables */
//SOBEL1
uint32 * __restrict__ sync_SOB1_flags_in_u32_g,
uint32 * __restrict__ sync_SOB1_flags_out_u32_g,
//SOBEL2
uint32 * __restrict__ sync_SOB2_flags_in_u32_g,
uint32 * __restrict__ sync_SOB2_flags_out_u32_g,
//MM
uint32 * __restrict__ sync_MM_flags_in_u32_g,
uint32 * __restrict__ sync_MM_flags_out_u32_g,
/* Preemption variables */
//SOB1
sint32 * __restrict__ preempt_SOB1_flag_g,
sint32 * __restrict__ preempt_SOB1_flag_internal_g,
sint32 * __restrict__ preempt_SOB1_sm_g,
//SOB2
sint32 * __restrict__ preempt_SOB2_flag_g,
sint32 * __restrict__ preempt_SOB2_flag_internal_g,
sint32 * __restrict__ preempt_SOB2_sm_g,
//MM
sint32 * __restrict__ preempt_MM_flag_g,
sint32 * __restrict__ preempt_MM_sm_g,
/* Buffer variables */
//SOB1
uint32 * __restrict__ sob1_buffer_loop_counter_u32_g,
//SOB2
uint32 * __restrict__ sob2_buffer_loop_counter_u32_g,
//MM
uint32 * __restrict__ mm_buffer_blockY_g,
uint32 * __restrict__ mm_buffer_blockX_g,
uint32 * __restrict__ mm_buffer_M_g
)
{
cudaStream_t stream_kernel_SOB1;
cudaStream_t stream_kernel_SOB2;
cudaStream_t stream_kernel_MM;
cudaStreamCreateWithFlags(&stream_kernel_SOB1, cudaStreamNonBlocking);
cudaStreamCreateWithFlags(&stream_kernel_SOB2, cudaStreamNonBlocking);
cudaStreamCreateWithFlags(&stream_kernel_MM, cudaStreamNonBlocking);
while(C_TRUE)
{
//Check if host has issued a new event to queue
if(*perKer_eventQueueCntDevice_u32_g != *perKer_eventQueueCntHost_u32_g)
{
//Calculate position of next available event in queue
*perKer_eventQueueCntDevice_u32_g = (*perKer_eventQueueCntDevice_u32_g + 1)
% C_PERSISTENT_KERNEL_EVENT_QUEUE_LENGTH;
//Interpret new event
switch(perKer_eventQueue_s32_g[*perKer_eventQueueCntDevice_u32_g])
{
case C_PERSISTENT_KERNEL_TERMINATE: //Terminate persistent Kernel
*perKer_isRunning_u32_g = C_FALSE;
return;
case E_KTID_SOBEL1:
__syncthreads();
Sobel_Kernel<<<C_SOB1_NUMBER_OF_BLOCKS, C_SOB1_LOCAL_WORK_SIZE, 0, stream_kernel_SOB1>>>
(
sob1_matrix_in_s32_g,
sob1_matrix_out_s32_g,
C_SOB1_HEIGHT,
C_SOB1_WIDTH,
//Preemption status variables
preempt_SOB1_flag_g,
preempt_SOB1_flag_internal_g,
preempt_SOB1_sm_g,
//Buffer variables
sob1_buffer_loop_counter_u32_g,
//Synchronization variables
sync_SOB1_flags_in_u32_g,
sync_SOB1_flags_out_u32_g,
/* Running status flag */
&perKer_kernelTasksRunningStates_u32_g[E_KTID_SOBEL1]
);
__syncthreads();
break;
case E_KTID_SOBEL2:
__syncthreads();
Sobel_Kernel<<<C_SOB2_NUMBER_OF_BLOCKS, C_SOB2_LOCAL_WORK_SIZE, 0, stream_kernel_SOB2>>>
(
sob2_matrix_in_s32_g,
sob2_matrix_out_s32_g,
C_SOB2_HEIGHT,
C_SOB2_WIDTH,
//Preemption status variables
preempt_SOB2_flag_g,
preempt_SOB2_flag_internal_g,
preempt_SOB2_sm_g,
//Buffer variables
sob2_buffer_loop_counter_u32_g,
//Synchronization variables
sync_SOB2_flags_in_u32_g,
sync_SOB2_flags_out_u32_g,
/* Running status flag */
&perKer_kernelTasksRunningStates_u32_g[E_KTID_SOBEL2]
);
__syncthreads();
break;
case E_KTID_MM:
__syncthreads();
dim3 dimGridMM(C_MM_NUMBER_OF_BLOCKS_X, C_MM_NUMBER_OF_BLOCKS_Y);
dim3 dimBlockMM(C_MM_LOCAL_WORK_SIZE_X, C_MM_LOCAL_WORK_SIZE_Y);
MatrMul_Kernel<<<dimGridMM, dimBlockMM, 0, stream_kernel_MM>>>
(
//Functional Data
mm_matrix_A_f32_g,
mm_matrix_B_f32_g,
mm_matrix_C_f32_g,
//Preemption Buffer
mm_buffer_blockY_g,
mm_buffer_blockX_g,
mm_buffer_M_g,
//Preemption Management
preempt_MM_flag_g,
preempt_MM_sm_g,
//Synchronization Flags
sync_MM_flags_in_u32_g,
sync_MM_flags_out_u32_g,
//Running status flag
&perKer_kernelTasksRunningStates_u32_g[E_KTID_MM]
);
__syncthreads();
break;
}
__threadfence_system();
}
}
}
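/* Note on the dispatch loop above: the persistent kernel runs as a single thread and polls the
host/device event queue, which lives in zero-copy (pinned, mapped) host memory. The host publishes
a new event by writing a kernel-task ID into perKer_eventQueue_s32_g and advancing the host
counter; the device consumes it by advancing its own counter and launching the corresponding child
kernel via dynamic parallelism on a dedicated non-blocking device-side stream, so the dispatcher
itself does not block on a running job. */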
/************************************************************************************************/
/* General function definition */
/************************************************************************************************/
/*! @brief Copy data from host memory to device memory.
*
* Device memory may be shared physical memory or discrete device memory. The device driver
* API call may depend on the type of device memory (global or texture memory).
*
* @param[in] void * variable_p -> The host variable to be copied
* @param[in] device_global_memory_id_e id_p -> The ID of the global memory variable
*
* @return GPUART_SUCCESS if the memory copy operation completed successfully.
* @return GPUART_ERROR_INVALID_ARGUMENT if id_p is an invalid ID.
*/
GPUart_Retval gpuI_memcpyHost2Device(void * variable_p, device_global_memory_id_e id_p)
{
GPUart_Retval retval = GPUART_SUCCESS;
device_global_memory_s device_memory;
if((id_p >= E_GM_TOTAL_NR_OF_GLOB_MEM_VARIABLES)||(variable_p == NULL))
{
retval = GPUART_ERROR_INVALID_ARGUMENT;
}
else
{
device_memory = global_memory_list_a[id_p];
#ifdef S_USE_ZERO_COPY_FOR_GLOBAL_APPLICATION_MEMORY
memcpy(*device_memory.host_ptr, variable_p, device_memory.mem_size);
#else
CUDA_CHECK_RETURN(cudaMemcpyAsync(*device_memory.mem_ptr, variable_p, device_memory.mem_size,
cudaMemcpyHostToDevice, memory_command_queue_s));
#endif
}
return retval;
}
/*! @brief Copy data from device memory to host memory.
*
* Device memory may be shared physical memory or discrete device memory. The device driver
* API call may depend on the type of device memory (global or texture memory).
*
* @param[out] void * variable_p -> The host variable to be written
* @param[in] device_global_memory_id_e id_p -> The ID of the global memory variable
*
* @return GPUART_SUCCESS if the memory copy operation completed successfully.
* @return GPUART_ERROR_INVALID_ARGUMENT if id_p is an invalid ID.
*/
GPUart_Retval gpuI_memcpyDevice2Host(void * variable_p, device_global_memory_id_e id_p)
{
GPUart_Retval retval = GPUART_SUCCESS;
device_global_memory_s device_memory;
if((id_p >= E_GM_TOTAL_NR_OF_GLOB_MEM_VARIABLES)||(variable_p == NULL))
{
retval = GPUART_ERROR_INVALID_ARGUMENT;
}
else
{
device_memory = global_memory_list_a[id_p];
#ifdef S_USE_ZERO_COPY_FOR_GLOBAL_APPLICATION_MEMORY
memcpy(variable_p, *device_memory.host_ptr, device_memory.mem_size);
#else
CUDA_CHECK_RETURN(cudaMemcpyAsync(variable_p, *device_memory.mem_ptr, device_memory.mem_size,
cudaMemcpyDeviceToHost, memory_command_queue_s));
#endif
}
return retval;
}
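// Illustrative usage sketch (not part of the original sources): E_GM_SOB1_MATRIX_IN and
// E_GM_SOB1_MATRIX_OUT are hypothetical IDs assumed to exist in device_global_memory_id_e with
// registered sizes matching the host buffers; error handling is omitted.
//	sint32 h_in[C_SOB1_HEIGHT * C_SOB1_WIDTH];
//	sint32 h_out[C_SOB1_HEIGHT * C_SOB1_WIDTH];
//	gpuI_memcpyHost2Device((void *)h_in, E_GM_SOB1_MATRIX_IN);
//	/* ... enqueue and run the Sobel job ... */
//	gpuI_memcpyDevice2Host((void *)h_out, E_GM_SOB1_MATRIX_OUT);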
/*************************************************************************************************
Function: gpuI_memcpyConstantMemory
Description: Copies data from host memory to constant device memory. The copy is only possible
if persistent GPUart kernel is not running, since a constant memory variable is
immutable during kernel execution and its value is inherited from parent to child
kernel.
*/
/*! @brief Copy data from host memory to constant device memory.
*
* The copy is only possible if persistent GPUart kernel #GPUart_Persistent_Kernel
* is not running, since constant memory data is immutable during kernel execution
* and its value is inherited from parent to child kernel.
*
* @param[in] void * variable_p -> The host variable to be copied
* @param[in] device_constant_memory_id_e id_p -> The ID of the constant memory buffer
*
* @return GPUART_SUCCESS if the memory copy operation completed successfully.
* @return GPUART_ERROR_INVALID_ARGUMENT if id_p is an invalid ID.
*/
GPUart_Retval gpuI_memcpyConstantMemory(void * variable_p, device_constant_memory_id_e id_p)
{
GPUart_Retval retval = GPUART_SUCCESS;
device_constant_memory_s device_memory;
if((id_p >= E_CM_TOTAL_NR_OF_CONST_MEM_VARIABLES) || (variable_p == NULL))
{
retval = GPUART_ERROR_INVALID_ARGUMENT;
}
else
{
if(*perKer_isRunning_u32_host == C_TRUE)
{
retval = GPUART_ERROR_PESISTENT_KERNEL_IS_RUNNING;
}
else
{
device_memory = constant_memory_list_a[id_p];
CUDA_CHECK_RETURN(cudaMemcpyToSymbolAsync(*device_memory.mem_ptr, variable_p, device_memory.mem_size, 0,
cudaMemcpyHostToDevice, memory_command_queue_s));
CUDA_CHECK_RETURN(cudaStreamSynchronize(memory_command_queue_s));
}
}
return retval;
}
/*!
* @brief Request the launch of a GPGPU kernel.
*
* @param kernel_task_id_e task_id_e -> The ID of the kernel to be launched.
*
* @return GPUART_SUCCESS if the kernel launch request was issued successfully.
* @return GPUART_ERROR_NOT_READY if launch request is already active.
*/
GPUart_Retval gpuI_runJob(kernel_task_id_e task_id_e)
{
GPUart_Retval retval = GPUART_SUCCESS;
uint32 eventQueueCntHost_u32_l;
uint32 kernelStatus = ((volatile uint32 *)perKer_kernelTasksRunningStates_u32_host)[task_id_e];
if((kernelStatus == C_KERNEL_SUSPENDED)||
(kernelStatus == C_KERNEL_READY)||
(kernelStatus == C_KERNEL_INIT))
{
perKer_kernelTasksRunningStates_u32_host[task_id_e] = C_KERNEL_ACTIVE;
//Reset preemption flag
if(device_preemption_flags_a[task_id_e] != NULL)
{
// printf("-> Resetting preemption flag for kernel %d", task_id_e);
**device_preemption_flags_a[task_id_e] = C_FALSE;
}
//Reset state machine
if((kernelStatus == C_KERNEL_READY)||(kernelStatus == C_KERNEL_INIT))
{
//Do not reset Kernel SM if kernel has been preempted
if(device_kernel_task_SM_a[task_id_e] != NULL)
{
//**device_kernel_task_SM_a[task_id_e] = 0; --> Old. Now, all SMs of a kernel are set to zero
memset((void *)*device_kernel_task_SM_a[task_id_e], 0, nb_of_StateMachines_in_kernel_a[task_id_e] * sizeof(sint32));
}
}
//Calculate next position in persistent kernel event queue
eventQueueCntHost_u32_l = (perKer_eventQueueCntHost_u32_host[0] + 1)
% C_PERSISTENT_KERNEL_EVENT_QUEUE_LENGTH;
//Set kernel call event
perKer_eventQueue_s32_host[eventQueueCntHost_u32_l] = task_id_e;
//Make new event visible
*perKer_eventQueueCntHost_u32_host = eventQueueCntHost_u32_l;
if((eventQueueCntHost_u32_l == UINT32_MAX )||(eventQueueCntHost_u32_l > C_PERSISTENT_KERNEL_EVENT_QUEUE_LENGTH))
{
printf("\nERROR: host event queue counter out of range");
}
}
else
{
retval = GPUART_ERROR_NOT_READY;
}
return retval;
}
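/* The event queue used above is a single-producer/single-consumer ring buffer of length
C_PERSISTENT_KERNEL_EVENT_QUEUE_LENGTH held in mapped host memory: gpuI_runJob() (producer) first
writes the task ID into the next slot and only then publishes it by storing the new counter value,
while the persistent kernel (consumer) detects new work by comparing its device counter against
the host counter. */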
/*************************************************************************************************
Function: gpuI_preemptJob
Description: Issue preemption of a specific kernel task
*/
GPUart_Retval gpuI_preemptJob(kernel_task_id_e task_id_p)
{
GPUart_Retval retval = GPUART_SUCCESS;
//Check if kernel task is preemptive
if(preemption_enabled_a[task_id_p] == C_TRUE)
{
//Set preemption flag
**device_preemption_flags_a[task_id_p] = C_TRUE;
}
else
{
//Kernel task is not preemptive -> no operation
retval = GPUART_ERROR_NO_OPERTATION;
}
return retval;
}
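// Illustrative preemption sequence as seen from a host-side scheduler (a sketch, not part of the
// original sources; error handling omitted):
//	gpuI_preemptJob(E_KTID_MM); //request that the matrix multiplication kernel saves its state
//	while(gpuI_queryKernelPreempted(E_KTID_MM) == C_FALSE) { /* poll */ }
//	/* ... run a higher-priority job ... */
//	gpuI_runJob(E_KTID_MM); //resume; the state machine of a suspended kernel is not reset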
/*************************************************************************************************
Function: gpuI_queryKernelIsRunning
Description: Query kernel running status.
Returns C_FALSE if the kernel task is not running.
Returns C_TRUE if the kernel task is still running.
*/
uint32 gpuI_queryKernelIsRunning(kernel_task_id_e task_id_e)
{
uint32 retval = C_TRUE;
//Query stream whether there is a running operation
if((perKer_kernelTasksRunningStates_u32_host[task_id_e] == C_KERNEL_TERMINATED_SUCESSFUL)||
(perKer_kernelTasksRunningStates_u32_host[task_id_e] == C_KERNEL_SUSPENDED)||
(perKer_kernelTasksRunningStates_u32_host[task_id_e] == C_KERNEL_INIT))
{
//Kernel task is not running -> success
retval = C_FALSE;
}
else
{
//Kernel is still running
retval = C_TRUE;
}
return retval;
}
/*************************************************************************************************
Function: gpuI_queryKernelTerminatedSuccessful
Description: Query whether the kernel task has terminated successfully.
Returns C_TRUE if the kernel task terminated successfully.
Returns C_FALSE otherwise.
*/
uint32 gpuI_queryKernelTerminatedSuccessful(kernel_task_id_e task_id_e)
{
uint32 retval = C_TRUE;
//Query stream whether there is a running operation
if(perKer_kernelTasksRunningStates_u32_host[task_id_e] == C_KERNEL_TERMINATED_SUCESSFUL)
{
//Kernel task is not running -> success
}
else
{
//Kernel is still running
retval = C_FALSE;
}
return retval;
}
/*************************************************************************************************
Function: gpuI_queryKernelPreempted
Description: Query kernel preemption status.
Returns C_TRUE if the kernel task has been preempted (suspended).
Returns C_FALSE otherwise.
*/
uint32 gpuI_queryKernelPreempted(kernel_task_id_e task_id_e)
{
uint32 retval = C_TRUE;
//Query stream whether there is a running operation
if(perKer_kernelTasksRunningStates_u32_host[task_id_e] == C_KERNEL_SUSPENDED)
{
//Kernel task is not running -> success
}
else
{
//Kernel is still running
retval = C_FALSE;
}
return retval;
}
/*************************************************************************************************
Function: gpuI_getJobCosts
Description: Returns the number of thread blocks, i.e. the number of Multiprocessors used for
this kernel.
*/
uint32 gpuI_getJobCosts(kernel_task_id_e task_id_e)
{
uint32 retval = kernel_job_costs[task_id_e];
if(retval > max_costs_per_kernel)
{
retval = max_costs_per_kernel;
}
return retval;
}
/*************************************************************************************************
Function: gpuI_SetKernelStatusReady
Description: Sets the internal status of the corresponding kernel to ready. This function is
called after a new job has been enqueued.
*/
GPUart_Retval gpuI_SetKernelStatusReady(kernel_task_id_e task_id_e)
{
GPUart_Retval retval = GPUART_SUCCESS;
perKer_kernelTasksRunningStates_u32_host[task_id_e] = C_KERNEL_READY;
return retval;
}
/*************************************************************************************************
Function: gpuI_get_NrOfMultiprocessors
Description: Determines the number of multiprocessors on the device, scales it by resourceFactor,
and uses the result as the maximum costs per kernel.
*/
GPUart_Retval gpuI_get_NrOfMultiprocessors(uint32* nrOfMultprocessors, uint32 resourceFactor)
{
GPUart_Retval retval = GPUART_SUCCESS;
cudaDeviceProp deviceProp_s;
CUDA_CHECK_RETURN(cudaGetDeviceProperties(&deviceProp_s, gpuI_deviceID_u8));
*nrOfMultprocessors = deviceProp_s.multiProcessorCount * resourceFactor;
max_costs_per_kernel = deviceProp_s.multiProcessorCount * resourceFactor;
printf("\nNumber of multiprocessors on the device: %d", *nrOfMultprocessors);
if(*nrOfMultprocessors == 0)
{
retval = GPUART_NO_SUCCESS;
}
return retval;
}
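/* Scheduling note: job costs are expressed in thread blocks (see gpuI_getJobCosts) and are clamped
to max_costs_per_kernel, i.e. to multiProcessorCount * resourceFactor as determined here, so a
single kernel can never claim more resources than the device exposes to the scheduler. */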
/*************************************************************************************************
Function: gpuI_init()
Description: Initializes the GPGPU runtime, i.e. it initializes command queues, device variables
and host variables.
*/
GPUart_Retval gpuI_init(void)
{
GPUart_Retval retval = GPUART_SUCCESS;
int deviceCount_u32 = 0;
CUDA_CHECK_RETURN(cudaThreadExit());
CUDA_CHECK_RETURN(cudaGetDeviceCount(&deviceCount_u32));
for (int i = 0; i < deviceCount_u32; i++) {
cudaDeviceProp prop;
CUDA_CHECK_RETURN(cudaGetDeviceProperties(&prop, i));
if(prop.integrated)
{
printf("\nDevice %d with shared physical memory selected", i);
printf("\nMax Block Size: %d", prop.maxThreadsPerBlock);
printf("\nRegs per SM: %d", prop.regsPerMultiprocessor);
printf("\nShared memory per block: %lu", prop.sharedMemPerBlock);
gpuI_deviceID_u8 = i;
break;
}
}
CUDA_CHECK_RETURN(cudaSetDevice(gpuI_deviceID_u8));
/* Initialize device configurations */
CUDA_CHECK_RETURN(cudaSetDeviceFlags(cudaDeviceMapHost));
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
/* Initialize command queues */
CUDA_CHECK_RETURN( cudaStreamCreate(&memory_command_queue_s) );
CUDA_CHECK_RETURN( cudaStreamCreate(&persistent_kernel_command_queue_s) );
/* Device only variables */
/* Sobel1 ***********************************************************************************************************/
/* Initialize synchronization flags*/
CUDA_CHECK_RETURN( cudaMalloc( (void **)&sync_SOB1_flags_in_u32_g, C_SOB1_NUMBER_OF_BLOCKS * sizeof(uint32)) );
CUDA_CHECK_RETURN( cudaMalloc( (void **)&sync_SOB1_flags_out_u32_g, C_SOB1_NUMBER_OF_BLOCKS * sizeof(uint32)) );
/* Initialize preemption management variables*/
CUDA_CHECK_RETURN( cudaMallocHost( (void **)&preempt_SOB1_flag_host, sizeof(sint32)) );
CUDA_CHECK_RETURN( cudaHostGetDevicePointer( (void **)&preempt_SOB1_flag_g, (void *)preempt_SOB1_flag_host, 0) );
CUDA_CHECK_RETURN( cudaMalloc( (void **)&preempt_SOB1_flag_internal_g, sizeof(sint32)) );
CUDA_CHECK_RETURN( cudaMallocHost( (void **)&preempt_SOB1_sm_host, sizeof(sint32)) );
CUDA_CHECK_RETURN( cudaHostGetDevicePointer( (void **)&preempt_SOB1_sm_g, (void *)preempt_SOB1_sm_host, 0) );
/* Initialize preemption buffer*/
CUDA_CHECK_RETURN( cudaMalloc( (void **)&sob1_buffer_loop_counter_u32_g, C_SOB1_GLOBAL_WORK_SIZE * sizeof(uint32)) );
/* Sobel2 ***********************************************************************************************************/
/* Initialize synchronization flags*/
CUDA_CHECK_RETURN( cudaMalloc( (void **)&sync_SOB2_flags_in_u32_g, C_SOB2_NUMBER_OF_BLOCKS * sizeof(uint32)) );
CUDA_CHECK_RETURN( cudaMalloc( (void **)&sync_SOB2_flags_out_u32_g, C_SOB2_NUMBER_OF_BLOCKS * sizeof(uint32)) );
/* Initialize preemption management variables*/
CUDA_CHECK_RETURN( cudaMallocHost( (void **)&preempt_SOB2_flag_host, sizeof(sint32)) );
CUDA_CHECK_RETURN( cudaHostGetDevicePointer( (void **)&preempt_SOB2_flag_g, (void *)preempt_SOB2_flag_host, 0) );
CUDA_CHECK_RETURN( cudaMalloc( (void **)&preempt_SOB2_flag_internal_g, sizeof(sint32)) );
CUDA_CHECK_RETURN( cudaMallocHost( (void **)&preempt_SOB2_sm_host, sizeof(sint32)) );
CUDA_CHECK_RETURN( cudaHostGetDevicePointer( (void **)&preempt_SOB2_sm_g, (void *)preempt_SOB2_sm_host, 0) );
/* Initialize preemption buffer*/
CUDA_CHECK_RETURN( cudaMalloc( (void **)&sob2_buffer_loop_counter_u32_g, C_SOB2_GLOBAL_WORK_SIZE * sizeof(uint32)) );
/* MatrMul *********************************************************************************************************/
/* Initialize synchronization flags*/
CUDA_CHECK_RETURN( cudaMalloc( (void **)&sync_MM_flags_in_u32_g, C_MM_NUMBER_OF_BLOCKS * sizeof(uint32)) );
CUDA_CHECK_RETURN( cudaMalloc( (void **)&sync_MM_flags_out_u32_g, C_MM_NUMBER_OF_BLOCKS * sizeof(uint32)) );
/* Initialize preemption management variables*/
CUDA_CHECK_RETURN( cudaMallocHost( (void **)&preempt_MM_flag_host, sizeof(sint32)) );
CUDA_CHECK_RETURN( cudaHostGetDevicePointer( (void **)&preempt_MM_flag_g, (void *)preempt_MM_flag_host, 0) );
CUDA_CHECK_RETURN( cudaMallocHost( (void **)&preempt_MM_sm_host, C_MM_NUMBER_OF_BLOCKS * sizeof(sint32)) );
CUDA_CHECK_RETURN( cudaHostGetDevicePointer( (void **)&preempt_MM_sm_g, (void *)preempt_MM_sm_host, 0) );
/* Initialize preemption buffer*/
CUDA_CHECK_RETURN( cudaMalloc( (void **)&mm_buffer_blockY_g, C_MM_NUMBER_OF_BLOCKS * sizeof(uint32)) );
CUDA_CHECK_RETURN( cudaMalloc( (void **)&mm_buffer_blockX_g, C_MM_NUMBER_OF_BLOCKS * sizeof(uint32)) );
CUDA_CHECK_RETURN( cudaMalloc( (void **)&mm_buffer_M_g, C_MM_NUMBER_OF_BLOCKS * sizeof(uint32)) );
/* Initialize persistent kernel management variables */
CUDA_CHECK_RETURN( cudaMallocHost( (void **) &perKer_isRunning_u32_host, sizeof(uint32)) );
CUDA_CHECK_RETURN( cudaHostGetDevicePointer( (void **)&perKer_isRunning_u32_g, (void *)perKer_isRunning_u32_host, 0) );
CUDA_CHECK_RETURN( cudaMallocHost( (void **) &perKer_eventQueueCntDevice_u32_host, sizeof(uint32)) );
CUDA_CHECK_RETURN( cudaHostGetDevicePointer( (void **)&perKer_eventQueueCntDevice_u32_g, (void *)perKer_eventQueueCntDevice_u32_host, 0) );
CUDA_CHECK_RETURN( cudaMallocHost( (void **) &perKer_eventQueueCntHost_u32_host, sizeof(uint32)) );
CUDA_CHECK_RETURN( cudaHostGetDevicePointer( (void **)&perKer_eventQueueCntHost_u32_g, (void *)perKer_eventQueueCntHost_u32_host, 0) );
CUDA_CHECK_RETURN( cudaMallocHost( (void **) &perKer_eventQueue_s32_host, C_PERSISTENT_KERNEL_EVENT_QUEUE_LENGTH * sizeof(sint32)) );
CUDA_CHECK_RETURN( cudaHostGetDevicePointer( (void **)&perKer_eventQueue_s32_g, (void *)perKer_eventQueue_s32_host, 0) );
CUDA_CHECK_RETURN( cudaMallocHost( (void **) &perKer_kernelTasksRunningStates_u32_host, E_KTID_NUMBER_OF_KERNEL_TASKS * sizeof(uint32)) );
CUDA_CHECK_RETURN( cudaHostGetDevicePointer( (void **)&perKer_kernelTasksRunningStates_u32_g, (void *)perKer_kernelTasksRunningStates_u32_host, 0) );
/* Initialize global device application variables */
for(int i = 0; i < E_GM_TOTAL_NR_OF_GLOB_MEM_VARIABLES; i++ )
{
#ifdef S_USE_ZERO_COPY_FOR_GLOBAL_APPLICATION_MEMORY
CUDA_CHECK_RETURN( cudaMallocHost( (void **)global_memory_list_a[i].host_ptr, global_memory_list_a[i].mem_size) );
CUDA_CHECK_RETURN( cudaHostGetDevicePointer( (void **)global_memory_list_a[i].mem_ptr, (void *) *global_memory_list_a[i].host_ptr, 0) );
#else
CUDA_CHECK_RETURN( cudaMalloc((void **)global_memory_list_a[i].mem_ptr, global_memory_list_a[i].mem_size) );
#endif
}
//Initialize status variables
*perKer_isRunning_u32_host = 0;
*perKer_eventQueueCntDevice_u32_host = 0;
*perKer_eventQueueCntHost_u32_host = 0;
for(int i = 0; i < E_KTID_NUMBER_OF_KERNEL_TASKS; i++)
{
perKer_kernelTasksRunningStates_u32_host[i] = C_KERNEL_INIT;
if(device_preemption_flags_a[i] != NULL)
{
**device_preemption_flags_a[i] = C_FALSE;
}
if(device_kernel_task_SM_a[i] != NULL)
{
//Reset all state machines of this kernel
memset((void *)*device_kernel_task_SM_a[i], 0, nb_of_StateMachines_in_kernel_a[i] * sizeof(sint32));
}
}
return retval;
}
//TODO: Once the persistent kernel has been started, a flag should be set that rejects writes to constant memory variables
/*************************************************************************************************
Function: gpuI_start()
Description: Start execution of persistent GPUart kernel.
*/
GPUart_Retval gpuI_start(void)
{
GPUart_Retval retval = GPUART_SUCCESS;
*perKer_isRunning_u32_host = C_TRUE; //After setting this flag constant memory writes are disabled
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
GPUart_Persistent_Kernel <<<1, 1, 0, persistent_kernel_command_queue_s>>>
(
perKer_isRunning_u32_g,
perKer_eventQueueCntDevice_u32_g,
perKer_eventQueueCntHost_u32_g,
perKer_eventQueue_s32_g,
perKer_kernelTasksRunningStates_u32_g,
//Sobel1 variables
sob1_matrix_in_s32_g,
sob1_matrix_out_s32_g,
//Sobel2 variables
sob2_matrix_in_s32_g,
sob2_matrix_out_s32_g,
//MM variables
mm_matrix_A_f32_g,
mm_matrix_B_f32_g,
mm_matrix_C_f32_g,
//Synchronization variables
sync_SOB1_flags_in_u32_g,
sync_SOB1_flags_out_u32_g,
sync_SOB2_flags_in_u32_g,
sync_SOB2_flags_out_u32_g,
sync_MM_flags_in_u32_g,
sync_MM_flags_out_u32_g,
//Preemption variables
preempt_SOB1_flag_g,
preempt_SOB1_flag_internal_g,
preempt_SOB1_sm_g,
preempt_SOB2_flag_g,
preempt_SOB2_flag_internal_g,
preempt_SOB2_sm_g,
preempt_MM_flag_g,
preempt_MM_sm_g,
//Buffer variables
//SOB1
sob1_buffer_loop_counter_u32_g,
//SOB2
sob2_buffer_loop_counter_u32_g,
//MM
mm_buffer_blockY_g,
mm_buffer_blockX_g,
mm_buffer_M_g
);
printf(".. started");
fflush(stdout);
return retval;
}
/*************************************************************************************************
Function: gpuI_stop()
Description: Stop execution of persistent GPUart kernel.
*/
GPUart_Retval gpuI_stop(void)
{
GPUart_Retval retval = GPUART_SUCCESS;
uint32 eventQueueCntHost_u32_l;
printf("\nSTOP PERSISTENT KERNEL");
//Calculate next position in persistent kernel event queue
eventQueueCntHost_u32_l = (*perKer_eventQueueCntHost_u32_host + 1) % C_PERSISTENT_KERNEL_EVENT_QUEUE_LENGTH;
//Set termination event
perKer_eventQueue_s32_host[eventQueueCntHost_u32_l] = C_PERSISTENT_KERNEL_TERMINATE;
//Make new event visible
*perKer_eventQueueCntHost_u32_host = eventQueueCntHost_u32_l;
return retval;
}
/*************************************************************************************************
Function: gpuI_destroy()
Description: Terminates GPUart.
Frees dedicated or shared device memory and destroys the command queues.
*/
GPUart_Retval gpuI_destroy(void)
{
GPUart_Retval retval = GPUART_SUCCESS;
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
/* Free global device variables */
for(int i = 0; i < (int)E_GM_TOTAL_NR_OF_GLOB_MEM_VARIABLES; i++ )
{
#ifdef S_USE_ZERO_COPY_FOR_GLOBAL_APPLICATION_MEMORY
CUDA_CHECK_RETURN( cudaFreeHost(*global_memory_list_a[i].host_ptr) );
#else
CUDA_CHECK_RETURN( cudaFree(*global_memory_list_a[i].mem_ptr) );
#endif
}
/* Destroy device only variables */
/* Destroy persistent kernel variables */
CUDA_CHECK_RETURN(cudaFreeHost((void *)perKer_isRunning_u32_host));
CUDA_CHECK_RETURN(cudaFreeHost((void *)perKer_eventQueueCntDevice_u32_host));
CUDA_CHECK_RETURN(cudaFreeHost((void *)perKer_eventQueueCntHost_u32_host));
CUDA_CHECK_RETURN(cudaFreeHost((void *)perKer_eventQueue_s32_host));
CUDA_CHECK_RETURN(cudaFreeHost((void *)perKer_kernelTasksRunningStates_u32_host));
/* Destroy command queues */
CUDA_CHECK_RETURN( cudaStreamDestroy(memory_command_queue_s) );
CUDA_CHECK_RETURN( cudaStreamDestroy(persistent_kernel_command_queue_s) );
CUDA_CHECK_RETURN( cudaDeviceReset());
return retval;
}
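// Typical lifetime of this module, as suggested by the functions above (a sketch, not part of the
// original sources):
//	gpuI_init();                    //allocate device and pinned memory, create command queues
//	gpuI_memcpyConstantMemory(...); //only allowed while the persistent kernel is stopped
//	gpuI_start();                   //launch GPUart_Persistent_Kernel
//	/* ... gpuI_runJob(), gpuI_preemptJob(), gpuI_memcpyHost2Device(), ... */
//	gpuI_stop();                    //enqueue C_PERSISTENT_KERNEL_TERMINATE
//	gpuI_destroy();                 //free memory, destroy command queues, reset the device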
|
24ad61e4079391c52befa5f4e43fa11562fc4edc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file update_k.cu
* \brief Kernel declarations for optical flow update computation.
* \copyright 2015, Juan David Adarve, ANU. See AUTHORS for more details
* \license 3-clause BSD, see LICENSE for more details
*/
#include "flowfilter/gpu/device/image_k.h"
#include "flowfilter/gpu/device/update_k.h"
namespace flowfilter {
namespace gpu {
__global__ void flowUpdate_k(gpuimage_t<float> newImage,
gpuimage_t<float2> newImageGradient,
gpuimage_t<float> oldImage, gpuimage_t<float2> oldFlow,
gpuimage_t<float> imageUpdated, gpuimage_t<float2> flowUpdated,
const float gamma, const float maxflow) {
const int height = flowUpdated.height;
const int width = flowUpdated.width;
// pixel coordinate
const int2 pix = make_int2(blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y);
if(pix.x >= width || pix.y >= height) {
return;
}
// read elements from the different arrays
float2 a1 = *coordPitch(newImageGradient, pix);
float a0 = *coordPitch(newImage, pix);
float a0old = *coordPitch(oldImage, pix);
float2 ofOld = *coordPitch(oldFlow, pix);
//#################################
// FLOW UPDATE
//#################################
// temporal derivative
float Yt = a0old - a0;
float ax2 = a1.x*a1.x;
float ay2 = a1.y*a1.y;
// elements of the adjugate matrix of M
float N00 = gamma + ay2;
float N01 = -a1.x*a1.y;
float N10 = N01;
float N11 = gamma + ax2;
// reciprocal determinant of M
float rdetM = 1.0f / (gamma*(gamma + ax2 + ay2));
// q vector components
float qx = gamma*ofOld.x + a1.x*Yt;
float qy = gamma*ofOld.y + a1.y*Yt;
// computes the updated optical flow
float2 ofNew = make_float2( (N00*qx + N01*qy)*rdetM,
(N10*qx + N11*qy)*rdetM);
// truncates the flow to lie in its allowed interval
ofNew.x = max(-maxflow, min(ofNew.x, maxflow));
ofNew.y = max(-maxflow, min(ofNew.y, maxflow));
// sanitize the output
ofNew.x = isinf(ofNew.x) + isnan(ofNew.x) > 0? 0.0f : ofNew.x;
ofNew.y = isinf(ofNew.y) + isnan(ofNew.y) > 0? 0.0f : ofNew.y;
//#################################
// LOW TEXTURE RESULTS
//#################################
// if ((a1.x*a1.x)<0.0001 && (a1.y*a1.y)<0.0001)
// {
// ofNew.x = 0;
// ofNew.y = 0;
// }
//#################################
// PACK RESULTS
//#################################
*coordPitch(flowUpdated, pix) = ofNew;
*coordPitch(imageUpdated, pix) = a0;
}
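/* In matrix form, the update above solves the regularized least-squares problem
     minimize over f:  gamma*||f - f_old||^2 + (Yt - dot(a1,f))^2,
with a1 the new image gradient and Yt = a0old - a0 the temporal derivative. The normal equations
are M*f = q with M = gamma*I + a1*a1^T and q = gamma*f_old + a1*Yt; since M is 2x2, f is obtained
in closed form as adj(M)*q / det(M), where N00..N11 above are the entries of adj(M) and
det(M) = gamma*(gamma + ax^2 + ay^2). */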
__global__ void deltaFlowUpdate_k(gpuimage_t<float> newImage,
gpuimage_t<float2> newImageGradient,
gpuimage_t<float> oldImage,
gpuimage_t<float2> oldDeltaFlow,
hipTextureObject_t oldFlowTexture,
gpuimage_t<float> imageUpdated,
gpuimage_t<float2> deltaFlowUpdated,
gpuimage_t<float2> flowUpdated,
const float gamma, const float maxflow) {
const int height = flowUpdated.height;
const int width = flowUpdated.width;
// pixel coordinate
const int2 pix = make_int2(blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y);
if(pix.x >= width || pix.y >= height) {
return;
}
// read elements from the different arrays
float2 a1 = *coordPitch(newImageGradient, pix);
float a0 = *coordPitch(newImage, pix);
float a0old = *coordPitch(oldImage, pix);
float2 deltaFlowOld = *coordPitch(oldDeltaFlow, pix);
//#################################
// FLOW UPDATE
//#################################
// temporal derivative
float Yt = a0old - a0;
float ax2 = a1.x*a1.x;
float ay2 = a1.y*a1.y;
// elements of the adjugate matrix of M
float N00 = gamma + ay2;
float N01 = -a1.x*a1.y;
float N10 = N01;
float N11 = gamma + ax2;
// reciprocal determinant of M
float rdetM = 1.0f / (gamma*(gamma + ax2 + ay2));
// q vector components
float qx = gamma*deltaFlowOld.x + a1.x*Yt;
float qy = gamma*deltaFlowOld.y + a1.y*Yt;
// computes updated optical flow
float2 dFlowNew = make_float2( (N00*qx + N01*qy)*rdetM,
(N10*qx + N11*qy)*rdetM);
// sanitize output
dFlowNew.x = isinf(dFlowNew.x) + isnan(dFlowNew.x) > 0? 0.0f : dFlowNew.x;
dFlowNew.y = isinf(dFlowNew.y) + isnan(dFlowNew.y) > 0? 0.0f : dFlowNew.y;
// truncates dflow to lie in its allowed interval
dFlowNew.x = max(-0.5f*maxflow, min(dFlowNew.x, 0.5f*maxflow));
dFlowNew.y = max(-0.5f*maxflow, min(dFlowNew.y, 0.5f*maxflow));
//#################################
// OPTICAL FLOW COMPUTATION
//#################################
// read upper level flow
// normalized texture coordinates
float u = (float)pix.x / (float)(width -1);
float v = (float)pix.y / (float)(height -1);
// linear interpolation of flow value
float2 fup = tex2D<float2>(oldFlowTexture, u, v);
float2 flowUp = make_float2(2.0*fup.x, 2.0*fup.y);
// update upsampled flow from top level
float2 flowNew = make_float2(dFlowNew.x + flowUp.x,
dFlowNew.y + flowUp.y);
// truncates flow to lie in its allowed interval
flowNew.x = max(-maxflow, min(flowNew.x, maxflow));
flowNew.y = max(-maxflow, min(flowNew.y, maxflow));
if ((a1.x*a1.x)<0.00005 && (a1.y*a1.y)<0.00005)
{
// dFlowNew.x = 0;
// dFlowNew.y = 0;
flowNew.x = 0;
flowNew.y = 0;
}
//#################################
// PACK RESULTS
//#################################
*coordPitch(deltaFlowUpdated, pix) = dFlowNew;
*coordPitch(flowUpdated, pix) = flowNew;
*coordPitch(imageUpdated, pix) = a0;
}
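/* deltaFlowUpdate_k is the pyramidal variant of the update: it estimates only the flow increment
at this level (clamped to +/- 0.5*maxflow), reads the coarser-level flow through oldFlowTexture
using normalized coordinates (hardware bilinear interpolation), doubles it to account for the
resolution change, adds the increment, and zeroes the flow at pixels with a near-zero gradient. */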
}; // namespace gpu
}; // namespace flowfilter
| 24ad61e4079391c52befa5f4e43fa11562fc4edc.cu | /**
* \file update_k.cu
* \brief Kernel declarations for optical flow update computation.
* \copyright 2015, Juan David Adarve, ANU. See AUTHORS for more details
* \license 3-clause BSD, see LICENSE for more details
*/
#include "flowfilter/gpu/device/image_k.h"
#include "flowfilter/gpu/device/update_k.h"
namespace flowfilter {
namespace gpu {
__global__ void flowUpdate_k(gpuimage_t<float> newImage,
gpuimage_t<float2> newImageGradient,
gpuimage_t<float> oldImage, gpuimage_t<float2> oldFlow,
gpuimage_t<float> imageUpdated, gpuimage_t<float2> flowUpdated,
const float gamma, const float maxflow) {
const int height = flowUpdated.height;
const int width = flowUpdated.width;
// pixel coordinate
const int2 pix = make_int2(blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y);
if(pix.x >= width || pix.y >= height) {
return;
}
// read elements from the different arrays
float2 a1 = *coordPitch(newImageGradient, pix);
float a0 = *coordPitch(newImage, pix);
float a0old = *coordPitch(oldImage, pix);
float2 ofOld = *coordPitch(oldFlow, pix);
//#################################
// FLOW UPDATE
//#################################
// temporal derivative
float Yt = a0old - a0;
float ax2 = a1.x*a1.x;
float ay2 = a1.y*a1.y;
// elements of the adjugate matrix of M
float N00 = gamma + ay2;
float N01 = -a1.x*a1.y;
float N10 = N01;
float N11 = gamma + ax2;
// reciprocal determinant of M
float rdetM = 1.0f / (gamma*(gamma + ax2 + ay2));
// q vector components
float qx = gamma*ofOld.x + a1.x*Yt;
float qy = gamma*ofOld.y + a1.y*Yt;
// computes the updated optical flow
float2 ofNew = make_float2( (N00*qx + N01*qy)*rdetM,
(N10*qx + N11*qy)*rdetM);
// truncates the flow to lie in its allowed interval
ofNew.x = max(-maxflow, min(ofNew.x, maxflow));
ofNew.y = max(-maxflow, min(ofNew.y, maxflow));
// sanitize the output
ofNew.x = isinf(ofNew.x) + isnan(ofNew.x) > 0? 0.0f : ofNew.x;
ofNew.y = isinf(ofNew.y) + isnan(ofNew.y) > 0? 0.0f : ofNew.y;
//#################################
// LOW TEXTURE RESULTS
//#################################
// if ((a1.x*a1.x)<0.0001 && (a1.y*a1.y)<0.0001)
// {
// ofNew.x = 0;
// ofNew.y = 0;
// }
//#################################
// PACK RESULTS
//#################################
*coordPitch(flowUpdated, pix) = ofNew;
*coordPitch(imageUpdated, pix) = a0;
}
__global__ void deltaFlowUpdate_k(gpuimage_t<float> newImage,
gpuimage_t<float2> newImageGradient,
gpuimage_t<float> oldImage,
gpuimage_t<float2> oldDeltaFlow,
cudaTextureObject_t oldFlowTexture,
gpuimage_t<float> imageUpdated,
gpuimage_t<float2> deltaFlowUpdated,
gpuimage_t<float2> flowUpdated,
const float gamma, const float maxflow) {
const int height = flowUpdated.height;
const int width = flowUpdated.width;
// pixel coordinate
const int2 pix = make_int2(blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y);
if(pix.x >= width || pix.y >= height) {
return;
}
// read elements from the different arrays
float2 a1 = *coordPitch(newImageGradient, pix);
float a0 = *coordPitch(newImage, pix);
float a0old = *coordPitch(oldImage, pix);
float2 deltaFlowOld = *coordPitch(oldDeltaFlow, pix);
//#################################
// FLOW UPDATE
//#################################
// temporal derivative
float Yt = a0old - a0;
float ax2 = a1.x*a1.x;
float ay2 = a1.y*a1.y;
// elements of the adjucate matrix of M
float N00 = gamma + ay2;
float N01 = -a1.x*a1.y;
float N10 = N01;
float N11 = gamma + ax2;
// reciprocal determinant of M
float rdetM = 1.0f / (gamma*(gamma + ax2 + ay2));
// q vector components
float qx = gamma*deltaFlowOld.x + a1.x*Yt;
float qy = gamma*deltaFlowOld.y + a1.y*Yt;
// computes updated optical flow
float2 dFlowNew = make_float2( (N00*qx + N01*qy)*rdetM,
(N10*qx + N11*qy)*rdetM);
// sanitize output
dFlowNew.x = isinf(dFlowNew.x) + isnan(dFlowNew.x) > 0? 0.0f : dFlowNew.x;
dFlowNew.y = isinf(dFlowNew.y) + isnan(dFlowNew.y) > 0? 0.0f : dFlowNew.y;
// truncates dflow to lie in its allowed interval
dFlowNew.x = max(-0.5f*maxflow, min(dFlowNew.x, 0.5f*maxflow));
dFlowNew.y = max(-0.5f*maxflow, min(dFlowNew.y, 0.5f*maxflow));
//#################################
// OPTICAL FLOW COMPUTATION
//#################################
// read upper level flow
// normalized texture coordinates
float u = (float)pix.x / (float)(width -1);
float v = (float)pix.y / (float)(height -1);
// linear interpolation of flow value
float2 fup = tex2D<float2>(oldFlowTexture, u, v);
float2 flowUp = make_float2(2.0*fup.x, 2.0*fup.y);
// update upsampled flow from top level
float2 flowNew = make_float2(dFlowNew.x + flowUp.x,
dFlowNew.y + flowUp.y);
// truncates flow to lie in its allowed interval
flowNew.x = max(-maxflow, min(flowNew.x, maxflow));
flowNew.y = max(-maxflow, min(flowNew.y, maxflow));
if ((a1.x*a1.x)<0.00005 && (a1.y*a1.y)<0.00005)
{
// dFlowNew.x = 0;
// dFlowNew.y = 0;
flowNew.x = 0;
flowNew.y = 0;
}
//#################################
// PACK RESULTS
//#################################
*coordPitch(deltaFlowUpdated, pix) = dFlowNew;
*coordPitch(flowUpdated, pix) = flowNew;
*coordPitch(imageUpdated, pix) = a0;
}
}; // namespace gpu
}; // namespace flowfilter
|
c3e21fac349847af16ed375f1110076937845ef5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
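// The kernel below performs the k-selection step typical of a brute-force k-nearest-neighbor
// search: each thread owns one query (one column of the pitched distance matrix) and keeps its k
// smallest distances sorted in ascending order in the first k rows, updating the index matrix in
// lockstep; candidates not smaller than the current k-th distance are skipped without shifting.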
__global__ void modified_insertion_sort(float * dist, int dist_pitch, int * index, int index_pitch, int width, int height, int k){
// Column position
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
// Do nothing if we are out of bounds
if (xIndex < width) {
// Pointer shift
float * p_dist = dist + xIndex;
int * p_index = index + xIndex;
// Initialise the first index
p_index[0] = 0;
// Go through all points
for (int i=1; i<height; ++i) {
// Store current distance and associated index
float curr_dist = p_dist[i*dist_pitch];
int curr_index = i;
// Skip the current value if its index is >= k and if it's higher than the k-th already sorted smallest value
if (i >= k && curr_dist >= p_dist[(k-1)*dist_pitch]) {
continue;
}
// Shift values (and indexes) higher than the current distance to the right
int j = min(i, k-1);
while (j > 0 && p_dist[(j-1)*dist_pitch] > curr_dist) {
p_dist[j*dist_pitch] = p_dist[(j-1)*dist_pitch];
p_index[j*index_pitch] = p_index[(j-1)*index_pitch];
--j;
}
// Write the current distance and index at their position
p_dist[j*dist_pitch] = curr_dist;
p_index[j*index_pitch] = curr_index;
}
}
} | c3e21fac349847af16ed375f1110076937845ef5.cu | #include "includes.h"
__global__ void modified_insertion_sort(float * dist, int dist_pitch, int * index, int index_pitch, int width, int height, int k){
// Column position
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
// Do nothing if we are out of bounds
if (xIndex < width) {
// Pointer shift
float * p_dist = dist + xIndex;
int * p_index = index + xIndex;
// Initialise the first index
p_index[0] = 0;
// Go through all points
for (int i=1; i<height; ++i) {
// Store current distance and associated index
float curr_dist = p_dist[i*dist_pitch];
int curr_index = i;
// Skip the current value if its index is >= k and if it's higher than the k-th already sorted smallest value
if (i >= k && curr_dist >= p_dist[(k-1)*dist_pitch]) {
continue;
}
// Shift values (and indexes) higher than the current distance to the right
int j = min(i, k-1);
while (j > 0 && p_dist[(j-1)*dist_pitch] > curr_dist) {
p_dist[j*dist_pitch] = p_dist[(j-1)*dist_pitch];
p_index[j*index_pitch] = p_index[(j-1)*index_pitch];
--j;
}
// Write the current distance and index at their position
p_dist[j*dist_pitch] = curr_dist;
p_index[j*index_pitch] = curr_index;
}
}
} |
04ada6b2c3c1223d575c7e86d2bdbbca8250701c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2019, Xuhao Chen
#include "fsm.h"
#include "timer.h"
#include "cutils.h"
#define USE_PID
#define USE_DOMAIN
#define EDGE_INDUCED
#define ENABLE_LABEL
#include <hipcub/hipcub.hpp>
#include "miner.cuh"
#include "bitsets.h"
#include <thrust/scan.h>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
#define MAX_NUM_PATTERNS 21251
struct OrderedEdge {
IndexT src;
IndexT dst;
};
inline __device__ int get_init_pattern_id(node_data_type src_label, node_data_type dst_label, int nlabels) {
return (int)src_label * nlabels + (int)dst_label;
}
inline __device__ unsigned get_pattern_id(node_data_type label0, node_data_type label1, node_data_type label2, int nlabels) {
return nlabels * (nlabels * label0 + label1) + label2;
}
inline __device__ bool is_quick_automorphism(unsigned size, IndexT *vids, history_type his2, history_type his, IndexT src, IndexT dst) {
if (dst <= vids[0]) return true;
if (dst == vids[1]) return true;
if (his == 0 && dst < vids[1]) return true;
if (size == 2) {
} else if (size == 3) {
if (his == 0 && his2 == 0 && dst <= vids[2]) return true;
if (his == 0 && his2 == 1 && dst == vids[2]) return true;
if (his == 1 && his2 == 1 && dst <= vids[2]) return true;
} else {
}
return false;
}
inline __device__ void swap(IndexT &first, IndexT &second) {
if (first > second) {
IndexT tmp = first;
first = second;
second = tmp;
}
}
inline __device__ int compare(OrderedEdge oneEdge, OrderedEdge otherEdge) {
swap(oneEdge.src, oneEdge.dst);
swap(otherEdge.src, otherEdge.dst);
if(oneEdge.src == otherEdge.src) return oneEdge.dst - otherEdge.dst;
else return oneEdge.src - otherEdge.src;
}
inline __device__ bool is_edge_automorphism(unsigned size, IndexT *vids, history_type *hiss, history_type his, IndexT src, IndexT dst) {
if (size < 3) return is_quick_automorphism(size, vids, hiss[2], his, src, dst);
if (dst <= vids[0]) return true;
if (his == 0 && dst <= vids[1]) return true;
if (dst == vids[hiss[his]]) return true;
OrderedEdge added_edge;
added_edge.src = src;
added_edge.dst = dst;
for (unsigned index = his + 1; index < size; ++index) {
OrderedEdge edge;
edge.src = vids[hiss[index]];
edge.dst = vids[index];
int cmp = compare(added_edge, edge);
if(cmp <= 0) return true;
}
return false;
}
__global__ void extend_alloc(unsigned m, unsigned level, CSRGraph graph, EmbeddingList emb_list, IndexT *num_new_emb) {
unsigned tid = threadIdx.x;
unsigned pos = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ IndexT vid[BLOCK_SIZE][MAX_SIZE];
__shared__ history_type his[BLOCK_SIZE][MAX_SIZE];
if(pos < m) {
emb_list.get_edge_embedding(level, pos, vid[tid], his[tid]);
num_new_emb[pos] = 0;
//if (pos == 1) printf("src=%d, dst=%d\n", vid[tid][0], vid[tid][1]);
for (unsigned i = 0; i < level+1; ++i) {
IndexT src = vid[tid][i];
IndexT row_begin = graph.edge_begin(src);
IndexT row_end = graph.edge_end(src);
for (IndexT e = row_begin; e < row_end; e++) {
IndexT dst = graph.getEdgeDst(e);
if (!is_edge_automorphism(level+1, vid[tid], his[tid], i, src, dst))
num_new_emb[pos] ++;
}
}
}
}
__global__ void extend_insert(unsigned m, unsigned level, CSRGraph graph, EmbeddingList emb_list, IndexT *indices) {
unsigned tid = threadIdx.x;
unsigned pos = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ IndexT vids[BLOCK_SIZE][MAX_SIZE];
__shared__ history_type his[BLOCK_SIZE][MAX_SIZE];
if(pos < m) {
emb_list.get_edge_embedding(level, pos, vids[tid], his[tid]);
IndexT start = indices[pos];
for (unsigned i = 0; i < level+1; ++i) {
IndexT src = vids[tid][i];
IndexT row_begin = graph.edge_begin(src);
IndexT row_end = graph.edge_end(src);
for (IndexT e = row_begin; e < row_end; e++) {
IndexT dst = graph.getEdgeDst(e);
if (!is_edge_automorphism(level+1, vids[tid], his[tid], i, src, dst)) {
emb_list.set_idx(level+1, start, pos);
emb_list.set_his(level+1, start, i);
emb_list.set_vid(level+1, start++, dst);
}
}
}
}
}
__global__ void init_aggregate(unsigned m, unsigned num_emb, CSRGraph graph, EmbeddingList emb_list, unsigned *pids, int nlabels, unsigned threshold, Bitsets small_sets, Bitsets large_sets) {
unsigned pos = blockIdx.x * blockDim.x + threadIdx.x;
if(pos < num_emb) {
IndexT src = emb_list.get_idx(1, pos);
IndexT dst = emb_list.get_vid(1, pos);
node_data_type src_label = graph.getData(src);
node_data_type dst_label = graph.getData(dst);
int pid = 0;
if (src_label <= dst_label)
pid = get_init_pattern_id(src_label, dst_label, nlabels);
else pid = get_init_pattern_id(dst_label, src_label, nlabels);
pids[pos] = pid;
if (src_label < dst_label) {
small_sets.set(pid, src);
large_sets.set(pid, dst);
} else if (src_label > dst_label) {
small_sets.set(pid, dst);
large_sets.set(pid, src);
} else {
small_sets.set(pid, src);
small_sets.set(pid, dst);
large_sets.set(pid, src);
large_sets.set(pid, dst);
}
}
}
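/* Support is measured with the minimum-image-based (domain) metric: for each candidate pattern,
small_sets/large_sets (plus middle_sets for 3-vertex patterns) record which graph vertices can map
to each pattern position, and the support of the pattern is the minimum cardinality over these
sets (see init_support_count and support_count below). */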
__global__ void count_ones(int id, Bitsets sets, int *count) {
typedef hipcub::BlockReduce<int, BLOCK_SIZE> BlockReduce;
unsigned pos = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ typename BlockReduce::TempStorage temp_storage;
int num = 0;
if(pos < sets.vec_size())
num = sets.count_num_ones(id, pos);
int block_total = BlockReduce(temp_storage).Sum(num);
if(threadIdx.x == 0) atomicAdd(count, block_total);
}
int init_support_count(unsigned m, int npatterns, unsigned threshold, Bitsets small_sets, Bitsets large_sets, bool *init_support_map) {
int num_freq_patterns = 0;
for (int i = 0; i < npatterns; i++) {
int a, b, *d_count;
CUDA_SAFE_CALL(hipMalloc((void **)&d_count, sizeof(int)));
CUDA_SAFE_CALL(hipMemset(d_count, 0, sizeof(int)));
hipLaunchKernelGGL(( count_ones), dim3((m-1)/256+1), dim3(256), 0, 0, i, small_sets, d_count);
CudaTest("solving count_ones failed");
CUDA_SAFE_CALL(hipMemcpy(&a, d_count, sizeof(int), hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipMemset(d_count, 0, sizeof(int)));
hipLaunchKernelGGL(( count_ones), dim3((m-1)/256+1), dim3(256), 0, 0, i, large_sets, d_count);
CUDA_SAFE_CALL(hipMemcpy(&b, d_count, sizeof(int), hipMemcpyDeviceToHost));
unsigned support = a < b ? a : b;
if (support >= threshold) {
init_support_map[i] = 1;
num_freq_patterns ++;
} else init_support_map[i] = 0;
}
return num_freq_patterns;
}
//int support_count(unsigned m, unsigned npatterns, unsigned threshold, SetType *small_sets, SetType *middle_sets, SetType *large_sets, bool *support_map) {
int support_count(unsigned m, unsigned npatterns, unsigned threshold, Bitsets small_sets, Bitsets middle_sets, Bitsets large_sets, bool *support_map) {
int num_freq_patterns = 0;
for (int i = 0; i < npatterns; i++) {
int a, b, c, *d_count;
CUDA_SAFE_CALL(hipMalloc((void **)&d_count, sizeof(int)));
CUDA_SAFE_CALL(hipMemset(d_count, 0, sizeof(int)));
hipLaunchKernelGGL(( count_ones), dim3((m-1)/256+1), dim3(256), 0, 0, i, small_sets, d_count);
CUDA_SAFE_CALL(hipMemcpy(&a, d_count, sizeof(int), hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipMemset(d_count, 0, sizeof(int)));
hipLaunchKernelGGL(( count_ones), dim3((m-1)/256+1), dim3(256), 0, 0, i, large_sets, d_count);
CUDA_SAFE_CALL(hipMemcpy(&b, d_count, sizeof(int), hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipMemset(d_count, 0, sizeof(int)));
hipLaunchKernelGGL(( count_ones), dim3((m-1)/256+1), dim3(256), 0, 0, i, middle_sets, d_count);
CUDA_SAFE_CALL(hipMemcpy(&c, d_count, sizeof(int), hipMemcpyDeviceToHost));
unsigned small = a < b ? a : b;
unsigned support = small < c ? small : c;
if (support >= threshold) {
support_map[i] = 1;
num_freq_patterns ++;
} else support_map[i] = 0;
}
return num_freq_patterns;
}
__global__ void init_filter_check(unsigned m, unsigned *pids, bool *init_support_map, IndexT *is_frequent_emb) {
unsigned pos = blockIdx.x * blockDim.x + threadIdx.x;
if(pos < m) {
unsigned pid = pids[pos];
bool is_frequent = init_support_map[pid];
if (is_frequent) is_frequent_emb[pos] = 1;
}
}
__global__ void copy_vids(unsigned m, EmbeddingList emb_list, IndexT *vid_list0, IndexT *vid_list1) {
unsigned pos = blockIdx.x * blockDim.x + threadIdx.x;
if(pos < m) {
vid_list0[pos] = emb_list.get_idx(1, pos);
vid_list1[pos] = emb_list.get_vid(1, pos);
}
}
__global__ void init_filter(unsigned m, EmbeddingList emb_list, IndexT *vid_list0, IndexT *vid_list1, IndexT *indices, IndexT *is_frequent_emb) {
unsigned pos = blockIdx.x * blockDim.x + threadIdx.x;
if(pos < m) {
if (is_frequent_emb[pos]) {
IndexT src = vid_list0[pos];
IndexT dst = vid_list1[pos];
unsigned start = indices[pos];
emb_list.set_vid(1, start, dst);
emb_list.set_idx(1, start, src);
}
}
}
__global__ void aggregate_check(unsigned num_emb, unsigned level, CSRGraph graph, EmbeddingList emb_list, unsigned *pids, int nlabels, unsigned threshold, unsigned *ne) {
unsigned tid = threadIdx.x;
unsigned pos = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ IndexT vids[BLOCK_SIZE][MAX_SIZE];
__shared__ history_type his[BLOCK_SIZE][MAX_SIZE];
if(pos < num_emb) {
emb_list.get_edge_embedding(level, pos, vids[tid], his[tid]);
unsigned n = level+1;
assert(n < 4);
IndexT first = vids[tid][0];
IndexT second = vids[tid][1];
IndexT third = vids[tid][2];
node_data_type l0 = graph.getData(first);
node_data_type l1 = graph.getData(second);
node_data_type l2 = graph.getData(third);
history_type h2 = his[tid][2];
unsigned pid = 0;
if (n == 3) {
if (h2 == 0) {
if (l1 < l2) {
pid = get_pattern_id(l0, l2, l1, nlabels);
} else {
pid = get_pattern_id(l0, l1, l2, nlabels);
}
} else {
assert(h2 == 1);
if (l0 < l2) {
pid = get_pattern_id(l1, l2, l0, nlabels);
} else {
pid = get_pattern_id(l1, l0, l2, nlabels);
}
}
} else {
}
pids[pos] = pid;
atomicAdd(&ne[pid], 1);
}
}
__global__ void find_candidate_patterns(unsigned num_patterns, unsigned *ne, unsigned minsup, unsigned *id_map, unsigned *num_new_patterns) {
unsigned pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < num_patterns) {
if (ne[pos] >= minsup) {
unsigned new_id = atomicAdd(num_new_patterns, 1);
id_map[pos] = new_id;
}
}
}
__global__ void aggregate(unsigned m, unsigned num_emb, unsigned level, CSRGraph graph, EmbeddingList emb_list, unsigned *pids, unsigned *ne, unsigned *id_map, int nlabels, unsigned threshold, Bitsets small_sets, Bitsets middle_sets, Bitsets large_sets) {
unsigned tid = threadIdx.x;
unsigned pos = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ IndexT vids[BLOCK_SIZE][MAX_SIZE];
__shared__ history_type his[BLOCK_SIZE][MAX_SIZE];
if(pos < num_emb) {
emb_list.get_edge_embedding(level, pos, vids[tid], his[tid]);
unsigned n = level+1;
assert(n == 3);
IndexT first = vids[tid][0];
IndexT second = vids[tid][1];
IndexT third = vids[tid][2];
node_data_type l0 = graph.getData(first);
node_data_type l1 = graph.getData(second);
node_data_type l2 = graph.getData(third);
history_type h2 = his[tid][2];
IndexT small, middle, large;
unsigned pid = pids[pos];
if (ne[pid] >= threshold) {
pid = id_map[pid];
if (h2 == 0) {
middle = first;
if (l1 < l2) {
small = second;
large = third;
} else {
small = third;
large = second;
}
small_sets.set(pid, small);
middle_sets.set(pid, middle);
large_sets.set(pid, large);
if (l1 == l2) {
small_sets.set(pid, large);
large_sets.set(pid, small);
}
} else {
assert(h2 == 1);
middle = second;
if (l0 < l2) {
small = first;
large = third;
} else {
small = third;
large = first;
}
small_sets.set(pid, small);
middle_sets.set(pid, middle);
large_sets.set(pid, large);
if (l0 == l2) {
small_sets.set(pid, large);
large_sets.set(pid, small);
}
}
}
}
}
void parallel_prefix_sum(int n, IndexT *in, IndexT *out) {
IndexT total = 0;
for (size_t i = 0; i < n; i++) {
out[i] = total;
total += in[i];
}
out[n] = total;
}
void fsm_gpu_solver(std::string fname, unsigned k, unsigned minsup, AccType &total_num) {
CSRGraph graph_cpu, graph_gpu;
int nlabels = graph_cpu.read(fname); // read graph into CPU memory
int m = graph_cpu.get_nnodes();
int nnz = graph_cpu.get_nedges();
graph_cpu.copy_to_gpu(graph_gpu); // copy graph to GPU memory
EmbeddingList emb_list;
emb_list.init(nnz, k+1, false);
emb_list.init_cpu(&graph_cpu);
int nthreads = BLOCK_SIZE;
int nblocks = DIVIDE_INTO(nnz, nthreads);
int num_init_patterns = (nlabels+1)*(nlabels+1);
std::cout << "Number of init patterns: " << num_init_patterns << std::endl;
unsigned num_emb = emb_list.size();
std::cout << "number of single-edge embeddings: " << num_emb << "\n";
unsigned *pids;
CUDA_SAFE_CALL(hipMalloc((void **)&pids, sizeof(unsigned)*num_emb));
bool *h_init_support_map = (bool *)malloc(sizeof(bool) * num_init_patterns);
bool *d_init_support_map;
CUDA_SAFE_CALL(hipMalloc((void **)&d_init_support_map, sizeof(bool)*num_init_patterns));
IndexT *is_frequent_emb;
CUDA_SAFE_CALL(hipMalloc((void **)&is_frequent_emb, sizeof(IndexT)*(num_emb+1)));
CUDA_SAFE_CALL(hipMemset(is_frequent_emb, 0, sizeof(IndexT)*(num_emb+1)));
IndexT *vid_list0, *vid_list1;
CUDA_SAFE_CALL(hipMalloc((void **)&vid_list0, sizeof(IndexT)*num_emb));
CUDA_SAFE_CALL(hipMalloc((void **)&vid_list1, sizeof(IndexT)*num_emb));
Bitsets small_sets, large_sets, middle_sets;
small_sets.alloc(MAX_NUM_PATTERNS, m);
large_sets.alloc(MAX_NUM_PATTERNS, m);
middle_sets.alloc(MAX_NUM_PATTERNS, m);
small_sets.set_size(num_init_patterns, m);
large_sets.set_size(num_init_patterns, m);
middle_sets.set_size(num_init_patterns, m);
IndexT *num_new_emb, *indices;
CUDA_SAFE_CALL(hipMalloc((void **)&indices, sizeof(IndexT) * (num_emb+1)));
CUDA_SAFE_CALL(hipDeviceSynchronize());
nblocks = (num_emb-1)/nthreads+1;
unsigned *d_num_new_patterns;
unsigned h_num_new_patterns = 0;
CUDA_SAFE_CALL(hipMalloc((void **)&d_num_new_patterns, sizeof(unsigned)));
printf("Launching CUDA FSM solver (%d CTAs, %d threads/CTA) ...\n", nblocks, nthreads);
Timer t;
t.Start();
unsigned level = 1;
hipLaunchKernelGGL(( init_aggregate), dim3(nblocks), dim3(nthreads), 0, 0, m, num_emb, graph_gpu, emb_list, pids, nlabels, minsup, small_sets, large_sets);
CudaTest("solving init_aggregate failed");
std::cout << "Init_aggregate Done\n";
int num_freq_patterns = init_support_count(m, num_init_patterns, minsup, small_sets, large_sets, h_init_support_map);
total_num += num_freq_patterns;
if (num_freq_patterns == 0) {
std::cout << "No frequent pattern found\n\n";
return;
}
std::cout << "Number of frequent single-edge patterns: " << num_freq_patterns << "\n";
CUDA_SAFE_CALL(hipMemcpy(d_init_support_map, h_init_support_map, sizeof(bool) * num_init_patterns, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( init_filter_check), dim3(nblocks), dim3(nthreads), 0, 0, num_emb, pids, d_init_support_map, is_frequent_emb);
CudaTest("solving init_filter_check failed");
thrust::exclusive_scan(thrust::device, is_frequent_emb, is_frequent_emb+num_emb+1, indices);
IndexT new_size;
CUDA_SAFE_CALL(hipMemcpy(&new_size, &indices[num_emb], sizeof(IndexT), hipMemcpyDeviceToHost));
std::cout << "number of embeddings after pruning: " << new_size << "\n";
hipLaunchKernelGGL(( copy_vids), dim3(nblocks), dim3(nthreads), 0, 0, num_emb, emb_list, vid_list0, vid_list1);
CudaTest("solving copy_vids failed");
hipLaunchKernelGGL(( init_filter), dim3(nblocks), dim3(nthreads), 0, 0, num_emb, emb_list, vid_list0, vid_list1, indices, is_frequent_emb);
CudaTest("solving init_filter failed");
CUDA_SAFE_CALL(hipFree(indices));
CUDA_SAFE_CALL(hipFree(is_frequent_emb));
CUDA_SAFE_CALL(hipFree(pids));
//small_sets.clean();
//large_sets.clean();
small_sets.clear();
large_sets.clear();
CUDA_SAFE_CALL(hipFree(vid_list0));
CUDA_SAFE_CALL(hipFree(vid_list1));
CUDA_SAFE_CALL(hipFree(d_init_support_map));
emb_list.remove_tail(new_size);
while (1) {
num_emb = emb_list.size();
std::cout << "number of embeddings in level " << level << ": " << num_emb << "\n";
CUDA_SAFE_CALL(hipMalloc((void **)&num_new_emb, sizeof(IndexT) * (num_emb+1)));
CUDA_SAFE_CALL(hipMalloc((void **)&indices, sizeof(IndexT) * (num_emb+1)));
std::cout << "Done allocating memory for embeddings in level " << level << "\n";
nblocks = (num_emb-1)/nthreads+1;
hipLaunchKernelGGL(( extend_alloc), dim3(nblocks), dim3(nthreads), 0, 0, num_emb, level, graph_gpu, emb_list, num_new_emb);
CudaTest("solving extend_alloc failed");
thrust::exclusive_scan(thrust::device, num_new_emb, num_new_emb+num_emb+1, indices);
CudaTest("Scan failed");
CUDA_SAFE_CALL(hipMemcpy(&new_size, &indices[num_emb], sizeof(IndexT), hipMemcpyDeviceToHost));
std::cout << "number of new embeddings: " << new_size << "\n";
emb_list.add_level(new_size);
hipLaunchKernelGGL(( extend_insert), dim3(nblocks), dim3(nthreads), 0, 0, num_emb, level, graph_gpu, emb_list, indices);
CudaTest("solving extend_insert failed");
std::cout << "Extend_insert Done\n";
num_emb = emb_list.size();
CUDA_SAFE_CALL(hipFree(num_new_emb));
CUDA_SAFE_CALL(hipFree(indices));
level ++;
int num_patterns = nlabels*num_init_patterns;
nblocks = (num_emb-1)/nthreads+1;
std::cout << "Number of patterns in level " << level << ": " << num_patterns << std::endl;
std::cout << "number of embeddings in level " << level << ": " << num_emb << "\n";
unsigned *ne, *id_map;
CUDA_SAFE_CALL(hipMalloc((void **)&ne, sizeof(unsigned)*num_patterns));
CUDA_SAFE_CALL(hipMalloc((void **)&id_map, sizeof(unsigned)*num_patterns));
CUDA_SAFE_CALL(hipMemset(ne, 0, sizeof(unsigned)*num_patterns));
CUDA_SAFE_CALL(hipMalloc((void **)&pids, sizeof(unsigned)*num_emb));
std::cout << "Done allocating memory for aggregation in level " << level << "\n";
hipLaunchKernelGGL(( aggregate_check), dim3(nblocks), dim3(nthreads), 0, 0, num_emb, level, graph_gpu, emb_list, pids, nlabels, minsup, ne);
CudaTest("solving aggregate_check failed");
CUDA_SAFE_CALL(hipMemset(d_num_new_patterns, 0, sizeof(unsigned)));
hipLaunchKernelGGL(( find_candidate_patterns), dim3((num_patterns-1)/nthreads+1), dim3(nthreads), 0, 0, num_patterns, ne, minsup, id_map, d_num_new_patterns);
CudaTest("solving find_candidate_patterns failed");
CUDA_SAFE_CALL(hipMemcpy(&h_num_new_patterns, d_num_new_patterns, sizeof(unsigned), hipMemcpyDeviceToHost));
std::cout << "Number of candidate patterns in level " << level << ": " << h_num_new_patterns << std::endl;
//small_sets.alloc(h_num_new_patterns, m);
//large_sets.alloc(h_num_new_patterns, m);
//middle_sets.alloc(h_num_new_patterns, m);
small_sets.set_size(h_num_new_patterns, m);
large_sets.set_size(h_num_new_patterns, m);
middle_sets.set_size(h_num_new_patterns, m);
std::cout << "Done allocating sets\n";
hipLaunchKernelGGL(( aggregate), dim3(nblocks), dim3(nthreads), 0, 0, m, num_emb, level, graph_gpu, emb_list, pids, ne, id_map, nlabels, minsup, small_sets, middle_sets, large_sets);
CudaTest("solving aggregate failed");
bool *h_support_map = (bool *)malloc(sizeof(bool) * h_num_new_patterns);
num_freq_patterns = support_count(m, h_num_new_patterns, minsup, small_sets, middle_sets, large_sets, h_support_map);
CudaTest("solving support_count failed");
CUDA_SAFE_CALL(hipFree(ne));
CUDA_SAFE_CALL(hipFree(id_map));
std::cout << "num_frequent_patterns: " << num_freq_patterns << "\n";
total_num += num_freq_patterns;
if (num_freq_patterns == 0) break;
if (level == k) break;
//filter<<<nblocks, nthreads>>>(level, emb_list);
}
CUDA_SAFE_CALL(hipDeviceSynchronize());
t.Stop();
printf("\truntime = %f ms.\n", t.Millisecs());
}
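/* Overall flow of fsm_gpu_solver above: (1) materialize all single-edge embeddings and aggregate
them into label-pair patterns, (2) discard embeddings of infrequent patterns via an exclusive scan
and compaction, (3) repeatedly extend the surviving embeddings by one edge (extend_alloc /
extend_insert), aggregate the 3-vertex candidates and recompute domain support, stopping when no
frequent pattern remains or the requested size k is reached; total_num accumulates the number of
frequent patterns found at each level. */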
| 04ada6b2c3c1223d575c7e86d2bdbbca8250701c.cu | // Copyright (c) 2019, Xuhao Chen
#include "fsm.h"
#include "timer.h"
#include "cutils.h"
#define USE_PID
#define USE_DOMAIN
#define EDGE_INDUCED
#define ENABLE_LABEL
#include <cub/cub.cuh>
#include "miner.cuh"
#include "bitsets.h"
#include <thrust/scan.h>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
#define MAX_NUM_PATTERNS 21251
struct OrderedEdge {
IndexT src;
IndexT dst;
};
inline __device__ int get_init_pattern_id(node_data_type src_label, node_data_type dst_label, int nlabels) {
return (int)src_label * nlabels + (int)dst_label;
}
inline __device__ unsigned get_pattern_id(node_data_type label0, node_data_type label1, node_data_type label2, int nlabels) {
return nlabels * (nlabels * label0 + label1) + label2;
}
inline __device__ bool is_quick_automorphism(unsigned size, IndexT *vids, history_type his2, history_type his, IndexT src, IndexT dst) {
if (dst <= vids[0]) return true;
if (dst == vids[1]) return true;
if (his == 0 && dst < vids[1]) return true;
if (size == 2) {
} else if (size == 3) {
if (his == 0 && his2 == 0 && dst <= vids[2]) return true;
if (his == 0 && his2 == 1 && dst == vids[2]) return true;
if (his == 1 && his2 == 1 && dst <= vids[2]) return true;
} else {
}
return false;
}
inline __device__ void swap(IndexT &first, IndexT &second) {
if (first > second) {
IndexT tmp = first;
first = second;
second = tmp;
}
}
inline __device__ int compare(OrderedEdge oneEdge, OrderedEdge otherEdge) {
swap(oneEdge.src, oneEdge.dst);
swap(otherEdge.src, otherEdge.dst);
if(oneEdge.src == otherEdge.src) return oneEdge.dst - otherEdge.dst;
else return oneEdge.src - otherEdge.src;
}
inline __device__ bool is_edge_automorphism(unsigned size, IndexT *vids, history_type *hiss, history_type his, IndexT src, IndexT dst) {
if (size < 3) return is_quick_automorphism(size, vids, hiss[2], his, src, dst);
if (dst <= vids[0]) return true;
if (his == 0 && dst <= vids[1]) return true;
if (dst == vids[hiss[his]]) return true;
OrderedEdge added_edge;
added_edge.src = src;
added_edge.dst = dst;
for (unsigned index = his + 1; index < size; ++index) {
OrderedEdge edge;
edge.src = vids[hiss[index]];
edge.dst = vids[index];
int cmp = compare(added_edge, edge);
if(cmp <= 0) return true;
}
return false;
}
__global__ void extend_alloc(unsigned m, unsigned level, CSRGraph graph, EmbeddingList emb_list, IndexT *num_new_emb) {
unsigned tid = threadIdx.x;
unsigned pos = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ IndexT vid[BLOCK_SIZE][MAX_SIZE];
__shared__ history_type his[BLOCK_SIZE][MAX_SIZE];
if(pos < m) {
emb_list.get_edge_embedding(level, pos, vid[tid], his[tid]);
num_new_emb[pos] = 0;
//if (pos == 1) printf("src=%d, dst=%d\n", vid[tid][0], vid[tid][1]);
for (unsigned i = 0; i < level+1; ++i) {
IndexT src = vid[tid][i];
IndexT row_begin = graph.edge_begin(src);
IndexT row_end = graph.edge_end(src);
for (IndexT e = row_begin; e < row_end; e++) {
IndexT dst = graph.getEdgeDst(e);
if (!is_edge_automorphism(level+1, vid[tid], his[tid], i, src, dst))
num_new_emb[pos] ++;
}
}
}
}
__global__ void extend_insert(unsigned m, unsigned level, CSRGraph graph, EmbeddingList emb_list, IndexT *indices) {
unsigned tid = threadIdx.x;
unsigned pos = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ IndexT vids[BLOCK_SIZE][MAX_SIZE];
__shared__ history_type his[BLOCK_SIZE][MAX_SIZE];
if(pos < m) {
emb_list.get_edge_embedding(level, pos, vids[tid], his[tid]);
IndexT start = indices[pos];
for (unsigned i = 0; i < level+1; ++i) {
IndexT src = vids[tid][i];
IndexT row_begin = graph.edge_begin(src);
IndexT row_end = graph.edge_end(src);
for (IndexT e = row_begin; e < row_end; e++) {
IndexT dst = graph.getEdgeDst(e);
if (!is_edge_automorphism(level+1, vids[tid], his[tid], i, src, dst)) {
emb_list.set_idx(level+1, start, pos);
emb_list.set_his(level+1, start, i);
emb_list.set_vid(level+1, start++, dst);
}
}
}
}
}
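// init_aggregate: map each single-edge embedding to an initial pattern id derived from its
// (ordered) pair of vertex labels, and record the two endpoints in the per-pattern
// "small"/"large" vertex bitsets used for domain-based (minimum-image) support counting.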
__global__ void init_aggregate(unsigned m, unsigned num_emb, CSRGraph graph, EmbeddingList emb_list, unsigned *pids, int nlabels, unsigned threshold, Bitsets small_sets, Bitsets large_sets) {
unsigned pos = blockIdx.x * blockDim.x + threadIdx.x;
if(pos < num_emb) {
IndexT src = emb_list.get_idx(1, pos);
IndexT dst = emb_list.get_vid(1, pos);
node_data_type src_label = graph.getData(src);
node_data_type dst_label = graph.getData(dst);
int pid = 0;
if (src_label <= dst_label)
pid = get_init_pattern_id(src_label, dst_label, nlabels);
else pid = get_init_pattern_id(dst_label, src_label, nlabels);
pids[pos] = pid;
if (src_label < dst_label) {
small_sets.set(pid, src);
large_sets.set(pid, dst);
} else if (src_label > dst_label) {
small_sets.set(pid, dst);
large_sets.set(pid, src);
} else {
small_sets.set(pid, src);
small_sets.set(pid, dst);
large_sets.set(pid, src);
large_sets.set(pid, dst);
}
}
}
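// count_ones: each thread counts the set bits in one chunk of pattern `id`'s bitset,
// a CUB block reduction sums the partial counts, and thread 0 of each block adds the
// block total to *count with an atomicAdd.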
__global__ void count_ones(int id, Bitsets sets, int *count) {
typedef cub::BlockReduce<int, BLOCK_SIZE> BlockReduce;
unsigned pos = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ typename BlockReduce::TempStorage temp_storage;
int num = 0;
if(pos < sets.vec_size())
num = sets.count_num_ones(id, pos);
int block_total = BlockReduce(temp_storage).Sum(num);
if(threadIdx.x == 0) atomicAdd(count, block_total);
}
int init_support_count(unsigned m, int npatterns, unsigned threshold, Bitsets small_sets, Bitsets large_sets, bool *init_support_map) {
int num_freq_patterns = 0;
for (int i = 0; i < npatterns; i++) {
int a, b, *d_count;
CUDA_SAFE_CALL(cudaMalloc((void **)&d_count, sizeof(int)));
CUDA_SAFE_CALL(cudaMemset(d_count, 0, sizeof(int)));
count_ones<<<(m-1)/256+1, 256>>>(i, small_sets, d_count);
CudaTest("solving count_ones `failed");
CUDA_SAFE_CALL(cudaMemcpy(&a, d_count, sizeof(int), cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemset(d_count, 0, sizeof(int)));
count_ones<<<(m-1)/256+1, 256>>>(i, large_sets, d_count);
CUDA_SAFE_CALL(cudaMemcpy(&b, d_count, sizeof(int), cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaFree(d_count)); // d_count is reallocated every iteration; free it to avoid leaking one allocation per pattern
unsigned support = a < b ? a : b;
if (support >= threshold) {
init_support_map[i] = 1;
num_freq_patterns ++;
} else init_support_map[i] = 0;
}
return num_freq_patterns;
}
//int support_count(unsigned m, unsigned npatterns, unsigned threshold, SetType *small_sets, SetType *middle_sets, SetType *large_sets, bool *support_map) {
int support_count(unsigned m, unsigned npatterns, unsigned threshold, Bitsets small_sets, Bitsets middle_sets, Bitsets large_sets, bool *support_map) {
int num_freq_patterns = 0;
for (int i = 0; i < npatterns; i++) {
int a, b, c, *d_count;
CUDA_SAFE_CALL(cudaMalloc((void **)&d_count, sizeof(int)));
CUDA_SAFE_CALL(cudaMemset(d_count, 0, sizeof(int)));
count_ones<<<(m-1)/256+1, 256>>>(i, small_sets, d_count);
CUDA_SAFE_CALL(cudaMemcpy(&a, d_count, sizeof(int), cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemset(d_count, 0, sizeof(int)));
count_ones<<<(m-1)/256+1, 256>>>(i, large_sets, d_count);
CUDA_SAFE_CALL(cudaMemcpy(&b, d_count, sizeof(int), cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemset(d_count, 0, sizeof(int)));
count_ones<<<(m-1)/256+1, 256>>>(i, middle_sets, d_count);
CUDA_SAFE_CALL(cudaMemcpy(&c, d_count, sizeof(int), cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaFree(d_count)); // d_count is reallocated every iteration; free it to avoid leaking one allocation per pattern
unsigned small = a < b ? a : b;
unsigned support = small < c ? small : c;
if (support >= threshold) {
support_map[i] = 1;
num_freq_patterns ++;
} else support_map[i] = 0;
}
return num_freq_patterns;
}
__global__ void init_filter_check(unsigned m, unsigned *pids, bool *init_support_map, IndexT *is_frequent_emb) {
unsigned pos = blockIdx.x * blockDim.x + threadIdx.x;
if(pos < m) {
unsigned pid = pids[pos];
bool is_frequent = init_support_map[pid];
if (is_frequent) is_frequent_emb[pos] = 1;
}
}
__global__ void copy_vids(unsigned m, EmbeddingList emb_list, IndexT *vid_list0, IndexT *vid_list1) {
unsigned pos = blockIdx.x * blockDim.x + threadIdx.x;
if(pos < m) {
vid_list0[pos] = emb_list.get_idx(1, pos);
vid_list1[pos] = emb_list.get_vid(1, pos);
}
}
__global__ void init_filter(unsigned m, EmbeddingList emb_list, IndexT *vid_list0, IndexT *vid_list1, IndexT *indices, IndexT *is_frequent_emb) {
unsigned pos = blockIdx.x * blockDim.x + threadIdx.x;
if(pos < m) {
if (is_frequent_emb[pos]) {
IndexT src = vid_list0[pos];
IndexT dst = vid_list1[pos];
unsigned start = indices[pos];
emb_list.set_vid(1, start, dst);
emb_list.set_idx(1, start, src);
}
}
}
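// aggregate_check: compute the pattern id of every 3-vertex embedding from its vertex labels
// and extension history, and tally how many embeddings map to each pattern in ne[],
// so infrequent candidates can be pruned before their domain sets are built.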
__global__ void aggregate_check(unsigned num_emb, unsigned level, CSRGraph graph, EmbeddingList emb_list, unsigned *pids, int nlabels, unsigned threshold, unsigned *ne) {
unsigned tid = threadIdx.x;
unsigned pos = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ IndexT vids[BLOCK_SIZE][MAX_SIZE];
__shared__ history_type his[BLOCK_SIZE][MAX_SIZE];
if(pos < num_emb) {
emb_list.get_edge_embedding(level, pos, vids[tid], his[tid]);
unsigned n = level+1;
assert(n < 4);
IndexT first = vids[tid][0];
IndexT second = vids[tid][1];
IndexT third = vids[tid][2];
node_data_type l0 = graph.getData(first);
node_data_type l1 = graph.getData(second);
node_data_type l2 = graph.getData(third);
history_type h2 = his[tid][2];
unsigned pid = 0;
if (n == 3) {
if (h2 == 0) {
if (l1 < l2) {
pid = get_pattern_id(l0, l2, l1, nlabels);
} else {
pid = get_pattern_id(l0, l1, l2, nlabels);
}
} else {
assert(h2 == 1);
if (l0 < l2) {
pid = get_pattern_id(l1, l2, l0, nlabels);
} else {
pid = get_pattern_id(l1, l0, l2, nlabels);
}
}
} else {
}
pids[pos] = pid;
atomicAdd(&ne[pid], 1);
}
}
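// find_candidate_patterns: assign a compact new id to every pattern whose embedding count
// reaches minsup, and count how many such candidate patterns exist.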
__global__ void find_candidate_patterns(unsigned num_patterns, unsigned *ne, unsigned minsup, unsigned *id_map, unsigned *num_new_patterns) {
unsigned pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < num_patterns) {
if (ne[pos] >= minsup) {
unsigned new_id = atomicAdd(num_new_patterns, 1);
id_map[pos] = new_id;
}
}
}
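// aggregate: for embeddings whose pattern survived the ne[] pre-check, remap to the compact
// pattern id and populate the small/middle/large vertex domain bitsets used for the
// minimum-image support computation.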
__global__ void aggregate(unsigned m, unsigned num_emb, unsigned level, CSRGraph graph, EmbeddingList emb_list, unsigned *pids, unsigned *ne, unsigned *id_map, int nlabels, unsigned threshold, Bitsets small_sets, Bitsets middle_sets, Bitsets large_sets) {
unsigned tid = threadIdx.x;
unsigned pos = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ IndexT vids[BLOCK_SIZE][MAX_SIZE];
__shared__ history_type his[BLOCK_SIZE][MAX_SIZE];
if(pos < num_emb) {
emb_list.get_edge_embedding(level, pos, vids[tid], his[tid]);
unsigned n = level+1;
assert(n == 3);
IndexT first = vids[tid][0];
IndexT second = vids[tid][1];
IndexT third = vids[tid][2];
node_data_type l0 = graph.getData(first);
node_data_type l1 = graph.getData(second);
node_data_type l2 = graph.getData(third);
history_type h2 = his[tid][2];
IndexT small, middle, large;
unsigned pid = pids[pos];
if (ne[pid] >= threshold) {
pid = id_map[pid];
if (h2 == 0) {
middle = first;
if (l1 < l2) {
small = second;
large = third;
} else {
small = third;
large = second;
}
small_sets.set(pid, small);
middle_sets.set(pid, middle);
large_sets.set(pid, large);
if (l1 == l2) {
small_sets.set(pid, large);
large_sets.set(pid, small);
}
} else {
assert(h2 == 1);
middle = second;
if (l0 < l2) {
small = first;
large = third;
} else {
small = third;
large = first;
}
small_sets.set(pid, small);
middle_sets.set(pid, middle);
large_sets.set(pid, large);
if (l0 == l2) {
small_sets.set(pid, large);
large_sets.set(pid, small);
}
}
}
}
}
void parallel_prefix_sum(int n, IndexT *in, IndexT *out) {
IndexT total = 0;
for (size_t i = 0; i < n; i++) {
out[i] = total;
total += in[i];
}
out[n] = total;
}
void fsm_gpu_solver(std::string fname, unsigned k, unsigned minsup, AccType &total_num) {
CSRGraph graph_cpu, graph_gpu;
int nlabels = graph_cpu.read(fname); // read graph into CPU memory
int m = graph_cpu.get_nnodes();
int nnz = graph_cpu.get_nedges();
graph_cpu.copy_to_gpu(graph_gpu); // copy graph to GPU memory
EmbeddingList emb_list;
emb_list.init(nnz, k+1, false);
emb_list.init_cpu(&graph_cpu);
int nthreads = BLOCK_SIZE;
int nblocks = DIVIDE_INTO(nnz, nthreads);
int num_init_patterns = (nlabels+1)*(nlabels+1);
std::cout << "Number of init patterns: " << num_init_patterns << std::endl;
unsigned num_emb = emb_list.size();
std::cout << "number of single-edge embeddings: " << num_emb << "\n";
unsigned *pids;
CUDA_SAFE_CALL(cudaMalloc((void **)&pids, sizeof(unsigned)*num_emb));
bool *h_init_support_map = (bool *)malloc(sizeof(bool) * num_init_patterns);
bool *d_init_support_map;
CUDA_SAFE_CALL(cudaMalloc((void **)&d_init_support_map, sizeof(bool)*num_init_patterns));
IndexT *is_frequent_emb;
CUDA_SAFE_CALL(cudaMalloc((void **)&is_frequent_emb, sizeof(IndexT)*(num_emb+1)));
CUDA_SAFE_CALL(cudaMemset(is_frequent_emb, 0, sizeof(IndexT)*(num_emb+1)));
IndexT *vid_list0, *vid_list1;
CUDA_SAFE_CALL(cudaMalloc((void **)&vid_list0, sizeof(IndexT)*num_emb));
CUDA_SAFE_CALL(cudaMalloc((void **)&vid_list1, sizeof(IndexT)*num_emb));
Bitsets small_sets, large_sets, middle_sets;
small_sets.alloc(MAX_NUM_PATTERNS, m);
large_sets.alloc(MAX_NUM_PATTERNS, m);
middle_sets.alloc(MAX_NUM_PATTERNS, m);
small_sets.set_size(num_init_patterns, m);
large_sets.set_size(num_init_patterns, m);
middle_sets.set_size(num_init_patterns, m);
IndexT *num_new_emb, *indices;
CUDA_SAFE_CALL(cudaMalloc((void **)&indices, sizeof(IndexT) * (num_emb+1)));
CUDA_SAFE_CALL(cudaDeviceSynchronize());
nblocks = (num_emb-1)/nthreads+1;
unsigned *d_num_new_patterns;
unsigned h_num_new_patterns = 0;
CUDA_SAFE_CALL(cudaMalloc((void **)&d_num_new_patterns, sizeof(unsigned)));
printf("Launching CUDA TC solver (%d CTAs, %d threads/CTA) ...\n", nblocks, nthreads);
Timer t;
t.Start();
unsigned level = 1;
init_aggregate<<<nblocks, nthreads>>>(m, num_emb, graph_gpu, emb_list, pids, nlabels, minsup, small_sets, large_sets);
CudaTest("solving init_aggregate `failed");
std::cout << "Init_aggregate Done\n";
int num_freq_patterns = init_support_count(m, num_init_patterns, minsup, small_sets, large_sets, h_init_support_map);
total_num += num_freq_patterns;
if (num_freq_patterns == 0) {
std::cout << "No frequent pattern found\n\n";
return;
}
std::cout << "Number of frequent single-edge patterns: " << num_freq_patterns << "\n";
CUDA_SAFE_CALL(cudaMemcpy(d_init_support_map, h_init_support_map, sizeof(bool) * num_init_patterns, cudaMemcpyHostToDevice));
init_filter_check<<<nblocks, nthreads>>>(num_emb, pids, d_init_support_map, is_frequent_emb);
CudaTest("solving init_filter_check `failed");
thrust::exclusive_scan(thrust::device, is_frequent_emb, is_frequent_emb+num_emb+1, indices);
IndexT new_size;
CUDA_SAFE_CALL(cudaMemcpy(&new_size, &indices[num_emb], sizeof(IndexT), cudaMemcpyDeviceToHost));
std::cout << "number of embeddings after pruning: " << new_size << "\n";
copy_vids<<<nblocks, nthreads>>>(num_emb, emb_list, vid_list0, vid_list1);
CudaTest("solving copy_vids `failed");
init_filter<<<nblocks, nthreads>>>(num_emb, emb_list, vid_list0, vid_list1, indices, is_frequent_emb);
CudaTest("solving init_filter `failed");
CUDA_SAFE_CALL(cudaFree(indices));
CUDA_SAFE_CALL(cudaFree(is_frequent_emb));
CUDA_SAFE_CALL(cudaFree(pids));
//small_sets.clean();
//large_sets.clean();
small_sets.clear();
large_sets.clear();
CUDA_SAFE_CALL(cudaFree(vid_list0));
CUDA_SAFE_CALL(cudaFree(vid_list1));
CUDA_SAFE_CALL(cudaFree(d_init_support_map));
emb_list.remove_tail(new_size);
while (1) {
num_emb = emb_list.size();
std::cout << "number of embeddings in level " << level << ": " << num_emb << "\n";
CUDA_SAFE_CALL(cudaMalloc((void **)&num_new_emb, sizeof(IndexT) * (num_emb+1)));
CUDA_SAFE_CALL(cudaMalloc((void **)&indices, sizeof(IndexT) * (num_emb+1)));
std::cout << "Done allocating memory for embeddings in level " << level << "\n";
nblocks = (num_emb-1)/nthreads+1;
extend_alloc<<<nblocks, nthreads>>>(num_emb, level, graph_gpu, emb_list, num_new_emb);
CudaTest("solving extend_alloc failed");
thrust::exclusive_scan(thrust::device, num_new_emb, num_new_emb+num_emb+1, indices);
CudaTest("Scan failed");
CUDA_SAFE_CALL(cudaMemcpy(&new_size, &indices[num_emb], sizeof(IndexT), cudaMemcpyDeviceToHost));
std::cout << "number of new embeddings: " << new_size << "\n";
emb_list.add_level(new_size);
extend_insert<<<nblocks, nthreads>>>(num_emb, level, graph_gpu, emb_list, indices);
CudaTest("solving extend_insert failed");
std::cout << "Extend_insert Done\n";
num_emb = emb_list.size();
CUDA_SAFE_CALL(cudaFree(num_new_emb));
CUDA_SAFE_CALL(cudaFree(indices));
level ++;
int num_patterns = nlabels*num_init_patterns;
nblocks = (num_emb-1)/nthreads+1;
std::cout << "Number of patterns in level " << level << ": " << num_patterns << std::endl;
std::cout << "number of embeddings in level " << level << ": " << num_emb << "\n";
unsigned *ne, *id_map;
CUDA_SAFE_CALL(cudaMalloc((void **)&ne, sizeof(unsigned)*num_patterns));
CUDA_SAFE_CALL(cudaMalloc((void **)&id_map, sizeof(unsigned)*num_patterns));
CUDA_SAFE_CALL(cudaMemset(ne, 0, sizeof(unsigned)*num_patterns));
CUDA_SAFE_CALL(cudaMalloc((void **)&pids, sizeof(unsigned)*num_emb));
std::cout << "Done allocating memory for aggregation in level " << level << "\n";
aggregate_check<<<nblocks, nthreads>>>(num_emb, level, graph_gpu, emb_list, pids, nlabels, minsup, ne);
CudaTest("solving aggregate_check failed");
CUDA_SAFE_CALL(cudaMemset(d_num_new_patterns, 0, sizeof(unsigned)));
find_candidate_patterns<<<(num_patterns-1)/nthreads+1, nthreads>>>(num_patterns, ne, minsup, id_map, d_num_new_patterns);
CudaTest("solving find_candidate_patterns failed");
CUDA_SAFE_CALL(cudaMemcpy(&h_num_new_patterns, d_num_new_patterns, sizeof(unsigned), cudaMemcpyDeviceToHost));
std::cout << "Number of candidate patterns in level " << level << ": " << h_num_new_patterns << std::endl;
//small_sets.alloc(h_num_new_patterns, m);
//large_sets.alloc(h_num_new_patterns, m);
//middle_sets.alloc(h_num_new_patterns, m);
small_sets.set_size(h_num_new_patterns, m);
large_sets.set_size(h_num_new_patterns, m);
middle_sets.set_size(h_num_new_patterns, m);
std::cout << "Done allocating sets\n";
aggregate<<<nblocks, nthreads>>>(m, num_emb, level, graph_gpu, emb_list, pids, ne, id_map, nlabels, minsup, small_sets, middle_sets, large_sets);
CudaTest("solving aggregate failed");
bool *h_support_map = (bool *)malloc(sizeof(bool) * h_num_new_patterns);
num_freq_patterns = support_count(m, h_num_new_patterns, minsup, small_sets, middle_sets, large_sets, h_support_map);
CudaTest("solving support_count failed");
CUDA_SAFE_CALL(cudaFree(ne));
CUDA_SAFE_CALL(cudaFree(id_map));
CUDA_SAFE_CALL(cudaFree(pids)); // pids is reallocated at every level; free it here so the loop does not leak
free(h_support_map); // the host-side support map is also reallocated every level
std::cout << "num_frequent_patterns: " << num_freq_patterns << "\n";
total_num += num_freq_patterns;
if (num_freq_patterns == 0) break;
if (level == k) break;
//filter<<<nblocks, nthreads>>>(level, emb_list);
}
CUDA_SAFE_CALL(cudaDeviceSynchronize());
t.Stop();
printf("\truntime = %f ms.\n", t.Millisecs());
}
|
7505aed385d8da305c658a782f43f68b9b499325.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
//#define N 100
__device__ int gpu_hist[10];
__global__ void gpuhistogram(int *a,int N)
{
int *ptr;
int tid=blockIdx.x*blockDim.x+threadIdx.x;
int numthr=blockDim.x*gridDim.x;
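// Note: __syncthreads() only synchronizes threads within a block, so the reset by thread 0
// below is not guaranteed to finish before other blocks start accumulating; it is harmless
// here only because __device__ globals start zero-initialized and the kernel is launched once.
// The grid-stride loop below lets the kernel handle any launch geometry, even if
// blockDim.x*gridDim.x < N.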
if(tid==0)
for(int i=0;i<10;i++)
gpu_hist[i]=0;
__syncthreads();
while(tid<N)
{
ptr=&gpu_hist[a[tid]];
atomicAdd(ptr,1);
tid+=numthr;
}
}
int main()
{
int B,T;
int *a;
int *deva;
int N;
int hist[10],cist[10];
for(int i=0;i<10;i++)
{cist[i]=0;hist[i]=0;}
printf("Enter the number of elements .\n");
scanf("%d",&N);
printf("Enter the number of Blocks and Threads .\n");
again:;
printf("Blocks:");
scanf("%d",&B);
printf("Threads:\n");
scanf("%d",&T);
if(B*T<N)
{printf("The number of blocks and threads is less please enter again.\n");
goto again;
}
hipEvent_t start,stop;
float cput,gput;
hipEventCreate(&start);
hipEventCreate(&stop);
int size=N*sizeof(int);
a=(int*)malloc(size);
srand(1);
for(int i=0;i<N;i++)
a[i]=rand()%10;
hipMalloc((void**)&deva,size);
hipMemcpy(deva,a,size,hipMemcpyHostToDevice);
hipEventRecord(start,0);
hipLaunchKernelGGL(( gpuhistogram), dim3(B),dim3(T), 0, 0, deva,N);
//hipDeviceSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&gput,start,stop);
hipMemcpyFromSymbol(hist, HIP_SYMBOL(gpu_hist), sizeof(hist), 0, hipMemcpyDeviceToHost); // pass the symbol itself, not its name as a string
printf("GPU execution completed.\n");
int l;
for (int i=0;i<10;i++)
{
cist[i]=0;
}
hipEventRecord(start,0);
for(int i=0;i<N;i++)
{
l=a[i];
cist[l]++;
}
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&cput,start,stop);
for(int i=0;i<10;i++)
{printf("Number of %d's = gpu: %d cpu: %d \n",i,hist[i],cist[i]);
}
free(a);
hipFree(deva);
printf("CPUtime= %f and GPUtime= %f.\n",cput,gput);
return 0;
/////////
}
| 7505aed385d8da305c658a782f43f68b9b499325.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
//#define N 100
__device__ int gpu_hist[10];
__global__ void gpuhistogram(int *a,int N)
{
int *ptr;
int tid=blockIdx.x*blockDim.x+threadIdx.x;
int numthr=blockDim.x*gridDim.x;
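// Note: __syncthreads() only synchronizes threads within a block, so the reset by thread 0
// below is not guaranteed to finish before other blocks start accumulating; it is harmless
// here only because __device__ globals start zero-initialized and the kernel is launched once.
// The grid-stride loop below lets the kernel handle any launch geometry, even if
// blockDim.x*gridDim.x < N.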
if(tid==0)
for(int i=0;i<10;i++)
gpu_hist[i]=0;
__syncthreads();
while(tid<N)
{
ptr=&gpu_hist[a[tid]];
atomicAdd(ptr,1);
tid+=numthr;
}
}
int main()
{
int B,T;
int *a;
int *deva;
int N;
int hist[10],cist[10];
for(int i=0;i<10;i++)
{cist[i]=0;hist[i]=0;}
printf("Enter the number of elements .\n");
scanf("%d",&N);
printf("Enter the number of Blocks and Threads .\n");
again:;
printf("Blocks:");
scanf("%d",&B);
printf("Threads:\n");
scanf("%d",&T);
if(B*T<N)
{printf("The number of blocks and threads is less please enter again.\n");
goto again;
}
cudaEvent_t start,stop;
float cput,gput;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int size=N*sizeof(int);
a=(int*)malloc(size);
srand(1);
for(int i=0;i<N;i++)
a[i]=rand()%10;
cudaMalloc((void**)&deva,size);
cudaMemcpy(deva,a,size,cudaMemcpyHostToDevice);
cudaEventRecord(start,0);
gpuhistogram<<<B,T>>>(deva,N);
//cudaThreadSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gput,start,stop);
cudaMemcpyFromSymbol(hist, gpu_hist, sizeof(hist), 0, cudaMemcpyDeviceToHost); // pass the symbol itself, not its name as a string
printf("GPU execution completed.\n");
int l;
for (int i=0;i<10;i++)
{
cist[i]=0;
}
cudaEventRecord(start,0);
for(int i=0;i<N;i++)
{
l=a[i];
cist[l]++;
}
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&cput,start,stop);
for(int i=0;i<10;i++)
{printf("Number of %d's = gpu: %d cpu: %d \n",i,hist[i],cist[i]);
}
free(a);
cudaFree(deva);
printf("CPUtime= %f and GPUtime= %f.\n",cput,gput);
return 0;
/////////
}
|
2a98ee2f0b0eedb9dd1e8428adab31506673690a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <float.h>
#include <ctime>
#define MAX_RANDOM 2147483647
#define NMAX 100000
#define DEBUG 1 //set level of debug visibility [0=>off,1=>min,2=>max]
#define NOISEOFF 0 //set to suppress noise in channel
#define N_ITERATION 4 //no. of turbo decoder iterations
#define N 16000
//#define NA 16000
#define permutationseed 2
#define M 4 //no. of trellis states
int X[N],count=0;
int permutation[N];
int from[M][2]; //from[m][i] = next state (from state m with databit = i)
int to[M][2]; //to[m][i] = previous state (to state m with databit = i)
int parity[M][2]; //parity bit associated with transition from state m
int term[M][2]; //term[m] = pair of data bits required to terminate trellis
void randomInterleaver(){
int interleaver[NMAX];
int check[NMAX]; // Already permuted positions
int i;
int position;
srandom(permutationseed);
for (i=0; i<N; i++)
check[i] = 0;
for (i=0; i<N; i++)
{
do
position = (int) ( ( (double)(random())/MAX_RANDOM ) * N );
while ( check[position] );
check[position] = 1;
interleaver[i] = position;
}
for (i=0; i<N; i++)
{
permutation[i]=interleaver[i];
X[i]=interleaver[i]%2;
// printf("%5d -> %5d\n",X[i],permutation[i]);
}
}
// Normally distributed number generator (ubiquitous Box-Muller method)
//
double normal(void)
{
double x, y, rr, randn;
do{
x = (double) 2*rand()/RAND_MAX - 1.0; //uniform in range [-1,1]
y = (double) 2*rand()/RAND_MAX - 1.0; //uniform in range [-1,1]
rr = x*x + y*y;
} while( rr >= 1 );
randn = x*sqrt((-2.0*log(rr))/rr);
return(randn);
}
// modified BCJR algorithm (MAP decoder)
//
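// calgamma: one thread per (trellis stage, state, databit) triple; computes the full branch
// metric gamma (a priori + systematic + parity terms) and gammae, a parity-only metric kept
// separately so the extrinsic LLR can exclude the a priori and systematic contributions.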
__global__ void calgamma(double *d_gammae,double *d_gamma,int *d_parity,double *d_La,double *d_x_d,double *d_p_d,int Lc)
{
int i = blockIdx.x*400+threadIdx.x;
int j = blockIdx.y;
int k = blockIdx.z;
double xk_h;
double pk_h;
xk_h=k ? +1 : -1;
pk_h=d_parity[j*2+k] ? +1 : -1;
d_gamma[M*2*i+2*j+k]=exp(0.5*(d_La[i] * xk_h + Lc * d_x_d[i] * xk_h +
Lc * d_p_d[i] * pk_h));
d_gammae[M*2*i+2*j+k] = exp(0.5*(Lc * d_p_d[i] * pk_h));
}
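// calExtLLR: one block per trellis stage k; accumulates alpha * gammae * beta over all four
// states for databit 1 and databit 0, and returns the extrinsic log-likelihood log(pr1/pr0).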
__global__ void calExtLLR(double *d_gammae,double *d_alpha,double *d_beta,int *d_from,double *d_Le)
{
int k = blockIdx.x;
double pr1,pr0;
pr1=0;
pr0=0;
int m;
for(m = 0; m < 4; m++)
{
//we use gammae rather than gamma as we want the
//extrinsic component of the overall likelihood
pr1 += (d_alpha[k*M+m] * d_gammae[k*M*2+m*2+1] * d_beta[(k+1)*M+d_from[m*2+1]]);
pr0 += (d_alpha[k*M+m] * d_gammae[k*M*2+m*2+0] * d_beta[(k+1)*M+d_from[m*2+0]]);
}
d_Le[k] = log(pr1/ pr0); //extrinsic likelihood
}
/*__global__ void calBeta(double *d_gamma,){
}*/
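// calAlpha_beta: each block runs the forward (alpha) and backward (beta) state-metric
// recursions over short 8-stage windows of the trellis, normalising the metrics at every
// stage to keep them from underflowing.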
__global__ void calAlpha_beta(double *d_gamma,double *d_alpha,int *d_to,double *d_beta, int is_term,int *d_from){
__shared__ double *dd_gamma;
__shared__ int *dd_to;
dd_gamma = d_gamma;
//dd_alpha = d_alpha;
dd_to = d_to;
double total;
int k=blockIdx.x*8;
int q=8200+blockIdx.y*8;
int a,m;
d_alpha[k+0] = 1;
for(m = 1; m < M; m++)
d_alpha[k+m] = 0;
for(a = 1; a <= 7; a++)
{
total = 0;
for(m = 0; m < M; m++)
{
d_alpha[a*k+4+m] = d_alpha[(k+(a-1)+4)+dd_to[m*2+0]] * dd_gamma[(k+(a-1)*4*2)+(dd_to[m*2+0]*2)+0] + d_alpha[(k+(a-1)+4)+d_to[m*2+1]] * dd_gamma[(k+(a-1)*4*2)+(dd_to[m*2+1]*2)+1];
total += d_alpha[a+k+4+m];
}
//normalise
for(m = 0; m < M; m++)
d_alpha[a+k+4+m] /= total;
}
d_alpha[q+0] = 1;
for(m = 1; m < M; m++)
d_alpha[q+m] = 0;
for(a = 1; a <= 7; a++)
{
total = 0;
for(m = 0; m < M; m++)
{
d_alpha[a*q+4+m] = d_alpha[(q+(a-1)+4)+dd_to[m*2+0]] * dd_gamma[(q+(a-1)*4*2)+(dd_to[m*2+0]*2)+0] + d_alpha[(q+(a-1)+4)+d_to[m*2+1]] * dd_gamma[(q+(a-1)*4*2)+(dd_to[m*2+1]*2)+1];
total += d_alpha[a+q+4+m];
}
//normalise
for(m = 0; m < M; m++)
d_alpha[a+q+4+m] /= total;
}
/* Beta calculation */
__shared__ int *dd_from;
dd_from = d_from;
//dd_beta-d_beta;
//int k=blockIdx.x*8;
int /*a,m,*/f1,f2;
if(is_term) //if trellis terminated
{
//we know for sure the final state is 0
d_beta[k+7+4+0] = 1;
for(m = 1; m < M; m++)
d_beta[k+7+4-m] = 0;
}
else //else trellis not terminated
{
//we haven't a clue which is final state
//so the best we can do is say they're all equally likely
for(m = 0; m < M; m++)
d_beta[k+7+4-m] = 1.0 / (double) M;
}
//iterate backwards through trellis
for(a = 6; a >= 0; a--)
{
total = 0;
for(m = 0; m < 4; m++)
{
f1=dd_from[m*2+0];
f2=dd_from[m*2+1];
d_beta[a+k+4-m] = d_beta[(a+1)+k+4+f1] * dd_gamma[k+a*4*2+m*2+0] +
d_beta[(a+1)+k+4+f2] * dd_gamma[k+a*4*2+m*2+1];
total += d_beta[a+k+4-m];
}
//normalise
for(m = 0; m < 4; m++)
d_beta[a+k+4-m] /= total;
}
if(is_term) //if trellis terminated
{
//we know for sure the final state is 0
d_beta[q+7+4+0] = 1;
for(m = 1; m < M; m++)
d_beta[q+7+4-m] = 0;
}
else //else trellis not terminated
{
//we haven't a clue which is final state
//so the best we can do is say they're all equally likely
for(m = 0; m < M; m++)
d_beta[q+7+4-m] = 1.0 / (double) M;
}
//iterate backwards through trellis
for(a = 6; a >= 0; a--)
{
total = 0;
for(m = 0; m < 4; m++)
{
f1=dd_from[m*2+0];
f2=dd_from[m*2+1];
d_beta[a+q+4-m] = d_beta[(a+1)+q+4+f1] * dd_gamma[q+a*4*2+m*2+0] +
d_beta[(a+1)+q+4+f2] * dd_gamma[q+a*4*2+m*2+1];
total += d_beta[a+q+4-m];
}
//normalise
for(m = 0; m < 4; m++)
d_beta[a+q+4-m] /= total;
}
}
void modified_bcjr
(
int is_term, //indicates if trellis terminated
double Lc, //Lc = 2/(sigma*sigma) = channel reliability
double La[N], //apriori likelihood of each info bit
double x_d[N], //noisy data
double p_d[N], //noisy parity
double Le[N] //extrinsic log likelihood
)
{
//int k, m, i;
//double xk_h, pk_h; //databit & parity associated with a branch
double gammae[N][M][2]; //gammas for extrinsic likelihoods
double gamma[N][M][2]; //gammas for total likelihoods
// double alpha[N+1][M]; //probability of entering branch via state m
double beta[N+1][M]; //probability of exiting branch via state m
// double total; //used for normalising alpha's and beta's
//calculate branch gamma's
double *d_gammae;
double *d_gamma;
int *d_parity;
double *d_La;
double *d_x_d;
double *d_p_d;
int *d_to;
int *d_from;
hipMalloc((void**)&d_gammae,N*M*2*sizeof(double));
hipMalloc((void**)&d_gamma,N*M*2*sizeof(double));
hipMalloc((void**)&d_parity,M*2*sizeof(int));
hipMalloc((void**)&d_La,N*sizeof(double));
hipMalloc((void**)&d_x_d,N*sizeof(double));
hipMalloc((void**)&d_p_d,N*sizeof(double));
hipMalloc((void**)&d_to,M*2*sizeof(int));
hipMalloc((void**)&d_from,M*2*sizeof(int));
hipMemcpy(d_to,to,M*2*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(d_from,from,M*2*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(d_parity,parity,M*2*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(d_x_d,x_d,N*sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(d_La,La,N*sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(d_p_d,p_d,N*sizeof(double),hipMemcpyHostToDevice);
dim3 grid(N/400,M,2);
hipLaunchKernelGGL(( calgamma), dim3(grid),dim3(400), 0, 0, d_gammae,d_gamma,d_parity,d_La,d_x_d,d_p_d,Lc);
hipMemcpy(gamma,d_gamma,M*N*2*sizeof(double),hipMemcpyDeviceToHost);
hipMemcpy(gammae,d_gammae,M*N*2*sizeof(double),hipMemcpyDeviceToHost);
//hipFree(d_gamma);
hipFree(d_parity);
hipFree(d_La);
hipFree(d_x_d);
hipFree(d_p_d);
// Calculate state alpha's
//
double *d_alpha;
double *d_beta;
hipMalloc((void**)&d_beta,(N+1)*M*sizeof(double));
hipMalloc((void**)&d_alpha,(N+1)*M*sizeof(double));
dim3 grid1(1024,1024);
hipLaunchKernelGGL(( calAlpha_beta), dim3(grid1),dim3(1), 0, 0, d_gamma,d_alpha,d_to,d_beta,is_term,d_from);
hipMemcpy(gamma,d_gamma,(N+1)*M*sizeof(double),hipMemcpyDeviceToHost);
hipMemcpy(beta,d_beta,(N+1)*M*sizeof(double),hipMemcpyDeviceToHost);
// Calculate state beta's
//
// Calculate extrinsic likelihood
//
//double *d_alpha;
//int *d_from;
double *d_Le;
//hipMalloc((void**)&d_alpha,(N+1)*M*sizeof(double));
//hipMalloc((void**)&d_from,M*2*sizeof(int));
hipMalloc((void**)&d_Le,N*sizeof(double));
//hipMemcpy(d_alpha,alpha,(N+1)*M*sizeof(double),hipMemcpyHostToDevice);
//hipMemcpy(d_beta,beta,(N+1)*M*sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(d_from,from,M*2*sizeof(int),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( calExtLLR), dim3(N),dim3(1), 0, 0, d_gammae,d_alpha,d_beta,d_from,d_Le);
hipMemcpy(Le,d_Le,N*sizeof(double),hipMemcpyDeviceToHost);
hipFree(d_gammae);
hipFree(d_alpha);
hipFree(d_beta);
hipFree(d_Le);
hipFree(d_gamma); // also release the gamma table and the trellis tables allocated above
hipFree(d_to);
hipFree(d_from);
#if DEBUG > 1
for(k = 0; k < N; k++)
{
for(m = 0; m < M; m++)
{
for(i = 0; i < 2; i++)
{
printf("gamma[%i][%i][%i] = %f\t", k, m, i, gamma[k][m][i]);
printf("gammae[%i][%i][%i] = %f\n", k, m, i, gammae[k][m][i]);
}
}
printf("\n");
}
for(k = 0; k <= N; k++)
{
for(m = 0; m < M; m++)
printf("alpha[%i][%i] = %f\n", k, m, alpha[k][m]);
printf("\n");
}
for(k = 0; k <= N; k++)
{
for(m = 0; m < M; m++)
printf("beta[%i][%i] = %f\n", k, m, beta[k][m]);
printf("\n");
}
#endif
}
//
// +--------------------------> Xk
// | fb
// | +---------(+)-------+
// | | | |
// Xk--+-(+)-+->[D]----->[D]--+
// | |
// +--------------(+)---> Pk
//
//
void gen_tab(void)
{
int m, i, b0, b1, fb, state;
//generate tables for 4 state RSC encoder
for(m = 0; m < M; m++) //for each starting state
for(i = 0; i < 2; i++) //for each possible databit
{
b0 = (m >> 0) & 1; //bit 0 of state
b1 = (m >> 1) & 1; //bit 1 of state
//parity from state m with databit i
parity[m][i] = b0 ^ i;
//from[m][i] = next state from state m with databit i
from[m][i] = (b0<<1) + (i ^ b0 ^ b1);
}
//to[m][i] = previous state to state m with databit i
for(m = 0; m < M; m++)
for(i = 0; i < 2; i++)
to[from[m][i]][i] = m;
for(m = 0; m < M; m++) //for each state
{
state = m;
b0 = (state >> 0) & 1; //bit 0 of state
b1 = (state >> 1) & 1; //bit 1 of state
fb = b0 ^ b1; //feedback bit
term[m][0] = fb; //will set X[N-2] = fb
state = from[m][fb]; //advance from state m with databit=fb
b0 = (state >> 0) & 1; //bit 0 of state
b1 = (state >> 1) & 1; //bit 1 of state
fb = b0 ^ b1; //feedback bit
term[m][1] = fb; //will set X[N-1] = fb
}
}
//
// +-----------> Xk
// |
// |
// |
// Xk---+--[E1]-----> P1k
// |
// [P]
// |
// +--[E2]-----> P2k
//
//
void turbo_encode
(
int X[N], //block of N-2 information bits + 2 to_be_decided bits
int P1[N], //encoder #1 parity bits
int P2[N] //encoder #2 parity bits
)
{
int k; //trellis stage
int state; //encoder state
int X_p[N]; //X_permuted = permuted bits
//encoder #1
state = 0; //encoder always starts in state 0
for(k = 0; k < N-2; k++)
{
P1[k] = parity[state][X[k]];
state = from[state][X[k]];
//printf("s[%i] = %i\n", k, state);
}
//terminate encoder #1 trellis to state 0
X[N-2] = term[state][0]; //databit to feed a 0 into delay line
X[N-1] = term[state][1]; //databit to feed another 0 into delay line
P1[N-2] = parity[state][X[N-2]]; //parity from state with databitX[N-2]
state = from[state][X[N-2]]; //next state from current state
P1[N-1] = parity[state][X[N-1]]; //parity from state with databit=X[N-1]
state = from[state][X[N-1]]; //next state from current state
if(state != 0)
{
//should never get here
printf("Error: Could not terminate encoder #1 trellis\n");
exit(1);
}
//permute tx databits for encoder #2
for(k = 0; k < N; k++)
X_p[k] = X[permutation[k]];
//encoder #2
state = 0; //encoder always starts in state 0
for(k = 0; k < N; k++)
{
P2[k] = parity[state][X_p[k]]; //parity from state with databit=X_p[k]
state = from[state][X_p[k]]; //next state from current state
}
//for(k = 0; k < N; k++)
// printf("%i %i %i %i\n", X[k], P1[k], X_p[k], P2[k]);
}
void turbo_decode(
int NA,
double sigma, //channel noise standard deviation
double x_d[N], //x_dash = noisy data symbol
double p1_d[N], //p1_dash = noisy parity#1 symbol
double p2_d[N], //p2_dash = noisy parity#2 symbol
double L_h[N], //L_hat = likelihood of databit given entire observation
int X_h[]
)
{
int i, k;
double Le1[N]; //decoder #1 extrinsic likelihood
double Le1_p[N]; //decoder #1 extrinsic likelihood permuted
double Le2[N]; //decoder #2 extrinsic likelihood
double Le2_ip[N]; //decoder #2 extrinsic likelihood inverse permuted
double Lc; //channel reliability value
Lc = 2.0 / (sigma*sigma); //requires sigma to be non-trivial
//zero apriori information into very first iteration of BCJR
for(k = 0; k < N; k++)
Le2_ip[k] = 0;
for(i = 0; i < N_ITERATION; i++)
{
modified_bcjr(1, Lc, Le2_ip, x_d, p1_d, Le1);
//permute decoder#1 likelihoods to match decoder#2 order
for(k = 0; k < N; k++)
Le1_p[k] = Le1[permutation[k]];
modified_bcjr(0, Lc, Le1_p, x_d, p2_d, Le2);
//inverse permute decoder#2 likelihoods to match decoder#1 order
for(k = 0; k < N; k++)
Le2_ip[permutation[k]] = Le2[k];
#if DEBUG > 1
for(k = 0; k < N; k++)
{
printf("Le1[%i] = %f\t", k, Le1[k]);
printf("Le2_ip[%i] = %f\t", k, Le2_ip[k]);
//printf("Lc*x_d[%i] = %f", k, Lc*x_d[k]);
printf("\n");
}
printf("\n");
#endif
}
//calculate overall likelihoods and then slice'em
for(k = 0; k < N; k++)
{
L_h[k] = Lc*x_d[k] + Le1[k] + Le2_ip[k]; //soft decision
X_h[count] = (L_h[k] > 0.0) ? 1 : 0;
count++; //hard decision
}
}
/*
gcc turbo_example.c -lm -o t; t
*/
int main(void)
{
int NA;
printf("Enter the NA value");
scanf("%d",&NA);
float snr;
int snrdb;
double noise;
int k,i; //databit index (trellis stage)
int signal_power=1;
double sigma;
int P1[N]; //encoder #1 parity bits
int P2[N]; //encoder #2 parity bits
double x[N]; //databit mapped to symbol
double p1[N]; //encoder #1 parity bit mapped to symbol
double p2[N]; //encoder #2 parity bit mapped to symbol
double x_d[N]; //x_dash = noisy data symbol
double p1_d[N]; //p1_dash = noisy parity#1 symbol
double p2_d[N]; //p2_dash = noisy parity#2 symbol
double L_h[N]; //L_hat = likelihood of databit given entire observation
//int X_h[N]; //X_hat = sliced MAP decisions
double elapsed;
clock_t t1, t2;
double time_count = 0.0; // must be initialised: it accumulates the decode time across blocks below
int input[NA],X_h[NA];
FILE *fp= fopen("time_gpu.dat","a+");
for(i=0; i< NA; i++){
X_h[i]=0;
}
/*printf("Enter the SNR value in db");
scanf("%d",&snrdb);*/
snrdb=5;
snr= pow(10,(float)snrdb/10);
noise = (float)signal_power/snr;
printf("signal power is %d \n",signal_power);
for(i=0;i < (NA/N);i++){
randomInterleaver();
for(k=0;k<N;k++){
input[k+i*N]= X[k];
}
srand(1); //init random number generator
gen_tab(); //generate trellis tables
sigma = sqrt(noise); //noise std deviation
/********************************
* ENCODER *
********************************/
turbo_encode(X, P1, P2);
//map bits to symbols
for(k = 0; k < N; k++) //for entire block length
{
x[k] = X[k] ? +1 : -1; //map databit to symbol
p1[k] = P1[k] ? +1 : -1; //map parity #1 to symbol
p2[k] = P2[k] ? +1 : -1; //map parity #2 to symbol
}
/********************************
* CHANNEL *
********************************/
//add some AWGN
for(k = 0; k < N; k++)
{
#if NOISEOFF
x_d[k] = x[k];
p1_d[k] = p1[k];
p2_d[k] = p2[k];
#else
x_d[k] = x[k] + sigma*normal();
p1_d[k] = p1[k] + sigma*normal();
p2_d[k] = p2[k] + sigma*normal();
#endif
}
#if DEBUG > 1
for(k = 0; k < N; k++)
printf("%f \t%f \t%f\n", x_d[k], p1_d[k], p2_d[k]);
#endif
/********************************
* DECODER *
********************************/
t1 = clock();
turbo_decode(NA,sigma, x_d, p1_d, p2_d, L_h, X_h);
t2=clock();
time_count=time_count+((double)t2-(double)t1);
}
/*printf("\n\n****INPUT****\n\n");
for(i=0;i < NA;i++){
printf("%d",input[i]);
}*/
/*printf("\n\n****OUTPUT****\n\n");
printf("X_h = ");
for(k = 0; k < NA; k++)
printf("%i", X_h[k]);
printf("\n");*/
int count=0;
//float ber;
for(k=0; k < NA; k++) {
if(X_h[k] != input[k])
count++;
}
//ber=(float)count/NA;
//printf("BER is %f",ber);
//printf("count is %d",count);
elapsed = time_count / CLOCKS_PER_SEC * 1000;
fprintf(fp,"%d %lf",NA, elapsed);
fprintf(fp,"\n");
fclose(fp);
printf("\nTime elapsed =%lf ms\n",elapsed);
return 0;
}
| 2a98ee2f0b0eedb9dd1e8428adab31506673690a.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <float.h>
#include <ctime>
#define MAX_RANDOM 2147483647
#define NMAX 100000
#define DEBUG 1 //set level of debug visibility [0=>off,1=>min,2=>max]
#define NOISEOFF 0 //set to suppress noise in channel
#define N_ITERATION 4 //no. of turbo decoder iterations
#define N 16000
//#define NA 16000
#define permutationseed 2
#define M 4 //no. of trellis states
int X[N],count=0;
int permutation[N];
int from[M][2]; //from[m][i] = next state (from state m with databit = i)
int to[M][2]; //to[m][i] = previous state (to state m with databit = i)
int parity[M][2]; //parity bit associated with transition from state m
int term[M][2]; //term[m] = pair of data bits required to terminate trellis
void randomInterleaver(){
int interleaver[NMAX];
int check[NMAX]; // Already permuted positions
int i;
int position;
srandom(permutationseed);
for (i=0; i<N; i++)
check[i] = 0;
for (i=0; i<N; i++)
{
do
position = (int) ( ( (double)(random())/MAX_RANDOM ) * N );
while ( check[position] );
check[position] = 1;
interleaver[i] = position;
}
for (i=0; i<N; i++)
{
permutation[i]=interleaver[i];
X[i]=interleaver[i]%2;
// printf("%5d -> %5d\n",X[i],permutation[i]);
}
}
// Normally distributed number generator (ubiquitous Box-Muller method)
//
double normal(void)
{
double x, y, rr, randn;
do{
x = (double) 2*rand()/RAND_MAX - 1.0; //uniform in range [-1,1]
y = (double) 2*rand()/RAND_MAX - 1.0; //uniform in range [-1,1]
rr = x*x + y*y;
} while( rr >= 1 );
randn = x*sqrt((-2.0*log(rr))/rr);
return(randn);
}
// modified BCJR algorithm (MAP decoder)
//
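// calgamma: one thread per (trellis stage, state, databit) triple; computes the full branch
// metric gamma (a priori + systematic + parity terms) and gammae, a parity-only metric kept
// separately so the extrinsic LLR can exclude the a priori and systematic contributions.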
__global__ void calgamma(double *d_gammae,double *d_gamma,int *d_parity,double *d_La,double *d_x_d,double *d_p_d,int Lc)
{
int i = blockIdx.x*400+threadIdx.x;
int j = blockIdx.y;
int k = blockIdx.z;
double xk_h;
double pk_h;
xk_h=k ? +1 : -1;
pk_h=d_parity[j*2+k] ? +1 : -1;
d_gamma[M*2*i+2*j+k]=exp(0.5*(d_La[i] * xk_h + Lc * d_x_d[i] * xk_h +
Lc * d_p_d[i] * pk_h));
d_gammae[M*2*i+2*j+k] = exp(0.5*(Lc * d_p_d[i] * pk_h));
}
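// calExtLLR: one block per trellis stage k; accumulates alpha * gammae * beta over all four
// states for databit 1 and databit 0, and returns the extrinsic log-likelihood log(pr1/pr0).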
__global__ void calExtLLR(double *d_gammae,double *d_alpha,double *d_beta,int *d_from,double *d_Le)
{
int k = blockIdx.x;
double pr1,pr0;
pr1=0;
pr0=0;
int m;
for(m = 0; m < 4; m++)
{
//we use gammae rather than gamma as we want the
//extrinsic component of the overall likelihood
pr1 += (d_alpha[k*M+m] * d_gammae[k*M*2+m*2+1] * d_beta[(k+1)*M+d_from[m*2+1]]);
pr0 += (d_alpha[k*M+m] * d_gammae[k*M*2+m*2+0] * d_beta[(k+1)*M+d_from[m*2+0]]);
}
d_Le[k] = log(pr1/ pr0); //extrinsic likelihood
}
/*__global__ void calBeta(double *d_gamma,){
}*/
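// calAlpha_beta: each block runs the forward (alpha) and backward (beta) state-metric
// recursions over short 8-stage windows of the trellis, normalising the metrics at every
// stage to keep them from underflowing.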
__global__ void calAlpha_beta(double *d_gamma,double *d_alpha,int *d_to,double *d_beta, int is_term,int *d_from){
__shared__ double *dd_gamma;
__shared__ int *dd_to;
dd_gamma = d_gamma;
//dd_alpha = d_alpha;
dd_to = d_to;
double total;
int k=blockIdx.x*8;
int q=8200+blockIdx.y*8;
int a,m;
d_alpha[k+0] = 1;
for(m = 1; m < M; m++)
d_alpha[k+m] = 0;
for(a = 1; a <= 7; a++)
{
total = 0;
for(m = 0; m < M; m++)
{
d_alpha[a*k+4+m] = d_alpha[(k+(a-1)+4)+dd_to[m*2+0]] * dd_gamma[(k+(a-1)*4*2)+(dd_to[m*2+0]*2)+0] + d_alpha[(k+(a-1)+4)+d_to[m*2+1]] * dd_gamma[(k+(a-1)*4*2)+(dd_to[m*2+1]*2)+1];
total += d_alpha[a+k+4+m];
}
//normalise
for(m = 0; m < M; m++)
d_alpha[a+k+4+m] /= total;
}
d_alpha[q+0] = 1;
for(m = 1; m < M; m++)
d_alpha[q+m] = 0;
for(a = 1; a <= 7; a++)
{
total = 0;
for(m = 0; m < M; m++)
{
d_alpha[a*q+4+m] = d_alpha[(q+(a-1)+4)+dd_to[m*2+0]] * dd_gamma[(q+(a-1)*4*2)+(dd_to[m*2+0]*2)+0] + d_alpha[(q+(a-1)+4)+d_to[m*2+1]] * dd_gamma[(q+(a-1)*4*2)+(dd_to[m*2+1]*2)+1];
total += d_alpha[a+q+4+m];
}
//normalise
for(m = 0; m < M; m++)
d_alpha[a+q+4+m] /= total;
}
/* Beta calculation */
__shared__ int *dd_from;
dd_from = d_from;
//dd_beta-d_beta;
//int k=blockIdx.x*8;
int /*a,m,*/f1,f2;
if(is_term) //if trellis terminated
{
//we know for sure the final state is 0
d_beta[k+7+4+0] = 1;
for(m = 1; m < M; m++)
d_beta[k+7+4-m] = 0;
}
else //else trellis not terminated
{
//we haven't a clue which is final state
//so the best we can do is say they're all equally likely
for(m = 0; m < M; m++)
d_beta[k+7+4-m] = 1.0 / (double) M;
}
//iterate backwards through trellis
for(a = 6; a >= 0; a--)
{
total = 0;
for(m = 0; m < 4; m++)
{
f1=dd_from[m*2+0];
f2=dd_from[m*2+1];
d_beta[a+k+4-m] = d_beta[(a+1)+k+4+f1] * dd_gamma[k+a*4*2+m*2+0] +
d_beta[(a+1)+k+4+f2] * dd_gamma[k+a*4*2+m*2+1];
total += d_beta[a+k+4-m];
}
//normalise
for(m = 0; m < 4; m++)
d_beta[a+k+4-m] /= total;
}
if(is_term) //if trellis terminated
{
//we know for sure the final state is 0
d_beta[q+7+4+0] = 1;
for(m = 1; m < M; m++)
d_beta[q+7+4-m] = 0;
}
else //else trellis not terminated
{
//we haven't a clue which is final state
//so the best we can do is say they're all equally likely
for(m = 0; m < M; m++)
d_beta[q+7+4-m] = 1.0 / (double) M;
}
//iterate backwards through trellis
for(a = 6; a >= 0; a--)
{
total = 0;
for(m = 0; m < 4; m++)
{
f1=dd_from[m*2+0];
f2=dd_from[m*2+1];
d_beta[a+q+4-m] = d_beta[(a+1)+q+4+f1] * dd_gamma[q+a*4*2+m*2+0] +
d_beta[(a+1)+q+4+f2] * dd_gamma[q+a*4*2+m*2+1];
total += d_beta[a+q+4-m];
}
//normalise
for(m = 0; m < 4; m++)
d_beta[a+q+4-m] /= total;
}
}
void modified_bcjr
(
int is_term, //indicates if trellis terminated
double Lc, //Lc = 2/(sigma*sigma) = channel reliability
double La[N], //apriori likelihood of each info bit
double x_d[N], //noisy data
double p_d[N], //noisy parity
double Le[N] //extrinsic log likelihood
)
{
//int k, m, i;
//double xk_h, pk_h; //databit & parity associated with a branch
double gammae[N][M][2]; //gammas for extrinsic likelihoods
double gamma[N][M][2]; //gammas for total likelihoods
// double alpha[N+1][M]; //probability of entering branch via state m
double beta[N+1][M]; //probability of exiting branch via state m
// double total; //used for normalising alpha's and beta's
//calculate branch gamma's
double *d_gammae;
double *d_gamma;
int *d_parity;
double *d_La;
double *d_x_d;
double *d_p_d;
int *d_to;
int *d_from;
cudaMalloc((void**)&d_gammae,N*M*2*sizeof(double));
cudaMalloc((void**)&d_gamma,N*M*2*sizeof(double));
cudaMalloc((void**)&d_parity,M*2*sizeof(int));
cudaMalloc((void**)&d_La,N*sizeof(double));
cudaMalloc((void**)&d_x_d,N*sizeof(double));
cudaMalloc((void**)&d_p_d,N*sizeof(double));
cudaMalloc((void**)&d_to,M*2*sizeof(int));
cudaMalloc((void**)&d_from,M*2*sizeof(int));
cudaMemcpy(d_to,to,M*2*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_from,from,M*2*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_parity,parity,M*2*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_x_d,x_d,N*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(d_La,La,N*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(d_p_d,p_d,N*sizeof(double),cudaMemcpyHostToDevice);
dim3 grid(N/400,M,2);
calgamma<<<grid,400>>>(d_gammae,d_gamma,d_parity,d_La,d_x_d,d_p_d,Lc);
cudaMemcpy(gamma,d_gamma,M*N*2*sizeof(double),cudaMemcpyDeviceToHost);
cudaMemcpy(gammae,d_gammae,M*N*2*sizeof(double),cudaMemcpyDeviceToHost);
//cudaFree(d_gamma);
cudaFree(d_parity);
cudaFree(d_La);
cudaFree(d_x_d);
cudaFree(d_p_d);
// Calculate state alpha's
//
double *d_alpha;
double *d_beta;
cudaMalloc((void**)&d_beta,(N+1)*M*sizeof(double));
cudaMalloc((void**)&d_alpha,(N+1)*M*sizeof(double));
dim3 grid1(1024,1024);
calAlpha_beta<<<grid1,1>>>(d_gamma,d_alpha,d_to,d_beta,is_term,d_from);
cudaMemcpy(gamma,d_gamma,(N+1)*M*sizeof(double),cudaMemcpyDeviceToHost);
cudaMemcpy(beta,d_beta,(N+1)*M*sizeof(double),cudaMemcpyDeviceToHost);
// Calculate state beta's
//
// Calculate extrinsic likelihood
//
//double *d_alpha;
//int *d_from;
double *d_Le;
//cudaMalloc((void**)&d_alpha,(N+1)*M*sizeof(double));
//cudaMalloc((void**)&d_from,M*2*sizeof(int));
cudaMalloc((void**)&d_Le,N*sizeof(double));
//cudaMemcpy(d_alpha,alpha,(N+1)*M*sizeof(double),cudaMemcpyHostToDevice);
//cudaMemcpy(d_beta,beta,(N+1)*M*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(d_from,from,M*2*sizeof(int),cudaMemcpyHostToDevice);
calExtLLR<<<N,1>>>(d_gammae,d_alpha,d_beta,d_from,d_Le);
cudaMemcpy(Le,d_Le,N*sizeof(double),cudaMemcpyDeviceToHost);
cudaFree(d_gammae);
cudaFree(d_alpha);
cudaFree(d_beta);
cudaFree(d_Le);
cudaFree(d_gamma); // also release the gamma table and the trellis tables allocated above
cudaFree(d_to);
cudaFree(d_from);
#if DEBUG > 1
for(k = 0; k < N; k++)
{
for(m = 0; m < M; m++)
{
for(i = 0; i < 2; i++)
{
printf("gamma[%i][%i][%i] = %f\t", k, m, i, gamma[k][m][i]);
printf("gammae[%i][%i][%i] = %f\n", k, m, i, gammae[k][m][i]);
}
}
printf("\n");
}
for(k = 0; k <= N; k++)
{
for(m = 0; m < M; m++)
printf("alpha[%i][%i] = %f\n", k, m, alpha[k][m]);
printf("\n");
}
for(k = 0; k <= N; k++)
{
for(m = 0; m < M; m++)
printf("beta[%i][%i] = %f\n", k, m, beta[k][m]);
printf("\n");
}
#endif
}
//
// +--------------------------> Xk
// | fb
// | +---------(+)-------+
// | | | |
// Xk--+-(+)-+->[D]----->[D]--+
// | |
// +--------------(+)---> Pk
//
//
void gen_tab(void)
{
int m, i, b0, b1, fb, state;
//generate tables for 4 state RSC encoder
for(m = 0; m < M; m++) //for each starting state
for(i = 0; i < 2; i++) //for each possible databit
{
b0 = (m >> 0) & 1; //bit 0 of state
b1 = (m >> 1) & 1; //bit 1 of state
//parity from state m with databit i
parity[m][i] = b0 ^ i;
//from[m][i] = next state from state m with databit i
from[m][i] = (b0<<1) + (i ^ b0 ^ b1);
}
//to[m][i] = previous state to state m with databit i
for(m = 0; m < M; m++)
for(i = 0; i < 2; i++)
to[from[m][i]][i] = m;
for(m = 0; m < M; m++) //for each state
{
state = m;
b0 = (state >> 0) & 1; //bit 0 of state
b1 = (state >> 1) & 1; //bit 1 of state
fb = b0 ^ b1; //feedback bit
term[m][0] = fb; //will set X[N-2] = fb
state = from[m][fb]; //advance from state m with databit=fb
b0 = (state >> 0) & 1; //bit 0 of state
b1 = (state >> 1) & 1; //bit 1 of state
fb = b0 ^ b1; //feedback bit
term[m][1] = fb; //will set X[N-1] = fb
}
}
//
// +-----------> Xk
// |
// |
// |
// Xk---+--[E1]-----> P1k
// |
// [P]
// |
// +--[E2]-----> P2k
//
//
void turbo_encode
(
int X[N], //block of N-2 information bits + 2 to_be_decided bits
int P1[N], //encoder #1 parity bits
int P2[N] //encoder #2 parity bits
)
{
int k; //trellis stage
int state; //encoder state
int X_p[N]; //X_permuted = permuted bits
//encoder #1
state = 0; //encoder always starts in state 0
for(k = 0; k < N-2; k++)
{
P1[k] = parity[state][X[k]];
state = from[state][X[k]];
//printf("s[%i] = %i\n", k, state);
}
//terminate encoder #1 trellis to state 0
X[N-2] = term[state][0]; //databit to feed a 0 into delay line
X[N-1] = term[state][1]; //databit to feed another 0 into delay line
P1[N-2] = parity[state][X[N-2]]; //parity from state with databitX[N-2]
state = from[state][X[N-2]]; //next state from current state
P1[N-1] = parity[state][X[N-1]]; //parity from state with databit=X[N-1]
state = from[state][X[N-1]]; //next state from current state
if(state != 0)
{
//should never get here
printf("Error: Could not terminate encoder #1 trellis\n");
exit(1);
}
//permute tx databits for encoder #2
for(k = 0; k < N; k++)
X_p[k] = X[permutation[k]];
//encoder #2
state = 0; //encoder always starts in state 0
for(k = 0; k < N; k++)
{
P2[k] = parity[state][X_p[k]]; //parity from state with databit=X_p[k]
state = from[state][X_p[k]]; //next state from current state
}
//for(k = 0; k < N; k++)
// printf("%i %i %i %i\n", X[k], P1[k], X_p[k], P2[k]);
}
void turbo_decode(
int NA,
double sigma, //channel noise standard deviation
double x_d[N], //x_dash = noisy data symbol
double p1_d[N], //p1_dash = noisy parity#1 symbol
double p2_d[N], //p2_dash = noisy parity#2 symbol
double L_h[N], //L_hat = likelihood of databit given entire observation
int X_h[]
)
{
int i, k;
double Le1[N]; //decoder #1 extrinsic likelihood
double Le1_p[N]; //decoder #1 extrinsic likelihood permuted
double Le2[N]; //decoder #2 extrinsic likelihood
double Le2_ip[N]; //decoder #2 extrinsic likelihood inverse permuted
double Lc; //channel reliability value
Lc = 2.0 / (sigma*sigma); //requires sigma to be non-trivial
//zero apriori information into very first iteration of BCJR
for(k = 0; k < N; k++)
Le2_ip[k] = 0;
for(i = 0; i < N_ITERATION; i++)
{
modified_bcjr(1, Lc, Le2_ip, x_d, p1_d, Le1);
//permute decoder#1 likelihoods to match decoder#2 order
for(k = 0; k < N; k++)
Le1_p[k] = Le1[permutation[k]];
modified_bcjr(0, Lc, Le1_p, x_d, p2_d, Le2);
//inverse permute decoder#2 likelihoods to match decoder#1 order
for(k = 0; k < N; k++)
Le2_ip[permutation[k]] = Le2[k];
#if DEBUG > 1
for(k = 0; k < N; k++)
{
printf("Le1[%i] = %f\t", k, Le1[k]);
printf("Le2_ip[%i] = %f\t", k, Le2_ip[k]);
//printf("Lc*x_d[%i] = %f", k, Lc*x_d[k]);
printf("\n");
}
printf("\n");
#endif
}
//calculate overall likelihoods and then slice'em
for(k = 0; k < N; k++)
{
L_h[k] = Lc*x_d[k] + Le1[k] + Le2_ip[k]; //soft decision
X_h[count] = (L_h[k] > 0.0) ? 1 : 0;
count++; //hard decision
}
}
/*
gcc turbo_example.c -lm -o t; t
*/
int main(void)
{
int NA;
printf("Enter the NA value");
scanf("%d",&NA);
float snr;
int snrdb;
double noise;
int k,i; //databit index (trellis stage)
int signal_power=1;
double sigma;
int P1[N]; //encoder #1 parity bits
int P2[N]; //encoder #2 parity bits
double x[N]; //databit mapped to symbol
double p1[N]; //encoder #1 parity bit mapped to symbol
double p2[N]; //encoder #2 parity bit mapped to symbol
double x_d[N]; //x_dash = noisy data symbol
double p1_d[N]; //p1_dash = noisy parity#1 symbol
double p2_d[N]; //p2_dash = noisy parity#2 symbol
double L_h[N]; //L_hat = likelihood of databit given entire observation
//int X_h[N]; //X_hat = sliced MAP decisions
double elapsed;
clock_t t1, t2;
double time_count = 0.0; // must be initialised: it accumulates the decode time across blocks below
int input[NA],X_h[NA];
FILE *fp= fopen("time_gpu.dat","a+");
for(i=0; i< NA; i++){
X_h[i]=0;
}
/*printf("Enter the SNR value in db");
scanf("%d",&snrdb);*/
snrdb=5;
snr= pow(10,(float)snrdb/10);
noise = (float)signal_power/snr;
printf("signal power is %d \n",signal_power);
for(i=0;i < (NA/N);i++){
randomInterleaver();
for(k=0;k<N;k++){
input[k+i*N]= X[k];
}
srand(1); //init random number generator
gen_tab(); //generate trellis tables
sigma = sqrt(noise); //noise std deviation
/********************************
* ENCODER *
********************************/
turbo_encode(X, P1, P2);
//map bits to symbols
for(k = 0; k < N; k++) //for entire block length
{
x[k] = X[k] ? +1 : -1; //map databit to symbol
p1[k] = P1[k] ? +1 : -1; //map parity #1 to symbol
p2[k] = P2[k] ? +1 : -1; //map parity #2 to symbol
}
/********************************
* CHANNEL *
********************************/
//add some AWGN
for(k = 0; k < N; k++)
{
#if NOISEOFF
x_d[k] = x[k];
p1_d[k] = p1[k];
p2_d[k] = p2[k];
#else
x_d[k] = x[k] + sigma*normal();
p1_d[k] = p1[k] + sigma*normal();
p2_d[k] = p2[k] + sigma*normal();
#endif
}
#if DEBUG > 1
for(k = 0; k < N; k++)
printf("%f \t%f \t%f\n", x_d[k], p1_d[k], p2_d[k]);
#endif
/********************************
* DECODER *
********************************/
t1 = clock();
turbo_decode(NA,sigma, x_d, p1_d, p2_d, L_h, X_h);
t2=clock();
time_count=time_count+((double)t2-(double)t1);
}
/*printf("\n\n****INPUT****\n\n");
for(i=0;i < NA;i++){
printf("%d",input[i]);
}*/
/*printf("\n\n****OUTPUT****\n\n");
printf("X_h = ");
for(k = 0; k < NA; k++)
printf("%i", X_h[k]);
printf("\n");*/
int count=0;
//float ber;
for(k=0; k < NA; k++) {
if(X_h[k] != input[k])
count++;
}
//ber=(float)count/NA;
//printf("BER is %f",ber);
//printf("count is %d",count);
elapsed = time_count / CLOCKS_PER_SEC * 1000;
fprintf(fp,"%d %lf",NA, elapsed);
fprintf(fp,"\n");
fclose(fp);
printf("\nTime elapsed =%lf ms\n",elapsed);
return 0;
}
|
43dec14f448afc2c4595c4a6394245b1b66426c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "haar.h"
#include "utils.h"
// The sqrt(2) factor is applied after two HAAR_*, so it becomes a 0.5 factor
#define HAAR_AVG(a, b) ((a+b))
#define HAAR_DIF(a, b) ((a-b))
// must be run with grid size = (Nc/2, Nr/2) where Nr = numrows of input
__global__ void kern_haar2d_fwd(DTYPE* img, DTYPE* c_a, DTYPE* c_h, DTYPE* c_v, DTYPE* c_d, int Nr, int Nc) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
int Nr_is_odd = (Nr & 1);
int Nc_is_odd = (Nc & 1);
int Nr2 = (Nr + Nr_is_odd)/2;
int Nc2 = (Nc + Nc_is_odd)/2;
if (gidy < Nr2 && gidx < Nc2) {
// for odd N, image is virtually extended by repeating the last element
int posx0 = 2*gidx;
int posx1 = 2*gidx+1;
if ((Nc_is_odd) && (posx1 == Nc)) posx1--;
int posy0 = 2*gidy;
int posy1 = 2*gidy+1;
if ((Nr_is_odd) && (posy1 == Nr)) posy1--;
DTYPE a = img[posy0*Nc + posx0];
DTYPE b = img[posy0*Nc + posx1];
DTYPE c = img[posy1*Nc + posx0];
DTYPE d = img[posy1*Nc + posx1];
c_a[gidy* Nc2 + gidx] = 0.5*HAAR_AVG(HAAR_AVG(a, c), HAAR_AVG(b, d)); // A
c_v[gidy* Nc2 + gidx] = 0.5*HAAR_DIF(HAAR_AVG(a, c), HAAR_AVG(b, d)); // V
c_h[gidy* Nc2 + gidx] = 0.5*HAAR_AVG(HAAR_DIF(a, c), HAAR_DIF(b, d)); // H
c_d[gidy* Nc2 + gidx] = 0.5*HAAR_DIF(HAAR_DIF(a, c), HAAR_DIF(b, d)); // D
}
}
// must be run with grid size = (2*Nr, 2*Nc) ; Nr = numrows of input
__global__ void kern_haar2d_inv(DTYPE* img, DTYPE* c_a, DTYPE* c_h, DTYPE* c_v, DTYPE* c_d, int Nr, int Nc, int Nr2, int Nc2) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
if (gidy < Nr2 && gidx < Nc2) {
DTYPE a = c_a[(gidy/2)*Nc + (gidx/2)];
DTYPE b = c_v[(gidy/2)*Nc + (gidx/2)];
DTYPE c = c_h[(gidy/2)*Nc + (gidx/2)];
DTYPE d = c_d[(gidy/2)*Nc + (gidx/2)];
DTYPE res = 0.0f;
int gx1 = (gidx & 1), gy1 = (gidy & 1);
if (gx1 == 0 && gy1 == 0) res = 0.5*HAAR_AVG(HAAR_AVG(a, c), HAAR_AVG(b, d));
if (gx1 == 1 && gy1 == 0) res = 0.5*HAAR_DIF(HAAR_AVG(a, c), HAAR_AVG(b, d));
if (gx1 == 0 && gy1 == 1) res = 0.5*HAAR_AVG(HAAR_DIF(a, c), HAAR_DIF(b, d));
if (gx1 == 1 && gy1 == 1) res = 0.5*HAAR_DIF(HAAR_DIF(a, c), HAAR_DIF(b, d));
img[gidy*Nc2 + gidx] = res;
}
}
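// haar_forward2d: one kernel launch per decomposition level; the approximation band ping-pongs
// between d_coeffs[0] and d_tmp across levels, and when the level count is even the final
// approximation is copied back into d_coeffs[0].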
int haar_forward2d(DTYPE* d_image, DTYPE** d_coeffs, DTYPE* d_tmp, w_info winfos) {
int Nr = winfos.Nr, Nc = winfos.Nc, levels = winfos.nlevels;
int Nc2 = Nc, Nr2 = Nr;
int Nc2_old = Nc2, Nr2_old = Nr2;
w_div2(&Nc2); w_div2(&Nr2);
int tpb = 16; // TODO : tune for max perfs.
DTYPE* d_tmp1, *d_tmp2;
d_tmp1 = d_coeffs[0];
d_tmp2 = d_tmp;
// First level
dim3 n_blocks = dim3(w_iDivUp(Nc2, tpb), w_iDivUp(Nr2, tpb), 1);
dim3 n_threads_per_block = dim3(tpb, tpb, 1);
hipLaunchKernelGGL(( kern_haar2d_fwd), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_image, d_coeffs[0], d_coeffs[1], d_coeffs[2], d_coeffs[3], Nr, Nc);
for (int i=1; i < levels; i++) {
Nc2_old = Nc2; Nr2_old = Nr2;
w_div2(&Nc2); w_div2(&Nr2);
n_blocks = dim3(w_iDivUp(Nc2, tpb), w_iDivUp(Nr2, tpb), 1);
hipLaunchKernelGGL(( kern_haar2d_fwd), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_tmp1, d_tmp2, d_coeffs[3*i+1], d_coeffs[3*i+2], d_coeffs[3*i+3], Nr2_old, Nc2_old);
w_swap_ptr(&d_tmp1, &d_tmp2);
}
if ((levels & 1) == 0) hipMemcpy(d_coeffs[0], d_tmp1, Nr2*Nc2*sizeof(DTYPE), hipMemcpyDeviceToDevice);
return 0;
}
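// haar_inverse2d: reconstruct from the coarsest level upwards, ping-ponging the partially
// reconstructed approximation between buffers, then synthesise the full-resolution image
// into d_image in the final launch.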
int haar_inverse2d(DTYPE* d_image, DTYPE** d_coeffs, DTYPE* d_tmp, w_info winfos) {
int Nr = winfos.Nr, Nc = winfos.Nc, levels = winfos.nlevels;
// Table of sizes. FIXME: consider adding this in the w_info structure
int tNr[levels+1]; tNr[0] = Nr;
int tNc[levels+1]; tNc[0] = Nc;
for (int i = 1; i <= levels; i++) {
tNr[i] = tNr[i-1];
tNc[i] = tNc[i-1];
w_div2(tNr + i);
w_div2(tNc + i);
}
int tpb = 16; // TODO : tune for max perfs.
DTYPE* d_tmp1, *d_tmp2;
d_tmp1 = d_coeffs[0];
d_tmp2 = d_tmp;
dim3 n_threads_per_block = dim3(tpb, tpb, 1);
dim3 n_blocks;
for (int i = levels-1; i >= 1; i--) {
n_blocks = dim3(w_iDivUp(tNc[i], tpb), w_iDivUp(tNr[i], tpb), 1);
hipLaunchKernelGGL(( kern_haar2d_inv), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_tmp2, d_tmp1, d_coeffs[3*i+1], d_coeffs[3*i+2], d_coeffs[3*i+3], tNr[i+1], tNc[i+1], tNr[i], tNc[i]);
w_swap_ptr(&d_tmp1, &d_tmp2);
}
if ((levels & 1) == 0) hipMemcpy(d_coeffs[0], d_tmp1, tNr[1]*tNc[1]*sizeof(DTYPE), hipMemcpyDeviceToDevice);
// First level
n_blocks = dim3(w_iDivUp(Nc, tpb), w_iDivUp(Nr, tpb), 1);
hipLaunchKernelGGL(( kern_haar2d_inv), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_image, d_coeffs[0], d_coeffs[1], d_coeffs[2], d_coeffs[3], tNr[1], tNc[1], Nr, Nc);
return 0;
}
/// ----------------------------------------------------------------------------
/// ------------------------ 1D HAAR TRANSFORM ---------------------------------
/// ----------------------------------------------------------------------------
#define ONE_SQRT2 0.70710678118654746
// must be run with grid size = (Nc/2, Nr) where Nr = numrows of input
__global__ void kern_haar1d_fwd(DTYPE* img, DTYPE* c_a, DTYPE* c_d, int Nr, int Nc) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
int Nc_is_odd = (Nc & 1);
int Nc2 = (Nc + Nc_is_odd)/2;
if (gidy < Nr && gidx < Nc2) {
int gidx_next = 2*gidx+1;
if ((Nc_is_odd) && (gidx_next == Nc)) gidx_next--; // for odd size: repeat last element
DTYPE a = img[gidy*Nc + (gidx*2)];
DTYPE b = img[gidy*Nc + gidx_next];
c_a[gidy* Nc2 + gidx] = ONE_SQRT2 * HAAR_AVG(a, b);
c_d[gidy* Nc2 + gidx] = ONE_SQRT2 * HAAR_DIF(a, b);
}
}
// must be run with grid size = (Nr, 2*Nc) ; Nr = numrows of input
__global__ void kern_haar1d_inv(DTYPE* img, DTYPE* c_a, DTYPE* c_d, int Nr, int Nc, int Nc2) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
if (gidy < Nr && gidx < Nc2) {
DTYPE a = c_a[gidy*Nc + (gidx/2)];
DTYPE b = c_d[gidy*Nc + (gidx/2)];
DTYPE res = 0.0f;
if ((gidx & 1) == 0) res = ONE_SQRT2 * HAAR_AVG(a, b);
else res = ONE_SQRT2 * HAAR_DIF(a, b);
img[gidy*Nc2 + gidx] = res;
}
}
int haar_forward1d(DTYPE* d_image, DTYPE** d_coeffs, DTYPE* d_tmp, w_info winfos) {
int Nr = winfos.Nr, Nc = winfos.Nc, levels = winfos.nlevels;
int Nc2 = Nc;
int Nc2_old = Nc2;
w_div2(&Nc2);
int tpb = 16; // TODO : tune for max perfs.
DTYPE* d_tmp1, *d_tmp2;
d_tmp1 = d_coeffs[0];
d_tmp2 = d_tmp;
// First level
dim3 n_blocks = dim3(w_iDivUp(Nc2, tpb), w_iDivUp(Nr, tpb), 1);
dim3 n_threads_per_block = dim3(tpb, tpb, 1);
hipLaunchKernelGGL(( kern_haar1d_fwd), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_image, d_coeffs[0], d_coeffs[1], Nr, Nc);
for (int i=1; i < levels; i++) {
Nc2_old = Nc2;
w_div2(&Nc2);
n_blocks = dim3(w_iDivUp(Nc2, tpb), w_iDivUp(Nr, tpb), 1);
hipLaunchKernelGGL(( kern_haar1d_fwd), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_tmp1, d_tmp2, d_coeffs[i+1], Nr, Nc2_old);
w_swap_ptr(&d_tmp1, &d_tmp2);
}
if ((levels & 1) == 0) hipMemcpy(d_coeffs[0], d_tmp1, Nr*Nc2*sizeof(DTYPE), hipMemcpyDeviceToDevice);
return 0;
}
// FIXME: for some reason, the precision of the inverse(forward) for HAAR 1D
// is not as good as in 2D
// (I have 1e-13 error for [0, 255] range in 2D, and 1e-5 in 1D)
int haar_inverse1d(DTYPE* d_image, DTYPE** d_coeffs, DTYPE* d_tmp, w_info winfos) {
int Nr = winfos.Nr, Nc = winfos.Nc, levels = winfos.nlevels;
// Table of sizes. FIXME: consider adding this in the w_info structure
int tNc[levels+1]; tNc[0] = Nc;
for (int i = 1; i <= levels; i++) {
tNc[i] = tNc[i-1];
w_div2(tNc + i);
}
int tpb = 16; // TODO : tune for max perfs.
DTYPE* d_tmp1, *d_tmp2;
d_tmp1 = d_coeffs[0];
d_tmp2 = d_tmp;
dim3 n_threads_per_block = dim3(tpb, tpb, 1);
dim3 n_blocks;
for (int i = levels-1; i >= 1; i--) {
n_blocks = dim3(w_iDivUp(tNc[i], tpb), w_iDivUp(Nr, tpb), 1);
hipLaunchKernelGGL(( kern_haar1d_inv), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_tmp2, d_tmp1, d_coeffs[i+1], Nr, tNc[i+1], tNc[i]);
w_swap_ptr(&d_tmp1, &d_tmp2);
}
if ((levels > 1) && ((levels & 1) == 0)) hipMemcpy(d_coeffs[0], d_tmp1, Nr*tNc[1]*sizeof(DTYPE), hipMemcpyDeviceToDevice);
// First level
n_blocks = dim3(w_iDivUp(Nc, tpb), w_iDivUp(Nr, tpb), 1);
hipLaunchKernelGGL(( kern_haar1d_inv), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_image, d_coeffs[0], d_coeffs[1], Nr, tNc[1], Nc);
return 0;
}
| 43dec14f448afc2c4595c4a6394245b1b66426c9.cu | #include "haar.h"
#include "utils.h"
// The sqrt(2) factor is applied after two HAAR_*, so it becomes a 0.5 factor
#define HAAR_AVG(a, b) ((a+b))
#define HAAR_DIF(a, b) ((a-b))
// must be run with grid size = (Nc/2, Nr/2) where Nr = numrows of input
__global__ void kern_haar2d_fwd(DTYPE* img, DTYPE* c_a, DTYPE* c_h, DTYPE* c_v, DTYPE* c_d, int Nr, int Nc) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
int Nr_is_odd = (Nr & 1);
int Nc_is_odd = (Nc & 1);
int Nr2 = (Nr + Nr_is_odd)/2;
int Nc2 = (Nc + Nc_is_odd)/2;
if (gidy < Nr2 && gidx < Nc2) {
// for odd N, image is virtually extended by repeating the last element
int posx0 = 2*gidx;
int posx1 = 2*gidx+1;
if ((Nc_is_odd) && (posx1 == Nc)) posx1--;
int posy0 = 2*gidy;
int posy1 = 2*gidy+1;
if ((Nr_is_odd) && (posy1 == Nr)) posy1--;
DTYPE a = img[posy0*Nc + posx0];
DTYPE b = img[posy0*Nc + posx1];
DTYPE c = img[posy1*Nc + posx0];
DTYPE d = img[posy1*Nc + posx1];
c_a[gidy* Nc2 + gidx] = 0.5*HAAR_AVG(HAAR_AVG(a, c), HAAR_AVG(b, d)); // A
c_v[gidy* Nc2 + gidx] = 0.5*HAAR_DIF(HAAR_AVG(a, c), HAAR_AVG(b, d)); // V
c_h[gidy* Nc2 + gidx] = 0.5*HAAR_AVG(HAAR_DIF(a, c), HAAR_DIF(b, d)); // H
c_d[gidy* Nc2 + gidx] = 0.5*HAAR_DIF(HAAR_DIF(a, c), HAAR_DIF(b, d)); // D
}
}
// must be run with grid size = (2*Nr, 2*Nc) ; Nr = numrows of input
__global__ void kern_haar2d_inv(DTYPE* img, DTYPE* c_a, DTYPE* c_h, DTYPE* c_v, DTYPE* c_d, int Nr, int Nc, int Nr2, int Nc2) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
if (gidy < Nr2 && gidx < Nc2) {
DTYPE a = c_a[(gidy/2)*Nc + (gidx/2)];
DTYPE b = c_v[(gidy/2)*Nc + (gidx/2)];
DTYPE c = c_h[(gidy/2)*Nc + (gidx/2)];
DTYPE d = c_d[(gidy/2)*Nc + (gidx/2)];
DTYPE res = 0.0f;
int gx1 = (gidx & 1), gy1 = (gidy & 1);
if (gx1 == 0 && gy1 == 0) res = 0.5*HAAR_AVG(HAAR_AVG(a, c), HAAR_AVG(b, d));
if (gx1 == 1 && gy1 == 0) res = 0.5*HAAR_DIF(HAAR_AVG(a, c), HAAR_AVG(b, d));
if (gx1 == 0 && gy1 == 1) res = 0.5*HAAR_AVG(HAAR_DIF(a, c), HAAR_DIF(b, d));
if (gx1 == 1 && gy1 == 1) res = 0.5*HAAR_DIF(HAAR_DIF(a, c), HAAR_DIF(b, d));
img[gidy*Nc2 + gidx] = res;
}
}
int haar_forward2d(DTYPE* d_image, DTYPE** d_coeffs, DTYPE* d_tmp, w_info winfos) {
int Nr = winfos.Nr, Nc = winfos.Nc, levels = winfos.nlevels;
int Nc2 = Nc, Nr2 = Nr;
int Nc2_old = Nc2, Nr2_old = Nr2;
w_div2(&Nc2); w_div2(&Nr2);
int tpb = 16; // TODO : tune for max perfs.
DTYPE* d_tmp1, *d_tmp2;
d_tmp1 = d_coeffs[0];
d_tmp2 = d_tmp;
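// Level 1 reads d_image directly and writes its approximation into d_coeffs[0]
// and its H/V/D bands into d_coeffs[1..3]; each further pass i reads the
// previous approximation and emits d_coeffs[3*i+1 .. 3*i+3], ping-ponging the
// approximation between d_coeffs[0] and the scratch buffer d_tmp. When
// `levels` is even the coarsest approximation ends up in the scratch buffer
// and is copied back into d_coeffs[0] after the loop.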
// First level
dim3 n_blocks = dim3(w_iDivUp(Nc2, tpb), w_iDivUp(Nr2, tpb), 1);
dim3 n_threads_per_block = dim3(tpb, tpb, 1);
kern_haar2d_fwd<<<n_blocks, n_threads_per_block>>>(d_image, d_coeffs[0], d_coeffs[1], d_coeffs[2], d_coeffs[3], Nr, Nc);
for (int i=1; i < levels; i++) {
Nc2_old = Nc2; Nr2_old = Nr2;
w_div2(&Nc2); w_div2(&Nr2);
n_blocks = dim3(w_iDivUp(Nc2, tpb), w_iDivUp(Nr2, tpb), 1);
kern_haar2d_fwd<<<n_blocks, n_threads_per_block>>>(d_tmp1, d_tmp2, d_coeffs[3*i+1], d_coeffs[3*i+2], d_coeffs[3*i+3], Nr2_old, Nc2_old);
w_swap_ptr(&d_tmp1, &d_tmp2);
}
if ((levels & 1) == 0) cudaMemcpy(d_coeffs[0], d_tmp1, Nr2*Nc2*sizeof(DTYPE), cudaMemcpyDeviceToDevice);
return 0;
}
int haar_inverse2d(DTYPE* d_image, DTYPE** d_coeffs, DTYPE* d_tmp, w_info winfos) {
int Nr = winfos.Nr, Nc = winfos.Nc, levels = winfos.nlevels;
// Table of sizes. FIXME: consider adding this in the w_info structure
int tNr[levels+1]; tNr[0] = Nr;
int tNc[levels+1]; tNc[0] = Nc;
for (int i = 1; i <= levels; i++) {
tNr[i] = tNr[i-1];
tNc[i] = tNc[i-1];
w_div2(tNr + i);
w_div2(tNc + i);
}
int tpb = 16; // TODO : tune for max perfs.
DTYPE* d_tmp1, *d_tmp2;
d_tmp1 = d_coeffs[0];
d_tmp2 = d_tmp;
dim3 n_threads_per_block = dim3(tpb, tpb, 1);
dim3 n_blocks;
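// Reconstruction mirrors the forward pass: the loop ping-pongs the
// approximation between d_coeffs[0] and d_tmp while combining it with the
// H/V/D bands in d_coeffs[3*i+1 .. 3*i+3], the copy below restores the last
// intermediate result into d_coeffs[0] when `levels` is even, and the final
// kernel reads d_coeffs[0..3] to write the full-size d_image.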
for (int i = levels-1; i >= 1; i--) {
n_blocks = dim3(w_iDivUp(tNc[i], tpb), w_iDivUp(tNr[i], tpb), 1);
kern_haar2d_inv<<<n_blocks, n_threads_per_block>>>(d_tmp2, d_tmp1, d_coeffs[3*i+1], d_coeffs[3*i+2], d_coeffs[3*i+3], tNr[i+1], tNc[i+1], tNr[i], tNc[i]);
w_swap_ptr(&d_tmp1, &d_tmp2);
}
if ((levels & 1) == 0) cudaMemcpy(d_coeffs[0], d_tmp1, tNr[1]*tNc[1]*sizeof(DTYPE), cudaMemcpyDeviceToDevice);
// First level
n_blocks = dim3(w_iDivUp(Nc, tpb), w_iDivUp(Nr, tpb), 1);
kern_haar2d_inv<<<n_blocks, n_threads_per_block>>>(d_image, d_coeffs[0], d_coeffs[1], d_coeffs[2], d_coeffs[3], tNr[1], tNc[1], Nr, Nc);
return 0;
}
/// ----------------------------------------------------------------------------
/// ------------------------ 1D HAAR TRANSFORM ---------------------------------
/// ----------------------------------------------------------------------------
#define ONE_SQRT2 0.70710678118654746
// must be run with grid size = (Nc/2, Nr) where Nr = numrows of input
__global__ void kern_haar1d_fwd(DTYPE* img, DTYPE* c_a, DTYPE* c_d, int Nr, int Nc) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
int Nc_is_odd = (Nc & 1);
int Nc2 = (Nc + Nc_is_odd)/2;
if (gidy < Nr && gidx < Nc2) {
int gidx_next = 2*gidx+1;
if ((Nc_is_odd) && (gidx_next == Nc)) gidx_next--; // for odd size: repeat last element
DTYPE a = img[gidy*Nc + (gidx*2)];
DTYPE b = img[gidy*Nc + gidx_next];
c_a[gidy* Nc2 + gidx] = ONE_SQRT2 * HAAR_AVG(a, b);
c_d[gidy* Nc2 + gidx] = ONE_SQRT2 * HAAR_DIF(a, b);
}
}
// must be run with grid size = (Nr, 2*Nc) ; Nr = numrows of input
__global__ void kern_haar1d_inv(DTYPE* img, DTYPE* c_a, DTYPE* c_d, int Nr, int Nc, int Nc2) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
if (gidy < Nr && gidx < Nc2) {
DTYPE a = c_a[gidy*Nc + (gidx/2)];
DTYPE b = c_d[gidy*Nc + (gidx/2)];
DTYPE res = 0.0f;
if ((gidx & 1) == 0) res = ONE_SQRT2 * HAAR_AVG(a, b);
else res = ONE_SQRT2 * HAAR_DIF(a, b);
img[gidy*Nc2 + gidx] = res;
}
}
int haar_forward1d(DTYPE* d_image, DTYPE** d_coeffs, DTYPE* d_tmp, w_info winfos) {
int Nr = winfos.Nr, Nc = winfos.Nc, levels = winfos.nlevels;
int Nc2 = Nc;
int Nc2_old = Nc2;
w_div2(&Nc2);
int tpb = 16; // TODO : tune for max perfs.
DTYPE* d_tmp1, *d_tmp2;
d_tmp1 = d_coeffs[0];
d_tmp2 = d_tmp;
// First level
dim3 n_blocks = dim3(w_iDivUp(Nc2, tpb), w_iDivUp(Nr, tpb), 1);
dim3 n_threads_per_block = dim3(tpb, tpb, 1);
kern_haar1d_fwd<<<n_blocks, n_threads_per_block>>>(d_image, d_coeffs[0], d_coeffs[1], Nr, Nc);
for (int i=1; i < levels; i++) {
Nc2_old = Nc2;
w_div2(&Nc2);
n_blocks = dim3(w_iDivUp(Nc2, tpb), w_iDivUp(Nr, tpb), 1);
kern_haar1d_fwd<<<n_blocks, n_threads_per_block>>>(d_tmp1, d_tmp2, d_coeffs[i+1], Nr, Nc2_old);
w_swap_ptr(&d_tmp1, &d_tmp2);
}
if ((levels & 1) == 0) cudaMemcpy(d_coeffs[0], d_tmp1, Nr*Nc2*sizeof(DTYPE), cudaMemcpyDeviceToDevice);
return 0;
}
// FIXME: for some reason, the precision of the inverse(forward) for HAAR 1D
// is not as good as in 2D
// (I have 1e-13 error for [0, 255] range in 2D, and 1e-5 in 1D)
int haar_inverse1d(DTYPE* d_image, DTYPE** d_coeffs, DTYPE* d_tmp, w_info winfos) {
int Nr = winfos.Nr, Nc = winfos.Nc, levels = winfos.nlevels;
// Table of sizes. FIXME: consider adding this in the w_info structure
int tNc[levels+1]; tNc[0] = Nc;
for (int i = 1; i <= levels; i++) {
tNc[i] = tNc[i-1];
w_div2(tNc + i);
}
int tpb = 16; // TODO : tune for max perfs.
DTYPE* d_tmp1, *d_tmp2;
d_tmp1 = d_coeffs[0];
d_tmp2 = d_tmp;
dim3 n_threads_per_block = dim3(tpb, tpb, 1);
dim3 n_blocks;
for (int i = levels-1; i >= 1; i--) {
n_blocks = dim3(w_iDivUp(tNc[i], tpb), w_iDivUp(Nr, tpb), 1);
kern_haar1d_inv<<<n_blocks, n_threads_per_block>>>(d_tmp2, d_tmp1, d_coeffs[i+1], Nr, tNc[i+1], tNc[i]);
w_swap_ptr(&d_tmp1, &d_tmp2);
}
if ((levels > 1) && ((levels & 1) == 0)) cudaMemcpy(d_coeffs[0], d_tmp1, Nr*tNc[1]*sizeof(DTYPE), cudaMemcpyDeviceToDevice);
// First level
n_blocks = dim3(w_iDivUp(Nc, tpb), w_iDivUp(Nr, tpb), 1);
kern_haar1d_inv<<<n_blocks, n_threads_per_block>>>(d_image, d_coeffs[0], d_coeffs[1], Nr, tNc[1], Nc);
return 0;
}
|
1a3066926628672425c2c43ea58efb6bd4985da3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <functional>
#include <iostream>
#include "GPU_AdjacentMatrix.cuh"
class GPUMemoryAdjMatrix : public GPUAdjMatrix {
public:
~GPUMemoryAdjMatrix() override {
hipFree(adj_nodes_);
hipFree(GPU_Matrix_cols_);
hipFree(GPU_Matrix_cols_capacity_);
}
//initialize the adjmatrix
GPUMemoryAdjMatrix(GPUAutoIndexing* indexing){
src_indexing_ = indexing;
hipMallocManaged(&adj_nodes_, init_row_cap * sizeof(IdType*));
hipMallocManaged(&adj_edges_, init_row_cap * sizeof(IdType*));
//row
GPU_Matrix_rows_ = 0;
GPU_Matrix_rows_capacity_ = init_row_cap;
//cols on CPU/GPU
//CPU_Matrix_cols_ = (IdType*)malloc(init_rol_cap * sizeof(IdType));
hipMallocManaged(&GPU_Matrix_cols_, init_row_cap * sizeof(IdType));
//CPU_Matrix_cols_capacity_ = (IdType*)malloc(init_rol_cap * sizeof(IdType));
hipMallocManaged(&GPU_Matrix_cols_capacity_, init_row_cap * sizeof(IdType));
//initialize
hipMemset(GPU_Matrix_cols_, 0, GPU_Matrix_rows_capacity_ * sizeof(IdType));
//hipMemset(GPU_Matrix_cols_capacity_, init_col_cap, GPU_Matrix_rows_capacity_ * sizeof(IdType));
dim3 kernel_init_block(GPU_Matrix_rows_capacity_/(kernel_init_thread_num * kernel_init_thread_num) + 1, 1);
dim3 kernel_init_thread(kernel_init_thread_num, kernel_init_thread_num);
hipLaunchKernelGGL(( Init_Device_Array), dim3(kernel_init_block), dim3(kernel_init_thread), 0, 0, GPU_Matrix_cols_capacity_, init_col_cap, GPU_Matrix_rows_capacity_);
hipDeviceSynchronize();
can_have_same_neighbor_ = true;
}
//return the row size
IdType Row_Size() const override {
return GPU_Matrix_rows_;
}
IdType Row_Cap() const override {
return GPU_Matrix_rows_capacity_;
}
//return the col sizes
IdType* Col_Size() const override {
return GPU_Matrix_cols_;
}
IdType* Col_Cap() const override {
return GPU_Matrix_cols_capacity_;
}
//void Can_Have_Same_neighbor(bool can_cannot) const {
// can_have_same_neighbor_ = can_cannot;
//}
//add node one by one
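// Add() resolves src_id to a row index through src_indexing_. For an existing
// row it grows the column arrays if needed and appends the (dst_id, edge_id)
// pair, skipping duplicates when can_have_same_neighbor_ is false; for a new
// source node it allocates a fresh row of init_col_cap entries and increments
// GPU_Matrix_rows_.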
void Add(IdType edge_id, IdType src_id, IdType dst_id) override {
IdType* single_id_list;
hipMallocManaged(&single_id_list, sizeof(IdType));
single_id_list[0] = src_id;
IndexType* d_src_index;
hipMallocManaged(&d_src_index, sizeof(IndexType));
d_src_index = src_indexing_ -> Get(single_id_list, 1); //to be tested
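// note: the buffer allocated into d_src_index just above is immediately
// overwritten by the pointer returned from Get(), and single_id_list is not
// freed here either.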
IndexType src_index = d_src_index[0];
if(src_index < GPU_Matrix_rows_){
if(GPU_Matrix_cols_[src_index] + 1 >= GPU_Matrix_cols_capacity_[src_index] * LOAD_FACTOR){
Expand_Cols(src_index);
}
if(can_have_same_neighbor_){
adj_nodes_[src_index][GPU_Matrix_cols_[src_index]] = dst_id;
adj_edges_[src_index][GPU_Matrix_cols_[src_index]] = edge_id;
std::cout<<"new node: "<<src_index<<" "<<GPU_Matrix_cols_[src_index]<<" "<<adj_nodes_[src_index][GPU_Matrix_cols_[src_index]]<<std::endl;
//std::cout<<"new edge: "<<src_index<<" "<<GPU_Matrix_cols_[src_index]<<" "<<adj_edges_[src_index][GPU_Matrix_cols_[src_index]]<<endl;
GPU_Matrix_cols_[src_index] += 1;
}else{
int i;
for(i = 0; i < GPU_Matrix_cols_[src_index]; i++){
if(adj_nodes_[src_index][i] == dst_id || adj_edges_[src_index][i] == edge_id){
return;
}
if(adj_nodes_[src_index][i] == 0 && adj_edges_[src_index][i] == 0){
break;
}
}
adj_nodes_[src_index][i] = dst_id;
adj_edges_[src_index][i] = edge_id;
std::cout<<"new node: "<<src_index<<" "<<i<<" "<<adj_nodes_[src_index][i]<<std::endl;
//std::cout<<"new edge: "<<src_index<<" "<<i<<" "<<adj_edges_[src_index][i]<<endl;
GPU_Matrix_cols_[src_index] += 1;
}
}else{
if(src_index >= GPU_Matrix_rows_capacity_ * LOAD_FACTOR){
Expand_Rows();
}
IdType* new_node_row;
hipMallocManaged(&new_node_row, init_col_cap * sizeof(IdType));
hipMemset(new_node_row, 0, init_col_cap * sizeof(IdType));
IdType* new_edge_row;
hipMallocManaged(&new_edge_row, init_col_cap * sizeof(IdType));
hipMemset(new_edge_row, 0, init_col_cap * sizeof(IdType));
adj_nodes_[src_index] = new_node_row;
adj_edges_[src_index] = new_edge_row;
//src_index can only be one past the last existing row index (i.e. equal to GPU_Matrix_rows_), given the way the graph is built
GPU_Matrix_rows_ = GPU_Matrix_rows_ + 1;
adj_nodes_[src_index][0] = dst_id;
adj_edges_[src_index][0] = edge_id;
std::cout<<"new node: "<<src_index<<" "<<0<<" "<<adj_nodes_[src_index][0]<<std::endl;
//std::cout<<"new edge: "<<src_index<<" "<<0<<" "<<adj_edges_[src_index][0]<<endl;
GPU_Matrix_cols_[src_index] += 1;
}
}
//return the pointer of adjmatrix
IdType** GetNodeAdjMatrix() const override {
return adj_nodes_;
}
IdType** GetEdgeAdjMatrix() const override {
return adj_edges_;
}
IdType* GetNeighbors(IndexType src_index) const override {
return adj_nodes_[src_index];
}
IdType* GetOutEdges(IndexType src_index) const override {
return adj_edges_[src_index];
}
private:
IdType** adj_nodes_;
IdType** adj_edges_;
IdType* GPU_Matrix_cols_;
IdType* GPU_Matrix_cols_capacity_;
IdType GPU_Matrix_rows_;
IdType GPU_Matrix_rows_capacity_;
GPUAutoIndexing* src_indexing_;
bool can_have_same_neighbor_;
//IndexType* failed_index;
//expand the row of adjmatrix
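// Expand_Rows allocates row-pointer, column-count and column-capacity arrays
// EXPAND_FACTOR times larger, copies the bookkeeping arrays, repoints the
// existing row pointers with the RePoint kernel and frees the old top-level
// arrays; the per-row data itself is left in place.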
void Expand_Rows(){
//std::cout<<"expand row called\n";
if(GPU_Matrix_rows_ + 1 >= GPU_Matrix_rows_capacity_ * LOAD_FACTOR){
//initialize
IdType** new_node_adjmatrix;
IdType** new_edge_adjmatrix;
IdType* new_cols;
IdType* new_cols_capacity;
hipMallocManaged(&new_cols, GPU_Matrix_rows_capacity_ * EXPAND_FACTOR * sizeof(IdType));
hipMallocManaged(&new_cols_capacity, GPU_Matrix_rows_capacity_ * EXPAND_FACTOR * sizeof(IdType));
hipMemset(new_cols, 0, GPU_Matrix_rows_capacity_ * EXPAND_FACTOR * sizeof(IdType));
//hipMemset(new_cols_capacity, init_col_cap, GPU_Matrix_rows_capacity_ * EXPAND_FACTOR * sizeof(IdType));
dim3 kernel_init_block(GPU_Matrix_rows_capacity_ * EXPAND_FACTOR / (kernel_init_thread_num * kernel_init_thread_num) + 1, 1);
dim3 kernel_init_thread(kernel_init_thread_num, kernel_init_thread_num);
hipLaunchKernelGGL(( Init_Device_Array), dim3(kernel_init_block), dim3(kernel_init_thread), 0, 0, new_cols_capacity, init_col_cap, GPU_Matrix_rows_capacity_ * EXPAND_FACTOR);
hipMallocManaged(&new_node_adjmatrix, GPU_Matrix_rows_capacity_ * EXPAND_FACTOR * sizeof(IdType*));
hipMallocManaged(&new_edge_adjmatrix, GPU_Matrix_rows_capacity_ * EXPAND_FACTOR * sizeof(IdType*));
//recover
hipMemcpy(new_cols, GPU_Matrix_cols_, GPU_Matrix_rows_ * sizeof(IdType), hipMemcpyDefault);
hipMemcpy(new_cols_capacity, GPU_Matrix_cols_capacity_, GPU_Matrix_rows_ * sizeof(IdType),hipMemcpyDefault);
dim3 kernel_block(GPU_Matrix_rows_/(kernel_repoint_thread_num * kernel_repoint_thread_num) + 1, 1);
dim3 kernel_thread(kernel_repoint_thread_num, kernel_repoint_thread_num);
hipLaunchKernelGGL(( RePoint), dim3(kernel_block),dim3(kernel_thread), 0, 0, new_node_adjmatrix, adj_nodes_, GPU_Matrix_rows_);
hipLaunchKernelGGL(( RePoint), dim3(kernel_block),dim3(kernel_thread), 0, 0, new_edge_adjmatrix, adj_edges_, GPU_Matrix_rows_);
GPU_Matrix_rows_capacity_ = GPU_Matrix_rows_capacity_ * EXPAND_FACTOR;
hipFree(adj_nodes_);
adj_nodes_ = new_node_adjmatrix;
hipFree(adj_edges_);
adj_edges_ = new_edge_adjmatrix;
hipFree(GPU_Matrix_cols_);
GPU_Matrix_cols_ = new_cols;
hipFree(GPU_Matrix_cols_capacity_);
GPU_Matrix_cols_capacity_ = new_cols_capacity;
hipDeviceSynchronize();
}
}
//expand the src_index'th col of adjmatrix
void Expand_Cols(IndexType src_index){
//std::cout<<"expand cols called!\n";
if(GPU_Matrix_cols_[src_index] + 1 >= GPU_Matrix_cols_capacity_[src_index] * LOAD_FACTOR){
//initialize
IdType* new_node_cols;
IdType* new_edge_cols;
hipMallocManaged(&new_node_cols, GPU_Matrix_cols_capacity_[src_index] * EXPAND_FACTOR * sizeof(IdType));
hipMemset(new_node_cols, 0, GPU_Matrix_cols_capacity_[src_index] * EXPAND_FACTOR * sizeof(IdType));
hipMallocManaged(&new_edge_cols, GPU_Matrix_cols_capacity_[src_index] * EXPAND_FACTOR * sizeof(IdType));
hipMemset(new_edge_cols, 0, GPU_Matrix_cols_capacity_[src_index] * EXPAND_FACTOR * sizeof(IdType));
//recover
hipMemcpy(new_node_cols, adj_nodes_[src_index], GPU_Matrix_cols_[src_index] * sizeof(IdType), hipMemcpyDefault);
hipMemcpy(new_edge_cols, adj_edges_[src_index], GPU_Matrix_cols_[src_index] * sizeof(IdType), hipMemcpyDefault);
GPU_Matrix_cols_capacity_[src_index] *= EXPAND_FACTOR;
hipFree(adj_nodes_[src_index]);
hipFree(adj_edges_[src_index]);
adj_nodes_[src_index] = new_node_cols;
adj_edges_[src_index] = new_edge_cols;
hipDeviceSynchronize();
}
}
};
//repoint the adjmatrix to a new space
__global__ void RePoint(IdType** new_matrix, IdType** old_matrix,
IdType GPU_Matrix_rows){
int ix = blockDim.x * blockIdx.x + threadIdx.x;
int iy = blockDim.y * blockIdx.y + threadIdx.y;
int idx = ix + (gridDim.x * blockDim.x) * iy;
if(idx < GPU_Matrix_rows){
new_matrix[idx] = old_matrix[idx];
}
}
//initialize the array element by element(not by bytes)
__global__ void Init_Device_Array(IdType* array, IdType init_value, IndexType batch_size){
int ix = blockDim.x * blockIdx.x + threadIdx.x;
int iy = blockDim.y * blockIdx.y + threadIdx.y;
int idx = ix + (gridDim.x * blockDim.x) * iy;
if(idx < batch_size){
array[idx] = init_value;
}
}
GPUAdjMatrix* NewGPUMemoryAdjMatrix(GPUAutoIndexing* indexing){
return new GPUMemoryAdjMatrix(indexing);
}
/*
int main(){
GPUMemoryAdjMatrix matrix;
IdType row;
IdType** adj_nodes;
for(int i = 0; i<15; i++){
matrix.Add(0, i, i+1);
}
for(int j = 0; j<200; j++){
matrix.Add(j, j, j);
}
adj_nodes = matrix.GetNodeAdjMatrix();
//IdType* GPU_Matrix_cols_ = matrix.Col_Cap();
// IndexType src[10] = {1,2,3,1,2,4,5,6,7,8};
//IdType dst[10] = {1,2,3,4,5,6,7,8,9,0};
//IndexType* failed_index = matrix.Bulk_Add(src, dst, 10);
row = matrix.Row_Size();
std::cout<<"row: "<<row<<endl;
std::cout<<"adj_nodes[0][0] "<<adj_nodes[0][0]<<endl;
}*/
//__global__ void kernel_self_mul(IdType* old, IdType factor){
// int idx = threadIdx.x + BlockDim.x * BlockIdx.x;
// old[idx] = old[idx] * factor;
//}
/*
__global__ void kernel_single_add(IdType** adj_nodes_, IndexType src_index, IdType* GPU_Matrix_cols_, IdType dst_id){
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if(idx <= GPU_Matrix_cols_[src_index] && adj_nodes_[src_index][idx] != dst_id){
if(adj_nodes_[src_index][idx] == 0){
adj_nodes_[src_index][idx] = dst_id;
GPU_Matrix_cols_[src_index] = GPU_Matrix_cols_[src_index] + 1;
}//idx = GPU_Matrix_cols_[src_index]
}
}
//assume the space is large enough
__global__ void kernel_bulk_add(IdType** adj_nodes_, IndexType* src_indexs, IdType* GPU_Matrix_cols_, IdType* dst_ids, IndexType batch_size, IndexType* failed_index){
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if(idx < batch_size){
int col_num;
for(col_num = 0; col_num < GPU_Matrix_cols_[idx]; col_num++){
if(adj_nodes_[idx][col_num] == dst_ids[idx]){
return;
}
if(adj_nodes_[idx][col_num] == 0){
break;
}
}
IdType check;
check = atomicCAS((int*)(adj_nodes_[idx] + col_num), 0, (int)(dst_ids[idx]));
if(check == 0){
atomicAdd((int*)(GPU_Matrix_cols_ + idx), 1);
}else{
atomicAdd((int*)(failed_index + idx), 1);
}
}
}
//consider the conflict problem
__global__ void Expand_Cols_bulk(IdType**adj_nodes_, IdType* GPU_Matrix_cols_, IdType* GPU_Matrix_cols_capacity_,
IndexType* src_indexs, IndexType batch_size){
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if(idx < batch_size && ( GPU_Matrix_cols_[(src_indexs[idx])] + 1 >= GPU_Matrix_cols_capacity_[(src_indexs[idx])] * LOAD_FACTOR) ){
IdType* new_col;
new_col = (IdType*)malloc(GPU_Matrix_cols_capacity_[(src_indexs[idx])] * EXPAND_FACTOR * sizeof(IdType));
//hipMemcpyAsync(new_col, adj_nodes_[(src_indexs[idx])], GPU_Matrix_cols_[(src_indexs[idx])] * sizeof(IdType), hipMemcpyDeviceToDevice);
free(adj_nodes_[(src_indexs[idx])]);
atomicExch((int*)(GPU_Matrix_cols_capacity_ + src_indexs[idx]), (int)(GPU_Matrix_cols_capacity_[(src_indexs[idx])] * EXPAND_FACTOR));
//adj_nodes_[(src_indexs[idx])] = new_col;
atomicExch((int*)(adj_nodes_ + src_indexs[idx]), (int)new_col);
}
}*/
//add nodes in bulk
/*
IndexType* Bulk_Add(IndexType* src_indexs, IdType* dst_ids, IndexType batch_size){
while(batch_size + GPU_Matrix_rows_ >= GPU_Matrix_rows_capacity_ * LOAD_FACTOR){
Expand_Rows();
}
dim3 kernel_expand_block(batch_size/(kernel_expand_thread_num * kernel_expand_thread_num) + 1, 1);
dim3 kernel_expand_thread(kernel_expand_thread_num, kernel_expand_thread_num);
Expand_Cols_bulk<<<kernel_expand_block, kernel_expand_thread>>>(adj_nodes_, GPU_Matrix_cols_, GPU_Matrix_cols_capacity_, src_indexs, batch_size);
//ensured enough space
hipMallocManaged(&failed_index, batch_size * sizeof(IdType));
dim3 kernel_add_block(batch_size/(kernel_add_thread_num * kernel_add_thread_num) + 1, 1);
dim3 kernel_add_thread(kernel_add_thread_num, kernel_add_thread_num);
kernel_bulk_add<<<kernel_add_block, kernel_add_thread>>>(adj_nodes_, src_indexs, GPU_Matrix_cols_, dst_ids, batch_size, failed_index);
return failed_index;
}
*/
| 1a3066926628672425c2c43ea58efb6bd4985da3.cu | #include <algorithm>
#include <functional>
#include <iostream>
#include "GPU_AdjacentMatrix.cuh"
class GPUMemoryAdjMatrix : public GPUAdjMatrix {
public:
~GPUMemoryAdjMatrix() override {
cudaFree(adj_nodes_);
cudaFree(GPU_Matrix_cols_);
cudaFree(GPU_Matrix_cols_capacity_);
}
//initialize the adjmatrix
GPUMemoryAdjMatrix(GPUAutoIndexing* indexing){
src_indexing_ = indexing;
cudaMallocManaged(&adj_nodes_, init_row_cap * sizeof(IdType*));
cudaMallocManaged(&adj_edges_, init_row_cap * sizeof(IdType*));
//row
GPU_Matrix_rows_ = 0;
GPU_Matrix_rows_capacity_ = init_row_cap;
//cols on CPU/GPU
//CPU_Matrix_cols_ = (IdType*)malloc(init_rol_cap * sizeof(IdType));
cudaMallocManaged(&GPU_Matrix_cols_, init_row_cap * sizeof(IdType));
//CPU_Matrix_cols_capacity_ = (IdType*)malloc(init_rol_cap * sizeof(IdType));
cudaMallocManaged(&GPU_Matrix_cols_capacity_, init_row_cap * sizeof(IdType));
//initialize
cudaMemset(GPU_Matrix_cols_, 0, GPU_Matrix_rows_capacity_ * sizeof(IdType));
//cudaMemset(GPU_Matrix_cols_capacity_, init_col_cap, GPU_Matrix_rows_capacity_ * sizeof(IdType));
dim3 kernel_init_block(GPU_Matrix_rows_capacity_/(kernel_init_thread_num * kernel_init_thread_num) + 1, 1);
dim3 kernel_init_thread(kernel_init_thread_num, kernel_init_thread_num);
Init_Device_Array<<<kernel_init_block, kernel_init_thread>>>(GPU_Matrix_cols_capacity_, init_col_cap, GPU_Matrix_rows_capacity_);
cudaDeviceSynchronize();
can_have_same_neighbor_ = true;
}
//return the row size
IdType Row_Size() const override {
return GPU_Matrix_rows_;
}
IdType Row_Cap() const override {
return GPU_Matrix_rows_capacity_;
}
//return the col sizes
IdType* Col_Size() const override {
return GPU_Matrix_cols_;
}
IdType* Col_Cap() const override {
return GPU_Matrix_cols_capacity_;
}
//void Can_Have_Same_neighbor(bool can_cannot) const {
// can_have_same_neighbor_ = can_cannot;
//}
//add node one by one
void Add(IdType edge_id, IdType src_id, IdType dst_id) override {
IdType* single_id_list;
cudaMallocManaged(&single_id_list, sizeof(IdType));
single_id_list[0] = src_id;
IndexType* d_src_index;
cudaMallocManaged(&d_src_index, sizeof(IndexType));
d_src_index = src_indexing_ -> Get(single_id_list, 1); //to be tested
IndexType src_index = d_src_index[0];
if(src_index < GPU_Matrix_rows_){
if(GPU_Matrix_cols_[src_index] + 1 >= GPU_Matrix_cols_capacity_[src_index] * LOAD_FACTOR){
Expand_Cols(src_index);
}
if(can_have_same_neighbor_){
adj_nodes_[src_index][GPU_Matrix_cols_[src_index]] = dst_id;
adj_edges_[src_index][GPU_Matrix_cols_[src_index]] = edge_id;
std::cout<<"new node: "<<src_index<<" "<<GPU_Matrix_cols_[src_index]<<" "<<adj_nodes_[src_index][GPU_Matrix_cols_[src_index]]<<std::endl;
//std::cout<<"new edge: "<<src_index<<" "<<GPU_Matrix_cols_[src_index]<<" "<<adj_edges_[src_index][GPU_Matrix_cols_[src_index]]<<endl;
GPU_Matrix_cols_[src_index] += 1;
}else{
int i;
for(i = 0; i < GPU_Matrix_cols_[src_index]; i++){
if(adj_nodes_[src_index][i] == dst_id || adj_edges_[src_index][i] == edge_id){
return;
}
if(adj_nodes_[src_index][i] == 0 && adj_edges_[src_index][i] == 0){
break;
}
}
adj_nodes_[src_index][i] = dst_id;
adj_edges_[src_index][i] = edge_id;
std::cout<<"new node: "<<src_index<<" "<<i<<" "<<adj_nodes_[src_index][i]<<std::endl;
//std::cout<<"new edge: "<<src_index<<" "<<i<<" "<<adj_edges_[src_index][i]<<endl;
GPU_Matrix_cols_[src_index] += 1;
}
}else{
if(src_index >= GPU_Matrix_rows_capacity_ * LOAD_FACTOR){
Expand_Rows();
}
IdType* new_node_row;
cudaMallocManaged(&new_node_row, init_col_cap * sizeof(IdType));
cudaMemset(new_node_row, 0, init_col_cap * sizeof(IdType));
IdType* new_edge_row;
cudaMallocManaged(&new_edge_row, init_col_cap * sizeof(IdType));
cudaMemset(new_edge_row, 0, init_col_cap * sizeof(IdType));
adj_nodes_[src_index] = new_node_row;
adj_edges_[src_index] = new_edge_row;
//src_index can only be one past the last existing row index (i.e. equal to GPU_Matrix_rows_), given the way the graph is built
GPU_Matrix_rows_ = GPU_Matrix_rows_ + 1;
adj_nodes_[src_index][0] = dst_id;
adj_edges_[src_index][0] = edge_id;
std::cout<<"new node: "<<src_index<<" "<<0<<" "<<adj_nodes_[src_index][0]<<std::endl;
//std::cout<<"new edge: "<<src_index<<" "<<0<<" "<<adj_edges_[src_index][0]<<endl;
GPU_Matrix_cols_[src_index] += 1;
}
}
//return the pointer of adjmatrix
IdType** GetNodeAdjMatrix() const override {
return adj_nodes_;
}
IdType** GetEdgeAdjMatrix() const override {
return adj_edges_;
}
IdType* GetNeighbors(IndexType src_index) const override {
return adj_nodes_[src_index];
}
IdType* GetOutEdges(IndexType src_index) const override {
return adj_edges_[src_index];
}
private:
IdType** adj_nodes_;
IdType** adj_edges_;
IdType* GPU_Matrix_cols_;
IdType* GPU_Matrix_cols_capacity_;
IdType GPU_Matrix_rows_;
IdType GPU_Matrix_rows_capacity_;
GPUAutoIndexing* src_indexing_;
bool can_have_same_neighbor_;
//IndexType* failed_index;
//expand the row of adjmatrix
void Expand_Rows(){
//std::cout<<"expand row called\n";
if(GPU_Matrix_rows_ + 1 >= GPU_Matrix_rows_capacity_ * LOAD_FACTOR){
//initialize
IdType** new_node_adjmatrix;
IdType** new_edge_adjmatrix;
IdType* new_cols;
IdType* new_cols_capacity;
cudaMallocManaged(&new_cols, GPU_Matrix_rows_capacity_ * EXPAND_FACTOR * sizeof(IdType));
cudaMallocManaged(&new_cols_capacity, GPU_Matrix_rows_capacity_ * EXPAND_FACTOR * sizeof(IdType));
cudaMemset(new_cols, 0, GPU_Matrix_rows_capacity_ * EXPAND_FACTOR * sizeof(IdType));
//cudaMemset(new_cols_capacity, init_col_cap, GPU_Matrix_rows_capacity_ * EXPAND_FACTOR * sizeof(IdType));
dim3 kernel_init_block(GPU_Matrix_rows_capacity_ * EXPAND_FACTOR / (kernel_init_thread_num * kernel_init_thread_num) + 1, 1);
dim3 kernel_init_thread(kernel_init_thread_num, kernel_init_thread_num);
Init_Device_Array<<<kernel_init_block, kernel_init_thread>>>(new_cols_capacity, init_col_cap, GPU_Matrix_rows_capacity_ * EXPAND_FACTOR);
cudaMallocManaged(&new_node_adjmatrix, GPU_Matrix_rows_capacity_ * EXPAND_FACTOR * sizeof(IdType*));
cudaMallocManaged(&new_edge_adjmatrix, GPU_Matrix_rows_capacity_ * EXPAND_FACTOR * sizeof(IdType*));
//recover
cudaMemcpy(new_cols, GPU_Matrix_cols_, GPU_Matrix_rows_ * sizeof(IdType), cudaMemcpyDefault);
cudaMemcpy(new_cols_capacity, GPU_Matrix_cols_capacity_, GPU_Matrix_rows_ * sizeof(IdType),cudaMemcpyDefault);
dim3 kernel_block(GPU_Matrix_rows_/(kernel_repoint_thread_num * kernel_repoint_thread_num) + 1, 1);
dim3 kernel_thread(kernel_repoint_thread_num, kernel_repoint_thread_num);
RePoint<<<kernel_block,kernel_thread>>>(new_node_adjmatrix, adj_nodes_, GPU_Matrix_rows_);
RePoint<<<kernel_block,kernel_thread>>>(new_edge_adjmatrix, adj_edges_, GPU_Matrix_rows_);
GPU_Matrix_rows_capacity_ = GPU_Matrix_rows_capacity_ * EXPAND_FACTOR;
cudaFree(adj_nodes_);
adj_nodes_ = new_node_adjmatrix;
cudaFree(adj_edges_);
adj_edges_ = new_edge_adjmatrix;
cudaFree(GPU_Matrix_cols_);
GPU_Matrix_cols_ = new_cols;
cudaFree(GPU_Matrix_cols_capacity_);
GPU_Matrix_cols_capacity_ = new_cols_capacity;
cudaDeviceSynchronize();
}
}
//expand the src_index'th col of adjmatrix
void Expand_Cols(IndexType src_index){
//std::cout<<"expand cols called!\n";
if(GPU_Matrix_cols_[src_index] + 1 >= GPU_Matrix_cols_capacity_[src_index] * LOAD_FACTOR){
//initialize
IdType* new_node_cols;
IdType* new_edge_cols;
cudaMallocManaged(&new_node_cols, GPU_Matrix_cols_capacity_[src_index] * EXPAND_FACTOR * sizeof(IdType));
cudaMemset(new_node_cols, 0, GPU_Matrix_cols_capacity_[src_index] * EXPAND_FACTOR * sizeof(IdType));
cudaMallocManaged(&new_edge_cols, GPU_Matrix_cols_capacity_[src_index] * EXPAND_FACTOR * sizeof(IdType));
cudaMemset(new_edge_cols, 0, GPU_Matrix_cols_capacity_[src_index] * EXPAND_FACTOR * sizeof(IdType));
//recover
cudaMemcpy(new_node_cols, adj_nodes_[src_index], GPU_Matrix_cols_[src_index] * sizeof(IdType), cudaMemcpyDefault);
cudaMemcpy(new_edge_cols, adj_edges_[src_index], GPU_Matrix_cols_[src_index] * sizeof(IdType), cudaMemcpyDefault);
GPU_Matrix_cols_capacity_[src_index] *= EXPAND_FACTOR;
cudaFree(adj_nodes_[src_index]);
cudaFree(adj_edges_[src_index]);
adj_nodes_[src_index] = new_node_cols;
adj_edges_[src_index] = new_edge_cols;
cudaDeviceSynchronize();
}
}
};
//repoint the adjmatrix to a new space
__global__ void RePoint(IdType** new_matrix, IdType** old_matrix,
IdType GPU_Matrix_rows){
int ix = blockDim.x * blockIdx.x + threadIdx.x;
int iy = blockDim.y * blockIdx.y + threadIdx.y;
int idx = ix + (gridDim.x * blockDim.x) * iy;
if(idx < GPU_Matrix_rows){
new_matrix[idx] = old_matrix[idx];
}
}
//initialize the array element by element(not by bytes)
__global__ void Init_Device_Array(IdType* array, IdType init_value, IndexType batch_size){
int ix = blockDim.x * blockIdx.x + threadIdx.x;
int iy = blockDim.y * blockIdx.y + threadIdx.y;
int idx = ix + (gridDim.x * blockDim.x) * iy;
if(idx < batch_size){
array[idx] = init_value;
}
}
GPUAdjMatrix* NewGPUMemoryAdjMatrix(GPUAutoIndexing* indexing){
return new GPUMemoryAdjMatrix(indexing);
}
/*
int main(){
GPUMemoryAdjMatrix matrix;
IdType row;
IdType** adj_nodes;
for(int i = 0; i<15; i++){
matrix.Add(0, i, i+1);
}
for(int j = 0; j<200; j++){
matrix.Add(j, j, j);
}
adj_nodes = matrix.GetNodeAdjMatrix();
//IdType* GPU_Matrix_cols_ = matrix.Col_Cap();
// IndexType src[10] = {1,2,3,1,2,4,5,6,7,8};
//IdType dst[10] = {1,2,3,4,5,6,7,8,9,0};
//IndexType* failed_index = matrix.Bulk_Add(src, dst, 10);
row = matrix.Row_Size();
std::cout<<"row: "<<row<<endl;
std::cout<<"adj_nodes[0][0] "<<adj_nodes[0][0]<<endl;
}*/
//__global__ void kernel_self_mul(IdType* old, IdType factor){
// int idx = threadIdx.x + BlockDim.x * BlockIdx.x;
// old[idx] = old[idx] * factor;
//}
/*
__global__ void kernel_single_add(IdType** adj_nodes_, IndexType src_index, IdType* GPU_Matrix_cols_, IdType dst_id){
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if(idx <= GPU_Matrix_cols_[src_index] && adj_nodes_[src_index][idx] != dst_id){
if(adj_nodes_[src_index][idx] == 0){
adj_nodes_[src_index][idx] = dst_id;
GPU_Matrix_cols_[src_index] = GPU_Matrix_cols_[src_index] + 1;
}//idx = GPU_Matrix_cols_[src_index]
}
}
//assume the space is large enough
__global__ void kernel_bulk_add(IdType** adj_nodes_, IndexType* src_indexs, IdType* GPU_Matrix_cols_, IdType* dst_ids, IndexType batch_size, IndexType* failed_index){
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if(idx < batch_size){
int col_num;
for(col_num = 0; col_num < GPU_Matrix_cols_[idx]; col_num++){
if(adj_nodes_[idx][col_num] == dst_ids[idx]){
return;
}
if(adj_nodes_[idx][col_num] == 0){
break;
}
}
IdType check;
check = atomicCAS((int*)(adj_nodes_[idx] + col_num), 0, (int)(dst_ids[idx]));
if(check == 0){
atomicAdd((int*)(GPU_Matrix_cols_ + idx), 1);
}else{
atomicAdd((int*)(failed_index + idx), 1);
}
}
}
//consider the conflict problem
__global__ void Expand_Cols_bulk(IdType**adj_nodes_, IdType* GPU_Matrix_cols_, IdType* GPU_Matrix_cols_capacity_,
IndexType* src_indexs, IndexType batch_size){
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if(idx < batch_size && ( GPU_Matrix_cols_[(src_indexs[idx])] + 1 >= GPU_Matrix_cols_capacity_[(src_indexs[idx])] * LOAD_FACTOR) ){
IdType* new_col;
new_col = (IdType*)malloc(GPU_Matrix_cols_capacity_[(src_indexs[idx])] * EXPAND_FACTOR * sizeof(IdType));
//cudaMemcpyAsync(new_col, adj_nodes_[(src_indexs[idx])], GPU_Matrix_cols_[(src_indexs[idx])] * sizeof(IdType), cudaMemcpyDeviceToDevice);
free(adj_nodes_[(src_indexs[idx])]);
atomicExch((int*)(GPU_Matrix_cols_capacity_ + src_indexs[idx]), (int)(GPU_Matrix_cols_capacity_[(src_indexs[idx])] * EXPAND_FACTOR));
//adj_nodes_[(src_indexs[idx])] = new_col;
atomicExch((int*)(adj_nodes_ + src_indexs[idx]), (int)new_col);
}
}*/
//add nodes in bulk
/*
IndexType* Bulk_Add(IndexType* src_indexs, IdType* dst_ids, IndexType batch_size){
while(batch_size + GPU_Matrix_rows_ >= GPU_Matrix_rows_capacity_ * LOAD_FACTOR){
Expand_Rows();
}
dim3 kernel_expand_block(batch_size/(kernel_expand_thread_num * kernel_expand_thread_num) + 1, 1);
dim3 kernel_expand_thread(kernel_expand_thread_num, kernel_expand_thread_num);
Expand_Cols_bulk<<<kernel_expand_block, kernel_expand_thread>>>(adj_nodes_, GPU_Matrix_cols_, GPU_Matrix_cols_capacity_, src_indexs, batch_size);
//ensured enough space
cudaMallocManaged(&failed_index, batch_size * sizeof(IdType));
dim3 kernel_add_block(batch_size/(kernel_add_thread_num * kernel_add_thread_num) + 1, 1);
dim3 kernel_add_thread(kernel_add_thread_num, kernel_add_thread_num);
kernel_bulk_add<<<kernel_add_block, kernel_add_thread>>>(adj_nodes_, src_indexs, GPU_Matrix_cols_, dst_ids, batch_size, failed_index);
return failed_index;
}
*/
|
964398e5211cb71bfc12948095132955c41c80ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "object/geometry/quadric.hpp"
using namespace px;
BaseQuadric::BaseQuadric(Point const ¢er,
PREC const &a,
PREC const &b,
PREC const &c,
PREC const &d,
PREC const &e,
PREC const &f,
PREC const &g,
PREC const &h,
PREC const &i,
PREC const &j,
PREC const &x0, PREC const &x1,
PREC const &y0, PREC const &y1,
PREC const &z0, PREC const &z1)
: _center(center), _dev_obj(nullptr)
{
setCoef(a, b, c, d, e, f, g, h, i, j, x0, x1, y0, y1, z0, z1);
}
PX_CUDA_CALLABLE
GeometryObj * BaseQuadric::hitCheck(void * const &obj,
Ray const &ray,
PREC const &t_start,
PREC const &t_end,
PREC &hit_at)
{
auto o = reinterpret_cast<BaseQuadric*>(obj);
auto xo = ray.original.x - o->_center.x;
auto yo = ray.original.y - o->_center.y;
auto zo = ray.original.z - o->_center.z;
// @see http://www.bmsc.washington.edu/people/merritt/graphics/quadrics.html
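// Substituting p(t) = ray.original + t * ray.direction (taken relative to the
// centre) into the quadric
// _a x^2 + _b y^2 + _c z^2 + _d xy + _e xz + _f yz + _g x + _h y + _i z + _j = 0
// gives a quadratic in t whose coefficients are the local A, B, C below.
// The degenerate linear case (A == 0) is handled first; otherwise both roots
// are tested in increasing order against [t_start, t_end] and against the
// axis-aligned clipping box [_x0,_x1] x [_y0,_y1] x [_z0,_z1].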
auto A = o->_a * ray.direction.x * ray.direction.x +
o->_b * ray.direction.y * ray.direction.y +
o->_c * ray.direction.z * ray.direction.z +
o->_d * ray.direction.x * ray.direction.y +
o->_e * ray.direction.x * ray.direction.z +
o->_f * ray.direction.y * ray.direction.z;
auto B = 2 * o->_a * xo * ray.direction.x +
2 * o->_b * yo * ray.direction.y +
2 * o->_c * zo * ray.direction.z +
o->_d * (xo * ray.direction.y + yo * ray.direction.x) +
o->_e * (xo * ray.direction.z + zo * ray.direction.x) +
o->_f * (yo * ray.direction.z + zo * ray.direction.y) +
o->_g * ray.direction.x +
o->_h * ray.direction.y +
o->_i * ray.direction.z;
auto C = o->_a * xo * xo +
o->_b * yo * yo +
o->_c * zo * zo +
o->_d * xo * yo +
o->_e * xo * zo +
o->_f * yo * zo +
o->_g * xo +
o->_h * yo +
o->_i * zo +
o->_j;
if (A == 0)
{
if (B == 0) return o->_dev_obj;
C = - C / B;
if (C > t_start && C < t_end)
{
B = ray.original.x + ray.direction.x * C;
if (B > o->_x0 && B < o->_x1)
{
B = ray.original.y + ray.direction.y * C;
if (B > o->_y0 && B < o->_y1)
{
B = ray.original.z + ray.direction.z * C;
if (B > o->_z0 && B < o->_z1)
{
hit_at = C;
return o->_dev_obj;
}
}
}
}
return nullptr;
}
C = B * B - 4 * A * C;
if (C < 0)
return nullptr;
C = std::sqrt(C);
xo = (-B - C)/ (2.0 * A);
yo = (-B + C)/ (2.0 * A);
if (xo > yo)
{
zo = xo;
xo = yo;
yo = zo;
}
if (xo > t_start && xo < t_end)
{
B = ray.original.x + ray.direction.x * xo;
if (B > o->_x0 && B < o->_x1)
{
B = ray.original.y + ray.direction.y * xo;
if (B > o->_y0 && B < o->_y1)
{
B = ray.original.z + ray.direction.z * xo;
if (B > o->_z0 && B < o->_z1)
{
hit_at = xo;
return o->_dev_obj;
}
}
}
}
if (yo > t_start && yo < t_end)
{
B = ray.original.x + ray.direction.x * yo;
if (B > o->_x0 && B < o->_x1)
{
B = ray.original.y + ray.direction.y * yo;
if (B >o->_y0 && B < o->_y1)
{
B = ray.original.z + ray.direction.z * yo;
if (B >o->_z0 && B < o->_z1)
{
hit_at = yo;
return o->_dev_obj;
}
}
}
}
return nullptr;
}
PX_CUDA_CALLABLE
Direction BaseQuadric::normalVec(void * const &obj,
PREC const &x, PREC const &y, PREC const &z,
bool &double_face)
{
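// The (unnormalized) normal is the gradient of the quadric polynomial
// evaluated at the hit point, with coordinates taken relative to the centre.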
double_face = false;
auto o = reinterpret_cast<BaseQuadric*>(obj);
auto dx = x - o->_center.x;
auto dy = y - o->_center.y;
auto dz = z - o->_center.z;
return {2 * o->_a * dx + o->_d * dy + o->_e * dz + o->_g,
2 * o->_b * dy + o->_d * dx + o->_f * dz + o->_h,
2 * o->_c * dz + o->_e * dx + o->_f * dy + o->_i};
}
PX_CUDA_CALLABLE
Vec3<PREC> BaseQuadric::getTextureCoord(void * const &obj,
PREC const &x, PREC const &y, PREC const &z)
{
auto o = reinterpret_cast<BaseQuadric*>(obj);
// FIXME better way for quadric surface texture mapping
if (o->_sym_o)
{
if (o->_a == 0 && o->_b == 0)
return {x-o->_center.x, y-o->_center.y, 0};
if (o->_a == 0 && o->_c == 0)
return {z-o->_center.z, x-o->_center.x, 0};
if (o->_b == 0 && o->_c == 0)
return {y-o->_center.y, z-o->_center.z, 0};
if (o->_c == 0)
{
auto discriminant = o->_h*o->_h - 4*o->_b*o->_j;
auto cy = discriminant < 0 ? 0 : (std::sqrt(discriminant) - o->_h)*PREC(0.5)/o->_b;
// auto dx = x - o->_center.x;
// auto dy = y - o->_center.y;
// auto dz = z - o->_center.z;
//
// auto du = (o->_a*dx*dx/2.0 + o->_c*dz*dz + o->_e*dx*dz/2.0 + o->_g*dx/2.0 + o->_i*dz + o->_j) * dx * (dy-cy) +
// (o->_d*dx*dx/2.0 + o->_f*dx*dz + o->_h*dx)/2.0 * (dy*dy - cy*cy) +
// o->_b*dx/3.0 * (dy*dy*dy - cy*cy*cy) ;
return {x-o->_center.x > 0 ? y - cy : cy - y, z-o->_center.z, 0};
// return {x-o->_center.x > 0 ? y - o->_center.y : o->_center.y - y, z-o->_center.z, 0};
// return {du, z-o->_center.z, 0};
}
if (o->_b == 0)
{
auto discriminant = o->_g*o->_g - 4*o->_a*o->_j;
auto cx = discriminant < 0 ? o->_center.x : ((std::sqrt(discriminant) - o->_g)*PREC(0.5)/o->_a + o->_center.x);
return {z-o->_center.z > 0 ? x - cx : cx - x, y-o->_center.y, 0};
}
if (o->_a == 0)
{
auto discriminant = o->_i*o->_i - 4*o->_c*o->_j;
auto cz = discriminant < 0 ? o->_center.z : ((std::sqrt(discriminant) - o->_i)*PREC(0.5)/o->_c + o->_center.z);
return {y-o->_center.y > 0 ? z-cz : cz-z, x-o->_center.x, 0};
}
if (o->_a > 0 && o->_b > 0 && o->_c > 0)
{
auto dx = x - o->_center.x;
auto dy = y - o->_center.y;
auto dz = z - o->_center.z;
return {(1 + std::atan2(dz, dx) / PREC(PI)) * PREC(0.5),
std::acos(dy / (dx*dx+dy*dy+dz*dz)) / PREC(PI),
0};
}
return {x - o->_center.x,
y - o->_center.y,
0};
}
if (o->_sym_x)
{
return {y-o->_center.y, z-o->_center.z, 0};
}
if (o->_sym_y)
{
return {x-o->_center.x, z-o->_center.z, 0};
}
if (o->_sym_z)
{
return {x-o->_center.x, y-o->_center.y, 0};
}
if (o->_sym_xy)
return {z-o->_center.z, x-o->_center.x, 0};
if (o->_sym_yz)
return {x-o->_center.x, y-o->_center.y, 0};
if (o->_sym_xz)
return {y-o->_center.y, x-o->_center.x, 0};
return {x-o->_center.x, y-o->_center.y, 0};
}
void BaseQuadric::setCenter(Point const ¢er)
{
_center = center;
}
void BaseQuadric::setCoef(PREC const &a,
PREC const &b,
PREC const &c,
PREC const &d,
PREC const &e,
PREC const &f,
PREC const &g,
PREC const &h,
PREC const &i,
PREC const &j,
PREC const &x0, PREC const &x1,
PREC const &y0, PREC const &y1,
PREC const &z0, PREC const &z1)
{
if (j < 0)
{
_a = -a, _b = -b, _c = -c, _d = -d;
_e = -e, _f = -f, _g = -g, _h = -h;
_i = -i, _j = -j;
}
else
{
_a = a, _b = b, _c = c, _d = d;
_e = e, _f = f, _g = g, _h = h;
_i = i, _j = j;
}
_sym_xy = ((e == 0) && (f == 0) && (i == 0));
_sym_yz = ((d == 0) && (e == 0) && (g == 0));
_sym_xz = ((d == 0) && (f == 0) && (h == 0));
_sym_o = _sym_xy && _sym_yz && _sym_xz;
_sym_z = _sym_xz && _sym_yz;
_sym_x = _sym_xy && _sym_xz;
_sym_y = _sym_xy && _sym_yz;
_x0 = x0 < x1 ? (_x1 = x1, x0) : (_x1 = x0, x1);
_y0 = y0 < y1 ? (_y1 = y1, y0) : (_y1 = y0, y1);
_z0 = z0 < z1 ? (_z1 = z1, z0) : (_z1 = z0, z1);
}
std::shared_ptr<BaseGeometry> Quadric::create(Point const ¢er,
PREC const &a,
PREC const &b,
PREC const &c,
PREC const &d,
PREC const &e,
PREC const &f,
PREC const &g,
PREC const &h,
PREC const &i,
PREC const &j,
PREC const &x0, PREC const &x1,
PREC const &y0, PREC const &y1,
PREC const &z0, PREC const &z1,
std::shared_ptr<BaseMaterial> const &material,
std::shared_ptr<Transformation> const &trans)
{
return std::shared_ptr<BaseGeometry>(new Quadric(center,
a, b, c, d, e, f, g, h, i,
j,
x0, x1, y0, y1, z0, z1,
material, trans));
}
Quadric::Quadric(Point const ¢er,
PREC const &a,
PREC const &b,
PREC const &c,
PREC const &d,
PREC const &e,
PREC const &f,
PREC const &g,
PREC const &h,
PREC const &i,
PREC const &j,
PREC const &x0, PREC const &x1,
PREC const &y0, PREC const &y1,
PREC const &z0, PREC const &z1,
std::shared_ptr<BaseMaterial> const &material,
std::shared_ptr<Transformation> const &trans)
: BaseGeometry(material, trans, 8),
_obj(new BaseQuadric(center, a, b, c, d, e, f, g, h, i, j, x0, x1, y0, y1, z0, z1)),
_gpu_obj(nullptr), _need_upload(true)
{
_obj->_dev_obj = reinterpret_cast<GeometryObj*>(this);
_updateVertices();
}
Quadric::~Quadric()
{
delete _obj;
#ifdef USE_ROCM
clearGpuData();
#endif
}
#ifdef USE_ROCM
__device__ fnHit_t __fn_hit_quadric= BaseQuadric::hitCheck;
__device__ fnNormal_t __fn_normal_quadric= BaseQuadric::normalVec;
__device__ fnTextureCoord_t __fn_texture_coord_quadric= BaseQuadric::getTextureCoord;
#endif
void Quadric::up2Gpu()
{
#ifdef USE_ROCM
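// Upload scheme: the addresses of the __device__ callbacks are fetched once
// via hipMemcpyFromSymbol, the BaseQuadric payload is copied into _gpu_obj
// with its _dev_obj field temporarily pointing at the device-side GeometryObj,
// and a GeometryObj wrapper (callbacks plus material/transformation device
// pointers) is then copied into dev_ptr.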
static fnHit_t fn_hit_h = nullptr;
static fnNormal_t fn_normal_h;
static fnTextureCoord_t fn_texture_coord_h;
if (_need_upload)
{
if (dev_ptr == nullptr)
{
PX_CUDA_CHECK(hipMalloc(&_gpu_obj, sizeof(BaseQuadric)));
PX_CUDA_CHECK(hipMalloc(&dev_ptr, sizeof(GeometryObj)));
}
if (fn_hit_h == nullptr)
{
PX_CUDA_CHECK(hipMemcpyFromSymbol(&fn_hit_h, __fn_hit_quadric, sizeof(fnHit_t)));
PX_CUDA_CHECK(hipMemcpyFromSymbol(&fn_normal_h, __fn_normal_quadric, sizeof(fnNormal_t)));
PX_CUDA_CHECK(hipMemcpyFromSymbol(&fn_texture_coord_h, __fn_texture_coord_quadric, sizeof(fnTextureCoord_t)));
}
if (mat != nullptr)
mat->up2Gpu();
if (trans != nullptr)
trans->up2Gpu();
_obj->_dev_obj = dev_ptr;
PX_CUDA_CHECK(hipMemcpy(_gpu_obj, _obj, sizeof(BaseQuadric), hipMemcpyHostToDevice));
_obj->_dev_obj = reinterpret_cast<GeometryObj*>(this);
GeometryObj tmp(_gpu_obj, fn_hit_h, fn_normal_h, fn_texture_coord_h,
mat == nullptr ? nullptr : mat->devPtr(),
trans == nullptr ? nullptr : trans->devPtr());
PX_CUDA_CHECK(hipMemcpy(dev_ptr, &tmp, sizeof(GeometryObj),
hipMemcpyHostToDevice))
_need_upload = false;
}
#endif
}
void Quadric::clearGpuData()
{
#ifdef USE_ROCM
BaseGeometry::clearGpuData();
if (_gpu_obj != nullptr)
{
PX_CUDA_CHECK(hipFree(_gpu_obj));
_gpu_obj = nullptr;
}
_need_upload = true;
#endif
}
void Quadric::setCenter(Point const ¢er)
{
_obj->setCenter(center);
// _updateVertices();
#ifdef USE_ROCM
_need_upload = true;
#endif
}
void Quadric::setCoef(PREC const &a,
PREC const &b,
PREC const &c,
PREC const &d,
PREC const &e,
PREC const &f,
PREC const &g,
PREC const &h,
PREC const &i,
PREC const &j,
PREC const &x0, PREC const &x1,
PREC const &y0, PREC const &y1,
PREC const &z0, PREC const &z1)
{
_obj->setCoef(a, b, c, d, e, f, g, h, i, j, x0, x1, y0, y1, z0, z1);
_updateVertices();
#ifdef USE_ROCM
_need_upload = true;
#endif
}
void Quadric::_updateVertices()
{
// FIXME better way to find the bound of vertices of quadric surface
raw_vertices[0].x = _obj->_x0;
raw_vertices[0].y = _obj->_y0;
raw_vertices[0].z = _obj->_z0;
raw_vertices[1].x = _obj->_x1;
raw_vertices[1].y = _obj->_y0;
raw_vertices[1].z = _obj->_z0;
raw_vertices[2].x = _obj->_x0;
raw_vertices[2].y = _obj->_y1;
raw_vertices[2].z = _obj->_z0;
raw_vertices[3].x = _obj->_x0;
raw_vertices[3].y = _obj->_y0;
raw_vertices[3].z = _obj->_z1;
raw_vertices[4].x = _obj->_x1;
raw_vertices[4].y = _obj->_y1;
raw_vertices[4].z = _obj->_z0;
raw_vertices[5].x = _obj->_x1;
raw_vertices[5].y = _obj->_y0;
raw_vertices[5].z = _obj->_z1;
raw_vertices[6].x = _obj->_x0;
raw_vertices[6].y = _obj->_y1;
raw_vertices[6].z = _obj->_z1;
raw_vertices[7].x = _obj->_x1;
raw_vertices[7].y = _obj->_y1;
raw_vertices[7].z = _obj->_z1;
}
Vec3<PREC> Quadric::getTextureCoord(PREC const &x,
PREC const &y,
PREC const &z) const
{
return BaseQuadric::getTextureCoord(_obj, x, y, z);
}
const BaseGeometry *Quadric::hitCheck(Ray const &ray,
PREC const &t_start,
PREC const &t_end,
PREC &hit_at) const
{
return BaseQuadric::hitCheck(_obj, ray, t_start, t_end, hit_at) ? this : nullptr;
}
Direction Quadric::normalVec(PREC const &x, PREC const &y,
PREC const &z,
bool &double_face) const
{
return BaseQuadric::normalVec(_obj, x, y, z, double_face);
}
| 964398e5211cb71bfc12948095132955c41c80ca.cu | #include "object/geometry/quadric.hpp"
using namespace px;
BaseQuadric::BaseQuadric(Point const ¢er,
PREC const &a,
PREC const &b,
PREC const &c,
PREC const &d,
PREC const &e,
PREC const &f,
PREC const &g,
PREC const &h,
PREC const &i,
PREC const &j,
PREC const &x0, PREC const &x1,
PREC const &y0, PREC const &y1,
PREC const &z0, PREC const &z1)
: _center(center), _dev_obj(nullptr)
{
setCoef(a, b, c, d, e, f, g, h, i, j, x0, x1, y0, y1, z0, z1);
}
PX_CUDA_CALLABLE
GeometryObj * BaseQuadric::hitCheck(void * const &obj,
Ray const &ray,
PREC const &t_start,
PREC const &t_end,
PREC &hit_at)
{
auto o = reinterpret_cast<BaseQuadric*>(obj);
auto xo = ray.original.x - o->_center.x;
auto yo = ray.original.y - o->_center.y;
auto zo = ray.original.z - o->_center.z;
// @see http://www.bmsc.washington.edu/people/merritt/graphics/quadrics.html
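// The ray is substituted into the general quadric equation (coefficients
// _a .. _j, coordinates relative to the centre) to obtain the quadratic
// A t^2 + B t + C = 0 below; the linear case A == 0 is handled first, then
// both roots are tested in increasing order against [t_start, t_end] and the
// clipping box [_x0,_x1] x [_y0,_y1] x [_z0,_z1].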
auto A = o->_a * ray.direction.x * ray.direction.x +
o->_b * ray.direction.y * ray.direction.y +
o->_c * ray.direction.z * ray.direction.z +
o->_d * ray.direction.x * ray.direction.y +
o->_e * ray.direction.x * ray.direction.z +
o->_f * ray.direction.y * ray.direction.z;
auto B = 2 * o->_a * xo * ray.direction.x +
2 * o->_b * yo * ray.direction.y +
2 * o->_c * zo * ray.direction.z +
o->_d * (xo * ray.direction.y + yo * ray.direction.x) +
o->_e * (xo * ray.direction.z + zo * ray.direction.x) +
o->_f * (yo * ray.direction.z + zo * ray.direction.y) +
o->_g * ray.direction.x +
o->_h * ray.direction.y +
o->_i * ray.direction.z;
auto C = o->_a * xo * xo +
o->_b * yo * yo +
o->_c * zo * zo +
o->_d * xo * yo +
o->_e * xo * zo +
o->_f * yo * zo +
o->_g * xo +
o->_h * yo +
o->_i * zo +
o->_j;
if (A == 0)
{
if (B == 0) return o->_dev_obj;
C = - C / B;
if (C > t_start && C < t_end)
{
B = ray.original.x + ray.direction.x * C;
if (B > o->_x0 && B < o->_x1)
{
B = ray.original.y + ray.direction.y * C;
if (B > o->_y0 && B < o->_y1)
{
B = ray.original.z + ray.direction.z * C;
if (B > o->_z0 && B < o->_z1)
{
hit_at = C;
return o->_dev_obj;
}
}
}
}
return nullptr;
}
C = B * B - 4 * A * C;
if (C < 0)
return nullptr;
C = std::sqrt(C);
xo = (-B - C)/ (2.0 * A);
yo = (-B + C)/ (2.0 * A);
if (xo > yo)
{
zo = xo;
xo = yo;
yo = zo;
}
if (xo > t_start && xo < t_end)
{
B = ray.original.x + ray.direction.x * xo;
if (B > o->_x0 && B < o->_x1)
{
B = ray.original.y + ray.direction.y * xo;
if (B > o->_y0 && B < o->_y1)
{
B = ray.original.z + ray.direction.z * xo;
if (B > o->_z0 && B < o->_z1)
{
hit_at = xo;
return o->_dev_obj;
}
}
}
}
if (yo > t_start && yo < t_end)
{
B = ray.original.x + ray.direction.x * yo;
if (B > o->_x0 && B < o->_x1)
{
B = ray.original.y + ray.direction.y * yo;
if (B >o->_y0 && B < o->_y1)
{
B = ray.original.z + ray.direction.z * yo;
if (B >o->_z0 && B < o->_z1)
{
hit_at = yo;
return o->_dev_obj;
}
}
}
}
return nullptr;
}
PX_CUDA_CALLABLE
Direction BaseQuadric::normalVec(void * const &obj,
PREC const &x, PREC const &y, PREC const &z,
bool &double_face)
{
double_face = false;
auto o = reinterpret_cast<BaseQuadric*>(obj);
auto dx = x - o->_center.x;
auto dy = y - o->_center.y;
auto dz = z - o->_center.z;
return {2 * o->_a * dx + o->_d * dy + o->_e * dz + o->_g,
2 * o->_b * dy + o->_d * dx + o->_f * dz + o->_h,
2 * o->_c * dz + o->_e * dx + o->_f * dy + o->_i};
}
PX_CUDA_CALLABLE
Vec3<PREC> BaseQuadric::getTextureCoord(void * const &obj,
PREC const &x, PREC const &y, PREC const &z)
{
auto o = reinterpret_cast<BaseQuadric*>(obj);
// FIXME better way for quadric surface texture mapping
if (o->_sym_o)
{
if (o->_a == 0 && o->_b == 0)
return {x-o->_center.x, y-o->_center.y, 0};
if (o->_a == 0 && o->_c == 0)
return {z-o->_center.z, x-o->_center.x, 0};
if (o->_b == 0 && o->_c == 0)
return {y-o->_center.y, z-o->_center.z, 0};
if (o->_c == 0)
{
auto discriminant = o->_h*o->_h - 4*o->_b*o->_j;
auto cy = discriminant < 0 ? 0 : (std::sqrt(discriminant) - o->_h)*PREC(0.5)/o->_b;
// auto dx = x - o->_center.x;
// auto dy = y - o->_center.y;
// auto dz = z - o->_center.z;
//
// auto du = (o->_a*dx*dx/2.0 + o->_c*dz*dz + o->_e*dx*dz/2.0 + o->_g*dx/2.0 + o->_i*dz + o->_j) * dx * (dy-cy) +
// (o->_d*dx*dx/2.0 + o->_f*dx*dz + o->_h*dx)/2.0 * (dy*dy - cy*cy) +
// o->_b*dx/3.0 * (dy*dy*dy - cy*cy*cy) ;
return {x-o->_center.x > 0 ? y - cy : cy - y, z-o->_center.z, 0};
// return {x-o->_center.x > 0 ? y - o->_center.y : o->_center.y - y, z-o->_center.z, 0};
// return {du, z-o->_center.z, 0};
}
if (o->_b == 0)
{
auto discriminant = o->_g*o->_g - 4*o->_a*o->_j;
auto cx = discriminant < 0 ? o->_center.x : ((std::sqrt(discriminant) - o->_g)*PREC(0.5)/o->_a + o->_center.x);
return {z-o->_center.z > 0 ? x - cx : cx - x, y-o->_center.y, 0};
}
if (o->_a == 0)
{
auto discriminant = o->_i*o->_i - 4*o->_c*o->_j;
auto cz = discriminant < 0 ? o->_center.z : ((std::sqrt(discriminant) - o->_i)*PREC(0.5)/o->_c + o->_center.z);
return {y-o->_center.y > 0 ? z-cz : cz-z, x-o->_center.x, 0};
}
if (o->_a > 0 && o->_b > 0 && o->_c > 0)
{
auto dx = x - o->_center.x;
auto dy = y - o->_center.y;
auto dz = z - o->_center.z;
return {(1 + std::atan2(dz, dx) / PREC(PI)) * PREC(0.5),
std::acos(dy / (dx*dx+dy*dy+dz*dz)) / PREC(PI),
0};
}
return {x - o->_center.x,
y - o->_center.y,
0};
}
if (o->_sym_x)
{
return {y-o->_center.y, z-o->_center.z, 0};
}
if (o->_sym_y)
{
return {x-o->_center.x, z-o->_center.z, 0};
}
if (o->_sym_z)
{
return {x-o->_center.x, y-o->_center.y, 0};
}
if (o->_sym_xy)
return {z-o->_center.z, x-o->_center.x, 0};
if (o->_sym_yz)
return {x-o->_center.x, y-o->_center.y, 0};
if (o->_sym_xz)
return {y-o->_center.y, x-o->_center.x, 0};
return {x-o->_center.x, y-o->_center.y, 0};
}
void BaseQuadric::setCenter(Point const ¢er)
{
_center = center;
}
void BaseQuadric::setCoef(PREC const &a,
PREC const &b,
PREC const &c,
PREC const &d,
PREC const &e,
PREC const &f,
PREC const &g,
PREC const &h,
PREC const &i,
PREC const &j,
PREC const &x0, PREC const &x1,
PREC const &y0, PREC const &y1,
PREC const &z0, PREC const &z1)
{
if (j < 0)
{
_a = -a, _b = -b, _c = -c, _d = -d;
_e = -e, _f = -f, _g = -g, _h = -h;
_i = -i, _j = -j;
}
else
{
_a = a, _b = b, _c = c, _d = d;
_e = e, _f = f, _g = g, _h = h;
_i = i, _j = j;
}
_sym_xy = ((e == 0) && (f == 0) && (i == 0));
_sym_yz = ((d == 0) && (e == 0) && (g == 0));
_sym_xz = ((d == 0) && (f == 0) && (h == 0));
_sym_o = _sym_xy && _sym_yz && _sym_xz;
_sym_z = _sym_xz && _sym_yz;
_sym_x = _sym_xy && _sym_xz;
_sym_y = _sym_xy && _sym_yz;
_x0 = x0 < x1 ? (_x1 = x1, x0) : (_x1 = x0, x1);
_y0 = y0 < y1 ? (_y1 = y1, y0) : (_y1 = y0, y1);
_z0 = z0 < z1 ? (_z1 = z1, z0) : (_z1 = z0, z1);
}
std::shared_ptr<BaseGeometry> Quadric::create(Point const ¢er,
PREC const &a,
PREC const &b,
PREC const &c,
PREC const &d,
PREC const &e,
PREC const &f,
PREC const &g,
PREC const &h,
PREC const &i,
PREC const &j,
PREC const &x0, PREC const &x1,
PREC const &y0, PREC const &y1,
PREC const &z0, PREC const &z1,
std::shared_ptr<BaseMaterial> const &material,
std::shared_ptr<Transformation> const &trans)
{
return std::shared_ptr<BaseGeometry>(new Quadric(center,
a, b, c, d, e, f, g, h, i,
j,
x0, x1, y0, y1, z0, z1,
material, trans));
}
Quadric::Quadric(Point const ¢er,
PREC const &a,
PREC const &b,
PREC const &c,
PREC const &d,
PREC const &e,
PREC const &f,
PREC const &g,
PREC const &h,
PREC const &i,
PREC const &j,
PREC const &x0, PREC const &x1,
PREC const &y0, PREC const &y1,
PREC const &z0, PREC const &z1,
std::shared_ptr<BaseMaterial> const &material,
std::shared_ptr<Transformation> const &trans)
: BaseGeometry(material, trans, 8),
_obj(new BaseQuadric(center, a, b, c, d, e, f, g, h, i, j, x0, x1, y0, y1, z0, z1)),
_gpu_obj(nullptr), _need_upload(true)
{
_obj->_dev_obj = reinterpret_cast<GeometryObj*>(this);
_updateVertices();
}
Quadric::~Quadric()
{
delete _obj;
#ifdef USE_CUDA
clearGpuData();
#endif
}
#ifdef USE_CUDA
__device__ fnHit_t __fn_hit_quadric= BaseQuadric::hitCheck;
__device__ fnNormal_t __fn_normal_quadric= BaseQuadric::normalVec;
__device__ fnTextureCoord_t __fn_texture_coord_quadric= BaseQuadric::getTextureCoord;
#endif
void Quadric::up2Gpu()
{
#ifdef USE_CUDA
static fnHit_t fn_hit_h = nullptr;
static fnNormal_t fn_normal_h;
static fnTextureCoord_t fn_texture_coord_h;
if (_need_upload)
{
if (dev_ptr == nullptr)
{
PX_CUDA_CHECK(cudaMalloc(&_gpu_obj, sizeof(BaseQuadric)));
PX_CUDA_CHECK(cudaMalloc(&dev_ptr, sizeof(GeometryObj)));
}
if (fn_hit_h == nullptr)
{
PX_CUDA_CHECK(cudaMemcpyFromSymbol(&fn_hit_h, __fn_hit_quadric, sizeof(fnHit_t)));
PX_CUDA_CHECK(cudaMemcpyFromSymbol(&fn_normal_h, __fn_normal_quadric, sizeof(fnNormal_t)));
PX_CUDA_CHECK(cudaMemcpyFromSymbol(&fn_texture_coord_h, __fn_texture_coord_quadric, sizeof(fnTextureCoord_t)));
}
if (mat != nullptr)
mat->up2Gpu();
if (trans != nullptr)
trans->up2Gpu();
_obj->_dev_obj = dev_ptr;
PX_CUDA_CHECK(cudaMemcpy(_gpu_obj, _obj, sizeof(BaseQuadric), cudaMemcpyHostToDevice));
_obj->_dev_obj = reinterpret_cast<GeometryObj*>(this);
GeometryObj tmp(_gpu_obj, fn_hit_h, fn_normal_h, fn_texture_coord_h,
mat == nullptr ? nullptr : mat->devPtr(),
trans == nullptr ? nullptr : trans->devPtr());
PX_CUDA_CHECK(cudaMemcpy(dev_ptr, &tmp, sizeof(GeometryObj),
cudaMemcpyHostToDevice))
_need_upload = false;
}
#endif
}
void Quadric::clearGpuData()
{
#ifdef USE_CUDA
BaseGeometry::clearGpuData();
if (_gpu_obj != nullptr)
{
PX_CUDA_CHECK(cudaFree(_gpu_obj));
_gpu_obj = nullptr;
}
_need_upload = true;
#endif
}
void Quadric::setCenter(Point const ¢er)
{
_obj->setCenter(center);
// _updateVertices();
#ifdef USE_CUDA
_need_upload = true;
#endif
}
void Quadric::setCoef(PREC const &a,
PREC const &b,
PREC const &c,
PREC const &d,
PREC const &e,
PREC const &f,
PREC const &g,
PREC const &h,
PREC const &i,
PREC const &j,
PREC const &x0, PREC const &x1,
PREC const &y0, PREC const &y1,
PREC const &z0, PREC const &z1)
{
_obj->setCoef(a, b, c, d, e, f, g, h, i, j, x0, x1, y0, y1, z0, z1);
_updateVertices();
#ifdef USE_CUDA
_need_upload = true;
#endif
}
void Quadric::_updateVertices()
{
// FIXME better way to find the bound of vertices of quadric surface
raw_vertices[0].x = _obj->_x0;
raw_vertices[0].y = _obj->_y0;
raw_vertices[0].z = _obj->_z0;
raw_vertices[1].x = _obj->_x1;
raw_vertices[1].y = _obj->_y0;
raw_vertices[1].z = _obj->_z0;
raw_vertices[2].x = _obj->_x0;
raw_vertices[2].y = _obj->_y1;
raw_vertices[2].z = _obj->_z0;
raw_vertices[3].x = _obj->_x0;
raw_vertices[3].y = _obj->_y0;
raw_vertices[3].z = _obj->_z1;
raw_vertices[4].x = _obj->_x1;
raw_vertices[4].y = _obj->_y1;
raw_vertices[4].z = _obj->_z0;
raw_vertices[5].x = _obj->_x1;
raw_vertices[5].y = _obj->_y0;
raw_vertices[5].z = _obj->_z1;
raw_vertices[6].x = _obj->_x0;
raw_vertices[6].y = _obj->_y1;
raw_vertices[6].z = _obj->_z1;
raw_vertices[7].x = _obj->_x1;
raw_vertices[7].y = _obj->_y1;
raw_vertices[7].z = _obj->_z1;
}
Vec3<PREC> Quadric::getTextureCoord(PREC const &x,
PREC const &y,
PREC const &z) const
{
return BaseQuadric::getTextureCoord(_obj, x, y, z);
}
const BaseGeometry *Quadric::hitCheck(Ray const &ray,
PREC const &t_start,
PREC const &t_end,
PREC &hit_at) const
{
return BaseQuadric::hitCheck(_obj, ray, t_start, t_end, hit_at) ? this : nullptr;
}
Direction Quadric::normalVec(PREC const &x, PREC const &y,
PREC const &z,
bool &double_face) const
{
return BaseQuadric::normalVec(_obj, x, y, z, double_face);
}
|
5d981e5ec0eede83b0d166d3b209914d933727a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2014 BVLC and contributors.
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/layers/resample_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include <opencv2/opencv.hpp>
#include <opencv2/gpu/gpu.hpp>
namespace caffe {
static __device__ __forceinline__ float bicubicCoeff(float x_)
{
float x = fabsf(x_);
if (x <= 1.0f) return x * x * (1.5f * x - 2.5f) + 1.0f;
else if (x < 2.0f) return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f;
else return 0.0f;
}
static __device__ __forceinline__ float boxCoeff(float x)
{
if (-0.5 <= x && x<0.5) return 1.0;
return 0;
}
static __device__ __forceinline__ float triangleCoeff(float x)
{
if (-1<=x && x<0) return x+1;
if (0<=x && x<=1) return 1-x;
return 0;
}
#define FILTER_BICUBIC 0
#define FILTER_BOX 1
#define FILTER_TRIANGLE 2
template <typename Dtype>
__global__ void InterpolationKernel(
const int nthreads,
const int in_channelsize,
const int out_channelsize,
const Dtype* in_ptr,
const int in_width,
const int in_height,
const float fx,
const float fy,
Dtype* out_ptr,
const int out_width,
const int out_height,
int filter_type,
int kernel_width,
const bool antialias)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
int c = index / out_channelsize;
int x_out = (index % out_channelsize) % out_width;
int y_out = (index % out_channelsize) / out_width;
float x_in = x_out * fx + fy / 2.0f - 0.5f;
float y_in = y_out * fy + fx / 2.0f - 0.5f;
int x_in_round = round(x_in);
int y_in_round = round(y_in);
Dtype sum=0;
Dtype wsum=0;
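// when antialiasing a downsample, widen the kernel support by the scale factor:
// ax/ay shrink the filter argument while rx/ry grow the sampling radius to match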
float ax = 1.0f / (antialias ? fx : 1.0f);
float ay = 1.0f / (antialias ? fy : 1.0f);
int rx = (fx < 1.0f) ? 2 : ceil(float(kernel_width)/ax);
int ry = (fy < 1.0f) ? 2 : ceil(float(kernel_width)/ay);
for(int y=y_in_round-ry; y<=y_in_round+ry; y++)
for(int x=x_in_round-rx; x<=x_in_round+rx; x++)
{
if(y<0 || x<0) continue;
if(y>=in_height || x>=in_width) continue;
float dx = x_in - x;
float dy = y_in - y;
float w;
if(filter_type == FILTER_BICUBIC) w = ax*bicubicCoeff(ax*dx) * ay*bicubicCoeff(ay*dy);
else if(filter_type == FILTER_BOX) w = ax*boxCoeff(ax*dx) * ay*boxCoeff(ay*dy);
else w = ax*triangleCoeff(ax*dx) * ay*triangleCoeff(ay*dy);
sum += w * in_ptr[c*in_channelsize + y*in_width+x];
wsum += w;
}
out_ptr[index] = (!wsum) ? 0 : (sum / wsum);
}
}
template <typename Dtype>
__global__ void NearestNeighborKernel(
const int nthreads,
const int in_channelsize,
const int out_channelsize,
const Dtype* in_ptr,
const int in_width,
const int in_height,
const float fx,
const float fy,
Dtype* out_ptr,
const int out_width,
const int out_height)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
int c = index / out_channelsize;
int x_out = (index % out_channelsize) % out_width;
int y_out = (index % out_channelsize) / out_width;
float x_in = x_out * fx + fy / 2.0f - 0.5f;
float y_in = y_out * fy + fx / 2.0f - 0.5f;
int x_in_round = round(x_in);
int y_in_round = round(y_in);
out_ptr[index] = in_ptr[c*in_channelsize + y_in_round*in_width+x_in_round];
}
}
template <typename Dtype>
void ResampleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* top_data = top[0]->mutable_gpu_data(); // dest
int topwidth = top[0]->width();
int topheight = top[0]->height();
int topchannels = top[0]->channels();
int topcount = top[0]->count();
Dtype* bottom_data = bottom[0]->mutable_gpu_data(); // source
int bottomnum = (bottom)[0]->num();
int bottomchannels = (bottom)[0]->channels();
int bottomwidth = (bottom)[0]->width();
int bottomheight = (bottom)[0]->height();
int bottomcount = (bottom)[0]->count();
CHECK_EQ(topchannels, bottomchannels) << "ResampleLayer top channel count must match bottom channel count";
float fx = float(bottomwidth)/float(topwidth);
float fy = float(bottomheight)/float(topheight);
//int botsize = bottomwidth*bottomheight*bottomchannels*bottomnum;
int topsize = topwidth*topheight*topchannels*bottomnum;
int topchannelsize = topwidth*topheight;
int botchannelsize = bottomwidth*bottomheight;
if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_NEAREST)
{
hipLaunchKernelGGL(( NearestNeighborKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(topsize)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
topsize,
botchannelsize,
topchannelsize,
(Dtype*)bottom_data,
bottomwidth,
bottomheight,
fx,
fy,
(Dtype*)top_data,
topwidth,
topheight
);
CUDA_POST_KERNEL_CHECK;
}
else if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_CUBIC || this->layer_param().resample_param().type() == ResampleParameter_ResampleType_LINEAR)
{
int filter_type;
if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_CUBIC)
filter_type = FILTER_BICUBIC;
else if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_LINEAR)
filter_type = FILTER_TRIANGLE;
bool isDownsample = (fx > 1) || (fy > 1);
bool antialias = isDownsample && this->layer_param_.resample_param().antialias();
int kernel_width;
if(filter_type == FILTER_BICUBIC) kernel_width = 4;
else if(filter_type == FILTER_BOX) kernel_width = 1;
else kernel_width = 2;
hipLaunchKernelGGL(( InterpolationKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(topsize)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
topsize,
botchannelsize,
topchannelsize,
(Dtype*)bottom_data,
bottomwidth,
bottomheight,
fx,
fy,
(Dtype*)top_data,
topwidth,
topheight,
filter_type,
kernel_width,
antialias);
CUDA_POST_KERNEL_CHECK;
}
else
LOG(FATAL) << "unsupported downsampling type";
}
template <typename Dtype>
void ResampleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
return;
//LOG(FATAL) << "ResampleLayer cannot do backward.";
}
INSTANTIATE_LAYER_GPU_FUNCS(ResampleLayer);
} // namespace caffe
// cv::gpu::GpuMat input(bottomheight, bottomwidth, CV_32FC3);
// float* input_ptr=(float*)input.data;
// int input_stride=input.step/4;
// BlobToOpenCV<Dtype><<<CAFFE_GET_BLOCKS(bottomwidth*bottomheight), CAFFE_CUDA_NUM_THREADS>>>(
// bottomwidth*bottomheight,
// (Dtype*)bottom_data,
// bottomwidth,
// bottomheight,
// input_stride,
// (Dtype*)input_ptr);
// cv::gpu::GpuMat output;
// cv::Size output_size;
// output_size.width = topwidth;
// output_size.height = topheight;
// cv::gpu::resize(input,output,output_size,0,0,interpolation,cv::gpu::Stream::Null(),false);
// float* output_ptr=(float*)output.data;
// int output_stride=output.step/4;
// OpenCVToBlob<Dtype><<<CAFFE_GET_BLOCKS(topwidth*topheight), CAFFE_CUDA_NUM_THREADS>>>(
// topwidth*topheight,
// (Dtype*)output_ptr,
// topwidth,
// topheight,
// output_stride,
// (Dtype*)top_data);
// top_data += topsize;
// bottom_data += botsize;
//template <typename Dtype>
//__global__ void BlobToOpenCV(
// const int nthreads,
// const Dtype* blob_ptr,
// const int width,
// const int height,
// const int stride,
// Dtype* mat_ptr)
//{
// CUDA_KERNEL_LOOP(index, nthreads)
// {
// int x=index % width;
// int y=index / width;
// for(int c=0; c<3; c++)
// mat_ptr[y*stride+x*3+c]=blob_ptr[((c*height)+y)*width+x];
// }
//}
//template <typename Dtype>
//__global__ void OpenCVToBlob(
// const int nthreads,
// const Dtype* mat_ptr,
// const int width,
// const int height,
// const int stride,
// Dtype* blob_ptr)
//{
// CUDA_KERNEL_LOOP(index, nthreads)
// {
// int x=index % width;
// int y=index / width;
// for(int c=0; c<3; c++)
// blob_ptr[((c*height)+y)*width+x]=mat_ptr[y*stride+x*3+c];
// }
//}
| 5d981e5ec0eede83b0d166d3b209914d933727a4.cu | // Copyright 2014 BVLC and contributors.
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/layers/resample_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include <opencv2/opencv.hpp>
#include <opencv2/gpu/gpu.hpp>
namespace caffe {
static __device__ __forceinline__ float bicubicCoeff(float x_)
{
float x = fabsf(x_);
if (x <= 1.0f) return x * x * (1.5f * x - 2.5f) + 1.0f;
else if (x < 2.0f) return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f;
else return 0.0f;
}
static __device__ __forceinline__ float boxCoeff(float x)
{
if (-0.5 <= x && x<0.5) return 1.0;
return 0;
}
static __device__ __forceinline__ float triangleCoeff(float x)
{
if (-1<=x && x<0) return x+1;
if (0<=x && x<=1) return 1-x;
return 0;
}
#define FILTER_BICUBIC 0
#define FILTER_BOX 1
#define FILTER_TRIANGLE 2
template <typename Dtype>
__global__ void InterpolationKernel(
const int nthreads,
const int in_channelsize,
const int out_channelsize,
const Dtype* in_ptr,
const int in_width,
const int in_height,
const float fx,
const float fy,
Dtype* out_ptr,
const int out_width,
const int out_height,
int filter_type,
int kernel_width,
const bool antialias)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
int c = index / out_channelsize;
int x_out = (index % out_channelsize) % out_width;
int y_out = (index % out_channelsize) / out_width;
float x_in = x_out * fx + fy / 2.0f - 0.5f;
float y_in = y_out * fy + fx / 2.0f - 0.5f;
int x_in_round = round(x_in);
int y_in_round = round(y_in);
Dtype sum=0;
Dtype wsum=0;
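// when antialiasing a downsample, widen the kernel support by the scale factor:
// ax/ay shrink the filter argument while rx/ry grow the sampling radius to match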
float ax = 1.0f / (antialias ? fx : 1.0f);
float ay = 1.0f / (antialias ? fy : 1.0f);
int rx = (fx < 1.0f) ? 2 : ceil(float(kernel_width)/ax);
int ry = (fy < 1.0f) ? 2 : ceil(float(kernel_width)/ay);
for(int y=y_in_round-ry; y<=y_in_round+ry; y++)
for(int x=x_in_round-rx; x<=x_in_round+rx; x++)
{
if(y<0 || x<0) continue;
if(y>=in_height || x>=in_width) continue;
float dx = x_in - x;
float dy = y_in - y;
float w;
if(filter_type == FILTER_BICUBIC) w = ax*bicubicCoeff(ax*dx) * ay*bicubicCoeff(ay*dy);
else if(filter_type == FILTER_BOX) w = ax*boxCoeff(ax*dx) * ay*boxCoeff(ay*dy);
else w = ax*triangleCoeff(ax*dx) * ay*triangleCoeff(ay*dy);
sum += w * in_ptr[c*in_channelsize + y*in_width+x];
wsum += w;
}
out_ptr[index] = (!wsum) ? 0 : (sum / wsum);
}
}
template <typename Dtype>
__global__ void NearestNeighborKernel(
const int nthreads,
const int in_channelsize,
const int out_channelsize,
const Dtype* in_ptr,
const int in_width,
const int in_height,
const float fx,
const float fy,
Dtype* out_ptr,
const int out_width,
const int out_height)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
int c = index / out_channelsize;
int x_out = (index % out_channelsize) % out_width;
int y_out = (index % out_channelsize) / out_width;
float x_in = x_out * fx + fy / 2.0f - 0.5f;
float y_in = y_out * fy + fx / 2.0f - 0.5f;
int x_in_round = round(x_in);
int y_in_round = round(y_in);
out_ptr[index] = in_ptr[c*in_channelsize + y_in_round*in_width+x_in_round];
}
}
template <typename Dtype>
void ResampleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* top_data = top[0]->mutable_gpu_data(); // dest
int topwidth = top[0]->width();
int topheight = top[0]->height();
int topchannels = top[0]->channels();
int topcount = top[0]->count();
Dtype* bottom_data = bottom[0]->mutable_gpu_data(); // source
int bottomnum = (bottom)[0]->num();
int bottomchannels = (bottom)[0]->channels();
int bottomwidth = (bottom)[0]->width();
int bottomheight = (bottom)[0]->height();
int bottomcount = (bottom)[0]->count();
CHECK_EQ(topchannels, bottomchannels) << "ResampleLayer top channel count must match bottom channel count";
float fx = float(bottomwidth)/float(topwidth);
float fy = float(bottomheight)/float(topheight);
//int botsize = bottomwidth*bottomheight*bottomchannels*bottomnum;
int topsize = topwidth*topheight*topchannels*bottomnum;
int topchannelsize = topwidth*topheight;
int botchannelsize = bottomwidth*bottomheight;
if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_NEAREST)
{
NearestNeighborKernel<Dtype><<<CAFFE_GET_BLOCKS(topsize), CAFFE_CUDA_NUM_THREADS>>>(
topsize,
botchannelsize,
topchannelsize,
(Dtype*)bottom_data,
bottomwidth,
bottomheight,
fx,
fy,
(Dtype*)top_data,
topwidth,
topheight
);
CUDA_POST_KERNEL_CHECK;
}
else if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_CUBIC || this->layer_param().resample_param().type() == ResampleParameter_ResampleType_LINEAR)
{
int filter_type;
if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_CUBIC)
filter_type = FILTER_BICUBIC;
else if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_LINEAR)
filter_type = FILTER_TRIANGLE;
bool isDownsample = (fx > 1) || (fy > 1);
bool antialias = isDownsample && this->layer_param_.resample_param().antialias();
int kernel_width;
if(filter_type == FILTER_BICUBIC) kernel_width = 4;
else if(filter_type == FILTER_BOX) kernel_width = 1;
else kernel_width = 2;
InterpolationKernel<Dtype><<<CAFFE_GET_BLOCKS(topsize), CAFFE_CUDA_NUM_THREADS>>>(
topsize,
botchannelsize,
topchannelsize,
(Dtype*)bottom_data,
bottomwidth,
bottomheight,
fx,
fy,
(Dtype*)top_data,
topwidth,
topheight,
filter_type,
kernel_width,
antialias);
CUDA_POST_KERNEL_CHECK;
}
else
LOG(FATAL) << "unsupported downsampling type";
}
template <typename Dtype>
void ResampleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
return;
//LOG(FATAL) << "ResampleLayer cannot do backward.";
}
INSTANTIATE_LAYER_GPU_FUNCS(ResampleLayer);
} // namespace caffe
// cv::gpu::GpuMat input(bottomheight, bottomwidth, CV_32FC3);
// float* input_ptr=(float*)input.data;
// int input_stride=input.step/4;
// BlobToOpenCV<Dtype><<<CAFFE_GET_BLOCKS(bottomwidth*bottomheight), CAFFE_CUDA_NUM_THREADS>>>(
// bottomwidth*bottomheight,
// (Dtype*)bottom_data,
// bottomwidth,
// bottomheight,
// input_stride,
// (Dtype*)input_ptr);
// cv::gpu::GpuMat output;
// cv::Size output_size;
// output_size.width = topwidth;
// output_size.height = topheight;
// cv::gpu::resize(input,output,output_size,0,0,interpolation,cv::gpu::Stream::Null(),false);
// float* output_ptr=(float*)output.data;
// int output_stride=output.step/4;
// OpenCVToBlob<Dtype><<<CAFFE_GET_BLOCKS(topwidth*topheight), CAFFE_CUDA_NUM_THREADS>>>(
// topwidth*topheight,
// (Dtype*)output_ptr,
// topwidth,
// topheight,
// output_stride,
// (Dtype*)top_data);
// top_data += topsize;
// bottom_data += botsize;
//template <typename Dtype>
//__global__ void BlobToOpenCV(
// const int nthreads,
// const Dtype* blob_ptr,
// const int width,
// const int height,
// const int stride,
// Dtype* mat_ptr)
//{
// CUDA_KERNEL_LOOP(index, nthreads)
// {
// int x=index % width;
// int y=index / width;
// for(int c=0; c<3; c++)
// mat_ptr[y*stride+x*3+c]=blob_ptr[((c*height)+y)*width+x];
// }
//}
//template <typename Dtype>
//__global__ void OpenCVToBlob(
// const int nthreads,
// const Dtype* mat_ptr,
// const int width,
// const int height,
// const int stride,
// Dtype* blob_ptr)
//{
// CUDA_KERNEL_LOOP(index, nthreads)
// {
// int x=index % width;
// int y=index / width;
// for(int c=0; c<3; c++)
// blob_ptr[((c*height)+y)*width+x]=mat_ptr[y*stride+x*3+c];
// }
//}
|
2542620c872b137a48015d266c4b112dd83a17a3.hip | // !!! This is a file automatically generated by hipify!!!
// Fast parallel reduction for Kepler hardware
//
// Based on devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/
#include <hip/hip_runtime.h>
#include <stdio.h>
#define APICALL(code) { check_code((code), __FILE__, __LINE__); }
inline void check_code(hipError_t code, const char *file, int line)
{
if (code != hipSuccess)
{
fprintf(stderr,"CUDA error: %s %s %d\n", hipGetErrorString(code), file, line);
exit(code);
}
}
//
// Main implementation
//
// Reduce a value across a warp
__inline__ __device__
int sumReduce_warp(int val) {
for (int offset = warpSize/2; offset > 0; offset /= 2)
val += __shfl_down_sync(0xFFFFFFFF, val, offset);
return val;
}
// Reduce a value across a block, using shared memory for communication
__inline__ __device__ int sumReduce_block(int val) {
// shared mem for 32 partial sums
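// 32 slots = one per warp; a block has at most 1024 threads, i.e. at most 32 warps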
static __shared__ int shared[32];
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
// each warp performs partial reduction
val = sumReduce_warp(val);
// write reduced value to shared memory
if (lane==0) shared[wid]=val;
// wait for all partial reductions
__syncthreads();
// read from shared memory only if that warp existed
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
// final reduce within first warp
if (wid==0) {
val = sumReduce_warp(val);
}
return val;
}
// Reduce an array across a complete grid
__global__ void sumReduce_grid(int *input, int* output, int N) {
int sum = 0;
// reduce multiple elements per thread (grid-stride loop)
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < N;
i += blockDim.x * gridDim.x) {
sum += input[i];
}
sum = sumReduce_block(sum);
if (threadIdx.x==0)
output[blockIdx.x]=sum;
}
void sumReduce(int *input, int* output, int N) {
int threads = 512;
int blocks = min((N + threads - 1) / threads, 1024);
hipLaunchKernelGGL(( sumReduce_grid), dim3(blocks), dim3(threads), 0, 0, input, output, N);
hipLaunchKernelGGL(( sumReduce_grid), dim3(1), dim3(1024), 0, 0, output, output, blocks);
}
//
// Benchmark entry-points
//
struct State
{
size_t len;
int *gpu_input;
int *gpu_output;
};
extern "C"
State *setup(int *input, size_t len)
{
State *state = new State();
state->len = len;
APICALL(hipMalloc(&state->gpu_input, len*sizeof(int)));
APICALL(hipMemcpy(state->gpu_input, input, len*sizeof(int), hipMemcpyHostToDevice));
APICALL(hipMalloc(&state->gpu_output, len*sizeof(int)));
return state;
}
extern "C"
int run(State *state)
{
sumReduce(state->gpu_input, state->gpu_output, state->len);
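// after the two launches inside sumReduce, the grand total is in output[0];
// only that single element of the copy below is actually used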
int* output = (int*) malloc(state->len * sizeof(int));
APICALL(hipMemcpy(output, state->gpu_output, state->len*sizeof(int), hipMemcpyDeviceToHost));
int val = output[0];
free(output);
return val;
}
extern "C"
void teardown(State *state)
{
APICALL(hipFree(state->gpu_output));
APICALL(hipFree(state->gpu_input));
}
| 2542620c872b137a48015d266c4b112dd83a17a3.cu | // Fast parallel reduction for Kepler hardware
//
// Based on devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/
#include <cuda.h>
#include <stdio.h>
#define APICALL(code) { check_code((code), __FILE__, __LINE__); }
inline void check_code(cudaError_t code, const char *file, int line)
{
if (code != cudaSuccess)
{
fprintf(stderr,"CUDA error: %s %s %d\n", cudaGetErrorString(code), file, line);
exit(code);
}
}
//
// Main implementation
//
// Reduce a value across a warp
__inline__ __device__
int sumReduce_warp(int val) {
for (int offset = warpSize/2; offset > 0; offset /= 2)
val += __shfl_down_sync(0xFFFFFFFF, val, offset);
return val;
}
// Reduce a value across a block, using shared memory for communication
__inline__ __device__ int sumReduce_block(int val) {
// shared mem for 32 partial sums
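// 32 slots = one per warp; a block has at most 1024 threads, i.e. at most 32 warps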
static __shared__ int shared[32];
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
// each warp performs partial reduction
val = sumReduce_warp(val);
// write reduced value to shared memory
if (lane==0) shared[wid]=val;
// wait for all partial reductions
__syncthreads();
// read from shared memory only if that warp existed
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
// final reduce within first warp
if (wid==0) {
val = sumReduce_warp(val);
}
return val;
}
// Reduce an array across a complete grid
__global__ void sumReduce_grid(int *input, int* output, int N) {
int sum = 0;
// reduce multiple elements per thread (grid-stride loop)
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < N;
i += blockDim.x * gridDim.x) {
sum += input[i];
}
sum = sumReduce_block(sum);
if (threadIdx.x==0)
output[blockIdx.x]=sum;
}
void sumReduce(int *input, int* output, int N) {
int threads = 512;
int blocks = min((N + threads - 1) / threads, 1024);
sumReduce_grid<<<blocks, threads>>>(input, output, N);
sumReduce_grid<<<1, 1024>>>(output, output, blocks);
}
//
// Benchmark entry-points
//
struct State
{
size_t len;
int *gpu_input;
int *gpu_output;
};
extern "C"
State *setup(int *input, size_t len)
{
State *state = new State();
state->len = len;
APICALL(cudaMalloc(&state->gpu_input, len*sizeof(int)));
APICALL(cudaMemcpy(state->gpu_input, input, len*sizeof(int), cudaMemcpyHostToDevice));
APICALL(cudaMalloc(&state->gpu_output, len*sizeof(int)));
return state;
}
extern "C"
int run(State *state)
{
sumReduce(state->gpu_input, state->gpu_output, state->len);
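// after the two launches inside sumReduce, the grand total is in output[0];
// only that single element of the copy below is actually used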
int* output = (int*) malloc(state->len * sizeof(int));
APICALL(cudaMemcpy(output, state->gpu_output, state->len*sizeof(int), cudaMemcpyDeviceToHost));
int val = output[0];
free(output);
return val;
}
extern "C"
void teardown(State *state)
{
APICALL(cudaFree(state->gpu_output));
APICALL(cudaFree(state->gpu_input));
}
|
691ca1abe1647952cc4df4cdb8b1b2af5f32506e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <book.h>
#define N (1024 * 1024)
#define FULL_DATA_SIZE (N * 20)
__global__ void kernel(int *a, int *b, int *c)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
int idx1 = (idx + 1) % 256;
int idx2 = (idx + 2) % 256;
float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f;
float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f;
c[idx] = (as + bs) / 2;
}
}
int main(void)
{
hipEvent_t start, stop;
float elapsedTime;
hipStream_t stream;
int *h_a, *h_b, *h_c;
int *d_a, *d_b, *d_c;
// start the timers
HANDLE_ERROR(hipEventCreate(&start));
HANDLE_ERROR(hipEventCreate(&stop));
// initialize the stream
HANDLE_ERROR(hipStreamCreate(&stream));
// allocate the memory on the GPU
HANDLE_ERROR(hipMalloc((void**)&d_a, N * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&d_b, N * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&d_c, N * sizeof(int)));
// allocate host locked memory, used to stream
HANDLE_ERROR(hipHostMalloc((void**)&h_a, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault));
HANDLE_ERROR(hipHostMalloc((void**)&h_b, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault));
HANDLE_ERROR(hipHostMalloc((void**)&h_c, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault));
for (int i = 0; i<FULL_DATA_SIZE; i++)
{
h_a[i] = rand();
h_b[i] = rand();
}
HANDLE_ERROR(hipEventRecord(start, 0));
// now loop over full data, in bite-sized chunks
for (int i = 0; i<FULL_DATA_SIZE; i += N)
{
// copy the locked memory to the device, async
HANDLE_ERROR(hipMemcpyAsync(d_a, h_a + i, N * sizeof(int), hipMemcpyHostToDevice, stream));
HANDLE_ERROR(hipMemcpyAsync(d_b, h_b + i, N * sizeof(int), hipMemcpyHostToDevice, stream));
hipLaunchKernelGGL(( kernel), dim3(N / 256), dim3(256), 0, stream, d_a, d_b, d_c);
// copy the data from device to locked memory
HANDLE_ERROR(hipMemcpyAsync(h_c + i, d_c, N * sizeof(int), hipMemcpyDeviceToHost, stream));
}
// wait for all work queued in the stream to finish
HANDLE_ERROR(hipStreamSynchronize(stream));
HANDLE_ERROR(hipEventRecord(stop, 0));
HANDLE_ERROR(hipEventSynchronize(stop));
HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, start, stop));
printf("Time taken: %3.2f ms\n", elapsedTime);
// cleanup the streams and memory
HANDLE_ERROR(hipHostFree(h_a));
HANDLE_ERROR(hipHostFree(h_b));
HANDLE_ERROR(hipHostFree(h_c));
HANDLE_ERROR(hipFree(d_a));
HANDLE_ERROR(hipFree(d_b));
HANDLE_ERROR(hipFree(d_c));
HANDLE_ERROR(hipStreamDestroy(stream));
return 0;
}
| 691ca1abe1647952cc4df4cdb8b1b2af5f32506e.cu |
#include <book.h>
#define N (1024 * 1024)
#define FULL_DATA_SIZE (N * 20)
__global__ void kernel(int *a, int *b, int *c)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
int idx1 = (idx + 1) % 256;
int idx2 = (idx + 2) % 256;
float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f;
float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f;
c[idx] = (as + bs) / 2;
}
}
int main(void)
{
cudaEvent_t start, stop;
float elapsedTime;
cudaStream_t stream;
int *h_a, *h_b, *h_c;
int *d_a, *d_b, *d_c;
// start the timers
HANDLE_ERROR(cudaEventCreate(&start));
HANDLE_ERROR(cudaEventCreate(&stop));
// initialize the stream
HANDLE_ERROR(cudaStreamCreate(&stream));
// allocate the memory on the GPU
HANDLE_ERROR(cudaMalloc((void**)&d_a, N * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&d_b, N * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&d_c, N * sizeof(int)));
// allocate host locked memory, used to stream
HANDLE_ERROR(cudaHostAlloc((void**)&h_a, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault));
HANDLE_ERROR(cudaHostAlloc((void**)&h_b, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault));
HANDLE_ERROR(cudaHostAlloc((void**)&h_c, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault));
for (int i = 0; i<FULL_DATA_SIZE; i++)
{
h_a[i] = rand();
h_b[i] = rand();
}
HANDLE_ERROR(cudaEventRecord(start, 0));
// now loop over full data, in bite-sized chunks
for (int i = 0; i<FULL_DATA_SIZE; i += N)
{
// copy the locked memory to the device, async
HANDLE_ERROR(cudaMemcpyAsync(d_a, h_a + i, N * sizeof(int), cudaMemcpyHostToDevice, stream));
HANDLE_ERROR(cudaMemcpyAsync(d_b, h_b + i, N * sizeof(int), cudaMemcpyHostToDevice, stream));
kernel << <N / 256, 256, 0, stream >> >(d_a, d_b, d_c);
// copy the data from device to locked memory
HANDLE_ERROR(cudaMemcpyAsync(h_c + i, d_c, N * sizeof(int), cudaMemcpyDeviceToHost, stream));
}
// wait for all work queued in the stream to finish
HANDLE_ERROR(cudaStreamSynchronize(stream));
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize(stop));
HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("Time taken: %3.2f ms\n", elapsedTime);
// cleanup the streams and memory
HANDLE_ERROR(cudaFreeHost(h_a));
HANDLE_ERROR(cudaFreeHost(h_b));
HANDLE_ERROR(cudaFreeHost(h_c));
HANDLE_ERROR(cudaFree(d_a));
HANDLE_ERROR(cudaFree(d_b));
HANDLE_ERROR(cudaFree(d_c));
HANDLE_ERROR(cudaStreamDestroy(stream));
return 0;
}
|
0722fee49c1122da14795a220c3d445ed970c28a.hip | // !!! This is a file automatically generated by hipify!!!
///Ésta programa calcula la versión paralelizada del algoritmo FFT_DIF_DIT_TD
///(31/08/2016)
///Ésta versión sirve para graficar en matlab los tiempos de ejecución, considerando (RADIX-3) N = 3^13, Li = 43 y Lo = Varía
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hipfft.h>
#include <cufftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_complex.h>
#include <math.h>
#include <math_constants.h>
#include <iostream>
#include <time.h>
//////////////////////////////////////////////////////////////////////////
///////////////////////DECLARACIÓN DE FUNCIONES///////////////////////////
//////////////////////////////////////////////////////////////////////////
void vector_entrada_xn(int Li);
void arreglo_W(int N);
void asign_rap(int N,int Li,int Lo);
void factor(int N);
void product(int vector_1[50],int vector_2[50],int valor);
void etapa_entrada(void);
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y);
void etapa_intermedia(void);
void etapa_salida(void);
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X);
//////////////////////////////////////////////////////////////////////////
/////////////////////DECLARACIÓN DE VARIABLES GLOBALES////////////////////
//////////////////////////////////////////////////////////////////////////
cuFloatComplex *x_host;
cuFloatComplex *W_host;
//cuFloatComplex *y_host;
//cuFloatComplex *z_host;
cuFloatComplex *X_host;
cuFloatComplex *x_device;
cuFloatComplex *W_device;
cuFloatComplex *y_device;
cuFloatComplex *z_device;
cuFloatComplex *X_device;
hipfftComplex *in,*out;
FILE *db_open,*dc_open;
int Dip,Dop,P,N,Li,Lo;
int vF[50]; //Almacena los factores de N
int svF; //Almacena el numero de factores de N
int Prod[50];
int a;
#define inf 99999
//////////////////////////////////////////////////////////////////////////
//////////////////////////DATOS DE ENTRADA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// N >>> Número de elementos del vector de entrada
/// Li >>> Número de elementos de entrada diferentes de cero
/// Lo >>> Número de elementos de salida requeridos
/// loop >>> Número de iteraciones
/// muestras >>> Número de muestras
//////////////////////////////////////////////////////////////////////////
///////////////////////////DATOS DE SALIDA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// X >>> Vector de salida
//////////////////////////////////////////////////////////////////////////
/////////////////// SE INGRESAN LOS DATOS DE ENTRADA /////////////////////
//////////////////////////////////////////////////////////////////////////
///Ingrese el número de iteraciones requeridas
const int loop = 300;
///Ingrese el valor de N_max
const int N_max = 13;
///Ingrese el valor de Li_max
const int Li_max = 43;
//////////////////////////////////////////////////////////////////////////
//////////////////////////FUNCION PRINCIPAL///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Función principal
int main()
{
int i,j,i_N,j_res,k_res,cont,i_prom;
float suma;
float promedio[N_max];
//////////////////////////////////////////////////////////////////////////
//////////////////////////SELECCIÓN DEL DEVICE////////////////////////////
//////////////////////////////////////////////////////////////////////////
int device;
FILE *da;
hipSetDevice(1);
hipGetDevice(&device);
if(device == 0)
{
printf("\n\n---DEVICE = GeForce GTX 970---\n\n");
da = fopen("Tiempos_N13_Li43_LoVARIA_CUDA_GTX970.bin","a+b"); //Crea o sobre escribe archivo
}
if(device == 1)
{
printf("\n\n---DEVICE = TESLA K20---\n\n");
da = fopen("Tiempos_N13_Li43_LoVARIA_CUDA_TESLAK20c.bin","a+b"); //Crea o sobre escribe archivo
}
//Pausa
printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n");
getchar();
for(i_N = N_max;i_N <= N_max;i_N++)
{
N = (int )pow(3,i_N);
printf("\n N = %d \n",N);
for(j_res=Li_max;j_res <= Li_max;j_res++)
{
Li=j_res;
for(k_res=1;k_res <= N_max;k_res++)
{
Lo=(int )pow(3,k_res);
printf("\n Li = %d Lo = %d",Li,Lo);
///Se abre el archivo binario
db_open = fopen("Entrada_real_N13_C.bin","rb");
dc_open = fopen("Entrada_imag_N13_C.bin","rb");
suma=0.0;
for(j=0;j<loop;j++)
{
//Comandos necesarios para medir el tiempo
float elapsedTime_app;
hipEvent_t start_app, stop_app;
hipEventCreate(&start_app);
hipEventCreate(&stop_app);
//Se generan en el host los valores del vector de entrada x[n]
vector_entrada_xn(Li);
///Se genera el arreglo W[N]
arreglo_W(N);
//Se generan en el host los factores Dip y Dop
asign_rap(N,Li,Lo);
//Cálculo en el host del factor P
P = N/(Dip*Dop);
//printf("\n\n FACTOR P:\n\n");
//printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
//---------------------------------------------------------------------------------------------
//Se empieza a medir el tiempo de ejecucion de la aplicacion
hipEventRecord(start_app,0);
//Función auxiliar del host para ejecutar la etapa de entrada
etapa_entrada();
//Función auxiliar del host para ejecutar la etapa intermedia
etapa_intermedia();
//Función auxiliar del host para ejecutar la etapa de salida
etapa_salida();
//---------------------------------------------------------------------------------------------
//Comandos necesarios para medir el tiempo de la aplicacion (app)
hipEventRecord(stop_app,0);
hipEventSynchronize(stop_app);
hipEventElapsedTime(&elapsedTime_app,start_app,stop_app);
//Suma de todos los tiempos
suma = suma + elapsedTime_app;
//Se destruyen los eventos que miden el tiempo de la aplicacion
hipEventDestroy(start_app);
hipEventDestroy(stop_app);
//Se liberan memorias del Host y Device
free(x_host);
free(W_host);
free(X_host);
hipFree(x_device);
hipFree(W_device);
hipFree(y_device);
hipFree(z_device);
hipFree(X_device);
}
promedio[k_res-1] = suma/(float)loop;
fclose(db_open);
fclose(dc_open);
}
}
}
fwrite(promedio,sizeof(float),N_max,da);
fclose(da);
return EXIT_SUCCESS;
}
//////////////////////////////////////////////////////////////////////////
/////////////////////////FUNCIONES SECUNDARIAS////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Ésta función genera el vector de entrada x[n]
void vector_entrada_xn(int Li)
{
//Declaración de variables locales
int k;
float *buffer_real,*buffer_imag;
//Se reserva memoria para xn_host en el host
x_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Li);
buffer_real = (float*)malloc(sizeof(float)*(pow(3,N_max)));
buffer_imag = (float*)malloc(sizeof(float)*(pow(3,N_max)));
///Se lee el vector de entrada del archivo binario
fread(buffer_real,sizeof(float),(int)pow(3,N_max),db_open);
fread(buffer_imag,sizeof(float),(int)pow(3,N_max),dc_open);
//Se dan valores a x[n]
for(k = 0;k < Li; k++)
{
//x_host[k] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%11));
//x_host[k] = make_cuFloatComplex((float)(k + 1),(float)(0.0));
x_host[k] = make_cuFloatComplex(buffer_real[k],buffer_imag[k]);
}
/*
//Se imprimen los valores de entrada x[n]
printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
for(k=0;k<Li;k++)
{
printf(" %d-> (%f) + (%f)\n",k+1,cuCrealf(x_host[k]),cuCimagf(x_host[k]));
}
*/
free(buffer_real);
free(buffer_imag);
}
//Ésta función genera el arreglo W
void arreglo_W(int N)
{
//Declaración de variables locales
int n;
//Se reserva memoria para W_host en el host
W_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N);
//Se genera el arreglo W
for(n = 1;n <= N;n++)
{
W_host[n-1] = make_cuFloatComplex((float)cos((2*CUDART_PI*n)/N),(float)(-1)*sin((2*CUDART_PI*n)/N));
}
/*
//Se imprimen los valores del arreglo W[N]
printf("\n---ARREGLO W[N]---\n\n");
for(n = 0;n < N; n++)
{
printf(" W[%d]-> (%f) + (%f)\n",n+1,cuCrealf(W_host[n]),cuCimagf(W_host[n]));
}
*/
}
//Ésta función genera los factores Dip y Dop
void asign_rap(int N,int Li,int Lo)
{
//Declaración de variables locales
float NLi,NLo,Diprapt,Doprapt;
int Nh[50];
int k[50];
int G;
int g,i,t,ta;
int Dipt[50],Dopt[50];
float distrapt,distrap;
int Pos,h,Poss;
int nk[50];
int r;
//Inicializaciones
G = 0;
svF = 0;
//Factores Dip y Dop ideales
NLi=(float)N/(float)Li;
NLo=(float)N/(float)Lo;
Diprapt=NLi;
Doprapt=NLo;
//Se encuentran los factores de "N"
//vF almacena los factores de "N"
//svF almacena el número de factores de "N"
factor(N);
/*
Almacena en el vector Nh los factores que son diferentes de del vector vF
En el vector k se almacena la cantidad de veces que se repite cada
elemento almacenado en el vector Nh.
*/
Nh[0] = vF[0];
k[0]=1;
for(g=1;g<=svF-1;g=g+1)
{
if(vF[g]!=vF[g-1])
{
G=G+1;
Nh[G]=vF[g];
k[G]=1;
}
else
{
k[G]=k[G]+1;
}
}
/*
Almacena en el vector Nh todas las posibles combinaciones que den como
producto a N. t almacena el numero de elementos del vector Nh.
*/
product(Nh,k,G);
t = a;
for(i=0;i<t;i=i+1)
{
Dipt[i]=Prod[i];
}
distrapt=inf;
for(g=1;g<=t;g=g+1)
{
if(Dipt[g-1]<=NLi)
{
Pos=g-1;
for(h=0;h<=G;h=h+1)
{
Poss=floor(Pos/(k[h]+1));
nk[h]=k[h]+Poss*(k[h]+1)-Pos;
Pos=Poss;
}
product(Nh,nk,G);
ta=a;
for(i=0;i<ta;i=i+1)
{
Dopt[i]=Prod[i];
}
////////////////////////////////////////////
//int j;
//for(j=0;j<ta;j++)
//{
// printf(" %d ",Dopt[j]);
//}
//printf("\n\n ta=%d\n\n",ta);
///////////////////////////////////////////
for(r=0;r<ta;r=r+1)
{
distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2));
if(distrap<distrapt)
{
distrapt=distrap;
Dip=Dipt[g-1];
Dop=Dopt[r];
}
}
}
}
/*
printf("\n\n FACTOR Dip :\n\n");
printf(" %d ",Dip);
printf("\n\n FACTOR Dop:\n\n");
printf(" %d ",Dop);
*/
}
//Ésta función encuentra los factores de "N"
void factor(int N)
{
//Se empieza a verificar los factores desde 2
int i=2;
long N_factor;
N_factor = N;
while(i<=N_factor)
{
while((N_factor%i)==0)
{
vF[svF]=i;
N_factor=N_factor/i;
// printf("Factores: %d ",vF[svF]);
svF++;
}
i++;
}
}
//Ésta función encuentra todas las posibles combinaciones de factores que den como resultado "N"
void product(int vector_1[50],int vector_2[50],int valor)
{
int d,e,s,pNh,i;
int cont=0;
Prod[0]=1;
a=1;
for(d=0;d<=valor;d=d+1)
{
s=a;
pNh=1;
for(e=1;e<=vector_2[d];e=e+1)
{
pNh=pNh*vector_1[d];
for(i=(s*e+1);i<=(s*e+s);i=i+1)
{
Prod[i-1]=pNh*Prod[cont];
cont=cont+1;
}
a=a+s;
cont=0;
}
}
}
//Función auxiliar del host para calcular la etapa de entrada en el device
void etapa_entrada(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE ENTRADA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int k1,n1,n2;
//Asignación de memoria en el device para el arreglo "x_device"
hipMalloc((void**)&x_device,Li*sizeof(cuFloatComplex));
//Se reserva memoria en el device para el arreglo "W_device"
hipMalloc((void**)&W_device,N*sizeof(cuFloatComplex));
//Asignación de memoria en el device para el arreglo "y"
hipMalloc((void**)&y_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Se pasa el arreglo x_host a x_device
hipMemcpy(x_device,x_host,Li*sizeof(cuFloatComplex),hipMemcpyHostToDevice);
//Envío de los arreglos W hacia la memoria global del device
hipMemcpy(W_device,W_host,N*sizeof(cuFloatComplex),hipMemcpyHostToDevice);
//Asignación de memoria en el host para "y"
//y_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Dimensionamiento del grid para la función kernel "inputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((P*Dop) < 32 && (Dip) < 32)
{
blockDim.x = (P*Dop);
blockDim.y = (Dip);
gridDim.x = 1;
gridDim.y = 1;
}
else
{
blockDim.x = 32;
blockDim.y = 32;
gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x));
gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y));
}
//Lanzamiento del kernel "inputStage_kernel"
hipLaunchKernelGGL(( inputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Li,Dip,Dop,P,x_device,W_device,y_device);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
/*
//Copia del arreglo "y" del device hacia el host
hipMemcpy(y_host,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToHost);
//Se imprimen los valores de "y"
printf("\n\n--- ARREGLO y(n1,n2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(n2 = 0;n2 < P;n2++)
{
printf(" (%f) + (%f) ",cuCrealf(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimagf(y_host[(k1*Dop*P)+(n1*P)+n2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//función kernel que ejecuta la etapa de entrada en el device
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y)
{
int n1,n2;
cuFloatComplex t1;
//Threads
int n = blockDim.x *blockIdx.x + threadIdx.x;
int k1 = blockDim.y *blockIdx.y + threadIdx.y;
//Se resetean las flags
//flag_inputstage_1_d[0] = 0;
//flag_inputstage_2_d[0] = 0;
//flag_inputstage_3_d[0] = 0;
//printf("\n n = %d k1 = %d",n,k1);
if( (n < (P*Dop)) && (k1 < Dip))
{
n2 = floorf(n/Dop);
n1 = n - (Dop*n2);
//Generación de los elementos que dependen de x[0]
if(n == 0)
{
y[(k1*Dop*P)+(0*P)+ 0] = x[0];
///Flag
//flag_inputstage_1_d[0] = 1;
}
//Mapeo de x[n] a las entradas del primer conjunto de Dop DFT's
if((n >= 1) && (n <= (Li-1)))
{
t1 = x[n];
if(k1 == 0)
{
y[(0*Dop*P)+(n1*P)+ n2] = t1;
}
if(k1 >= 1)
{
y[(k1*Dop*P)+(n1*P)+ n2] = cuCmulf(W[((n*k1)%N)-1],t1);
}
///Flag
//flag_inputstage_2_d[0] = 1;
}
//Rellenado de ceros para los elementos de "y" para Li <= n <= (P*Dop)-1
if((n >= Li) && (n <= (P*Dop)-1))
{
y[(k1*Dop*P)+(n1*P)+ n2] = make_cuFloatComplex(0.0,0.0);
///Flag
//flag_inputstage_3_d[0] = 1;
}
//printf("\n (%f) + (%f)\n ",cuCrealf(y[(k1*Dop*P)+(n1*P)+ n2]),cuCimagf(y[(k1*Dop*P)+(n1*P)+ n2]));
}
}
//Función auxiliar del host para calcular la etapa intermedia en el device
void etapa_intermedia(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA INTERMEDIA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int k1,k2,n1;
int n[1] = {P};
int inembed[1] = {P};
int onembed[1] = {P};
//Asignación de memoria en el device para "z"
hipMalloc((void**)&z_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Asignación de memoria en el host para "z"
//z_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Asignación de memoria en el device para "in" y "out"
hipMalloc((void**)&in,sizeof(hipfftComplex)*P*Dip*Dop);
hipMalloc((void**)&out,sizeof(hipfftComplex)*P*Dip*Dop);
//Se copia el arreglo "y" al arreglo "in"
hipMemcpy(in,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice);
//Se crea un plan
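// batched plan: Dip*Dop independent P-point C2C transforms, each contiguous (stride 1, distance P)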
hipfftHandle plan;
hipfftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,HIPFFT_C2C,Dip*Dop);
//Ejecución del plan
hipfftExecC2C(plan,in,out,HIPFFT_FORWARD);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
//Se copian los datos del arreglo "out" al arreglo "z_device"
hipMemcpy(z_device,out,sizeof(hipfftComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice);
//Se destruye el plan
hipfftDestroy(plan);
//Se liberan los arreglos "in" y "out"
hipFree(in);
hipFree(out);
/*
//Se copian los datos del arreglo "z_device" al arreglo "z_host"
hipMemcpy(z_host,z_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToHost);
///Se imprimen los valores de z(n1,k2,k1)
printf("\n\n--- ARREGLO z(n1,k2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(k2 = 0;k2 < P;k2++)
{
printf(" (%f) + (%f) ",cuCrealf(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimagf(z_host[(k1*Dop*P)+(n1*P)+k2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//Función auxiliar del host para calcular la etapa de salida en el device
void etapa_salida(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE SALIDA///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int m;
//Asignación de memoria en el device para "X"
hipMalloc((void**)&X_device,Lo*sizeof(cuFloatComplex));
//Asignación de memoria en el host para "X"
X_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Lo);
//Dimensionamiento del grid para la función kernel "outputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((Lo) < 1024)
{
blockDim.x = Lo;
gridDim.x = 1;
}
else
{
blockDim.x = 1024;
gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x));
}
//Lanzamiento del kernel "outputStage_kernel"
hipLaunchKernelGGL(( outputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Lo,Dip,Dop,P,z_device,W_device,X_device);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
//Copia del arreglo "X" del device hacia el host
hipMemcpy(X_host,X_device,sizeof(cuFloatComplex)*Lo,hipMemcpyDeviceToHost);
/*
//Se imprimen los valores de "X_host"
///Imprimir X[k]
printf("\n\n--- ARREGLO X[k] ---\n\n");
for(m=0;m<=Lo-1;m++)
{
printf("\n X[%d] = %.4f + (%.4f)",m,cuCrealf(X_host[m]),cuCimagf(X_host[m]));
//fprintf(da,"%.4f %.4f\n",creal(X[i]),cimag(X[i]));
}
*/
}
//función kernel que ejecuta la etapa de salida en el device
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X)
{
//Declaración de variables locales
int n1,k_aux,k1,k2,a,b;
cuFloatComplex t1,t2,t3,t4,t5;
//Threads
int k = blockDim.x *blockIdx.x + threadIdx.x;
//Se resetean las flags
//flag_outputstage_1_d[0] = 0;
//flag_outputstage_2_d[0] = 0;
//flag_outputstage_3_d[0] = 0;
if(k < Lo)
{
for(n1 = 0; n1 <= (Dop-1); n1 = n1+1)
{
if(Lo <= Dip)
{
//Cálculo de X(k) para 0<=k<=Lo-1.
//printf("\n--- Caso (Lo <= Dip) ---\n");
//En la descomposición k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
///Flag
//flag_outputstage_1_d[0] = 1;
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
///Flag
//flag_outputstage_1_d[0] = 1;
}
}
else
{
if((k >= 0) && (k <= (Dip-1)))
{
//Cálculo de X(k) para 0<=k<=Dip-1.
//En la descomposición k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
}
}
else
{
if(Dop <= 4)
{
//Usando el método directo
//printf("\n--- Caso (Metodo directo) ---\n");
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
///Flag
//flag_outputstage_2_d[0] = 1;
}
else
{
if(n1 == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
}
a = floorf(k/(Dip*P));
X[k] = cuCaddf(X[k],cuCmulf(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1]));
///Flag
//flag_outputstage_2_d[0] = 1;
}
}
else
{
//Usando el método filtering 2BF
//printf("\n--- Caso (Filtro 2BF) ---\n");
if((Dop-2) >= 1)
{
if(n1 == 0)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
///Flag
//flag_outputstage_3_d[0] = 1;
}
if((n1 >= 1) && (n1 <= (Dop-2)))
{
t2 = t1;
t1 = cuCaddf(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4);
t3 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t4 = cuCsubf(t3,t2);
}
if(n1 == (Dop-1))
{
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
}
}
else
{
if(Dop == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
X[k] = t1;
///Flag
//flag_outputstage_3_d[0] = 1;
}
else
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
///Flag
//flag_outputstage_3_d[0] = 1;
}
}
}
}
}
}
}
}
| 0722fee49c1122da14795a220c3d445ed970c28a.cu | ///Ésta programa calcula la versión paralelizada del algoritmo FFT_DIF_DIT_TD
///(31/08/2016)
///Ésta versión sirve para graficar en matlab los tiempos de ejecución, considerando (RADIX-3) N = 3^13, Li = 43 y Lo = Varía
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cufft.h>
#include <cufftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuComplex.h>
#include <math.h>
#include <math_constants.h>
#include <iostream>
#include <time.h>
//////////////////////////////////////////////////////////////////////////
///////////////////////DECLARACIÓN DE FUNCIONES///////////////////////////
//////////////////////////////////////////////////////////////////////////
void vector_entrada_xn(int Li);
void arreglo_W(int N);
void asign_rap(int N,int Li,int Lo);
void factor(int N);
void product(int vector_1[50],int vector_2[50],int valor);
void etapa_entrada(void);
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y);
void etapa_intermedia(void);
void etapa_salida(void);
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X);
//////////////////////////////////////////////////////////////////////////
/////////////////////DECLARACIÓN DE VARIABLES GLOBALES////////////////////
//////////////////////////////////////////////////////////////////////////
cuFloatComplex *x_host;
cuFloatComplex *W_host;
//cuFloatComplex *y_host;
//cuFloatComplex *z_host;
cuFloatComplex *X_host;
cuFloatComplex *x_device;
cuFloatComplex *W_device;
cuFloatComplex *y_device;
cuFloatComplex *z_device;
cuFloatComplex *X_device;
cufftComplex *in,*out;
FILE *db_open,*dc_open;
int Dip,Dop,P,N,Li,Lo;
int vF[50]; //Almacena los factores de N
int svF; //Almacena el numero de factores de N
int Prod[50];
int a;
#define inf 99999
//////////////////////////////////////////////////////////////////////////
//////////////////////////DATOS DE ENTRADA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// N >>> Número de elementos del vector de entrada
/// Li >>> Número de elementos de entrada diferentes de cero
/// Lo >>> Número de elementos de salida requeridos
/// loop >>> Número de iteraciones
/// muestras >>> Número de muestras
//////////////////////////////////////////////////////////////////////////
///////////////////////////DATOS DE SALIDA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// X >>> Vector de salida
//////////////////////////////////////////////////////////////////////////
/////////////////// SE INGRESAN LOS DATOS DE ENTRADA /////////////////////
//////////////////////////////////////////////////////////////////////////
///Ingrese el número de iteraciones requeridas
const int loop = 300;
///Ingrese el valor de N_max
const int N_max = 13;
///Ingrese el valor de Li_max
const int Li_max = 43;
//////////////////////////////////////////////////////////////////////////
//////////////////////////FUNCION PRINCIPAL///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Función principal
int main()
{
int i,j,i_N,j_res,k_res,cont,i_prom;
float suma;
float promedio[N_max];
//////////////////////////////////////////////////////////////////////////
//////////////////////////SELECCIÓN DEL DEVICE////////////////////////////
//////////////////////////////////////////////////////////////////////////
int device;
FILE *da;
cudaSetDevice(1);
cudaGetDevice(&device);
if(device == 0)
{
printf("\n\n---DEVICE = GeForce GTX 970---\n\n");
da = fopen("Tiempos_N13_Li43_LoVARIA_CUDA_GTX970.bin","a+b"); //Crea o sobre escribe archivo
}
if(device == 1)
{
printf("\n\n---DEVICE = TESLA K20---\n\n");
da = fopen("Tiempos_N13_Li43_LoVARIA_CUDA_TESLAK20c.bin","a+b"); //Crea o sobre escribe archivo
}
//Pausa
printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n");
getchar();
for(i_N = N_max;i_N <= N_max;i_N++)
{
N = (int )pow(3,i_N);
printf("\n N = %d \n",N);
for(j_res=Li_max;j_res <= Li_max;j_res++)
{
Li=j_res;
for(k_res=1;k_res <= N_max;k_res++)
{
Lo=(int )pow(3,k_res);
printf("\n Li = %d Lo = %d",Li,Lo);
///Se abre el archivo binario
db_open = fopen("Entrada_real_N13_C.bin","rb");
dc_open = fopen("Entrada_imag_N13_C.bin","rb");
suma=0.0;
for(j=0;j<loop;j++)
{
//Comandos necesarios para medir el tiempo
float elapsedTime_app;
cudaEvent_t start_app, stop_app;
cudaEventCreate(&start_app);
cudaEventCreate(&stop_app);
//Se generan en el host los valores del vector de entrada x[n]
vector_entrada_xn(Li);
///Se genera el arreglo W[N]
arreglo_W(N);
//Se generan en el host los factores Dip y Dop
asign_rap(N,Li,Lo);
//Cálculo en el host del factor P
P = N/(Dip*Dop);
//printf("\n\n FACTOR P:\n\n");
//printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
//---------------------------------------------------------------------------------------------
//Se empieza a medir el tiempo de ejecucion de la aplicacion
cudaEventRecord(start_app,0);
//Función auxiliar del host para ejecutar la etapa de entrada
etapa_entrada();
//Función auxiliar del host para ejecutar la etapa intermedia
etapa_intermedia();
//Función auxiliar del host para ejecutar la etapa de salida
etapa_salida();
//---------------------------------------------------------------------------------------------
//Comandos necesarios para medir el tiempo de la aplicacion (app)
cudaEventRecord(stop_app,0);
cudaEventSynchronize(stop_app);
cudaEventElapsedTime(&elapsedTime_app,start_app,stop_app);
//Suma de todos los tiempos
suma = suma + elapsedTime_app;
//Se destruyen los eventos que miden el tiempo de la aplicacion
cudaEventDestroy(start_app);
cudaEventDestroy(stop_app);
//Se liberan memorias del Host y Device
free(x_host);
free(W_host);
free(X_host);
cudaFree(x_device);
cudaFree(W_device);
cudaFree(y_device);
cudaFree(z_device);
cudaFree(X_device);
}
promedio[k_res-1] = suma/(float)loop;
fclose(db_open);
fclose(dc_open);
}
}
}
fwrite(promedio,sizeof(float),N_max,da);
fclose(da);
return EXIT_SUCCESS;
}
//////////////////////////////////////////////////////////////////////////
/////////////////////////FUNCIONES SECUNDARIAS////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Ésta función genera el vector de entrada x[n]
void vector_entrada_xn(int Li)
{
//Declaración de variables locales
int k;
float *buffer_real,*buffer_imag;
//Se reserva memoria para xn_host en el host
x_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Li);
buffer_real = (float*)malloc(sizeof(float)*(pow(3,N_max)));
buffer_imag = (float*)malloc(sizeof(float)*(pow(3,N_max)));
///Se lee el vector de entrada del archivo binario
fread(buffer_real,sizeof(float),(int)pow(3,N_max),db_open);
fread(buffer_imag,sizeof(float),(int)pow(3,N_max),dc_open);
//Se dan valores a x[n]
for(k = 0;k < Li; k++)
{
//x_host[k] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%11));
//x_host[k] = make_cuFloatComplex((float)(k + 1),(float)(0.0));
x_host[k] = make_cuFloatComplex(buffer_real[k],buffer_imag[k]);
}
/*
//Se imprimen los valores de entrada x[n]
printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
for(k=0;k<Li;k++)
{
printf(" %d-> (%f) + (%f)\n",k+1,cuCrealf(x_host[k]),cuCimagf(x_host[k]));
}
*/
free(buffer_real);
free(buffer_imag);
}
//Ésta función genera el arreglo W
void arreglo_W(int N)
{
//Declaración de variables locales
int n;
//Se reserva memoria para W_host en el host
W_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N);
//Se genera el arreglo W
for(n = 1;n <= N;n++)
{
W_host[n-1] = make_cuFloatComplex((float)cos((2*CUDART_PI*n)/N),(float)(-1)*sin((2*CUDART_PI*n)/N));
}
/*
//Se imprimen los valores del arreglo W[N]
printf("\n---ARREGLO W[N]---\n\n");
for(n = 0;n < N; n++)
{
printf(" W[%d]-> (%f) + (%f)\n",n+1,cuCrealf(W_host[n]),cuCimagf(W_host[n]));
}
*/
}
//Ésta función genera los factores Dip y Dop
void asign_rap(int N,int Li,int Lo)
{
//Declaración de variables locales
float NLi,NLo,Diprapt,Doprapt;
int Nh[50];
int k[50];
int G;
int g,i,t,ta;
int Dipt[50],Dopt[50];
float distrapt,distrap;
int Pos,h,Poss;
int nk[50];
int r;
//Inicializaciones
G = 0;
svF = 0;
//Factores Dip y Dop ideales
NLi=(float)N/(float)Li;
NLo=(float)N/(float)Lo;
Diprapt=NLi;
Doprapt=NLo;
//Se encuentran los factores de "N"
//vF almacena los factores de "N"
//svF almacena el número de factores de "N"
factor(N);
/*
Almacena en el vector Nh los factores que son diferentes de del vector vF
En el vector k se almacena la cantidad de veces que se repite cada
elemento almacenado en el vector Nh.
*/
Nh[0] = vF[0];
k[0]=1;
for(g=1;g<=svF-1;g=g+1)
{
if(vF[g]!=vF[g-1])
{
G=G+1;
Nh[G]=vF[g];
k[G]=1;
}
else
{
k[G]=k[G]+1;
}
}
/*
Almacena en el vector Nh todas las posibles combinaciones que den como
producto a N. t almacena el numero de elementos del vector Nh.
*/
product(Nh,k,G);
t = a;
for(i=0;i<t;i=i+1)
{
Dipt[i]=Prod[i];
}
distrapt=inf;
for(g=1;g<=t;g=g+1)
{
if(Dipt[g-1]<=NLi)
{
Pos=g-1;
for(h=0;h<=G;h=h+1)
{
Poss=floor(Pos/(k[h]+1));
nk[h]=k[h]+Poss*(k[h]+1)-Pos;
Pos=Poss;
}
product(Nh,nk,G);
ta=a;
for(i=0;i<ta;i=i+1)
{
Dopt[i]=Prod[i];
}
////////////////////////////////////////////
//int j;
//for(j=0;j<ta;j++)
//{
// printf(" %d ",Dopt[j]);
//}
//printf("\n\n ta=%d\n\n",ta);
///////////////////////////////////////////
for(r=0;r<ta;r=r+1)
{
distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2));
if(distrap<distrapt)
{
distrapt=distrap;
Dip=Dipt[g-1];
Dop=Dopt[r];
}
}
}
}
/*
printf("\n\n FACTOR Dip :\n\n");
printf(" %d ",Dip);
printf("\n\n FACTOR Dop:\n\n");
printf(" %d ",Dop);
*/
}
//Ésta función encuentra los factores de "N"
void factor(int N)
{
//Se empieza a verificar los factores desde 2
int i=2;
long N_factor;
N_factor = N;
while(i<=N_factor)
{
while((N_factor%i)==0)
{
vF[svF]=i;
N_factor=N_factor/i;
// printf("Factores: %d ",vF[svF]);
svF++;
}
i++;
}
}
//Ésta función encuentra todas las posibles combinaciones de factores que den como resultado "N"
void product(int vector_1[50],int vector_2[50],int valor)
{
int d,e,s,pNh,i;
int cont=0;
Prod[0]=1;
a=1;
for(d=0;d<=valor;d=d+1)
{
s=a;
pNh=1;
for(e=1;e<=vector_2[d];e=e+1)
{
pNh=pNh*vector_1[d];
for(i=(s*e+1);i<=(s*e+s);i=i+1)
{
Prod[i-1]=pNh*Prod[cont];
cont=cont+1;
}
a=a+s;
cont=0;
}
}
}
//Función auxiliar del host para calcular la etapa de entrada en el device
void etapa_entrada(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////INPUT STAGE///////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaration of local variables
int k1,n1,n2;
//Memory allocation on the device for the "x_device" array
cudaMalloc((void**)&x_device,Li*sizeof(cuFloatComplex));
//Memory is allocated on the device for the "W_device" array
cudaMalloc((void**)&W_device,N*sizeof(cuFloatComplex));
//Memory allocation on the device for the "y" array
cudaMalloc((void**)&y_device,P*Dip*Dop*sizeof(cuFloatComplex));
//The x_host array is copied to x_device
cudaMemcpy(x_device,x_host,Li*sizeof(cuFloatComplex),cudaMemcpyHostToDevice);
//The W array is sent to the device global memory
cudaMemcpy(W_device,W_host,N*sizeof(cuFloatComplex),cudaMemcpyHostToDevice);
//Memory allocation on the host for "y"
//y_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Grid dimensioning for the "inputStage" kernel function
//Grid dimensioning
dim3 gridDim(1,1,1);
//Block dimensioning
dim3 blockDim(1,1,1);
if((P*Dop) < 32 && (Dip) < 32)
{
blockDim.x = (P*Dop);
blockDim.y = (Dip);
gridDim.x = 1;
gridDim.y = 1;
}
else
{
blockDim.x = 32;
blockDim.y = 32;
gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x));
gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y));
}
//Launch of the "inputStage_kernel" kernel
inputStage_kernel<<<gridDim,blockDim>>>(N,Li,Dip,Dop,P,x_device,W_device,y_device);
//Wait for the kernel to finish executing completely
cudaDeviceSynchronize();
/*
//Copy of the "y" array from the device to the host
cudaMemcpy(y_host,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost);
//The values of "y" are printed
printf("\n\n--- ARRAY y(n1,n2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(n2 = 0;n2 < P;n2++)
{
printf(" (%f) + (%f) ",cuCrealf(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimagf(y_host[(k1*Dop*P)+(n1*P)+n2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
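//Note: the launch configuration above assigns one thread to every (n,k1) pair, with n in
//[0,P*Dop) along x and k1 in [0,Dip) along y; 32x32 blocks are used as soon as either extent
//reaches 32.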
//Kernel function that executes the input stage on the device
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y)
{
int n1,n2;
cuFloatComplex t1;
//Threads
int n = blockDim.x *blockIdx.x + threadIdx.x;
int k1 = blockDim.y *blockIdx.y + threadIdx.y;
//The flags are reset
//flag_inputstage_1_d[0] = 0;
//flag_inputstage_2_d[0] = 0;
//flag_inputstage_3_d[0] = 0;
//printf("\n n = %d k1 = %d",n,k1);
if( (n < (P*Dop)) && (k1 < Dip))
{
n2 = floorf(n/Dop);
n1 = n - (Dop*n2);
//Generation of the elements that depend on x[0]
if(n == 0)
{
y[(k1*Dop*P)+(0*P)+ 0] = x[0];
///Flag
//flag_inputstage_1_d[0] = 1;
}
//Mapping of x[n] to the inputs of the first set of Dop DFT's
if((n >= 1) && (n <= (Li-1)))
{
t1 = x[n];
if(k1 == 0)
{
y[(0*Dop*P)+(n1*P)+ n2] = t1;
}
if(k1 >= 1)
{
y[(k1*Dop*P)+(n1*P)+ n2] = cuCmulf(W[((n*k1)%N)-1],t1);
}
///Flag
//flag_inputstage_2_d[0] = 1;
}
//Zero padding of the "y" elements for Li <= n <= (P*Dop)-1
if((n >= Li) && (n <= (P*Dop)-1))
{
y[(k1*Dop*P)+(n1*P)+ n2] = make_cuFloatComplex(0.0,0.0);
///Flag
//flag_inputstage_3_d[0] = 1;
}
//printf("\n (%f) + (%f)\n ",cuCrealf(y[(k1*Dop*P)+(n1*P)+ n2]),cuCimagf(y[(k1*Dop*P)+(n1*P)+ n2]));
}
}
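//Note: inputStage_kernel scatters the Li input samples into the Dip x Dop x P array y(n1,n2,k1),
//multiplying by the twiddle factor W[((n*k1)%N)-1] when k1 >= 1 and zero-padding the positions
//with n >= Li.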
//Host helper function to compute the intermediate stage on the device
void etapa_intermedia(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////INTERMEDIATE STAGE////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaration of local variables
int k1,k2,n1;
int n[1] = {P};
int inembed[1] = {P};
int onembed[1] = {P};
//Memory allocation on the device for "z"
cudaMalloc((void**)&z_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Memory allocation on the host for "z"
//z_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Memory allocation on the device for "in" and "out"
cudaMalloc((void**)&in,sizeof(cufftComplex)*P*Dip*Dop);
cudaMalloc((void**)&out,sizeof(cufftComplex)*P*Dip*Dop);
//The "y" array is copied into the "in" array
cudaMemcpy(in,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice);
//A plan is created
cufftHandle plan;
cufftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,CUFFT_C2C,Dip*Dop);
//Execution of the plan
cufftExecC2C(plan,in,out,CUFFT_FORWARD);
//Wait for the kernel to finish executing completely
cudaDeviceSynchronize();
//The data of the "out" array are copied into the "z_device" array
cudaMemcpy(z_device,out,sizeof(cufftComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice);
//The plan is destroyed
cufftDestroy(plan);
//The "in" and "out" arrays are freed
cudaFree(in);
cudaFree(out);
/*
//The data of the "z_device" array are copied into the "z_host" array
cudaMemcpy(z_host,z_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost);
///The values of z(n1,k2,k1) are printed
printf("\n\n--- ARRAY z(n1,k2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(k2 = 0;k2 < P;k2++)
{
printf(" (%f) + (%f) ",cuCrealf(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimagf(z_host[(k1*Dop*P)+(n1*P)+k2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
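//Note: cufftPlanMany is configured for Dip*Dop independent 1-D C2C transforms of length P, each
//stored contiguously (stride 1) and separated by a distance of P elements, which matches the
//y(n1,n2,k1) layout produced by the input stage.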
//Host helper function to compute the output stage on the device
void etapa_salida(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////OUTPUT STAGE//////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaration of local variables
int m;
//Memory allocation on the device for "X"
cudaMalloc((void**)&X_device,Lo*sizeof(cuFloatComplex));
//Memory allocation on the host for "X"
X_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Lo);
//Grid dimensioning for the "outputStage" kernel function
//Grid dimensioning
dim3 gridDim(1,1,1);
//Block dimensioning
dim3 blockDim(1,1,1);
if((Lo) < 1024)
{
blockDim.x = Lo;
gridDim.x = 1;
}
else
{
blockDim.x = 1024;
gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x));
}
//Launch of the "outputStage_kernel" kernel
outputStage_kernel<<<gridDim,blockDim>>>(N,Lo,Dip,Dop,P,z_device,W_device,X_device);
//Wait for the kernel to finish executing completely
cudaDeviceSynchronize();
//Copy of the "X" array from the device to the host
cudaMemcpy(X_host,X_device,sizeof(cuFloatComplex)*Lo,cudaMemcpyDeviceToHost);
/*
//The values of "X_host" are printed
///Print X[k]
printf("\n\n--- ARRAY X[k] ---\n\n");
for(m=0;m<=Lo-1;m++)
{
printf("\n X[%d] = %.4f + (%.4f)",m,cuCrealf(X_host[m]),cuCimagf(X_host[m]));
//fprintf(da,"%.4f %.4f\n",creal(X[i]),cimag(X[i]));
}
*/
}
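//Note: the output stage launches one thread per output sample X[k], using 1-D blocks of at most
//1024 threads.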
//Kernel function that executes the output stage on the device
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X)
{
//Declaration of local variables
int n1,k_aux,k1,k2,a,b;
cuFloatComplex t1,t2,t3,t4,t5;
//Threads
int k = blockDim.x *blockIdx.x + threadIdx.x;
//The flags are reset
//flag_outputstage_1_d[0] = 0;
//flag_outputstage_2_d[0] = 0;
//flag_outputstage_3_d[0] = 0;
if(k < Lo)
{
for(n1 = 0; n1 <= (Dop-1); n1 = n1+1)
{
if(Lo <= Dip)
{
//Computation of X(k) for 0<=k<=Lo-1.
//printf("\n--- Case (Lo <= Dip) ---\n");
//In the decomposition k = k1 + Dip*k2; k2 = 0, and therefore k = k1
if(n1 == 0) //Case to ensure this branch is entered at least once
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
///Flag
//flag_outputstage_1_d[0] = 1;
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
///Flag
//flag_outputstage_1_d[0] = 1;
}
}
else
{
if((k >= 0) && (k <= (Dip-1)))
{
//Computation of X(k) for 0<=k<=Dip-1.
//In the decomposition k = k1 + Dip*k2; k2 = 0, and therefore k = k1
if(n1 == 0) //Case to ensure this branch is entered at least once
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
}
}
else
{
if(Dop <= 4)
{
//Using the direct method
//printf("\n--- Case (Direct method) ---\n");
if(n1 == 0) //Case to ensure this branch is entered at least once
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
///Flag
//flag_outputstage_2_d[0] = 1;
}
else
{
if(n1 == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
}
a = floorf(k/(Dip*P));
X[k] = cuCaddf(X[k],cuCmulf(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1]));
///Flag
//flag_outputstage_2_d[0] = 1;
}
}
else
{
//Using the 2BF filtering method
//printf("\n--- Case (2BF filter) ---\n");
if((Dop-2) >= 1)
{
if(n1 == 0)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
///Flag
//flag_outputstage_3_d[0] = 1;
}
if((n1 >= 1) && (n1 <= (Dop-2)))
{
t2 = t1;
t1 = cuCaddf(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4);
t3 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t4 = cuCsubf(t3,t2);
}
if(n1 == (Dop-1))
{
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
}
}
else
{
if(Dop == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
X[k] = t1;
///Flag
//flag_outputstage_3_d[0] = 1;
}
else
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
///Flag
//flag_outputstage_3_d[0] = 1;
}
}
}
}
}
}
}
}
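//Note: for Dop > 4 the kernel above evaluates each output bin with the "2BF" filtering
//recurrence (t3 = 2*Re(W)*t1, t4 = t3 - t2), which resembles a Goertzel-style second-order
//resonator; for Dop <= 4 the partial DFT outputs are combined directly with explicit twiddles.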
|
d8b0b193e167e8add38fec60639bcf7b768d1eb0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Vector-Matrix multiplication: Y = A * X.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include "vec_mat_mult.h"
/* Write the kernel for vector-matrix multiplication using GPU global memory. */
__global__ void vec_mat_kernel_naive(float *Ad, float *Xd, float *Yd)
{
//Multiply A and X
double tmp = 0.0;
int i;
int product=blockDim.x*blockIdx.x+threadIdx.x;
for(i=0;i<MATRIX_SIZE;i++){
double A=Ad[MATRIX_SIZE*product+i];
double X=Xd[i];
tmp+=A*X;
}
Yd[product]=(float)tmp;
}
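/* Note: the naive kernel assigns one thread per row of Ad (one element of Yd) and accumulates
the dot product in double precision; since there is no bounds check on `product`, the launch
is assumed not to create more threads than there are rows. */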
/* Write the kernel for vector-matrix multiplication using GPU shared memory. */
__global__ void vec_mat_kernel_optimized(Matrix A, Matrix X, Matrix Y)
{
__shared__ float sharedA[TILE_SIZE][TILE_SIZE];
__shared__ float sharedX[TILE_SIZE][TILE_SIZE];
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = (blockDim.y * blockIdx.y + ty);
int column = blockDim.x * blockIdx.x + tx;
int i = 0;
int temp;
double YElement = 0.0f;
while(i < A.num_columns){
if(i + tx < A.num_columns && row < A.num_rows){
sharedA[ty][tx] = A.elements[row*A.num_columns + i + tx];
}
else{
sharedA[ty][tx] = 0.0f;
}
if(i + threadIdx.y < X.num_rows && column < X.num_columns){
sharedX[ty][tx] = X.elements[(i+ty)*X.num_columns + column];
}
else{
sharedX[ty][tx] = 0.0f;
}
__syncthreads();
for(temp = 0; temp < TILE_SIZE; temp++){
YElement += sharedA[ty][temp] * sharedX[temp][tx];
}
__syncthreads();
i += TILE_SIZE;
}
if(column < Y.num_columns && row < Y.num_rows){
Y.elements[row*Y.num_columns + column] = (float)YElement;
}
return;
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
| d8b0b193e167e8add38fec60639bcf7b768d1eb0.cu | /* Vector-Matrix multiplication: Y = A * X.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include "vec_mat_mult.h"
/* Write the kernel for vector-matrix multiplication using GPU global memory. */
__global__ void vec_mat_kernel_naive(float *Ad, float *Xd, float *Yd)
{
//Multiply A and X
double tmp = 0.0;
int i;
int product=blockDim.x*blockIdx.x+threadIdx.x;
for(i=0;i<MATRIX_SIZE;i++){
double A=Ad[MATRIX_SIZE*product+i];
double X=Xd[i];
tmp+=A*X;
}
Yd[product]=(float)tmp;
}
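/* Note: the naive kernel assigns one thread per row of Ad (one element of Yd) and accumulates
the dot product in double precision; since there is no bounds check on `product`, the launch
is assumed not to create more threads than there are rows. */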
/* Write the kernel for vector-matrix multiplication using GPU shared memory. */
__global__ void vec_mat_kernel_optimized(Matrix A, Matrix X, Matrix Y)
{
__shared__ float sharedA[TILE_SIZE][TILE_SIZE];
__shared__ float sharedX[TILE_SIZE][TILE_SIZE];
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = (blockDim.y * blockIdx.y + ty);
int column = blockDim.x * blockIdx.x + tx;
int i = 0;
int temp;
double YElement = 0.0f;
while(i < A.num_columns){
if(i + tx < A.num_columns && row < A.num_rows){
sharedA[ty][tx] = A.elements[row*A.num_columns + i + tx];
}
else{
sharedA[ty][tx] = 0.0f;
}
if(i + threadIdx.y < X.num_rows && column < X.num_columns){
sharedX[ty][tx] = X.elements[(i+ty)*X.num_columns + column];
}
else{
sharedX[ty][tx] = 0.0f;
}
__syncthreads();
for(temp = 0; temp < TILE_SIZE; temp++){
YElement += sharedA[ty][temp] * sharedX[temp][tx];
}
__syncthreads();
i += TILE_SIZE;
}
if(column < Y.num_columns && row < Y.num_rows){
Y.elements[row*Y.num_columns + column] = (float)YElement;
}
return;
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
|
734001f4f3af1c4741f94519499540d10c155ed9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
#include "matrixmul.h"
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
// Matrix multiplication kernel thread specification
// with shared memory
__global__ void MatrixMulKernel_SharedMemory(Matrix M, Matrix N, Matrix P)
{
const int TILEWIDTH = 32;
const int SUMWIDTH = M.width;
int Col = blockDim.x * blockIdx.x + threadIdx.x;
int Row = blockDim.y * blockIdx.y + threadIdx.y;
__shared__ float M_tile[TILEWIDTH][TILEWIDTH];
__shared__ float N_tile[TILEWIDTH][TILEWIDTH];
float Pelement = 0;
int npart = int(SUMWIDTH/TILEWIDTH);
if (SUMWIDTH%TILEWIDTH >0) npart++;
for(int part = 0; part<npart; part++){
// copy from global memory to shared memory Row<M.height && Col<N.width &&
if ( Row<M.height && (threadIdx.x + part * TILEWIDTH) < SUMWIDTH ) {
int Melement_idx_forcopy = Row * M.width + (threadIdx.x + part * TILEWIDTH);
M_tile[threadIdx.y][threadIdx.x] = M.elements[Melement_idx_forcopy];
}
else{
M_tile[threadIdx.y][threadIdx.x] = 0;
}
if ( Col<N.width && (threadIdx.y + part*TILEWIDTH) < SUMWIDTH ){
int Nelement_idx_forcopy = Col + N.width * (threadIdx.y + part * TILEWIDTH);
N_tile[threadIdx.y][threadIdx.x] = N.elements[Nelement_idx_forcopy];
}
else{
N_tile[threadIdx.y][threadIdx.x] = 0;
}
__syncthreads();
// Calculate partial results && (part * TILEWIDTH + k) < SUMWIDTH
for ( int k = 0; (k < TILEWIDTH ) ; ++k){
Pelement += M_tile[threadIdx.y][k] * N_tile[k][threadIdx.x];
}
__syncthreads();
}
if ( Col < P.width && Row < P.height ){
P.elements[ Row * P.width + Col ] = Pelement;
}
}
// without shared memory
__global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P)
{
int Col = blockDim.x * blockIdx.x + threadIdx.x;
int Row = blockDim.y * blockIdx.y + threadIdx.y;
// check if the Pelement is inside P matrix
bool INRANGE = false;
if ( (Col < P.width) && (Row < P.height)) {
INRANGE = true;
}
// Calculate Pelement if InRange is True
if (INRANGE){
int SUMWIDTH = M.width; // == N.height
float Pelement = 0;
for (int k = 0; k < SUMWIDTH; ++k) {
float Melement = M.elements[ Row * M.width + k ];
float Nelement = N.elements[ Col + N.width * k ];
Pelement += Melement * Nelement;
}
P.elements[ Row * P.width + Col ] = Pelement;
}
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
| 734001f4f3af1c4741f94519499540d10c155ed9.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
#include "matrixmul.h"
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
// Matrix multiplication kernel thread specification
// with shared memory
__global__ void MatrixMulKernel_SharedMemory(Matrix M, Matrix N, Matrix P)
{
const int TILEWIDTH = 32;
const int SUMWIDTH = M.width;
int Col = blockDim.x * blockIdx.x + threadIdx.x;
int Row = blockDim.y * blockIdx.y + threadIdx.y;
__shared__ float M_tile[TILEWIDTH][TILEWIDTH];
__shared__ float N_tile[TILEWIDTH][TILEWIDTH];
float Pelement = 0;
int npart = int(SUMWIDTH/TILEWIDTH);
if (SUMWIDTH%TILEWIDTH >0) npart++;
for(int part = 0; part<npart; part++){
// copy from global memory to shared memory Row<M.height && Col<N.width &&
if ( Row<M.height && (threadIdx.x + part * TILEWIDTH) < SUMWIDTH ) {
int Melement_idx_forcopy = Row * M.width + (threadIdx.x + part * TILEWIDTH);
M_tile[threadIdx.y][threadIdx.x] = M.elements[Melement_idx_forcopy];
}
else{
M_tile[threadIdx.y][threadIdx.x] = 0;
}
if ( Col<N.width && (threadIdx.y + part*TILEWIDTH) < SUMWIDTH ){
int Nelement_idx_forcopy = Col + N.width * (threadIdx.y + part * TILEWIDTH);
N_tile[threadIdx.y][threadIdx.x] = N.elements[Nelement_idx_forcopy];
}
else{
N_tile[threadIdx.y][threadIdx.x] = 0;
}
__syncthreads();
// Cacluate partial results && (part * TILEWIDTH + k) < SUMWIDTH
for ( int k = 0; (k < TILEWIDTH ) ; ++k){
Pelement += M_tile[threadIdx.y][k] * N_tile[k][threadIdx.x];
}
__syncthreads();
}
if ( Col < P.width && Row < P.height ){
P.elements[ Row * P.width + Col ] = Pelement;
}
}
// without shared memory
__global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P)
{
int Col = blockDim.x * blockIdx.x + threadIdx.x;
int Row = blockDim.y * blockIdx.y + threadIdx.y;
// check if the Pelement is inside P matrix
bool INRANGE = false;
if ( (Col < P.width) && (Row < P.height)) {
INRANGE = true;
}
// Calculate Pelement if InRange is True
if (INRANGE){
int SUMWIDTH = M.width; // == N.height
float Pelement = 0;
for (int k = 0; k < SUMWIDTH; ++k) {
float Melement = M.elements[ Row * M.width + k ];
float Nelement = N.elements[ Col + N.width * k ];
Pelement += Melement * Nelement;
}
P.elements[ Row * P.width + Col ] = Pelement;
}
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
|
6d3a5839934df1af571bc56f3605393d50ce9fcd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"common_structs.h"
//#include"imageOperations.h"
#include<stdio.h>
#include<iostream>
#define BLOCK_WIDTH 10
#define BLOCK_HEIGHT 60
using namespace std;
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define cudaCheckErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line)
{
if (hipSuccess != err)
{
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
__global__ void cropImageFunctionCuda(tUInt_8* input, tUInt_8* output, tUInt_32 InputPitch, tUInt_32 outputPitch, tUInt_32 offsetX, tUInt_32 offsetY,tUInt_32 channels) {
tUInt_32 idxValX = blockDim.x * blockIdx.x + threadIdx.x;
tUInt_32 idxValY = blockDim.y * blockIdx.y + threadIdx.y;
tUInt_32 inputIdx = (idxValY + offsetY) * InputPitch + (idxValX + offsetX) * channels;
tUInt_32 outputIdx = idxValY * outputPitch + idxValX * channels;
output[outputIdx] = input[inputIdx];
output[outputIdx + 1] = input[inputIdx + 1];
output[outputIdx + 2] = input[inputIdx + 2];
}
extern void cropImageFunctionHost(tUInt_8* inputBuffer, tUInt_8* outputBuffer, IMAGE_INFO inputImageInfo, IMAGE_INFO outputImageInfo, tUInt_32 offsetX, tUInt_32 offsetY, tUInt_8** d_cropOutImage) {
tUInt_8* d_inputImage;
size_t sizeInput = inputImageInfo.width * inputImageInfo.height * inputImageInfo.channnels * sizeof(tUInt_8);
size_t sizeOutput = outputImageInfo.width * outputImageInfo.height * outputImageInfo.channnels * sizeof(tUInt_8);
tUInt_32 inputPitch = inputImageInfo.width * inputImageInfo.channnels;
tUInt_32 outputPitch = outputImageInfo.width * outputImageInfo.channnels;
cudaCheckErrors(hipMalloc(&d_inputImage, sizeInput));
cudaCheckErrors(hipMalloc(d_cropOutImage, sizeOutput));
cudaCheckErrors(hipMemcpy(d_inputImage, inputBuffer, sizeInput, hipMemcpyHostToDevice));
dim3 threadsPerBlock(10, 6);
dim3 numBlocks(outputImageInfo.width / threadsPerBlock.x, outputImageInfo.height / threadsPerBlock.y);
cropImageFunctionCuda<<<numBlocks, threadsPerBlock, 0>> >(d_inputImage, *d_cropOutImage, inputPitch, outputPitch, offsetX, offsetY, inputImageInfo.channnels);
cudaCheckErrors(hipMemcpy(outputBuffer, *d_cropOutImage, sizeOutput, hipMemcpyDeviceToHost));
cudaCheckErrors(hipFree(d_inputImage));
}
void temp_func() {
cout<<"SUccccesssssssssssssssssssssssssssssssss"<<endl;
// temp code
}
| 6d3a5839934df1af571bc56f3605393d50ce9fcd.cu | #include"common_structs.h"
//#include"imageOperations.h"
#include<stdio.h>
#include<iostream>
#define BLOCK_WIDTH 10
#define BLOCK_HEIGHT 60
using namespace std;
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define cudaCheckErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line)
{
if (cudaSuccess != err)
{
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
__global__ void cropImageFunctionCuda(tUInt_8* input, tUInt_8* output, tUInt_32 InputPitch, tUInt_32 outputPitch, tUInt_32 offsetX, tUInt_32 offsetY,tUInt_32 channels) {
tUInt_32 idxValX = blockDim.x * blockIdx.x + threadIdx.x;
tUInt_32 idxValY = blockDim.y * blockIdx.y + threadIdx.y;
tUInt_32 inputIdx = (idxValY + offsetY) * InputPitch + (idxValX + offsetX) * channels;
tUInt_32 outputIdx = idxValY * outputPitch + idxValX * channels;
output[outputIdx] = input[inputIdx];
output[outputIdx + 1] = input[inputIdx + 1];
output[outputIdx + 2] = input[inputIdx + 2];
}
extern void cropImageFunctionHost(tUInt_8* inputBuffer, tUInt_8* outputBuffer, IMAGE_INFO inputImageInfo, IMAGE_INFO outputImageInfo, tUInt_32 offsetX, tUInt_32 offsetY, tUInt_8** d_cropOutImage) {
tUInt_8* d_inputImage;
size_t sizeInput = inputImageInfo.width * inputImageInfo.height * inputImageInfo.channnels * sizeof(tUInt_8);
size_t sizeOutput = outputImageInfo.width * outputImageInfo.height * outputImageInfo.channnels * sizeof(tUInt_8);
tUInt_32 inputPitch = inputImageInfo.width * inputImageInfo.channnels;
tUInt_32 outputPitch = outputImageInfo.width * outputImageInfo.channnels;
cudaCheckErrors(cudaMalloc(&d_inputImage, sizeInput));
cudaCheckErrors(cudaMalloc(d_cropOutImage, sizeOutput));
cudaCheckErrors(cudaMemcpy(d_inputImage, inputBuffer, sizeInput, cudaMemcpyHostToDevice));
dim3 threadsPerBlock(10, 6);
dim3 numBlocks(outputImageInfo.width / threadsPerBlock.x, outputImageInfo.height / threadsPerBlock.y);
cropImageFunctionCuda<<<numBlocks, threadsPerBlock, 0>> >(d_inputImage, *d_cropOutImage, inputPitch, outputPitch, offsetX, offsetY, inputImageInfo.channnels);
cudaCheckErrors(cudaMemcpy(outputBuffer, *d_cropOutImage, sizeOutput, cudaMemcpyDeviceToHost));
cudaCheckErrors(cudaFree(d_inputImage));
}
void temp_func() {
cout<<"SUccccesssssssssssssssssssssssssssssssss"<<endl;
// temp code
}
|
702a2be33c182ae7102d4594bfed0b7d90d3a000.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************
ucl_test_kernel.cu
--------------------
W. Michael Brown
Test code for UCL (vector add).
__________________________________________________________________________
This file is part of the Geryon Unified Coprocessor Library (UCL)
__________________________________________________________________________
begin : Thu Feb 11 2010
copyright : (C) 2010 by W. Michael Brown
email : [email protected]
***************************************************************************/
/* -----------------------------------------------------------------------
Copyright (2010) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the Simplified BSD License.
----------------------------------------------------------------------- */
#ifdef NV_KERNEL
#define DEV_PTR Scalar
#define GLOBAL_ID_X threadIdx.x+__mul24(blockIdx.x,blockDim.x)
#define __kernel extern "C" __global__
#else
#define DEV_PTR __global Scalar
#define GLOBAL_ID_X get_global_id(0)
#endif
__kernel void vec_add(DEV_PTR *a, DEV_PTR *b, DEV_PTR *ans)
{ Ordinal i=GLOBAL_ID_X; ans[i]=a[i]+b[i];}
| 702a2be33c182ae7102d4594bfed0b7d90d3a000.cu | /***************************************************************************
ucl_test_kernel.cu
--------------------
W. Michael Brown
Test code for UCL (vector add).
__________________________________________________________________________
This file is part of the Geryon Unified Coprocessor Library (UCL)
__________________________________________________________________________
begin : Thu Feb 11 2010
copyright : (C) 2010 by W. Michael Brown
email : [email protected]
***************************************************************************/
/* -----------------------------------------------------------------------
Copyright (2010) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the Simplified BSD License.
----------------------------------------------------------------------- */
#ifdef NV_KERNEL
#define DEV_PTR Scalar
#define GLOBAL_ID_X threadIdx.x+__mul24(blockIdx.x,blockDim.x)
#define __kernel extern "C" __global__
#else
#define DEV_PTR __global Scalar
#define GLOBAL_ID_X get_global_id(0)
#endif
__kernel void vec_add(DEV_PTR *a, DEV_PTR *b, DEV_PTR *ans)
{ Ordinal i=GLOBAL_ID_X; ans[i]=a[i]+b[i];}
|
0c2af9d8e9cf74d2fae847b89eb2bdaf91fc7e77.hip | // !!! This is a file automatically generated by hipify!!!
#include <cfloat>
#include <string>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "macro.h"
#include "type.h"
#include "util.h"
#include "redutil2.h"
#define VECTOR_FMT (%12.4le, %12.4le, %12.4le)
#define NDIM 3 // Number of space dimension
#define NVPO 6 // Number of variables per object (3 space and 3 velocity coordinates)
using namespace std;
namespace kernel
{
// 36 FLOP
inline __host__ __device__
void body_body_grav_accel(const var3_t& ri, const var3_t& rj, var_t mj, var3_t& ai)
{
// compute r_ij = r_j - r_i [3 FLOPS] [6 read, 3 write]
var3_t r_ij = { rj.x - ri.x, rj.y - ri.y, rj.z - ri.z };
// compute square of r_ij vector [5 FLOPS + ] [3 read, 1 write]
var_t d2 = SQR(r_ij.x) + SQR(r_ij.y) + SQR(r_ij.z);
var_t s = K2 * mj / (d2 * sqrt(d2));
// 6 FLOP
ai.x += s * r_ij.x;
ai.y += s * r_ij.y;
ai.z += s * r_ij.z;
} /* body_body_grav_accel() */
__global__
void calc_grav_accel_naive(uint32_t n_obj, const nbp_t::metadata_t* md, const var3_t* r, const nbp_t::param_t* p, var3_t* a)
{
// i is the index of the SINK body
const uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n_obj)
{
// j is the index of the SOURCE body
for (uint32_t j = 0; j < n_obj; j++)
{
if (i == j) continue;
body_body_grav_accel(r[i], r[j], p[j].mass, a[i]);
}
}
} /* calc_grav_accel_naive () */
__global__
void calc_grav_accel_naive(uint2_t snk, uint2_t src, const nbp_t::metadata_t* md, const var3_t* r, const nbp_t::param_t* p, var3_t* a)
{
// i is the index of the SINK body
const uint32_t i = snk.n1 + blockIdx.x * blockDim.x + threadIdx.x;
if (snk.n2 > i)
{
// j is the index of the SOURCE body
for (uint32_t j = src.n1; j < src.n2; j++)
{
if (i == j) continue;
body_body_grav_accel(r[i], r[j], p[j].mass, a[i]);
}
}
} /* calc_grav_accel_naive () */
__global__
void calc_grav_accel_tile(uint32_t n_obj, const nbp_t::metadata_t* md, const var3_t* r, const nbp_t::param_t* p, var3_t* a)
{
extern __shared__ var3_t sh_pos[];
const uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
var3_t acc = a[i];
var3_t my_pos;
// To avoid overruning the r buffer
if (n_obj > i)
{
my_pos = r[i];
}
// Note! : the for loop must be outside the upper if clause, otherwise the sh_pos array will
// not receive the input for the last tile! The reason is that some threads will not be considered
// in the if (n_obj > idx) clause.
for (uint32_t tile = 0; (tile * blockDim.x) < n_obj; tile++)
{
const uint32_t idx = tile * blockDim.x + threadIdx.x;
// To avoid overrunning the r and mass buffer
if (n_obj > idx)
{
sh_pos[threadIdx.x] = r[idx];
}
__syncthreads();
for (int j = 0; j < blockDim.x; j++)
{
// To avoid overrunning the input arrays
if (n_obj <= (tile * blockDim.x) + j)
break;
// To avoid self-interaction
if (i == (tile * blockDim.x) + j)
continue;
body_body_grav_accel(my_pos, sh_pos[j], p[(tile * blockDim.x) + j].mass, acc);
}
__syncthreads();
}
if (n_obj > i)
{
a[i] = acc;
}
}
__global__
void calc_grav_accel_tile(uint2_t snk, uint2_t src, const nbp_t::metadata_t* md, const var3_t* r, const nbp_t::param_t* p, var3_t* a)
{
extern __shared__ var3_t sh_pos[];
const uint32_t i = snk.n1 + blockIdx.x * blockDim.x + threadIdx.x;
var3_t acc = a[i];
var3_t my_pos;
// To avoid overrunning the r buffer
if (snk.n2 > i)
{
my_pos = r[i];
}
// Note! : the for loop must be outside the upper if clause, otherwise the sh_pos array will
// not receive the input for the last tile! The reason is that some threads will not be considered
// in the if (n_obj > idx) clause.
for (uint32_t tile = 0; (tile * blockDim.x) < src.n2; tile++)
{
const uint32_t idx = src.n1 + tile * blockDim.x + threadIdx.x;
// To avoid overrunning the r and mass buffer
if (src.n2 > idx)
{
sh_pos[threadIdx.x] = r[idx];
}
__syncthreads();
for (int j = 0; j < blockDim.x; j++)
{
// To avoid overrunning the input arrays
if (src.n2 <= src.n1 + (tile * blockDim.x) + j)
break;
// To avoid self-interaction
if (i == src.n1 + (tile * blockDim.x) + j)
continue;
body_body_grav_accel(my_pos, sh_pos[j], p[(tile * blockDim.x) + j].mass, acc);
}
__syncthreads();
}
if (snk.n2 > i)
{
a[i] = acc;
}
}
} /* namespace kernel */
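/* Note: the host wrappers below auto-tune the launch configuration: on the first call (or when
n_obj changes) they time block sizes from 16 up to maxThreadsPerBlock/2 in steps of 16 and keep
the fastest one; subsequent calls reuse the cached block size. The returned float2 packs the
chosen block size in .x and the elapsed time in milliseconds in .y. */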
float gpu_calc_grav_accel_naive(uint32_t n_obj, unsigned int n_tpb, hipEvent_t& start, hipEvent_t& stop, const nbp_t::metadata_t* md, const var3_t* r, const nbp_t::param_t* p, var3_t* a)
{
float elapsed_time = 0.0f;
dim3 grid((n_obj + n_tpb - 1) / n_tpb);
dim3 block(n_tpb);
CUDA_SAFE_CALL(hipEventRecord(start, 0));
hipLaunchKernelGGL(( kernel::calc_grav_accel_naive) , dim3(grid), dim3(block) , 0, 0, n_obj, md, r, p, a);
CUDA_CHECK_ERROR();
CUDA_SAFE_CALL(hipEventRecord(stop, 0));
CUDA_SAFE_CALL(hipEventSynchronize(stop));
// Computes the elapsed time between two events in milliseconds with a resolution of around 0.5 microseconds.
CUDA_SAFE_CALL(hipEventElapsedTime(&elapsed_time, start, stop));
return elapsed_time;
}
float gpu_calc_grav_accel_naive(uint2_t snk, uint2_t src, unsigned int n_tpb, hipEvent_t& start, hipEvent_t& stop, const nbp_t::metadata_t* md, const var3_t* r, const nbp_t::param_t* p, var3_t* a)
{
float elapsed_time = 0.0f;
dim3 grid(((snk.n2 - snk.n1) + n_tpb - 1) / n_tpb);
dim3 block(n_tpb);
CUDA_SAFE_CALL(hipEventRecord(start, 0));
hipLaunchKernelGGL(( kernel::calc_grav_accel_naive) , dim3(grid), dim3(block) , 0, 0, snk, src, md, r, p, a);
CUDA_CHECK_ERROR();
CUDA_SAFE_CALL(hipEventRecord(stop, 0));
CUDA_SAFE_CALL(hipEventSynchronize(stop));
// Computes the elapsed time between two events in milliseconds with a resolution of around 0.5 microseconds.
CUDA_SAFE_CALL(hipEventElapsedTime(&elapsed_time, start, stop));
return elapsed_time;
}
float gpu_calc_grav_accel_tile(uint32_t n_obj, unsigned int n_tpb, hipEvent_t& start, hipEvent_t& stop, const nbp_t::metadata_t* md, const var3_t* r, const nbp_t::param_t* p, var3_t* a)
{
float elapsed_time = 0.0f;
dim3 grid((n_obj + n_tpb - 1) / n_tpb);
dim3 block(n_tpb);
CUDA_SAFE_CALL(hipEventRecord(start, 0));
size_t sh_mem_size = n_tpb * sizeof(var3_t);
hipLaunchKernelGGL(( kernel::calc_grav_accel_tile) , dim3(grid), dim3(block), sh_mem_size , 0, n_obj, md, r, p, a);
CUDA_CHECK_ERROR();
CUDA_SAFE_CALL(hipEventRecord(stop, 0));
CUDA_SAFE_CALL(hipEventSynchronize(stop));
// Computes the elapsed time between two events in milliseconds with a resolution of around 0.5 microseconds.
CUDA_SAFE_CALL(hipEventElapsedTime(&elapsed_time, start, stop));
return elapsed_time;
}
float gpu_calc_grav_accel_tile(uint2_t snk, uint2_t src, unsigned int n_tpb, hipEvent_t& start, hipEvent_t& stop, const nbp_t::metadata_t* md, const var3_t* r, const nbp_t::param_t* p, var3_t* a)
{
float elapsed_time = 0.0f;
dim3 grid(((snk.n2 - snk.n1) + n_tpb - 1) / n_tpb);
dim3 block(n_tpb);
CUDA_SAFE_CALL(hipEventRecord(start, 0));
size_t sh_mem_size = n_tpb * sizeof(var3_t);
kernel::calc_grav_accel_tile << < grid, block, sh_mem_size >> >(snk, src, md, r, p, a);
CUDA_CHECK_ERROR();
CUDA_SAFE_CALL(hipEventRecord(stop, 0));
CUDA_SAFE_CALL(hipEventSynchronize(stop));
// Computes the elapsed time between two events in milliseconds with a resolution of around 0.5 microseconds.
CUDA_SAFE_CALL(hipEventElapsedTime(&elapsed_time, start, stop));
return elapsed_time;
}
float2 gpu_calc_grav_accel_naive(uint32_t n_obj, unsigned int max_n_tpb, const nbp_t::metadata_t* d_md, const var_t* d_y, const var_t* d_p, var_t* d_dy)
{
static bool first_call = true;
static uint32_t n_last;
static unsigned int opt_n_tpb;
float2 result = { 0.0f, FLT_MAX };
hipEvent_t start, stop;
CUDA_SAFE_CALL(hipEventCreate(&start));
CUDA_SAFE_CALL(hipEventCreate(&stop));
if (first_call)
{
n_last = n_obj;
opt_n_tpb = 16;
}
// Number of space and velocity coordinates
const uint32_t nv = NDIM * n_obj;
// Create aliases
const var3_t* r = (var3_t*)d_y;
const nbp_t::param_t* p = (nbp_t::param_t*)d_p;
var3_t* a = (var3_t*)(d_dy + nv);
if (first_call || n_last != n_obj)
{
for (unsigned int n_tpb = 16; n_tpb <= max_n_tpb/2; n_tpb += 16)
{
float elapsed_time = gpu_calc_grav_accel_naive(n_obj, n_tpb, start, stop, d_md, r, p, a);
printf(" %4d %12.4e [sec]\n", n_tpb, elapsed_time / 1.0e3);
if (elapsed_time < result.y)
{
result.x = (float)n_tpb;
result.y = elapsed_time; // [ms]
}
}
opt_n_tpb = (unsigned int)result.x;
n_last = n_obj;
}
else
{
float elapsed_time = gpu_calc_grav_accel_naive(n_obj, opt_n_tpb, start, stop, d_md, r, p, a);
result.x = (float)opt_n_tpb;
result.y = elapsed_time; // [ms]
}
first_call = false;
CUDA_SAFE_CALL(hipEventDestroy(stop));
CUDA_SAFE_CALL(hipEventDestroy(start));
return result;
}
float2 gpu_calc_grav_accel_naive(uint32_t n_obj, uint2_t snk, uint2_t src, unsigned int max_n_tpb, const nbp_t::metadata_t* d_md, const var_t* d_y, const var_t* d_p, var_t* d_dy)
{
static bool first_call = true;
static uint32_t n_last;
static unsigned int opt_n_tpb;
float2 result = { 0.0f, FLT_MAX };
hipEvent_t start, stop;
CUDA_SAFE_CALL(hipEventCreate(&start));
CUDA_SAFE_CALL(hipEventCreate(&stop));
if (first_call)
{
n_last = n_obj;
opt_n_tpb = 16;
}
// Number of space and velocity coordinates
const uint32_t nv = NDIM * n_obj;
// Create aliases
const var3_t* r = (var3_t*)d_y;
const nbp_t::param_t* p = (nbp_t::param_t*)d_p;
var3_t* a = (var3_t*)(d_dy + nv);
if (first_call || n_last != n_obj)
{
for (unsigned int n_tpb = 16; n_tpb <= max_n_tpb / 2; n_tpb += 16)
{
float elapsed_time = gpu_calc_grav_accel_naive(snk, src, n_tpb, start, stop, d_md, r, p, a);
printf(" %4d %12.4e [sec]\n", n_tpb, elapsed_time / 1.0e3);
if (elapsed_time < result.y)
{
result.x = (float)n_tpb;
result.y = elapsed_time; // [ms]
}
}
opt_n_tpb = (unsigned int)result.x;
n_last = n_obj;
}
else
{
float elapsed_time = gpu_calc_grav_accel_naive(snk, src, opt_n_tpb, start, stop, d_md, r, p, a);
result.x = (float)opt_n_tpb;
result.y = elapsed_time; // [ms]
}
first_call = false;
CUDA_SAFE_CALL(hipEventDestroy(stop));
CUDA_SAFE_CALL(hipEventDestroy(start));
return result;
}
float2 gpu_calc_grav_accel_tile(uint32_t n_obj, unsigned int max_n_tpb, const nbp_t::metadata_t* d_md, const var_t* d_y, const var_t* d_p, var_t* d_dy)
{
static bool first_call = true;
static uint32_t n_last;
static unsigned int opt_n_tpb;
float2 result = { 0.0f, FLT_MAX };
hipEvent_t start, stop;
CUDA_SAFE_CALL(hipEventCreate(&start));
CUDA_SAFE_CALL(hipEventCreate(&stop));
if (first_call)
{
n_last = n_obj;
opt_n_tpb = 16;
}
// Number of space and velocity coordinates
const uint32_t nv = NDIM * n_obj;
// Create aliases
const var3_t* r = (var3_t*)d_y;
const nbp_t::param_t* p = (nbp_t::param_t*)d_p;
var3_t* a = (var3_t*)(d_dy + nv);
if (first_call || n_last != n_obj)
{
for (unsigned int n_tpb = 16; n_tpb <= max_n_tpb / 2; n_tpb += 16)
{
float elapsed_time = gpu_calc_grav_accel_tile(n_obj, n_tpb, start, stop, d_md, r, p, a);
printf(" %4d %12.4e [sec]\n", n_tpb, elapsed_time / 1.0e3);
if (elapsed_time < result.y)
{
result.x = (float)n_tpb;
result.y = elapsed_time;
}
}
opt_n_tpb = (unsigned int)result.x;
n_last = n_obj;
}
else
{
float elapsed_time = gpu_calc_grav_accel_tile(n_obj, opt_n_tpb, start, stop, d_md, r, p, a);
result.x = (float)opt_n_tpb;
result.y = elapsed_time;
}
first_call = false;
CUDA_SAFE_CALL(hipEventDestroy(stop));
CUDA_SAFE_CALL(hipEventDestroy(start));
return result;
}
float2 gpu_calc_grav_accel_tile(uint32_t n_obj, uint2_t snk, uint2_t src, unsigned int max_n_tpb, const nbp_t::metadata_t* d_md, const var_t* d_y, const var_t* d_p, var_t* d_dy)
{
static bool first_call = true;
static uint32_t n_last;
static unsigned int opt_n_tpb;
float2 result = { 0.0f, FLT_MAX };
hipEvent_t start, stop;
CUDA_SAFE_CALL(hipEventCreate(&start));
CUDA_SAFE_CALL(hipEventCreate(&stop));
if (first_call)
{
n_last = n_obj;
opt_n_tpb = 16;
}
// Number of space and velocity coordinates
const uint32_t nv = NDIM * n_obj;
// Create aliases
const var3_t* r = (var3_t*)d_y;
const nbp_t::param_t* p = (nbp_t::param_t*)d_p;
var3_t* a = (var3_t*)(d_dy + nv);
if (first_call || n_last != n_obj)
{
for (unsigned int n_tpb = 16; n_tpb <= max_n_tpb / 2; n_tpb += 16)
{
float elapsed_time = gpu_calc_grav_accel_tile(snk, src, n_tpb, start, stop, d_md, r, p, a);
printf(" %4d %12.4e [sec]\n", n_tpb, elapsed_time / 1.0e3);
if (elapsed_time < result.y)
{
result.x = (float)n_tpb;
result.y = elapsed_time; // [ms]
}
}
opt_n_tpb = (unsigned int)result.x;
n_last = n_obj;
}
else
{
float elapsed_time = gpu_calc_grav_accel_tile(snk, src, opt_n_tpb, start, stop, d_md, r, p, a);
result.x = (float)opt_n_tpb;
result.y = elapsed_time; // [ms]
}
first_call = false;
CUDA_SAFE_CALL(hipEventDestroy(stop));
CUDA_SAFE_CALL(hipEventDestroy(start));
return result;
}
void benchmark_GPU(int id_dev, uint32_t n_obj, const nbp_t::metadata_t* d_md, const var_t* d_y, const var_t* d_p, var_t* d_dy, ofstream& o_result)
{
static string method_name[] = { "base", "base_with_sym", "tile", "tile_advanced" };
static string param_name[] = { "n_body", "snk_src" };
uint2_t snk = { 0, 0 };
uint2_t src = { 0, 0 };
var_t Dt_CPU = 0.0;
hipDeviceProp_t deviceProp;
CUDA_SAFE_CALL(hipGetDeviceProperties(&deviceProp, id_dev));
// 1. Naive method on the GPU with n_obj parameter
float2 result = gpu_calc_grav_accel_naive(n_obj, deviceProp.maxThreadsPerBlock, d_md, d_y, d_p, d_dy);
unsigned int n_tpb = (unsigned int)result.x;
var_t Dt_GPU = result.y; // [ms]
print(PROC_UNIT_GPU, method_name[0], param_name[0], snk, src, n_obj, n_tpb, Dt_CPU, Dt_GPU, o_result, true);
// 2. Naive method on the GPU with snk and src parameters
snk.n2 = n_obj;
src.n2 = n_obj;
result = gpu_calc_grav_accel_naive(n_obj, snk, src, deviceProp.maxThreadsPerBlock, d_md, d_y, d_p, d_dy);
n_tpb = (unsigned int)result.x;
Dt_GPU = result.y; // [ms]
print(PROC_UNIT_GPU, method_name[0], param_name[1], snk, src, n_obj, n_tpb, Dt_CPU, Dt_GPU, o_result, true);
// 3. Tile method on the GPU with n_obj parameter
result = gpu_calc_grav_accel_tile(n_obj, deviceProp.maxThreadsPerBlock, d_md, d_y, d_p, d_dy);
n_tpb = (unsigned int)result.x;
Dt_GPU = result.y; // [ms]
snk.n2 = 0;
src.n2 = 0;
print(PROC_UNIT_GPU, method_name[2], param_name[0], snk, src, n_obj, n_tpb, Dt_CPU, Dt_GPU, o_result, true);
// 4. Tile method on the GPU with snk and src parameters
snk.n2 = n_obj;
src.n2 = n_obj;
result = gpu_calc_grav_accel_tile(n_obj, snk, src, deviceProp.maxThreadsPerBlock, d_md, d_y, d_p, d_dy);
n_tpb = (unsigned int)result.x;
Dt_GPU = result.y; // [ms]
print(PROC_UNIT_GPU, method_name[2], param_name[1], snk, src, n_obj, n_tpb, Dt_CPU, Dt_GPU, o_result, true);
}
#undef NDIM
#undef NVPO
| 0c2af9d8e9cf74d2fae847b89eb2bdaf91fc7e77.cu | #include <cfloat>
#include <string>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "macro.h"
#include "type.h"
#include "util.h"
#include "redutil2.h"
#define VECTOR_FMT (%12.4le, %12.4le, %12.4le)
#define NDIM 3 // Number of space dimension
#define NVPO 6 // Number of variables per object (3 space and 3 velocity coordinates)
using namespace std;
namespace kernel
{
// 36 FLOP
inline __host__ __device__
void body_body_grav_accel(const var3_t& ri, const var3_t& rj, var_t mj, var3_t& ai)
{
// compute r_ij = r_j - r_i [3 FLOPS] [6 read, 3 write]
var3_t r_ij = { rj.x - ri.x, rj.y - ri.y, rj.z - ri.z };
// compute square of r_ij vector [5 FLOPS + ] [3 read, 1 write]
var_t d2 = SQR(r_ij.x) + SQR(r_ij.y) + SQR(r_ij.z);
var_t s = K2 * mj / (d2 * sqrt(d2));
// 6 FLOP
ai.x += s * r_ij.x;
ai.y += s * r_ij.y;
ai.z += s * r_ij.z;
} /* body_body_grav_accel() */
__global__
void calc_grav_accel_naive(uint32_t n_obj, const nbp_t::metadata_t* md, const var3_t* r, const nbp_t::param_t* p, var3_t* a)
{
// i is the index of the SINK body
const uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n_obj)
{
// j is the index of the SOURCE body
for (uint32_t j = 0; j < n_obj; j++)
{
if (i == j) continue;
body_body_grav_accel(r[i], r[j], p[j].mass, a[i]);
}
}
} /* calc_grav_accel_naive () */
__global__
void calc_grav_accel_naive(uint2_t snk, uint2_t src, const nbp_t::metadata_t* md, const var3_t* r, const nbp_t::param_t* p, var3_t* a)
{
// i is the index of the SINK body
const uint32_t i = snk.n1 + blockIdx.x * blockDim.x + threadIdx.x;
if (snk.n2 > i)
{
// j is the index of the SOURCE body
for (uint32_t j = src.n1; j < src.n2; j++)
{
if (i == j) continue;
body_body_grav_accel(r[i], r[j], p[j].mass, a[i]);
}
}
} /* calc_grav_accel_naive () */
__global__
void calc_grav_accel_tile(uint32_t n_obj, const nbp_t::metadata_t* md, const var3_t* r, const nbp_t::param_t* p, var3_t* a)
{
extern __shared__ var3_t sh_pos[];
const uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
var3_t acc = a[i];
var3_t my_pos;
// To avoid overrunning the r buffer
if (n_obj > i)
{
my_pos = r[i];
}
// Note! : the for loop must be outside the upper if clause, otherwise the sh_pos array will
// not receive the input for the last tile! The reason is that some threads will not be considered
// in the if (n_obj > idx) clause.
for (uint32_t tile = 0; (tile * blockDim.x) < n_obj; tile++)
{
const uint32_t idx = tile * blockDim.x + threadIdx.x;
// To avoid overruning the r and mass buffer
if (n_obj > idx)
{
sh_pos[threadIdx.x] = r[idx];
}
__syncthreads();
for (int j = 0; j < blockDim.x; j++)
{
// To avoid overrunning the input arrays
if (n_obj <= (tile * blockDim.x) + j)
break;
// To avoid self-interaction
if (i == (tile * blockDim.x) + j)
continue;
body_body_grav_accel(my_pos, sh_pos[j], p[(tile * blockDim.x) + j].mass, acc);
}
__syncthreads();
}
if (n_obj > i)
{
a[i] = acc;
}
}
__global__
void calc_grav_accel_tile(uint2_t snk, uint2_t src, const nbp_t::metadata_t* md, const var3_t* r, const nbp_t::param_t* p, var3_t* a)
{
extern __shared__ var3_t sh_pos[];
const uint32_t i = snk.n1 + blockIdx.x * blockDim.x + threadIdx.x;
var3_t acc = a[i];
var3_t my_pos;
// To avoid overrunning the r buffer
if (snk.n2 > i)
{
my_pos = r[i];
}
// Note! : the for loop must be outside the upper if clause, otherwise the sh_pos array will
// not receive the input for the last tile! The reason is that some threads will not be considered
// in the if (n_obj > idx) clause.
for (uint32_t tile = 0; (tile * blockDim.x) < src.n2; tile++)
{
const uint32_t idx = src.n1 + tile * blockDim.x + threadIdx.x;
// To avoid overruning the r and mass buffer
if (src.n2 > idx)
{
sh_pos[threadIdx.x] = r[idx];
}
__syncthreads();
for (int j = 0; j < blockDim.x; j++)
{
// To avoid overrunning the input arrays
if (src.n2 <= src.n1 + (tile * blockDim.x) + j)
break;
// To avoid self-interaction
if (i == src.n1 + (tile * blockDim.x) + j)
continue;
body_body_grav_accel(my_pos, sh_pos[j], p[(tile * blockDim.x) + j].mass, acc);
}
__syncthreads();
}
if (snk.n2 > i)
{
a[i] = acc;
}
}
} /* namespace kernel */
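/* Note: the host wrappers below auto-tune the launch configuration: on the first call (or when
n_obj changes) they time block sizes from 16 up to maxThreadsPerBlock/2 in steps of 16 and keep
the fastest one; subsequent calls reuse the cached block size. The returned float2 packs the
chosen block size in .x and the elapsed time in milliseconds in .y. */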
float gpu_calc_grav_accel_naive(uint32_t n_obj, unsigned int n_tpb, cudaEvent_t& start, cudaEvent_t& stop, const nbp_t::metadata_t* md, const var3_t* r, const nbp_t::param_t* p, var3_t* a)
{
float elapsed_time = 0.0f;
dim3 grid((n_obj + n_tpb - 1) / n_tpb);
dim3 block(n_tpb);
CUDA_SAFE_CALL(cudaEventRecord(start, 0));
kernel::calc_grav_accel_naive <<< grid, block >>>(n_obj, md, r, p, a);
CUDA_CHECK_ERROR();
CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
CUDA_SAFE_CALL(cudaEventSynchronize(stop));
// Computes the elapsed time between two events in milliseconds with a resolution of around 0.5 microseconds.
CUDA_SAFE_CALL(cudaEventElapsedTime(&elapsed_time, start, stop));
return elapsed_time;
}
float gpu_calc_grav_accel_naive(uint2_t snk, uint2_t src, unsigned int n_tpb, cudaEvent_t& start, cudaEvent_t& stop, const nbp_t::metadata_t* md, const var3_t* r, const nbp_t::param_t* p, var3_t* a)
{
float elapsed_time = 0.0f;
dim3 grid(((snk.n2 - snk.n1) + n_tpb - 1) / n_tpb);
dim3 block(n_tpb);
CUDA_SAFE_CALL(cudaEventRecord(start, 0));
kernel::calc_grav_accel_naive <<< grid, block >>>(snk, src, md, r, p, a);
CUDA_CHECK_ERROR();
CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
CUDA_SAFE_CALL(cudaEventSynchronize(stop));
// Computes the elapsed time between two events in milliseconds with a resolution of around 0.5 microseconds.
CUDA_SAFE_CALL(cudaEventElapsedTime(&elapsed_time, start, stop));
return elapsed_time;
}
float gpu_calc_grav_accel_tile(uint32_t n_obj, unsigned int n_tpb, cudaEvent_t& start, cudaEvent_t& stop, const nbp_t::metadata_t* md, const var3_t* r, const nbp_t::param_t* p, var3_t* a)
{
float elapsed_time = 0.0f;
dim3 grid((n_obj + n_tpb - 1) / n_tpb);
dim3 block(n_tpb);
CUDA_SAFE_CALL(cudaEventRecord(start, 0));
size_t sh_mem_size = n_tpb * sizeof(var3_t);
kernel::calc_grav_accel_tile <<< grid, block, sh_mem_size >>>(n_obj, md, r, p, a);
CUDA_CHECK_ERROR();
CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
CUDA_SAFE_CALL(cudaEventSynchronize(stop));
// Computes the elapsed time between two events in milliseconds with a resolution of around 0.5 microseconds.
CUDA_SAFE_CALL(cudaEventElapsedTime(&elapsed_time, start, stop));
return elapsed_time;
}
float gpu_calc_grav_accel_tile(uint2_t snk, uint2_t src, unsigned int n_tpb, cudaEvent_t& start, cudaEvent_t& stop, const nbp_t::metadata_t* md, const var3_t* r, const nbp_t::param_t* p, var3_t* a)
{
float elapsed_time = 0.0f;
dim3 grid(((snk.n2 - snk.n1) + n_tpb - 1) / n_tpb);
dim3 block(n_tpb);
CUDA_SAFE_CALL(cudaEventRecord(start, 0));
size_t sh_mem_size = n_tpb * sizeof(var3_t);
kernel::calc_grav_accel_tile << < grid, block, sh_mem_size >> >(snk, src, md, r, p, a);
CUDA_CHECK_ERROR();
CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
CUDA_SAFE_CALL(cudaEventSynchronize(stop));
// Computes the elapsed time between two events in milliseconds with a resolution of around 0.5 microseconds.
CUDA_SAFE_CALL(cudaEventElapsedTime(&elapsed_time, start, stop));
return elapsed_time;
}
float2 gpu_calc_grav_accel_naive(uint32_t n_obj, unsigned int max_n_tpb, const nbp_t::metadata_t* d_md, const var_t* d_y, const var_t* d_p, var_t* d_dy)
{
static bool first_call = true;
static uint32_t n_last;
static unsigned int opt_n_tpb;
float2 result = { 0.0f, FLT_MAX };
cudaEvent_t start, stop;
CUDA_SAFE_CALL(cudaEventCreate(&start));
CUDA_SAFE_CALL(cudaEventCreate(&stop));
if (first_call)
{
n_last = n_obj;
opt_n_tpb = 16;
}
// Number of space and velocity coordinates
const uint32_t nv = NDIM * n_obj;
// Create aliases
const var3_t* r = (var3_t*)d_y;
const nbp_t::param_t* p = (nbp_t::param_t*)d_p;
var3_t* a = (var3_t*)(d_dy + nv);
if (first_call || n_last != n_obj)
{
for (unsigned int n_tpb = 16; n_tpb <= max_n_tpb/2; n_tpb += 16)
{
float elapsed_time = gpu_calc_grav_accel_naive(n_obj, n_tpb, start, stop, d_md, r, p, a);
printf(" %4d %12.4e [sec]\n", n_tpb, elapsed_time / 1.0e3);
if (elapsed_time < result.y)
{
result.x = (float)n_tpb;
result.y = elapsed_time; // [ms]
}
}
opt_n_tpb = (unsigned int)result.x;
n_last = n_obj;
}
else
{
float elapsed_time = gpu_calc_grav_accel_naive(n_obj, opt_n_tpb, start, stop, d_md, r, p, a);
result.x = (float)opt_n_tpb;
result.y = elapsed_time; // [ms]
}
first_call = false;
CUDA_SAFE_CALL(cudaEventDestroy(stop));
CUDA_SAFE_CALL(cudaEventDestroy(start));
return result;
}
float2 gpu_calc_grav_accel_naive(uint32_t n_obj, uint2_t snk, uint2_t src, unsigned int max_n_tpb, const nbp_t::metadata_t* d_md, const var_t* d_y, const var_t* d_p, var_t* d_dy)
{
static bool first_call = true;
static uint32_t n_last;
static unsigned int opt_n_tpb;
float2 result = { 0.0f, FLT_MAX };
cudaEvent_t start, stop;
CUDA_SAFE_CALL(cudaEventCreate(&start));
CUDA_SAFE_CALL(cudaEventCreate(&stop));
if (first_call)
{
n_last = n_obj;
opt_n_tpb = 16;
}
// Number of space and velocity coordinates
const uint32_t nv = NDIM * n_obj;
// Create aliases
const var3_t* r = (var3_t*)d_y;
const nbp_t::param_t* p = (nbp_t::param_t*)d_p;
var3_t* a = (var3_t*)(d_dy + nv);
if (first_call || n_last != n_obj)
{
for (unsigned int n_tpb = 16; n_tpb <= max_n_tpb / 2; n_tpb += 16)
{
float elapsed_time = gpu_calc_grav_accel_naive(snk, src, n_tpb, start, stop, d_md, r, p, a);
printf(" %4d %12.4e [sec]\n", n_tpb, elapsed_time / 1.0e3);
if (elapsed_time < result.y)
{
result.x = (float)n_tpb;
result.y = elapsed_time; // [ms]
}
}
opt_n_tpb = (unsigned int)result.x;
n_last = n_obj;
}
else
{
float elapsed_time = gpu_calc_grav_accel_naive(snk, src, opt_n_tpb, start, stop, d_md, r, p, a);
result.x = (float)opt_n_tpb;
result.y = elapsed_time; // [ms]
}
first_call = false;
CUDA_SAFE_CALL(cudaEventDestroy(stop));
CUDA_SAFE_CALL(cudaEventDestroy(start));
return result;
}
float2 gpu_calc_grav_accel_tile(uint32_t n_obj, unsigned int max_n_tpb, const nbp_t::metadata_t* d_md, const var_t* d_y, const var_t* d_p, var_t* d_dy)
{
static bool first_call = true;
static uint32_t n_last;
static unsigned int opt_n_tpb;
float2 result = { 0.0f, FLT_MAX };
cudaEvent_t start, stop;
CUDA_SAFE_CALL(cudaEventCreate(&start));
CUDA_SAFE_CALL(cudaEventCreate(&stop));
if (first_call)
{
n_last = n_obj;
opt_n_tpb = 16;
}
// Number of space and velocity coordinates
const uint32_t nv = NDIM * n_obj;
// Create aliases
const var3_t* r = (var3_t*)d_y;
const nbp_t::param_t* p = (nbp_t::param_t*)d_p;
var3_t* a = (var3_t*)(d_dy + nv);
if (first_call || n_last != n_obj)
{
for (unsigned int n_tpb = 16; n_tpb <= max_n_tpb / 2; n_tpb += 16)
{
float elapsed_time = gpu_calc_grav_accel_tile(n_obj, n_tpb, start, stop, d_md, r, p, a);
printf(" %4d %12.4e [sec]\n", n_tpb, elapsed_time / 1.0e3);
if (elapsed_time < result.y)
{
result.x = (float)n_tpb;
result.y = elapsed_time;
}
}
opt_n_tpb = (unsigned int)result.x;
n_last = n_obj;
}
else
{
float elapsed_time = gpu_calc_grav_accel_tile(n_obj, opt_n_tpb, start, stop, d_md, r, p, a);
result.x = (float)opt_n_tpb;
result.y = elapsed_time;
}
first_call = false;
CUDA_SAFE_CALL(cudaEventDestroy(stop));
CUDA_SAFE_CALL(cudaEventDestroy(start));
return result;
}
float2 gpu_calc_grav_accel_tile(uint32_t n_obj, uint2_t snk, uint2_t src, unsigned int max_n_tpb, const nbp_t::metadata_t* d_md, const var_t* d_y, const var_t* d_p, var_t* d_dy)
{
static bool first_call = true;
static uint32_t n_last;
static unsigned int opt_n_tpb;
float2 result = { 0.0f, FLT_MAX };
cudaEvent_t start, stop;
CUDA_SAFE_CALL(cudaEventCreate(&start));
CUDA_SAFE_CALL(cudaEventCreate(&stop));
if (first_call)
{
n_last = n_obj;
opt_n_tpb = 16;
}
// Number of space and velocity coordinates
const uint32_t nv = NDIM * n_obj;
// Create aliases
const var3_t* r = (var3_t*)d_y;
const nbp_t::param_t* p = (nbp_t::param_t*)d_p;
var3_t* a = (var3_t*)(d_dy + nv);
if (first_call || n_last != n_obj)
{
for (unsigned int n_tpb = 16; n_tpb <= max_n_tpb / 2; n_tpb += 16)
{
float elapsed_time = gpu_calc_grav_accel_tile(snk, src, n_tpb, start, stop, d_md, r, p, a);
printf(" %4d %12.4e [sec]\n", n_tpb, elapsed_time / 1.0e3);
if (elapsed_time < result.y)
{
result.x = (float)n_tpb;
result.y = elapsed_time; // [ms]
}
}
opt_n_tpb = (unsigned int)result.x;
n_last = n_obj;
}
else
{
float elapsed_time = gpu_calc_grav_accel_tile(snk, src, opt_n_tpb, start, stop, d_md, r, p, a);
result.x = (float)opt_n_tpb;
result.y = elapsed_time; // [ms]
}
first_call = false;
CUDA_SAFE_CALL(cudaEventDestroy(stop));
CUDA_SAFE_CALL(cudaEventDestroy(start));
return result;
}
void benchmark_GPU(int id_dev, uint32_t n_obj, const nbp_t::metadata_t* d_md, const var_t* d_y, const var_t* d_p, var_t* d_dy, ofstream& o_result)
{
static string method_name[] = { "base", "base_with_sym", "tile", "tile_advanced" };
static string param_name[] = { "n_body", "snk_src" };
uint2_t snk = { 0, 0 };
uint2_t src = { 0, 0 };
var_t Dt_CPU = 0.0;
cudaDeviceProp deviceProp;
CUDA_SAFE_CALL(cudaGetDeviceProperties(&deviceProp, id_dev));
// 1. Naive method on the GPU with n_obj parameter
float2 result = gpu_calc_grav_accel_naive(n_obj, deviceProp.maxThreadsPerBlock, d_md, d_y, d_p, d_dy);
unsigned int n_tpb = (unsigned int)result.x;
var_t Dt_GPU = result.y; // [ms]
print(PROC_UNIT_GPU, method_name[0], param_name[0], snk, src, n_obj, n_tpb, Dt_CPU, Dt_GPU, o_result, true);
// 2. Naive method on the GPU with snk and src parameters
snk.n2 = n_obj;
src.n2 = n_obj;
result = gpu_calc_grav_accel_naive(n_obj, snk, src, deviceProp.maxThreadsPerBlock, d_md, d_y, d_p, d_dy);
n_tpb = (unsigned int)result.x;
Dt_GPU = result.y; // [ms]
print(PROC_UNIT_GPU, method_name[0], param_name[1], snk, src, n_obj, n_tpb, Dt_CPU, Dt_GPU, o_result, true);
// 3. Tile method on the GPU with n_obj parameter
result = gpu_calc_grav_accel_tile(n_obj, deviceProp.maxThreadsPerBlock, d_md, d_y, d_p, d_dy);
n_tpb = (unsigned int)result.x;
Dt_GPU = result.y; // [ms]
snk.n2 = 0;
src.n2 = 0;
print(PROC_UNIT_GPU, method_name[2], param_name[0], snk, src, n_obj, n_tpb, Dt_CPU, Dt_GPU, o_result, true);
// 4. Tile method on the GPU with snk and src parameters
snk.n2 = n_obj;
src.n2 = n_obj;
result = gpu_calc_grav_accel_tile(n_obj, snk, src, deviceProp.maxThreadsPerBlock, d_md, d_y, d_p, d_dy);
n_tpb = (unsigned int)result.x;
Dt_GPU = result.y; // [ms]
print(PROC_UNIT_GPU, method_name[2], param_name[1], snk, src, n_obj, n_tpb, Dt_CPU, Dt_GPU, o_result, true);
}
#undef NDIM
#undef NVPO
|
0c8cd54b8b256c17181ef37a00c9a451b3d86b9d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <typeinfo>
#include <color_spinor_field.h>
#include <blas_quda.h>
#include <string.h>
#include <iostream>
#include <misc_helpers.h>
#include <face_quda.h>
#include <dslash_quda.h>
#ifdef DEVICE_PACK
#define REORDER_LOCATION QUDA_CUDA_FIELD_LOCATION
#else
#define REORDER_LOCATION QUDA_CPU_FIELD_LOCATION
#endif
int zeroCopy = 0;
namespace quda {
int cudaColorSpinorField::bufferIndex = 0;
int cudaColorSpinorField::initGhostFaceBuffer = 0;
void* cudaColorSpinorField::ghostFaceBuffer[2]; //gpu memory
void* cudaColorSpinorField::fwdGhostFaceBuffer[2][QUDA_MAX_DIM]; //pointers to ghostFaceBuffer
void* cudaColorSpinorField::backGhostFaceBuffer[2][QUDA_MAX_DIM]; //pointers to ghostFaceBuffer
size_t cudaColorSpinorField::ghostFaceBytes = 0;
cudaColorSpinorField::cudaColorSpinorField(const ColorSpinorParam ¶m) :
ColorSpinorField(param), alloc(false), init(true), texInit(false),
initComms(false), bufferMessageHandler(0), nFaceComms(0) {
// this must come before create
if (param.create == QUDA_REFERENCE_FIELD_CREATE) {
v = param.v;
norm = param.norm;
}
create(param.create);
if (param.create == QUDA_NULL_FIELD_CREATE) {
// do nothing
} else if (param.create == QUDA_ZERO_FIELD_CREATE) {
zero();
} else if (param.create == QUDA_REFERENCE_FIELD_CREATE) {
// do nothing
} else if (param.create == QUDA_COPY_FIELD_CREATE){
errorQuda("not implemented");
}
checkCudaError();
}
cudaColorSpinorField::cudaColorSpinorField(const cudaColorSpinorField &src) :
ColorSpinorField(src), alloc(false), init(true), texInit(false),
initComms(false), bufferMessageHandler(0), nFaceComms(0) {
create(QUDA_COPY_FIELD_CREATE);
copySpinorField(src);
}
// creates a copy of src, any differences defined in param
cudaColorSpinorField::cudaColorSpinorField(const ColorSpinorField &src,
const ColorSpinorParam ¶m) :
ColorSpinorField(src), alloc(false), init(true), texInit(false),
initComms(false), bufferMessageHandler(0), nFaceComms(0) {
// can only override if we are not using a reference or parity special case
if (param.create != QUDA_REFERENCE_FIELD_CREATE ||
(param.create == QUDA_REFERENCE_FIELD_CREATE &&
src.SiteSubset() == QUDA_FULL_SITE_SUBSET &&
param.siteSubset == QUDA_PARITY_SITE_SUBSET &&
typeid(src) == typeid(cudaColorSpinorField) ) ||
(param.create == QUDA_REFERENCE_FIELD_CREATE && param.eigv_dim > 0)) {
reset(param);
} else {
errorQuda("Undefined behaviour"); // else silent bug possible?
}
// This must be set before create is called
if (param.create == QUDA_REFERENCE_FIELD_CREATE) {
if (typeid(src) == typeid(cudaColorSpinorField)) {
v = (void*)src.V();
norm = (void*)src.Norm();
} else {
errorQuda("Cannot reference a non-cuda field");
}
if (this->EigvDim() > 0)
{//setup eigenvector from the set
if(eigv_dim != this->EigvDim()) errorQuda("\nEigenvector set does not match..\n") ;//for debug only.
if(eigv_id > -1)
{
//printfQuda("\nSetting pointers for vector id %d\n", eigv_id); //for debug only.
v = (void*)((char*)v + eigv_id*bytes);
norm = (void*)((char*)norm + eigv_id*norm_bytes);
}
//do nothing for the eigenvector subset...
}
}
create(param.create);
if (param.create == QUDA_NULL_FIELD_CREATE) {
// do nothing
} else if (param.create == QUDA_ZERO_FIELD_CREATE) {
zero();
} else if (param.create == QUDA_COPY_FIELD_CREATE) {
copySpinorField(src);
} else if (param.create == QUDA_REFERENCE_FIELD_CREATE) {
// do nothing
} else {
errorQuda("CreateType %d not implemented", param.create);
}
}
cudaColorSpinorField::cudaColorSpinorField(const ColorSpinorField &src)
: ColorSpinorField(src), alloc(false), init(true), texInit(false),
initComms(false), bufferMessageHandler(0), nFaceComms(0) {
create(QUDA_COPY_FIELD_CREATE);
copySpinorField(src);
}
ColorSpinorField& cudaColorSpinorField::operator=(const ColorSpinorField &src) {
if (typeid(src) == typeid(cudaColorSpinorField)) {
*this = (dynamic_cast<const cudaColorSpinorField&>(src));
} else if (typeid(src) == typeid(cpuColorSpinorField)) {
*this = (dynamic_cast<const cpuColorSpinorField&>(src));
} else {
errorQuda("Unknown input ColorSpinorField %s", typeid(src).name());
}
return *this;
}
cudaColorSpinorField& cudaColorSpinorField::operator=(const cudaColorSpinorField &src) {
if (&src != this) {
// keep current attributes unless unset
if (!ColorSpinorField::init) { // note this will turn a reference field into a regular field
destroy();
destroyComms(); // not sure if this necessary
ColorSpinorField::operator=(src);
create(QUDA_COPY_FIELD_CREATE);
}
copySpinorField(src);
}
return *this;
}
cudaColorSpinorField& cudaColorSpinorField::operator=(const cpuColorSpinorField &src) {
// keep current attributes unless unset
if (!ColorSpinorField::init) { // note this will turn a reference field into a regular field
destroy();
ColorSpinorField::operator=(src);
create(QUDA_COPY_FIELD_CREATE);
}
loadSpinorField(src);
return *this;
}
cudaColorSpinorField::~cudaColorSpinorField() {
destroyComms();
destroy();
}
void cudaColorSpinorField::create(const QudaFieldCreate create) {
if (siteSubset == QUDA_FULL_SITE_SUBSET && siteOrder != QUDA_EVEN_ODD_SITE_ORDER) {
errorQuda("Subset not implemented");
}
if (create != QUDA_REFERENCE_FIELD_CREATE) {
v = device_malloc(bytes);
if (precision == QUDA_HALF_PRECISION) {
norm = device_malloc(norm_bytes);
}
alloc = true;
}
if (siteSubset == QUDA_FULL_SITE_SUBSET) {
if(eigv_dim != 0) errorQuda("Eigenvectors must be parity fields!");
// create the associated even and odd subsets
ColorSpinorParam param;
param.siteSubset = QUDA_PARITY_SITE_SUBSET;
param.nDim = nDim;
memcpy(param.x, x, nDim*sizeof(int));
param.x[0] /= 2; // set single parity dimensions
param.create = QUDA_REFERENCE_FIELD_CREATE;
param.v = v;
param.norm = norm;
even = new cudaColorSpinorField(*this, param);
odd = new cudaColorSpinorField(*this, param);
// need this hackery for the moment (need to locate the odd pointers half way into the full field)
(dynamic_cast<cudaColorSpinorField*>(odd))->v = (void*)((char*)v + bytes/2);
if (precision == QUDA_HALF_PRECISION)
(dynamic_cast<cudaColorSpinorField*>(odd))->norm = (void*)((char*)norm + norm_bytes/2);
for(int i=0; i<nDim; ++i){
if(commDimPartitioned(i)){
(dynamic_cast<cudaColorSpinorField*>(odd))->ghost[i] =
static_cast<char*>((dynamic_cast<cudaColorSpinorField*>(odd))->ghost[i]) + bytes/2;
if(precision == QUDA_HALF_PRECISION)
(dynamic_cast<cudaColorSpinorField*>(odd))->ghostNorm[i] =
static_cast<char*>((dynamic_cast<cudaColorSpinorField*>(odd))->ghostNorm[i]) + norm_bytes/2;
}
}
#ifdef USE_TEXTURE_OBJECTS
dynamic_cast<cudaColorSpinorField*>(even)->destroyTexObject();
dynamic_cast<cudaColorSpinorField*>(even)->createTexObject();
dynamic_cast<cudaColorSpinorField*>(odd)->destroyTexObject();
dynamic_cast<cudaColorSpinorField*>(odd)->createTexObject();
#endif
}
else{//siteSubset == QUDA_PARITY_SITE_SUBSET
//! setup an object for selected eigenvector (the 1st one as a default):
if ((eigv_dim > 0) && (create != QUDA_REFERENCE_FIELD_CREATE) && (eigv_id == -1))
{
//if(bytes > 1811939328) warningQuda("\nCUDA API probably won't be able to create texture object for the eigenvector set... Object size is : %u bytes\n", bytes);
if (getVerbosity() == QUDA_DEBUG_VERBOSE) printfQuda("\nEigenvector set constructor...\n");
// create the associated even and odd subsets
ColorSpinorParam param;
param.siteSubset = QUDA_PARITY_SITE_SUBSET;
param.nDim = nDim;
memcpy(param.x, x, nDim*sizeof(int));
param.create = QUDA_REFERENCE_FIELD_CREATE;
param.v = v;
param.norm = norm;
param.eigv_dim = eigv_dim;
//reserve eigvector set
eigenvectors.reserve(eigv_dim);
//setup volume, [real_]length and stride for a single eigenvector
for(int id = 0; id < eigv_dim; id++)
{
param.eigv_id = id;
eigenvectors.push_back(new cudaColorSpinorField(*this, param));
#ifdef USE_TEXTURE_OBJECTS //(a lot of texture objects...)
dynamic_cast<cudaColorSpinorField*>(eigenvectors[id])->destroyTexObject();
dynamic_cast<cudaColorSpinorField*>(eigenvectors[id])->createTexObject();
#endif
}
}
}
if (create != QUDA_REFERENCE_FIELD_CREATE) {
if (siteSubset != QUDA_FULL_SITE_SUBSET) {
zeroPad();
} else {
(dynamic_cast<cudaColorSpinorField*>(even))->zeroPad();
(dynamic_cast<cudaColorSpinorField*>(odd))->zeroPad();
}
}
#ifdef USE_TEXTURE_OBJECTS
if((eigv_dim == 0) || (eigv_dim > 0 && eigv_id > -1))
createTexObject();
#endif
// initialize the ghost pointers
if(siteSubset == QUDA_PARITY_SITE_SUBSET) {
for(int i=0; i<nDim; ++i){
if(commDimPartitioned(i)){
ghost[i] = (char*)v + (stride + ghostOffset[i])*nColor*nSpin*2*precision;
if(precision == QUDA_HALF_PRECISION)
ghostNorm[i] = (char*)norm + (stride + ghostNormOffset[i])*QUDA_SINGLE_PRECISION;
}
}
}
checkCudaError();
}
#ifdef USE_TEXTURE_OBJECTS
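// createTexObject binds the spinor data to a texture object over linear device
// memory: the channel format is derived from the precision (collapsing to two
// components for staggered/coarse fields in half or single precision), and in
// half precision a second texture object is created for the float norm array.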
void cudaColorSpinorField::createTexObject() {
if (isNative()) {
if (texInit) errorQuda("Already bound textures");
// create the texture for the field components
hipChannelFormatDesc desc;
memset(&desc, 0, sizeof(hipChannelFormatDesc));
if (precision == QUDA_SINGLE_PRECISION) desc.f = hipChannelFormatKindFloat;
else desc.f = hipChannelFormatKindSigned; // half is short, double is int2
// staggered and coarse fields in half and single are always two component
if ( (nSpin == 1 || nSpin == 2) && (precision == QUDA_HALF_PRECISION || precision == QUDA_SINGLE_PRECISION)) {
desc.x = 8*precision;
desc.y = 8*precision;
desc.z = 0;
desc.w = 0;
} else { // all others are four component (double2 is spread across int4)
desc.x = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
desc.y = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
desc.z = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
desc.w = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
}
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeLinear;
resDesc.res.linear.devPtr = v;
resDesc.res.linear.desc = desc;
resDesc.res.linear.sizeInBytes = bytes;
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
if (precision == QUDA_HALF_PRECISION) texDesc.readMode = hipReadModeNormalizedFloat;
else texDesc.readMode = hipReadModeElementType;
hipCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
checkCudaError();
// create the texture for the norm components
if (precision == QUDA_HALF_PRECISION) {
hipChannelFormatDesc desc;
memset(&desc, 0, sizeof(hipChannelFormatDesc));
desc.f = hipChannelFormatKindFloat;
desc.x = 8*QUDA_SINGLE_PRECISION; desc.y = 0; desc.z = 0; desc.w = 0;
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeLinear;
resDesc.res.linear.devPtr = norm;
resDesc.res.linear.desc = desc;
resDesc.res.linear.sizeInBytes = norm_bytes;
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.readMode = hipReadModeElementType;
hipCreateTextureObject(&texNorm, &resDesc, &texDesc, NULL);
checkCudaError();
}
texInit = true;
}
}
void cudaColorSpinorField::destroyTexObject() {
if (isNative() && texInit) {
hipDestroyTextureObject(tex);
if (precision == QUDA_HALF_PRECISION) hipDestroyTextureObject(texNorm);
texInit = false;
checkCudaError();
}
}
#endif
void cudaColorSpinorField::destroy() {
if (alloc) {
device_free(v);
if (precision == QUDA_HALF_PRECISION) device_free(norm);
if (siteSubset != QUDA_FULL_SITE_SUBSET) {
//! for deflated solvers:
if (eigv_dim > 0)
{
std::vector<ColorSpinorField*>::iterator vec;
for(vec = eigenvectors.begin(); vec != eigenvectors.end(); vec++) delete *vec;
}
}
alloc = false;
}
if (siteSubset == QUDA_FULL_SITE_SUBSET) {
delete even;
delete odd;
}
#ifdef USE_TEXTURE_OBJECTS
if((eigv_dim == 0) || (eigv_dim > 0 && eigv_id > -1))
destroyTexObject();
#endif
}
// cuda's floating point format, IEEE-754, represents the floating point
// zero as 4 zero bytes
void cudaColorSpinorField::zero() {
hipMemsetAsync(v, 0, bytes, streams[Nstream-1]);
if (precision == QUDA_HALF_PRECISION) hipMemsetAsync(norm, 0, norm_bytes, streams[Nstream-1]);
}
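// zeroPad clears the padding region between the active volume and the stride
// with a single 2D memset over the Npad component blocks; for a field that
// holds a whole eigenvector set the per-vector stride/volume are used and the
// row count is scaled by eigv_dim so every vector's pad is cleared.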
void cudaColorSpinorField::zeroPad() {
size_t pad_bytes = (stride - volume) * precision * fieldOrder;
int Npad = nColor * nSpin * 2 / fieldOrder;
if (eigv_dim > 0 && eigv_id == -1){//we consider the whole eigenvector set:
Npad *= eigv_dim;
pad_bytes /= eigv_dim;
}
size_t pitch = ((eigv_dim == 0 || eigv_id != -1) ? stride : eigv_stride)*fieldOrder*precision;
char *dst = (char*)v + ((eigv_dim == 0 || eigv_id != -1) ? volume : eigv_volume)*fieldOrder*precision;
if(pad_bytes) hipMemset2D(dst, pitch, 0, pad_bytes, Npad);
//for (int i=0; i<Npad; i++) {
// if (pad_bytes) hipMemset((char*)v + (volume + i*stride)*fieldOrder*precision, 0, pad_bytes);
//}
}
void cudaColorSpinorField::copy(const cudaColorSpinorField &src) {
checkField(*this, src);
if (this->GammaBasis() != src.GammaBasis()) errorQuda("cannot call this copy with different basis");
blas::copy(*this, src);
}
void cudaColorSpinorField::copySpinorField(const ColorSpinorField &src) {
// src is on the device and is native
if (typeid(src) == typeid(cudaColorSpinorField) &&
isNative() && dynamic_cast<const cudaColorSpinorField &>(src).isNative() &&
this->GammaBasis() == src.GammaBasis()) {
copy(dynamic_cast<const cudaColorSpinorField&>(src));
} else if (typeid(src) == typeid(cudaColorSpinorField)) {
copyGenericColorSpinor(*this, src, QUDA_CUDA_FIELD_LOCATION);
} else if (typeid(src) == typeid(cpuColorSpinorField)) { // src is on the host
loadSpinorField(src);
} else {
errorQuda("Unknown input ColorSpinorField %s", typeid(src).name());
}
}
void cudaColorSpinorField::loadSpinorField(const ColorSpinorField &src) {
if (REORDER_LOCATION == QUDA_CPU_FIELD_LOCATION &&
typeid(src) == typeid(cpuColorSpinorField)) {
for(int b=0; b<2; ++b){
resizeBufferPinned(bytes + norm_bytes, b);
memset(bufferPinned[b], 0, bytes+norm_bytes); // FIXME (temporary?) bug fix for padding
}
copyGenericColorSpinor(*this, src, QUDA_CPU_FIELD_LOCATION,
bufferPinned[bufferIndex], 0, (char*)bufferPinned[bufferIndex]+bytes, 0);
hipMemcpy(v, bufferPinned[bufferIndex], bytes, hipMemcpyHostToDevice);
hipMemcpy(norm, (char*)bufferPinned[bufferIndex]+bytes, norm_bytes, hipMemcpyHostToDevice);
} else if (typeid(src) == typeid(cudaColorSpinorField)) {
copyGenericColorSpinor(*this, src, QUDA_CUDA_FIELD_LOCATION);
} else {
void *Src, *srcNorm;
if (!zeroCopy) {
resizeBufferDevice(src.Bytes()+src.NormBytes());
Src = bufferDevice;
srcNorm = (char*)bufferDevice + src.Bytes();
hipMemcpy(Src, src.V(), src.Bytes(), hipMemcpyHostToDevice);
hipMemcpy(srcNorm, src.Norm(), src.NormBytes(), hipMemcpyHostToDevice);
} else {
for(int b=0; b<2; ++b){
resizeBufferPinned(src.Bytes()+src.NormBytes(), b);
}
memcpy(bufferPinned[bufferIndex], src.V(), src.Bytes());
memcpy((char*)bufferPinned[bufferIndex]+src.Bytes(), src.Norm(), src.NormBytes());
hipHostGetDevicePointer(&Src, bufferPinned[bufferIndex], 0);
srcNorm = (void*)((char*)Src + src.Bytes());
}
hipMemset(v, 0, bytes); // FIXME (temporary?) bug fix for padding
copyGenericColorSpinor(*this, src, QUDA_CUDA_FIELD_LOCATION, 0, Src, 0, srcNorm);
}
checkCudaError();
return;
}
void cudaColorSpinorField::saveSpinorField(ColorSpinorField &dest) const {
if (REORDER_LOCATION == QUDA_CPU_FIELD_LOCATION &&
typeid(dest) == typeid(cpuColorSpinorField)) {
for(int b=0; b<2; ++b) resizeBufferPinned(bytes+norm_bytes,b);
hipMemcpy(bufferPinned[bufferIndex], v, bytes, hipMemcpyDeviceToHost);
hipMemcpy((char*)bufferPinned[bufferIndex]+bytes, norm, norm_bytes, hipMemcpyDeviceToHost);
copyGenericColorSpinor(dest, *this, QUDA_CPU_FIELD_LOCATION,
0, bufferPinned[bufferIndex], 0, (char*)bufferPinned[bufferIndex]+bytes);
} else if (typeid(dest) == typeid(cudaColorSpinorField)) {
copyGenericColorSpinor(dest, *this, QUDA_CUDA_FIELD_LOCATION);
} else {
void *dst, *dstNorm;
if (!zeroCopy) {
resizeBufferDevice(dest.Bytes()+dest.NormBytes());
dst = bufferDevice;
dstNorm = (char*)bufferDevice+dest.Bytes();
} else {
for(int b=0; b<2; ++b) resizeBufferPinned(dest.Bytes()+dest.NormBytes(),b);
hipHostGetDevicePointer(&dst, bufferPinned[bufferIndex], 0);
dstNorm = (char*)dst+dest.Bytes();
}
copyGenericColorSpinor(dest, *this, QUDA_CUDA_FIELD_LOCATION, dst, v, dstNorm, 0);
if (!zeroCopy) {
hipMemcpy(dest.V(), dst, dest.Bytes(), hipMemcpyDeviceToHost);
hipMemcpy(dest.Norm(), dstNorm, dest.NormBytes(), hipMemcpyDeviceToHost);
} else {
memcpy(dest.V(), bufferPinned[bufferIndex], dest.Bytes());
memcpy(dest.Norm(), (char*)bufferPinned[bufferIndex]+dest.Bytes(), dest.NormBytes());
}
}
checkCudaError();
return;
}
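// allocateGhostBuffer(nFace) sizes the static, double-buffered device ghost
// buffer for nFace faces in every partitioned dimension (with extra room for
// the norms in half precision), reallocates only when the requested size grows,
// and then fills in the per-dimension back/forward pointers into that buffer.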
void cudaColorSpinorField::allocateGhostBuffer(int nFace) {
int Nint = nColor * nSpin * 2; // number of internal degrees of freedom
if (nSpin == 4) Nint /= 2; // spin projection for Wilson
// compute size of buffer required
size_t faceBytes = 0;
for (int i=0; i<4; i++) {
if(!commDimPartitioned(i)) continue;
faceBytes += 2*nFace*ghostFace[i]*Nint*precision;
// add extra space for the norms for half precision
if (precision == QUDA_HALF_PRECISION) faceBytes += 2*nFace*ghostFace[i]*sizeof(float);
}
// only allocate if not already allocated or buffer required is bigger than previously
if(initGhostFaceBuffer == 0 || faceBytes > ghostFaceBytes) {
if (initGhostFaceBuffer) {
for(int b=0; b<2; ++b) device_free(ghostFaceBuffer[b]);
}
if (faceBytes > 0) {
for(int b=0; b<2; ++b) ghostFaceBuffer[b] = device_malloc(faceBytes);
initGhostFaceBuffer = 1;
ghostFaceBytes = faceBytes;
}
}
size_t offset = 0;
for (int i=0; i<4; i++) {
if(!commDimPartitioned(i)) continue;
for(int b=0; b<2; ++b) backGhostFaceBuffer[b][i] = (void*)(((char*)ghostFaceBuffer[b]) + offset);
offset += nFace*ghostFace[i]*Nint*precision;
if (precision == QUDA_HALF_PRECISION) offset += nFace*ghostFace[i]*sizeof(float);
for(int b=0; b<2; ++b) fwdGhostFaceBuffer[b][i] = (void*)(((char*)ghostFaceBuffer[b]) + offset);
offset += nFace*ghostFace[i]*Nint*precision;
if (precision == QUDA_HALF_PRECISION) offset += nFace*ghostFace[i]*sizeof(float);
}
}
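// This overload serves the generic exchange path: it carves the same static
// ghost buffer into per-dimension pieces, handing back buffer 0 for receives
// and buffer 1 for sends.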
void cudaColorSpinorField::allocateGhostBuffer(void *send_buf[], void *recv_buf[]) const
{
int num_faces = 1;
if(nSpin == 1) num_faces = 3; // staggered
int spinor_size = 2*nSpin*nColor*precision;
// resize face only if requested size is larger than previously allocated one
size_t faceBytes = 0;
for (int i=0; i<nDimComms; i++) {
faceBytes += 2*siteSubset*num_faces*surfaceCB[i]*spinor_size;
}
if (!initGhostFaceBuffer || faceBytes > ghostFaceBytes) {
if (initGhostFaceBuffer) {
for (int b=0; b<2; ++b) device_free(ghostFaceBuffer[b]);
}
if (faceBytes > 0) {
for (int b=0; b<2; ++b) ghostFaceBuffer[b] = device_malloc(faceBytes);
initGhostFaceBuffer = 1;
ghostFaceBytes = faceBytes;
}
}
size_t offset = 0;
for (int i=0; i<nDimComms; i++) {
// use first buffer for recv and second for send
recv_buf[2*i+0] = static_cast<void*>((static_cast<char*>(ghostFaceBuffer[0]) + offset));
send_buf[2*i+0] = static_cast<void*>((static_cast<char*>(ghostFaceBuffer[1]) + offset));
offset += siteSubset*num_faces*surfaceCB[i]*spinor_size;
recv_buf[2*i+1] = static_cast<void*>((static_cast<char*>(ghostFaceBuffer[0]) + offset));
send_buf[2*i+1] = static_cast<void*>((static_cast<char*>(ghostFaceBuffer[1]) + offset));
offset += siteSubset*num_faces*surfaceCB[i]*spinor_size;
}
}
void cudaColorSpinorField::freeGhostBuffer(void)
{
if (!initGhostFaceBuffer) return;
for(int b=0; b<2; ++b) device_free(ghostFaceBuffer[b]);
for(int i=0;i < 4; i++){
if(!commDimPartitioned(i)) continue;
for(int b=0; b<2; ++b){
backGhostFaceBuffer[b][i] = NULL;
fwdGhostFaceBuffer[b][i] = NULL;
}
}
initGhostFaceBuffer = 0;
}
// pack the ghost zone into a contiguous buffer for communications
void cudaColorSpinorField::packGhost(const int nFace, const QudaParity parity,
const int dim, const QudaDirection dir,
const int dagger, hipStream_t *stream,
void *buffer, double a, double b)
{
#ifdef MULTI_GPU
int face_num;
if(dir == QUDA_BACKWARDS){
face_num = 0;
}else if(dir == QUDA_FORWARDS){
face_num = 1;
}else{
face_num = 2;
}
void *packBuffer = buffer ? buffer : ghostFaceBuffer[bufferIndex];
packFace(packBuffer, *this, nFace, dagger, parity, dim, face_num, *stream, a, b);
#else
errorQuda("packGhost not built on single-GPU build");
#endif
}
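// sendGhost has three paths: if the face was packed by a kernel (dim != 3, or
// time-dimension kernel/twist packing is enabled) a single contiguous async
// device-to-host copy of the packed buffer suffices; otherwise the time-boundary
// slices are copied out of the field itself with strided 2D async memcpys (plus
// the norms in half precision), with a separate variant for the non-degenerate
// twisted-mass doublet that copies the two flavors individually.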
// send the ghost zone to the host
void cudaColorSpinorField::sendGhost(void *ghost_spinor, const int nFace, const int dim,
const QudaDirection dir, const int dagger,
hipStream_t *stream) {
#ifdef MULTI_GPU
int Nvec = (nSpin == 1 || precision == QUDA_DOUBLE_PRECISION) ? 2 : 4;
int Nint = (nColor * nSpin * 2) / (nSpin == 4 ? 2 : 1); // (spin proj.) degrees of freedom
if (dim !=3 || getKernelPackT() || getTwistPack()) { // use kernels to pack into contiguous buffers then a single hipMemcpy
size_t bytes = nFace*Nint*ghostFace[dim]*precision;
if (precision == QUDA_HALF_PRECISION) bytes += nFace*ghostFace[dim]*sizeof(float);
void* gpu_buf =
(dir == QUDA_BACKWARDS) ? this->backGhostFaceBuffer[bufferIndex][dim] : this->fwdGhostFaceBuffer[bufferIndex][dim];
hipMemcpyAsync(ghost_spinor, gpu_buf, bytes, hipMemcpyDeviceToHost, *stream);
} else if(this->TwistFlavor() != QUDA_TWIST_NONDEG_DOUBLET){ // do multiple cudaMemcpys
int Npad = Nint / Nvec; // number Nvec buffers we have
int Nt_minus1_offset = (volume - nFace*ghostFace[3]); // N_t -1 = Vh-Vsh
int offset = 0;
if (nSpin == 1) {
offset = (dir == QUDA_BACKWARDS) ? 0 : Nt_minus1_offset;
} else if (nSpin == 4) {
// !dagger: send lower components backwards, send upper components forwards
// dagger: send upper components backwards, send lower components forwards
bool upper = dagger ? true : false; // Fwd is !Back
if (dir == QUDA_FORWARDS) upper = !upper;
int lower_spin_offset = Npad*stride;
if (upper) offset = (dir == QUDA_BACKWARDS ? 0 : Nt_minus1_offset);
else offset = lower_spin_offset + (dir == QUDA_BACKWARDS ? 0 : Nt_minus1_offset);
}
// QUDA Memcpy NPad's worth.
// -- Dest will point to the right beginning PAD.
// -- Each Pad has size Nvec*Vsh Floats.
// -- There is Nvec*Stride Floats from the start of one PAD to the start of the next
void *dst = (char*)ghost_spinor;
void *src = (char*)v + offset*Nvec*precision;
size_t len = nFace*ghostFace[3]*Nvec*precision;
size_t spitch = stride*Nvec*precision;
hipMemcpy2DAsync(dst, len, src, spitch, len, Npad, hipMemcpyDeviceToHost, *stream);
if (precision == QUDA_HALF_PRECISION) {
int norm_offset = (dir == QUDA_BACKWARDS) ? 0 : Nt_minus1_offset*sizeof(float);
void *dst = (char*)ghost_spinor + nFace*Nint*ghostFace[3]*precision;
void *src = (char*)norm + norm_offset;
hipMemcpyAsync(dst, src, nFace*ghostFace[3]*sizeof(float), hipMemcpyDeviceToHost, *stream);
}
}else{
int flavorVolume = volume / 2;
int flavorTFace = ghostFace[3] / 2;
int Npad = Nint / Nvec; // number Nvec buffers we have
int flavor1_Nt_minus1_offset = (flavorVolume - flavorTFace);
int flavor2_Nt_minus1_offset = (volume - flavorTFace);
int flavor1_offset = 0;
int flavor2_offset = 0;
// !dagger: send lower components backwards, send upper components forwards
// dagger: send upper components backwards, send lower components forwards
bool upper = dagger ? true : false; // Fwd is !Back
if (dir == QUDA_FORWARDS) upper = !upper;
int lower_spin_offset = Npad*stride;//ndeg tm: stride=2*flavor_volume+pad
if (upper){
flavor1_offset = (dir == QUDA_BACKWARDS ? 0 : flavor1_Nt_minus1_offset);
flavor2_offset = (dir == QUDA_BACKWARDS ? flavorVolume : flavor2_Nt_minus1_offset);
}else{
flavor1_offset = lower_spin_offset + (dir == QUDA_BACKWARDS ? 0 : flavor1_Nt_minus1_offset);
flavor2_offset = lower_spin_offset + (dir == QUDA_BACKWARDS ? flavorVolume : flavor2_Nt_minus1_offset);
}
// QUDA Memcpy NPad's worth.
// -- Dest will point to the right beginning PAD.
// -- Each Pad has size Nvec*Vsh Floats.
// -- There is Nvec*Stride Floats from the start of one PAD to the start of the next
void *dst = (char*)ghost_spinor;
void *src = (char*)v + flavor1_offset*Nvec*precision;
size_t len = flavorTFace*Nvec*precision;
size_t spitch = stride*Nvec*precision;//ndeg tm: stride=2*flavor_volume+pad
size_t dpitch = 2*len;
hipMemcpy2DAsync(dst, dpitch, src, spitch, len, Npad, hipMemcpyDeviceToHost, *stream);
dst = (char*)ghost_spinor+len;
src = (char*)v + flavor2_offset*Nvec*precision;
hipMemcpy2DAsync(dst, dpitch, src, spitch, len, Npad, hipMemcpyDeviceToHost, *stream);
if (precision == QUDA_HALF_PRECISION) {
int Nt_minus1_offset = (flavorVolume - flavorTFace);
int norm_offset = (dir == QUDA_BACKWARDS) ? 0 : Nt_minus1_offset*sizeof(float);
void *dst = (char*)ghost_spinor + Nint*ghostFace[3]*precision;
void *src = (char*)norm + norm_offset;
size_t dpitch = flavorTFace*sizeof(float);
size_t spitch = flavorVolume*sizeof(float);
hipMemcpy2DAsync(dst, dpitch, src, spitch, flavorTFace*sizeof(float), 2, hipMemcpyDeviceToHost, *stream);
}
}
#else
errorQuda("sendGhost not built on single-GPU build");
#endif
}
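// unpackGhost copies a received face from the host buffer into the ghost zone
// appended after the body of the device field (offset length + ghostOffset[dim]),
// and in half precision also restores the corresponding norm region.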
void cudaColorSpinorField::unpackGhost(const void* ghost_spinor, const int nFace,
const int dim, const QudaDirection dir,
const int dagger, hipStream_t* stream)
{
int Nint = (nColor * nSpin * 2) / (nSpin == 4 ? 2 : 1); // (spin proj.) degrees of freedom
int len = nFace*ghostFace[dim]*Nint;
int offset = length + ghostOffset[dim]*nColor*nSpin*2;
offset += (dir == QUDA_BACKWARDS) ? 0 : len;
void *dst = (char*)v + precision*offset;
const void *src = ghost_spinor;
hipMemcpyAsync(dst, src, len*precision, hipMemcpyHostToDevice, *stream);
if (precision == QUDA_HALF_PRECISION) {
// norm region of host ghost zone is at the end of the ghost_spinor
int normlen = nFace*ghostFace[dim];
int norm_offset = stride + ghostNormOffset[dim];
norm_offset += (dir == QUDA_BACKWARDS) ? 0 : normlen;
void *dst = static_cast<char*>(norm) + norm_offset*sizeof(float);
const void *src = static_cast<const char*>(ghost_spinor)+nFace*Nint*ghostFace[dim]*precision;
hipMemcpyAsync(dst, src, normlen*sizeof(float), hipMemcpyHostToDevice, *stream);
}
}
// pack the ghost zone into a contiguous buffer for communications
void cudaColorSpinorField::packGhostExtended(const int nFace, const int R[], const QudaParity parity,
const int dim, const QudaDirection dir,
const int dagger, hipStream_t *stream,
void *buffer)
{
#ifdef MULTI_GPU
int face_num;
if(dir == QUDA_BACKWARDS){
face_num = 0;
}else if(dir == QUDA_FORWARDS){
face_num = 1;
}else{
face_num = 2;
}
void *packBuffer = buffer ? buffer : ghostFaceBuffer[bufferIndex];
packFaceExtended(packBuffer, *this, nFace, R, dagger, parity, dim, face_num, *stream);
#else
errorQuda("packGhostExtended not built on single-GPU build");
#endif
}
// copy data from host buffer into boundary region of device field
void cudaColorSpinorField::unpackGhostExtended(const void* ghost_spinor, const int nFace, const QudaParity parity,
const int dim, const QudaDirection dir,
const int dagger, hipStream_t* stream)
{
// First call the regular unpackGhost routine to copy data into the `usual' ghost-zone region
// of the data array
unpackGhost(ghost_spinor, nFace, dim, dir, dagger, stream);
// Next step is to copy data from the ghost zone back to the interior region
int Nint = (nColor * nSpin * 2) / (nSpin == 4 ? 2 : 1); // (spin proj.) degrees of freedom
int len = nFace*ghostFace[dim]*Nint;
int offset = length + ghostOffset[dim]*nColor*nSpin*2;
offset += (dir == QUDA_BACKWARDS) ? 0 : len;
#ifdef MULTI_GPU
const int face_num = 2;
const bool unpack = true;
const int R[4] = {0,0,0,0};
packFaceExtended(ghostFaceBuffer[bufferIndex], *this, nFace, R, dagger, parity, dim, face_num, *stream, unpack);
#else
errorQuda("unpackGhostExtended not built on single-GPU build");
#endif
}
hipStream_t *stream;
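// createComms builds the communication machinery once per (buffer generation,
// nFace) combination: it stages the halo buffers in pinned host memory (or
// points straight at the GPU ghost buffers under GPU_COMMS) and allocates
// message-handle arrays for every face count, declaring actual send/receive
// handles only for the requested nFace; in half precision with GPU_COMMS the
// norm arrays get their own set of handles.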
void cudaColorSpinorField::createComms(int nFace) {
if(bufferMessageHandler != bufferPinnedResizeCount) destroyComms();
if (!initComms || nFaceComms != nFace) {
// if we are requesting a new number of faces destroy and start over
if(nFace != nFaceComms) destroyComms();
if (siteSubset != QUDA_PARITY_SITE_SUBSET)
errorQuda("Only supports single parity fields");
#ifdef GPU_COMMS
bool comms = false;
for (int i=0; i<nDimComms; i++) if (commDimPartitioned(i)) comms = true;
#endif
if (nFace > maxNface)
errorQuda("Requested number of faces %d in communicator is greater than supported %d",
nFace, maxNface);
// faceBytes is the sum of all face sizes
size_t faceBytes = 0;
// nbytes is the size in bytes of each face
size_t nbytes[QUDA_MAX_DIM];
// The number of degrees of freedom per site for the given
// field. Currently assumes spin projection of a Wilson-like
// field (so half the number of degrees of freedom).
int Ndof = (2 * nSpin * nColor) / (nSpin==4 ? 2 : 1);
for (int i=0; i<nDimComms; i++) {
nbytes[i] = maxNface*surfaceCB[i]*Ndof*precision;
if (precision == QUDA_HALF_PRECISION) nbytes[i] += maxNface*surfaceCB[i]*sizeof(float);
if (!commDimPartitioned(i)) continue;
faceBytes += 2*nbytes[i];
}
#ifndef GPU_COMMS
// use static pinned memory for face buffers
for(int b=0; b<2; ++b){
resizeBufferPinned(2*faceBytes, b); // oversizes for GPU_COMMS case
my_face[b] = bufferPinned[b];
from_face[b] = static_cast<char*>(bufferPinned[b]) + faceBytes;
}
// assign pointers for each face - it's ok to alias for different Nface parameters
size_t offset = 0;
#endif
for (int i=0; i<nDimComms; i++) {
if (!commDimPartitioned(i)) continue;
#ifdef GPU_COMMS
for(int b=0; b<2; ++b){
my_back_face[b][i] = backGhostFaceBuffer[b][i];
from_back_face[b][i] = ghost[i];
if(precision == QUDA_HALF_PRECISION){
my_back_norm_face[b][i] = static_cast<char*>(backGhostFaceBuffer[b][i]) + nFace*ghostFace[i]*Ndof*precision;
from_back_norm_face[b][i] = ghostNorm[i];
}
} // loop over b
#else
for(int b=0; b<2; ++b){
my_back_face[b][i] = static_cast<char*>(my_face[b]) + offset;
from_back_face[b][i] = static_cast<char*>(from_face[b]) + offset;
}
offset += nbytes[i];
#endif
#ifdef GPU_COMMS
for(int b=0; b<2; ++b){
my_fwd_face[b][i] = fwdGhostFaceBuffer[b][i];
from_fwd_face[b][i] = ghost[i] + nFace*ghostFace[i]*Ndof*precision;
if(precision == QUDA_HALF_PRECISION){
my_fwd_norm_face[b][i] = static_cast<char*>(fwdGhostFaceBuffer[b][i]) + nFace*ghostFace[i]*Ndof*precision;
from_fwd_norm_face[b][i] = static_cast<char*>(ghostNorm[i]) + nFace*ghostFace[i]*sizeof(float);
}
} // loop over b
#else
for(int b=0; b<2; ++b){
my_fwd_face[b][i] = static_cast<char*>(my_face[b]) + offset;
from_fwd_face[b][i] = static_cast<char*>(from_face[b]) + offset;
}
offset += nbytes[i];
#endif
}
// create a different message handler for each direction and Nface
for(int b=0; b<2; ++b){
mh_send_fwd[b] = new MsgHandle**[maxNface];
mh_send_back[b] = new MsgHandle**[maxNface];
mh_recv_fwd[b] = new MsgHandle**[maxNface];
mh_recv_back[b] = new MsgHandle**[maxNface];
#ifdef GPU_COMMS
if(precision == QUDA_HALF_PRECISION){
mh_send_norm_fwd[b] = new MsgHandle**[maxNface];
mh_send_norm_back[b] = new MsgHandle**[maxNface];
mh_recv_norm_fwd[b] = new MsgHandle**[maxNface];
mh_recv_norm_back[b] = new MsgHandle**[maxNface];
}
#endif
} // loop over b
for (int j=0; j<maxNface; j++) {
for(int b=0; b<2; ++b){
mh_send_fwd[b][j] = new MsgHandle*[2*nDimComms];
mh_send_back[b][j] = new MsgHandle*[2*nDimComms];
mh_recv_fwd[b][j] = new MsgHandle*[nDimComms];
mh_recv_back[b][j] = new MsgHandle*[nDimComms];
#ifdef GPU_COMMS
if(precision == QUDA_HALF_PRECISION){
mh_send_norm_fwd[b][j] = new MsgHandle*[2*nDimComms];
mh_send_norm_back[b][j] = new MsgHandle*[2*nDimComms];
mh_recv_norm_fwd[b][j] = new MsgHandle*[nDimComms];
mh_recv_norm_back[b][j] = new MsgHandle*[nDimComms];
}
#endif
} // loop over b
for (int i=0; i<nDimComms; i++) {
if (!commDimPartitioned(i)) continue;
#ifdef GPU_COMMS
size_t nbytes_Nface = surfaceCB[i]*Ndof*precision*(j+1);
size_t nbytes_Nface_norm = surfaceCB[i]*(j+1)*sizeof(float);
if (i != 3 || getKernelPackT() || getTwistPack()) {
#else
size_t nbytes_Nface = (nbytes[i] / maxNface) * (j+1);
#endif
for(int b=0; b<2; ++b){
mh_send_fwd[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(my_fwd_face[b][i], i, +1, nbytes_Nface) : NULL;
mh_send_back[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(my_back_face[b][i], i, -1, nbytes_Nface) : NULL;
mh_send_fwd[b][j][2*i+1] = mh_send_fwd[b][j][2*i]; // alias pointers
mh_send_back[b][j][2*i+1] = mh_send_back[b][j][2*i]; // alias pointers
}
#ifdef GPU_COMMS
if(precision == QUDA_HALF_PRECISION){
for(int b=0; b<2; ++b){
mh_send_norm_fwd[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(my_fwd_norm_face[b][i], i, +1, nbytes_Nface_norm) : NULL;
mh_send_norm_back[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(my_back_norm_face[b][i], i, -1, nbytes_Nface_norm) : NULL;
mh_send_norm_fwd[b][j][2*i+1] = mh_send_norm_fwd[b][j][2*i];
mh_send_norm_back[b][j][2*i+1] = mh_send_norm_back[b][j][2*i];
}
}
} else if (this->TwistFlavor() == QUDA_TWIST_NONDEG_DOUBLET) {
errorQuda("GPU_COMMS for non-degenerate doublet only supported with time-dimension kernel packing enabled.");
} else {
/*
Use a strided communicator here: the previously declared my_fwd_face and
my_back_face pointers do not map 1-to-1 onto the strided layout, so we
compute the required base pointers directly and pass them into the
communicator construction.
*/
int Nblocks = Ndof / Nvec(); // number of Nvec buffers we have
// start of last time slice chunk we are sending forwards
int endOffset = (volume - (j+1)*ghostFace[i]);
size_t offset[4];
void *base[4];
if (nSpin == 1) { // staggered is invariant with dagger
offset[2*0 + 0] = 0;
offset[2*1 + 0] = endOffset;
offset[2*0 + 1] = offset[2*0 + 0];
offset[2*1 + 1] = offset[2*1 + 0];
} else if (nSpin == 4) {
// !dagger: send last components backwards, send first components forwards
offset[2*0 + 0] = Nblocks*stride;
offset[2*1 + 0] = endOffset;
// dagger: send first components backwards, send last components forwards
offset[2*0 + 1] = 0;
offset[2*1 + 1] = Nblocks*stride + endOffset;
} else {
errorQuda("Unsupported number of spin components");
}
for (int k=0; k<4; k++) {
base[k] = static_cast<char*>(v) + offset[k]*Nvec()*precision; // total offset in bytes
}
size_t blksize = (j+1)*ghostFace[i]*Nvec()*precision; // (j+1) is number of faces
size_t Stride = stride*Nvec()*precision;
if (blksize * Nblocks != nbytes_Nface)
errorQuda("Total strided message size does not match expected size");
//printf("%d strided sends with Nface=%d Nblocks=%d blksize=%d Stride=%d\n", i, j+1, Nblocks, blksize, Stride);
for(int b=0; b<2; ++b){
// only allocate a communicator for the present face (this needs to be cleaned up)
mh_send_fwd[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_strided_send_relative(base[2], i, +1, blksize, Nblocks, Stride) : NULL;
mh_send_back[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_strided_send_relative(base[0], i, -1, blksize, Nblocks, Stride) : NULL;
if (nSpin ==4) { // dagger communicators
mh_send_fwd[b][j][2*i+1] = (j+1 == nFace) ? comm_declare_strided_send_relative(base[3], i, +1, blksize, Nblocks, Stride) : NULL;
mh_send_back[b][j][2*i+1] = (j+1 == nFace) ? comm_declare_strided_send_relative(base[1], i, -1, blksize, Nblocks, Stride) : NULL;
} else {
mh_send_fwd[b][j][2*i+1] = mh_send_fwd[b][j][2*i+0];
mh_send_back[b][j][2*i+1] = mh_send_back[b][j][2*i+0];
}
} // loop over b
if(precision == QUDA_HALF_PRECISION){
int Nt_minus1_offset = (volume - nFace*ghostFace[3]); // The space-time coordinate of the start of the last time slice
void *norm_fwd = static_cast<float*>(norm) + Nt_minus1_offset;
void *norm_back = norm; // the first time slice has zero offset
for(int b=0; b<2; ++b){
mh_send_norm_fwd[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(norm_fwd, i, +1, surfaceCB[i]*(j+1)*sizeof(float)) : NULL;
mh_send_norm_back[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(norm_back, i, -1, surfaceCB[i]*(j+1)*sizeof(float)) : NULL;
mh_send_norm_fwd[b][j][2*i+1] = mh_send_norm_fwd[b][j][2*i];
mh_send_norm_back[b][j][2*i+1] = mh_send_norm_back[b][j][2*i];
}
}
}
if(precision == QUDA_HALF_PRECISION){
for(int b=0; b<2; ++b){
mh_recv_norm_fwd[b][j][i] = (j+1 == nFace) ? comm_declare_receive_relative(from_fwd_norm_face[b][i], i, +1, nbytes_Nface_norm) : NULL;
mh_recv_norm_back[b][j][i] = (j+1 == nFace) ? comm_declare_receive_relative(from_back_norm_face[b][i], i, -1, nbytes_Nface_norm) : NULL;
}
}
#endif // GPU_COMMS
for(int b=0; b<2; ++b){
mh_recv_fwd[b][j][i] = (j+1 == nFace) ? comm_declare_receive_relative(from_fwd_face[b][i], i, +1, nbytes_Nface) : NULL;
mh_recv_back[b][j][i] = (j+1 == nFace) ? comm_declare_receive_relative(from_back_face[b][i], i, -1, nbytes_Nface) : NULL;
}
} // loop over dimension
}
bufferMessageHandler = bufferPinnedResizeCount;
initComms = true;
nFaceComms = nFace;
}
checkCudaError();
}
void cudaColorSpinorField::destroyComms() {
if (initComms) {
for(int b=0; b<2; ++b){
for (int j=0; j<maxNface; j++) {
for (int i=0; i<nDimComms; i++) {
if (commDimPartitioned(i)) {
if (mh_recv_fwd[b][j][i]) comm_free(mh_recv_fwd[b][j][i]);
if (mh_recv_back[b][j][i]) comm_free(mh_recv_back[b][j][i]);
if (mh_send_fwd[b][j][2*i]) comm_free(mh_send_fwd[b][j][2*i]);
if (mh_send_back[b][j][2*i]) comm_free(mh_send_back[b][j][2*i]);
// only in a special case are these not aliasing pointers
#ifdef GPU_COMMS
if(precision == QUDA_HALF_PRECISION){
if (mh_recv_norm_fwd[b][j][i]) comm_free(mh_recv_norm_fwd[b][j][i]);
if (mh_recv_norm_back[b][j][i]) comm_free(mh_recv_norm_back[b][j][i]);
if (mh_send_norm_fwd[b][j][2*i]) comm_free(mh_send_norm_fwd[b][j][2*i]);
if (mh_send_norm_back[b][j][2*i]) comm_free(mh_send_norm_back[b][j][2*i]);
}
if (i == 3 && !getKernelPackT() && nSpin == 4) {
if (mh_send_fwd[b][j][2*i+1]) comm_free(mh_send_fwd[b][j][2*i+1]);
if (mh_send_back[b][j][2*i+1]) comm_free(mh_send_back[b][j][2*i+1]);
}
#endif // GPU_COMMS
}
}
delete []mh_recv_fwd[b][j];
delete []mh_recv_back[b][j];
delete []mh_send_fwd[b][j];
delete []mh_send_back[b][j];
#ifdef GPU_COMMS
if(precision == QUDA_HALF_PRECISION){
delete []mh_recv_norm_fwd[b][j];
delete []mh_recv_norm_back[b][j];
delete []mh_send_norm_fwd[b][j];
delete []mh_send_norm_back[b][j];
}
#endif
}
delete []mh_recv_fwd[b];
delete []mh_recv_back[b];
delete []mh_send_fwd[b];
delete []mh_send_back[b];
for (int i=0; i<nDimComms; i++) {
my_fwd_face[b][i] = NULL;
my_back_face[b][i] = NULL;
from_fwd_face[b][i] = NULL;
from_back_face[b][i] = NULL;
}
#ifdef GPU_COMMS
if(precision == QUDA_HALF_PRECISION){
delete []mh_recv_norm_fwd[b];
delete []mh_recv_norm_back[b];
delete []mh_send_norm_fwd[b];
delete []mh_send_norm_back[b];
}
for(int i=0; i<nDimComms; i++){
my_fwd_norm_face[b][i] = NULL;
my_back_norm_face[b][i] = NULL;
from_fwd_norm_face[b][i] = NULL;
from_back_norm_face[b][i] = NULL;
}
#endif
} // loop over b
initComms = false;
checkCudaError();
}
}
void cudaColorSpinorField::streamInit(hipStream_t *stream_p){
stream = stream_p;
}
void cudaColorSpinorField::pack(int nFace, int parity, int dagger, hipStream_t *stream_p,
bool zeroCopyPack, double a, double b) {
allocateGhostBuffer(nFace); // allocate the ghost buffer if not yet allocated
createComms(nFace); // must call this first
stream = stream_p;
const int dim=-1; // pack all partitioned dimensions
if (zeroCopyPack) {
void *my_face_d;
hipHostGetDevicePointer(&my_face_d, my_face[bufferIndex], 0); // set the matching device pointer
packGhost(nFace, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[0], my_face_d, a, b);
} else {
packGhost(nFace, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[Nstream-1], 0, a, b);
}
}
void cudaColorSpinorField::pack(int nFace, int parity, int dagger, int stream_idx,
bool zeroCopyPack, double a, double b) {
allocateGhostBuffer(nFace); // allocate the ghost buffer if not yet allocated
createComms(nFace); // must call this first
const int dim=-1; // pack all partitioned dimensions
if (zeroCopyPack) {
void *my_face_d;
hipHostGetDevicePointer(&my_face_d, my_face[bufferIndex], 0); // set the matching device pointer
packGhost(nFace, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[stream_idx], my_face_d, a, b);
} else {
packGhost(nFace, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[stream_idx], 0, a, b);
}
}
void cudaColorSpinorField::packExtended(const int nFace, const int R[], const int parity,
const int dagger, const int dim,
hipStream_t *stream_p, const bool zeroCopyPack){
allocateGhostBuffer(nFace); // allocate the ghost buffer if not yet allocated
createComms(nFace); // must call this first
stream = stream_p;
void *my_face_d = NULL;
if(zeroCopyPack){
hipHostGetDevicePointer(&my_face_d, my_face[bufferIndex], 0);
packGhostExtended(nFace, R, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[0], my_face_d);
}else{
packGhostExtended(nFace, R, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[Nstream-1], my_face_d);
}
}
void cudaColorSpinorField::gather(int nFace, int dagger, int dir, hipStream_t* stream_p)
{
int dim = dir/2;
// If stream_p != 0, use pack_stream, else use the stream array
hipStream_t *pack_stream = (stream_p) ? stream_p : stream+dir;
if(dir%2 == 0){
// backwards copy to host
sendGhost(my_back_face[bufferIndex][dim], nFace, dim, QUDA_BACKWARDS, dagger, pack_stream);
} else {
// forwards copy to host
sendGhost(my_fwd_face[bufferIndex][dim], nFace, dim, QUDA_FORWARDS, dagger, pack_stream);
}
}
void cudaColorSpinorField::recvStart(int nFace, int dir, int dagger) {
int dim = dir/2;
if(!commDimPartitioned(dim)) return;
if (dir%2 == 0) { // sending backwards
// Prepost receive
comm_start(mh_recv_fwd[bufferIndex][nFace-1][dim]);
} else { //sending forwards
// Prepost receive
comm_start(mh_recv_back[bufferIndex][nFace-1][dim]);
}
#ifdef GPU_COMMS
if(precision != QUDA_HALF_PRECISION) return;
if (dir%2 == 0) { // sending backwards
// Prepost receive
comm_start(mh_recv_norm_fwd[bufferIndex][nFace-1][dim]);
} else { //sending forwards
// Prepost receive
comm_start(mh_recv_norm_back[bufferIndex][nFace-1][dim]);
}
#endif
}
void cudaColorSpinorField::sendStart(int nFace, int dir, int dagger) {
int dim = dir / 2;
if(!commDimPartitioned(dim)) return;
if (dir%2 == 0) { // sending backwards
comm_start(mh_send_back[bufferIndex][nFace-1][2*dim+dagger]);
} else { //sending forwards
comm_start(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger]);
}
#ifdef GPU_COMMS
if(precision != QUDA_HALF_PRECISION) return;
if (dir%2 == 0) { // sending backwards
comm_start(mh_send_norm_back[bufferIndex][nFace-1][2*dim+dagger]);
} else { //sending forwards
comm_start(mh_send_norm_fwd[bufferIndex][nFace-1][2*dim+dagger]);
}
#endif
}
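// commsStart combines recvStart and sendStart for one direction: it preposts
// the receive from the opposite side and starts the matching send, and under
// GPU_COMMS with half precision does the same for the norm message handles.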
void cudaColorSpinorField::commsStart(int nFace, int dir, int dagger) {
int dim = dir / 2;
if(!commDimPartitioned(dim)) return;
if (dir%2 == 0) { // sending backwards
// Prepost receive
comm_start(mh_recv_fwd[bufferIndex][nFace-1][dim]);
comm_start(mh_send_back[bufferIndex][nFace-1][2*dim+dagger]);
} else { //sending forwards
// Prepost receive
comm_start(mh_recv_back[bufferIndex][nFace-1][dim]);
// Begin forward send
comm_start(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger]);
}
#ifdef GPU_COMMS
if(precision != QUDA_HALF_PRECISION) return;
if (dir%2 == 0) { // sending backwards
// Prepost receive
comm_start(mh_recv_norm_fwd[bufferIndex][nFace-1][dim]);
comm_start(mh_send_norm_back[bufferIndex][nFace-1][2*dim+dagger]);
} else { //sending forwards
// Prepost receive
comm_start(mh_recv_norm_back[bufferIndex][nFace-1][dim]);
// Begin forward send
comm_start(mh_send_norm_fwd[bufferIndex][nFace-1][2*dim+dagger]);
}
#endif
}
int cudaColorSpinorField::commsQuery(int nFace, int dir, int dagger) {
int dim = dir / 2;
if(!commDimPartitioned(dim)) return 0;
#ifdef GPU_COMMS
if(precision != QUDA_HALF_PRECISION){
#endif
if(dir%2==0) {
if (comm_query(mh_recv_fwd[bufferIndex][nFace-1][dim]) &&
comm_query(mh_send_back[bufferIndex][nFace-1][2*dim+dagger])) return 1;
} else {
if (comm_query(mh_recv_back[bufferIndex][nFace-1][dim]) &&
comm_query(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger])) return 1;
}
#ifdef GPU_COMMS
}else{ // half precision
if(dir%2==0) {
if (comm_query(mh_recv_fwd[bufferIndex][nFace-1][dim]) &&
comm_query(mh_send_back[bufferIndex][nFace-1][2*dim+dagger]) &&
comm_query(mh_recv_norm_fwd[bufferIndex][nFace-1][dim]) &&
comm_query(mh_send_norm_back[bufferIndex][nFace-1][2*dim+dagger])) return 1;
} else {
if (comm_query(mh_recv_back[bufferIndex][nFace-1][dim]) &&
comm_query(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger]) &&
comm_query(mh_recv_norm_back[bufferIndex][nFace-1][dim]) &&
comm_query(mh_send_norm_fwd[bufferIndex][nFace-1][2*dim+dagger])) return 1;
}
} // half precision
#endif
return 0;
}
void cudaColorSpinorField::commsWait(int nFace, int dir, int dagger) {
int dim = dir / 2;
if(!commDimPartitioned(dim)) return;
#ifdef GPU_COMMS
if(precision != QUDA_HALF_PRECISION){
#endif
if (dir%2==0) {
comm_wait(mh_recv_fwd[bufferIndex][nFace-1][dim]);
comm_wait(mh_send_back[bufferIndex][nFace-1][2*dim+dagger]);
} else {
comm_wait(mh_recv_back[bufferIndex][nFace-1][dim]);
comm_wait(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger]);
}
#ifdef GPU_COMMS
} else { // half precision
if (dir%2==0) {
comm_wait(mh_recv_fwd[bufferIndex][nFace-1][dim]);
comm_wait(mh_send_back[bufferIndex][nFace-1][2*dim+dagger]);
comm_wait(mh_recv_norm_fwd[bufferIndex][nFace-1][dim]);
comm_wait(mh_send_norm_back[bufferIndex][nFace-1][2*dim+dagger]);
} else {
comm_wait(mh_recv_back[bufferIndex][nFace-1][dim]);
comm_wait(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger]);
comm_wait(mh_recv_norm_back[bufferIndex][nFace-1][dim]);
comm_wait(mh_send_norm_fwd[bufferIndex][nFace-1][2*dim+dagger]);
}
} // half precision
#endif
return;
}
void cudaColorSpinorField::scatter(int nFace, int dagger, int dir, hipStream_t* stream_p)
{
int dim = dir/2;
if(!commDimPartitioned(dim)) return;
// both scattering occurrences now go through the same stream
if (dir%2==0) {// receive from forwards
unpackGhost(from_fwd_face[bufferIndex][dim], nFace, dim, QUDA_FORWARDS, dagger, stream_p);
} else { // receive from backwards
unpackGhost(from_back_face[bufferIndex][dim], nFace, dim, QUDA_BACKWARDS, dagger, stream_p);
}
}
void cudaColorSpinorField::scatter(int nFace, int dagger, int dir)
{
int dim = dir/2;
if(!commDimPartitioned(dim)) return;
// both scattering occurrences now go through the same stream
if (dir%2==0) {// receive from forwards
unpackGhost(from_fwd_face[bufferIndex][dim], nFace, dim, QUDA_FORWARDS, dagger, &stream[2*dim/*+0*/]);
} else { // receive from backwards
unpackGhost(from_back_face[bufferIndex][dim], nFace, dim, QUDA_BACKWARDS, dagger, &stream[2*dim/*+1*/]);
}
}
void cudaColorSpinorField::scatterExtended(int nFace, int parity, int dagger, int dir)
{
int dim = dir/2;
if(!commDimPartitioned(dim)) return;
if (dir%2==0) {// receive from forwards
unpackGhostExtended(from_fwd_face[bufferIndex][dim], nFace, static_cast<QudaParity>(parity), dim, QUDA_FORWARDS, dagger, &stream[2*dim/*+0*/]);
} else { // receive from backwards
unpackGhostExtended(from_back_face[bufferIndex][dim], nFace, static_cast<QudaParity>(parity), dim, QUDA_BACKWARDS, dagger, &stream[2*dim/*+1*/]);
}
}
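// A hedged sketch (kept as a comment, not part of this translation unit) of how
// the pack/gather/comms/scatter routines above are typically sequenced for one
// halo exchange; `a` is an illustrative cudaColorSpinorField and `streams` an
// array registered earlier with a.streamInit(streams):
//   a.pack(nFace, parity, dagger, streams, /*zeroCopyPack=*/false, 0., 0.);
//   for (int dir = 0; dir < 8; dir++) {   // 2 directions x 4 dimensions; in practice
//     a.gather(nFace, dagger, dir);       // only partitioned dimensions are visited
//     a.commsStart(nFace, dir, dagger);   // prepost receive, start send
//   }
//   for (int dir = 0; dir < 8; dir++) {
//     a.commsWait(nFace, dir, dagger);    // wait until the neighbour's face arrived
//     a.scatter(nFace, dagger, dir);      // copy it into the device ghost zone
//   }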
void cudaColorSpinorField::exchangeGhost(QudaParity parity, int dagger) const {
void **send = static_cast<void**>(safe_malloc(nDimComms * 2 * sizeof(void*)));
// allocate ghost buffer if not yet allocated
allocateGhostBuffer(send, ghost_fixme);
genericPackGhost(send, *this, parity, dagger);
int nFace = (nSpin == 1) ? 3 : 1;
exchange(ghost_fixme, send, nFace);
host_free(send);
}
std::ostream& operator<<(std::ostream &out, const cudaColorSpinorField &a) {
out << (const ColorSpinorField&)a;
out << "v = " << a.v << std::endl;
out << "norm = " << a.norm << std::endl;
out << "alloc = " << a.alloc << std::endl;
out << "init = " << a.init << std::endl;
return out;
}
//! for deflated solvers:
cudaColorSpinorField& cudaColorSpinorField::Eigenvec(const int idx) const {
if (siteSubset == QUDA_PARITY_SITE_SUBSET && this->EigvId() == -1) {
if (idx < this->EigvDim()) {//setup eigenvector from the set
return *(dynamic_cast<cudaColorSpinorField*>(eigenvectors[idx]));
}
else{
errorQuda("Incorrect eigenvector index...");
}
}
errorQuda("Eigenvector must be a parity spinor");
exit(-1);
}
// copyCuda currently cannot work with a set of spinor fields.
void cudaColorSpinorField::CopyEigenvecSubset(cudaColorSpinorField &dst, const int range, const int first_element) const{
#if 0
if(first_element < 0) errorQuda("\nError: trying to set negative first element.\n");
if (siteSubset == QUDA_PARITY_SITE_SUBSET && this->EigvId() == -1) {
if (first_element == 0 && range == this->EigvDim())
{
if(range != dst.EigvDim()) errorQuda("\nError: eigenvector range too big.\n");
checkField(dst, *this);
copyCuda(dst, *this);
}
else if ((first_element+range) < this->EigvDim())
{//setup eigenvector subset
cudaColorSpinorField *eigv_subset;
ColorSpinorParam param;
param.nColor = nColor;
param.nSpin = nSpin;
param.twistFlavor = twistFlavor;
param.precision = precision;
param.nDim = nDim;
param.pad = pad;
param.siteSubset = siteSubset;
param.siteOrder = siteOrder;
param.fieldOrder = fieldOrder;
param.gammaBasis = gammaBasis;
memcpy(param.x, x, nDim*sizeof(int));
param.create = QUDA_REFERENCE_FIELD_CREATE;
param.eigv_dim = range;
param.eigv_id = -1;
param.v = (void*)((char*)v + first_element*eigv_bytes);
param.norm = (void*)((char*)norm + first_element*eigv_norm_bytes);
eigv_subset = new cudaColorSpinorField(param);
//Not really needed:
eigv_subset->eigenvectors.reserve(param.eigv_dim);
for(int id = first_element; id < (first_element+range); id++)
{
param.eigv_id = id;
eigv_subset->eigenvectors.push_back(new cudaColorSpinorField(*this, param));
}
checkField(dst, *eigv_subset);
copyCuda(dst, *eigv_subset);
delete eigv_subset;
}
else{
errorQuda("Incorrect eigenvector dimension...");
}
}
else{
errorQuda("Eigenvector must be a parity spinor");
exit(-1);
}
#endif
}
void cudaColorSpinorField::getTexObjectInfo() const
{
#ifdef USE_TEXTURE_OBJECTS
printfQuda("\nPrint texture info for the field:\n");
std::cout << *this;
hipResourceDesc resDesc;
//memset(&resDesc, 0, sizeof(resDesc));
hipGetTextureObjectResourceDesc(&resDesc, this->Tex());
printfQuda("\nDevice pointer: %p\n", resDesc.res.linear.devPtr);
printfQuda("\nVolume (in bytes): %lu\n", resDesc.res.linear.sizeInBytes);
if (resDesc.resType == hipResourceTypeLinear) printfQuda("\nResource type: linear \n");
checkCudaError();
#endif
}
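// Source() builds a temporary host-side field in space-spin-color order, fills
// it with the requested source via cpuColorSpinorField::Source, and copies it
// back into this device field through the assignment operator.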
void cudaColorSpinorField::Source(const QudaSourceType sourceType, const int st, const int s, const int c) {
ColorSpinorParam param(*this);
param.fieldOrder = QUDA_SPACE_SPIN_COLOR_FIELD_ORDER;
param.location = QUDA_CPU_FIELD_LOCATION;
param.create = QUDA_NULL_FIELD_CREATE;
cpuColorSpinorField tmp(param);
tmp.Source(sourceType, st, s, c);
*this = tmp;
}
} // namespace quda
| 0c8cd54b8b256c17181ef37a00c9a451b3d86b9d.cu | #include <stdlib.h>
#include <stdio.h>
#include <typeinfo>
#include <color_spinor_field.h>
#include <blas_quda.h>
#include <string.h>
#include <iostream>
#include <misc_helpers.h>
#include <face_quda.h>
#include <dslash_quda.h>
#ifdef DEVICE_PACK
#define REORDER_LOCATION QUDA_CUDA_FIELD_LOCATION
#else
#define REORDER_LOCATION QUDA_CPU_FIELD_LOCATION
#endif
int zeroCopy = 0;
namespace quda {
int cudaColorSpinorField::bufferIndex = 0;
int cudaColorSpinorField::initGhostFaceBuffer = 0;
void* cudaColorSpinorField::ghostFaceBuffer[2]; //gpu memory
void* cudaColorSpinorField::fwdGhostFaceBuffer[2][QUDA_MAX_DIM]; //pointers to ghostFaceBuffer
void* cudaColorSpinorField::backGhostFaceBuffer[2][QUDA_MAX_DIM]; //pointers to ghostFaceBuffer
size_t cudaColorSpinorField::ghostFaceBytes = 0;
cudaColorSpinorField::cudaColorSpinorField(const ColorSpinorParam ¶m) :
ColorSpinorField(param), alloc(false), init(true), texInit(false),
initComms(false), bufferMessageHandler(0), nFaceComms(0) {
// this must come before create
if (param.create == QUDA_REFERENCE_FIELD_CREATE) {
v = param.v;
norm = param.norm;
}
create(param.create);
if (param.create == QUDA_NULL_FIELD_CREATE) {
// do nothing
} else if (param.create == QUDA_ZERO_FIELD_CREATE) {
zero();
} else if (param.create == QUDA_REFERENCE_FIELD_CREATE) {
// do nothing
} else if (param.create == QUDA_COPY_FIELD_CREATE){
errorQuda("not implemented");
}
checkCudaError();
}
cudaColorSpinorField::cudaColorSpinorField(const cudaColorSpinorField &src) :
ColorSpinorField(src), alloc(false), init(true), texInit(false),
initComms(false), bufferMessageHandler(0), nFaceComms(0) {
create(QUDA_COPY_FIELD_CREATE);
copySpinorField(src);
}
// creates a copy of src, any differences defined in param
cudaColorSpinorField::cudaColorSpinorField(const ColorSpinorField &src,
const ColorSpinorParam ¶m) :
ColorSpinorField(src), alloc(false), init(true), texInit(false),
initComms(false), bufferMessageHandler(0), nFaceComms(0) {
// can only override if we are not using a reference or parity special case
if (param.create != QUDA_REFERENCE_FIELD_CREATE ||
(param.create == QUDA_REFERENCE_FIELD_CREATE &&
src.SiteSubset() == QUDA_FULL_SITE_SUBSET &&
param.siteSubset == QUDA_PARITY_SITE_SUBSET &&
typeid(src) == typeid(cudaColorSpinorField) ) ||
(param.create == QUDA_REFERENCE_FIELD_CREATE && param.eigv_dim > 0)) {
reset(param);
} else {
errorQuda("Undefined behaviour"); // else silent bug possible?
}
// This must be set before create is called
if (param.create == QUDA_REFERENCE_FIELD_CREATE) {
if (typeid(src) == typeid(cudaColorSpinorField)) {
v = (void*)src.V();
norm = (void*)src.Norm();
} else {
errorQuda("Cannot reference a non-cuda field");
}
if (this->EigvDim() > 0)
{//setup eigenvector form the set
if(eigv_dim != this->EigvDim()) errorQuda("\nEigenvector set does not match..\n") ;//for debug only.
if(eigv_id > -1)
{
//printfQuda("\nSetting pointers for vector id %d\n", eigv_id); //for debug only.
v = (void*)((char*)v + eigv_id*bytes);
norm = (void*)((char*)norm + eigv_id*norm_bytes);
}
//do nothing for the eigenvector subset...
}
}
create(param.create);
if (param.create == QUDA_NULL_FIELD_CREATE) {
// do nothing
} else if (param.create == QUDA_ZERO_FIELD_CREATE) {
zero();
} else if (param.create == QUDA_COPY_FIELD_CREATE) {
copySpinorField(src);
} else if (param.create == QUDA_REFERENCE_FIELD_CREATE) {
// do nothing
} else {
errorQuda("CreateType %d not implemented", param.create);
}
}
cudaColorSpinorField::cudaColorSpinorField(const ColorSpinorField &src)
: ColorSpinorField(src), alloc(false), init(true), texInit(false),
initComms(false), bufferMessageHandler(0), nFaceComms(0) {
create(QUDA_COPY_FIELD_CREATE);
copySpinorField(src);
}
ColorSpinorField& cudaColorSpinorField::operator=(const ColorSpinorField &src) {
if (typeid(src) == typeid(cudaColorSpinorField)) {
*this = (dynamic_cast<const cudaColorSpinorField&>(src));
} else if (typeid(src) == typeid(cpuColorSpinorField)) {
*this = (dynamic_cast<const cpuColorSpinorField&>(src));
} else {
errorQuda("Unknown input ColorSpinorField %s", typeid(src).name());
}
return *this;
}
cudaColorSpinorField& cudaColorSpinorField::operator=(const cudaColorSpinorField &src) {
if (&src != this) {
// keep current attributes unless unset
if (!ColorSpinorField::init) { // note this will turn a reference field into a regular field
destroy();
destroyComms(); // not sure if this necessary
ColorSpinorField::operator=(src);
create(QUDA_COPY_FIELD_CREATE);
}
copySpinorField(src);
}
return *this;
}
cudaColorSpinorField& cudaColorSpinorField::operator=(const cpuColorSpinorField &src) {
// keep current attributes unless unset
if (!ColorSpinorField::init) { // note this will turn a reference field into a regular field
destroy();
ColorSpinorField::operator=(src);
create(QUDA_COPY_FIELD_CREATE);
}
loadSpinorField(src);
return *this;
}
cudaColorSpinorField::~cudaColorSpinorField() {
destroyComms();
destroy();
}
void cudaColorSpinorField::create(const QudaFieldCreate create) {
if (siteSubset == QUDA_FULL_SITE_SUBSET && siteOrder != QUDA_EVEN_ODD_SITE_ORDER) {
errorQuda("Subset not implemented");
}
if (create != QUDA_REFERENCE_FIELD_CREATE) {
v = device_malloc(bytes);
if (precision == QUDA_HALF_PRECISION) {
norm = device_malloc(norm_bytes);
}
alloc = true;
}
if (siteSubset == QUDA_FULL_SITE_SUBSET) {
if(eigv_dim != 0) errorQuda("Eigenvectors must be parity fields!");
// create the associated even and odd subsets
ColorSpinorParam param;
param.siteSubset = QUDA_PARITY_SITE_SUBSET;
param.nDim = nDim;
memcpy(param.x, x, nDim*sizeof(int));
param.x[0] /= 2; // set single parity dimensions
param.create = QUDA_REFERENCE_FIELD_CREATE;
param.v = v;
param.norm = norm;
even = new cudaColorSpinorField(*this, param);
odd = new cudaColorSpinorField(*this, param);
// need this hackery for the moment (need to locate the odd pointers half way into the full field)
(dynamic_cast<cudaColorSpinorField*>(odd))->v = (void*)((char*)v + bytes/2);
if (precision == QUDA_HALF_PRECISION)
(dynamic_cast<cudaColorSpinorField*>(odd))->norm = (void*)((char*)norm + norm_bytes/2);
for(int i=0; i<nDim; ++i){
if(commDimPartitioned(i)){
(dynamic_cast<cudaColorSpinorField*>(odd))->ghost[i] =
static_cast<char*>((dynamic_cast<cudaColorSpinorField*>(odd))->ghost[i]) + bytes/2;
if(precision == QUDA_HALF_PRECISION)
(dynamic_cast<cudaColorSpinorField*>(odd))->ghostNorm[i] =
static_cast<char*>((dynamic_cast<cudaColorSpinorField*>(odd))->ghostNorm[i]) + norm_bytes/2;
}
}
#ifdef USE_TEXTURE_OBJECTS
dynamic_cast<cudaColorSpinorField*>(even)->destroyTexObject();
dynamic_cast<cudaColorSpinorField*>(even)->createTexObject();
dynamic_cast<cudaColorSpinorField*>(odd)->destroyTexObject();
dynamic_cast<cudaColorSpinorField*>(odd)->createTexObject();
#endif
}
else{//siteSubset == QUDA_PARITY_SITE_SUBSET
//! set up an object for the selected eigenvector (the 1st one by default):
if ((eigv_dim > 0) && (create != QUDA_REFERENCE_FIELD_CREATE) && (eigv_id == -1))
{
//if(bytes > 1811939328) warningQuda("\nCUDA API probably won't be able to create texture object for the eigenvector set... Object size is : %u bytes\n", bytes);
if (getVerbosity() == QUDA_DEBUG_VERBOSE) printfQuda("\nEigenvector set constructor...\n");
// create the associated even and odd subsets
ColorSpinorParam param;
param.siteSubset = QUDA_PARITY_SITE_SUBSET;
param.nDim = nDim;
memcpy(param.x, x, nDim*sizeof(int));
param.create = QUDA_REFERENCE_FIELD_CREATE;
param.v = v;
param.norm = norm;
param.eigv_dim = eigv_dim;
// reserve space for the eigenvector set
eigenvectors.reserve(eigv_dim);
//setup volume, [real_]length and stride for a single eigenvector
for(int id = 0; id < eigv_dim; id++)
{
param.eigv_id = id;
eigenvectors.push_back(new cudaColorSpinorField(*this, param));
#ifdef USE_TEXTURE_OBJECTS //(a lot of texture objects...)
dynamic_cast<cudaColorSpinorField*>(eigenvectors[id])->destroyTexObject();
dynamic_cast<cudaColorSpinorField*>(eigenvectors[id])->createTexObject();
#endif
}
}
}
if (create != QUDA_REFERENCE_FIELD_CREATE) {
if (siteSubset != QUDA_FULL_SITE_SUBSET) {
zeroPad();
} else {
(dynamic_cast<cudaColorSpinorField*>(even))->zeroPad();
(dynamic_cast<cudaColorSpinorField*>(odd))->zeroPad();
}
}
#ifdef USE_TEXTURE_OBJECTS
if((eigv_dim == 0) || (eigv_dim > 0 && eigv_id > -1))
createTexObject();
#endif
// initialize the ghost pointers
if(siteSubset == QUDA_PARITY_SITE_SUBSET) {
for(int i=0; i<nDim; ++i){
if(commDimPartitioned(i)){
ghost[i] = (char*)v + (stride + ghostOffset[i])*nColor*nSpin*2*precision;
if(precision == QUDA_HALF_PRECISION)
ghostNorm[i] = (char*)norm + (stride + ghostNormOffset[i])*QUDA_SINGLE_PRECISION;
}
}
}
checkCudaError();
}
#ifdef USE_TEXTURE_OBJECTS
void cudaColorSpinorField::createTexObject() {
if (isNative()) {
if (texInit) errorQuda("Already bound textures");
// create the texture for the field components
cudaChannelFormatDesc desc;
memset(&desc, 0, sizeof(cudaChannelFormatDesc));
if (precision == QUDA_SINGLE_PRECISION) desc.f = cudaChannelFormatKindFloat;
else desc.f = cudaChannelFormatKindSigned; // half is short, double is int2
// staggered and coarse fields in half and single are always two component
if ( (nSpin == 1 || nSpin == 2) && (precision == QUDA_HALF_PRECISION || precision == QUDA_SINGLE_PRECISION)) {
desc.x = 8*precision;
desc.y = 8*precision;
desc.z = 0;
desc.w = 0;
} else { // all others are four component (double2 is spread across int4)
desc.x = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
desc.y = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
desc.z = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
desc.w = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
}
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = v;
resDesc.res.linear.desc = desc;
resDesc.res.linear.sizeInBytes = bytes;
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
if (precision == QUDA_HALF_PRECISION) texDesc.readMode = cudaReadModeNormalizedFloat;
else texDesc.readMode = cudaReadModeElementType;
cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
checkCudaError();
// create the texture for the norm components
if (precision == QUDA_HALF_PRECISION) {
cudaChannelFormatDesc desc;
memset(&desc, 0, sizeof(cudaChannelFormatDesc));
desc.f = cudaChannelFormatKindFloat;
desc.x = 8*QUDA_SINGLE_PRECISION; desc.y = 0; desc.z = 0; desc.w = 0;
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = norm;
resDesc.res.linear.desc = desc;
resDesc.res.linear.sizeInBytes = norm_bytes;
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.readMode = cudaReadModeElementType;
cudaCreateTextureObject(&texNorm, &resDesc, &texDesc, NULL);
checkCudaError();
}
texInit = true;
}
}
void cudaColorSpinorField::destroyTexObject() {
if (isNative() && texInit) {
cudaDestroyTextureObject(tex);
if (precision == QUDA_HALF_PRECISION) cudaDestroyTextureObject(texNorm);
texInit = false;
checkCudaError();
}
}
#endif
void cudaColorSpinorField::destroy() {
if (alloc) {
device_free(v);
if (precision == QUDA_HALF_PRECISION) device_free(norm);
if (siteSubset != QUDA_FULL_SITE_SUBSET) {
//! for deflated solvers:
if (eigv_dim > 0)
{
std::vector<ColorSpinorField*>::iterator vec;
for(vec = eigenvectors.begin(); vec != eigenvectors.end(); vec++) delete *vec;
}
}
alloc = false;
}
if (siteSubset == QUDA_FULL_SITE_SUBSET) {
delete even;
delete odd;
}
#ifdef USE_TEXTURE_OBJECTS
if((eigv_dim == 0) || (eigv_dim > 0 && eigv_id > -1))
destroyTexObject();
#endif
}
// CUDA's floating point format, IEEE-754, represents the floating point
// zero as 4 zero bytes
void cudaColorSpinorField::zero() {
cudaMemsetAsync(v, 0, bytes, streams[Nstream-1]);
if (precision == QUDA_HALF_PRECISION) cudaMemsetAsync(norm, 0, norm_bytes, streams[Nstream-1]);
}
void cudaColorSpinorField::zeroPad() {
size_t pad_bytes = (stride - volume) * precision * fieldOrder;
int Npad = nColor * nSpin * 2 / fieldOrder;
if (eigv_dim > 0 && eigv_id == -1){//we consider the whole eigenvector set:
Npad *= eigv_dim;
pad_bytes /= eigv_dim;
}
size_t pitch = ((eigv_dim == 0 || eigv_id != -1) ? stride : eigv_stride)*fieldOrder*precision;
char *dst = (char*)v + ((eigv_dim == 0 || eigv_id != -1) ? volume : eigv_volume)*fieldOrder*precision;
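// one cudaMemset2D call zeroes the pad region of every block: dst points just past the
// data of the first block, the pitch advances to the same position in the next block,
// and pad_bytes of zeros are written in each of the Npad rows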
if(pad_bytes) cudaMemset2D(dst, pitch, 0, pad_bytes, Npad);
//for (int i=0; i<Npad; i++) {
// if (pad_bytes) cudaMemset((char*)v + (volume + i*stride)*fieldOrder*precision, 0, pad_bytes);
//}
}
void cudaColorSpinorField::copy(const cudaColorSpinorField &src) {
checkField(*this, src);
if (this->GammaBasis() != src.GammaBasis()) errorQuda("cannot call this copy with different basis");
blas::copy(*this, src);
}
void cudaColorSpinorField::copySpinorField(const ColorSpinorField &src) {
// src is on the device and is native
if (typeid(src) == typeid(cudaColorSpinorField) &&
isNative() && dynamic_cast<const cudaColorSpinorField &>(src).isNative() &&
this->GammaBasis() == src.GammaBasis()) {
copy(dynamic_cast<const cudaColorSpinorField&>(src));
} else if (typeid(src) == typeid(cudaColorSpinorField)) {
copyGenericColorSpinor(*this, src, QUDA_CUDA_FIELD_LOCATION);
} else if (typeid(src) == typeid(cpuColorSpinorField)) { // src is on the host
loadSpinorField(src);
} else {
errorQuda("Unknown input ColorSpinorField %s", typeid(src).name());
}
}
void cudaColorSpinorField::loadSpinorField(const ColorSpinorField &src) {
if (REORDER_LOCATION == QUDA_CPU_FIELD_LOCATION &&
typeid(src) == typeid(cpuColorSpinorField)) {
for(int b=0; b<2; ++b){
resizeBufferPinned(bytes + norm_bytes, b);
memset(bufferPinned[b], 0, bytes+norm_bytes); // FIXME (temporary?) bug fix for padding
}
copyGenericColorSpinor(*this, src, QUDA_CPU_FIELD_LOCATION,
bufferPinned[bufferIndex], 0, (char*)bufferPinned[bufferIndex]+bytes, 0);
cudaMemcpy(v, bufferPinned[bufferIndex], bytes, cudaMemcpyHostToDevice);
cudaMemcpy(norm, (char*)bufferPinned[bufferIndex]+bytes, norm_bytes, cudaMemcpyHostToDevice);
} else if (typeid(src) == typeid(cudaColorSpinorField)) {
copyGenericColorSpinor(*this, src, QUDA_CUDA_FIELD_LOCATION);
} else {
void *Src, *srcNorm;
if (!zeroCopy) {
resizeBufferDevice(src.Bytes()+src.NormBytes());
Src = bufferDevice;
srcNorm = (char*)bufferDevice + src.Bytes();
cudaMemcpy(Src, src.V(), src.Bytes(), cudaMemcpyHostToDevice);
cudaMemcpy(srcNorm, src.Norm(), src.NormBytes(), cudaMemcpyHostToDevice);
} else {
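// zero-copy path: stage the source in pinned host memory and obtain its device-mapped
// pointer so the copy kernel can read the host buffer directly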
for(int b=0; b<2; ++b){
resizeBufferPinned(src.Bytes()+src.NormBytes(), b);
}
memcpy(bufferPinned[bufferIndex], src.V(), src.Bytes());
memcpy((char*)bufferPinned[bufferIndex]+src.Bytes(), src.Norm(), src.NormBytes());
cudaHostGetDevicePointer(&Src, bufferPinned[bufferIndex], 0);
srcNorm = (void*)((char*)Src + src.Bytes());
}
cudaMemset(v, 0, bytes); // FIXME (temporary?) bug fix for padding
copyGenericColorSpinor(*this, src, QUDA_CUDA_FIELD_LOCATION, 0, Src, 0, srcNorm);
}
checkCudaError();
return;
}
void cudaColorSpinorField::saveSpinorField(ColorSpinorField &dest) const {
if (REORDER_LOCATION == QUDA_CPU_FIELD_LOCATION &&
typeid(dest) == typeid(cpuColorSpinorField)) {
for(int b=0; b<2; ++b) resizeBufferPinned(bytes+norm_bytes,b);
cudaMemcpy(bufferPinned[bufferIndex], v, bytes, cudaMemcpyDeviceToHost);
cudaMemcpy((char*)bufferPinned[bufferIndex]+bytes, norm, norm_bytes, cudaMemcpyDeviceToHost);
copyGenericColorSpinor(dest, *this, QUDA_CPU_FIELD_LOCATION,
0, bufferPinned[bufferIndex], 0, (char*)bufferPinned[bufferIndex]+bytes);
} else if (typeid(dest) == typeid(cudaColorSpinorField)) {
copyGenericColorSpinor(dest, *this, QUDA_CUDA_FIELD_LOCATION);
} else {
void *dst, *dstNorm;
if (!zeroCopy) {
resizeBufferDevice(dest.Bytes()+dest.NormBytes());
dst = bufferDevice;
dstNorm = (char*)bufferDevice+dest.Bytes();
} else {
for(int b=0; b<2; ++b) resizeBufferPinned(dest.Bytes()+dest.NormBytes(),b);
cudaHostGetDevicePointer(&dst, bufferPinned[bufferIndex], 0);
dstNorm = (char*)dst+dest.Bytes();
}
copyGenericColorSpinor(dest, *this, QUDA_CUDA_FIELD_LOCATION, dst, v, dstNorm, 0);
if (!zeroCopy) {
cudaMemcpy(dest.V(), dst, dest.Bytes(), cudaMemcpyDeviceToHost);
cudaMemcpy(dest.Norm(), dstNorm, dest.NormBytes(), cudaMemcpyDeviceToHost);
} else {
memcpy(dest.V(), bufferPinned[bufferIndex], dest.Bytes());
memcpy(dest.Norm(), (char*)bufferPinned[bufferIndex]+dest.Bytes(), dest.NormBytes());
}
}
checkCudaError();
return;
}
void cudaColorSpinorField::allocateGhostBuffer(int nFace) {
int Nint = nColor * nSpin * 2; // number of internal degrees of freedom
if (nSpin == 4) Nint /= 2; // spin projection for Wilson
// compute size of buffer required
size_t faceBytes = 0;
for (int i=0; i<4; i++) {
if(!commDimPartitioned(i)) continue;
faceBytes += 2*nFace*ghostFace[i]*Nint*precision;
// add extra space for the norms for half precision
if (precision == QUDA_HALF_PRECISION) faceBytes += 2*nFace*ghostFace[i]*sizeof(float);
}
// only allocate if not already allocated or buffer required is bigger than previously
if(initGhostFaceBuffer == 0 || faceBytes > ghostFaceBytes) {
if (initGhostFaceBuffer) {
for(int b=0; b<2; ++b) device_free(ghostFaceBuffer[b]);
}
if (faceBytes > 0) {
for(int b=0; b<2; ++b) ghostFaceBuffer[b] = device_malloc(faceBytes);
initGhostFaceBuffer = 1;
ghostFaceBytes = faceBytes;
}
}
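// carve the buffer into per-dimension [backwards face | forwards face] regions;
// in half precision each face payload is immediately followed by its norm block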
size_t offset = 0;
for (int i=0; i<4; i++) {
if(!commDimPartitioned(i)) continue;
for(int b=0; b<2; ++b) backGhostFaceBuffer[b][i] = (void*)(((char*)ghostFaceBuffer[b]) + offset);
offset += nFace*ghostFace[i]*Nint*precision;
if (precision == QUDA_HALF_PRECISION) offset += nFace*ghostFace[i]*sizeof(float);
for(int b=0; b<2; ++b) fwdGhostFaceBuffer[b][i] = (void*)(((char*)ghostFaceBuffer[b]) + offset);
offset += nFace*ghostFace[i]*Nint*precision;
if (precision == QUDA_HALF_PRECISION) offset += nFace*ghostFace[i]*sizeof(float);
}
}
void cudaColorSpinorField::allocateGhostBuffer(void *send_buf[], void *recv_buf[]) const
{
int num_faces = 1;
if(nSpin == 1) num_faces = 3; // staggered
int spinor_size = 2*nSpin*nColor*precision;
// resize face only if requested size is larger than previously allocated one
size_t faceBytes = 0;
for (int i=0; i<nDimComms; i++) {
faceBytes += 2*siteSubset*num_faces*surfaceCB[i]*spinor_size;
}
if (!initGhostFaceBuffer || faceBytes > ghostFaceBytes) {
if (initGhostFaceBuffer) {
for (int b=0; b<2; ++b) device_free(ghostFaceBuffer[b]);
}
if (faceBytes > 0) {
for (int b=0; b<2; ++b) ghostFaceBuffer[b] = device_malloc(faceBytes);
initGhostFaceBuffer = 1;
ghostFaceBytes = faceBytes;
}
}
size_t offset = 0;
for (int i=0; i<nDimComms; i++) {
// use first buffer for recv and second for send
recv_buf[2*i+0] = static_cast<void*>((static_cast<char*>(ghostFaceBuffer[0]) + offset));
send_buf[2*i+0] = static_cast<void*>((static_cast<char*>(ghostFaceBuffer[1]) + offset));
offset += siteSubset*num_faces*surfaceCB[i]*spinor_size;
recv_buf[2*i+1] = static_cast<void*>((static_cast<char*>(ghostFaceBuffer[0]) + offset));
send_buf[2*i+1] = static_cast<void*>((static_cast<char*>(ghostFaceBuffer[1]) + offset));
offset += siteSubset*num_faces*surfaceCB[i]*spinor_size;
}
}
void cudaColorSpinorField::freeGhostBuffer(void)
{
if (!initGhostFaceBuffer) return;
for(int b=0; b<2; ++b) device_free(ghostFaceBuffer[b]);
for(int i=0;i < 4; i++){
if(!commDimPartitioned(i)) continue;
for(int b=0; b<2; ++b){
backGhostFaceBuffer[b][i] = NULL;
fwdGhostFaceBuffer[b][i] = NULL;
}
}
initGhostFaceBuffer = 0;
}
// pack the ghost zone into a contiguous buffer for communications
void cudaColorSpinorField::packGhost(const int nFace, const QudaParity parity,
const int dim, const QudaDirection dir,
const int dagger, cudaStream_t *stream,
void *buffer, double a, double b)
{
#ifdef MULTI_GPU
int face_num;
if(dir == QUDA_BACKWARDS){
face_num = 0;
}else if(dir == QUDA_FORWARDS){
face_num = 1;
}else{
face_num = 2;
}
void *packBuffer = buffer ? buffer : ghostFaceBuffer[bufferIndex];
packFace(packBuffer, *this, nFace, dagger, parity, dim, face_num, *stream, a, b);
#else
errorQuda("packGhost not supported in a single-GPU build");
#endif
}
// send the ghost zone to the host
void cudaColorSpinorField::sendGhost(void *ghost_spinor, const int nFace, const int dim,
const QudaDirection dir, const int dagger,
cudaStream_t *stream) {
#ifdef MULTI_GPU
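// Nvec is the number of real components per element of the native field order
// (2 for staggered or double precision, 4 otherwise)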
int Nvec = (nSpin == 1 || precision == QUDA_DOUBLE_PRECISION) ? 2 : 4;
int Nint = (nColor * nSpin * 2) / (nSpin == 4 ? 2 : 1); // (spin proj.) degrees of freedom
if (dim !=3 || getKernelPackT() || getTwistPack()) { // use kernels to pack into contiguous buffers then a single cudaMemcpy
size_t bytes = nFace*Nint*ghostFace[dim]*precision;
if (precision == QUDA_HALF_PRECISION) bytes += nFace*ghostFace[dim]*sizeof(float);
void* gpu_buf =
(dir == QUDA_BACKWARDS) ? this->backGhostFaceBuffer[bufferIndex][dim] : this->fwdGhostFaceBuffer[bufferIndex][dim];
cudaMemcpyAsync(ghost_spinor, gpu_buf, bytes, cudaMemcpyDeviceToHost, *stream);
} else if(this->TwistFlavor() != QUDA_TWIST_NONDEG_DOUBLET){ // do multiple cudaMemcpys
int Npad = Nint / Nvec; // number Nvec buffers we have
int Nt_minus1_offset = (volume - nFace*ghostFace[3]); // N_t -1 = Vh-Vsh
int offset = 0;
if (nSpin == 1) {
offset = (dir == QUDA_BACKWARDS) ? 0 : Nt_minus1_offset;
} else if (nSpin == 4) {
// !dagger: send lower components backwards, send upper components forwards
// dagger: send upper components backwards, send lower components forwards
bool upper = dagger ? true : false; // Fwd is !Back
if (dir == QUDA_FORWARDS) upper = !upper;
int lower_spin_offset = Npad*stride;
if (upper) offset = (dir == QUDA_BACKWARDS ? 0 : Nt_minus1_offset);
else offset = lower_spin_offset + (dir == QUDA_BACKWARDS ? 0 : Nt_minus1_offset);
}
// QUDA Memcpy NPad's worth.
// -- Dest will point to the right beginning PAD.
// -- Each Pad has size Nvec*Vsh Floats.
// -- There is Nvec*Stride Floats from the start of one PAD to the start of the next
void *dst = (char*)ghost_spinor;
void *src = (char*)v + offset*Nvec*precision;
size_t len = nFace*ghostFace[3]*Nvec*precision;
size_t spitch = stride*Nvec*precision;
cudaMemcpy2DAsync(dst, len, src, spitch, len, Npad, cudaMemcpyDeviceToHost, *stream);
if (precision == QUDA_HALF_PRECISION) {
int norm_offset = (dir == QUDA_BACKWARDS) ? 0 : Nt_minus1_offset*sizeof(float);
void *dst = (char*)ghost_spinor + nFace*Nint*ghostFace[3]*precision;
void *src = (char*)norm + norm_offset;
cudaMemcpyAsync(dst, src, nFace*ghostFace[3]*sizeof(float), cudaMemcpyDeviceToHost, *stream);
}
}else{
int flavorVolume = volume / 2;
int flavorTFace = ghostFace[3] / 2;
int Npad = Nint / Nvec; // number Nvec buffers we have
int flavor1_Nt_minus1_offset = (flavorVolume - flavorTFace);
int flavor2_Nt_minus1_offset = (volume - flavorTFace);
int flavor1_offset = 0;
int flavor2_offset = 0;
// !dagger: send lower components backwards, send upper components forwards
// dagger: send upper components backwards, send lower components forwards
bool upper = dagger ? true : false; // Fwd is !Back
if (dir == QUDA_FORWARDS) upper = !upper;
int lower_spin_offset = Npad*stride;//ndeg tm: stride=2*flavor_volume+pad
if (upper){
flavor1_offset = (dir == QUDA_BACKWARDS ? 0 : flavor1_Nt_minus1_offset);
flavor2_offset = (dir == QUDA_BACKWARDS ? flavorVolume : flavor2_Nt_minus1_offset);
}else{
flavor1_offset = lower_spin_offset + (dir == QUDA_BACKWARDS ? 0 : flavor1_Nt_minus1_offset);
flavor2_offset = lower_spin_offset + (dir == QUDA_BACKWARDS ? flavorVolume : flavor2_Nt_minus1_offset);
}
// QUDA Memcpy NPad's worth.
// -- Dest will point to the right beginning PAD.
// -- Each Pad has size Nvec*Vsh Floats.
// -- There is Nvec*Stride Floats from the start of one PAD to the start of the next
void *dst = (char*)ghost_spinor;
void *src = (char*)v + flavor1_offset*Nvec*precision;
size_t len = flavorTFace*Nvec*precision;
size_t spitch = stride*Nvec*precision;//ndeg tm: stride=2*flavor_volume+pad
size_t dpitch = 2*len;
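// dpitch = 2*len interleaves the two flavors in the host buffer: each destination row
// holds the flavor-1 chunk followed by the flavor-2 chunk copied just below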
cudaMemcpy2DAsync(dst, dpitch, src, spitch, len, Npad, cudaMemcpyDeviceToHost, *stream);
dst = (char*)ghost_spinor+len;
src = (char*)v + flavor2_offset*Nvec*precision;
cudaMemcpy2DAsync(dst, dpitch, src, spitch, len, Npad, cudaMemcpyDeviceToHost, *stream);
if (precision == QUDA_HALF_PRECISION) {
int Nt_minus1_offset = (flavorVolume - flavorTFace);
int norm_offset = (dir == QUDA_BACKWARDS) ? 0 : Nt_minus1_offset*sizeof(float);
void *dst = (char*)ghost_spinor + Nint*ghostFace[3]*precision;
void *src = (char*)norm + norm_offset;
size_t dpitch = flavorTFace*sizeof(float);
size_t spitch = flavorVolume*sizeof(float);
cudaMemcpy2DAsync(dst, dpitch, src, spitch, flavorTFace*sizeof(float), 2, cudaMemcpyDeviceToHost, *stream);
}
}
#else
errorQuda("sendGhost not supported in a single-GPU build");
#endif
}
void cudaColorSpinorField::unpackGhost(const void* ghost_spinor, const int nFace,
const int dim, const QudaDirection dir,
const int dagger, cudaStream_t* stream)
{
int Nint = (nColor * nSpin * 2) / (nSpin == 4 ? 2 : 1); // (spin proj.) degrees of freedom
int len = nFace*ghostFace[dim]*Nint;
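// the ghost zone is stored after the field body (length reals), at the precomputed
// per-dimension ghost offset; the forward face follows the backward face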
int offset = length + ghostOffset[dim]*nColor*nSpin*2;
offset += (dir == QUDA_BACKWARDS) ? 0 : len;
void *dst = (char*)v + precision*offset;
const void *src = ghost_spinor;
cudaMemcpyAsync(dst, src, len*precision, cudaMemcpyHostToDevice, *stream);
if (precision == QUDA_HALF_PRECISION) {
// norm region of host ghost zone is at the end of the ghost_spinor
int normlen = nFace*ghostFace[dim];
int norm_offset = stride + ghostNormOffset[dim];
norm_offset += (dir == QUDA_BACKWARDS) ? 0 : normlen;
void *dst = static_cast<char*>(norm) + norm_offset*sizeof(float);
const void *src = static_cast<const char*>(ghost_spinor)+nFace*Nint*ghostFace[dim]*precision;
cudaMemcpyAsync(dst, src, normlen*sizeof(float), cudaMemcpyHostToDevice, *stream);
}
}
// pack the ghost zone into a contiguous buffer for communications
void cudaColorSpinorField::packGhostExtended(const int nFace, const int R[], const QudaParity parity,
const int dim, const QudaDirection dir,
const int dagger, cudaStream_t *stream,
void *buffer)
{
#ifdef MULTI_GPU
int face_num;
if(dir == QUDA_BACKWARDS){
face_num = 0;
}else if(dir == QUDA_FORWARDS){
face_num = 1;
}else{
face_num = 2;
}
void *packBuffer = buffer ? buffer : ghostFaceBuffer[bufferIndex];
packFaceExtended(packBuffer, *this, nFace, R, dagger, parity, dim, face_num, *stream);
#else
errorQuda("packGhostExtended not supported in a single-GPU build");
#endif
}
// copy data from host buffer into boundary region of device field
void cudaColorSpinorField::unpackGhostExtended(const void* ghost_spinor, const int nFace, const QudaParity parity,
const int dim, const QudaDirection dir,
const int dagger, cudaStream_t* stream)
{
// First call the regular unpackGhost routine to copy data into the `usual' ghost-zone region
// of the data array
unpackGhost(ghost_spinor, nFace, dim, dir, dagger, stream);
// Next step is to copy data from the ghost zone back to the interior region
int Nint = (nColor * nSpin * 2) / (nSpin == 4 ? 2 : 1); // (spin proj.) degrees of freedom
int len = nFace*ghostFace[dim]*Nint;
int offset = length + ghostOffset[dim]*nColor*nSpin*2;
offset += (dir == QUDA_BACKWARDS) ? 0 : len;
#ifdef MULTI_GPU
const int face_num = 2;
const bool unpack = true;
const int R[4] = {0,0,0,0};
packFaceExtended(ghostFaceBuffer[bufferIndex], *this, nFace, R, dagger, parity, dim, face_num, *stream, unpack);
#else
errorQuda("unpackGhostExtended not supported in a single-GPU build");
#endif
}
cudaStream_t *stream;
void cudaColorSpinorField::createComms(int nFace) {
if(bufferMessageHandler != bufferPinnedResizeCount) destroyComms();
if (!initComms || nFaceComms != nFace) {
// if we are requesting a new number of faces destroy and start over
if(nFace != nFaceComms) destroyComms();
if (siteSubset != QUDA_PARITY_SITE_SUBSET)
errorQuda("Only supports single parity fields");
#ifdef GPU_COMMS
bool comms = false;
for (int i=0; i<nDimComms; i++) if (commDimPartitioned(i)) comms = true;
#endif
if (nFace > maxNface)
errorQuda("Requested number of faces %d in communicator is greater than supported %d",
nFace, maxNface);
// faceBytes is the sum of all face sizes
size_t faceBytes = 0;
// nbytes is the size in bytes of each face
size_t nbytes[QUDA_MAX_DIM];
// The number of degrees of freedom per site for the given
// field. Currently assumes spin projection of a Wilson-like
// field (so half the number of degrees of freedom).
int Ndof = (2 * nSpin * nColor) / (nSpin==4 ? 2 : 1);
for (int i=0; i<nDimComms; i++) {
nbytes[i] = maxNface*surfaceCB[i]*Ndof*precision;
if (precision == QUDA_HALF_PRECISION) nbytes[i] += maxNface*surfaceCB[i]*sizeof(float);
if (!commDimPartitioned(i)) continue;
faceBytes += 2*nbytes[i];
}
#ifndef GPU_COMMS
// use static pinned memory for face buffers
for(int b=0; b<2; ++b){
resizeBufferPinned(2*faceBytes, b); // oversizes for GPU_COMMS case
my_face[b] = bufferPinned[b];
from_face[b] = static_cast<char*>(bufferPinned[b]) + faceBytes;
}
// assign pointers for each face - it's ok to alias for different Nface parameters
size_t offset = 0;
#endif
for (int i=0; i<nDimComms; i++) {
if (!commDimPartitioned(i)) continue;
#ifdef GPU_COMMS
for(int b=0; b<2; ++b){
my_back_face[b][i] = backGhostFaceBuffer[b][i];
from_back_face[b][i] = ghost[i];
if(precision == QUDA_HALF_PRECISION){
my_back_norm_face[b][i] = static_cast<char*>(backGhostFaceBuffer[b][i]) + nFace*ghostFace[i]*Ndof*precision;
from_back_norm_face[b][i] = ghostNorm[i];
}
} // loop over b
#else
for(int b=0; b<2; ++b){
my_back_face[b][i] = static_cast<char*>(my_face[b]) + offset;
from_back_face[b][i] = static_cast<char*>(from_face[b]) + offset;
}
offset += nbytes[i];
#endif
#ifdef GPU_COMMS
for(int b=0; b<2; ++b){
my_fwd_face[b][i] = fwdGhostFaceBuffer[b][i];
from_fwd_face[b][i] = ghost[i] + nFace*ghostFace[i]*Ndof*precision;
if(precision == QUDA_HALF_PRECISION){
my_fwd_norm_face[b][i] = static_cast<char*>(fwdGhostFaceBuffer[b][i]) + nFace*ghostFace[i]*Ndof*precision;
from_fwd_norm_face[b][i] = static_cast<char*>(ghostNorm[i]) + nFace*ghostFace[i]*sizeof(float);
}
} // loop over b
#else
for(int b=0; b<2; ++b){
my_fwd_face[b][i] = static_cast<char*>(my_face[b]) + offset;
from_fwd_face[b][i] = static_cast<char*>(from_face[b]) + offset;
}
offset += nbytes[i];
#endif
}
// create a different message handler for each direction and Nface
for(int b=0; b<2; ++b){
mh_send_fwd[b] = new MsgHandle**[maxNface];
mh_send_back[b] = new MsgHandle**[maxNface];
mh_recv_fwd[b] = new MsgHandle**[maxNface];
mh_recv_back[b] = new MsgHandle**[maxNface];
#ifdef GPU_COMMS
if(precision == QUDA_HALF_PRECISION){
mh_send_norm_fwd[b] = new MsgHandle**[maxNface];
mh_send_norm_back[b] = new MsgHandle**[maxNface];
mh_recv_norm_fwd[b] = new MsgHandle**[maxNface];
mh_recv_norm_back[b] = new MsgHandle**[maxNface];
}
#endif
} // loop over b
for (int j=0; j<maxNface; j++) {
for(int b=0; b<2; ++b){
mh_send_fwd[b][j] = new MsgHandle*[2*nDimComms];
mh_send_back[b][j] = new MsgHandle*[2*nDimComms];
mh_recv_fwd[b][j] = new MsgHandle*[nDimComms];
mh_recv_back[b][j] = new MsgHandle*[nDimComms];
#ifdef GPU_COMMS
if(precision == QUDA_HALF_PRECISION){
mh_send_norm_fwd[b][j] = new MsgHandle*[2*nDimComms];
mh_send_norm_back[b][j] = new MsgHandle*[2*nDimComms];
mh_recv_norm_fwd[b][j] = new MsgHandle*[nDimComms];
mh_recv_norm_back[b][j] = new MsgHandle*[nDimComms];
}
#endif
} // loop over b
for (int i=0; i<nDimComms; i++) {
if (!commDimPartitioned(i)) continue;
#ifdef GPU_COMMS
size_t nbytes_Nface = surfaceCB[i]*Ndof*precision*(j+1);
size_t nbytes_Nface_norm = surfaceCB[i]*(j+1)*sizeof(float);
if (i != 3 || getKernelPackT() || getTwistPack()) {
#else
size_t nbytes_Nface = (nbytes[i] / maxNface) * (j+1);
#endif
for(int b=0; b<2; ++b){
mh_send_fwd[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(my_fwd_face[b][i], i, +1, nbytes_Nface) : NULL;
mh_send_back[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(my_back_face[b][i], i, -1, nbytes_Nface) : NULL;
mh_send_fwd[b][j][2*i+1] = mh_send_fwd[b][j][2*i]; // alias pointers
mh_send_back[b][j][2*i+1] = mh_send_back[b][j][2*i]; // alias pointers
}
#ifdef GPU_COMMS
if(precision == QUDA_HALF_PRECISION){
for(int b=0; b<2; ++b){
mh_send_norm_fwd[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(my_fwd_norm_face[b][i], i, +1, nbytes_Nface_norm) : NULL;
mh_send_norm_back[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(my_back_norm_face[b][i], i, -1, nbytes_Nface_norm) : NULL;
mh_send_norm_fwd[b][j][2*i+1] = mh_send_norm_fwd[b][j][2*i];
mh_send_norm_back[b][j][2*i+1] = mh_send_norm_back[b][j][2*i];
}
}
} else if (this->TwistFlavor() == QUDA_TWIST_NONDEG_DOUBLET) {
errorQuda("GPU_COMMS for non-degenerate doublet only supported with time-dimension kernel packing enabled.");
} else {
/*
use a strided communicator; here we can't really use
the previously declared my_fwd_face and my_back_face
pointers since they don't map 1-to-1, so we just
compute the required base pointers and pass them
directly into the communicator construction
*/
int Nblocks = Ndof / Nvec(); // number of Nvec buffers we have
// start of last time slice chunk we are sending forwards
int endOffset = (volume - (j+1)*ghostFace[i]);
size_t offset[4];
void *base[4];
if (nSpin == 1) { // staggered is invariant with dagger
offset[2*0 + 0] = 0;
offset[2*1 + 0] = endOffset;
offset[2*0 + 1] = offset[2*0 + 0];
offset[2*1 + 1] = offset[2*1 + 0];
} else if (nSpin == 4) {
// !dagger: send last components backwards, send first components forwards
offset[2*0 + 0] = Nblocks*stride;
offset[2*1 + 0] = endOffset;
// dagger: send first components backwards, send last components forwards
offset[2*0 + 1] = 0;
offset[2*1 + 1] = Nblocks*stride + endOffset;
} else {
errorQuda("Unsupported number of spin components");
}
for (int k=0; k<4; k++) {
base[k] = static_cast<char*>(v) + offset[k]*Nvec()*precision; // total offset in bytes
}
size_t blksize = (j+1)*ghostFace[i]*Nvec()*precision; // (j+1) is number of faces
size_t Stride = stride*Nvec()*precision;
if (blksize * Nblocks != nbytes_Nface)
errorQuda("Total strided message size does not match expected size");
//printf("%d strided sends with Nface=%d Nblocks=%d blksize=%d Stride=%d\n", i, j+1, Nblocks, blksize, Stride);
for(int b=0; b<2; ++b){
// only allocate a communicator for the present face (this needs to be cleaned up)
mh_send_fwd[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_strided_send_relative(base[2], i, +1, blksize, Nblocks, Stride) : NULL;
mh_send_back[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_strided_send_relative(base[0], i, -1, blksize, Nblocks, Stride) : NULL;
if (nSpin ==4) { // dagger communicators
mh_send_fwd[b][j][2*i+1] = (j+1 == nFace) ? comm_declare_strided_send_relative(base[3], i, +1, blksize, Nblocks, Stride) : NULL;
mh_send_back[b][j][2*i+1] = (j+1 == nFace) ? comm_declare_strided_send_relative(base[1], i, -1, blksize, Nblocks, Stride) : NULL;
} else {
mh_send_fwd[b][j][2*i+1] = mh_send_fwd[b][j][2*i+0];
mh_send_back[b][j][2*i+1] = mh_send_back[b][j][2*i+0];
}
} // loop over b
if(precision == QUDA_HALF_PRECISION){
int Nt_minus1_offset = (volume - nFace*ghostFace[3]); // The space-time coordinate of the start of the last time slice
void *norm_fwd = static_cast<float*>(norm) + Nt_minus1_offset;
void *norm_back = norm; // the first time slice has zero offset
for(int b=0; b<2; ++b){
mh_send_norm_fwd[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(norm_fwd, i, +1, surfaceCB[i]*(j+1)*sizeof(float)) : NULL;
mh_send_norm_back[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(norm_back, i, -1, surfaceCB[i]*(j+1)*sizeof(float)) : NULL;
mh_send_norm_fwd[b][j][2*i+1] = mh_send_norm_fwd[b][j][2*i];
mh_send_norm_back[b][j][2*i+1] = mh_send_norm_back[b][j][2*i];
}
}
}
if(precision == QUDA_HALF_PRECISION){
for(int b=0; b<2; ++b){
mh_recv_norm_fwd[b][j][i] = (j+1 == nFace) ? comm_declare_receive_relative(from_fwd_norm_face[b][i], i, +1, nbytes_Nface_norm) : NULL;
mh_recv_norm_back[b][j][i] = (j+1 == nFace) ? comm_declare_receive_relative(from_back_norm_face[b][i], i, -1, nbytes_Nface_norm) : NULL;
}
}
#endif // GPU_COMMS
for(int b=0; b<2; ++b){
mh_recv_fwd[b][j][i] = (j+1 == nFace) ? comm_declare_receive_relative(from_fwd_face[b][i], i, +1, nbytes_Nface) : NULL;
mh_recv_back[b][j][i] = (j+1 == nFace) ? comm_declare_receive_relative(from_back_face[b][i], i, -1, nbytes_Nface) : NULL;
}
} // loop over dimension
}
bufferMessageHandler = bufferPinnedResizeCount;
initComms = true;
nFaceComms = nFace;
}
checkCudaError();
}
void cudaColorSpinorField::destroyComms() {
if (initComms) {
for(int b=0; b<2; ++b){
for (int j=0; j<maxNface; j++) {
for (int i=0; i<nDimComms; i++) {
if (commDimPartitioned(i)) {
if (mh_recv_fwd[b][j][i]) comm_free(mh_recv_fwd[b][j][i]);
if (mh_recv_back[b][j][i]) comm_free(mh_recv_back[b][j][i]);
if (mh_send_fwd[b][j][2*i]) comm_free(mh_send_fwd[b][j][2*i]);
if (mh_send_back[b][j][2*i]) comm_free(mh_send_back[b][j][2*i]);
// only in a special case are these not aliasing pointers
#ifdef GPU_COMMS
if(precision == QUDA_HALF_PRECISION){
if (mh_recv_norm_fwd[b][j][i]) comm_free(mh_recv_norm_fwd[b][j][i]);
if (mh_recv_norm_back[b][j][i]) comm_free(mh_recv_norm_back[b][j][i]);
if (mh_send_norm_fwd[b][j][2*i]) comm_free(mh_send_norm_fwd[b][j][2*i]);
if (mh_send_norm_back[b][j][2*i]) comm_free(mh_send_norm_back[b][j][2*i]);
}
if (i == 3 && !getKernelPackT() && nSpin == 4) {
if (mh_send_fwd[b][j][2*i+1]) comm_free(mh_send_fwd[b][j][2*i+1]);
if (mh_send_back[b][j][2*i+1]) comm_free(mh_send_back[b][j][2*i+1]);
}
#endif // GPU_COMMS
}
}
delete []mh_recv_fwd[b][j];
delete []mh_recv_back[b][j];
delete []mh_send_fwd[b][j];
delete []mh_send_back[b][j];
#ifdef GPU_COMMS
if(precision == QUDA_HALF_PRECISION){
delete []mh_recv_norm_fwd[b][j];
delete []mh_recv_norm_back[b][j];
delete []mh_send_norm_fwd[b][j];
delete []mh_send_norm_back[b][j];
}
#endif
}
delete []mh_recv_fwd[b];
delete []mh_recv_back[b];
delete []mh_send_fwd[b];
delete []mh_send_back[b];
for (int i=0; i<nDimComms; i++) {
my_fwd_face[b][i] = NULL;
my_back_face[b][i] = NULL;
from_fwd_face[b][i] = NULL;
from_back_face[b][i] = NULL;
}
#ifdef GPU_COMMS
if(precision == QUDA_HALF_PRECISION){
delete []mh_recv_norm_fwd[b];
delete []mh_recv_norm_back[b];
delete []mh_send_norm_fwd[b];
delete []mh_send_norm_back[b];
}
for(int i=0; i<nDimComms; i++){
my_fwd_norm_face[b][i] = NULL;
my_back_norm_face[b][i] = NULL;
from_fwd_norm_face[b][i] = NULL;
from_back_norm_face[b][i] = NULL;
}
#endif
} // loop over b
initComms = false;
checkCudaError();
}
}
void cudaColorSpinorField::streamInit(cudaStream_t *stream_p){
stream = stream_p;
}
void cudaColorSpinorField::pack(int nFace, int parity, int dagger, cudaStream_t *stream_p,
bool zeroCopyPack, double a, double b) {
allocateGhostBuffer(nFace); // allocate the ghost buffer if not yet allocated
createComms(nFace); // must call this first
stream = stream_p;
const int dim=-1; // pack all partitioned dimensions
if (zeroCopyPack) {
void *my_face_d;
cudaHostGetDevicePointer(&my_face_d, my_face[bufferIndex], 0); // set the matching device pointer
packGhost(nFace, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[0], my_face_d, a, b);
} else {
packGhost(nFace, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[Nstream-1], 0, a, b);
}
}
void cudaColorSpinorField::pack(int nFace, int parity, int dagger, int stream_idx,
bool zeroCopyPack, double a, double b) {
allocateGhostBuffer(nFace); // allocate the ghost buffer if not yet allocated
createComms(nFace); // must call this first
const int dim=-1; // pack all partitioned dimensions
if (zeroCopyPack) {
void *my_face_d;
cudaHostGetDevicePointer(&my_face_d, my_face[bufferIndex], 0); // set the matching device pointer
packGhost(nFace, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[stream_idx], my_face_d, a, b);
} else {
packGhost(nFace, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[stream_idx], 0, a, b);
}
}
void cudaColorSpinorField::packExtended(const int nFace, const int R[], const int parity,
const int dagger, const int dim,
cudaStream_t *stream_p, const bool zeroCopyPack){
allocateGhostBuffer(nFace); // allocate the ghost buffer if not yet allocated
createComms(nFace); // must call this first
stream = stream_p;
void *my_face_d = NULL;
if(zeroCopyPack){
cudaHostGetDevicePointer(&my_face_d, my_face[bufferIndex], 0);
packGhostExtended(nFace, R, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[0], my_face_d);
}else{
packGhostExtended(nFace, R, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[Nstream-1], my_face_d);
}
}
void cudaColorSpinorField::gather(int nFace, int dagger, int dir, cudaStream_t* stream_p)
{
int dim = dir/2;
// If stream_p != 0, use pack_stream, else use the stream array
cudaStream_t *pack_stream = (stream_p) ? stream_p : stream+dir;
if(dir%2 == 0){
// backwards copy to host
sendGhost(my_back_face[bufferIndex][dim], nFace, dim, QUDA_BACKWARDS, dagger, pack_stream);
} else {
// forwards copy to host
sendGhost(my_fwd_face[bufferIndex][dim], nFace, dim, QUDA_FORWARDS, dagger, pack_stream);
}
}
void cudaColorSpinorField::recvStart(int nFace, int dir, int dagger) {
int dim = dir/2;
if(!commDimPartitioned(dim)) return;
if (dir%2 == 0) { // sending backwards
// Prepost receive
comm_start(mh_recv_fwd[bufferIndex][nFace-1][dim]);
} else { //sending forwards
// Prepost receive
comm_start(mh_recv_back[bufferIndex][nFace-1][dim]);
}
#ifdef GPU_COMMS
if(precision != QUDA_HALF_PRECISION) return;
if (dir%2 == 0) { // sending backwards
// Prepost receive
comm_start(mh_recv_norm_fwd[bufferIndex][nFace-1][dim]);
} else { //sending forwards
// Prepost receive
comm_start(mh_recv_norm_back[bufferIndex][nFace-1][dim]);
}
#endif
}
void cudaColorSpinorField::sendStart(int nFace, int dir, int dagger) {
int dim = dir / 2;
if(!commDimPartitioned(dim)) return;
if (dir%2 == 0) { // sending backwards
comm_start(mh_send_back[bufferIndex][nFace-1][2*dim+dagger]);
} else { //sending forwards
comm_start(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger]);
}
#ifdef GPU_COMMS
if(precision != QUDA_HALF_PRECISION) return;
if (dir%2 == 0) { // sending backwards
comm_start(mh_send_norm_back[bufferIndex][nFace-1][2*dim+dagger]);
} else { //sending forwards
comm_start(mh_send_norm_fwd[bufferIndex][nFace-1][2*dim+dagger]);
}
#endif
}
void cudaColorSpinorField::commsStart(int nFace, int dir, int dagger) {
int dim = dir / 2;
if(!commDimPartitioned(dim)) return;
if (dir%2 == 0) { // sending backwards
// Prepost receive
comm_start(mh_recv_fwd[bufferIndex][nFace-1][dim]);
comm_start(mh_send_back[bufferIndex][nFace-1][2*dim+dagger]);
} else { //sending forwards
// Prepost receive
comm_start(mh_recv_back[bufferIndex][nFace-1][dim]);
// Begin forward send
comm_start(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger]);
}
#ifdef GPU_COMMS
if(precision != QUDA_HALF_PRECISION) return;
if (dir%2 == 0) { // sending backwards
// Prepost receive
comm_start(mh_recv_norm_fwd[bufferIndex][nFace-1][dim]);
comm_start(mh_send_norm_back[bufferIndex][nFace-1][2*dim+dagger]);
} else { //sending forwards
// Prepost receive
comm_start(mh_recv_norm_back[bufferIndex][nFace-1][dim]);
// Begin forward send
comm_start(mh_send_norm_fwd[bufferIndex][nFace-1][2*dim+dagger]);
}
#endif
}
int cudaColorSpinorField::commsQuery(int nFace, int dir, int dagger) {
int dim = dir / 2;
if(!commDimPartitioned(dim)) return 0;
#ifdef GPU_COMMS
if(precision != QUDA_HALF_PRECISION){
#endif
if(dir%2==0) {
if (comm_query(mh_recv_fwd[bufferIndex][nFace-1][dim]) &&
comm_query(mh_send_back[bufferIndex][nFace-1][2*dim+dagger])) return 1;
} else {
if (comm_query(mh_recv_back[bufferIndex][nFace-1][dim]) &&
comm_query(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger])) return 1;
}
#ifdef GPU_COMMS
}else{ // half precision
if(dir%2==0) {
if (comm_query(mh_recv_fwd[bufferIndex][nFace-1][dim]) &&
comm_query(mh_send_back[bufferIndex][nFace-1][2*dim+dagger]) &&
comm_query(mh_recv_norm_fwd[bufferIndex][nFace-1][dim]) &&
comm_query(mh_send_norm_back[bufferIndex][nFace-1][2*dim+dagger])) return 1;
} else {
if (comm_query(mh_recv_back[bufferIndex][nFace-1][dim]) &&
comm_query(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger]) &&
comm_query(mh_recv_norm_back[bufferIndex][nFace-1][dim]) &&
comm_query(mh_send_norm_fwd[bufferIndex][nFace-1][2*dim+dagger])) return 1;
}
} // half precision
#endif
return 0;
}
void cudaColorSpinorField::commsWait(int nFace, int dir, int dagger) {
int dim = dir / 2;
if(!commDimPartitioned(dim)) return;
#ifdef GPU_COMMS
if(precision != QUDA_HALF_PRECISION){
#endif
if (dir%2==0) {
comm_wait(mh_recv_fwd[bufferIndex][nFace-1][dim]);
comm_wait(mh_send_back[bufferIndex][nFace-1][2*dim+dagger]);
} else {
comm_wait(mh_recv_back[bufferIndex][nFace-1][dim]);
comm_wait(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger]);
}
#ifdef GPU_COMMS
} else { // half precision
if (dir%2==0) {
comm_wait(mh_recv_fwd[bufferIndex][nFace-1][dim]);
comm_wait(mh_send_back[bufferIndex][nFace-1][2*dim+dagger]);
comm_wait(mh_recv_norm_fwd[bufferIndex][nFace-1][dim]);
comm_wait(mh_send_norm_back[bufferIndex][nFace-1][2*dim+dagger]);
} else {
comm_wait(mh_recv_back[bufferIndex][nFace-1][dim]);
comm_wait(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger]);
comm_wait(mh_recv_norm_back[bufferIndex][nFace-1][dim]);
comm_wait(mh_send_norm_fwd[bufferIndex][nFace-1][2*dim+dagger]);
}
} // half precision
#endif
return;
}
void cudaColorSpinorField::scatter(int nFace, int dagger, int dir, cudaStream_t* stream_p)
{
int dim = dir/2;
if(!commDimPartitioned(dim)) return;
// both scattering occurrences now go through the same stream
if (dir%2==0) {// receive from forwards
unpackGhost(from_fwd_face[bufferIndex][dim], nFace, dim, QUDA_FORWARDS, dagger, stream_p);
} else { // receive from backwards
unpackGhost(from_back_face[bufferIndex][dim], nFace, dim, QUDA_BACKWARDS, dagger, stream_p);
}
}
void cudaColorSpinorField::scatter(int nFace, int dagger, int dir)
{
int dim = dir/2;
if(!commDimPartitioned(dim)) return;
// both scattering occurrences now go through the same stream
if (dir%2==0) {// receive from forwards
unpackGhost(from_fwd_face[bufferIndex][dim], nFace, dim, QUDA_FORWARDS, dagger, &stream[2*dim/*+0*/]);
} else { // receive from backwards
unpackGhost(from_back_face[bufferIndex][dim], nFace, dim, QUDA_BACKWARDS, dagger, &stream[2*dim/*+1*/]);
}
}
void cudaColorSpinorField::scatterExtended(int nFace, int parity, int dagger, int dir)
{
int dim = dir/2;
if(!commDimPartitioned(dim)) return;
if (dir%2==0) {// receive from forwards
unpackGhostExtended(from_fwd_face[bufferIndex][dim], nFace, static_cast<QudaParity>(parity), dim, QUDA_FORWARDS, dagger, &stream[2*dim/*+0*/]);
} else { // receive from backwards
unpackGhostExtended(from_back_face[bufferIndex][dim], nFace, static_cast<QudaParity>(parity), dim, QUDA_BACKWARDS, dagger, &stream[2*dim/*+1*/]);
}
}
void cudaColorSpinorField::exchangeGhost(QudaParity parity, int dagger) const {
void **send = static_cast<void**>(safe_malloc(nDimComms * 2 * sizeof(void*)));
// allocate ghost buffer if not yet allocated
allocateGhostBuffer(send, ghost_fixme);
genericPackGhost(send, *this, parity, dagger);
int nFace = (nSpin == 1) ? 3 : 1;
exchange(ghost_fixme, send, nFace);
host_free(send);
}
std::ostream& operator<<(std::ostream &out, const cudaColorSpinorField &a) {
out << (const ColorSpinorField&)a;
out << "v = " << a.v << std::endl;
out << "norm = " << a.norm << std::endl;
out << "alloc = " << a.alloc << std::endl;
out << "init = " << a.init << std::endl;
return out;
}
//! for deflated solvers:
cudaColorSpinorField& cudaColorSpinorField::Eigenvec(const int idx) const {
if (siteSubset == QUDA_PARITY_SITE_SUBSET && this->EigvId() == -1) {
if (idx < this->EigvDim()) { // setup eigenvector from the set
return *(dynamic_cast<cudaColorSpinorField*>(eigenvectors[idx]));
}
else{
errorQuda("Incorrect eigenvector index...");
}
}
errorQuda("Eigenvector must be a parity spinor");
exit(-1);
}
// copyCuda currently cannot work with a set of spinor fields..
void cudaColorSpinorField::CopyEigenvecSubset(cudaColorSpinorField &dst, const int range, const int first_element) const{
#if 0
if(first_element < 0) errorQuda("\nError: trying to set negative first element.\n");
if (siteSubset == QUDA_PARITY_SITE_SUBSET && this->EigvId() == -1) {
if (first_element == 0 && range == this->EigvDim())
{
if(range != dst.EigvDim()) errorQuda("\nError: eigenvector range too big.\n");
checkField(dst, *this);
copyCuda(dst, *this);
}
else if ((first_element+range) < this->EigvDim())
{//setup eigenvector subset
cudaColorSpinorField *eigv_subset;
ColorSpinorParam param;
param.nColor = nColor;
param.nSpin = nSpin;
param.twistFlavor = twistFlavor;
param.precision = precision;
param.nDim = nDim;
param.pad = pad;
param.siteSubset = siteSubset;
param.siteOrder = siteOrder;
param.fieldOrder = fieldOrder;
param.gammaBasis = gammaBasis;
memcpy(param.x, x, nDim*sizeof(int));
param.create = QUDA_REFERENCE_FIELD_CREATE;
param.eigv_dim = range;
param.eigv_id = -1;
param.v = (void*)((char*)v + first_element*eigv_bytes);
param.norm = (void*)((char*)norm + first_element*eigv_norm_bytes);
eigv_subset = new cudaColorSpinorField(param);
//Not really needed:
eigv_subset->eigenvectors.reserve(param.eigv_dim);
for(int id = first_element; id < (first_element+range); id++)
{
param.eigv_id = id;
eigv_subset->eigenvectors.push_back(new cudaColorSpinorField(*this, param));
}
checkField(dst, *eigv_subset);
copyCuda(dst, *eigv_subset);
delete eigv_subset;
}
else{
errorQuda("Incorrect eigenvector dimension...");
}
}
else{
errorQuda("Eigenvector must be a parity spinor");
exit(-1);
}
#endif
}
void cudaColorSpinorField::getTexObjectInfo() const
{
#ifdef USE_TEXTURE_OBJECTS
printfQuda("\nPrint texture info for the field:\n");
std::cout << *this;
cudaResourceDesc resDesc;
//memset(&resDesc, 0, sizeof(resDesc));
cudaGetTextureObjectResourceDesc(&resDesc, this->Tex());
printfQuda("\nDevice pointer: %p\n", resDesc.res.linear.devPtr);
printfQuda("\nVolume (in bytes): %lu\n", resDesc.res.linear.sizeInBytes);
if (resDesc.resType == cudaResourceTypeLinear) printfQuda("\nResource type: linear \n");
checkCudaError();
#endif
}
void cudaColorSpinorField::Source(const QudaSourceType sourceType, const int st, const int s, const int c) {
ColorSpinorParam param(*this);
param.fieldOrder = QUDA_SPACE_SPIN_COLOR_FIELD_ORDER;
param.location = QUDA_CPU_FIELD_LOCATION;
param.create = QUDA_NULL_FIELD_CREATE;
cpuColorSpinorField tmp(param);
tmp.Source(sourceType, st, s, c);
*this = tmp;
}
} // namespace quda
|
1f5134a57475a9726cf197df3e2d381b51af67bb.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by shijiashuai on 5/7/18.
//
#include <thundergbm/util/cub_wrapper.h>
#include <thundergbm/sparse_columns.h>
#include "thundergbm/sparse_columns.h"
#include "thundergbm/util/device_lambda.cuh"
#include "hipsparse.h"
#include "thundergbm/util/multi_device.h"
#include "omp.h"
//FIXME remove this function
void correct_start(int *csc_col_ptr_2d_data, int first_col_start, int n_column_sub){
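// rebase the copied column pointers so the first column of this device's sub-matrix
// starts at offset zero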
device_loop(n_column_sub + 1, [=] __device__(int col_id) {
csc_col_ptr_2d_data[col_id] = csc_col_ptr_2d_data[col_id] - first_col_start;
});
};
void SparseColumns::csr2csc_gpu(const DataSet &dataset, vector<std::unique_ptr<SparseColumns>> &v_columns) {
LOG(INFO) << "convert csr to csc using gpu...";
std::chrono::high_resolution_clock timer;
auto t_start = timer.now();
//three arrays (on GPU/CPU) for csr representation
this->column_offset = 0;
SyncArray<float_type> val;
SyncArray<int> col_idx;
SyncArray<int> row_ptr;
val.resize(dataset.csr_val.size());
col_idx.resize(dataset.csr_col_idx.size());
row_ptr.resize(dataset.csr_row_ptr.size());
//copy data to the three arrays
val.copy_from(dataset.csr_val.data(), val.size());
col_idx.copy_from(dataset.csr_col_idx.data(), col_idx.size());
row_ptr.copy_from(dataset.csr_row_ptr.data(), row_ptr.size());
hipsparseHandle_t handle;
hipsparseMatDescr_t descr;
hipsparseCreate(&handle);
hipsparseCreateMatDescr(&descr);
hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);
hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
n_column = dataset.n_features_;
n_row = dataset.n_instances();
nnz = dataset.csr_val.size();
csc_val.resize(nnz);
csc_row_idx.resize(nnz);
csc_col_ptr.resize(n_column + 1);
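// csr2csc yields the CSC arrays of the same matrix (equivalently, the CSR of its
// transpose); note the single-precision ('S') routine is used, which assumes
// float_type is float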
hipsparseScsr2csc(handle, dataset.n_instances(), n_column, nnz, val.device_data(), row_ptr.device_data(),
col_idx.device_data(), csc_val.device_data(), csc_row_idx.device_data(), csc_col_ptr.device_data(),
HIPSPARSE_ACTION_NUMERIC, HIPSPARSE_INDEX_BASE_ZERO);
hipDeviceSynchronize();
hipsparseDestroy(handle);
hipsparseDestroyMatDescr(descr);
val.resize(0);
row_ptr.resize(0);
col_idx.resize(0);
// SyncMem::clear_cache();
int gpu_num;
hipError_t err = hipGetDeviceCount(&gpu_num);
std::atexit([](){
SyncMem::clear_cache();
});
int n_device = v_columns.size();
int ave_n_columns = n_column / n_device;
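// partition the columns into contiguous blocks of roughly ave_n_columns per device;
// the last device also takes the remainder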
DO_ON_MULTI_DEVICES(n_device, [&](int device_id) {
SparseColumns &columns = *v_columns[device_id];
const int *csc_col_ptr_data = csc_col_ptr.host_data();
int first_col_id = device_id * ave_n_columns;
int n_column_sub = (device_id < n_device - 1) ? ave_n_columns : n_column - first_col_id;
int first_col_start = csc_col_ptr_data[first_col_id];
int nnz_sub = (device_id < n_device - 1) ?
(csc_col_ptr_data[(device_id + 1) * ave_n_columns] - first_col_start) : (nnz -
first_col_start);
columns.column_offset = first_col_id + this->column_offset;
columns.nnz = nnz_sub;
columns.n_column = n_column_sub;
columns.n_row = n_row;
columns.csc_val.resize(nnz_sub);
columns.csc_row_idx.resize(nnz_sub);
columns.csc_col_ptr.resize(n_column_sub + 1);
columns.csc_val.copy_from(csc_val.host_data() + first_col_start, nnz_sub);
columns.csc_row_idx.copy_from(csc_row_idx.host_data() + first_col_start, nnz_sub);
columns.csc_col_ptr.copy_from(csc_col_ptr.host_data() + first_col_id, n_column_sub + 1);
int *csc_col_ptr_2d_data = columns.csc_col_ptr.device_data();
correct_start(csc_col_ptr_2d_data, first_col_start, n_column_sub);
//correct segment start positions
LOG(TRACE) << "sorting feature values (multi-device)";
cub_seg_sort_by_key(columns.csc_val, columns.csc_row_idx, columns.csc_col_ptr, false);
});
auto t_end = timer.now();
std::chrono::duration<float> used_time = t_end - t_start;
LOG(INFO) << "Converting csr to csc took " << used_time.count() << " s";
}
void SparseColumns::csr2csc_cpu(const DataSet &dataset, vector<std::unique_ptr<SparseColumns>> &v_columns) {
LOG(INFO) << "convert csr to csc using cpu...";
this->column_offset = 0;
//cpu transpose
n_column = dataset.n_features();
n_row = dataset.n_instances();
nnz = dataset.csr_val.size();
float_type *csc_val_ptr = new float_type[nnz];
int *csc_row_ptr = new int[nnz];
int *csc_col_ptr = new int[n_column + 1];
LOG(INFO) << string_format("#non-zeros = %ld, density = %.2f%%", nnz,
(float) nnz / n_column / dataset.n_instances() * 100);
for (int i = 0; i <= n_column; ++i) {
csc_col_ptr[i] = 0;
}
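// histogram pass: count the non-zeros of column col into slot col+1 so that the
// in-place prefix sum below turns the counts into column start offsets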
#pragma omp parallel for // about 5s
for (int i = 0; i < nnz; ++i) {
int idx = dataset.csr_col_idx[i] + 1;
#pragma omp atomic
csc_col_ptr[idx] += 1;
}
for (int i = 1; i < n_column + 1; ++i){
csc_col_ptr[i] += csc_col_ptr[i - 1];
}
// TODO: parallelize here
for (int row = 0; row < dataset.n_instances(); ++row) {
for (int j = dataset.csr_row_ptr[row]; j < dataset.csr_row_ptr[row + 1]; ++j) {
int col = dataset.csr_col_idx[j]; // csr col
int dest = csc_col_ptr[col]; // destination index in csc array
csc_val_ptr[dest] = dataset.csr_val[j];
csc_row_ptr[dest] = row;
csc_col_ptr[col] += 1; // increment column start position
}
}
//recover column start position
for (int i = 0, last = 0; i < n_column; ++i) {
int next_last = csc_col_ptr[i];
csc_col_ptr[i] = last;
last = next_last;
}
// split data to multiple device
int n_device = v_columns.size();
int ave_n_columns = n_column / n_device;
DO_ON_MULTI_DEVICES(n_device, [&](int device_id){
SparseColumns &columns = *v_columns[device_id];
int first_col_id = device_id * ave_n_columns;
int n_column_sub = (device_id < n_device - 1) ? ave_n_columns : n_column - first_col_id;
int first_col_start = csc_col_ptr[first_col_id];
int nnz_sub = (device_id < n_device - 1) ?
(csc_col_ptr[(device_id + 1) * ave_n_columns] - first_col_start) : (nnz - first_col_start);
columns.column_offset = first_col_id + this->column_offset;
columns.nnz = nnz_sub;
columns.n_column = n_column_sub;
columns.n_row = n_row;
columns.csc_val.resize(nnz_sub);
columns.csc_row_idx.resize(nnz_sub);
columns.csc_col_ptr.resize(n_column_sub + 1);
columns.csc_val.copy_from(csc_val_ptr + first_col_start, nnz_sub);
columns.csc_row_idx.copy_from(csc_row_ptr + first_col_start, nnz_sub);
columns.csc_col_ptr.copy_from(csc_col_ptr + first_col_id, n_column_sub + 1);
int *csc_col_ptr_2d_data = columns.csc_col_ptr.host_data();
correct_start(csc_col_ptr_2d_data, first_col_start, n_column_sub);
seg_sort_by_key_cpu(columns.csc_val, columns.csc_row_idx, columns.csc_col_ptr);
});
delete[](csc_val_ptr);
delete[](csc_row_ptr);
delete[](csc_col_ptr);
}
void SparseColumns::csc_by_default(const DataSet &dataset, vector<std::unique_ptr<SparseColumns>> &v_columns) {
const float_type *csc_val_ptr = dataset.csc_val.data();
const int *csc_row_ptr = dataset.csc_row_idx.data();
const int *csc_col_ptr = dataset.csc_col_ptr.data();
n_column = dataset.n_features();
n_row = dataset.n_instances();
nnz = dataset.csc_val.size();
// split data to multiple device
int n_device = v_columns.size();
int ave_n_columns = n_column / n_device;
DO_ON_MULTI_DEVICES(n_device, [&](int device_id){
SparseColumns &columns = *v_columns[device_id];
int first_col_id = device_id * ave_n_columns;
int n_column_sub = (device_id < n_device - 1) ? ave_n_columns : n_column - first_col_id;
int first_col_start = csc_col_ptr[first_col_id];
int nnz_sub = (device_id < n_device - 1) ?
(csc_col_ptr[(device_id + 1) * ave_n_columns] - first_col_start) : (nnz - first_col_start);
columns.column_offset = first_col_id + this->column_offset;
columns.nnz = nnz_sub;
columns.n_column = n_column_sub;
columns.n_row = n_row;
columns.csc_val.resize(nnz_sub);
columns.csc_row_idx.resize(nnz_sub);
columns.csc_col_ptr.resize(n_column_sub + 1);
columns.csc_val.copy_from(csc_val_ptr + first_col_start, nnz_sub);
columns.csc_row_idx.copy_from(csc_row_ptr + first_col_start, nnz_sub);
columns.csc_col_ptr.copy_from(csc_col_ptr + first_col_id, n_column_sub + 1);
int *csc_col_ptr_2d_data = columns.csc_col_ptr.host_data();
correct_start(csc_col_ptr_2d_data, first_col_start, n_column_sub);
cub_seg_sort_by_key(columns.csc_val, columns.csc_row_idx, columns.csc_col_ptr, false);
});
} | 1f5134a57475a9726cf197df3e2d381b51af67bb.cu | //
// Created by shijiashuai on 5/7/18.
//
#include <thundergbm/util/cub_wrapper.h>
#include <thundergbm/sparse_columns.h>
#include "thundergbm/sparse_columns.h"
#include "thundergbm/util/device_lambda.cuh"
#include "cusparse.h"
#include "thundergbm/util/multi_device.h"
#include "omp.h"
//FIXME remove this function
void correct_start(int *csc_col_ptr_2d_data, int first_col_start, int n_column_sub){
device_loop(n_column_sub + 1, [=] __device__(int col_id) {
csc_col_ptr_2d_data[col_id] = csc_col_ptr_2d_data[col_id] - first_col_start;
});
};
void SparseColumns::csr2csc_gpu(const DataSet &dataset, vector<std::unique_ptr<SparseColumns>> &v_columns) {
LOG(INFO) << "convert csr to csc using gpu...";
std::chrono::high_resolution_clock timer;
auto t_start = timer.now();
//three arrays (on GPU/CPU) for csr representation
this->column_offset = 0;
SyncArray<float_type> val;
SyncArray<int> col_idx;
SyncArray<int> row_ptr;
val.resize(dataset.csr_val.size());
col_idx.resize(dataset.csr_col_idx.size());
row_ptr.resize(dataset.csr_row_ptr.size());
//copy data to the three arrays
val.copy_from(dataset.csr_val.data(), val.size());
col_idx.copy_from(dataset.csr_col_idx.data(), col_idx.size());
row_ptr.copy_from(dataset.csr_row_ptr.data(), row_ptr.size());
cusparseHandle_t handle;
cusparseMatDescr_t descr;
cusparseCreate(&handle);
cusparseCreateMatDescr(&descr);
cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
n_column = dataset.n_features_;
n_row = dataset.n_instances();
nnz = dataset.csr_val.size();
csc_val.resize(nnz);
csc_row_idx.resize(nnz);
csc_col_ptr.resize(n_column + 1);
cusparseScsr2csc(handle, dataset.n_instances(), n_column, nnz, val.device_data(), row_ptr.device_data(),
col_idx.device_data(), csc_val.device_data(), csc_row_idx.device_data(), csc_col_ptr.device_data(),
CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO);
cudaDeviceSynchronize();
cusparseDestroy(handle);
cusparseDestroyMatDescr(descr);
val.resize(0);
row_ptr.resize(0);
col_idx.resize(0);
// SyncMem::clear_cache();
int gpu_num;
cudaError_t err = cudaGetDeviceCount(&gpu_num);
std::atexit([](){
SyncMem::clear_cache();
});
int n_device = v_columns.size();
int ave_n_columns = n_column / n_device;
DO_ON_MULTI_DEVICES(n_device, [&](int device_id) {
SparseColumns &columns = *v_columns[device_id];
const int *csc_col_ptr_data = csc_col_ptr.host_data();
int first_col_id = device_id * ave_n_columns;
int n_column_sub = (device_id < n_device - 1) ? ave_n_columns : n_column - first_col_id;
int first_col_start = csc_col_ptr_data[first_col_id];
int nnz_sub = (device_id < n_device - 1) ?
(csc_col_ptr_data[(device_id + 1) * ave_n_columns] - first_col_start) : (nnz -
first_col_start);
columns.column_offset = first_col_id + this->column_offset;
columns.nnz = nnz_sub;
columns.n_column = n_column_sub;
columns.n_row = n_row;
columns.csc_val.resize(nnz_sub);
columns.csc_row_idx.resize(nnz_sub);
columns.csc_col_ptr.resize(n_column_sub + 1);
columns.csc_val.copy_from(csc_val.host_data() + first_col_start, nnz_sub);
columns.csc_row_idx.copy_from(csc_row_idx.host_data() + first_col_start, nnz_sub);
columns.csc_col_ptr.copy_from(csc_col_ptr.host_data() + first_col_id, n_column_sub + 1);
int *csc_col_ptr_2d_data = columns.csc_col_ptr.device_data();
correct_start(csc_col_ptr_2d_data, first_col_start, n_column_sub);
//correct segment start positions
LOG(TRACE) << "sorting feature values (multi-device)";
cub_seg_sort_by_key(columns.csc_val, columns.csc_row_idx, columns.csc_col_ptr, false);
});
auto t_end = timer.now();
std::chrono::duration<float> used_time = t_end - t_start;
LOG(INFO) << "Converting csr to csc took " << used_time.count() << " s";
}
void SparseColumns::csr2csc_cpu(const DataSet &dataset, vector<std::unique_ptr<SparseColumns>> &v_columns) {
LOG(INFO) << "convert csr to csc using cpu...";
this->column_offset = 0;
//cpu transpose
n_column = dataset.n_features();
n_row = dataset.n_instances();
nnz = dataset.csr_val.size();
float_type *csc_val_ptr = new float_type[nnz];
int *csc_row_ptr = new int[nnz];
int *csc_col_ptr = new int[n_column + 1];
LOG(INFO) << string_format("#non-zeros = %ld, density = %.2f%%", nnz,
(float) nnz / n_column / dataset.n_instances() * 100);
for (int i = 0; i <= n_column; ++i) {
csc_col_ptr[i] = 0;
}
#pragma omp parallel for // about 5s
for (int i = 0; i < nnz; ++i) {
int idx = dataset.csr_col_idx[i] + 1;
#pragma omp atomic
csc_col_ptr[idx] += 1;
}
for (int i = 1; i < n_column + 1; ++i){
csc_col_ptr[i] += csc_col_ptr[i - 1];
}
// TODO to parallelize here
for (int row = 0; row < dataset.n_instances(); ++row) {
for (int j = dataset.csr_row_ptr[row]; j < dataset.csr_row_ptr[row + 1]; ++j) {
int col = dataset.csr_col_idx[j]; // csr col
int dest = csc_col_ptr[col]; // destination index in csc array
csc_val_ptr[dest] = dataset.csr_val[j];
csc_row_ptr[dest] = row;
csc_col_ptr[col] += 1; //increment column start position
}
}
//recover column start position
for (int i = 0, last = 0; i < n_column; ++i) {
int next_last = csc_col_ptr[i];
csc_col_ptr[i] = last;
last = next_last;
}
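// Worked example: for per-column non-zero counts {2, 1, 3}, the prefix pass
// gives csc_col_ptr = {0, 2, 3, 6}; the scatter loop advances each entry to
// its column's end, leaving {2, 3, 6, 6}; this shift-by-one restores the
// column start positions {0, 2, 3, 6}.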
// split data to multiple device
int n_device = v_columns.size();
int ave_n_columns = n_column / n_device;
DO_ON_MULTI_DEVICES(n_device, [&](int device_id){
SparseColumns &columns = *v_columns[device_id];
int first_col_id = device_id * ave_n_columns;
int n_column_sub = (device_id < n_device - 1) ? ave_n_columns : n_column - first_col_id;
int first_col_start = csc_col_ptr[first_col_id];
int nnz_sub = (device_id < n_device - 1) ?
(csc_col_ptr[(device_id + 1) * ave_n_columns] - first_col_start) : (nnz - first_col_start);
columns.column_offset = first_col_id + this->column_offset;
columns.nnz = nnz_sub;
columns.n_column = n_column_sub;
columns.n_row = n_row;
columns.csc_val.resize(nnz_sub);
columns.csc_row_idx.resize(nnz_sub);
columns.csc_col_ptr.resize(n_column_sub + 1);
columns.csc_val.copy_from(csc_val_ptr + first_col_start, nnz_sub);
columns.csc_row_idx.copy_from(csc_row_ptr + first_col_start, nnz_sub);
columns.csc_col_ptr.copy_from(csc_col_ptr + first_col_id, n_column_sub + 1);
int *csc_col_ptr_2d_data = columns.csc_col_ptr.host_data();
correct_start(csc_col_ptr_2d_data, first_col_start, n_column_sub);
seg_sort_by_key_cpu(columns.csc_val, columns.csc_row_idx, columns.csc_col_ptr);
});
delete[](csc_val_ptr);
delete[](csc_row_ptr);
delete[](csc_col_ptr);
}
void SparseColumns::csc_by_default(const DataSet &dataset, vector<std::unique_ptr<SparseColumns>> &v_columns) {
const float_type *csc_val_ptr = dataset.csc_val.data();
const int *csc_row_ptr = dataset.csc_row_idx.data();
const int *csc_col_ptr = dataset.csc_col_ptr.data();
n_column = dataset.n_features();
n_row = dataset.n_instances();
nnz = dataset.csc_val.size();
// split data to multiple device
int n_device = v_columns.size();
int ave_n_columns = n_column / n_device;
DO_ON_MULTI_DEVICES(n_device, [&](int device_id){
SparseColumns &columns = *v_columns[device_id];
int first_col_id = device_id * ave_n_columns;
int n_column_sub = (device_id < n_device - 1) ? ave_n_columns : n_column - first_col_id;
int first_col_start = csc_col_ptr[first_col_id];
int nnz_sub = (device_id < n_device - 1) ?
(csc_col_ptr[(device_id + 1) * ave_n_columns] - first_col_start) : (nnz - first_col_start);
columns.column_offset = first_col_id + this->column_offset;
columns.nnz = nnz_sub;
columns.n_column = n_column_sub;
columns.n_row = n_row;
columns.csc_val.resize(nnz_sub);
columns.csc_row_idx.resize(nnz_sub);
columns.csc_col_ptr.resize(n_column_sub + 1);
columns.csc_val.copy_from(csc_val_ptr + first_col_start, nnz_sub);
columns.csc_row_idx.copy_from(csc_row_ptr + first_col_start, nnz_sub);
columns.csc_col_ptr.copy_from(csc_col_ptr + first_col_id, n_column_sub + 1);
int *csc_col_ptr_2d_data = columns.csc_col_ptr.host_data();
correct_start(csc_col_ptr_2d_data, first_col_start, n_column_sub);
cub_seg_sort_by_key(columns.csc_val, columns.csc_row_idx, columns.csc_col_ptr, false);
});
} |
31016b0eb2e2b0e702523ab892bddb6687da83e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include "template.hu"
#define TILE_SZ_A 128
#define TILE_SZ_B 16
#define TILE_SZ_RATIO (TILE_SZ_A/TILE_SZ_B)
__global__ void mysgemm(int m, int n, int k, const float *A, const float *B, float* C) {
/********************************************************************
*
* Compute C = A x B
* where A is a (m x k) matrix
* where B is a (k x n) matrix
* where C is a (m x n) matrix
*
* Use register and shared memory tiling and thread coarsening
*
* NOTE: A and C are column major, B is row major
*
********************************************************************/
// Macros for accessing flattened matrices
#define A(row,col) A[(row) + (col)*m]
#define B(row,col) B[(row)*n + (col)]
#define C(row,col) C[(row) + (col)*m]
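// Indexing illustration (not part of the original template): with m = 4 and
// n = 5, A(2,3) expands to A[2 + 3*4] = A[14] (column-major) and B(2,3)
// expands to B[2*5 + 3] = B[13] (row-major); C uses the same column-major
// layout as A.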
// INSERT KERNEL CODE HERE
}
void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc)
{
if ((transa != 'N') && (transa != 'n')) {
printf("unsupported value of 'transa'\n");
return;
}
if ((transb != 'T') && (transb != 't')) {
printf("unsupported value of 'transb'\n");
return;
}
if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) {
printf("unsupported value of alpha\n");
return;
}
if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) {
printf("unsupported value of beta\n");
return;
}
// Initialize thread block and kernel grid dimensions ---------------------
//INSERT CODE HERE
// Invoke CUDA kernel -----------------------------------------------------
//INSERT CODE HERE
}
| 31016b0eb2e2b0e702523ab892bddb6687da83e0.cu | #include <cstdio>
#include <cstdlib>
#include "template.hu"
#define TILE_SZ_A 128
#define TILE_SZ_B 16
#define TILE_SZ_RATIO (TILE_SZ_A/TILE_SZ_B)
__global__ void mysgemm(int m, int n, int k, const float *A, const float *B, float* C) {
/********************************************************************
*
* Compute C = A x B
* where A is a (m x k) matrix
* where B is a (k x n) matrix
* where C is a (m x n) matrix
*
* Use register and shared memory tiling and thread coarsening
*
* NOTE: A and C are column major, B is row major
*
********************************************************************/
// Macros for accessing flattened matrices
#define A(row,col) A[(row) + (col)*m]
#define B(row,col) B[(row)*n + (col)]
#define C(row,col) C[(row) + (col)*m]
// INSERT KERNEL CODE HERE
}
void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc)
{
if ((transa != 'N') && (transa != 'n')) {
printf("unsupported value of 'transa'\n");
return;
}
if ((transb != 'T') && (transb != 't')) {
printf("unsupported value of 'transb'\n");
return;
}
if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) {
printf("unsupported value of alpha\n");
return;
}
if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) {
printf("unsupported value of beta\n");
return;
}
// Initialize thread block and kernel grid dimensions ---------------------
//INSERT CODE HERE
// Invoke CUDA kernel -----------------------------------------------------
//INSERT CODE HERE
}
|
04bbc237d462b0f42f71f771c675a66d928e6689.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include "helper_cuda.h"
#include "helper_string.h"
#include "sph_kernel_impl.cuh"
#include "sph_solver.cuh"
void CopyParam2Device() {
hipMemcpyToSymbol(dParam, &hParam, sizeof(SimParam_SPH));
}
void CopyParamFromDevice() {
hipMemcpyFromSymbol(&hParam, dParam, sizeof(SimParam_SPH));
}
void calcHash(SimData_SPH data, int num_particles) {
getLastCudaError("Kernel execution failed:before calc hash");
uint num_blocks, num_threads;
computeGridSize(num_particles, 256, num_blocks, num_threads);
calcHashD << <num_blocks, num_threads >> > (data.particleHash,
data.particleIndex,
data.pos,
num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: calc hash");
}
void sortParticle(SimData_SPH data, int pnum) {
thrust::sort_by_key(
thrust::device_ptr<int>(data.particleHash),
thrust::device_ptr<int>(data.particleHash + pnum),
thrust::device_ptr<int>(data.particleIndex)
);
}
void reorderDataAndFindCellStart(
SimData_SPH data,
int num_particles,
int numGridCells
) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
hipMemset(data.gridCellStart, 0xffffffff, numGridCells * sizeof(uint));
//shared memory size
uint smemSize = sizeof(uint)*(num_threads + 1);
reorderDataAndFindCellStartD << < num_blocks, num_threads, smemSize >> >(
data,
num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: reorder data");
}
void ComputePressure(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
hipLaunchKernelGGL(( ComputePressureKernel) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: compute pressure");
}
void computeForce(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
hipLaunchKernelGGL(( computeF) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: compute force");
}
void Advect(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
hipLaunchKernelGGL(( AdvectKernel) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: advection");
}
//==================================================
//
// DFSPH
//
//==================================================
void computeDensityAlpha(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
hipLaunchKernelGGL(( computeDensityAlpha_kernel) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: compute df alpha");
}
void computeNonPForce(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
hipLaunchKernelGGL(( computeNPF_kernel) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: compute non-pressure force");
}
void correctDensityError(SimData_SPH data,
int num_particles,
int maxiter,
float ethres,
bool bDebug)
{
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
float error;
int iter = 0;
//jacobi iteration
float* debug = new float[num_particles];
/*
cfloat3* dbg3 = new cfloat3[num_particles];
hipMemcpy(dbg3, data.v_star, num_particles*sizeof(cfloat3), hipMemcpyDeviceToHost);
FILE* fdbg;
fdbg = fopen("vstar0.txt", "w+");
for (int i=0; i<num_particles; i++)
{
fprintf(fdbg, "%d %f %f %f\n", i, dbg3[i].x, dbg3[i].y, dbg3[i].z);
}
fclose(fdbg);
*/
while (true && iter<maxiter) {
hipLaunchKernelGGL(( solveDensityStiff) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: solve density stiff");
//get error
hipMemcpy(debug, data.error, num_particles*sizeof(float), hipMemcpyDeviceToHost);
error = -9999;
for (int i=0; i<num_particles; i++) {
error = debug[i]>error? debug[i]:error;
}
if(bDebug)
printf("%d error: %f\n", iter, error);
if (error<ethres)
break;
hipLaunchKernelGGL(( applyPStiff) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: apply density stiff");
iter++;
}
hipLaunchKernelGGL(( updatePosition) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: update position");
delete[] debug; // release the host-side error buffer allocated above
/*
//cfloat3* dbg3 = new cfloat3[num_particles];
hipMemcpy(dbg3, data.v_star, num_particles*sizeof(cfloat3), hipMemcpyDeviceToHost);
fdbg = fopen("vstar.txt","w+");
for (int i=0; i<num_particles; i++)
{
fprintf(fdbg, "%d %f %f %f\n",i, dbg3[i].x, dbg3[i].y, dbg3[i].z);
}
fclose(fdbg);
*/
}
void correctDivergenceError(SimData_SPH data,
int num_particles,
int maxiter,
float ethres,
bool bDebug)
{
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
float error;
int iter = 0;
//jacobi iteration
float* debug = new float[num_particles];
//warm start
hipLaunchKernelGGL(( solveDivergenceStiff) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
while (true && iter<maxiter) {
hipLaunchKernelGGL(( solveDivergenceStiff) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: compute divergence stiff");
hipMemcpy(debug, data.error, num_particles*sizeof(float), hipMemcpyDeviceToHost);
error = 0;
for (int i=0; i<num_particles; i++)
error = debug[i]>error? debug[i]:error;
if (error<ethres)
break;
hipLaunchKernelGGL(( applyPStiff) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: apply divergence stiff");
iter++;
}
if (bDebug)
printf("%d error: %f\n", iter, error);
hipLaunchKernelGGL(( UpdateVelocities), dim3(num_blocks), dim3(num_threads), 0, 0, data,num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: update velocities");
delete[] debug; // release the host-side error buffer allocated above
}
//==================================================
//
// Multiphase SPH
//
//==================================================
void DFSPHFactor_Multiphase(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
hipLaunchKernelGGL(( DFSPHFactorKernel_Multiphase) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel failed: compute df alpha multiphase");
}
void EffectiveMass(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
hipLaunchKernelGGL(( EffectiveMassKernel) , dim3(num_blocks), dim3(num_threads), 0, 0,
data,
num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel failed: update mass factor");
}
void NonPressureForce_Multiphase(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
hipLaunchKernelGGL(( NonPressureForceKernel_Multiphase), dim3(num_blocks),dim3(num_threads), 0, 0,
data,
num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel failed: non-pressure force multiphase");
}
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
void EnforceDensity_Multiphase(SimData_SPH data, int num_particles,
int maxiter,
float ethres_avg,
float ethres_max,
bool bDebug,
bool warm_start)
{
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
float err_max;
int iter = 0;
float* debug = new float[num_particles];
if (warm_start)
{
hipLaunchKernelGGL(( EnforceDensityWarmStart) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: solve density stiff warm start");
}
hipMemset(data.rho_stiff, 0, sizeof(float)*num_particles);
float err_avg=0;
int num_p = hParam.num_deformable_p + hParam.num_fluid_p;
while (true && iter<maxiter)
{
hipLaunchKernelGGL(( DensityStiff_Multiphase) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: solve density stiff");
//get error
hipMemcpy(debug, data.error, num_particles*sizeof(float), hipMemcpyDeviceToHost);
err_max = 0;
err_avg = 0;
for (int i=0; i<num_particles; i++)
{
err_max = debug[i]>err_max ? debug[i] : err_max;
err_avg += debug[i];
}
err_avg /= num_p;
if (err_avg < ethres_avg && err_max < ethres_max) break;
hipLaunchKernelGGL(( ApplyPressureKernel_Multiphase) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles, data.rho_stiff );
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: apply density stiff");
iter++;
}
if (bDebug) printf("%d density error: %f %f\n", iter, err_max, err_avg);
delete[] debug;
hipLaunchKernelGGL(( updatePosition) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: update position");
}
void EnforceDivergenceFree_Multiphase(SimData_SPH data, int num_particles,
int maxiter,
float ethres,
bool bDebug,
bool warm_start)
{
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
float err_max;
int iter = 0;
float* debug = new float[num_particles];
if (warm_start)
{
hipLaunchKernelGGL(( EnforceDivergenceWarmStart) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: solve density stiff warm start");
}
hipMemset(data.div_stiff, 0, sizeof(float)*num_particles);
while (true && iter<maxiter)
{
hipLaunchKernelGGL(( DivergenceFreeStiff_Multiphase) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: compute divergence stiff");
hipMemcpy(debug, data.error, num_particles*sizeof(float), hipMemcpyDeviceToHost);
err_max = 0;
for (int i=0; i<num_particles; i++)
err_max = debug[i]>err_max ? debug[i] : err_max;
if (err_max<ethres) break;
hipLaunchKernelGGL(( ApplyPressureKernel_Multiphase) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles, data.div_stiff );
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: apply divergence stiff");
iter++;
}
if (bDebug) printf("%d divergence-free error: %f\n", iter, err_max);
delete[] debug;
hipLaunchKernelGGL(( UpdateVelocities), dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: update velocities");
}
void DriftVelocity(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
hipLaunchKernelGGL(( DriftVelocityKernel), dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: drift velocity.");
}
void PhaseDiffusion(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
hipLaunchKernelGGL(( PredictPhaseDiffusionKernel) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: predict phase diffusion.");
hipLaunchKernelGGL(( PhaseDiffusionKernel), dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: phase diffusion.");
hipLaunchKernelGGL(( UpdateVolumeFraction), dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: update volume fraction.");
/*float* dbg_pt = new float[num_particles*hParam.maxtypenum];
hipMemcpy(dbg_pt, data.vFrac, num_particles*hParam.maxtypenum*sizeof(float),
hipMemcpyDeviceToHost);
float verify=0;
for(int i=0; i<num_particles; i++)
verify += dbg_pt[i*hParam.maxtypenum];
printf("total volume fraction phase 0: %f\n", verify);
delete dbg_pt;*/
}
void PhaseDiffusion(SimData_SPH data, int num_particles, float* dbg, int frameNo) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
hipLaunchKernelGGL(( PredictPhaseDiffusionKernel) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: predict phase diffusion.");
hipLaunchKernelGGL(( PhaseDiffusionKernel), dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: phase diffusion.");
hipLaunchKernelGGL(( UpdateVolumeFraction), dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: update volume fraction.");
if (frameNo%10==0) {
float* dbg_pt = new float[num_particles*hParam.maxtypenum];
hipMemcpy(dbg_pt, data.vFrac, num_particles*hParam.maxtypenum*sizeof(float),
hipMemcpyDeviceToHost);
float verify[10]; for (int k=0; k<10; k++) verify[k]=0;
for (int i=0; i<num_particles; i++) {
for (int k=0; k<hParam.maxtypenum; k++)
verify[k] += dbg_pt[i*hParam.maxtypenum+k];
}
printf("%d %f %f %f\n", frameNo, verify[0], verify[1], verify[2]);
delete[] dbg_pt;
}
}
void HeatConduction(SimData_SPH data, int num_particles)
{
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
hipLaunchKernelGGL(( HeatConductionKernel) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: rigid particle volume");
}
void RigidParticleVolume(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
hipLaunchKernelGGL(( RigidParticleVolumeKernel) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: rigid particle volume");
}
void MoveConstraintBoxAway(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
hipLaunchKernelGGL(( MoveConstraintBoxKernel) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: move constraint box");
}
void DetectDispersedParticles(SimData_SPH data, int num_particles)
{
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
hipLaunchKernelGGL(( DetectDispersedParticlesKernel) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: detect dispersed particles");
}
void ComputeTension(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
hipLaunchKernelGGL(( ComputeTensionWithP_Kernel) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
//HourglassControl_Kernel << <num_blocks, num_threads >> >(data, num_particles);
//hipDeviceSynchronize();
getLastCudaError("Kernel execution failed: detect dispersed particles");
}
void UpdateSolidState(
SimData_SPH data,
int num_particles,
int projection_type
)
{
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
hipLaunchKernelGGL(( UpdateSolidStateF_Kernel) , dim3(num_blocks), dim3(num_threads), 0, 0,
data,
num_particles,
projection_type);
hipDeviceSynchronize();
getLastCudaError("Kernel failed: update solid state");
}
void UpdateSolidTopology(
SimData_SPH data,
int num_particles
)
{
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
hipMemset(data.trim_tag, 0, hParam.num_deformable_p*NUM_NEIGHBOR*sizeof(int));
hipLaunchKernelGGL(( SpatialColorFieldKernel), dim3(num_blocks), dim3(num_threads), 0, 0,
data,
num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel failed: spatial color field");
hipLaunchKernelGGL(( Trim0) , dim3(num_blocks), dim3(num_threads), 0, 0,
data,
num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel failed: trim0");
hipLaunchKernelGGL(( Trim1) , dim3(num_blocks), dim3(num_threads), 0, 0,
data,
num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel failed: trim1");
}
void InitializeDeformable(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
hipLaunchKernelGGL(( InitializeDeformable_Kernel) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel failed: initialize deformables");
}
void AdvectScriptObject(SimData_SPH data,
int num_particles,
cfloat3 vel)
{
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
hipLaunchKernelGGL(( AdvectScriptObjectKernel) , dim3(num_blocks), dim3(num_threads), 0, 0,
data,
num_particles,
vel);
hipDeviceSynchronize();
getLastCudaError("Kernel failed: initialize deformables");
}
/*
Compare with Ren's method.
*/
void ComputeForceMultiphase(SimData_SPH data, int num_p)
{
uint num_threads, num_blocks;
computeGridSize(num_p, 256, num_blocks, num_threads);
hipLaunchKernelGGL(( ComputeForceMultiphase_Kernel) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_p);
hipDeviceSynchronize();
getLastCudaError("Kernel failed: compute force multiphase");
}
void DriftVel_Ren(SimData_SPH data, int num_p)
{
uint num_threads, num_blocks;
computeGridSize(num_p, 256, num_blocks, num_threads);
hipLaunchKernelGGL(( DriftVelRenKernel) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_p);
hipDeviceSynchronize();
getLastCudaError("Kernel failed: drift vel Ren");
}
void PhaseDiffusion_Ren(SimData_SPH data, int num_p)
{
uint num_threads, num_blocks;
computeGridSize(num_p, 256, num_blocks, num_threads);
hipLaunchKernelGGL(( PhaseDiffusionRenKernel) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_p);
hipDeviceSynchronize();
getLastCudaError("Kernel failed: phase diffusion Ren");
hipLaunchKernelGGL(( UpdateVolumeFraction) , dim3(num_blocks), dim3(num_threads), 0, 0, data, num_p);
hipDeviceSynchronize();
getLastCudaError("Kernel failed: update volume fraction");
float* dbg_pt = new float[num_p*hParam.maxtypenum];
hipMemcpy(dbg_pt, data.vFrac, num_p*hParam.maxtypenum*sizeof(float),
hipMemcpyDeviceToHost);
float verify=0;
for(int i=0; i<num_p; i++)
verify += dbg_pt[i*hParam.maxtypenum];
printf("total volume fraction phase 0: %f\n", verify);
delete[] dbg_pt;
}
//About SVD
HDFUNC void cswap(float& x, float& y)
{
float temp = x;
x = y;
y = temp;
}
/**
Copyright (c) 2016 Theodore Gast, Chuyuan Fu, Chenfanfu Jiang, Joseph Teran
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
If the code is used in an article, the following paper shall be cited:
@techreport{qrsvd:2016,
title={Implicit-shifted Symmetric QR Singular Value Decomposition of 3x3 Matrices},
author={Gast, Theodore and Fu, Chuyuan and Jiang, Chenfanfu and Teran, Joseph},
year={2016},
institution={University of California Los Angeles}
}
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
################################################################################
This file implements 2D and 3D polar decompositions and SVDs.
float may be float or double.
2D Polar:
Eigen::Matrix<float, 2, 2> A,R,S;
A<<1,2,3,4;
JIXIE::polarDecomposition(A, R, S);
// R will be the closest rotation to A
// S will be symmetric
2D SVD:
Eigen::Matrix<float, 2, 2> A;
A<<1,2,3,4;
Eigen::Matrix<float, 2, 1> S;
Eigen::Matrix<float, 2, 2> U;
Eigen::Matrix<float, 2, 2> V;
JIXIE::singularValueDecomposition(A,U,S,V);
// A = U S V'
// U and V will be rotations
// S will be singular values sorted by decreasing magnitude. Only the last one may be negative.
3D Polar:
Eigen::Matrix<float, 3, 3> A,R,S;
A<<1,2,3,4,5,6,7,8,9;
JIXIE::polarDecomposition(A, R, S);
// R will be the closest rotation to A
// S will be symmetric
3D SVD:
Eigen::Matrix<float, 3, 3> A;
A<<1,2,3,4,5,6,7,8,9;
Eigen::Matrix<float, 3, 1> S;
Eigen::Matrix<float, 3, 3> U;
Eigen::Matrix<float, 3, 3> V;
JIXIE::singularValueDecomposition(A,U,S,V);
// A = U S V'
// U and V will be rotations
// S will be singular values sorted by decreasing magnitude. Only the last one may be negative.
################################################################################
*/
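/*
Usage sketch with this file's own types (a non-authoritative example; the
Eigen snippets above come from the upstream library, whereas cmat3, cfloat3
and singularValueDecomposition below are the local equivalents):
cmat3 A, U, V;
cfloat3 sigma;
// ... fill A ...
singularValueDecomposition(A, U, sigma, V); // A = U * diag(sigma) * V^T
*/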
/**
SVD based on implicit QR with Wilkinson Shift
*/
/**
Class for givens rotation.
Row rotation G*A corresponds to something like
c -s 0
( s c 0 ) A
0 0 1
Column rotation A G' corresponds to something like
c -s 0
A ( s c 0 )
0 0 1
c and s are always computed so that
( c -s ) ( a )   ( * )
( s  c ) ( b ) = ( 0 )
Assume rowi<rowk.
*/
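/*
Numeric illustration (a sketch; the authoritative sign convention is whatever
GivensRotation::compute() implements, which is defined outside this file):
for (a, b) = (3, 4) one valid pair is c = 3/5, s = -4/5, so that
( c -s ) ( 3 )   ( 5 )
( s  c ) ( 4 ) = ( 0 )
*/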
HDFUNC inline void zeroChase(cmat3& H, cmat3& U, cmat3& V)
{
GivensRotation r1(H[0][0], H[1][0], 0, 1);
GivensRotation r2(1, 2);
if (!ZERO(H[1][0]))
r2.compute(H[0][0] * H[0][1] + H[1][0] * H[1][1], H[0][0] * H[0][2] + H[1][0] * H[1][2]);
else
r2.compute(H[0][1], H[0][2]);
r1.rowRotation3(H);
/* GivensRotation<float> r2(H(0, 1), H(0, 2), 1, 2); */
r2.columnRotation3(H);
r2.columnRotation3(V);
/**
Reduce H to of form
x x 0
0 x x
0 0 x
*/
GivensRotation r3(H[1][1], H[2][1], 1, 2);
r3.rowRotation3(H);
// Save this till end for better cache coherency
// r1.rowRotation(u_transpose);
// r3.rowRotation(u_transpose);
r1.columnRotation3(U);
r3.columnRotation3(U);
}
/**
\brief make a 3X3 matrix to upper bidiagonal form
original form of H: x x x
x x x
x x x
after zero chase:
x x 0
0 x x
0 0 x
*/
HDFUNC inline void makeUpperBidiag(cmat3& H, cmat3& U, cmat3& V)
{
U.Identity();
V.Identity();
/**
Reduce H to of form
x x x
x x x
0 x x
*/
GivensRotation r(H[1][0], H[2][0], 1, 2);
r.rowRotation3(H);
// r.rowRotation(u_transpose);
r.columnRotation3(U);
// zeroChase(H, u_transpose, V);
zeroChase(H, U, V);
}
/**
\brief make a 3X3 matrix to lambda shape
original form of H: x x x
* x x x
* x x x
after :
* x 0 0
* x x 0
* x 0 x
*/
HDFUNC inline void makeLambdaShape(cmat3& H, cmat3& U, cmat3& V)
{
U.Identity();
V.Identity();
/**
Reduce H to of form
* x x 0
* x x x
* x x x
*/
GivensRotation r1(H[0][1], H[0][2], 1, 2);
r1.columnRotation3(H);
r1.columnRotation3(V);
/**
Reduce H to of form
* x x 0
* x x 0
* x x x
*/
r1.computeUnconventional(H[1][2], H[2][2]);
r1.rowRotation3(H);
r1.columnRotation3(U);
/**
Reduce H to of form
* x x 0
* x x 0
* x 0 x
*/
GivensRotation r2(H[2][0], H[2][1], 0, 1);
r2.columnRotation3(H);
r2.columnRotation3(V);
/**
Reduce H to of form
* x 0 0
* x x 0
* x 0 x
*/
r2.computeUnconventional(H[0][1], H[1][1]);
r2.rowRotation3(H);
r2.columnRotation3(U);
}
/**
\brief 2x2 polar decomposition.
\param[in] A matrix.
\param[out] R Robustly a rotation matrix in givens form
\param[out] S_Sym Symmetric. Whole matrix is stored
Whole matrix S is stored since it's faster to calculate due to SIMD vectorization
Polar guarantees negative sign is on the small magnitude singular value.
S is guaranteed to be the closest one to identity.
R is guaranteed to be the closest rotation to A.
*/
HDFUNC inline void polarDecomposition2(cmat2& A, GivensRotation& R, cmat2& S)
{
float x0 = A[0][0] + A[1][1];
float x1 = A[1][0] - A[0][1];
float denominator = sqrt(x0 * x0 + x1 * x1);
R.c = (float)1;
R.s = (float)0;
if (denominator != 0) {
R.c = x0 / denominator;
R.s = -x1 / denominator;
}
S = A;
R.rowRotation2(S);
}
/**
\brief 2x2 polar decomposition.
\param[in] A matrix.
\param[out] R Robustly a rotation matrix.
\param[out] S_Sym Symmetric. Whole matrix is stored
Whole matrix S is stored since it's faster to calculate due to SIMD vectorization
Polar guarantees negative sign is on the small magnitude singular value.
S is guaranteed to be the closest one to identity.
R is guaranteed to be the closest rotation to A.
*/
HDFUNC inline void polarDecomposition2(cmat2& A, cmat2& R, cmat2& S)
{
GivensRotation r(0, 1);
polarDecomposition2(A, r, S);
r.fill2(R);
}
/**
\brief 2x2 SVD (singular value decomposition) A=USV'
\param[in] A Input matrix.
\param[out] U Robustly a rotation matrix in Givens form
\param[out] Sigma Vector of singular values sorted with decreasing magnitude. The second one can be negative.
\param[out] V Robustly a rotation matrix in Givens form
*/
HDFUNC inline void singularValueDecomposition2(cmat2& A, GivensRotation& U, cfloat2& Sigma, GivensRotation& V, const float tol = 64 * 1e-10)
{
cmat2 S;
polarDecomposition2(A, U, S);
float cosine, sine;
float x = S[0][0];
float y = S[0][1];
float z = S[1][1];
if (ZERO(y))
{
cosine = 1;
sine = 0;
Sigma.x = x;
Sigma.y = z;
}
else
{
float tau = 0.5 * (x - z);
float w = sqrt(tau * tau + y * y);
// w > y > 0
float t;
if (tau > 0) {
// tau + w > w > y > 0 ==> division is safe
t = y / (tau + w);
}
else {
// tau - w < -w < -y < 0 ==> division is safe
t = y / (tau - w);
}
cosine = float(1) / sqrt(t * t + float(1));
sine = -t * cosine;
/*
V = [cosine -sine; sine cosine]
Sigma = V'SV. Only compute the diagonals for efficiency.
Also utilize symmetry of S and don't form V yet.
*/
float c2 = cosine * cosine;
float csy = 2 * cosine * sine * y;
float s2 = sine * sine;
Sigma.x = c2 * x - csy + s2 * z;
Sigma.y = s2 * x + csy + c2 * z;
}
// Sorting
// Polar already guarantees negative sign is on the small magnitude singular value.
if (Sigma.x < Sigma.y) {
cswap(Sigma.x, Sigma.y);
V.c = -sine;
V.s = cosine;
}
else {
V.c = cosine;
V.s = sine;
}
U *= V;
}
/**
\brief 2x2 SVD (singular value decomposition) A=USV'
\param[in] A Input matrix.
\param[out] U Robustly a rotation matrix.
\param[out] Sigma Vector of singular values sorted with decreasing magnitude. The second one can be negative.
\param[out] V Robustly a rotation matrix.
*/
HDFUNC inline void singularValueDecomposition2(cmat2& A, cmat2& U, cfloat2& Sigma, cmat2& V, const float tol = 64 * 1e-10)
{
GivensRotation gv(0, 1);
GivensRotation gu(0, 1);
singularValueDecomposition2(A, gu, Sigma, gv, tol);
gu.fill2(U);
gv.fill2(V);
}
/**
\brief compute wilkinsonShift of the block
a1 b1
b1 a2
based on the wilkinsonShift formula
mu = a2 + d - sign(d) * sqrt(d*d + b1*b1), where d = (a1 - a2)/2
*/
HDFUNC float wilkinsonShift(const float a1, const float b1, const float a2)
{
float d = (float)0.5 * (a1 - a2);
float bs = b1 * b1;
float mu = a2 - copysign(bs / (fabs(d) + sqrt(d * d + bs)), d);
// float mu = a2 - bs / ( d + sign_d*sqrt (d*d + bs));
return mu;
}
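/*
Equivalence check for the guarded form above: with bs = b1*b1 and
d = 0.5*(a1 - a2),
bs / (|d| + sqrt(d*d + bs)) == sqrt(d*d + bs) - |d|,
hence
a2 - copysign(bs / (|d| + sqrt(d*d + bs)), d) == a2 + d - sign(d)*sqrt(d*d + bs),
which is the textbook Wilkinson shift written without the cancellation-prone
subtraction.
*/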
/**
\brief Helper function of 3X3 SVD for processing 2X2 SVD
*/
HDFUNC inline void process(int t, cmat3& B, cmat3& U, cfloat3& sigma, cmat3& V)
{
int other = (t == 1) ? 0 : 2;
GivensRotation u(0, 1);
GivensRotation v(0, 1);
sigma[other] = B[other][other];
cfloat2 sigma2 = other == 0 ? cfloat2(sigma[1], sigma[2]) : cfloat2(sigma[0], sigma[1]);
cmat2 b2 = other == 0 ? cmat2(B[1][1], B[1][2], B[2][1], B[2][2]) : cmat2(B[0][0], B[0][1], B[1][0], B[1][1]);
singularValueDecomposition2(b2, u, sigma2, v);
if (other == 0)
{
B[1][1] = b2[0][0];
B[1][2] = b2[0][1];
B[2][1] = b2[1][0];
B[2][2] = b2[1][1];
sigma[1] = sigma2.x;
sigma[2] = sigma2.y;
}
else
{
B[0][0] = b2[0][0];
B[0][1] = b2[0][1];
B[1][0] = b2[1][0];
B[1][1] = b2[1][1];
sigma[0] = sigma2.x;
sigma[1] = sigma2.y;
}
u.rowi += t;
u.rowk += t;
v.rowi += t;
v.rowk += t;
u.columnRotation3(U);
v.columnRotation3(V);
}
/**
\brief Helper function of 3X3 SVD for flipping signs due to flipping signs of sigma
*/
HDFUNC inline void flipSign(int i, cmat3& U, cfloat3& sigma)
{
sigma[i] = -sigma[i];
U[0][i] = -U[0][i];
U[1][i] = -U[1][i];
U[2][i] = -U[2][i];
}
HDFUNC inline void colswap(cmat3& A, int c1, int c2)
{
cswap(A[0][c1], A[0][c2]);
cswap(A[1][c1], A[1][c2]);
cswap(A[2][c1], A[2][c2]);
}
/**
\brief Helper function of 3X3 SVD for sorting singular values
*/
HDFUNC inline void sort0(cmat3& U, cfloat3& sigma, cmat3& V)
{
// Case: sigma(0) > |sigma(1)| >= |sigma(2)|
if (fabs(sigma[1]) >= fabs(sigma[2])) {
if (sigma[1] < 0) {
flipSign(1, U, sigma);
flipSign(2, U, sigma);
}
return;
}
//fix sign of sigma for both cases
if (sigma[2] < 0) {
flipSign(1, U, sigma);
flipSign(2, U, sigma);
}
//swap sigma(1) and sigma(2) for both cases
cswap(sigma[1], sigma[2]);
colswap(U, 1, 2);
colswap(V, 1, 2);
// Case: |sigma(2)| >= sigma(0) > |sigma(1)|
if (sigma[1] > sigma[0]) {
cswap(sigma[0], sigma[1]);
colswap(U, 0, 1);
colswap(V, 0, 1);
}
// Case: sigma(0) >= |sigma(2)| > |sigma(1)|
else {
U[0][2] = -U[0][2];
U[1][2] = -U[1][2];
U[2][2] = -U[2][2];
V[0][2] = -V[0][2];
V[1][2] = -V[1][2];
V[2][2] = -V[2][2];
}
}
/**
\brief Helper function of 3X3 SVD for sorting singular values
*/
HDFUNC inline void sort1(cmat3& U, cfloat3& sigma, cmat3& V)
{
// Case: |sigma(0)| >= sigma(1) > |sigma(2)|
if (fabs(sigma[0]) >= sigma[1]) {
if (sigma[0] < 0) {
flipSign(0, U, sigma);
flipSign(2, U, sigma);
}
return;
}
//swap sigma(0) and sigma(1) for both cases
cswap(sigma[0], sigma[1]);
colswap(U, 0, 1);
colswap(V, 0, 1);
// Case: sigma(1) > |sigma(2)| >= |sigma(0)|
if (fabs(sigma[1]) < fabs(sigma[2])) {
cswap(sigma[1], sigma[2]);
colswap(U, 1, 2);
colswap(V, 1, 2);
}
// Case: sigma(1) >= |sigma(0)| > |sigma(2)|
else {
U[0][1] = -U[0][1];
U[1][1] = -U[1][1];
U[2][1] = -U[2][1];
V[0][1] = -V[0][1];
V[1][1] = -V[1][1];
V[2][1] = -V[2][1];
}
// fix sign for both cases
if (sigma[1] < 0) {
flipSign(1, U, sigma);
flipSign(2, U, sigma);
}
}
HDFUNC float mycmax(float a, float b)
{
return a > b ? a : b;
}
/**
\brief 3X3 SVD (singular value decomposition) A=USV'
\param[in] A Input matrix.
\param[out] U is a rotation matrix.
\param[out] sigma Diagonal matrix, sorted with decreasing magnitude. The third one can be negative.
\param[out] V is a rotation matrix.
*/
HDFUNC int singularValueDecomposition(cmat3& A, cmat3& U, cfloat3& sigma, cmat3& V, float tol = 128 * 1e-8)
{
cmat3 B = A;
U.Identity();
V.Identity();
makeUpperBidiag(B, U, V);
int count = 0;
float mu = 0.0f;
GivensRotation r(0, 1);
float alpha_1 = B[0][0];
float beta_1 = B[0][1];
float alpha_2 = B[1][1];
float alpha_3 = B[2][2];
float beta_2 = B[1][2];
float gamma_1 = alpha_1 * beta_1;
float gamma_2 = alpha_2 * beta_2;
tol *= mycmax(0.5 * sqrt(alpha_1 * alpha_1 + alpha_2 * alpha_2 + alpha_3 * alpha_3 + beta_1 * beta_1 + beta_2 * beta_2), 1);
/**
Do implicit shift QR until A^float A is block diagonal
*/
while (fabsf(beta_2) > tol && fabsf(beta_1) > tol && fabsf(alpha_1) > tol && fabsf(alpha_2) > tol && fabsf(alpha_3) > tol) {
mu = wilkinsonShift(alpha_2 * alpha_2 + beta_1 * beta_1, gamma_2, alpha_3 * alpha_3 + beta_2 * beta_2);
r.compute(alpha_1 * alpha_1 - mu, gamma_1);
r.columnRotation3(B);
r.columnRotation3(V);
zeroChase(B, U, V);
alpha_1 = B[0][0];
beta_1 = B[0][1];
alpha_2 = B[1][1];
alpha_3 = B[2][2];
beta_2 = B[1][2];
gamma_1 = alpha_1 * beta_1;
gamma_2 = alpha_2 * beta_2;
count++;
}
/**
Handle the cases of one of the alphas and betas being 0
Sorted by ease of handling and then frequency
of occurrence
If B is of form
x x 0
0 x 0
0 0 x
*/
if (fabs(beta_2) <= tol) {
process(0, B, U, sigma, V);
sort0(U, sigma, V);
}
/**
If B is of form
x 0 0
0 x x
0 0 x
*/
else if (fabs(beta_1) <= tol) {
process(1, B, U, sigma, V);
sort1(U, sigma, V);
}
/**
If B is of form
x x 0
0 0 x
0 0 x
*/
else if (fabs(alpha_2) <= tol) {
/**
Reduce B to
x x 0
0 0 0
0 0 x
*/
GivensRotation r1(1, 2);
r1.computeUnconventional(B[1][2], B[2][2]);
r1.rowRotation3(B);
r1.columnRotation3(U);
process(0, B, U, sigma, V);
sort0(U, sigma, V);
}
/**
If B is of form
x x 0
0 x x
0 0 0
*/
else if (fabs(alpha_3) <= tol) {
/**
Reduce B to
x x +
0 x 0
0 0 0
*/
GivensRotation r1(1, 2);
r1.compute(B[1][1], B[1][2]);
r1.columnRotation3(B);
r1.columnRotation3(V);
/**
Reduce B to
x x 0
+ x 0
0 0 0
*/
GivensRotation r2(0, 2);
r2.compute(B[0][0], B[0][2]);
r2.columnRotation3(B);
r2.columnRotation3(V);
process(0, B, U, sigma, V);
sort0(U, sigma, V);
}
/**
If B is of form
0 x 0
0 x x
0 0 x
*/
else if (fabs(alpha_1) <= tol) {
/**
Reduce B to
0 0 +
0 x x
0 0 x
*/
GivensRotation r1(0, 1);
r1.computeUnconventional(B[0][1], B[1][1]);
r1.rowRotation3(B);
r1.columnRotation3(U);
/**
Reduce B to
0 0 0
0 x x
0 + x
*/
GivensRotation r2(0, 2);
r2.computeUnconventional(B[0][2], B[2][2]);
r2.rowRotation3(B);
r2.columnRotation3(U);
process(1, B, U, sigma, V);
sort1(U, sigma, V);
}
return count;
}
HDFUNC cmat3 MooreInv(cmat3 A)
{
cmat3 U, V;
cfloat3 sigma;
singularValueDecomposition(A, U, sigma, V);
cmat3 S;
S[0][0] = sigma.x;
S[1][1] = sigma.y;
S[2][2] = sigma.z;
cmat3 s = S.Reci();
cmat3 UT;
mat3transpose(U, UT);
cmat3 mid;
mat3prod(V, s, mid);
cmat3 ret;
mat3prod(mid, UT, ret);
return ret;
}
//IISPH
void IISPHFactor(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
IISPHFactorKernel << < num_blocks, num_threads >> > (data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel failed: compute df alpha multiphase");
}
void IISPHPredictDensity(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
IISPHPredictDensityKernel << < num_blocks, num_threads >> > (data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel failed: compute df alpha multiphase");
}
void IISPHSolvePressure(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
printf("%d\n", num_particles);
float err_sum = 0.0f;
for (int iter = 0; iter < 10; iter++) //temp 10
{
CalcDIJPJLKernel << < num_blocks, num_threads >> > (data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel failed: CalcDIJPJLKernel");
CalcNewPressureKernel << < num_blocks, num_threads >> > (data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel failed: CalcNewPressureKernel");
float* err_host = new float[num_particles];
hipMemcpy(err_host, data.error, sizeof(float) * num_particles, hipMemcpyDeviceToHost);
float last_err_sum = err_sum;
err_sum = 0.0f;
for (int i = 0; i < num_particles; i++)
err_sum += err_host[i];
delete[] err_host;
err_sum /= num_particles;
//printf("%d %f\n", iter, err_sum);
if (fabsf(err_sum) < 1e-6 || fabsf(err_sum - last_err_sum) < 1e-6)
break;
}
}
void IISPHUpdate(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
CalcPressureForceKernel << < num_blocks, num_threads >> > (data, num_particles);
hipDeviceSynchronize();
IISPHUpdateKernel << < num_blocks, num_threads >> > (data, num_particles);
hipDeviceSynchronize();
getLastCudaError("Kernel failed: compute df alpha multiphase");
} | 04bbc237d462b0f42f71f771c675a66d928e6689.cu | #include <cuda.h>
#include <cuda_runtime_api.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include "helper_cuda.h"
#include "helper_string.h"
#include "sph_kernel_impl.cuh"
#include "sph_solver.cuh"
void CopyParam2Device() {
cudaMemcpyToSymbol(dParam, &hParam, sizeof(SimParam_SPH));
}
void CopyParamFromDevice() {
cudaMemcpyFromSymbol(&hParam, dParam, sizeof(SimParam_SPH));
}
void calcHash(SimData_SPH data, int num_particles) {
getLastCudaError("Kernel execution failed:before calc hash");
uint num_blocks, num_threads;
computeGridSize(num_particles, 256, num_blocks, num_threads);
calcHashD << <num_blocks, num_threads >> > (data.particleHash,
data.particleIndex,
data.pos,
num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: calc hash");
}
void sortParticle(SimData_SPH data, int pnum) {
thrust::sort_by_key(
thrust::device_ptr<int>(data.particleHash),
thrust::device_ptr<int>(data.particleHash + pnum),
thrust::device_ptr<int>(data.particleIndex)
);
}
void reorderDataAndFindCellStart(
SimData_SPH data,
int num_particles,
int numGridCells
) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
cudaMemset(data.gridCellStart, 0xffffffff, numGridCells * sizeof(uint));
//shared memory size
uint smemSize = sizeof(uint)*(num_threads + 1);
reorderDataAndFindCellStartD << < num_blocks, num_threads, smemSize >> >(
data,
num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: reorder data");
}
void ComputePressure(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
ComputePressureKernel <<< num_blocks, num_threads>>>(data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: compute pressure");
}
void computeForce(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
computeF <<< num_blocks, num_threads>>>(data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: compute force");
}
void Advect(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
AdvectKernel <<< num_blocks, num_threads>>> (data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: advection");
}
//==================================================
//
// DFSPH
//
//==================================================
void computeDensityAlpha(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
computeDensityAlpha_kernel <<< num_blocks, num_threads>>> (data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: compute df alpha");
}
void computeNonPForce(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
computeNPF_kernel <<< num_blocks, num_threads>>> (data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: compute non-pressure force");
}
void correctDensityError(SimData_SPH data,
int num_particles,
int maxiter,
float ethres,
bool bDebug)
{
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
float error;
int iter = 0;
//jacobi iteration
float* debug = new float[num_particles];
/*
cfloat3* dbg3 = new cfloat3[num_particles];
cudaMemcpy(dbg3, data.v_star, num_particles*sizeof(cfloat3), cudaMemcpyDeviceToHost);
FILE* fdbg;
fdbg = fopen("vstar0.txt", "w+");
for (int i=0; i<num_particles; i++)
{
fprintf(fdbg, "%d %f %f %f\n", i, dbg3[i].x, dbg3[i].y, dbg3[i].z);
}
fclose(fdbg);
*/
while (true && iter<maxiter) {
solveDensityStiff <<< num_blocks, num_threads>>> (data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: solve density stiff");
//get error
cudaMemcpy(debug, data.error, num_particles*sizeof(float), cudaMemcpyDeviceToHost);
error = -9999;
for (int i=0; i<num_particles; i++) {
error = debug[i]>error? debug[i]:error;
}
if(bDebug)
printf("%d error: %f\n", iter, error);
if (error<ethres)
break;
applyPStiff <<<num_blocks, num_threads>>>(data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: apply density stiff");
iter++;
}
updatePosition <<<num_blocks, num_threads>>>(data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: update position");
delete[] debug; // release the host-side error buffer allocated above
/*
//cfloat3* dbg3 = new cfloat3[num_particles];
cudaMemcpy(dbg3, data.v_star, num_particles*sizeof(cfloat3), cudaMemcpyDeviceToHost);
fdbg = fopen("vstar.txt","w+");
for (int i=0; i<num_particles; i++)
{
fprintf(fdbg, "%d %f %f %f\n",i, dbg3[i].x, dbg3[i].y, dbg3[i].z);
}
fclose(fdbg);
*/
}
void correctDivergenceError(SimData_SPH data,
int num_particles,
int maxiter,
float ethres,
bool bDebug)
{
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
float error;
int iter = 0;
//jacobi iteration
float* debug = new float[num_particles];
//warm start
solveDivergenceStiff <<< num_blocks, num_threads>>> (data, num_particles);
while (true && iter<maxiter) {
solveDivergenceStiff <<< num_blocks, num_threads>>> (data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: compute divergence stiff");
cudaMemcpy(debug, data.error, num_particles*sizeof(float), cudaMemcpyDeviceToHost);
error = 0;
for (int i=0; i<num_particles; i++)
error = debug[i]>error? debug[i]:error;
if (error<ethres)
break;
applyPStiff <<<num_blocks, num_threads>>>(data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: apply divergence stiff");
iter++;
}
if (bDebug)
printf("%d error: %f\n", iter, error);
UpdateVelocities<<<num_blocks, num_threads>>>(data,num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: update velocities");
delete[] debug; // release the host-side error buffer allocated above
}
//==================================================
//
// Multiphase SPH
//
//==================================================
void DFSPHFactor_Multiphase(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
DFSPHFactorKernel_Multiphase <<< num_blocks, num_threads>>> (data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel failed: compute df alpha multiphase");
}
void EffectiveMass(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
EffectiveMassKernel <<< num_blocks, num_threads>>> (
data,
num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel failed: update mass factor");
}
void NonPressureForce_Multiphase(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
NonPressureForceKernel_Multiphase<<<num_blocks,num_threads>>>(
data,
num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel failed: non-pressure force multiphase");
}
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
void EnforceDensity_Multiphase(SimData_SPH data, int num_particles,
int maxiter,
float ethres_avg,
float ethres_max,
bool bDebug,
bool warm_start)
{
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
float err_max;
int iter = 0;
float* debug = new float[num_particles];
if (warm_start)
{
EnforceDensityWarmStart <<< num_blocks, num_threads>>>(data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: solve density stiff warm start");
}
cudaMemset(data.rho_stiff, 0, sizeof(float)*num_particles);
float err_avg=0;
int num_p = hParam.num_deformable_p + hParam.num_fluid_p;
while (true && iter<maxiter)
{
DensityStiff_Multiphase <<< num_blocks, num_threads>>> (data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: solve density stiff");
//get error
cudaMemcpy(debug, data.error, num_particles*sizeof(float), cudaMemcpyDeviceToHost);
err_max = 0;
err_avg = 0;
for (int i=0; i<num_particles; i++)
{
err_max = debug[i]>err_max ? debug[i] : err_max;
err_avg += debug[i];
}
err_avg /= num_p;
if (err_avg < ethres_avg && err_max < ethres_max) break;
ApplyPressureKernel_Multiphase <<<num_blocks, num_threads>>> ( data, num_particles, data.rho_stiff );
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: apply density stiff");
iter++;
}
if (bDebug) printf("%d density error: %f %f\n", iter, err_max, err_avg);
delete[] debug;
updatePosition <<<num_blocks, num_threads>>>(data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: update position");
}
void EnforceDivergenceFree_Multiphase(SimData_SPH data, int num_particles,
int maxiter,
float ethres,
bool bDebug,
bool warm_start)
{
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
float err_max;
int iter = 0;
float* debug = new float[num_particles];
if (warm_start)
{
EnforceDivergenceWarmStart <<< num_blocks, num_threads>>>(data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: solve density stiff warm start");
}
cudaMemset(data.div_stiff, 0, sizeof(float)*num_particles);
while (true && iter<maxiter)
{
DivergenceFreeStiff_Multiphase <<< num_blocks, num_threads>>> (data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: compute divergence stiff");
cudaMemcpy(debug, data.error, num_particles*sizeof(float), cudaMemcpyDeviceToHost);
err_max = 0;
for (int i=0; i<num_particles; i++)
err_max = debug[i]>err_max ? debug[i] : err_max;
if (err_max<ethres) break;
ApplyPressureKernel_Multiphase <<<num_blocks, num_threads>>>( data, num_particles, data.div_stiff );
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: apply divergence stiff");
iter++;
}
if (bDebug) printf("%d divergence-free error: %f\n", iter, err_max);
delete[] debug;
UpdateVelocities<<<num_blocks, num_threads>>>(data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: update velocities");
}
void DriftVelocity(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
DriftVelocityKernel<<<num_blocks, num_threads>>>(data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: drift velocity.");
}
void PhaseDiffusion(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
PredictPhaseDiffusionKernel <<<num_blocks, num_threads>>>(data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: predict phase diffusion.");
PhaseDiffusionKernel<<<num_blocks, num_threads>>>(data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: phase diffusion.");
UpdateVolumeFraction<<<num_blocks, num_threads>>>(data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: update volume fraction.");
/*float* dbg_pt = new float[num_particles*hParam.maxtypenum];
cudaMemcpy(dbg_pt, data.vFrac, num_particles*hParam.maxtypenum*sizeof(float),
cudaMemcpyDeviceToHost);
float verify=0;
for(int i=0; i<num_particles; i++)
verify += dbg_pt[i*hParam.maxtypenum];
printf("total volume fraction phase 0: %f\n", verify);
delete dbg_pt;*/
}
void PhaseDiffusion(SimData_SPH data, int num_particles, float* dbg, int frameNo) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
PredictPhaseDiffusionKernel <<<num_blocks, num_threads>>>(data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: predict phase diffusion.");
PhaseDiffusionKernel<<<num_blocks, num_threads>>>(data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: phase diffusion.");
UpdateVolumeFraction<<<num_blocks, num_threads>>>(data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: update volume fraction.");
if (frameNo%10==0) {
float* dbg_pt = new float[num_particles*hParam.maxtypenum];
cudaMemcpy(dbg_pt, data.vFrac, num_particles*hParam.maxtypenum*sizeof(float),
cudaMemcpyDeviceToHost);
float verify[10]; for (int k=0; k<10; k++) verify[k]=0;
for (int i=0; i<num_particles; i++) {
for (int k=0; k<hParam.maxtypenum; k++)
verify[k] += dbg_pt[i*hParam.maxtypenum+k];
}
printf("%d %f %f %f\n", frameNo, verify[0], verify[1], verify[2]);
delete[] dbg_pt;
}
}
void HeatConduction(SimData_SPH data, int num_particles)
{
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
HeatConductionKernel <<<num_blocks, num_threads>>>(data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: rigid particle volume");
}
void RigidParticleVolume(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
RigidParticleVolumeKernel <<<num_blocks, num_threads>>>(data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: rigid particle volume");
}
void MoveConstraintBoxAway(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
MoveConstraintBoxKernel <<<num_blocks, num_threads>>>(data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: move constraint box");
}
void DetectDispersedParticles(SimData_SPH data, int num_particles)
{
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
DetectDispersedParticlesKernel <<<num_blocks, num_threads>>>(data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: detect dispersed particles");
}
void ComputeTension(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
ComputeTensionWithP_Kernel <<<num_blocks, num_threads>>>(data, num_particles);
cudaThreadSynchronize();
//HourglassControl_Kernel << <num_blocks, num_threads >> >(data, num_particles);
//cudaThreadSynchronize();
getLastCudaError("Kernel execution failed: detect dispersed particles");
}
void UpdateSolidState(
SimData_SPH data,
int num_particles,
int projection_type
)
{
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
UpdateSolidStateF_Kernel <<<num_blocks, num_threads>>>(
data,
num_particles,
projection_type);
cudaThreadSynchronize();
getLastCudaError("Kernel failed: update solid state");
}
void UpdateSolidTopology(
SimData_SPH data,
int num_particles
)
{
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
cudaMemset(data.trim_tag, 0, hParam.num_deformable_p*NUM_NEIGHBOR*sizeof(int));
SpatialColorFieldKernel<<<num_blocks, num_threads>>>(
data,
num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel failed: spatial color field");
Trim0 <<<num_blocks, num_threads>>>(
data,
num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel failed: trim0");
Trim1 <<<num_blocks, num_threads>>>(
data,
num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel failed: trim1");
}
void InitializeDeformable(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
InitializeDeformable_Kernel <<<num_blocks, num_threads>>>(data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel failed: initialize deformables");
}
void AdvectScriptObject(SimData_SPH data,
int num_particles,
cfloat3 vel)
{
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
AdvectScriptObjectKernel <<<num_blocks, num_threads>>>(
data,
num_particles,
vel);
cudaThreadSynchronize();
getLastCudaError("Kernel failed: initialize deformables");
}
/*
Compare with Ren's method.
*/
void ComputeForceMultiphase(SimData_SPH data, int num_p)
{
uint num_threads, num_blocks;
computeGridSize(num_p, 256, num_blocks, num_threads);
ComputeForceMultiphase_Kernel <<<num_blocks, num_threads>>>(data, num_p);
cudaThreadSynchronize();
getLastCudaError("Kernel failed: compute force multiphase");
}
void DriftVel_Ren(SimData_SPH data, int num_p)
{
uint num_threads, num_blocks;
computeGridSize(num_p, 256, num_blocks, num_threads);
DriftVelRenKernel <<<num_blocks, num_threads>>>(data, num_p);
cudaThreadSynchronize();
getLastCudaError("Kernel failed: drift vel Ren");
}
void PhaseDiffusion_Ren(SimData_SPH data, int num_p)
{
uint num_threads, num_blocks;
computeGridSize(num_p, 256, num_blocks, num_threads);
PhaseDiffusionRenKernel <<<num_blocks, num_threads>>>(data, num_p);
cudaThreadSynchronize();
getLastCudaError("Kernel failed: phase diffusion Ren");
UpdateVolumeFraction <<<num_blocks, num_threads>>>(data, num_p);
cudaThreadSynchronize();
getLastCudaError("Kernel failed: update volume fraction");
float* dbg_pt = new float[num_p*hParam.maxtypenum];
cudaMemcpy(dbg_pt, data.vFrac, num_p*hParam.maxtypenum*sizeof(float),
cudaMemcpyDeviceToHost);
float verify=0;
for(int i=0; i<num_p; i++)
verify += dbg_pt[i*hParam.maxtypenum];
printf("total volume fraction phase 0: %f\n", verify);
delete[] dbg_pt;
}
//About SVD
HDFUNC void cswap(float& x, float& y)
{
float temp = x;
x = y;
y = temp;
}
/**
Copyright (c) 2016 Theodore Gast, Chuyuan Fu, Chenfanfu Jiang, Joseph Teran
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
If the code is used in an article, the following paper shall be cited:
@techreport{qrsvd:2016,
title={Implicit-shifted Symmetric QR Singular Value Decomposition of 3x3 Matrices},
author={Gast, Theodore and Fu, Chuyuan and Jiang, Chenfanfu and Teran, Joseph},
year={2016},
institution={University of California Los Angeles}
}
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
################################################################################
This file implements 2D and 3D polar decompositions and SVDs.
The scalar type may be float or double.
2D Polar:
Eigen::Matrix<float, 2, 2> A,R,S;
A<<1,2,3,4;
JIXIE::polarDecomposition(A, R, S);
// R will be the closest rotation to A
// S will be symmetric
2D SVD:
Eigen::Matrix<float, 2, 2> A;
A<<1,2,3,4;
Eigen::Matrix<float, 2, 1> S;
Eigen::Matrix<float, 2, 2> U;
Eigen::Matrix<float, 2, 2> V;
JIXIE::singularValueDecomposition(A,U,S,V);
// A = U S V'
// U and V will be rotations
// S will be singular values sorted by decreasing magnitude. Only the last one may be negative.
3D Polar:
Eigen::Matrix<float, 3, 3> A,R,S;
A<<1,2,3,4,5,6,7,8,9;
JIXIE::polarDecomposition(A, R, S);
// R will be the closest rotation to A
// S will be symmetric
3D SVD:
Eigen::Matrix<float, 3, 3> A;
A<<1,2,3,4,5,6,7,8,9;
Eigen::Matrix<float, 3, 1> S;
Eigen::Matrix<float, 3, 3> U;
Eigen::Matrix<float, 3, 3> V;
JIXIE::singularValueDecomposition(A,U,S,V);
// A = U S V'
// U and V will be rotations
// S will be singular values sorted by decreasing magnitude. Only the last one may be negative.
################################################################################
*/
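/*
 Note: in this file the 3x3 routines below operate on the project's cmat3 / cfloat3
 types rather than Eigen. A minimal usage sketch of the 3x3 path (assuming the
 cmat3 / cfloat3 helpers defined elsewhere in this project):
     cmat3 A, U, V;
     cfloat3 sigma;
     // ... fill A ...
     singularValueDecomposition(A, U, sigma, V); // A = U * diag(sigma) * V'
 The polar factors can then be recovered as R = U*V' and S = V*diag(sigma)*V'.
*/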
/**
SVD based on implicit QR with Wilkinson Shift
*/
/**
Class for givens rotation.
Row rotation G*A corresponds to something like
c -s 0
( s c 0 ) A
0 0 1
Column rotation A G' corresponds to something like
c -s 0
A ( s c 0 )
0 0 1
c and s are always computed so that
( c -s ) ( a ) = ( * )
s c b ( 0 )
Assume rowi<rowk.
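For reference, the convention above corresponds to r = sqrt(a*a + b*b), c = a/r,
s = -b/r (with c = 1, s = 0 when r is zero, as in polarDecomposition2 below);
this is an illustrative note only, the GivensRotation type itself is defined
elsewhere in this project.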
*/
HDFUNC inline void zeroChase(cmat3& H, cmat3& U, cmat3& V)
{
GivensRotation r1(H[0][0], H[1][0], 0, 1);
GivensRotation r2(1, 2);
if (!ZERO(H[1][0]))
r2.compute(H[0][0] * H[0][1] + H[1][0] * H[1][1], H[0][0] * H[0][2] + H[1][0] * H[1][2]);
else
r2.compute(H[0][1], H[0][2]);
r1.rowRotation3(H);
/* GivensRotation<float> r2(H(0, 1), H(0, 2), 1, 2); */
r2.columnRotation3(H);
r2.columnRotation3(V);
/**
Reduce H to the form
x x 0
0 x x
0 0 x
*/
GivensRotation r3(H[1][1], H[2][1], 1, 2);
r3.rowRotation3(H);
// Save this till end for better cache coherency
// r1.rowRotation(u_transpose);
// r3.rowRotation(u_transpose);
r1.columnRotation3(U);
r3.columnRotation3(U);
}
/**
\brief make a 3X3 matrix to upper bidiagonal form
original form of H: x x x
x x x
x x x
after zero chase:
x x 0
0 x x
0 0 x
*/
HDFUNC inline void makeUpperBidiag(cmat3& H, cmat3& U, cmat3& V)
{
U.Identity();
V.Identity();
/**
Reduce H to the form
x x x
x x x
0 x x
*/
GivensRotation r(H[1][0], H[2][0], 1, 2);
r.rowRotation3(H);
// r.rowRotation(u_transpose);
r.columnRotation3(U);
// zeroChase(H, u_transpose, V);
zeroChase(H, U, V);
}
/**
\brief make a 3X3 matrix to lambda shape
original form of H: x x x
* x x x
* x x x
after :
* x 0 0
* x x 0
* x 0 x
*/
HDFUNC inline void makeLambdaShape(cmat3& H, cmat3& U, cmat3& V)
{
U.Identity();
V.Identity();
/**
Reduce H to the form
* x x 0
* x x x
* x x x
*/
GivensRotation r1(H[0][1], H[0][2], 1, 2);
r1.columnRotation3(H);
r1.columnRotation3(V);
/**
Reduce H to the form
* x x 0
* x x 0
* x x x
*/
r1.computeUnconventional(H[1][2], H[2][2]);
r1.rowRotation3(H);
r1.columnRotation3(U);
/**
Reduce H to the form
* x x 0
* x x 0
* x 0 x
*/
GivensRotation r2(H[2][0], H[2][1], 0, 1);
r2.columnRotation3(H);
r2.columnRotation3(V);
/**
Reduce H to the form
* x 0 0
* x x 0
* x 0 x
*/
r2.computeUnconventional(H[0][1], H[1][1]);
r2.rowRotation3(H);
r2.columnRotation3(U);
}
/**
\brief 2x2 polar decomposition.
\param[in] A matrix.
\param[out] R Robustly a rotation matrix in givens form
\param[out] S_Sym Symmetric. Whole matrix is stored
Whole matrix S is stored since it's faster to calculate due to SIMD vectorization
Polar guarantees negative sign is on the small magnitude singular value.
S is guaranteed to be the closest one to identity.
R is guaranteed to be the closest rotation to A.
*/
HDFUNC inline void polarDecomposition2(cmat2& A, GivensRotation& R, cmat2& S)
{
float x0 = A[0][0] + A[1][1];
float x1 = A[1][0] - A[0][1];
float denominator = sqrt(x0 * x0 + x1 * x1);
R.c = (float)1;
R.s = (float)0;
if (denominator != 0) {
R.c = x0 / denominator;
R.s = -x1 / denominator;
}
S = A;
R.rowRotation2(S);
}
/**
\brief 2x2 polar decomposition.
\param[in] A matrix.
\param[out] R Robustly a rotation matrix.
\param[out] S_Sym Symmetric. Whole matrix is stored
Whole matrix S is stored since it's faster to calculate due to SIMD vectorization
Polar guarantees negative sign is on the small magnitude singular value.
S is guaranteed to be the closest one to identity.
R is guaranteed to be the closest rotation to A.
*/
HDFUNC inline void polarDecomposition2(cmat2& A, cmat2& R, cmat2& S)
{
GivensRotation r(0, 1);
polarDecomposition2(A, r, S);
r.fill2(R);
}
/**
\brief 2x2 SVD (singular value decomposition) A=USV'
\param[in] A Input matrix.
\param[out] U Robustly a rotation matrix in Givens form
\param[out] Sigma Vector of singular values sorted with decreasing magnitude. The second one can be negative.
\param[out] V Robustly a rotation matrix in Givens form
*/
HDFUNC inline void singularValueDecomposition2(cmat2& A, GivensRotation& U, cfloat2& Sigma, GivensRotation& V, const float tol = 64 * 1e-10)
{
cmat2 S;
polarDecomposition2(A, U, S);
float cosine, sine;
float x = S[0][0];
float y = S[0][1];
float z = S[1][1];
if (ZERO(y))
{
cosine = 1;
sine = 0;
Sigma.x = x;
Sigma.y = z;
}
else
{
float tau = 0.5 * (x - z);
float w = sqrt(tau * tau + y * y);
// w > y > 0
float t;
if (tau > 0) {
// tau + w > w > y > 0 ==> division is safe
t = y / (tau + w);
}
else {
// tau - w < -w < -y < 0 ==> division is safe
t = y / (tau - w);
}
cosine = float(1) / sqrt(t * t + float(1));
sine = -t * cosine;
/*
V = [cosine -sine; sine cosine]
Sigma = V'SV. Only compute the diagonals for efficiency.
Also utilize symmetry of S and don't form V yet.
*/
float c2 = cosine * cosine;
float csy = 2 * cosine * sine * y;
float s2 = sine * sine;
Sigma.x = c2 * x - csy + s2 * z;
Sigma.y = s2 * x + csy + c2 * z;
}
// Sorting
// Polar already guarantees negative sign is on the small magnitude singular value.
if (Sigma.x < Sigma.y) {
cswap(Sigma.x, Sigma.y);
V.c = -sine;
V.s = cosine;
}
else {
V.c = cosine;
V.s = sine;
}
U *= V;
}
/**
\brief 2x2 SVD (singular value decomposition) A=USV'
\param[in] A Input matrix.
\param[out] U Robustly a rotation matrix.
\param[out] Sigma Vector of singular values sorted with decreasing magnitude. The second one can be negative.
\param[out] V Robustly a rotation matrix.
*/
HDFUNC inline void singularValueDecomposition2(cmat2& A, cmat2& U, cfloat2& Sigma, cmat2& V, const float tol = 64 * 1e-10)
{
GivensRotation gv(0, 1);
GivensRotation gu(0, 1);
singularValueDecomposition2(A, gu, Sigma, gv, tol);
gu.fill2(U);
gv.fill2(V);
}
/**
\brief compute wilkinsonShift of the block
a1 b1
b1 a2
based on the wilkinsonShift formula
mu = a2 + d - sign(d) * sqrt(d*d + b1*b1), where d = (a1 - a2)/2
*/
HDFUNC float wilkinsonShift(const float a1, const float b1, const float a2)
{
float d = (float)0.5 * (a1 - a2);
float bs = b1 * b1;
float mu = a2 - copysign(bs / (fabs(d) + sqrt(d * d + bs)), d);
// float mu = a2 - bs / ( d + sign_d*sqrt (d*d + bs));
return mu;
}
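/*
 Worked example (illustrative only): for the block
     a1 b1     4 1
     b1 a2  =  1 2
 d = 0.5 * (4 - 2) = 1, so mu = 2 - 1 / (1 + sqrt(2)) ~= 1.5858, i.e. the
 eigenvalue 3 - sqrt(2) of the block, the one closer to a2.
*/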
/**
\brief Helper function of 3X3 SVD for processing 2X2 SVD
*/
HDFUNC inline void process(int t, cmat3& B, cmat3& U, cfloat3& sigma, cmat3& V)
{
int other = (t == 1) ? 0 : 2;
GivensRotation u(0, 1);
GivensRotation v(0, 1);
sigma[other] = B[other][other];
cfloat2 sigma2 = other == 0 ? cfloat2(sigma[1], sigma[2]) : cfloat2(sigma[0], sigma[1]);
cmat2 b2 = other == 0 ? cmat2(B[1][1], B[1][2], B[2][1], B[2][2]) : cmat2(B[0][0], B[0][1], B[1][0], B[1][1]);
singularValueDecomposition2(b2, u, sigma2, v);
if (other == 0)
{
B[1][1] = b2[0][0];
B[1][2] = b2[0][1];
B[2][1] = b2[1][0];
B[2][2] = b2[1][1];
sigma[1] = sigma2.x;
sigma[2] = sigma2.y;
}
else
{
B[0][0] = b2[0][0];
B[0][1] = b2[0][1];
B[1][0] = b2[1][0];
B[1][1] = b2[1][1];
sigma[0] = sigma2.x;
sigma[1] = sigma2.y;
}
u.rowi += t;
u.rowk += t;
v.rowi += t;
v.rowk += t;
u.columnRotation3(U);
v.columnRotation3(V);
}
/**
\brief Helper function of 3X3 SVD for flipping signs due to flipping signs of sigma
*/
HDFUNC inline void flipSign(int i, cmat3& U, cfloat3& sigma)
{
sigma[i] = -sigma[i];
U[0][i] = -U[0][i];
U[1][i] = -U[1][i];
U[2][i] = -U[2][i];
}
HDFUNC inline void colswap(cmat3& A, int c1, int c2)
{
cswap(A[0][c1], A[0][c2]);
cswap(A[1][c1], A[1][c2]);
cswap(A[2][c1], A[2][c2]);
}
/**
\brief Helper function of 3X3 SVD for sorting singular values
*/
HDFUNC inline void sort0(cmat3& U, cfloat3& sigma, cmat3& V)
{
// Case: sigma(0) > |sigma(1)| >= |sigma(2)|
if (fabs(sigma[1]) >= fabs(sigma[2])) {
if (sigma[1] < 0) {
flipSign(1, U, sigma);
flipSign(2, U, sigma);
}
return;
}
//fix sign of sigma for both cases
if (sigma[2] < 0) {
flipSign(1, U, sigma);
flipSign(2, U, sigma);
}
//swap sigma(1) and sigma(2) for both cases
cswap(sigma[1], sigma[2]);
colswap(U, 1, 2);
colswap(V, 1, 2);
// Case: |sigma(2)| >= sigma(0) > |sigma(1)|
if (sigma[1] > sigma[0]) {
cswap(sigma[0], sigma[1]);
colswap(U, 0, 1);
colswap(V, 0, 1);
}
// Case: sigma(0) >= |sigma(2)| > |sigma(1)|
else {
U[0][2] = -U[0][2];
U[1][2] = -U[1][2];
U[2][2] = -U[2][2];
V[0][2] = -V[0][2];
V[1][2] = -V[1][2];
V[2][2] = -V[2][2];
}
}
/**
\brief Helper function of 3X3 SVD for sorting singular values
*/
HDFUNC inline void sort1(cmat3& U, cfloat3& sigma, cmat3& V)
{
// Case: |sigma(0)| >= sigma(1) > |sigma(2)|
if (fabs(sigma[0]) >= sigma[1]) {
if (sigma[0] < 0) {
flipSign(0, U, sigma);
flipSign(2, U, sigma);
}
return;
}
//swap sigma(0) and sigma(1) for both cases
cswap(sigma[0], sigma[1]);
colswap(U, 0, 1);
colswap(V, 0, 1);
// Case: sigma(1) > |sigma(2)| >= |sigma(0)|
if (fabs(sigma[1]) < fabs(sigma[2])) {
cswap(sigma[1], sigma[2]);
colswap(U, 1, 2);
colswap(V, 1, 2);
}
// Case: sigma(1) >= |sigma(0)| > |sigma(2)|
else {
U[0][1] = -U[0][1];
U[1][1] = -U[1][1];
U[2][1] = -U[2][1];
V[0][1] = -V[0][1];
V[1][1] = -V[1][1];
V[2][1] = -V[2][1];
}
// fix sign for both cases
if (sigma[1] < 0) {
flipSign(1, U, sigma);
flipSign(2, U, sigma);
}
}
HDFUNC float mycmax(float a, float b)
{
return a > b ? a : b;
}
/**
\brief 3X3 SVD (singular value decomposition) A=USV'
\param[in] A Input matrix.
\param[out] U is a rotation matrix.
\param[out] sigma Diagonal matrix, sorted with decreasing magnitude. The third one can be negative.
\param[out] V is a rotation matrix.
*/
HDFUNC int singularValueDecomposition(cmat3& A, cmat3& U, cfloat3& sigma, cmat3& V, float tol = 128 * 1e-8)
{
cmat3 B = A;
U.Identity();
V.Identity();
makeUpperBidiag(B, U, V);
int count = 0;
float mu = 0.0f;
GivensRotation r(0, 1);
float alpha_1 = B[0][0];
float beta_1 = B[0][1];
float alpha_2 = B[1][1];
float alpha_3 = B[2][2];
float beta_2 = B[1][2];
float gamma_1 = alpha_1 * beta_1;
float gamma_2 = alpha_2 * beta_2;
tol *= mycmax(0.5 * sqrt(alpha_1 * alpha_1 + alpha_2 * alpha_2 + alpha_3 * alpha_3 + beta_1 * beta_1 + beta_2 * beta_2), 1);
/**
Do implicit shift QR until A^T A is block diagonal
*/
while (fabsf(beta_2) > tol && fabsf(beta_1) > tol && fabsf(alpha_1) > tol && fabsf(alpha_2) > tol && fabsf(alpha_3) > tol) {
mu = wilkinsonShift(alpha_2 * alpha_2 + beta_1 * beta_1, gamma_2, alpha_3 * alpha_3 + beta_2 * beta_2);
r.compute(alpha_1 * alpha_1 - mu, gamma_1);
r.columnRotation3(B);
r.columnRotation3(V);
zeroChase(B, U, V);
alpha_1 = B[0][0];
beta_1 = B[0][1];
alpha_2 = B[1][1];
alpha_3 = B[2][2];
beta_2 = B[1][2];
gamma_1 = alpha_1 * beta_1;
gamma_2 = alpha_2 * beta_2;
count++;
}
/**
Handle the cases of one of the alphas and betas being 0
Sorted by ease of handling and then frequency
of occurrence
If B is of form
x x 0
0 x 0
0 0 x
*/
if (fabs(beta_2) <= tol) {
process(0, B, U, sigma, V);
sort0(U, sigma, V);
}
/**
If B is of form
x 0 0
0 x x
0 0 x
*/
else if (fabs(beta_1) <= tol) {
process(1, B, U, sigma, V);
sort1(U, sigma, V);
}
/**
If B is of form
x x 0
0 0 x
0 0 x
*/
else if (fabs(alpha_2) <= tol) {
/**
Reduce B to
x x 0
0 0 0
0 0 x
*/
GivensRotation r1(1, 2);
r1.computeUnconventional(B[1][2], B[2][2]);
r1.rowRotation3(B);
r1.columnRotation3(U);
process(0, B, U, sigma, V);
sort0(U, sigma, V);
}
/**
If B is of form
x x 0
0 x x
0 0 0
*/
else if (fabs(alpha_3) <= tol) {
/**
Reduce B to
x x +
0 x 0
0 0 0
*/
GivensRotation r1(1, 2);
r1.compute(B[1][1], B[1][2]);
r1.columnRotation3(B);
r1.columnRotation3(V);
/**
Reduce B to
x x 0
+ x 0
0 0 0
*/
GivensRotation r2(0, 2);
r2.compute(B[0][0], B[0][2]);
r2.columnRotation3(B);
r2.columnRotation3(V);
process(0, B, U, sigma, V);
sort0(U, sigma, V);
}
/**
If B is of form
0 x 0
0 x x
0 0 x
*/
else if (fabs(alpha_1) <= tol) {
/**
Reduce B to
0 0 +
0 x x
0 0 x
*/
GivensRotation r1(0, 1);
r1.computeUnconventional(B[0][1], B[1][1]);
r1.rowRotation3(B);
r1.columnRotation3(U);
/**
Reduce B to
0 0 0
0 x x
0 + x
*/
GivensRotation r2(0, 2);
r2.computeUnconventional(B[0][2], B[2][2]);
r2.rowRotation3(B);
r2.columnRotation3(U);
process(1, B, U, sigma, V);
sort1(U, sigma, V);
}
return count;
}
HDFUNC cmat3 MooreInv(cmat3 A)
{
cmat3 U, V;
cfloat3 sigma;
singularValueDecomposition(A, U, sigma, V);
cmat3 S;
S[0][0] = sigma.x;
S[1][1] = sigma.y;
S[2][2] = sigma.z;
cmat3 s = S.Reci();
cmat3 UT;
mat3transpose(U, UT);
cmat3 mid;
mat3prod(V, s, mid);
cmat3 ret;
mat3prod(mid, UT, ret);
return ret;
}
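/*
 Illustrative sketch (not called by the solver): rebuild A from an SVD computed by
 singularValueDecomposition above, i.e. A ~= U * diag(sigma) * V^T. It assumes, as
 MooreInv does, that a default-constructed cmat3 is zero-initialized and that the
 mat3prod / mat3transpose helpers are available in this project.
*/
HDFUNC inline cmat3 reconstructFromSVD(cmat3 U, cfloat3 sigma, cmat3 V)
{
cmat3 S;
S[0][0] = sigma.x;
S[1][1] = sigma.y;
S[2][2] = sigma.z;
cmat3 VT;
mat3transpose(V, VT); // VT = V^T
cmat3 US;
mat3prod(U, S, US); // US = U * diag(sigma)
cmat3 A;
mat3prod(US, VT, A); // A = U * diag(sigma) * V^T
return A;
}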
//IISPH
void IISPHFactor(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
IISPHFactorKernel << < num_blocks, num_threads >> > (data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel failed: compute df alpha multiphase");
}
void IISPHPredictDensity(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
IISPHPredictDensityKernel << < num_blocks, num_threads >> > (data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel failed: compute df alpha multiphase");
}
void IISPHSolvePressure(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
printf("%d\n", num_particles);
float err_sum = 0.0f;
for (int iter = 0; iter < 10; iter++) //temp 10
{
CalcDIJPJLKernel << < num_blocks, num_threads >> > (data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel failed: CalcDIJPJLKernel");
CalcNewPressureKernel << < num_blocks, num_threads >> > (data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel failed: CalcNewPressureKernel");
float* err_host = new float[num_particles];
cudaMemcpy(err_host, data.error, sizeof(float) * num_particles, cudaMemcpyDeviceToHost);
float last_err_sum = err_sum;
err_sum = 0.0f;
for (int i = 0; i < num_particles; i++)
err_sum += err_host[i];
delete[] err_host;
err_sum /= num_particles;
//printf("%d %f\n", iter, err_sum);
if (fabs(err_sum) < 1e-6 || fabs(err_sum - last_err_sum) < 1e-6)
break;
}
}
void IISPHUpdate(SimData_SPH data, int num_particles) {
uint num_threads, num_blocks;
computeGridSize(num_particles, 256, num_blocks, num_threads);
CalcPressureForceKernel << < num_blocks, num_threads >> > (data, num_particles);
cudaThreadSynchronize();
IISPHUpdateKernel << < num_blocks, num_threads >> > (data, num_particles);
cudaThreadSynchronize();
getLastCudaError("Kernel failed: compute df alpha multiphase");
} |
d2c74417efa521ecf4074095a766cf4d39bb6128.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "common.h"
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/transform_reduce.h>
#if TORCH_HIP_VERSION >= 7000
#include <thrust/system/hip/execution_policy.h>
#endif
#include <thrust/unique.h>
#ifndef DIVUP
#define DIVUP(x, y) (((x) + (y) - 1) / (y))
#endif
const int WARP_SIZE = 32;
__device__ __forceinline__ bool warpHasCollision(int val)
{
// Compare our value to the values stored in the next 16 lanes,
// wrapping around at 32. If any pair of values is the same then
// there is a collision in the warp.
bool dup = 0;
const int laneId = threadIdx.x % 32;
#if __CUDA_ARCH__ >= 300
#pragma unroll
for (int i = 1; i <= 16; i++)
{
dup |= (__shfl(val, (laneId + i) % 32) == val);
}
#else
volatile __shared__ int values[128];
values[threadIdx.x] = val;
const int offset = threadIdx.x - laneId;
#pragma unroll
for (int i = 1; i <= 16; i++)
{
dup |= (values[offset + ((laneId + i) % 32)] == val);
}
#endif
return __any(dup) != 0;
}
__global__ void cunn_LookupTable_accGradParametersKernelByFeature(
float *input, float *gradOutput, float *gradWeight, float scale, long numel,
long stride, int paddingValue) {
const int featureDim = blockIdx.x * 4 + threadIdx.x / 32;
if (featureDim >= stride) {
return;
}
// The strategy here is that each warp handles a single feature
// dimension.
// Within that feature dimension, points in the [batch][element]
// dimension can overlap, and we need to determine if threads want
// to add to the gradient in a colliding manner.
// Typically one would use floating-point atomicAdd() to resolve
// these collisions, but that is non-deterministic if there are
// collisions. Non-determinism for this code is really bad,
// especially in RNNs, and is prone to snowballing error.
// In order to get a deterministic order of execution, we handle
// non-colliding updates separately from colliding ones. Colliding
// updates are serialized in their order of execution by using the
// warp-wide collision detector `warpHasCollision`.
const int laneId = threadIdx.x % 32;
for (int i = laneId; i < numel; i += WARP_SIZE) {
const int weightIndex = (int) (input[i] - 1);
if (weightIndex == paddingValue - 1) {
continue;
}
float update = gradOutput[i*stride + featureDim] * scale;
// Check for collision
if (warpHasCollision(weightIndex)) {
// Run all lanes sequentially; warp divergence
for (int i = 0; i < WARP_SIZE; ++i) {
if (laneId == i) {
gradWeight[weightIndex*stride + featureDim] += update;
}
}
} else {
// No collision; warp coherence
gradWeight[weightIndex*stride + featureDim] += update;
}
}
}
__global__ void cunn_LookupTable_accGradParametersKernel(
float *input, float *indices, float *gradOutput, float *gradWeight,
float *count, float defaultScale, long numel, long stride, int paddingValue) {
int idx = blockIdx.x * 4 + threadIdx.y;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same value as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
// Number of values processed by each thread (grain size)
const int SZ = 4;
if (idx < numel
&& (idx == 0 || input[idx] != input[idx - 1])
&& input[idx] != paddingValue) {
do {
const int startFeature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
const int weightRow = ((int) input[idx] - 1) * stride;
const int gradOutputRow = ((int) indices[idx] - 1) * stride;
const float scale = count ? defaultScale / count[idx] : defaultScale;
float gradient[SZ];
float weight[SZ];
#pragma unroll
for (int ii = 0; ii < SZ; ii++)
{
int featureDim = startFeature + ii * WARP_SIZE;
if (featureDim < stride)
{
gradient[ii] = gradOutput[gradOutputRow + featureDim];
weight[ii] = gradWeight[weightRow + featureDim];
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++)
{
weight[ii] += gradient[ii] * scale;
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++)
{
int featureDim = startFeature + ii * WARP_SIZE;
if (featureDim < stride)
{
gradWeight[weightRow + featureDim] = weight[ii];
}
}
idx++;
} while (idx < numel && input[idx] == input[idx - 1]);
}
}
void THNN_CudaLookupTable_accGradParameters(
THCState *state,
THIndexTensor *input,
THCudaTensor *gradOutput,
THCudaTensor *gradWeight,
THIntegerTensor *count,
THCudaTensor *sorted,
THCudaTensor *indices,
bool scaleGradByFreq,
int paddingValue,
float scale)
{
THCUNN_assertSameGPU(state, 5, input, gradOutput, gradWeight, sorted, indices);
if (!(THCudaTensor_isContiguous(state, input) &&
THCudaTensor_isContiguous(state, gradOutput) &&
THCudaTensor_isContiguous(state, gradWeight)))
{
THError("Tensors must be contiguous");
}
int nDim = THCudaTensor_nDimension(state, input);
if (nDim != 1 && nDim != 2)
THError("input must be a vector or matrix");
long numel = THCudaTensor_nElement(state, input);
long stride = gradWeight->stride[0];
hipStream_t stream = THCState_getCurrentStream(state);
if (numel <= 768 && !scaleGradByFreq) {
hipLaunchKernelGGL(( cunn_LookupTable_accGradParametersKernelByFeature), dim3(DIVUP(stride,4)), dim3(128), 0, stream,
THCudaTensor_data(state, input),
THCudaTensor_data(state, gradOutput),
THCudaTensor_data(state, gradWeight),
scale,
numel,
stride,
paddingValue);
THCudaCheck(hipGetLastError());
return;
}
THCudaTensor_resizeAs(state, sorted, input);
THCudaTensor_resizeAs(state, indices, input);
// Sort the inputs into sorted with the corresponding indices
THCudaTensor_sort(state, sorted, indices, input, 0, 0);
float *sorted_data = THCudaTensor_data(state, sorted);
float *indices_data = THCudaTensor_data(state, indices);
float *count_data = NULL;
if (scaleGradByFreq)
{
THIntegerTensor_(resizeAs)(state, count, input);
count_data = THIntegerTensor_(data)(state, count);
thrust::device_ptr<float> sorted_ptr(sorted_data);
thrust::device_ptr<float> count_ptr(count_data);
// Compute an increasing sequence per unique item in sorted:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 1 2 3 1 2 1 1 2
thrust::inclusive_scan_by_key(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par.on(THCState_getCurrentStream(state)),
#endif
sorted_ptr,
sorted_ptr + numel,
thrust::make_constant_iterator(1),
count_ptr
);
// Take the maximum of each count per unique key in reverse:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 3 3 3 2 2 1 2 2
thrust::inclusive_scan_by_key(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par.on(THCState_getCurrentStream(state)),
#endif
thrust::make_reverse_iterator(sorted_ptr + numel),
thrust::make_reverse_iterator(sorted_ptr),
thrust::make_reverse_iterator(count_ptr + numel),
thrust::make_reverse_iterator(count_ptr + numel),
thrust::equal_to<float>(),
thrust::maximum<float>()
);
}
dim3 grid(DIVUP(numel,4), DIVUP(stride,128));
dim3 block(32, 4);
hipLaunchKernelGGL(( cunn_LookupTable_accGradParametersKernel), dim3(grid), dim3(block), 0, stream,
sorted_data,
indices_data,
THCudaTensor_data(state, gradOutput),
THCudaTensor_data(state, gradWeight),
count_data,
scale,
numel,
stride,
paddingValue
);
THCudaCheck(hipGetLastError());
}
/*
* Keep the norm of weight smaller than maxNorm
*/
template <typename T>
struct pow_v
{
T normType;
pow_v(T v) : normType(v) {}
__host__ __device__
T operator()(const T& x) const {
if (normType == 1)
return std::abs(x);
else if (normType == 2)
return x * x;
else
return ::pow(std::abs(x), normType);
}
};
template <typename T>
struct multiply_s
{
T scale;
multiply_s(T s) : scale(s) {}
__host__ __device__
T operator()(const T& x) const {
return x * scale;
}
};
void THNN_CudaLookupTable_renorm(
THCState *state,
THIndexTensor *idx,
THCudaTensor *weight,
float maxNorm,
float normType)
{
THCUNN_assertSameGPU(state, 2, idx, weight);
if (!(THCudaTensor_isContiguous(state, idx) &&
THCudaTensor_isContiguous(state, weight)))
{
THError("Tensors must be contiguous");
}
if (THCudaTensor_nDimension(state, idx) != 1)
THError("idx must be a vector");
if (normType <= 0)
THError("non-positive-norm not supported");
long numel = THCudaTensor_nElement(state, idx);
long stride = weight->stride[0];
// get the unique indices
thrust::device_ptr<float> weight_ptr(THCudaTensor_data(state, weight));
thrust::device_ptr<float> idx_ptr(THCudaTensor_data(state, idx));
thrust::device_ptr<float> end_ptr = thrust::unique(idx_ptr, idx_ptr+numel);
numel = end_ptr - idx_ptr;
pow_v<float> unary_pow(normType);
thrust::plus<float> binary_plus;
// numel << stride, since idx usually contains sparse row indices
for (long i = 0; i < numel; i++)
{
long k = idx_ptr[i] - 1;
thrust::device_ptr<float> row_ptr = weight_ptr + k * stride;
float norm = thrust::transform_reduce(row_ptr, row_ptr + stride,
unary_pow, 0.0f, binary_plus); // float init so the reduction accumulates in float
norm = ::pow(norm, (float) (1.0 / normType));
if (norm > maxNorm)
{
multiply_s<float> unary_mul(maxNorm / (norm + 1e-7));
thrust::transform(row_ptr, row_ptr + stride, row_ptr, unary_mul);
}
}
}
| d2c74417efa521ecf4074095a766cf4d39bb6128.cu | #include "THCUNN.h"
#include "common.h"
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/transform_reduce.h>
#if CUDA_VERSION >= 7000
#include <thrust/system/cuda/execution_policy.h>
#endif
#include <thrust/unique.h>
#ifndef DIVUP
#define DIVUP(x, y) (((x) + (y) - 1) / (y))
#endif
const int WARP_SIZE = 32;
__device__ __forceinline__ bool warpHasCollision(int val)
{
// Compare our value to the values stored in the next 16 lanes,
// wrapping around at 32. If any pair of values is the same then
// there is a collision in the warp.
bool dup = 0;
const int laneId = threadIdx.x % 32;
#if __CUDA_ARCH__ >= 300
#pragma unroll
for (int i = 1; i <= 16; i++)
{
dup |= (__shfl(val, (laneId + i) % 32) == val);
}
#else
volatile __shared__ int values[128];
values[threadIdx.x] = val;
const int offset = threadIdx.x - laneId;
#pragma unroll
for (int i = 1; i <= 16; i++)
{
dup |= (values[offset + ((laneId + i) % 32)] == val);
}
#endif
return __any(dup) != 0;
}
__global__ void cunn_LookupTable_accGradParametersKernelByFeature(
float *input, float *gradOutput, float *gradWeight, float scale, long numel,
long stride, int paddingValue) {
const int featureDim = blockIdx.x * 4 + threadIdx.x / 32;
if (featureDim >= stride) {
return;
}
// The strategy here is that each warp handles a single feature
// dimension.
// Within that feature dimension, points in the [batch][element]
// dimension can overlap, and we need to determine if threads want
// to add to the gradient in a colliding manner.
// Typically one would use floating-point atomicAdd() to resolve
// these collisions, but that is non-deterministic if there are
// collisions. Non-determinism for this code is really bad,
// especially in RNNs, and is prone to snowballing error.
// In order to get a deterministic order of execution, we handle
// non-colliding updates separately from colliding ones. Colliding
// updates are serialized in their order of execution by using the
// warp-wide collision detector `warpHasCollision`.
const int laneId = threadIdx.x % 32;
for (int i = laneId; i < numel; i += WARP_SIZE) {
const int weightIndex = (int) (input[i] - 1);
if (weightIndex == paddingValue - 1) {
continue;
}
float update = gradOutput[i*stride + featureDim] * scale;
// Check for collision
if (warpHasCollision(weightIndex)) {
// Run all lanes sequentially; warp divergence
for (int i = 0; i < WARP_SIZE; ++i) {
if (laneId == i) {
gradWeight[weightIndex*stride + featureDim] += update;
}
}
} else {
// No collision; warp coherence
gradWeight[weightIndex*stride + featureDim] += update;
}
}
}
__global__ void cunn_LookupTable_accGradParametersKernel(
float *input, float *indices, float *gradOutput, float *gradWeight,
float *count, float defaultScale, long numel, long stride, int paddingValue) {
int idx = blockIdx.x * 4 + threadIdx.y;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same value as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
// Number of values processed by each thread (grain size)
const int SZ = 4;
if (idx < numel
&& (idx == 0 || input[idx] != input[idx - 1])
&& input[idx] != paddingValue) {
do {
const int startFeature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
const int weightRow = ((int) input[idx] - 1) * stride;
const int gradOutputRow = ((int) indices[idx] - 1) * stride;
const float scale = count ? defaultScale / count[idx] : defaultScale;
float gradient[SZ];
float weight[SZ];
#pragma unroll
for (int ii = 0; ii < SZ; ii++)
{
int featureDim = startFeature + ii * WARP_SIZE;
if (featureDim < stride)
{
gradient[ii] = gradOutput[gradOutputRow + featureDim];
weight[ii] = gradWeight[weightRow + featureDim];
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++)
{
weight[ii] += gradient[ii] * scale;
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++)
{
int featureDim = startFeature + ii * WARP_SIZE;
if (featureDim < stride)
{
gradWeight[weightRow + featureDim] = weight[ii];
}
}
idx++;
} while (idx < numel && input[idx] == input[idx - 1]);
}
}
void THNN_CudaLookupTable_accGradParameters(
THCState *state,
THIndexTensor *input,
THCudaTensor *gradOutput,
THCudaTensor *gradWeight,
THIntegerTensor *count,
THCudaTensor *sorted,
THCudaTensor *indices,
bool scaleGradByFreq,
int paddingValue,
float scale)
{
THCUNN_assertSameGPU(state, 5, input, gradOutput, gradWeight, sorted, indices);
if (!(THCudaTensor_isContiguous(state, input) &&
THCudaTensor_isContiguous(state, gradOutput) &&
THCudaTensor_isContiguous(state, gradWeight)))
{
THError("Tensors must be contiguous");
}
int nDim = THCudaTensor_nDimension(state, input);
if (nDim != 1 && nDim != 2)
THError("input must be a vector or matrix");
long numel = THCudaTensor_nElement(state, input);
long stride = gradWeight->stride[0];
cudaStream_t stream = THCState_getCurrentStream(state);
if (numel <= 768 && !scaleGradByFreq) {
cunn_LookupTable_accGradParametersKernelByFeature<<<DIVUP(stride,4), 128, 0, stream>>>(
THCudaTensor_data(state, input),
THCudaTensor_data(state, gradOutput),
THCudaTensor_data(state, gradWeight),
scale,
numel,
stride,
paddingValue);
THCudaCheck(cudaGetLastError());
return;
}
THCudaTensor_resizeAs(state, sorted, input);
THCudaTensor_resizeAs(state, indices, input);
// Sort the inputs into sorted with the corresponding indices
THCudaTensor_sort(state, sorted, indices, input, 0, 0);
float *sorted_data = THCudaTensor_data(state, sorted);
float *indices_data = THCudaTensor_data(state, indices);
float *count_data = NULL;
if (scaleGradByFreq)
{
THIntegerTensor_(resizeAs)(state, count, input);
count_data = THIntegerTensor_(data)(state, count);
thrust::device_ptr<float> sorted_ptr(sorted_data);
thrust::device_ptr<float> count_ptr(count_data);
// Compute an increasing sequence per unique item in sorted:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 1 2 3 1 2 1 1 2
thrust::inclusive_scan_by_key(
#if CUDA_VERSION >= 7000
thrust::cuda::par.on(THCState_getCurrentStream(state)),
#endif
sorted_ptr,
sorted_ptr + numel,
thrust::make_constant_iterator(1),
count_ptr
);
// Take the maximum of each count per unique key in reverse:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 3 3 3 2 2 1 2 2
thrust::inclusive_scan_by_key(
#if CUDA_VERSION >= 7000
thrust::cuda::par.on(THCState_getCurrentStream(state)),
#endif
thrust::make_reverse_iterator(sorted_ptr + numel),
thrust::make_reverse_iterator(sorted_ptr),
thrust::make_reverse_iterator(count_ptr + numel),
thrust::make_reverse_iterator(count_ptr + numel),
thrust::equal_to<float>(),
thrust::maximum<float>()
);
}
dim3 grid(DIVUP(numel,4), DIVUP(stride,128));
dim3 block(32, 4);
cunn_LookupTable_accGradParametersKernel<<<grid, block, 0, stream>>>(
sorted_data,
indices_data,
THCudaTensor_data(state, gradOutput),
THCudaTensor_data(state, gradWeight),
count_data,
scale,
numel,
stride,
paddingValue
);
THCudaCheck(cudaGetLastError());
}
/*
* Keep the norm of weight smaller than maxNorm
*/
template <typename T>
struct pow_v
{
T normType;
pow_v(T v) : normType(v) {}
__host__ __device__
T operator()(const T& x) const {
if (normType == 1)
return std::abs(x);
else if (normType == 2)
return x * x;
else
return std::pow(std::abs(x), normType);
}
};
template <typename T>
struct multiply_s
{
T scale;
multiply_s(T s) : scale(s) {}
__host__ __device__
T operator()(const T& x) const {
return x * scale;
}
};
void THNN_CudaLookupTable_renorm(
THCState *state,
THIndexTensor *idx,
THCudaTensor *weight,
float maxNorm,
float normType)
{
THCUNN_assertSameGPU(state, 2, idx, weight);
if (!(THCudaTensor_isContiguous(state, idx) &&
THCudaTensor_isContiguous(state, weight)))
{
THError("Tensors must be contiguous");
}
if (THCudaTensor_nDimension(state, idx) != 1)
THError("idx must be a vector");
if (normType <= 0)
THError("non-positive-norm not supported");
long numel = THCudaTensor_nElement(state, idx);
long stride = weight->stride[0];
// get the unique indices
thrust::device_ptr<float> weight_ptr(THCudaTensor_data(state, weight));
thrust::device_ptr<float> idx_ptr(THCudaTensor_data(state, idx));
thrust::device_ptr<float> end_ptr = thrust::unique(idx_ptr, idx_ptr+numel);
numel = end_ptr - idx_ptr;
pow_v<float> unary_pow(normType);
thrust::plus<float> binary_plus;
// numel << stride, since idx usually contains sparse row indices
for (long i = 0; i < numel; i++)
{
long k = idx_ptr[i] - 1;
thrust::device_ptr<float> row_ptr = weight_ptr + k * stride;
float norm = thrust::transform_reduce(row_ptr, row_ptr + stride,
unary_pow, 0.0f, binary_plus); // float init so the reduction accumulates in float
norm = std::pow(norm, (float) (1.0 / normType));
if (norm > maxNorm)
{
multiply_s<float> unary_mul(maxNorm / (norm + 1e-7));
thrust::transform(row_ptr, row_ptr + stride, row_ptr, unary_mul);
}
}
}
|
de144bed308c0dfae89bad561b661cbad15edc0f.hip | // !!! This is a file automatically generated by hipify!!!
#include "practice.h"
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <device_launch_parameters.h>
#include <chrono>
#include <iostream>
using namespace std;
__global__ void addTen(float *d, int count) {
int tpb = blockDim.x * blockDim.y * blockDim.z;
int tpib = threadIdx.x + blockDim.x * threadIdx.y +
blockDim.x * blockDim.y * threadIdx.z;
int bpg =
blockIdx.x + gridDim.x * blockIdx.y + gridDim.x * gridDim.y * blockIdx.z;
int tid = bpg * tpb + tpib;
if (tid < count)
d[tid] += 10;
}
int map_fun(int count) {
hiprandGenerator_t gen;
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_MTGP32);
hiprandSetPseudoRandomGeneratorSeed(gen, time(nullptr));
// const int count = 123456;
int size = count * sizeof(float);
float *d;
float h[count];
hipMalloc(&d, size);
hiprandGenerateUniform(gen, d, count);
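// d now holds `count` uniform samples in (0, 1]; addTen below shifts them by +10,
// so the values printed on the host should all lie in (10, 11]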
dim3 block(8, 8, 8);
dim3 grid(16, 16);
hipLaunchKernelGGL(( addTen), dim3(grid), dim3(block), 0, 0, d, count);
hipMemcpy(h, d, size, hipMemcpyDeviceToHost);
hipFree(d);
for (auto n : h) {
cout << n << endl;
}
return 0;
}
int gather_fun(int count){
hiprandGenerator_t gen;
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_MTGP32);
cout << count << endl;
return 9;
} | de144bed308c0dfae89bad561b661cbad15edc0f.cu | #include "practice.h"
#include <cuda_runtime.h>
#include <curand.h>
#include <device_launch_parameters.h>
#include <chrono>
#include <iostream>
using namespace std;
__global__ void addTen(float *d, int count) {
int tpb = blockDim.x * blockDim.y * blockDim.z;
int tpib = threadIdx.x + blockDim.x * threadIdx.y +
blockDim.x * blockDim.y * threadIdx.z;
int bpg =
blockIdx.x + gridDim.x * blockIdx.y + gridDim.x * gridDim.y * blockIdx.z;
int tid = bpg * tpb + tpib;
if (tid < count)
d[tid] += 10;
}
int map_fun(int count) {
curandGenerator_t gen;
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MTGP32);
curandSetPseudoRandomGeneratorSeed(gen, time(nullptr));
// const int count = 123456;
int size = count * sizeof(float);
float *d;
float h[count];
cudaMalloc(&d, size);
curandGenerateUniform(gen, d, count);
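// d now holds `count` uniform samples in (0, 1]; addTen below shifts them by +10,
// so the values printed on the host should all lie in (10, 11]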
dim3 block(8, 8, 8);
dim3 grid(16, 16);
addTen<<<grid, block>>>(d, count);
cudaMemcpy(h, d, size, cudaMemcpyDeviceToHost);
cudaFree(d);
for (auto n : h) {
cout << n << endl;
}
return 0;
}
int gather_fun(int count){
curandGenerator_t gen;
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MTGP32);
cout << count << endl;
return 9;
} |
da850ee0deec5cd4fb9f57fc5ced7d7059d77499.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#define N 16
void init_mat(int mat[N][N]){
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
mat[i][j] = rand() % 100;
}
}
}
void init_i(int mat[N][N]){
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
if(i == j) mat[i][j] = 1;
else mat[i][j] = 0;
}
}
}
void init_zeros(int mat[N][N]){
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
mat[i][j] = 0;
}
}
}
void print_mat(int mat[N][N]){
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
printf("%3d ", mat[i][j]);
}
printf("\n");
}
printf("\n");
}
void print_mat2(int *mat){
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
printf("%3d ", mat[i*N+j]);
}
printf("\n");
}
printf("\n");
}
__global__
void mat_mul(int *x, int *y, int *z){
int id = blockIdx.x *blockDim.x + threadIdx.x;
int row = id/N;
int col = id%N;
z[row*N+col] = 0;
for(int i=0;i<N;i++){
z[row*N+col] += x[row*N+i] * y[i*N+col];
}
}
int main(void) {
int x[N][N], y[N][N], z[N][N];
// these host arrays cannot be printed from device code:
// device functions cannot invoke host functions
// (otherwise add __device__ before the function you want to invoke)
int *xd, *yd, *zd;
int mat_size = N*N*sizeof(int);
init_mat(x);
init_i(y);
init_zeros(z);
print_mat(x);
hipMalloc(&xd, mat_size);
hipMalloc(&yd, mat_size);
hipMalloc(&zd, mat_size);
hipMemcpy(xd, x, mat_size, hipMemcpyHostToDevice);
hipMemcpy(yd, y, mat_size, hipMemcpyHostToDevice);
hipMemcpy(zd, z, mat_size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( mat_mul), dim3(N), dim3(N), 0, 0, xd, yd, zd);
hipMemcpy(z, zd, mat_size, hipMemcpyDeviceToHost);
print_mat(z);
}
| da850ee0deec5cd4fb9f57fc5ced7d7059d77499.cu | /* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#define N 16
void init_mat(int mat[N][N]){
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
mat[i][j] = rand() % 100;
}
}
}
void init_i(int mat[N][N]){
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
if(i == j) mat[i][j] = 1;
else mat[i][j] = 0;
}
}
}
void init_zeros(int mat[N][N]){
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
mat[i][j] = 0;
}
}
}
void print_mat(int mat[N][N]){
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
printf("%3d ", mat[i][j]);
}
printf("\n");
}
printf("\n");
}
void print_mat2(int *mat){
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
printf("%3d ", mat[i*N+j]);
}
printf("\n");
}
printf("\n");
}
__global__
void mat_mul(int *x, int *y, int *z){
int id = blockIdx.x *blockDim.x + threadIdx.x;
int row = id/N;
int col = id%N;
z[row*N+col] = 0;
for(int i=0;i<N;i++){
z[row*N+col] += x[row*N+i] * y[i*N+col];
}
}
int main(void) {
int x[N][N], y[N][N], z[N][N];
// these host arrays cannot be printed from device code:
// device functions cannot invoke host functions
// (otherwise add __device__ before the function you want to invoke)
int *xd, *yd, *zd;
int mat_size = N*N*sizeof(int);
init_mat(x);
init_i(y);
init_zeros(z);
print_mat(x);
cudaMalloc(&xd, mat_size);
cudaMalloc(&yd, mat_size);
cudaMalloc(&zd, mat_size);
cudaMemcpy(xd, x, mat_size, cudaMemcpyHostToDevice);
cudaMemcpy(yd, y, mat_size, cudaMemcpyHostToDevice);
cudaMemcpy(zd, z, mat_size, cudaMemcpyHostToDevice);
mat_mul<<<N, N>>>(xd, yd, zd);
cudaMemcpy(z, zd, mat_size, cudaMemcpyDeviceToHost);
print_mat(z);
}
|
2347e1c2cbe6124ab3dd175e5f6f6497e11cb764.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <iomanip>
#include <chrono>
//#include <CImg.h>
//#define TIMERS
//using cimg_library::CImg;
using std::cout;
using std::cerr;
using std::endl;
/* Kernel for the device */
__global__ void rgb_gray(const int width, const int height,
const unsigned char *inputImage,
unsigned char *darkGrayImage) {
int x;
int y;
// calculate the thread index for both x, y, by the use of the dimension
// of the block the id of the current block and the id of the thread
y = blockDim.y * blockIdx.y + threadIdx.y;
x = blockDim.x * blockIdx.x + threadIdx.x;
// check if we are out of bounds
if (x >= width || y >= height) { // skip threads outside the image (avoids writes wrapping into the next row)
return;
}
// do the transformation
float grayPix = 0.0f;
float r = static_cast<float>(inputImage[(y * width) + x]);
float g = static_cast<float>(inputImage[(width * height) + (y * width) + x]);
float b =
static_cast<float>(inputImage[(2 * width * height) + (y * width) + x]);
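// weighted luminance (0.3 / 0.59 / 0.11 RGB weights), darkened by a factor of 0.6;
// the +0.5f rounds to nearest when the value is truncated to unsigned char below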
grayPix = ((0.3f * r) + (0.59f * g) + (0.11f * b));
grayPix = (grayPix * 0.6f) + 0.5f;
darkGrayImage[(y * width) + x] = static_cast<unsigned char>(grayPix);
}
// End Kernel
// Host
void darkGray(const int width, const int height, const int size,
const unsigned char *inputImage, unsigned char *darkGrayImage,
const int sizeOut) {
unsigned char *inputImageDev; // Input image on device
unsigned char *darkGrayImageDev; // Output image on device
int size_image, outImageSize; // Size of the image
/* Find the size of the image */
size_image = size * sizeof(*inputImage);
outImageSize = sizeOut * sizeof(*darkGrayImage);
#ifdef TIMERS
/* timer for input creation */
std::chrono::time_point<std::chrono::system_clock> start, stop;
start = std::chrono::system_clock::now();
#endif
if (hipMalloc((void**)&inputImageDev, size_image) != hipSuccess) {
cerr << "Cuda Malloc FAILED " << endl;
}
if (hipMalloc((void**)&darkGrayImageDev, outImageSize) != hipSuccess) {
cerr << "Cuda Malloc FAILED " << endl;
}
hipMemset(darkGrayImageDev, 0 , outImageSize);
#ifdef TIMERS
stop = std::chrono::system_clock::now();
std::chrono::duration<double> elapsed = stop - start;
cout << "DarkGray malloc: " << elapsed.count() << " sec." << endl;
/*timer for input creation*/
start = std::chrono::system_clock::now();
#endif
//transfer image from host to device
if (hipMemcpy(inputImageDev, inputImage, size_image , hipMemcpyHostToDevice)!=hipSuccess){
cerr << "Cuda MemCpy H2D FAILED " << endl;
}
#ifdef TIMERS
stop = std::chrono::system_clock::now();
elapsed = stop - start;
cout << "DarkGray H2D: " << elapsed.count() << " sec." << endl;
/*timer for input creation*/
start = std::chrono::system_clock::now();
#endif
//find the number of blocks needed to cover the image in each dimension
int wBlock = static_cast<unsigned int>(ceil(width / static_cast<float>(32)));
int hBlock = static_cast<unsigned int>(ceil(height / static_cast<float>(16)));
//execution configuration
dim3 dimGrid(wBlock,hBlock);//grid dimensions: (wBlock*hBlock) thread blocks
dim3 dimBlock(32 , 16);//block dimensions: 32*16=512 threads per block
//launch the kernel with dimGrid blocks of dimBlock threads each
hipLaunchKernelGGL(( rgb_gray), dim3(dimGrid), dim3(dimBlock), 0, 0, width, height, inputImageDev,darkGrayImageDev);
hipError_t err = hipGetLastError();
#ifdef TIMERS
hipDeviceSynchronize();
stop = std::chrono::system_clock::now();
elapsed = stop - start;
cout<<std::fixed << "DarkGray kernel: " << elapsed.count() << " sec." << endl;
/*timer for input creation*/
start = std::chrono::system_clock::now();
#endif
if (err != hipSuccess)
cerr << "Error: " << hipGetErrorString(err) << endl;
if (hipMemcpy(darkGrayImage, darkGrayImageDev, outImageSize, hipMemcpyDeviceToHost)!=hipSuccess){
cerr << "Cuda MemCpy D2H FAILED "<<endl;
}
#ifdef TIMERS
stop = std::chrono::system_clock::now();
elapsed = stop - start;
cout << "DarkGray D2H: " << elapsed.count() << " sec." << endl;
/*timer for input creation*/
start = std::chrono::system_clock::now();
#endif
//clean up
hipFree(inputImageDev);
hipFree(darkGrayImageDev);
#ifdef TIMERS
stop = std::chrono::system_clock::now();
elapsed = stop - start;
cout << "DarkGray Free: " << elapsed.count() << " sec." << endl;
#endif
}
| 2347e1c2cbe6124ab3dd175e5f6f6497e11cb764.cu | #include <iostream>
#include <iomanip>
#include <chrono>
//#include <CImg.h>
//#define TIMERS
//using cimg_library::CImg;
using std::cout;
using std::cerr;
using std::endl;
/* Kernel for the device */
__global__ void rgb_gray(const int width, const int height,
const unsigned char *inputImage,
unsigned char *darkGrayImage) {
int x;
int y;
// calculate the thread index for both x, y, by the use of the dimension
// of the block the id of the current block and the id of the thread
y = blockDim.y * blockIdx.y + threadIdx.y;
x = blockDim.x * blockIdx.x + threadIdx.x;
// check if we are out of bounds
if (x >= width || y >= height) { // skip threads outside the image (avoids writes wrapping into the next row)
return;
}
// do the transformation
float grayPix = 0.0f;
float r = static_cast<float>(inputImage[(y * width) + x]);
float g = static_cast<float>(inputImage[(width * height) + (y * width) + x]);
float b =
static_cast<float>(inputImage[(2 * width * height) + (y * width) + x]);
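// weighted luminance (0.3 / 0.59 / 0.11 RGB weights), darkened by a factor of 0.6;
// the +0.5f rounds to nearest when the value is truncated to unsigned char below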
grayPix = ((0.3f * r) + (0.59f * g) + (0.11f * b));
grayPix = (grayPix * 0.6f) + 0.5f;
darkGrayImage[(y * width) + x] = static_cast<unsigned char>(grayPix);
}
// End Kernel
// Host
void darkGray(const int width, const int height, const int size,
const unsigned char *inputImage, unsigned char *darkGrayImage,
const int sizeOut) {
unsigned char *inputImageDev; // Input image on device
unsigned char *darkGrayImageDev; // Output image on device
int size_image, outImageSize; // Size of the image
/* Find the size of the image */
size_image = size * sizeof(*inputImage);
outImageSize = sizeOut * sizeof(*darkGrayImage);
#ifdef TIMERS
/* timer for input creation */
std::chrono::time_point<std::chrono::system_clock> start, stop;
start = std::chrono::system_clock::now();
#endif
if (cudaMalloc((void**)&inputImageDev, size_image) != cudaSuccess) {
cerr << "Cuda Malloc FAILED " << endl;
}
if (cudaMalloc((void**)&darkGrayImageDev, outImageSize) != cudaSuccess) {
cerr << "Cuda Malloc FAILED " << endl;
}
cudaMemset(darkGrayImageDev, 0 , outImageSize);
#ifdef TIMERS
stop = std::chrono::system_clock::now();
std::chrono::duration<double> elapsed = stop - start;
cout << "DarkGray malloc: " << elapsed.count() << " sec." << endl;
/*timer for input creation*/
start = std::chrono::system_clock::now();
#endif
//transfer image from host to device
if (cudaMemcpy(inputImageDev, inputImage, size_image , cudaMemcpyHostToDevice)!=cudaSuccess){
cerr << "Cuda MemCpy H2D FAILED " << endl;
}
#ifdef TIMERS
stop = std::chrono::system_clock::now();
elapsed = stop - start;
cout << "DarkGray H2D: " << elapsed.count() << " sec." << endl;
/*timer for input creation*/
start = std::chrono::system_clock::now();
#endif
//find the number of blocks needed to cover the image in each dimension
int wBlock = static_cast<unsigned int>(ceil(width / static_cast<float>(32)));
int hBlock = static_cast<unsigned int>(ceil(height / static_cast<float>(16)));
//execution configuration
dim3 dimGrid(wBlock,hBlock);//grid dimensions: (wBlock*hBlock) thread blocks
dim3 dimBlock(32 , 16);//block dimensions: 32*16=512 threads per block
//launch the kernel with dimGrid blocks of dimBlock threads each
rgb_gray<<<dimGrid, dimBlock>>>(width, height, inputImageDev,darkGrayImageDev);
cudaError_t err = cudaGetLastError();
#ifdef TIMERS
cudaDeviceSynchronize();
stop = std::chrono::system_clock::now();
elapsed = stop - start;
cout<<std::fixed << "DarkGray kernel: " << elapsed.count() << " sec." << endl;
/*timer for input creation*/
start = std::chrono::system_clock::now();
#endif
if (err != cudaSuccess)
cerr << "Error: " << cudaGetErrorString(err) << endl;
if (cudaMemcpy(darkGrayImage, darkGrayImageDev, outImageSize, cudaMemcpyDeviceToHost)!=cudaSuccess){
cerr << "Cuda MemCpy D2H FAILED "<<endl;
}
#ifdef TIMERS
stop = std::chrono::system_clock::now();
elapsed = stop - start;
cout << "DarkGray D2H: " << elapsed.count() << " sec." << endl;
/*timer for input creation*/
start = std::chrono::system_clock::now();
#endif
//clean up
cudaFree(inputImageDev);
cudaFree(darkGrayImageDev);
#ifdef TIMERS
stop = std::chrono::system_clock::now();
elapsed = stop - start;
cout << "DarkGray Free: " << elapsed.count() << " sec." << endl;
#endif
}
|
4feaeca7145cdd7e0f17f2c07fd6d2592ca2d86f.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2020 Daniel Mlakar [email protected]
// Martin Winter [email protected]
// Pascal Stadlbauer [email protected]
// Hans-Peter Seidel [email protected]
// Markus Steinberger [email protected]
// Rhaleb Zayer [email protected]
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#include "LAKCatmullClark.h"
#include "LAKCatmullClarkKernels.cuh"
#include <iostream>
#include <iomanip>
#include <hipsparse.h>
#include "math/spgemm.cuh"
#include "math/spmv.cuh"
namespace
{
template<typename T>
void getCircMapQ(std::vector<T>& Q, unsigned size, unsigned pwr, unsigned index_base = 0)
{
Q.resize((size + index_base) * (size + index_base), 0);
for (auto j = 1; j < size + 1; ++j)
{
for (auto i = 1; i < size + 1; ++i)
{
Q[(i - 1 + index_base) * (size + index_base) + (j - 1 + index_base)] = (j == ((i + pwr - 1) % size) + 1 ? 1 : 0);
}
}
};
template<typename T>
std::vector<T> getFromDev(const T* d, size_t num)
{
std::vector<T> h(num);
succeed(hipMemcpy(&h[0], d, num * sizeof(T), hipMemcpyDeviceToHost));
return h;
}
}
////////////////////////////////////////////////////////////////////////////////
/// Quadrilateral Mesh Subdiv
template<typename MESH_INFO, typename MEMORY_MANAGER, typename PROFILING>
void LAKCatmullClark::subdivideVertexDataQuadMesh(MESH_INFO const& cmesh, MESH_INFO& rmesh, Context<MESH_INFO>& ctx, MEMORY_MANAGER& mem, PROFILING& prof)
{
using offset_t = typename MESH_INFO::offset_t;
using index_t = typename MESH_INFO::index_t;
using vertex_t = typename MESH_INFO::vertex_t;
hipEvent_t start, stop;
succeed(hipEventCreate(&start));
succeed(hipEventCreate(&stop));
subdivideVertexDataPolyMesh(cmesh, rmesh, ctx, mem, prof);
succeed(hipEventDestroy(start));
succeed(hipEventDestroy(stop));
}
template<typename MESH_INFO, typename MEMORY_MANAGER, typename PROFILING>
void LAKCatmullClark::subdivideTopologyQuadMesh(MESH_INFO const& cmesh, MESH_INFO& rmesh, Context<MESH_INFO>& ctx, MEMORY_MANAGER& mem, PROFILING& prof)
{
using offset_t = typename MESH_INFO::offset_t;
using index_t = typename MESH_INFO::index_t;
using value_t = typename MESH_INFO::value_t;
hipEvent_t start, stop;
succeed(hipEventCreate(&start));
succeed(hipEventCreate(&stop));
mem.giveOwnership(cmesh.ptr, (cmesh.nfaces + 1) * sizeof(index_t));
mem.giveOwnership(cmesh.ids, cmesh.nnz * sizeof(offset_t));
mem.giveOwnership(cmesh.vals, cmesh.nnz * sizeof(value_t));
rmesh.nfaces = cmesh.nnz;
rmesh.nnz = rmesh.nfaces * 4;
rmesh.nverts = cmesh.nverts + cmesh.nfaces + (cmesh.nnz - ctx.nextern) / 2 + ctx.nextern;
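// Refined vertex count, Catmull-Clark style: all original vertices, one face
// point per face and one edge point per edge; cmesh.nnz counts face-vertex
// incidences (half-edges), so interior edges contribute (nnz - nextern) / 2
// and boundary edges contribute nextern.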
rmesh.max_face_order = 4;
size_t grid_dim = divup(cmesh.nfaces, ctx.block_dim);
prof.start(start, "sorting row ids for topo refine");
LAKHelperKernels::sortKeyValuePairsSegmentedInPlace(cmesh.ptr, cmesh.vals, cmesh.ids, cmesh.nfaces);
prof.stop(start, stop);
rmesh.ptr = reinterpret_cast<offset_t*>(mem.getMemory((rmesh.nfaces + 1) * sizeof(offset_t)));
rmesh.ids = reinterpret_cast<offset_t*>(mem.getMemory(rmesh.nnz * sizeof(index_t)));
rmesh.vals = reinterpret_cast<offset_t*>(mem.getMemory(rmesh.nnz * sizeof(value_t)));
grid_dim = divup(cmesh.nnz, ctx.block_dim);
prof.start(start, "Refining topology - universal");
LAKCCKernels::refineTopologyHomogeneous << <grid_dim, ctx.block_dim >> > (
cmesh.ids, ctx.d_E_ptr, ctx.d_E_ids, ctx.d_E_vals, rmesh.ids, cmesh.nnz, cmesh.nverts, cmesh.nfaces, cmesh.max_face_order);
grid_dim = divup(rmesh.nfaces + 1, ctx.block_dim);
LAKCCKernels::createQuadColPtr << <grid_dim, ctx.block_dim >> > (rmesh.ptr, rmesh.nfaces);
grid_dim = divup(rmesh.nnz, ctx.block_dim);
LAKCCKernels::createQuadVals << <grid_dim, ctx.block_dim >> > (rmesh.vals, rmesh.nfaces);
prof.stop(start, stop);
mem.takeOwnership(rmesh.ptr);
mem.takeOwnership(rmesh.ids);
mem.takeOwnership(rmesh.vals);
mem.takeOwnership(cmesh.ptr);
mem.takeOwnership(cmesh.ids);
mem.takeOwnership(cmesh.vals);
succeed(hipEventDestroy(start));
succeed(hipEventDestroy(stop));
}
template<typename MESH_INFO, typename MEMORY_MANAGER, typename PROFILING>
void LAKCatmullClark::initQuadMesh(MESH_INFO const& cmesh, MESH_INFO& rmesh, Context<MESH_INFO>& ctx, MEMORY_MANAGER& mem, PROFILING& prof)
{
using index_t = typename MESH_INFO::index_t;
hipEvent_t start, stop;
succeed(hipEventCreate(&start));
succeed(hipEventCreate(&stop));
initPolyMesh(cmesh, rmesh, ctx, mem, prof);
succeed(hipEventDestroy(start));
succeed(hipEventDestroy(stop));
}
////////////////////////////////////////////////////////////////////////////////
/// Polygonal Mesh Subdiv
template<typename MESH_INFO, typename MEMORY_MANAGER, typename PROFILING>
void LAKCatmullClark::subdivideVertexDataPolyMesh(MESH_INFO const& cmesh, MESH_INFO& rmesh, Context<MESH_INFO>& ctx, MEMORY_MANAGER& mem, PROFILING& prof)
{
using offset_t = typename MESH_INFO::offset_t;
using index_t = typename MESH_INFO::index_t;
using value_t = typename MESH_INFO::value_t;
using vertex_t = typename MESH_INFO::vertex_t;
constexpr int ncomponents{ 4 };
hipEvent_t start, stop;
succeed(hipEventCreate(&start));
succeed(hipEventCreate(&stop));
mem.giveOwnership(cmesh.ptr, (cmesh.nfaces + 1) * sizeof(index_t));
mem.giveOwnership(cmesh.ids, cmesh.nnz * sizeof(offset_t));
mem.giveOwnership(cmesh.vals, cmesh.nnz * sizeof(value_t));
mem.giveOwnership(cmesh.verts, cmesh.nverts * ncomponents * sizeof(vertex_t));
rmesh.verts = reinterpret_cast<vertex_t*>(mem.getMemory(rmesh.nverts * ncomponents * sizeof(vertex_t)));
//calculate facepoints
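// Face points are the centroids of the coarse faces: the mapped SpMV below
// accumulates the sum of each face's vertex positions, and the subsequent
// element-wise division by the face order turns those sums into averages.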
vertex_t* d_facepoints = reinterpret_cast<vertex_t*>(rmesh.verts + ncomponents * cmesh.nverts);
size_t grid_dim = divup(cmesh.nfaces, ctx.block_dim);
prof.start(start, ("calculating facepoints"));
spmv_left_mapped_f4(cmesh.ptr, cmesh.ids, cmesh.vals,
reinterpret_cast<const float4*>(cmesh.verts), reinterpret_cast<float4*>(d_facepoints), cmesh.nverts, cmesh.nfaces, ctx.d_map, cmesh.max_face_order + 1);
LAKHelperKernels::divElementWise(d_facepoints, ctx.d_order_buffer + cmesh.nverts, vertex_t(0), d_facepoints, ncomponents * cmesh.nfaces);
prof.stop(start, stop);
//calculate edgepoints
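// The kernel below receives, per interior edge, its two endpoints and the two
// incident face points (d_f0/d_f1), i.e. the data required by the classic
// Catmull-Clark edge rule (average of the two endpoints and the two adjacent
// face points); boundary edges are handled separately from endpoint data only.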
vertex_t* d_edgepoints = reinterpret_cast<vertex_t*>(rmesh.verts + (cmesh.nverts + cmesh.nfaces) * ncomponents);
grid_dim = divup(size_t(ctx.nedges - ctx.nextern), ctx.block_dim);
prof.start(start, ("calculating internal edgepoints"));
LAKCCKernels::calculateInternalEdgepoints << <grid_dim, ctx.block_dim >> > (
ctx.d_internal0, ctx.d_internal1, ctx.d_intids, ctx.d_f0, ctx.d_f1, cmesh.verts, d_facepoints, d_edgepoints, ctx.nedges - ctx.nextern);
prof.stop(start, stop);
if (ctx.nextern != 0)
{
grid_dim = divup(size_t(ctx.nextern), ctx.block_dim);
prof.start(start, ("calculating external edgepoints"));
LAKCCKernels::calculateExternalEdgepoints << <grid_dim, ctx.block_dim >> > (
ctx.d_external0, ctx.d_external1, ctx.d_extids, cmesh.verts, d_edgepoints, ctx.nextern);
prof.stop(start, stop);
}
//update original vertices
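// For reference, the classic Catmull-Clark vertex rule is
//   v' = (F + 2R + (n - 3) v) / n,
// where F averages the incident face points, R averages the incident edge
// midpoints and n is the vertex order; the SpMV calls and element-wise
// operations below assemble the corresponding sums in matrix form (see the
// paper for the exact formulation used here).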
vertex_t* d_p_norm = reinterpret_cast<vertex_t*>(mem.getMemory(cmesh.nverts * ncomponents * sizeof(vertex_t)));
vertex_t* d_fp_vp_sum_norm = reinterpret_cast<vertex_t*>(mem.getMemory(cmesh.nverts * ncomponents * sizeof(vertex_t)));
succeed(hipMemset(d_fp_vp_sum_norm, 0, cmesh.nverts * ncomponents * sizeof(vertex_t)));
grid_dim = divup(cmesh.nverts, ctx.block_dim);
prof.start(start, "updating internal positions");
LAKHelperKernels::multElementWise(cmesh.verts, ctx.d_order_buffer, vertex_t(-2), d_p_norm, ncomponents * cmesh.nverts);
spmv_right_mapped_f4(cmesh.ptr, cmesh.ids, cmesh.vals,
reinterpret_cast<float4*>(d_facepoints), reinterpret_cast<float4*>(d_fp_vp_sum_norm), cmesh.nverts, cmesh.nfaces, ctx.d_map, cmesh.max_face_order + 1);
spmv_right_mapped_f4(ctx.d_F_ptr, ctx.d_F_ids, ctx.d_F_vals,
reinterpret_cast<float4*>(cmesh.verts), reinterpret_cast<float4*>(d_fp_vp_sum_norm), cmesh.nverts, cmesh.nverts, ctx.d_F_map, cmesh.nfaces + 1);
LAKHelperKernels::divElementWise(d_fp_vp_sum_norm, ctx.d_order_buffer, vertex_t(0), d_fp_vp_sum_norm, ncomponents * cmesh.nverts);
LAKHelperKernels::addElementWise(d_fp_vp_sum_norm, d_p_norm, rmesh.verts, ncomponents * cmesh.nverts);
LAKHelperKernels::divElementWise(rmesh.verts, ctx.d_order_buffer, vertex_t(0), rmesh.verts, ncomponents * cmesh.nverts);
prof.stop(start, stop);
if (ctx.nextern)
{
grid_dim = divup(size_t(ctx.nextern), ctx.block_dim);
prof.start(start, "updating external positions");
LAKCCKernels::prepareExternalVertexUpdate << <grid_dim, ctx.block_dim >> > (
ctx.d_external0, ctx.d_external1, cmesh.verts, rmesh.verts, ctx.nextern);
LAKCCKernels::calculateExternalVertexUpdate << <grid_dim, ctx.block_dim >> > (
ctx.d_external0, ctx.d_external1, cmesh.verts, rmesh.verts, ctx.nextern);
prof.stop(start, stop);
}
mem.freeMemory(d_p_norm);
mem.freeMemory(d_fp_vp_sum_norm);
//TODO: re-implement aka. port
//if (has_creases && !creases_decayed)
// time += handleCreases(C, d_vertex_data, d_refined_vertexdata, nf, nv);
mem.takeOwnership(rmesh.verts);
mem.takeOwnership(cmesh.ptr);
mem.takeOwnership(cmesh.ids);
mem.takeOwnership(cmesh.vals);
mem.takeOwnership(cmesh.verts);
succeed(hipEventDestroy(start));
succeed(hipEventDestroy(stop));
}
template<typename MESH_INFO, typename MEMORY_MANAGER, typename PROFILING>
void LAKCatmullClark::subdivideTopologyPolyMesh(MESH_INFO const& cmesh, MESH_INFO& rmesh, Context<MESH_INFO>& ctx, MEMORY_MANAGER& mem, PROFILING& prof)
{
using offset_t = typename MESH_INFO::offset_t;
using index_t = typename MESH_INFO::index_t;
using value_t = typename MESH_INFO::value_t;
hipEvent_t start, stop;
succeed(hipEventCreate(&start));
succeed(hipEventCreate(&stop));
mem.giveOwnership(cmesh.ptr, (cmesh.nfaces + 1) * sizeof(index_t));
mem.giveOwnership(cmesh.ids, cmesh.nnz * sizeof(offset_t));
mem.giveOwnership(cmesh.vals, cmesh.nnz * sizeof(value_t));
rmesh.nfaces = cmesh.nnz;
rmesh.nnz = rmesh.nfaces * 4;
rmesh.nverts = cmesh.nverts + cmesh.nfaces + (cmesh.nnz - ctx.nextern) / 2 + ctx.nextern;
rmesh.max_face_order = 4;
size_t grid_dim = divup(cmesh.nfaces, ctx.block_dim);
prof.start(start, "sorting row ids for topo refine");
LAKHelperKernels::sortKeyValuePairsSegmentedInPlace(cmesh.ptr, cmesh.vals, cmesh.ids, cmesh.nfaces);
prof.stop(start, stop);
rmesh.ptr = reinterpret_cast<offset_t*>(mem.getMemory((rmesh.nfaces + 1) * sizeof(offset_t)));
rmesh.ids = reinterpret_cast<offset_t*>(mem.getMemory(rmesh.nnz * sizeof(index_t)));
rmesh.vals = reinterpret_cast<offset_t*>(mem.getMemory(rmesh.nnz * sizeof(value_t)));
grid_dim = divup(cmesh.nfaces, ctx.block_dim);
prof.start(start, "Refining topology - universal");
LAKCCKernels::refineTopology << <grid_dim, ctx.block_dim >> > (
cmesh.ptr, cmesh.ids,
ctx.d_E_ptr, ctx.d_E_ids, ctx.d_E_vals, rmesh.ids, cmesh.nfaces, cmesh.nverts);
grid_dim = divup(rmesh.nfaces + 1, ctx.block_dim);
LAKCCKernels::createQuadColPtr << <grid_dim, ctx.block_dim >> > (rmesh.ptr, rmesh.nfaces);
grid_dim = divup(rmesh.nnz, ctx.block_dim);
LAKCCKernels::createQuadVals << <grid_dim, ctx.block_dim >> > (rmesh.vals, rmesh.nfaces);
prof.stop(start, stop);
mem.takeOwnership(rmesh.ptr);
mem.takeOwnership(rmesh.ids);
mem.takeOwnership(rmesh.vals);
mem.takeOwnership(cmesh.ptr);
mem.takeOwnership(cmesh.ids);
mem.takeOwnership(cmesh.vals);
succeed(hipEventDestroy(start));
succeed(hipEventDestroy(stop));
}
template<typename MESH_INFO, typename MEMORY_MANAGER, typename PROFILING>
void LAKCatmullClark::initPolyMesh(MESH_INFO const& cmesh, MESH_INFO& rmesh, Context<MESH_INFO>& ctx, MEMORY_MANAGER& mem, PROFILING& prof)
{
using offset_t = typename MESH_INFO::offset_t;
using index_t = typename MESH_INFO::index_t;
using value_t = typename MESH_INFO::value_t;
hipEvent_t start, stop;
succeed(hipEventCreate(&start));
succeed(hipEventCreate(&stop));
mem.giveOwnership(cmesh.ptr, (cmesh.nfaces + 1) * sizeof(index_t));
mem.giveOwnership(cmesh.ids, cmesh.nnz * sizeof(offset_t));
mem.giveOwnership(cmesh.vals, cmesh.nnz * sizeof(value_t));
size_t grid_dim = divup(cmesh.nfaces, ctx.block_dim);
prof.start(start, "sorting row ids");
LAKHelperKernels::sortKeyValuePairsSegmentedInPlace(cmesh.ptr, cmesh.ids, cmesh.vals, cmesh.nfaces);
prof.stop(start, stop);
//create map for spmv (+1 because values start at 1)
std::vector<value_t> spmv_map(cmesh.max_face_order + 1, static_cast<value_t>(1));
ctx.d_map = reinterpret_cast<value_t*>(mem.getMemory((cmesh.max_face_order + 1) * sizeof(value_t)));
succeed(hipMemcpy(ctx.d_map, &spmv_map[0], (cmesh.max_face_order + 1) * sizeof(value_t), hipMemcpyHostToDevice));
//create map for F*p (+1 because values start at 1)
std::vector<value_t> F_map(cmesh.nfaces + 1, static_cast<value_t>(1));
ctx.d_F_map = reinterpret_cast<value_t*>(mem.getMemory((cmesh.nfaces + 1) * sizeof(value_t)));
succeed(hipMemcpy(ctx.d_F_map, &F_map[0], (cmesh.nfaces + 1) * sizeof(value_t), hipMemcpyHostToDevice));
std::vector<value_t> nf_ones(cmesh.nfaces, 1.0f);
value_t* d_nf_ones = reinterpret_cast<value_t*>(mem.getMemory(cmesh.nfaces * sizeof(value_t)));
succeed(hipMemcpy(d_nf_ones, &nf_ones[0], cmesh.nfaces * sizeof(value_t), hipMemcpyHostToDevice));
ctx.d_order_buffer = reinterpret_cast<value_t*>(mem.getMemory((cmesh.nverts + cmesh.nfaces) * sizeof(value_t)));
succeed(hipMemset(ctx.d_order_buffer, 0, cmesh.nverts * sizeof(value_t)));
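// With the all-ones map, the mapped SpMV below simply counts, for each vertex,
// the faces that reference it (its vertex order); the analogous left-multiply
// further down counts the vertices of each face (its face order).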
prof.start(start, ("calculating vertex orders"));
spmv_right_mapped(
cmesh.ptr,
cmesh.ids,
cmesh.vals, d_nf_ones, ctx.d_order_buffer,
cmesh.nverts,
cmesh.nfaces,
ctx.d_map,
cmesh.max_face_order + 1);
prof.stop(start, stop);
mem.freeMemory(d_nf_ones);
//face order
std::vector<value_t> nv_ones(cmesh.nverts, 1.0f);
value_t* d_nv_ones = reinterpret_cast<value_t*>(mem.getMemory(cmesh.nverts * sizeof(value_t)));
succeed(hipMemcpy(d_nv_ones, &nv_ones[0], cmesh.nverts * sizeof(value_t), hipMemcpyHostToDevice));
value_t* d_faceorders = ctx.d_order_buffer + cmesh.nverts;
prof.start(start, ("calculating face orders"));
spmv_left_mapped(cmesh.ptr, cmesh.ids, cmesh.vals, d_nv_ones, d_faceorders, cmesh.nverts, cmesh.nfaces, ctx.d_map, cmesh.max_face_order + 1);
prof.stop(start, stop);
mem.freeMemory(d_nv_ones);
mem.registerConsumption((cmesh.nverts + 1) * sizeof(unsigned));
mem.registerConsumption(cmesh.nnz * sizeof(unsigned));
mem.registerConsumption(cmesh.nnz * sizeof(float));
offset_t* d_ptr_t = reinterpret_cast<offset_t*>(mem.getMemory((cmesh.nverts + 1) * sizeof(offset_t)));
index_t* d_ids_t = reinterpret_cast<index_t*>(mem.getMemory(cmesh.nnz * sizeof(index_t)));
value_t* d_vals_t = reinterpret_cast<value_t*>(mem.getMemory(cmesh.nnz * sizeof(value_t)));
hipsparseHandle_t handle;
cuSparseSucceed(hipsparseCreate(&handle));
prof.start(start, "transposing M");
cuSparseSucceed(hipsparseScsr2csc(handle,
cmesh.nfaces, cmesh.nverts, cmesh.nnz,
reinterpret_cast<const float*>(cmesh.vals), cmesh.ptr, cmesh.ids,
reinterpret_cast<float*>(d_vals_t), d_ids_t, d_ptr_t,
HIPSPARSE_ACTION_NUMERIC,
HIPSPARSE_INDEX_BASE_ZERO));
prof.stop(start, stop);
//This would be the non-deprecated version... doesn't work
//size_t buffer_size{ 42 };
//prof.start(start, "transposing M 1/2");
//cuSparseSucceed(hipsparseCsr2cscEx2_bufferSize(
// handle,
// cmesh.nfaces,
// cmesh.nverts,
// cmesh.nnz,
// cmesh.vals,
// cmesh.ptr,
// cmesh.ids,
// d_vals_t,
// d_ptr_t,
// d_ids_t,
// HIP_R_32I,
// HIPSPARSE_ACTION_SYMBOLIC,
// HIPSPARSE_INDEX_BASE_ZERO,
// HIPSPARSE_CSR2CSC_ALG1,
// &buffer_size));
//prof.stop(start, stop);
//void* buffer = mem.getMemory(buffer_size);
//prof.start(start, "transposing M 2/2");
//cuSparseSucceed(hipsparseCsr2cscEx2(handle,
// cmesh.nfaces,
// cmesh.nverts,
// cmesh.nnz,
// cmesh.vals,
// cmesh.ptr,
// cmesh.ids,
// d_vals_t,
// d_ptr_t,
// d_ids_t,
// HIP_R_32I,
// HIPSPARSE_ACTION_NUMERIC,
// HIPSPARSE_INDEX_BASE_ZERO,
// HIPSPARSE_CSR2CSC_ALG1,
// buffer));
//prof.stop(start, stop);
//mem.freeMemory(buffer);
std::vector<value_t> map;
getCircMapQ(map, cmesh.max_face_order, 1, 1); // Q_{cmesh.max_face_order}
ctx.d_map0 = reinterpret_cast<value_t*>(mem.getMemory(map.size() * sizeof(value_t)));
succeed(hipMemcpy(ctx.d_map0, &map[0], map.size() * sizeof(value_t), hipMemcpyHostToDevice));
getCircMapQ(map, cmesh.max_face_order, cmesh.max_face_order - 1, 1); // Q_{cmesh.max_face_order}^{cmesh.max_face_order-1}
ctx.d_map1 = reinterpret_cast<value_t*>(mem.getMemory(map.size() * sizeof(value_t)));
succeed(hipMemcpy(ctx.d_map1, &map[0], map.size() * sizeof(value_t), hipMemcpyHostToDevice));
offset_t* d_F_ptr_t;
index_t* d_F_ids_t;
value_t* d_F_vals_t;
size_t f_nnz, f_nnz_t, e_nnz;
prof.time += spgemm_mapped<offset_t, index_t, value_t, MEMORY_MANAGER>(cmesh.ptr, cmesh.ids, cmesh.vals,
d_ptr_t, d_ids_t, d_vals_t,
ctx.d_F_ptr, ctx.d_F_ids, ctx.d_F_vals,
d_F_ptr_t, d_F_ids_t, d_F_vals_t,
ctx.d_E_ptr, ctx.d_E_ids, ctx.d_E_vals,
cmesh.nverts, cmesh.nfaces, cmesh.nverts, cmesh.nnz, cmesh.nnz,
f_nnz, f_nnz_t, e_nnz, ctx.nextern,
ctx.d_map0, cmesh.max_face_order + 1, cmesh.max_face_order + 1,
ctx.d_map1, cmesh.max_face_order + 1, cmesh.max_face_order + 1, mem);
mem.freeMemory(d_ptr_t);
mem.freeMemory(d_ids_t);
mem.freeMemory(d_vals_t);
mem.unregisterConsumption((cmesh.nverts + 1) * sizeof(offset_t));
mem.unregisterConsumption(cmesh.nnz * sizeof(index_t));
mem.unregisterConsumption(cmesh.nnz * sizeof(value_t));
prof.start(start, "compressing F F^T and E");
LAKHelperKernels::compress(ctx.d_F_ptr, ctx.d_F_ids, ctx.d_F_vals, cmesh.nverts, cmesh.nverts, f_nnz, mem);
LAKHelperKernels::compress(d_F_ptr_t, d_F_ids_t, d_F_vals_t, cmesh.nverts, cmesh.nverts, f_nnz_t, mem);
LAKHelperKernels::compress(ctx.d_E_ptr, ctx.d_E_ids, ctx.d_E_vals, cmesh.nverts, cmesh.nverts, e_nnz, mem);
prof.stop(start, stop);
ctx.nedges = e_nnz;
auto nintern = ctx.nedges - ctx.nextern;
ctx.d_internal0 = reinterpret_cast<index_t*>(mem.getMemory(nintern * sizeof(index_t)));
ctx.d_internal1 = reinterpret_cast<index_t*>(mem.getMemory(nintern * sizeof(index_t)));
ctx.d_intids = reinterpret_cast<index_t*>(mem.getMemory(nintern * sizeof(index_t)));
if (ctx.nextern)
{
ctx.d_external0 = reinterpret_cast<index_t*>(mem.getMemory(ctx.nextern * sizeof(unsigned)));
ctx.d_external1 = reinterpret_cast<index_t*>(mem.getMemory(ctx.nextern * sizeof(unsigned)));
ctx.d_extids = reinterpret_cast<index_t*>(mem.getMemory(ctx.nextern * sizeof(unsigned)));
}
index_t* d_nintext = reinterpret_cast<index_t*>(mem.getMemory(2 * sizeof(unsigned)));
succeed(hipMemset((void*)d_nintext, 0, 2 * sizeof(unsigned)));
grid_dim = divup(cmesh.nverts, ctx.block_dim);
prof.start(start, "getting Edge info from E");
LAKCCKernels::extractEdgeInfoFromE << <grid_dim, ctx.block_dim >> > (
ctx.d_E_ptr, ctx.d_E_ids, ctx.d_E_vals, cmesh.nverts,
ctx.d_internal0, ctx.d_internal1, ctx.d_intids, ctx.d_external0, ctx.d_external1, ctx.d_extids, d_nintext);
prof.stop(start, stop);
ctx.d_f0 = reinterpret_cast<index_t*>(mem.getMemory(nintern * sizeof(unsigned)));
ctx.d_f1 = reinterpret_cast<index_t*>(mem.getMemory(nintern * sizeof(unsigned)));
grid_dim = divup(size_t(nintern), ctx.block_dim);
prof.start(start, "getting Face info from F and Ft");
LAKCCKernels::extractFaceInfoFromFFt << <grid_dim, ctx.block_dim >> > (
ctx.d_F_ptr, ctx.d_F_ids, ctx.d_F_vals, d_F_ptr_t, d_F_ids_t, d_F_vals_t,
ctx.d_internal0, ctx.d_internal1, nintern, ctx.d_f0, ctx.d_f1);
prof.stop(start, stop);
mem.freeMemory(d_F_ptr_t);
mem.freeMemory(d_F_ids_t);
mem.freeMemory(d_F_vals_t);
mem.takeOwnership(cmesh.ptr);
mem.takeOwnership(cmesh.ids);
mem.takeOwnership(cmesh.vals);
succeed(hipEventDestroy(start));
succeed(hipEventDestroy(stop));
}
////////////////////////////////////////////////////////////////////////////////
/// Publicly exposed
template<typename MESH_INFO>
void LAKCatmullClark::subdivideIteratively(MESH_INFO const& cmesh, MESH_INFO& rmesh, int target_level)
{
using offset_t = typename MESH_INFO::offset_t;
using index_t = typename MESH_INFO::index_t;
using value_t = typename MESH_INFO::value_t;
using vertex_t = typename MESH_INFO::vertex_t;
int current_level = 0;
ProfilinInfo<TimeProfileAccumulate<NoStateClock>, DeviceMemManager> profiling;
Context<MESH_INFO> ctx;
MESH_INFO tmp_cmesh = cmesh;
if (tmp_cmesh.type != MESH_INFO::MeshType::QUAD)
{
initPolyMesh(tmp_cmesh, rmesh, ctx, profiling.mem, profiling.prof);
subdivideTopologyPolyMesh(tmp_cmesh, rmesh, ctx, profiling.mem, profiling.prof);
subdivideVertexDataPolyMesh(tmp_cmesh, rmesh, ctx, profiling.mem, profiling.prof);
rmesh.type = MESH_INFO::MeshType::QUAD;
rmesh.is_reduced = false;
tmp_cmesh = rmesh;
profiling.mem.freeAll();
current_level++;
}
for (; current_level < target_level; ++current_level)
{
initQuadMesh(tmp_cmesh, rmesh, ctx, profiling.mem, profiling.prof);
subdivideTopologyQuadMesh(tmp_cmesh, rmesh, ctx, profiling.mem, profiling.prof);
subdivideVertexDataQuadMesh(tmp_cmesh, rmesh, ctx, profiling.mem, profiling.prof);
rmesh.type = MESH_INFO::MeshType::QUAD;
rmesh.is_reduced = false;
if (current_level != 0) tmp_cmesh.freeAndReset();
tmp_cmesh = rmesh;
profiling.mem.freeAll();
}
std::cout << "==========LAK===========\n";
std::cout << "Subdivision to level " << target_level;
std::cout << " took " << std::setprecision(2) << std::fixed << profiling.prof.time << " ms.";
std::cout << " peak mem " << profiling.mem.peakConsumption() / (1000 * 1000) << " MB";
std::cout << " \nCtrl. Mesh:";
std::cout << " nf: " << cmesh.nfaces;
std::cout << " nv: " << cmesh.nverts;
std::cout << " \nSubd. Mesh:";
std::cout << " nf: " << rmesh.nfaces;
std::cout << " nv: " << rmesh.nverts;
std::cout << "\n\n";
}
////////////////////////////////////////////////////////////////////////////////
/// Instantiations
using LAKCCMeshInfo = LAKCatmullClark::MeshInfo<int, int, int, float>;
template void LAKCatmullClark::subdivideIteratively(LAKCCMeshInfo const&, LAKCCMeshInfo&, int);
| 4feaeca7145cdd7e0f17f2c07fd6d2592ca2d86f.cu | // Copyright (c) 2020 Daniel Mlakar [email protected]
// Martin Winter [email protected]
// Pascal Stadlbauer [email protected]
// Hans-Peter Seidel [email protected]
// Markus Steinberger [email protected]
// Rhaleb Zayer [email protected]
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#include "LAKCatmullClark.h"
#include "LAKCatmullClarkKernels.cuh"
#include <iostream>
#include <iomanip>
#include <cusparse.h>
#include "math/spgemm.cuh"
#include "math/spmv.cuh"
namespace
{
template<typename T>
void getCircMapQ(std::vector<T>& Q, unsigned size, unsigned pwr, unsigned index_base = 0)
{
Q.resize((size + index_base) * (size + index_base), 0);
for (auto j = 1; j < size + 1; ++j)
{
for (auto i = 1; i < size + 1; ++i)
{
Q[(i - 1 + index_base) * (size + index_base) + (j - 1 + index_base)] = (j == ((i + pwr - 1) % size) + 1 ? 1 : 0);
}
}
};
template<typename T>
std::vector<T> getFromDev(const T* d, size_t num)
{
std::vector<T> h(num);
succeed(cudaMemcpy(&h[0], d, num * sizeof(T), cudaMemcpyDeviceToHost));
return h;
}
}
////////////////////////////////////////////////////////////////////////////////
/// Quadrilateral Mesh Subdiv
template<typename MESH_INFO, typename MEMORY_MANAGER, typename PROFILING>
void LAKCatmullClark::subdivideVertexDataQuadMesh(MESH_INFO const& cmesh, MESH_INFO& rmesh, Context<MESH_INFO>& ctx, MEMORY_MANAGER& mem, PROFILING& prof)
{
using offset_t = typename MESH_INFO::offset_t;
using index_t = typename MESH_INFO::index_t;
using vertex_t = typename MESH_INFO::vertex_t;
cudaEvent_t start, stop;
succeed(cudaEventCreate(&start));
succeed(cudaEventCreate(&stop));
subdivideVertexDataPolyMesh(cmesh, rmesh, ctx, mem, prof);
succeed(cudaEventDestroy(start));
succeed(cudaEventDestroy(stop));
}
template<typename MESH_INFO, typename MEMORY_MANAGER, typename PROFILING>
void LAKCatmullClark::subdivideTopologyQuadMesh(MESH_INFO const& cmesh, MESH_INFO& rmesh, Context<MESH_INFO>& ctx, MEMORY_MANAGER& mem, PROFILING& prof)
{
using offset_t = typename MESH_INFO::offset_t;
using index_t = typename MESH_INFO::index_t;
using value_t = typename MESH_INFO::value_t;
cudaEvent_t start, stop;
succeed(cudaEventCreate(&start));
succeed(cudaEventCreate(&stop));
mem.giveOwnership(cmesh.ptr, (cmesh.nfaces + 1) * sizeof(index_t));
mem.giveOwnership(cmesh.ids, cmesh.nnz * sizeof(offset_t));
mem.giveOwnership(cmesh.vals, cmesh.nnz * sizeof(value_t));
rmesh.nfaces = cmesh.nnz;
rmesh.nnz = rmesh.nfaces * 4;
rmesh.nverts = cmesh.nverts + cmesh.nfaces + (cmesh.nnz - ctx.nextern) / 2 + ctx.nextern;
rmesh.max_face_order = 4;
size_t grid_dim = divup(cmesh.nfaces, ctx.block_dim);
prof.start(start, "sorting row ids for topo refine");
LAKHelperKernels::sortKeyValuePairsSegmentedInPlace(cmesh.ptr, cmesh.vals, cmesh.ids, cmesh.nfaces);
prof.stop(start, stop);
rmesh.ptr = reinterpret_cast<offset_t*>(mem.getMemory((rmesh.nfaces + 1) * sizeof(offset_t)));
rmesh.ids = reinterpret_cast<offset_t*>(mem.getMemory(rmesh.nnz * sizeof(index_t)));
rmesh.vals = reinterpret_cast<offset_t*>(mem.getMemory(rmesh.nnz * sizeof(value_t)));
grid_dim = divup(cmesh.nnz, ctx.block_dim);
prof.start(start, "Refining topology - universal");
LAKCCKernels::refineTopologyHomogeneous << <grid_dim, ctx.block_dim >> > (
cmesh.ids, ctx.d_E_ptr, ctx.d_E_ids, ctx.d_E_vals, rmesh.ids, cmesh.nnz, cmesh.nverts, cmesh.nfaces, cmesh.max_face_order);
grid_dim = divup(rmesh.nfaces + 1, ctx.block_dim);
LAKCCKernels::createQuadColPtr << <grid_dim, ctx.block_dim >> > (rmesh.ptr, rmesh.nfaces);
grid_dim = divup(rmesh.nnz, ctx.block_dim);
LAKCCKernels::createQuadVals << <grid_dim, ctx.block_dim >> > (rmesh.vals, rmesh.nfaces);
prof.stop(start, stop);
mem.takeOwnership(rmesh.ptr);
mem.takeOwnership(rmesh.ids);
mem.takeOwnership(rmesh.vals);
mem.takeOwnership(cmesh.ptr);
mem.takeOwnership(cmesh.ids);
mem.takeOwnership(cmesh.vals);
succeed(cudaEventDestroy(start));
succeed(cudaEventDestroy(stop));
}
template<typename MESH_INFO, typename MEMORY_MANAGER, typename PROFILING>
void LAKCatmullClark::initQuadMesh(MESH_INFO const& cmesh, MESH_INFO& rmesh, Context<MESH_INFO>& ctx, MEMORY_MANAGER& mem, PROFILING& prof)
{
using index_t = typename MESH_INFO::index_t;
cudaEvent_t start, stop;
succeed(cudaEventCreate(&start));
succeed(cudaEventCreate(&stop));
initPolyMesh(cmesh, rmesh, ctx, mem, prof);
succeed(cudaEventDestroy(start));
succeed(cudaEventDestroy(stop));
}
////////////////////////////////////////////////////////////////////////////////
/// Polygonal Mesh Subdiv
template<typename MESH_INFO, typename MEMORY_MANAGER, typename PROFILING>
void LAKCatmullClark::subdivideVertexDataPolyMesh(MESH_INFO const& cmesh, MESH_INFO& rmesh, Context<MESH_INFO>& ctx, MEMORY_MANAGER& mem, PROFILING& prof)
{
using offset_t = typename MESH_INFO::offset_t;
using index_t = typename MESH_INFO::index_t;
using value_t = typename MESH_INFO::value_t;
using vertex_t = typename MESH_INFO::vertex_t;
constexpr int ncomponents{ 4 };
cudaEvent_t start, stop;
succeed(cudaEventCreate(&start));
succeed(cudaEventCreate(&stop));
mem.giveOwnership(cmesh.ptr, (cmesh.nfaces + 1) * sizeof(index_t));
mem.giveOwnership(cmesh.ids, cmesh.nnz * sizeof(offset_t));
mem.giveOwnership(cmesh.vals, cmesh.nnz * sizeof(value_t));
mem.giveOwnership(cmesh.verts, cmesh.nverts * ncomponents * sizeof(vertex_t));
rmesh.verts = reinterpret_cast<vertex_t*>(mem.getMemory(rmesh.nverts * ncomponents * sizeof(vertex_t)));
//calculate facepoints
vertex_t* d_facepoints = reinterpret_cast<vertex_t*>(rmesh.verts + ncomponents * cmesh.nverts);
size_t grid_dim = divup(cmesh.nfaces, ctx.block_dim);
prof.start(start, ("calculating facepoints"));
spmv_left_mapped_f4(cmesh.ptr, cmesh.ids, cmesh.vals,
reinterpret_cast<const float4*>(cmesh.verts), reinterpret_cast<float4*>(d_facepoints), cmesh.nverts, cmesh.nfaces, ctx.d_map, cmesh.max_face_order + 1);
LAKHelperKernels::divElementWise(d_facepoints, ctx.d_order_buffer + cmesh.nverts, vertex_t(0), d_facepoints, ncomponents * cmesh.nfaces);
prof.stop(start, stop);
//calculate edgepoints
vertex_t* d_edgepoints = reinterpret_cast<vertex_t*>(rmesh.verts + (cmesh.nverts + cmesh.nfaces) * ncomponents);
grid_dim = divup(size_t(ctx.nedges - ctx.nextern), ctx.block_dim);
prof.start(start, ("calculating internal edgepoints"));
LAKCCKernels::calculateInternalEdgepoints << <grid_dim, ctx.block_dim >> > (
ctx.d_internal0, ctx.d_internal1, ctx.d_intids, ctx.d_f0, ctx.d_f1, cmesh.verts, d_facepoints, d_edgepoints, ctx.nedges - ctx.nextern);
prof.stop(start, stop);
if (ctx.nextern != 0)
{
grid_dim = divup(size_t(ctx.nextern), ctx.block_dim);
prof.start(start, ("calculating external edgepoints"));
LAKCCKernels::calculateExternalEdgepoints << <grid_dim, ctx.block_dim >> > (
ctx.d_external0, ctx.d_external1, ctx.d_extids, cmesh.verts, d_edgepoints, ctx.nextern);
prof.stop(start, stop);
}
//update original vertices
vertex_t* d_p_norm = reinterpret_cast<vertex_t*>(mem.getMemory(cmesh.nverts * ncomponents * sizeof(vertex_t)));
vertex_t* d_fp_vp_sum_norm = reinterpret_cast<vertex_t*>(mem.getMemory(cmesh.nverts * ncomponents * sizeof(vertex_t)));
succeed(cudaMemset(d_fp_vp_sum_norm, 0, cmesh.nverts * ncomponents * sizeof(vertex_t)));
grid_dim = divup(cmesh.nverts, ctx.block_dim);
prof.start(start, "updating internal positions");
LAKHelperKernels::multElementWise(cmesh.verts, ctx.d_order_buffer, vertex_t(-2), d_p_norm, ncomponents * cmesh.nverts);
spmv_right_mapped_f4(cmesh.ptr, cmesh.ids, cmesh.vals,
reinterpret_cast<float4*>(d_facepoints), reinterpret_cast<float4*>(d_fp_vp_sum_norm), cmesh.nverts, cmesh.nfaces, ctx.d_map, cmesh.max_face_order + 1);
spmv_right_mapped_f4(ctx.d_F_ptr, ctx.d_F_ids, ctx.d_F_vals,
reinterpret_cast<float4*>(cmesh.verts), reinterpret_cast<float4*>(d_fp_vp_sum_norm), cmesh.nverts, cmesh.nverts, ctx.d_F_map, cmesh.nfaces + 1);
LAKHelperKernels::divElementWise(d_fp_vp_sum_norm, ctx.d_order_buffer, vertex_t(0), d_fp_vp_sum_norm, ncomponents * cmesh.nverts);
LAKHelperKernels::addElementWise(d_fp_vp_sum_norm, d_p_norm, rmesh.verts, ncomponents * cmesh.nverts);
LAKHelperKernels::divElementWise(rmesh.verts, ctx.d_order_buffer, vertex_t(0), rmesh.verts, ncomponents * cmesh.nverts);
prof.stop(start, stop);
if (ctx.nextern)
{
grid_dim = divup(size_t(ctx.nextern), ctx.block_dim);
prof.start(start, "updating external positions");
LAKCCKernels::prepareExternalVertexUpdate << <grid_dim, ctx.block_dim >> > (
ctx.d_external0, ctx.d_external1, cmesh.verts, rmesh.verts, ctx.nextern);
LAKCCKernels::calculateExternalVertexUpdate << <grid_dim, ctx.block_dim >> > (
ctx.d_external0, ctx.d_external1, cmesh.verts, rmesh.verts, ctx.nextern);
prof.stop(start, stop);
}
mem.freeMemory(d_p_norm);
mem.freeMemory(d_fp_vp_sum_norm);
//TODO: re-implement aka. port
//if (has_creases && !creases_decayed)
// time += handleCreases(C, d_vertex_data, d_refined_vertexdata, nf, nv);
mem.takeOwnership(rmesh.verts);
mem.takeOwnership(cmesh.ptr);
mem.takeOwnership(cmesh.ids);
mem.takeOwnership(cmesh.vals);
mem.takeOwnership(cmesh.verts);
succeed(cudaEventDestroy(start));
succeed(cudaEventDestroy(stop));
}
template<typename MESH_INFO, typename MEMORY_MANAGER, typename PROFILING>
void LAKCatmullClark::subdivideTopologyPolyMesh(MESH_INFO const& cmesh, MESH_INFO& rmesh, Context<MESH_INFO>& ctx, MEMORY_MANAGER& mem, PROFILING& prof)
{
using offset_t = typename MESH_INFO::offset_t;
using index_t = typename MESH_INFO::index_t;
using value_t = typename MESH_INFO::value_t;
cudaEvent_t start, stop;
succeed(cudaEventCreate(&start));
succeed(cudaEventCreate(&stop));
mem.giveOwnership(cmesh.ptr, (cmesh.nfaces + 1) * sizeof(index_t));
mem.giveOwnership(cmesh.ids, cmesh.nnz * sizeof(offset_t));
mem.giveOwnership(cmesh.vals, cmesh.nnz * sizeof(value_t));
rmesh.nfaces = cmesh.nnz;
rmesh.nnz = rmesh.nfaces * 4;
rmesh.nverts = cmesh.nverts + cmesh.nfaces + (cmesh.nnz - ctx.nextern) / 2 + ctx.nextern;
rmesh.max_face_order = 4;
size_t grid_dim = divup(cmesh.nfaces, ctx.block_dim);
prof.start(start, "sorting row ids for topo refine");
LAKHelperKernels::sortKeyValuePairsSegmentedInPlace(cmesh.ptr, cmesh.vals, cmesh.ids, cmesh.nfaces);
prof.stop(start, stop);
rmesh.ptr = reinterpret_cast<offset_t*>(mem.getMemory((rmesh.nfaces + 1) * sizeof(offset_t)));
rmesh.ids = reinterpret_cast<offset_t*>(mem.getMemory(rmesh.nnz * sizeof(index_t)));
rmesh.vals = reinterpret_cast<offset_t*>(mem.getMemory(rmesh.nnz * sizeof(value_t)));
grid_dim = divup(cmesh.nfaces, ctx.block_dim);
prof.start(start, "Refining topology - universal");
LAKCCKernels::refineTopology << <grid_dim, ctx.block_dim >> > (
cmesh.ptr, cmesh.ids,
ctx.d_E_ptr, ctx.d_E_ids, ctx.d_E_vals, rmesh.ids, cmesh.nfaces, cmesh.nverts);
grid_dim = divup(rmesh.nfaces + 1, ctx.block_dim);
LAKCCKernels::createQuadColPtr << <grid_dim, ctx.block_dim >> > (rmesh.ptr, rmesh.nfaces);
grid_dim = divup(rmesh.nnz, ctx.block_dim);
LAKCCKernels::createQuadVals << <grid_dim, ctx.block_dim >> > (rmesh.vals, rmesh.nfaces);
prof.stop(start, stop);
mem.takeOwnership(rmesh.ptr);
mem.takeOwnership(rmesh.ids);
mem.takeOwnership(rmesh.vals);
mem.takeOwnership(cmesh.ptr);
mem.takeOwnership(cmesh.ids);
mem.takeOwnership(cmesh.vals);
succeed(cudaEventDestroy(start));
succeed(cudaEventDestroy(stop));
}
template<typename MESH_INFO, typename MEMORY_MANAGER, typename PROFILING>
void LAKCatmullClark::initPolyMesh(MESH_INFO const& cmesh, MESH_INFO& rmesh, Context<MESH_INFO>& ctx, MEMORY_MANAGER& mem, PROFILING& prof)
{
using offset_t = typename MESH_INFO::offset_t;
using index_t = typename MESH_INFO::index_t;
using value_t = typename MESH_INFO::value_t;
cudaEvent_t start, stop;
succeed(cudaEventCreate(&start));
succeed(cudaEventCreate(&stop));
mem.giveOwnership(cmesh.ptr, (cmesh.nfaces + 1) * sizeof(index_t));
mem.giveOwnership(cmesh.ids, cmesh.nnz * sizeof(offset_t));
mem.giveOwnership(cmesh.vals, cmesh.nnz * sizeof(value_t));
size_t grid_dim = divup(cmesh.nfaces, ctx.block_dim);
prof.start(start, "sorting row ids");
LAKHelperKernels::sortKeyValuePairsSegmentedInPlace(cmesh.ptr, cmesh.ids, cmesh.vals, cmesh.nfaces);
prof.stop(start, stop);
//create map for spmv (+1 because values start at 1)
std::vector<value_t> spmv_map(cmesh.max_face_order + 1, static_cast<value_t>(1));
ctx.d_map = reinterpret_cast<value_t*>(mem.getMemory((cmesh.max_face_order + 1) * sizeof(value_t)));
succeed(cudaMemcpy(ctx.d_map, &spmv_map[0], (cmesh.max_face_order + 1) * sizeof(value_t), cudaMemcpyHostToDevice));
//create map for F*p (+1 because values start at 1)
std::vector<value_t> F_map(cmesh.nfaces + 1, static_cast<value_t>(1));
ctx.d_F_map = reinterpret_cast<value_t*>(mem.getMemory((cmesh.nfaces + 1) * sizeof(value_t)));
succeed(cudaMemcpy(ctx.d_F_map, &F_map[0], (cmesh.nfaces + 1) * sizeof(value_t), cudaMemcpyHostToDevice));
std::vector<value_t> nf_ones(cmesh.nfaces, 1.0f);
value_t* d_nf_ones = reinterpret_cast<value_t*>(mem.getMemory(cmesh.nfaces * sizeof(value_t)));
succeed(cudaMemcpy(d_nf_ones, &nf_ones[0], cmesh.nfaces * sizeof(value_t), cudaMemcpyHostToDevice));
ctx.d_order_buffer = reinterpret_cast<value_t*>(mem.getMemory((cmesh.nverts + cmesh.nfaces) * sizeof(value_t)));
succeed(cudaMemset(ctx.d_order_buffer, 0, cmesh.nverts * sizeof(value_t)));
prof.start(start, ("calculating vertex orders"));
spmv_right_mapped(
cmesh.ptr,
cmesh.ids,
cmesh.vals, d_nf_ones, ctx.d_order_buffer,
cmesh.nverts,
cmesh.nfaces,
ctx.d_map,
cmesh.max_face_order + 1);
prof.stop(start, stop);
mem.freeMemory(d_nf_ones);
//face order
std::vector<value_t> nv_ones(cmesh.nverts, 1.0f);
value_t* d_nv_ones = reinterpret_cast<value_t*>(mem.getMemory(cmesh.nverts * sizeof(value_t)));
succeed(cudaMemcpy(d_nv_ones, &nv_ones[0], cmesh.nverts * sizeof(value_t), cudaMemcpyHostToDevice));
value_t* d_faceorders = ctx.d_order_buffer + cmesh.nverts;
prof.start(start, ("calculating face orders"));
spmv_left_mapped(cmesh.ptr, cmesh.ids, cmesh.vals, d_nv_ones, d_faceorders, cmesh.nverts, cmesh.nfaces, ctx.d_map, cmesh.max_face_order + 1);
prof.stop(start, stop);
mem.freeMemory(d_nv_ones);
mem.registerConsumption((cmesh.nverts + 1) * sizeof(unsigned));
mem.registerConsumption(cmesh.nnz * sizeof(unsigned));
mem.registerConsumption(cmesh.nnz * sizeof(float));
offset_t* d_ptr_t = reinterpret_cast<offset_t*>(mem.getMemory((cmesh.nverts + 1) * sizeof(offset_t)));
index_t* d_ids_t = reinterpret_cast<index_t*>(mem.getMemory(cmesh.nnz * sizeof(index_t)));
value_t* d_vals_t = reinterpret_cast<value_t*>(mem.getMemory(cmesh.nnz * sizeof(value_t)));
cusparseHandle_t handle;
cuSparseSucceed(cusparseCreate(&handle));
prof.start(start, "transposing M");
cuSparseSucceed(cusparseScsr2csc(handle,
cmesh.nfaces, cmesh.nverts, cmesh.nnz,
reinterpret_cast<const float*>(cmesh.vals), cmesh.ptr, cmesh.ids,
reinterpret_cast<float*>(d_vals_t), d_ids_t, d_ptr_t,
CUSPARSE_ACTION_NUMERIC,
CUSPARSE_INDEX_BASE_ZERO));
prof.stop(start, stop);
//This would be the non-deprecated version... doesn't work
//size_t buffer_size{ 42 };
//prof.start(start, "transposing M 1/2");
//cuSparseSucceed(cusparseCsr2cscEx2_bufferSize(
// handle,
// cmesh.nfaces,
// cmesh.nverts,
// cmesh.nnz,
// cmesh.vals,
// cmesh.ptr,
// cmesh.ids,
// d_vals_t,
// d_ptr_t,
// d_ids_t,
// CUDA_R_32I,
// CUSPARSE_ACTION_SYMBOLIC,
// CUSPARSE_INDEX_BASE_ZERO,
// CUSPARSE_CSR2CSC_ALG1,
// &buffer_size));
//prof.stop(start, stop);
//void* buffer = mem.getMemory(buffer_size);
//prof.start(start, "transposing M 2/2");
//cuSparseSucceed(cusparseCsr2cscEx2(handle,
// cmesh.nfaces,
// cmesh.nverts,
// cmesh.nnz,
// cmesh.vals,
// cmesh.ptr,
// cmesh.ids,
// d_vals_t,
// d_ptr_t,
// d_ids_t,
// CUDA_R_32I,
// CUSPARSE_ACTION_NUMERIC,
// CUSPARSE_INDEX_BASE_ZERO,
// CUSPARSE_CSR2CSC_ALG1,
// buffer));
//prof.stop(start, stop);
//mem.freeMemory(buffer);
std::vector<value_t> map;
getCircMapQ(map, cmesh.max_face_order, 1, 1); // Q_{cmesh.max_face_order}
ctx.d_map0 = reinterpret_cast<value_t*>(mem.getMemory(map.size() * sizeof(value_t)));
succeed(cudaMemcpy(ctx.d_map0, &map[0], map.size() * sizeof(value_t), cudaMemcpyHostToDevice));
getCircMapQ(map, cmesh.max_face_order, cmesh.max_face_order - 1, 1); // Q_{cmesh.max_face_order}^{cmesh.max_face_order-1}
ctx.d_map1 = reinterpret_cast<value_t*>(mem.getMemory(map.size() * sizeof(value_t)));
succeed(cudaMemcpy(ctx.d_map1, &map[0], map.size() * sizeof(value_t), cudaMemcpyHostToDevice));
offset_t* d_F_ptr_t;
index_t* d_F_ids_t;
value_t* d_F_vals_t;
size_t f_nnz, f_nnz_t, e_nnz;
prof.time += spgemm_mapped<offset_t, index_t, value_t, MEMORY_MANAGER>(cmesh.ptr, cmesh.ids, cmesh.vals,
d_ptr_t, d_ids_t, d_vals_t,
ctx.d_F_ptr, ctx.d_F_ids, ctx.d_F_vals,
d_F_ptr_t, d_F_ids_t, d_F_vals_t,
ctx.d_E_ptr, ctx.d_E_ids, ctx.d_E_vals,
cmesh.nverts, cmesh.nfaces, cmesh.nverts, cmesh.nnz, cmesh.nnz,
f_nnz, f_nnz_t, e_nnz, ctx.nextern,
ctx.d_map0, cmesh.max_face_order + 1, cmesh.max_face_order + 1,
ctx.d_map1, cmesh.max_face_order + 1, cmesh.max_face_order + 1, mem);
mem.freeMemory(d_ptr_t);
mem.freeMemory(d_ids_t);
mem.freeMemory(d_vals_t);
mem.unregisterConsumption((cmesh.nverts + 1) * sizeof(offset_t));
mem.unregisterConsumption(cmesh.nnz * sizeof(index_t));
mem.unregisterConsumption(cmesh.nnz * sizeof(value_t));
prof.start(start, "compressing F F^T and E");
LAKHelperKernels::compress(ctx.d_F_ptr, ctx.d_F_ids, ctx.d_F_vals, cmesh.nverts, cmesh.nverts, f_nnz, mem);
LAKHelperKernels::compress(d_F_ptr_t, d_F_ids_t, d_F_vals_t, cmesh.nverts, cmesh.nverts, f_nnz_t, mem);
LAKHelperKernels::compress(ctx.d_E_ptr, ctx.d_E_ids, ctx.d_E_vals, cmesh.nverts, cmesh.nverts, e_nnz, mem);
prof.stop(start, stop);
ctx.nedges = e_nnz;
auto nintern = ctx.nedges - ctx.nextern;
ctx.d_internal0 = reinterpret_cast<index_t*>(mem.getMemory(nintern * sizeof(index_t)));
ctx.d_internal1 = reinterpret_cast<index_t*>(mem.getMemory(nintern * sizeof(index_t)));
ctx.d_intids = reinterpret_cast<index_t*>(mem.getMemory(nintern * sizeof(index_t)));
if (ctx.nextern)
{
ctx.d_external0 = reinterpret_cast<index_t*>(mem.getMemory(ctx.nextern * sizeof(unsigned)));
ctx.d_external1 = reinterpret_cast<index_t*>(mem.getMemory(ctx.nextern * sizeof(unsigned)));
ctx.d_extids = reinterpret_cast<index_t*>(mem.getMemory(ctx.nextern * sizeof(unsigned)));
}
index_t* d_nintext = reinterpret_cast<index_t*>(mem.getMemory(2 * sizeof(unsigned)));
succeed(cudaMemset((void*)d_nintext, 0, 2 * sizeof(unsigned)));
grid_dim = divup(cmesh.nverts, ctx.block_dim);
prof.start(start, "getting Edge info from E");
LAKCCKernels::extractEdgeInfoFromE << <grid_dim, ctx.block_dim >> > (
ctx.d_E_ptr, ctx.d_E_ids, ctx.d_E_vals, cmesh.nverts,
ctx.d_internal0, ctx.d_internal1, ctx.d_intids, ctx.d_external0, ctx.d_external1, ctx.d_extids, d_nintext);
prof.stop(start, stop);
ctx.d_f0 = reinterpret_cast<index_t*>(mem.getMemory(nintern * sizeof(unsigned)));
ctx.d_f1 = reinterpret_cast<index_t*>(mem.getMemory(nintern * sizeof(unsigned)));
grid_dim = divup(size_t(nintern), ctx.block_dim);
prof.start(start, "getting Face info from F and Ft");
LAKCCKernels::extractFaceInfoFromFFt << <grid_dim, ctx.block_dim >> > (
ctx.d_F_ptr, ctx.d_F_ids, ctx.d_F_vals, d_F_ptr_t, d_F_ids_t, d_F_vals_t,
ctx.d_internal0, ctx.d_internal1, nintern, ctx.d_f0, ctx.d_f1);
prof.stop(start, stop);
mem.freeMemory(d_F_ptr_t);
mem.freeMemory(d_F_ids_t);
mem.freeMemory(d_F_vals_t);
mem.takeOwnership(cmesh.ptr);
mem.takeOwnership(cmesh.ids);
mem.takeOwnership(cmesh.vals);
succeed(cudaEventDestroy(start));
succeed(cudaEventDestroy(stop));
}
////////////////////////////////////////////////////////////////////////////////
/// Publicly exposed
template<typename MESH_INFO>
void LAKCatmullClark::subdivideIteratively(MESH_INFO const& cmesh, MESH_INFO& rmesh, int target_level)
{
using offset_t = typename MESH_INFO::offset_t;
using index_t = typename MESH_INFO::index_t;
using value_t = typename MESH_INFO::value_t;
using vertex_t = typename MESH_INFO::vertex_t;
int current_level = 0;
ProfilinInfo<TimeProfileAccumulate<NoStateClock>, DeviceMemManager> profiling;
Context<MESH_INFO> ctx;
MESH_INFO tmp_cmesh = cmesh;
if (tmp_cmesh.type != MESH_INFO::MeshType::QUAD)
{
initPolyMesh(tmp_cmesh, rmesh, ctx, profiling.mem, profiling.prof);
subdivideTopologyPolyMesh(tmp_cmesh, rmesh, ctx, profiling.mem, profiling.prof);
subdivideVertexDataPolyMesh(tmp_cmesh, rmesh, ctx, profiling.mem, profiling.prof);
rmesh.type = MESH_INFO::MeshType::QUAD;
rmesh.is_reduced = false;
tmp_cmesh = rmesh;
profiling.mem.freeAll();
current_level++;
}
for (; current_level < target_level; ++current_level)
{
initQuadMesh(tmp_cmesh, rmesh, ctx, profiling.mem, profiling.prof);
subdivideTopologyQuadMesh(tmp_cmesh, rmesh, ctx, profiling.mem, profiling.prof);
subdivideVertexDataQuadMesh(tmp_cmesh, rmesh, ctx, profiling.mem, profiling.prof);
rmesh.type = MESH_INFO::MeshType::QUAD;
rmesh.is_reduced = false;
if (current_level != 0) tmp_cmesh.freeAndReset();
tmp_cmesh = rmesh;
profiling.mem.freeAll();
}
std::cout << "==========LAK===========\n";
std::cout << "Subdivision to level " << target_level;
std::cout << " took " << std::setprecision(2) << std::fixed << profiling.prof.time << " ms.";
std::cout << " peak mem " << profiling.mem.peakConsumption() / (1000 * 1000) << " MB";
std::cout << " \nCtrl. Mesh:";
std::cout << " nf: " << cmesh.nfaces;
std::cout << " nv: " << cmesh.nverts;
std::cout << " \nSubd. Mesh:";
std::cout << " nf: " << rmesh.nfaces;
std::cout << " nv: " << rmesh.nverts;
std::cout << "\n\n";
}
////////////////////////////////////////////////////////////////////////////////
/// Instantiations
using LAKCCMeshInfo = LAKCatmullClark::MeshInfo<int, int, int, float>;
template void LAKCatmullClark::subdivideIteratively(LAKCCMeshInfo const&, LAKCCMeshInfo&, int);
|
1f57595d0fe9911212f0e68dacb276e83318431f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@generated d Tue Aug 13 16:45:17 2013
*/
#include "common_magma.h"
/*********************************************************/
/*
* Swap diagonal blocks of two matrices.
* For more detail see the description below.
*/
__global__ void
magmagpu_dswapdblk(int nb,
double *dA1, int ldda1, int inca1,
double *dA2, int ldda2, int inca2 )
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
dA1 += tx + bx * nb * (ldda1 + inca1);
dA2 += tx + bx * nb * (ldda2 + inca2);
double tmp;
#pragma unroll
for( int i = 0; i < nb; i++ ){
tmp = dA1[i*ldda1];
dA1[i*ldda1] = dA2[i*ldda2];
dA2[i*ldda2] = tmp;
}
}
extern "C" void
magmablas_dswapdblk(magma_int_t n, magma_int_t nb,
double *dA1, magma_int_t ldda1, magma_int_t inca1,
double *dA2, magma_int_t ldda2, magma_int_t inca2 )
{
/* -- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
Purpose
=======
This is an auxiliary MAGMA routine. It swaps diagonal blocks
of size nb x nb between matrices dA1 and dA2 on the GPU.
The number of blocks swapped is (n-1)/nb. For i = 1 .. (n-1)/nb matrices
dA1 + i * nb * (ldda1 + inca1) and
dA2 + i * nb * (ldda2 + inca2) are swapped.
*/
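/* Usage sketch (illustrative assumption, not taken from MAGMA documentation):
   swapping the nb x nb blocks along the main diagonals of two column-major
   device matrices corresponds to inca1 = inca2 = 1, e.g.
       magmablas_dswapdblk( n, nb, dA1, ldda1, 1, dA2, ldda2, 1 );
   because block i then starts at offset i*nb*(ldda+1), i.e. at element
   (i*nb, i*nb). */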
magma_int_t blocksize = nb;
dim3 blocks( (n-1) / blocksize, 1, 1);
hipLaunchKernelGGL(( magmagpu_dswapdblk), dim3(blocks), dim3(blocksize), 0, magma_stream , nb,
dA1, ldda1, inca1,
dA2, ldda2, inca2 );
}
| 1f57595d0fe9911212f0e68dacb276e83318431f.cu | /*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@generated d Tue Aug 13 16:45:17 2013
*/
#include "common_magma.h"
/*********************************************************/
/*
* Swap diagonal blocks of two matrices.
* For more detail see the description below.
*/
__global__ void
magmagpu_dswapdblk(int nb,
double *dA1, int ldda1, int inca1,
double *dA2, int ldda2, int inca2 )
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
dA1 += tx + bx * nb * (ldda1 + inca1);
dA2 += tx + bx * nb * (ldda2 + inca2);
double tmp;
#pragma unroll
for( int i = 0; i < nb; i++ ){
tmp = dA1[i*ldda1];
dA1[i*ldda1] = dA2[i*ldda2];
dA2[i*ldda2] = tmp;
}
}
extern "C" void
magmablas_dswapdblk(magma_int_t n, magma_int_t nb,
double *dA1, magma_int_t ldda1, magma_int_t inca1,
double *dA2, magma_int_t ldda2, magma_int_t inca2 )
{
/* -- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
Purpose
=======
This is an auxiliary MAGMA routine. It swaps diagonal blocks
of size nb x nb between matrices dA1 and dA2 on the GPU.
The number of blocks swapped is (n-1)/nb. For i = 1 .. (n-1)/nb matrices
dA1 + i * nb * (ldda1 + inca1) and
dA2 + i * nb * (ldda2 + inca2) are swapped.
*/
magma_int_t blocksize = nb;
dim3 blocks( (n-1) / blocksize, 1, 1);
magmagpu_dswapdblk<<< blocks, blocksize, 0, magma_stream >>>( nb,
dA1, ldda1, inca1,
dA2, ldda2, inca2 );
}
|
bf1f7109175b86548efaaf2dd090c51ca35b3b51.hip | // !!! This is a file automatically generated by hipify!!!
#define NEW_STYLE
#include <SPH/adaptivity/AdaptivityDevice.cuh>
#include <SPH/adaptivity/Blending.cuh>
#include <SPH/adaptivity/ContinuousAdaptivity.cuh>
#include <SPH/adaptivity/Merging.cuh>
#include <SPH/adaptivity/Sharing.cuh>
#include <SPH/adaptivity/Splitting.cuh>
#include <utility/include_all.h>
/** This function adjusts the resolution of particles in the simulation: it performs splitting,
 * merging and mass sharing, closely following the reference paper. **/
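// The scheme alternates over frames: on even frames mergeable particles are
// detected, grabbed and merged; on odd frames mass is first shared between
// particles and particles flagged for refinement are then split, with the new
// particle count copied back from the device afterwards.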
void SPH::adaptive::adapt(Memory mem) {
uint32_t split_ptcls;
/* To avoid certain extra checks in the code we can restrict the function to either merge
particles (decreasing resolution) or to split particles (increasing resolution). As this is
done on a 2 frame period this should have no appreciable effect on the adaptation rate.*/
if (parameters::frame{} % 2 == 0) {
launch<decide>(mem.num_ptcls, mem);
cuda::Memset(mem.adaptiveMergeCounter, 0x00, sizeof(float) * mem.num_ptcls);
cuda::Memset(mem.adaptiveNumPtcls, 0x00, sizeof(float));
cuda::Memset(mem.adaptiveMergeable, 0x00, sizeof(uint32_t) * mem.num_ptcls);
launch<detectMergingParticles>(mem.num_ptcls, mem);
launch<grabEvenMergingParticles>(mem.num_ptcls, mem);
launch<grabOddMergingParticles>(mem.num_ptcls, mem);
MergeGrabbed(sorting_list, mem);
cuda::memcpy(&split_ptcls, mem.adaptiveNumPtcls, sizeof(uint), hipMemcpyDeviceToHost);
get<parameters::merged_ptcls>() = split_ptcls;
} else {
// Share particles
launch<decide>(mem.num_ptcls, mem);
cuda::Memset(mem.adaptiveNumPtcls, 0x00, sizeof(float));
cuda::Memset(mem.adaptiveMergeable, 0x00, sizeof(uint32_t) * mem.num_ptcls);
cuda::Memset(mem.adaptiveMergeCounter, 0x00, sizeof(float) * mem.num_ptcls);
launch<detectSharingParticles>(mem.num_ptcls, mem);
launch<grabEvenSharingParticles>(mem.num_ptcls, mem);
ShareGrabbed(sorting_list, mem);
cuda::memcpy(&split_ptcls, mem.adaptiveNumPtcls, sizeof(uint), hipMemcpyDeviceToHost);
get<parameters::shared_ptcls>() = split_ptcls;
launch<decide>(mem.num_ptcls, mem);
// Split particles, if the old particle count is close to the maximum particle count of the
// simulation do nothing.
int32_t old = mem.num_ptcls;
cuda::memcpy(mem.adaptiveNumPtcls, &mem.num_ptcls, sizeof(int32_t), hipMemcpyHostToDevice);
callSplit(sorting_list, mem);
cuda::memcpy(&mem.num_ptcls, mem.adaptiveNumPtcls, sizeof(int32_t), hipMemcpyDeviceToHost);
get<parameters::num_ptcls>() = mem.num_ptcls;
get<parameters::split_ptcls>() = mem.num_ptcls - old;
}
auto min = algorithm::reduce_min(mem.volume, mem.num_ptcls);
auto max = PI4O3 * math::power<3>(mem.radius);
auto ratio = max / min;
*parameters::adaptiveRatio::ptr = ratio.val;
}
basicFunctionType genparticleIndex(SPH::adaptive::Memory arrays) {
checkedParticleIdx(i);
if (arrays.adaptiveSplitIndicator[i] != 1)
arrays.particleIndex[i] = INT_MAX;
else
arrays.particleIndex[i] = i;
}
basicFunction(indexBlendingParticles, genparticleIndex, "Adaptive: indexing blending particles");
struct is_valid {
hostDeviceInline bool operator()(const int x) { return x != INT_MAX; }
};
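// Blending particles are gathered with a standard index-compaction pattern:
// indexBlendingParticles writes the particle index for blending particles and
// INT_MAX otherwise, and copy_if with is_valid compacts the surviving indices
// into particleIndexCompact before the blend kernels are launched.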
// Main function to call the density blending function
void SPH::adaptive::blendDensity(Memory mem) {
launch<indexBlendingParticles>(mem.num_ptcls, mem);
*parameters::blend_ptcls::ptr = (int32_t) algorithm::copy_if(mem.particleIndex, mem.particleIndexCompact, mem.num_ptcls, is_valid());
launch<blendDensities>(*parameters::blend_ptcls::ptr, mem, *parameters::blend_ptcls::ptr);
}
// Main function to call the velocity blending function
void SPH::adaptive::blendVelocity(Memory mem) {
launch<indexBlendingParticles>(mem.num_ptcls, mem);
*parameters::blend_ptcls::ptr = (int32_t) algorithm::copy_if(mem.particleIndex, mem.particleIndexCompact, mem.num_ptcls, is_valid());
launch<blendVelocities>(*parameters::blend_ptcls::ptr, mem, *parameters::blend_ptcls::ptr);
}
| bf1f7109175b86548efaaf2dd090c51ca35b3b51.cu | #define NEW_STYLE
#include <SPH/adaptivity/AdaptivityDevice.cuh>
#include <SPH/adaptivity/Blending.cuh>
#include <SPH/adaptivity/ContinuousAdaptivity.cuh>
#include <SPH/adaptivity/Merging.cuh>
#include <SPH/adaptivity/Sharing.cuh>
#include <SPH/adaptivity/Splitting.cuh>
#include <utility/include_all.h>
/** This function adjusts the resolution of particles in the simulation: it performs splitting,
 * merging and mass sharing, closely following the reference paper. **/
void SPH::adaptive::adapt(Memory mem) {
uint32_t split_ptcls;
/* To avoid certain extra checks in the code we can restrict the function to either merge
particles (decreasing resolution) or to split particles (increasing resolution). As this is
done on a 2 frame period this should have no appreciable effect on the adaptation rate.*/
if (parameters::frame{} % 2 == 0) {
launch<decide>(mem.num_ptcls, mem);
cuda::Memset(mem.adaptiveMergeCounter, 0x00, sizeof(float) * mem.num_ptcls);
cuda::Memset(mem.adaptiveNumPtcls, 0x00, sizeof(float));
cuda::Memset(mem.adaptiveMergeable, 0x00, sizeof(uint32_t) * mem.num_ptcls);
launch<detectMergingParticles>(mem.num_ptcls, mem);
launch<grabEvenMergingParticles>(mem.num_ptcls, mem);
launch<grabOddMergingParticles>(mem.num_ptcls, mem);
MergeGrabbed(sorting_list, mem);
cuda::memcpy(&split_ptcls, mem.adaptiveNumPtcls, sizeof(uint), cudaMemcpyDeviceToHost);
get<parameters::merged_ptcls>() = split_ptcls;
} else {
// Share particles
launch<decide>(mem.num_ptcls, mem);
cuda::Memset(mem.adaptiveNumPtcls, 0x00, sizeof(float));
cuda::Memset(mem.adaptiveMergeable, 0x00, sizeof(uint32_t) * mem.num_ptcls);
cuda::Memset(mem.adaptiveMergeCounter, 0x00, sizeof(float) * mem.num_ptcls);
launch<detectSharingParticles>(mem.num_ptcls, mem);
launch<grabEvenSharingParticles>(mem.num_ptcls, mem);
ShareGrabbed(sorting_list, mem);
cuda::memcpy(&split_ptcls, mem.adaptiveNumPtcls, sizeof(uint), cudaMemcpyDeviceToHost);
get<parameters::shared_ptcls>() = split_ptcls;
launch<decide>(mem.num_ptcls, mem);
// Split particles, if the old particle count is close to the maximum particle count of the
// simulation do nothing.
int32_t old = mem.num_ptcls;
cuda::memcpy(mem.adaptiveNumPtcls, &mem.num_ptcls, sizeof(int32_t), cudaMemcpyHostToDevice);
callSplit(sorting_list, mem);
cuda::memcpy(&mem.num_ptcls, mem.adaptiveNumPtcls, sizeof(int32_t), cudaMemcpyDeviceToHost);
get<parameters::num_ptcls>() = mem.num_ptcls;
get<parameters::split_ptcls>() = mem.num_ptcls - old;
}
auto min = algorithm::reduce_min(mem.volume, mem.num_ptcls);
auto max = PI4O3 * math::power<3>(mem.radius);
auto ratio = max / min;
*parameters::adaptiveRatio::ptr = ratio.val;
}
basicFunctionType genparticleIndex(SPH::adaptive::Memory arrays) {
checkedParticleIdx(i);
if (arrays.adaptiveSplitIndicator[i] != 1)
arrays.particleIndex[i] = INT_MAX;
else
arrays.particleIndex[i] = i;
}
basicFunction(indexBlendingParticles, genparticleIndex, "Adaptive: indexing blending particles");
struct is_valid {
hostDeviceInline bool operator()(const int x) { return x != INT_MAX; }
};
// Main function to call the density blending function
void SPH::adaptive::blendDensity(Memory mem) {
launch<indexBlendingParticles>(mem.num_ptcls, mem);
*parameters::blend_ptcls::ptr = (int32_t) algorithm::copy_if(mem.particleIndex, mem.particleIndexCompact, mem.num_ptcls, is_valid());
launch<blendDensities>(*parameters::blend_ptcls::ptr, mem, *parameters::blend_ptcls::ptr);
}
// Main function to call the velocity blending funciton
void SPH::adaptive::blendVelocity(Memory mem) {
launch<indexBlendingParticles>(mem.num_ptcls, mem);
*parameters::blend_ptcls::ptr = (int32_t) algorithm::copy_if(mem.particleIndex, mem.particleIndexCompact, mem.num_ptcls, is_valid());
launch<blendVelocities>(*parameters::blend_ptcls::ptr, mem, *parameters::blend_ptcls::ptr);
}
|
920e7861685d1a1305d9573e88c0743692415b4b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Distributed under MIT licence. See https://github.com/aniabrown/QuEST_GPU/blob/master/LICENCE.txt for details
/** @file
* An implementation of the backend in ../QuEST_internal.h for a GPU environment.
*/
# include "../QuEST.h"
# include "../QuEST_precision.h"
# include "../QuEST_internal.h" // purely to resolve getQuESTDefaultSeedKey
# include "../mt19937ar.h"
# include <stdlib.h>
# include <stdio.h>
# include <math.h>
# define REDUCE_SHARED_SIZE 512
# define DEBUG 0
static __device__ int extractBit (int locationOfBitFromRight, long long int theEncodedNumber)
{
return (theEncodedNumber & ( 1LL << locationOfBitFromRight )) >> locationOfBitFromRight;
}
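// Example: extractBit(2, 13) == 1, since 13 == 0b1101 and the bit at position 2
// (counting from the least significant bit, starting at 0) is set.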
#ifdef __cplusplus
extern "C" {
#endif
void statevec_setAmps(Qureg qureg, long long int startInd, qreal* reals, qreal* imags, long long int numAmps) {
hipDeviceSynchronize();
hipMemcpy(
qureg.deviceStateVec.real + startInd,
reals,
numAmps * sizeof(*(qureg.deviceStateVec.real)),
hipMemcpyHostToDevice);
hipMemcpy(
qureg.deviceStateVec.imag + startInd,
imags,
numAmps * sizeof(*(qureg.deviceStateVec.real)),
hipMemcpyHostToDevice);
}
/** works for both statevectors and density matrices */
void statevec_cloneQureg(Qureg targetQureg, Qureg copyQureg) {
// copy copyQureg's GPU statevec to targetQureg's GPU statevec
hipDeviceSynchronize();
hipMemcpy(
targetQureg.deviceStateVec.real,
copyQureg.deviceStateVec.real,
targetQureg.numAmpsPerChunk*sizeof(*(targetQureg.deviceStateVec.real)),
hipMemcpyDeviceToDevice);
hipMemcpy(
targetQureg.deviceStateVec.imag,
copyQureg.deviceStateVec.imag,
targetQureg.numAmpsPerChunk*sizeof(*(targetQureg.deviceStateVec.imag)),
hipMemcpyDeviceToDevice);
}
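// The kernel below builds the density matrix of a pure state as the outer
// product rho = |psi><psi|: each thread fixes one row amplitude of the copied
// state vector, loops over all columns, conjugates the column factor (hence
// the minus sign on imagCol) and writes the flattened column-major entries.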
__global__ void densmatr_initPureStateKernel(
long long int numPureAmps,
qreal *targetVecReal, qreal *targetVecImag,
qreal *copyVecReal, qreal *copyVecImag)
{
// this is a particular index of the pure copyQureg
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=numPureAmps) return;
qreal realRow = copyVecReal[index];
qreal imagRow = copyVecImag[index];
for (long long int col=0; col < numPureAmps; col++) {
qreal realCol = copyVecReal[col];
qreal imagCol = - copyVecImag[col]; // minus for conjugation
targetVecReal[col*numPureAmps + index] = realRow*realCol - imagRow*imagCol;
targetVecImag[col*numPureAmps + index] = realRow*imagCol + imagRow*realCol;
}
}
void densmatr_initPureState(Qureg targetQureg, Qureg copyQureg)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(copyQureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_initPureStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
copyQureg.numAmpsPerChunk,
targetQureg.deviceStateVec.real, targetQureg.deviceStateVec.imag,
copyQureg.deviceStateVec.real, copyQureg.deviceStateVec.imag);
}
__global__ void densmatr_initPlusStateKernel(long long int stateVecSize, qreal probFactor, qreal *stateVecReal, qreal *stateVecImag){
long long int index;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
stateVecReal[index] = probFactor;
stateVecImag[index] = 0.0;
}
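// For the n-qubit |+...+> state every density-matrix element equals 1/2^n
// (all amplitudes are equal and real), so a constant fill with probFactor and
// zero imaginary parts is sufficient.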
void densmatr_initPlusState(Qureg qureg)
{
qreal probFactor = 1.0/((qreal) (1LL << qureg.numQubitsRepresented));
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_initPlusStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
qureg.numAmpsPerChunk,
probFactor,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag);
}
__global__ void densmatr_initClassicalStateKernel(
long long int densityNumElems,
qreal *densityReal, qreal *densityImag,
long long int densityInd)
{
// initialise the state to all zeros
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= densityNumElems) return;
densityReal[index] = 0.0;
densityImag[index] = 0.0;
if (index==densityInd){
// classical state has probability 1
densityReal[densityInd] = 1.0;
densityImag[densityInd] = 0.0;
}
}
void densmatr_initClassicalState(Qureg qureg, long long int stateInd)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
// index of the desired state in the flat density matrix
long long int densityDim = 1LL << qureg.numQubitsRepresented;
long long int densityInd = (densityDim + 1)*stateInd;
// identical to pure version
hipLaunchKernelGGL(( densmatr_initClassicalStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag, densityInd);
}
void statevec_createQureg(Qureg *qureg, int numQubits, QuESTEnv env)
{
// allocate CPU memory
long long int numAmps = 1LL << numQubits;
long long int numAmpsPerRank = numAmps/env.numRanks;
qureg->stateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->stateVec.real)));
qureg->stateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->stateVec.imag)));
if (env.numRanks>1){
qureg->pairStateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->pairStateVec.real)));
qureg->pairStateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->pairStateVec.imag)));
}
// check cpu memory allocation was successful
if ( (!(qureg->stateVec.real) || !(qureg->stateVec.imag))
&& numAmpsPerRank ) {
printf("Could not allocate memory!\n");
exit (EXIT_FAILURE);
}
if ( env.numRanks>1 && (!(qureg->pairStateVec.real) || !(qureg->pairStateVec.imag))
&& numAmpsPerRank ) {
printf("Could not allocate memory!\n");
exit (EXIT_FAILURE);
}
qureg->numQubitsInStateVec = numQubits;
qureg->numAmpsPerChunk = numAmpsPerRank;
qureg->numAmpsTotal = numAmps;
qureg->chunkId = env.rank;
qureg->numChunks = env.numRanks;
qureg->isDensityMatrix = 0;
// allocate GPU memory
hipMalloc(&(qureg->deviceStateVec.real), qureg->numAmpsPerChunk*sizeof(*(qureg->deviceStateVec.real)));
hipMalloc(&(qureg->deviceStateVec.imag), qureg->numAmpsPerChunk*sizeof(*(qureg->deviceStateVec.imag)));
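// scratch buffers for the multi-level reductions (probabilities, inner products, purity, etc):
// the first level stores one partial sum per block of REDUCE_SHARED_SIZE amplitudes, the second
// stores the partials of those partials; later reductions ping-pong between the two via swapDouble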
hipMalloc(&(qureg->firstLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)REDUCE_SHARED_SIZE)*sizeof(qreal));
hipMalloc(&(qureg->secondLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)(REDUCE_SHARED_SIZE*REDUCE_SHARED_SIZE))*
sizeof(qreal));
// check gpu memory allocation was successful
if (!(qureg->deviceStateVec.real) || !(qureg->deviceStateVec.imag)){
printf("Could not allocate memory on GPU!\n");
exit (EXIT_FAILURE);
}
}
void statevec_destroyQureg(Qureg qureg, QuESTEnv env)
{
// Free CPU memory
free(qureg.stateVec.real);
free(qureg.stateVec.imag);
if (env.numRanks>1){
free(qureg.pairStateVec.real);
free(qureg.pairStateVec.imag);
}
// Free GPU memory
hipFree(qureg.deviceStateVec.real);
hipFree(qureg.deviceStateVec.imag);
}
int GPUExists(void){
int deviceCount, device;
int gpuDeviceCount = 0;
struct hipDeviceProp_t properties;
hipError_t cudaResultCode = hipGetDeviceCount(&deviceCount);
if (cudaResultCode != hipSuccess) deviceCount = 0;
/* machines with no GPUs can still report one emulation device */
for (device = 0; device < deviceCount; ++device) {
hipGetDeviceProperties(&properties, device);
if (properties.major != 9999) { /* 9999 means emulation only */
++gpuDeviceCount;
}
}
if (gpuDeviceCount) return 1;
else return 0;
}
QuESTEnv createQuESTEnv(void) {
// init MPI environment
if (!GPUExists()){
printf("Trying to run GPU code with no GPU available\n");
exit(EXIT_FAILURE);
}
QuESTEnv env;
env.rank=0;
env.numRanks=1;
seedQuESTDefault();
return env;
}
void syncQuESTEnv(QuESTEnv env){
hipDeviceSynchronize();
}
int syncQuESTSuccess(int successCode){
return successCode;
}
void destroyQuESTEnv(QuESTEnv env){
// MPI finalize goes here in MPI version. Call this function anyway for consistency
}
void reportQuESTEnv(QuESTEnv env){
printf("EXECUTION ENVIRONMENT:\n");
printf("Running locally on one node with GPU\n");
printf("Number of ranks is %d\n", env.numRanks);
# ifdef _OPENMP
printf("OpenMP enabled\n");
printf("Number of threads available is %d\n", omp_get_max_threads());
# else
printf("OpenMP disabled\n");
# endif
}
void getEnvironmentString(QuESTEnv env, Qureg qureg, char str[200]){
sprintf(str, "%dqubits_GPU_noMpi_noOMP", qureg.numQubitsInStateVec);
}
void copyStateToGPU(Qureg qureg)
{
if (DEBUG) printf("Copying data to GPU\n");
hipMemcpy(qureg.deviceStateVec.real, qureg.stateVec.real,
qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.real)), hipMemcpyHostToDevice);
hipMemcpy(qureg.deviceStateVec.imag, qureg.stateVec.imag,
qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.imag)), hipMemcpyHostToDevice);
if (DEBUG) printf("Finished copying data to GPU\n");
}
void copyStateFromGPU(Qureg qureg)
{
hipDeviceSynchronize();
if (DEBUG) printf("Copying data from GPU\n");
hipMemcpy(qureg.stateVec.real, qureg.deviceStateVec.real,
qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.real)), hipMemcpyDeviceToHost);
hipMemcpy(qureg.stateVec.imag, qureg.deviceStateVec.imag,
qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.imag)), hipMemcpyDeviceToHost);
if (DEBUG) printf("Finished copying data from GPU\n");
}
/** Print the current state vector of probability amplitudes for a set of qubits to standard out.
For debugging purposes. Each rank should print output serially. Only print output for systems <= 5 qubits
*/
void statevec_reportStateToScreen(Qureg qureg, QuESTEnv env, int reportRank){
long long int index;
int rank;
copyStateFromGPU(qureg);
if (qureg.numQubitsInStateVec<=5){
for (rank=0; rank<qureg.numChunks; rank++){
if (qureg.chunkId==rank){
if (reportRank) {
printf("Reporting state from rank %d [\n", qureg.chunkId);
//printf("\trank, index, real, imag\n");
printf("real, imag\n");
} else if (rank==0) {
printf("Reporting state [\n");
printf("real, imag\n");
}
for(index=0; index<qureg.numAmpsPerChunk; index++){
printf(REAL_STRING_FORMAT ", " REAL_STRING_FORMAT "\n", qureg.stateVec.real[index], qureg.stateVec.imag[index]);
}
if (reportRank || rank==qureg.numChunks-1) printf("]\n");
}
syncQuESTEnv(env);
}
}
}
qreal statevec_getRealAmp(Qureg qureg, long long int index){
qreal el=0;
hipMemcpy(&el, &(qureg.deviceStateVec.real[index]),
sizeof(*(qureg.deviceStateVec.real)), hipMemcpyDeviceToHost);
return el;
}
qreal statevec_getImagAmp(Qureg qureg, long long int index){
qreal el=0;
hipMemcpy(&el, &(qureg.deviceStateVec.imag[index]),
sizeof(*(qureg.deviceStateVec.imag)), hipMemcpyDeviceToHost);
return el;
}
__global__ void statevec_initZeroStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){
long long int index;
// initialise the state to |0000..0000>
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
stateVecReal[index] = 0.0;
stateVecImag[index] = 0.0;
if (index==0){
// zero state |0000..0000> has probability 1
stateVecReal[0] = 1.0;
stateVecImag[0] = 0.0;
}
}
void statevec_initZeroState(Qureg qureg)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_initZeroStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag);
}
__global__ void statevec_initPlusStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){
long long int index;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
qreal normFactor = 1.0/sqrt((qreal)stateVecSize);
stateVecReal[index] = normFactor;
stateVecImag[index] = 0.0;
}
void statevec_initPlusState(Qureg qureg)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_initPlusStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag);
}
__global__ void statevec_initClassicalStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, long long int stateInd){
long long int index;
// initialise the state to |stateInd>
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
stateVecReal[index] = 0.0;
stateVecImag[index] = 0.0;
if (index==stateInd){
// classical state has probability 1
stateVecReal[stateInd] = 1.0;
stateVecImag[stateInd] = 0.0;
}
}
void statevec_initClassicalState(Qureg qureg, long long int stateInd)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_initClassicalStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag, stateInd);
}
__global__ void statevec_initStateDebugKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){
long long int index;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
stateVecReal[index] = (index*2.0)/10.0;
stateVecImag[index] = (index*2.0+1.0)/10.0;
}
void statevec_initStateDebug(Qureg qureg)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_initStateDebugKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag);
}
__global__ void statevec_initStateOfSingleQubitKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, int qubitId, int outcome){
long long int index;
int bit;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
qreal normFactor = 1.0/sqrt((qreal)stateVecSize/2);
bit = extractBit(qubitId, index);
if (bit==outcome) {
stateVecReal[index] = normFactor;
stateVecImag[index] = 0.0;
} else {
stateVecReal[index] = 0.0;
stateVecImag[index] = 0.0;
}
}
void statevec_initStateOfSingleQubit(Qureg *qureg, int qubitId, int outcome)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg->numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_initStateOfSingleQubitKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg->numAmpsPerChunk, qureg->deviceStateVec.real, qureg->deviceStateVec.imag, qubitId, outcome);
}
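// Reads amplitudes from a plain-text file of "real, imag" pairs, one amplitude per line, where
// lines beginning with '#' are treated as comments; only the amplitudes belonging to this chunk
// are stored, and the populated state is then copied to the GPU.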
// returns 1 if successful, else 0
int statevec_initStateFromSingleFile(Qureg *qureg, char filename[200], QuESTEnv env){
long long int chunkSize, stateVecSize;
long long int indexInChunk, totalIndex;
chunkSize = qureg->numAmpsPerChunk;
stateVecSize = chunkSize*qureg->numChunks;
qreal *stateVecReal = qureg->stateVec.real;
qreal *stateVecImag = qureg->stateVec.imag;
FILE *fp;
char line[200];
fp = fopen(filename, "r");
if (fp == NULL)
return 0;
indexInChunk = 0; totalIndex = 0;
while (fgets(line, sizeof(char)*200, fp) != NULL && totalIndex<stateVecSize){
if (line[0]!='#'){
int chunkId = totalIndex/chunkSize;
if (chunkId==qureg->chunkId){
# if QuEST_PREC==1
sscanf(line, "%f, %f", &(stateVecReal[indexInChunk]),
&(stateVecImag[indexInChunk]));
# elif QuEST_PREC==2
sscanf(line, "%lf, %lf", &(stateVecReal[indexInChunk]),
&(stateVecImag[indexInChunk]));
# elif QuEST_PREC==4
sscanf(line, "%Lf, %Lf", &(stateVecReal[indexInChunk]),
&(stateVecImag[indexInChunk]));
# endif
indexInChunk += 1;
}
totalIndex += 1;
}
}
fclose(fp);
copyStateToGPU(*qureg);
// indicate success
return 1;
}
int statevec_compareStates(Qureg mq1, Qureg mq2, qreal precision){
qreal diff;
int chunkSize = mq1.numAmpsPerChunk;
copyStateFromGPU(mq1);
copyStateFromGPU(mq2);
for (int i=0; i<chunkSize; i++){
diff = mq1.stateVec.real[i] - mq2.stateVec.real[i];
if (diff<0) diff *= -1;
if (diff>precision) return 0;
diff = mq1.stateVec.imag[i] - mq2.stateVec.imag[i];
if (diff<0) diff *= -1;
if (diff>precision) return 0;
}
return 1;
}
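/** Applies the single-qubit unitary {{alpha, -conj(beta)}, {beta, conj(alpha)}} to rotQubit.
 * Each thread updates one pair of amplitudes whose indices differ only in the rotQubit bit:
 * with sizeHalfBlock = 2^rotQubit, task t maps to
 * indexUp = (t/sizeHalfBlock)*2*sizeHalfBlock + t%sizeHalfBlock and indexLo = indexUp + sizeHalfBlock,
 * e.g. for rotQubit=1 the pairs are (0,2), (1,3), (4,6), (5,7), ...
 */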
__global__ void statevec_compactUnitaryKernel (Qureg qureg, const int rotQubit, Complex alpha, Complex beta){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
long long int thisTask; // task-based approach to expose loop-level parallelism with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << rotQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
//! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
qreal alphaImag=alpha.imag, alphaReal=alpha.real;
qreal betaImag=beta.imag, betaReal=beta.real;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo]
stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp
- betaReal*stateRealLo - betaImag*stateImagLo;
stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp
- betaReal*stateImagLo + betaImag*stateRealLo;
// state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo]
stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp
+ alphaReal*stateRealLo + alphaImag*stateImagLo;
stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp
+ alphaReal*stateImagLo - alphaImag*stateRealLo;
}
void statevec_compactUnitary(Qureg qureg, const int targetQubit, Complex alpha, Complex beta)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_compactUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, alpha, beta);
}
__global__ void statevec_controlledCompactUnitaryKernel (Qureg qureg, const int controlQubit, const int targetQubit, Complex alpha, Complex beta){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
long long int thisTask; // task-based approach to expose loop-level parallelism with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
int controlBit;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
//! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
qreal alphaImag=alpha.imag, alphaReal=alpha.real;
qreal betaImag=beta.imag, betaReal=beta.real;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
controlBit = extractBit(controlQubit, indexUp);
if (controlBit){
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo]
stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp
- betaReal*stateRealLo - betaImag*stateImagLo;
stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp
- betaReal*stateImagLo + betaImag*stateRealLo;
// state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo]
stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp
+ alphaReal*stateRealLo + alphaImag*stateImagLo;
stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp
+ alphaReal*stateImagLo - alphaImag*stateRealLo;
}
}
void statevec_controlledCompactUnitary(Qureg qureg, const int controlQubit, const int targetQubit, Complex alpha, Complex beta)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_controlledCompactUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit, alpha, beta);
}
__global__ void statevec_unitaryKernel(Qureg qureg, const int targetQubit, ComplexMatrix2 u){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
long long int thisTask; // task-based approach to expose loop-level parallelism with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
//! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo]
stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp
+ u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo;
stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp
+ u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo;
// state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo]
stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp
+ u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo;
stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp
+ u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo;
}
void statevec_unitary(Qureg qureg, const int targetQubit, ComplexMatrix2 u)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_unitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, u);
}
__global__ void statevec_controlledUnitaryKernel(Qureg qureg, const int controlQubit, const int targetQubit, ComplexMatrix2 u){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
long long int thisTask; // task-based approach to expose loop-level parallelism with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
int controlBit;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
//! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
controlBit = extractBit(controlQubit, indexUp);
if (controlBit){
// state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo]
stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp
+ u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo;
stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp
+ u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo;
// state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo]
stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp
+ u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo;
stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp
+ u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo;
}
}
void statevec_controlledUnitary(Qureg qureg, const int controlQubit, const int targetQubit, ComplexMatrix2 u)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_controlledUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit, u);
}
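/** As statevec_controlledUnitaryKernel, but conditioned on an arbitrary set of control qubits.
 * mask carries a 1-bit at each control qubit's position, so mask == (mask & indexUp) holds exactly
 * when every control bit of the amplitude index is 1; e.g. controls {0,2} give mask = 0b101.
 */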
__global__ void statevec_multiControlledUnitaryKernel(Qureg qureg, long long int mask, const int targetQubit, ComplexMatrix2 u){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
long long int thisTask; // task-based approach to expose loop-level parallelism with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
//! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
if (mask == (mask & indexUp) ){
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo]
stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp
+ u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo;
stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp
+ u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo;
// state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo]
stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp
+ u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo;
stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp
+ u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo;
}
}
void statevec_multiControlledUnitary(Qureg qureg, int *controlQubits, int numControlQubits, const int targetQubit, ComplexMatrix2 u)
{
int threadsPerCUDABlock, CUDABlocks;
long long int mask=0;
for (int i=0; i<numControlQubits; i++) mask = mask | (1LL<<controlQubits[i]);
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_multiControlledUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, mask, targetQubit, u);
}
__global__ void statevec_pauliXKernel(Qureg qureg, const int targetQubit){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp, // storage for previous state values
stateImagUp; // (used in updates)
// ----- temp variables
long long int thisTask; // task-based approach to expose loop-level parallelism with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
//! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateVecReal[indexUp] = stateVecReal[indexLo];
stateVecImag[indexUp] = stateVecImag[indexLo];
stateVecReal[indexLo] = stateRealUp;
stateVecImag[indexLo] = stateImagUp;
}
void statevec_pauliX(Qureg qureg, const int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_pauliXKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit);
}
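/** Applies conjFac * Y to targetQubit, where Y = {{0, -i}, {i, 0}}: conjFac is +1 for
 * statevec_pauliY and -1 for statevec_pauliYConj, which applies the complex conjugate of Y.
 */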
__global__ void statevec_pauliYKernel(Qureg qureg, const int targetQubit, const int conjFac){
long long int sizeHalfBlock = 1LL << targetQubit;
long long int sizeBlock = 2LL * sizeHalfBlock;
long long int numTasks = qureg.numAmpsPerChunk >> 1;
long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
long long int thisBlock = thisTask / sizeHalfBlock;
long long int indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
long long int indexLo = indexUp + sizeHalfBlock;
qreal stateRealUp, stateImagUp;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
// update under +-{{0, -i}, {i, 0}}
stateVecReal[indexUp] = conjFac * stateVecImag[indexLo];
stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo];
stateVecReal[indexLo] = conjFac * -stateImagUp;
stateVecImag[indexLo] = conjFac * stateRealUp;
}
void statevec_pauliY(Qureg qureg, const int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_pauliYKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, 1);
}
void statevec_pauliYConj(Qureg qureg, const int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_pauliYKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, -1);
}
__global__ void statevec_controlledPauliYKernel(Qureg qureg, const int controlQubit, const int targetQubit, const int conjFac)
{
long long int index;
long long int sizeBlock, sizeHalfBlock;
long long int stateVecSize;
int controlBit;
qreal stateRealUp, stateImagUp;
long long int thisBlock, indexUp, indexLo;
sizeHalfBlock = 1LL << targetQubit;
sizeBlock = 2LL * sizeHalfBlock;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=(stateVecSize>>1)) return;
thisBlock = index / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + index%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
controlBit = extractBit(controlQubit, indexUp);
if (controlBit){
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
// update under +-{{0, -i}, {i, 0}}
stateVecReal[indexUp] = conjFac * stateVecImag[indexLo];
stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo];
stateVecReal[indexLo] = conjFac * -stateImagUp;
stateVecImag[indexLo] = conjFac * stateRealUp;
}
}
void statevec_controlledPauliY(Qureg qureg, const int controlQubit, const int targetQubit)
{
int conjFactor = 1;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_controlledPauliYKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit, conjFactor);
}
void statevec_controlledPauliYConj(Qureg qureg, const int controlQubit, const int targetQubit)
{
int conjFactor = -1;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_controlledPauliYKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit, conjFactor);
}
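/** Multiplies each amplitude whose targetQubit bit is 1 (indexLo) by the phase factor
 * cosAngle + i*sinAngle, leaving the targetQubit = 0 amplitudes unchanged.
 */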
__global__ void statevec_phaseShiftByTermKernel(Qureg qureg, const int targetQubit, qreal cosAngle, qreal sinAngle) {
long long int sizeBlock, sizeHalfBlock;
long long int thisBlock, indexUp,indexLo;
qreal stateRealLo, stateImagLo;
long long int thisTask;
const long long int numTasks = qureg.numAmpsPerChunk >> 1;
sizeHalfBlock = 1LL << targetQubit;
sizeBlock = 2LL * sizeHalfBlock;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
stateVecReal[indexLo] = cosAngle*stateRealLo - sinAngle*stateImagLo;
stateVecImag[indexLo] = sinAngle*stateRealLo + cosAngle*stateImagLo;
}
void statevec_phaseShiftByTerm(Qureg qureg, const int targetQubit, Complex term)
{
qreal cosAngle = term.real;
qreal sinAngle = term.imag;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_phaseShiftByTermKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, cosAngle, sinAngle);
}
__global__ void statevec_controlledPhaseShiftKernel(Qureg qureg, const int idQubit1, const int idQubit2, qreal cosAngle, qreal sinAngle)
{
long long int index;
long long int stateVecSize;
int bit1, bit2;
qreal stateRealLo, stateImagLo;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
bit1 = extractBit (idQubit1, index);
bit2 = extractBit (idQubit2, index);
if (bit1 && bit2) {
stateRealLo = stateVecReal[index];
stateImagLo = stateVecImag[index];
stateVecReal[index] = cosAngle*stateRealLo - sinAngle*stateImagLo;
stateVecImag[index] = sinAngle*stateRealLo + cosAngle*stateImagLo;
}
}
void statevec_controlledPhaseShift(Qureg qureg, const int idQubit1, const int idQubit2, qreal angle)
{
qreal cosAngle = cos(angle);
qreal sinAngle = sin(angle);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); // kernel iterates over every amplitude
hipLaunchKernelGGL(( statevec_controlledPhaseShiftKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, idQubit1, idQubit2, cosAngle, sinAngle);
}
__global__ void statevec_multiControlledPhaseShiftKernel(Qureg qureg, long long int mask, qreal cosAngle, qreal sinAngle) {
qreal stateRealLo, stateImagLo;
long long int index;
long long int stateVecSize;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
if (mask == (mask & index) ){
stateRealLo = stateVecReal[index];
stateImagLo = stateVecImag[index];
stateVecReal[index] = cosAngle*stateRealLo - sinAngle*stateImagLo;
stateVecImag[index] = sinAngle*stateRealLo + cosAngle*stateImagLo;
}
}
void statevec_multiControlledPhaseShift(Qureg qureg, int *controlQubits, int numControlQubits, qreal angle)
{
qreal cosAngle = cos(angle);
qreal sinAngle = sin(angle);
long long int mask=0;
for (int i=0; i<numControlQubits; i++)
mask = mask | (1LL<<controlQubits[i]);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_multiControlledPhaseShiftKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, mask, cosAngle, sinAngle);
}
qreal densmatr_calcTotalProb(Qureg qureg) {
// computes the trace using Kahan summation
qreal pTotal=0;
qreal y, t, c;
c = 0;
long long int numCols = 1LL << qureg.numQubitsRepresented;
long long int diagIndex;
copyStateFromGPU(qureg);
for (int col=0; col< numCols; col++) {
diagIndex = col*(numCols + 1);
y = qureg.stateVec.real[diagIndex] - c;
t = pTotal + y;
c = ( t - pTotal ) - y; // brackets are important
pTotal = t;
}
return pTotal;
}
qreal statevec_calcTotalProb(Qureg qureg){
/* IJB - implemented using Kahan summation for greater accuracy at a slight floating
point operation overhead. For more details see https://en.wikipedia.org/wiki/Kahan_summation_algorithm */
/* Don't change the bracketing in this routine! */
qreal pTotal=0;
qreal y, t, c;
long long int index;
long long int numAmpsPerRank = qureg.numAmpsPerChunk;
copyStateFromGPU(qureg);
c = 0.0;
for (index=0; index<numAmpsPerRank; index++){
/* Perform pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index]; by Kahan */
// pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index];
y = qureg.stateVec.real[index]*qureg.stateVec.real[index] - c;
t = pTotal + y;
c = ( t - pTotal ) - y;
pTotal = t;
/* Perform pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index]; by Kahan */
//pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index];
y = qureg.stateVec.imag[index]*qureg.stateVec.imag[index] - c;
t = pTotal + y;
c = ( t - pTotal ) - y;
pTotal = t;
}
return pTotal;
}
__global__ void statevec_controlledPhaseFlipKernel(Qureg qureg, const int idQubit1, const int idQubit2)
{
long long int index;
long long int stateVecSize;
int bit1, bit2;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
bit1 = extractBit (idQubit1, index);
bit2 = extractBit (idQubit2, index);
if (bit1 && bit2) {
stateVecReal [index] = - stateVecReal [index];
stateVecImag [index] = - stateVecImag [index];
}
}
void statevec_controlledPhaseFlip(Qureg qureg, const int idQubit1, const int idQubit2)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_controlledPhaseFlipKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, idQubit1, idQubit2);
}
__global__ void statevec_multiControlledPhaseFlipKernel(Qureg qureg, long long int mask)
{
long long int index;
long long int stateVecSize;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
if (mask == (mask & index) ){
stateVecReal [index] = - stateVecReal [index];
stateVecImag [index] = - stateVecImag [index];
}
}
void statevec_multiControlledPhaseFlip(Qureg qureg, int *controlQubits, int numControlQubits)
{
int threadsPerCUDABlock, CUDABlocks;
long long int mask=0;
for (int i=0; i<numControlQubits; i++) mask = mask | (1LL<<controlQubits[i]);
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_multiControlledPhaseFlipKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, mask);
}
__global__ void statevec_hadamardKernel (Qureg qureg, const int targetQubit){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
long long int thisTask; // task-based approach to expose loop-level parallelism with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
//! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
qreal recRoot2 = 1.0/sqrt(2.0);
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
stateVecReal[indexUp] = recRoot2*(stateRealUp + stateRealLo);
stateVecImag[indexUp] = recRoot2*(stateImagUp + stateImagLo);
stateVecReal[indexLo] = recRoot2*(stateRealUp - stateRealLo);
stateVecImag[indexLo] = recRoot2*(stateImagUp - stateImagLo);
}
void statevec_hadamard(Qureg qureg, const int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_hadamardKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit);
}
__global__ void statevec_controlledNotKernel(Qureg qureg, const int controlQubit, const int targetQubit)
{
long long int index;
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
long long int stateVecSize;
int controlBit;
// ----- temp variables
qreal stateRealUp, // storage for previous state values
stateImagUp; // (used in updates)
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=(stateVecSize>>1)) return;
thisBlock = index / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + index%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
controlBit = extractBit(controlQubit, indexUp);
if (controlBit){
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateVecReal[indexUp] = stateVecReal[indexLo];
stateVecImag[indexUp] = stateVecImag[indexLo];
stateVecReal[indexLo] = stateRealUp;
stateVecImag[indexLo] = stateImagUp;
}
}
void statevec_controlledNot(Qureg qureg, const int controlQubit, const int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_controlledNotKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit);
}
__device__ __host__ unsigned int log2Int( unsigned int x )
{
unsigned int ans = 0 ;
while( x>>=1 ) ans++;
return ans ;
}
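/** Tree-reduces the 'length' shared-memory values in arrayIn to a single sum, which thread 0
 * writes to reducedArray[blockIdx.x]. Each pass adds the upper half of the remaining values onto
 * the lower half and halves the active thread count; length is assumed to be a power of two,
 * as arranged by the reduction drivers further below.
 */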
__device__ void reduceBlock(qreal *arrayIn, qreal *reducedArray, int length){
int i, l, r;
int threadMax, maxDepth;
threadMax = length/2;
maxDepth = log2Int(length/2);
for (i=0; i<maxDepth+1; i++){
if (threadIdx.x<threadMax){
l = threadIdx.x;
r = l + threadMax;
arrayIn[l] = arrayIn[r] + arrayIn[l];
}
threadMax = threadMax >> 1;
__syncthreads(); // optimise -- use warp shuffle instead
}
if (threadIdx.x==0) reducedArray[blockIdx.x] = arrayIn[0];
}
__global__ void copySharedReduceBlock(qreal*arrayIn, qreal *reducedArray, int length){
extern __shared__ qreal tempReductionArray[];
int blockOffset = blockIdx.x*length;
tempReductionArray[threadIdx.x*2] = arrayIn[blockOffset + threadIdx.x*2];
tempReductionArray[threadIdx.x*2+1] = arrayIn[blockOffset + threadIdx.x*2+1];
__syncthreads();
reduceBlock(tempReductionArray, reducedArray, length);
}
__global__ void densmatr_findProbabilityOfZeroKernel(
Qureg qureg, const int measureQubit, qreal *reducedArray
) {
// run by each thread
// use of block here refers to contiguous amplitudes where measureQubit = 0,
// (then =1) and NOT the CUDA block, which is the partitioning of CUDA threads
long long int densityDim = 1LL << qureg.numQubitsRepresented;
long long int numTasks = densityDim >> 1;
long long int sizeHalfBlock = 1LL << (measureQubit);
long long int sizeBlock = 2LL * sizeHalfBlock;
long long int thisBlock; // which block this thread is processing
long long int thisTask; // which part of the block this thread is processing
long long int basisIndex; // index of this thread's computational basis state
long long int densityIndex; // " " index of |basis><basis| in the flat density matrix
// array of each thread's collected probability, to be summed
extern __shared__ qreal tempReductionArray[];
// figure out which density matrix prob that this thread is assigned
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
basisIndex = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
densityIndex = (densityDim + 1) * basisIndex;
// record the probability in the CUDA-BLOCK-wide array
qreal prob = qureg.deviceStateVec.real[densityIndex]; // im[densityIndex] assumed ~ 0
tempReductionArray[threadIdx.x] = prob;
// sum the probs collected by this CUDA-BLOCK's threads into a per-CUDA-BLOCK array
__syncthreads();
if (threadIdx.x<blockDim.x/2){
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
}
__global__ void statevec_findProbabilityOfZeroKernel(
Qureg qureg, const int measureQubit, qreal *reducedArray
) {
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
index; // current index for first half block
// ----- temp variables
long long int thisTask; // task-based approach to expose loop-level parallelism with small granularity
long long int numTasks=qureg.numAmpsPerChunk>>1;
// (good for shared memory parallelism)
extern __shared__ qreal tempReductionArray[];
// ---------------------------------------------------------------- //
// dimensions //
// ---------------------------------------------------------------- //
sizeHalfBlock = 1LL << (measureQubit); // number of state vector elements to sum,
// and then the number to skip
sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries)
// ---------------------------------------------------------------- //
// find probability //
// ---------------------------------------------------------------- //
//
// --- task-based shared-memory parallel implementation
//
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
index = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
qreal realVal, imagVal;
realVal = stateVecReal[index];
imagVal = stateVecImag[index];
tempReductionArray[threadIdx.x] = realVal*realVal + imagVal*imagVal;
__syncthreads();
if (threadIdx.x<blockDim.x/2){
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
}
int getNumReductionLevels(long long int numValuesToReduce, int numReducedPerLevel){
int levels=0;
while (numValuesToReduce){
numValuesToReduce = numValuesToReduce/numReducedPerLevel;
levels++;
}
return levels;
}
void swapDouble(qreal **a, qreal **b){
qreal *temp;
temp = *a;
*a = *b;
*b = temp;
}
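/** Sums the diagonal elements of the density matrix whose measureQubit bit is 0, i.e. the
 * probability of measuring outcome 0. The sum is reduced in levels, each shrinking the number of
 * partial sums by a factor of up to REDUCE_SHARED_SIZE and ping-ponging between
 * firstLevelReduction and secondLevelReduction, until one value remains to copy back to the host.
 */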
qreal densmatr_findProbabilityOfZero(Qureg qureg, const int measureQubit)
{
long long int densityDim = 1LL << qureg.numQubitsRepresented;
long long int numValuesToReduce = densityDim >> 1; // half of the diagonal has measureQubit=0
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
int firstTime = 1;
while (numValuesToReduce > 1) {
// need less than one CUDA-BLOCK to reduce
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
// otherwise use only full CUDA-BLOCKS
else {
valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
// spawn threads to sum the probs in each block
if (firstTime) {
hipLaunchKernelGGL(( densmatr_findProbabilityOfZeroKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0,
qureg, measureQubit, qureg.firstLevelReduction);
firstTime = 0;
// sum the block probs
} else {
hipDeviceSynchronize();
hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0,
qureg.firstLevelReduction,
qureg.secondLevelReduction, valuesPerCUDABlock);
hipDeviceSynchronize();
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
qreal zeroProb;
hipMemcpy(&zeroProb, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost);
return zeroProb;
}
qreal statevec_findProbabilityOfZero(Qureg qureg, const int measureQubit)
{
long long int numValuesToReduce = qureg.numAmpsPerChunk>>1;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
qreal stateProb=0;
int firstTime=1;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
while(numValuesToReduce>1){
if (numValuesToReduce<maxReducedPerLevel){
// Need less than one CUDA block to reduce values
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
} else {
// Use full CUDA blocks, with block size constrained by shared mem usage
valuesPerCUDABlock = maxReducedPerLevel;
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
if (firstTime){
hipLaunchKernelGGL(( statevec_findProbabilityOfZeroKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0,
qureg, measureQubit, qureg.firstLevelReduction);
firstTime=0;
} else {
hipDeviceSynchronize();
hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0,
qureg.firstLevelReduction,
qureg.secondLevelReduction, valuesPerCUDABlock);
hipDeviceSynchronize();
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
hipMemcpy(&stateProb, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost);
return stateProb;
}
qreal statevec_calcProbOfOutcome(Qureg qureg, const int measureQubit, int outcome)
{
qreal outcomeProb = statevec_findProbabilityOfZero(qureg, measureQubit);
if (outcome==1)
outcomeProb = 1.0 - outcomeProb;
return outcomeProb;
}
qreal densmatr_calcProbOfOutcome(Qureg qureg, const int measureQubit, int outcome)
{
qreal outcomeProb = densmatr_findProbabilityOfZero(qureg, measureQubit);
if (outcome==1)
outcomeProb = 1.0 - outcomeProb;
return outcomeProb;
}
/** computes either a real or imag term in the inner product */
__global__ void statevec_calcInnerProductKernel(
int getRealComp,
qreal* vecReal1, qreal* vecImag1, qreal* vecReal2, qreal* vecImag2,
long long int numTermsToSum, qreal* reducedArray)
{
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= numTermsToSum) return;
// choose whether to calculate the real or imaginary term of the inner product
qreal innerProdTerm;
if (getRealComp)
innerProdTerm = vecReal1[index]*vecReal2[index] + vecImag1[index]*vecImag2[index];
else
innerProdTerm = vecReal1[index]*vecImag2[index] - vecImag1[index]*vecReal2[index];
// array of each thread's collected probability, to be summed
extern __shared__ qreal tempReductionArray[];
tempReductionArray[threadIdx.x] = innerProdTerm;
__syncthreads();
// every second thread reduces
if (threadIdx.x<blockDim.x/2)
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
/** Terrible code which unnecessarily individually computes and sums the real and imaginary components of the
* inner product, so as to not have to worry about keeping the sums separated during reduction.
* Truly disgusting, probably doubles runtime, please fix.
* @TODO could even do the kernel twice, storing real in bra.reduc and imag in ket.reduc?
*/
Complex statevec_calcInnerProduct(Qureg bra, Qureg ket) {
qreal innerProdReal, innerProdImag;
int getRealComp;
long long int numValuesToReduce;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel;
int firstTime;
// compute real component of inner product
getRealComp = 1;
numValuesToReduce = bra.numAmpsPerChunk;
maxReducedPerLevel = REDUCE_SHARED_SIZE;
firstTime = 1;
while (numValuesToReduce > 1) {
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
else {
valuesPerCUDABlock = maxReducedPerLevel;
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
if (firstTime) {
hipLaunchKernelGGL(( statevec_calcInnerProductKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0,
getRealComp,
bra.deviceStateVec.real, bra.deviceStateVec.imag,
ket.deviceStateVec.real, ket.deviceStateVec.imag,
numValuesToReduce,
bra.firstLevelReduction);
firstTime = 0;
} else {
hipDeviceSynchronize();
hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0,
bra.firstLevelReduction,
bra.secondLevelReduction, valuesPerCUDABlock);
hipDeviceSynchronize();
swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
hipMemcpy(&innerProdReal, bra.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost);
// compute imag component of inner product
getRealComp = 0;
numValuesToReduce = bra.numAmpsPerChunk;
maxReducedPerLevel = REDUCE_SHARED_SIZE;
firstTime = 1;
while (numValuesToReduce > 1) {
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
else {
valuesPerCUDABlock = maxReducedPerLevel;
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
if (firstTime) {
hipLaunchKernelGGL(( statevec_calcInnerProductKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0,
getRealComp,
bra.deviceStateVec.real, bra.deviceStateVec.imag,
ket.deviceStateVec.real, ket.deviceStateVec.imag,
numValuesToReduce,
bra.firstLevelReduction);
firstTime = 0;
} else {
hipDeviceSynchronize();
hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0,
bra.firstLevelReduction,
bra.secondLevelReduction, valuesPerCUDABlock);
hipDeviceSynchronize();
swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
hipMemcpy(&innerProdImag, bra.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost);
// return complex
Complex innerProd;
innerProd.real = innerProdReal;
innerProd.imag = innerProdImag;
return innerProd;
}
/** computes one term of (vec^*T) dens * vec */
__global__ void densmatr_calcFidelityKernel(Qureg dens, Qureg vec, long long int dim, qreal* reducedArray) {
// figure out which density matrix row to consider
long long int col;
long long int row = blockIdx.x*blockDim.x + threadIdx.x;
if (row >= dim) return;
qreal* densReal = dens.deviceStateVec.real;
qreal* densImag = dens.deviceStateVec.imag;
qreal* vecReal = vec.deviceStateVec.real;
qreal* vecImag = vec.deviceStateVec.imag;
// compute the row-th element of the product dens*vec
qreal prodReal = 0;
qreal prodImag = 0;
for (col=0LL; col < dim; col++) {
qreal densElemReal = densReal[dim*col + row];
qreal densElemImag = densImag[dim*col + row];
prodReal += densElemReal*vecReal[col] - densElemImag*vecImag[col];
prodImag += densElemReal*vecImag[col] + densElemImag*vecReal[col];
}
// multiply with row-th elem of (vec^*)
qreal termReal = prodImag*vecImag[row] + prodReal*vecReal[row];
// imag of every term should be zero, because each is a valid fidelity calc of an eigenstate
//qreal termImag = prodImag*vecReal[row] - prodReal*vecImag[row];
extern __shared__ qreal tempReductionArray[];
tempReductionArray[threadIdx.x] = termReal;
__syncthreads();
// every second thread reduces
if (threadIdx.x<blockDim.x/2)
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
// @TODO implement
qreal densmatr_calcFidelity(Qureg qureg, Qureg pureState) {
// we're summing the square of every term in the density matrix
long long int densityDim = 1LL << qureg.numQubitsRepresented;
long long int numValuesToReduce = densityDim;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
int firstTime = 1;
while (numValuesToReduce > 1) {
// need less than one CUDA-BLOCK to reduce
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
// otherwise use only full CUDA-BLOCKS
else {
valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
// dictates size of reduction array
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
// spawn threads to sum the probs in each block
// store the reduction in the pureState array
if (firstTime) {
hipLaunchKernelGGL(( densmatr_calcFidelityKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0,
qureg, pureState, densityDim, pureState.firstLevelReduction);
firstTime = 0;
// sum the block probs
} else {
hipDeviceSynchronize();
hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0,
pureState.firstLevelReduction,
pureState.secondLevelReduction, valuesPerCUDABlock);
hipDeviceSynchronize();
swapDouble(&(pureState.firstLevelReduction), &(pureState.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
qreal fidelity;
hipMemcpy(&fidelity, pureState.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost);
return fidelity;
}
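/** Collects |rho_ij|^2 = Re(rho_ij)^2 + Im(rho_ij)^2 for one density-matrix element per thread;
 * summed over all elements this equals Tr(rho^2), since Tr(rho^2) = Tr(rho rho^dagger)
 * = sum_ij |rho_ij|^2 for Hermitian rho.
 */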
__global__ void densmatr_calcPurityKernel(qreal* vecReal, qreal* vecImag, long long int numAmpsToSum, qreal *reducedArray) {
// figure out which density matrix term this thread is assigned
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= numAmpsToSum) return;
qreal term = vecReal[index]*vecReal[index] + vecImag[index]*vecImag[index];
// array of each thread's collected probability, to be summed
extern __shared__ qreal tempReductionArray[];
tempReductionArray[threadIdx.x] = term;
__syncthreads();
// every second thread reduces
if (threadIdx.x<blockDim.x/2)
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
/** Computes the trace of the density matrix squared */
qreal densmatr_calcPurity(Qureg qureg) {
// we're summing the square of every term in the density matrix
long long int numValuesToReduce = qureg.numAmpsPerChunk;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
int firstTime = 1;
while (numValuesToReduce > 1) {
// need less than one CUDA-BLOCK to reduce
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
// otherwise use only full CUDA-BLOCKS
else {
valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
// dictates size of reduction array
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
// spawn threads to sum the probs in each block
if (firstTime) {
hipLaunchKernelGGL(( densmatr_calcPurityKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0,
qureg.deviceStateVec.real, qureg.deviceStateVec.imag,
numValuesToReduce, qureg.firstLevelReduction);
firstTime = 0;
// sum the block probs
} else {
hipDeviceSynchronize();
hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0,
qureg.firstLevelReduction,
qureg.secondLevelReduction, valuesPerCUDABlock);
hipDeviceSynchronize();
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
qreal traceDensSquared;
hipMemcpy(&traceDensSquared, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost);
return traceDensSquared;
}
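/* Illustrative note (not in the original source): because the density matrix is Hermitian and stored
* as a flat array of amplitudes, Tr(rho^2) = sum_{ij} |rho_ij|^2 reduces to a plain sum of squared
* moduli. A serial sketch with hypothetical names re/im/numAmps:
* qreal purity = 0;
* for (long long int i=0; i<numAmps; i++)
* purity += re[i]*re[i] + im[i]*im[i];
*/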
__global__ void statevec_collapseToKnownProbOutcomeKernel(Qureg qureg, int measureQubit, int outcome, qreal totalProbability)
{
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
index; // current index for first half block
// ----- measured probability
qreal renorm; // probability (returned) value
// ----- temp variables
long long int thisTask; // task based approach for expose loop with small granularity
// (good for shared memory parallelism)
long long int numTasks=qureg.numAmpsPerChunk>>1;
// ---------------------------------------------------------------- //
// dimensions //
// ---------------------------------------------------------------- //
sizeHalfBlock = 1LL << (measureQubit); // number of state vector elements to sum,
// and then the number to skip
sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries)
// ---------------------------------------------------------------- //
// find probability //
// ---------------------------------------------------------------- //
//
// --- task-based shared-memory parallel implementation
//
renorm=1/sqrt(totalProbability);
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
index = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
if (outcome==0){
stateVecReal[index]=stateVecReal[index]*renorm;
stateVecImag[index]=stateVecImag[index]*renorm;
stateVecReal[index+sizeHalfBlock]=0;
stateVecImag[index+sizeHalfBlock]=0;
} else if (outcome==1){
stateVecReal[index]=0;
stateVecImag[index]=0;
stateVecReal[index+sizeHalfBlock]=stateVecReal[index+sizeHalfBlock]*renorm;
stateVecImag[index+sizeHalfBlock]=stateVecImag[index+sizeHalfBlock]*renorm;
}
}
/*
* outcomeProb must accurately be the probability of that qubit outcome in the state-vector, or
* else the state-vector will lose normalisation
*/
void statevec_collapseToKnownProbOutcome(Qureg qureg, const int measureQubit, int outcome, qreal outcomeProb)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
hipLaunchKernelGGL(( statevec_collapseToKnownProbOutcomeKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, measureQubit, outcome, outcomeProb);
}
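/* Illustrative note (not in the original source): each thread handles one (|..0..>, |..1..>) amplitude
* pair. For example, with measureQubit==1 the state vector splits into blocks of sizeBlock==4;
* thisTask==0 maps to (indexUp, indexLo)==(0,2), thisTask==1 to (1,3), thisTask==2 to (4,6), and so on.
* The half of each pair matching `outcome` is rescaled by 1/sqrt(outcomeProb) and the other half zeroed.
*/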
/** Maps thread ID to a |..0..><..0..| state and then locates |0><1|, |1><0| and |1><1| */
__global__ void densmatr_collapseToKnownProbOutcomeKernel(
qreal outcomeProb, qreal* vecReal, qreal *vecImag, long long int numBasesToVisit,
long long int part1, long long int part2, long long int part3,
long long int rowBit, long long int colBit, long long int desired, long long int undesired)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numBasesToVisit) return;
long long int base = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2);
// renormalise desired outcome
vecReal[base + desired] /= outcomeProb;
vecImag[base + desired] /= outcomeProb;
// kill undesired outcome
vecReal[base + undesired] = 0;
vecImag[base + undesired] = 0;
// kill |..0..><..1..| states
vecReal[base + colBit] = 0;
vecImag[base + colBit] = 0;
vecReal[base + rowBit] = 0;
vecImag[base + rowBit] = 0;
}
/** This involves finding |...i...><...j...| states and killing those where i!=j */
void densmatr_collapseToKnownProbOutcome(Qureg qureg, const int measureQubit, int outcome, qreal outcomeProb) {
int rowQubit = measureQubit + qureg.numQubitsRepresented;
long long int colBit = 1LL << measureQubit;
long long int rowBit = 1LL << rowQubit;
long long int numBasesToVisit = qureg.numAmpsPerChunk/4;
long long int part1 = colBit -1;
long long int part2 = (rowBit >> 1) - colBit;
long long int part3 = numBasesToVisit - (rowBit >> 1);
long long int desired, undesired;
if (outcome == 0) {
desired = 0;
undesired = colBit | rowBit;
} else {
desired = colBit | rowBit;
undesired = 0;
}
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numBasesToVisit / (qreal) threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_collapseToKnownProbOutcomeKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
outcomeProb, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numBasesToVisit,
part1, part2, part3, rowBit, colBit, desired, undesired);
}
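/* Illustrative note (not in the original source): part1/part2/part3 insert two zero bits into scanInd,
* at the column position (measureQubit) and the row position (rowQubit) of the flat density-matrix
* index. E.g. for measureQubit==0 and numQubitsRepresented==2 (colBit==1, rowBit==4):
* scanInd = b1 b0 (binary) -> base = b1 0 b0 0,
* from which the kernel addresses base+desired, base+undesired, base+colBit and base+rowBit.
*/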
__global__ void densmatr_addDensityMatrixKernel(Qureg combineQureg, qreal otherProb, Qureg otherQureg, long long int numAmpsToVisit) {
long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x;
if (ampInd >= numAmpsToVisit) return;
combineQureg.deviceStateVec.real[ampInd] *= 1-otherProb;
combineQureg.deviceStateVec.imag[ampInd] *= 1-otherProb;
combineQureg.deviceStateVec.real[ampInd] += otherProb*otherQureg.deviceStateVec.real[ampInd];
combineQureg.deviceStateVec.imag[ampInd] += otherProb*otherQureg.deviceStateVec.imag[ampInd];
}
void densmatr_addDensityMatrix(Qureg combineQureg, qreal otherProb, Qureg otherQureg) {
long long int numAmpsToVisit = combineQureg.numAmpsPerChunk;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_addDensityMatrixKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
combineQureg, otherProb, otherQureg, numAmpsToVisit
);
}
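/* Illustrative note (not in the original source): this forms the element-wise convex combination
* combineQureg <- (1 - otherProb)*combineQureg + otherProb*otherQureg
* over the flattened density matrices; the result remains a valid density matrix provided
* 0 <= otherProb <= 1 and both registers represent the same number of qubits.
*/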
/** Called once for every 4 amplitudes in density matrix
* Works by establishing the |..0..><..0..| state (for its given index) then
* visiting |..1..><..0..| and |..0..><..1..|. Labels |part1 X pa><rt2 NOT(X) part3|
* From the brain of Simon Benjamin
*/
__global__ void densmatr_oneQubitDephaseKernel(
qreal fac, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3,
long long int colBit, long long int rowBit)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numAmpsToVisit) return;
long long int ampInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2);
vecReal[ampInd + colBit] *= fac;
vecImag[ampInd + colBit] *= fac;
vecReal[ampInd + rowBit] *= fac;
vecImag[ampInd + rowBit] *= fac;
}
void densmatr_oneQubitDephase(Qureg qureg, const int targetQubit, qreal dephase) {
if (dephase == 0)
return;
long long int numAmpsToVisit = qureg.numAmpsPerChunk/4;
int rowQubit = targetQubit + qureg.numQubitsRepresented;
long long int colBit = 1LL << targetQubit;
long long int rowBit = 1LL << rowQubit;
long long int part1 = colBit - 1;
long long int part2 = (rowBit >> 1) - colBit;
long long int part3 = numAmpsToVisit - (rowBit >> 1);
qreal dephFac = 1 - dephase;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_oneQubitDephaseKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
dephFac, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit,
part1, part2, part3, colBit, rowBit);
}
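/* Illustrative note (not in the original source): dephasing leaves the diagonal blocks untouched and
* scales every |..0..><..1..| and |..1..><..0..| element by dephFac = 1 - dephase; in the single-qubit
* case the density matrix transforms as
* [[ p00, (1-d)*p01 ],
* [ (1-d)*p10, p11 ]] with d = dephase.
*/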
/** Called 12 times for every 16 amplitudes in density matrix
* Each sums from the |..0..0..><..0..0..| index to visit either
* |..0..0..><..0..1..|, |..0..0..><..1..0..|, |..0..0..><..1..1..|, |..0..1..><..0..0..|
* etc and so on to |..1..1..><..1..0|. Labels |part1 0 part2 0 par><t3 0 part4 0 part5|.
* From the brain of Simon Benjamin
*/
__global__ void densmatr_twoQubitDephaseKernel(
qreal fac, qreal* vecReal, qreal *vecImag, long long int numBackgroundStates, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3, long long int part4, long long int part5,
long long int colBit1, long long int rowBit1, long long int colBit2, long long int rowBit2)
{
long long int outerInd = blockIdx.x*blockDim.x + threadIdx.x;
if (outerInd >= numAmpsToVisit) return;
// sets meta in 1...14 excluding 5, 10, creating bit string DCBA for |..D..C..><..B..A|
int meta = 1 + (outerInd/numBackgroundStates);
if (meta > 4) meta++;
if (meta > 9) meta++;
long long int shift = rowBit2*((meta>>3)%2) + rowBit1*((meta>>2)%2) + colBit2*((meta>>1)%2) + colBit1*(meta%2);
long long int scanInd = outerInd % numBackgroundStates;
long long int stateInd = (
shift +
(scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4));
vecReal[stateInd] *= fac;
vecImag[stateInd] *= fac;
}
// @TODO is separating these 12 amplitudes really faster than letting every 16th base modify 12 elems?
void densmatr_twoQubitDephase(Qureg qureg, int qubit1, int qubit2, qreal dephase) {
if (dephase == 0)
return;
// assumes qubit2 > qubit1
int rowQubit1 = qubit1 + qureg.numQubitsRepresented;
int rowQubit2 = qubit2 + qureg.numQubitsRepresented;
long long int colBit1 = 1LL << qubit1;
long long int rowBit1 = 1LL << rowQubit1;
long long int colBit2 = 1LL << qubit2;
long long int rowBit2 = 1LL << rowQubit2;
long long int part1 = colBit1 - 1;
long long int part2 = (colBit2 >> 1) - colBit1;
long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1);
long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2);
long long int part5 = (qureg.numAmpsPerChunk/16) - (rowBit2 >> 3);
qreal dephFac = 1 - dephase;
// refers to states |a 0 b 0 c><d 0 e 0 f| (target qubits are fixed)
long long int numBackgroundStates = qureg.numAmpsPerChunk/16;
// 12 of these states experience dephasing
long long int numAmpsToVisit = 12 * numBackgroundStates;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_twoQubitDephaseKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
dephFac, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numBackgroundStates, numAmpsToVisit,
part1, part2, part3, part4, part5, colBit1, rowBit1, colBit2, rowBit2);
}
/** Works like oneQubitDephase but modifies every other element, and elements are averaged in pairs */
__global__ void densmatr_oneQubitDepolariseKernel(
qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3,
long long int bothBits)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numAmpsToVisit) return;
long long int baseInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2);
long long int targetInd = baseInd + bothBits;
qreal realAvDepol = depolLevel * 0.5 * (vecReal[baseInd] + vecReal[targetInd]);
qreal imagAvDepol = depolLevel * 0.5 * (vecImag[baseInd] + vecImag[targetInd]);
vecReal[baseInd] *= 1 - depolLevel;
vecImag[baseInd] *= 1 - depolLevel;
vecReal[targetInd] *= 1 - depolLevel;
vecImag[targetInd] *= 1 - depolLevel;
vecReal[baseInd] += realAvDepol;
vecImag[baseInd] += imagAvDepol;
vecReal[targetInd] += realAvDepol;
vecImag[targetInd] += imagAvDepol;
}
void densmatr_oneQubitDepolarise(Qureg qureg, const int targetQubit, qreal depolLevel) {
if (depolLevel == 0)
return;
densmatr_oneQubitDephase(qureg, targetQubit, depolLevel);
long long int numAmpsToVisit = qureg.numAmpsPerChunk/4;
int rowQubit = targetQubit + qureg.numQubitsRepresented;
long long int colBit = 1LL << targetQubit;
long long int rowBit = 1LL << rowQubit;
long long int bothBits = colBit | rowBit;
long long int part1 = colBit - 1;
long long int part2 = (rowBit >> 1) - colBit;
long long int part3 = numAmpsToVisit - (rowBit >> 1);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_oneQubitDepolariseKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
depolLevel, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit,
part1, part2, part3, bothBits);
}
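/* Illustrative note (not in the original source): one-qubit depolarising is realised as the dephase
* call above (damping the off-diagonal blocks) followed by mixing the two diagonal blocks:
* p00' = (1 - depol/2)*p00 + (depol/2)*p11
* p11' = (1 - depol/2)*p11 + (depol/2)*p00
* which the kernel implements via realAvDepol/imagAvDepol = depolLevel * (p00 + p11) / 2.
*/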
/** Called once for every 16 amplitudes */
__global__ void densmatr_twoQubitDepolariseKernel(
qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3,
long long int part4, long long int part5,
long long int rowCol1, long long int rowCol2)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numAmpsToVisit) return;
// index of |..0..0..><..0..0|
long long int ind00 = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4);
long long int ind01 = ind00 + rowCol1;
long long int ind10 = ind00 + rowCol2;
long long int ind11 = ind00 + rowCol1 + rowCol2;
qreal realAvDepol = depolLevel * 0.25 * (
vecReal[ind00] + vecReal[ind01] + vecReal[ind10] + vecReal[ind11]);
qreal imagAvDepol = depolLevel * 0.25 * (
vecImag[ind00] + vecImag[ind01] + vecImag[ind10] + vecImag[ind11]);
qreal retain = 1 - depolLevel;
vecReal[ind00] *= retain; vecImag[ind00] *= retain;
vecReal[ind01] *= retain; vecImag[ind01] *= retain;
vecReal[ind10] *= retain; vecImag[ind10] *= retain;
vecReal[ind11] *= retain; vecImag[ind11] *= retain;
vecReal[ind00] += realAvDepol; vecImag[ind00] += imagAvDepol;
vecReal[ind01] += realAvDepol; vecImag[ind01] += imagAvDepol;
vecReal[ind10] += realAvDepol; vecImag[ind10] += imagAvDepol;
vecReal[ind11] += realAvDepol; vecImag[ind11] += imagAvDepol;
}
void densmatr_twoQubitDepolarise(Qureg qureg, int qubit1, int qubit2, qreal depolLevel) {
if (depolLevel == 0)
return;
// assumes qubit2 > qubit1
densmatr_twoQubitDephase(qureg, qubit1, qubit2, depolLevel);
int rowQubit1 = qubit1 + qureg.numQubitsRepresented;
int rowQubit2 = qubit2 + qureg.numQubitsRepresented;
long long int colBit1 = 1LL << qubit1;
long long int rowBit1 = 1LL << rowQubit1;
long long int colBit2 = 1LL << qubit2;
long long int rowBit2 = 1LL << rowQubit2;
long long int rowCol1 = colBit1 | rowBit1;
long long int rowCol2 = colBit2 | rowBit2;
long long int numAmpsToVisit = qureg.numAmpsPerChunk/16;
long long int part1 = colBit1 - 1;
long long int part2 = (colBit2 >> 1) - colBit1;
long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1);
long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2);
long long int part5 = numAmpsToVisit - (rowBit2 >> 3);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
hipLaunchKernelGGL(( densmatr_twoQubitDepolariseKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0,
depolLevel, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit,
part1, part2, part3, part4, part5, rowCol1, rowCol2);
}
void seedQuESTDefault(){
// init MT random number generator with three keys -- time, pid and a hash of hostname
// for the MPI version, it is ok that all procs will get the same seed as random numbers will only be
// used by the master process
unsigned long int key[3];
getQuESTDefaultSeedKey(key);
init_by_array(key, 3);
}
#ifdef __cplusplus
}
#endif
| 920e7861685d1a1305d9573e88c0743692415b4b.cu | // Distributed under MIT licence. See https://github.com/aniabrown/QuEST_GPU/blob/master/LICENCE.txt for details
/** @file
* An implementation of the backend in ../QuEST_internal.h for a GPU environment.
*/
# include "../QuEST.h"
# include "../QuEST_precision.h"
# include "../QuEST_internal.h" // purely to resolve getQuESTDefaultSeedKey
# include "../mt19937ar.h"
# include <stdlib.h>
# include <stdio.h>
# include <math.h>
# define REDUCE_SHARED_SIZE 512
# define DEBUG 0
static __device__ int extractBit (int locationOfBitFromRight, long long int theEncodedNumber)
{
return (theEncodedNumber & ( 1LL << locationOfBitFromRight )) >> locationOfBitFromRight;
}
#ifdef __cplusplus
extern "C" {
#endif
void statevec_setAmps(Qureg qureg, long long int startInd, qreal* reals, qreal* imags, long long int numAmps) {
cudaDeviceSynchronize();
cudaMemcpy(
qureg.deviceStateVec.real + startInd,
reals,
numAmps * sizeof(*(qureg.deviceStateVec.real)),
cudaMemcpyHostToDevice);
cudaMemcpy(
qureg.deviceStateVec.imag + startInd,
imags,
numAmps * sizeof(*(qureg.deviceStateVec.real)),
cudaMemcpyHostToDevice);
}
/** works for both statevectors and density matrices */
void statevec_cloneQureg(Qureg targetQureg, Qureg copyQureg) {
// copy copyQureg's GPU statevec to targetQureg's GPU statevec
cudaDeviceSynchronize();
cudaMemcpy(
targetQureg.deviceStateVec.real,
copyQureg.deviceStateVec.real,
targetQureg.numAmpsPerChunk*sizeof(*(targetQureg.deviceStateVec.real)),
cudaMemcpyDeviceToDevice);
cudaMemcpy(
targetQureg.deviceStateVec.imag,
copyQureg.deviceStateVec.imag,
targetQureg.numAmpsPerChunk*sizeof(*(targetQureg.deviceStateVec.imag)),
cudaMemcpyDeviceToDevice);
}
__global__ void densmatr_initPureStateKernel(
long long int numPureAmps,
qreal *targetVecReal, qreal *targetVecImag,
qreal *copyVecReal, qreal *copyVecImag)
{
// this is a particular index of the pure copyQureg
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=numPureAmps) return;
qreal realRow = copyVecReal[index];
qreal imagRow = copyVecImag[index];
for (long long int col=0; col < numPureAmps; col++) {
qreal realCol = copyVecReal[col];
qreal imagCol = - copyVecImag[col]; // minus for conjugation
targetVecReal[col*numPureAmps + index] = realRow*realCol - imagRow*imagCol;
targetVecImag[col*numPureAmps + index] = realRow*imagCol + imagRow*realCol;
}
}
void densmatr_initPureState(Qureg targetQureg, Qureg copyQureg)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(copyQureg.numAmpsPerChunk)/threadsPerCUDABlock);
densmatr_initPureStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
copyQureg.numAmpsPerChunk,
targetQureg.deviceStateVec.real, targetQureg.deviceStateVec.imag,
copyQureg.deviceStateVec.real, copyQureg.deviceStateVec.imag);
}
__global__ void densmatr_initPlusStateKernel(long long int stateVecSize, qreal probFactor, qreal *stateVecReal, qreal *stateVecImag){
long long int index;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
stateVecReal[index] = probFactor;
stateVecImag[index] = 0.0;
}
void densmatr_initPlusState(Qureg qureg)
{
qreal probFactor = 1.0/((qreal) (1LL << qureg.numQubitsRepresented));
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
densmatr_initPlusStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
qureg.numAmpsPerChunk,
probFactor,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag);
}
__global__ void densmatr_initClassicalStateKernel(
long long int densityNumElems,
qreal *densityReal, qreal *densityImag,
long long int densityInd)
{
// initialise the state to all zeros
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= densityNumElems) return;
densityReal[index] = 0.0;
densityImag[index] = 0.0;
if (index==densityInd){
// classical state has probability 1
densityReal[densityInd] = 1.0;
densityImag[densityInd] = 0.0;
}
}
void densmatr_initClassicalState(Qureg qureg, long long int stateInd)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
// index of the desired state in the flat density matrix
long long int densityDim = 1LL << qureg.numQubitsRepresented;
long long int densityInd = (densityDim + 1)*stateInd;
// identical to pure version
densmatr_initClassicalStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag, densityInd);
}
void statevec_createQureg(Qureg *qureg, int numQubits, QuESTEnv env)
{
// allocate CPU memory
long long int numAmps = 1LL << numQubits;
long long int numAmpsPerRank = numAmps/env.numRanks;
qureg->stateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->stateVec.real)));
qureg->stateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->stateVec.imag)));
if (env.numRanks>1){
qureg->pairStateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->pairStateVec.real)));
qureg->pairStateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(*(qureg->pairStateVec.imag)));
}
}
// check cpu memory allocation was successful
if ( (!(qureg->stateVec.real) || !(qureg->stateVec.imag))
&& numAmpsPerRank ) {
printf("Could not allocate memory!\n");
exit (EXIT_FAILURE);
}
if ( env.numRanks>1 && (!(qureg->pairStateVec.real) || !(qureg->pairStateVec.imag))
&& numAmpsPerRank ) {
printf("Could not allocate memory!\n");
exit (EXIT_FAILURE);
}
qureg->numQubitsInStateVec = numQubits;
qureg->numAmpsPerChunk = numAmpsPerRank;
qureg->numAmpsTotal = numAmps;
qureg->chunkId = env.rank;
qureg->numChunks = env.numRanks;
qureg->isDensityMatrix = 0;
// allocate GPU memory
cudaMalloc(&(qureg->deviceStateVec.real), qureg->numAmpsPerChunk*sizeof(*(qureg->deviceStateVec.real)));
cudaMalloc(&(qureg->deviceStateVec.imag), qureg->numAmpsPerChunk*sizeof(*(qureg->deviceStateVec.imag)));
cudaMalloc(&(qureg->firstLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)REDUCE_SHARED_SIZE)*sizeof(qreal));
cudaMalloc(&(qureg->secondLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)(REDUCE_SHARED_SIZE*REDUCE_SHARED_SIZE))*
sizeof(qreal));
// check gpu memory allocation was successful
if (!(qureg->deviceStateVec.real) || !(qureg->deviceStateVec.imag)){
printf("Could not allocate memory on GPU!\n");
exit (EXIT_FAILURE);
}
}
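/* Illustrative note (not in the original source): for an N-qubit state vector the allocations above
* total roughly 2 * 2^N * sizeof(qreal) bytes on the host and again on the device (real plus imag
* arrays), plus two small reduction buffers of about 2^N / REDUCE_SHARED_SIZE entries. A hypothetical
* back-of-envelope check:
* size_t bytesPerSide = 2 * (1LL << numQubits) * sizeof(qreal); // real + imag arrays
*/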
void statevec_destroyQureg(Qureg qureg, QuESTEnv env)
{
// Free CPU memory
free(qureg.stateVec.real);
free(qureg.stateVec.imag);
if (env.numRanks>1){
free(qureg.pairStateVec.real);
free(qureg.pairStateVec.imag);
}
// Free GPU memory
cudaFree(qureg.deviceStateVec.real);
cudaFree(qureg.deviceStateVec.imag);
}
int GPUExists(void){
int deviceCount, device;
int gpuDeviceCount = 0;
struct cudaDeviceProp properties;
cudaError_t cudaResultCode = cudaGetDeviceCount(&deviceCount);
if (cudaResultCode != cudaSuccess) deviceCount = 0;
/* machines with no GPUs can still report one emulation device */
for (device = 0; device < deviceCount; ++device) {
cudaGetDeviceProperties(&properties, device);
if (properties.major != 9999) { /* 9999 means emulation only */
++gpuDeviceCount;
}
}
if (gpuDeviceCount) return 1;
else return 0;
}
QuESTEnv createQuESTEnv(void) {
// init MPI environment
if (!GPUExists()){
printf("Trying to run GPU code with no GPU available\n");
exit(EXIT_FAILURE);
}
QuESTEnv env;
env.rank=0;
env.numRanks=1;
seedQuESTDefault();
return env;
}
void syncQuESTEnv(QuESTEnv env){
cudaDeviceSynchronize();
}
int syncQuESTSuccess(int successCode){
return successCode;
}
void destroyQuESTEnv(QuESTEnv env){
// MPI finalize goes here in MPI version. Call this function anyway for consistency
}
void reportQuESTEnv(QuESTEnv env){
printf("EXECUTION ENVIRONMENT:\n");
printf("Running locally on one node with GPU\n");
printf("Number of ranks is %d\n", env.numRanks);
# ifdef _OPENMP
printf("OpenMP enabled\n");
printf("Number of threads available is %d\n", omp_get_max_threads());
# else
printf("OpenMP disabled\n");
# endif
}
void getEnvironmentString(QuESTEnv env, Qureg qureg, char str[200]){
sprintf(str, "%dqubits_GPU_noMpi_noOMP", qureg.numQubitsInStateVec);
}
void copyStateToGPU(Qureg qureg)
{
if (DEBUG) printf("Copying data to GPU\n");
cudaMemcpy(qureg.deviceStateVec.real, qureg.stateVec.real,
qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.real)), cudaMemcpyHostToDevice);
cudaMemcpy(qureg.deviceStateVec.imag, qureg.stateVec.imag,
qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.imag)), cudaMemcpyHostToDevice);
if (DEBUG) printf("Finished copying data to GPU\n");
}
void copyStateFromGPU(Qureg qureg)
{
cudaDeviceSynchronize();
if (DEBUG) printf("Copying data from GPU\n");
cudaMemcpy(qureg.stateVec.real, qureg.deviceStateVec.real,
qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.real)), cudaMemcpyDeviceToHost);
cudaMemcpy(qureg.stateVec.imag, qureg.deviceStateVec.imag,
qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.imag)), cudaMemcpyDeviceToHost);
if (DEBUG) printf("Finished copying data from GPU\n");
}
/** Print the current state vector of probability amplitudes for a set of qubits to standard out.
For debugging purposes. Each rank should print output serially. Only print output for systems <= 5 qubits
*/
void statevec_reportStateToScreen(Qureg qureg, QuESTEnv env, int reportRank){
long long int index;
int rank;
copyStateFromGPU(qureg);
if (qureg.numQubitsInStateVec<=5){
for (rank=0; rank<qureg.numChunks; rank++){
if (qureg.chunkId==rank){
if (reportRank) {
printf("Reporting state from rank %d [\n", qureg.chunkId);
//printf("\trank, index, real, imag\n");
printf("real, imag\n");
} else if (rank==0) {
printf("Reporting state [\n");
printf("real, imag\n");
}
for(index=0; index<qureg.numAmpsPerChunk; index++){
printf(REAL_STRING_FORMAT ", " REAL_STRING_FORMAT "\n", qureg.stateVec.real[index], qureg.stateVec.imag[index]);
}
if (reportRank || rank==qureg.numChunks-1) printf("]\n");
}
syncQuESTEnv(env);
}
}
}
qreal statevec_getRealAmp(Qureg qureg, long long int index){
qreal el=0;
cudaMemcpy(&el, &(qureg.deviceStateVec.real[index]),
sizeof(*(qureg.deviceStateVec.real)), cudaMemcpyDeviceToHost);
return el;
}
qreal statevec_getImagAmp(Qureg qureg, long long int index){
qreal el=0;
cudaMemcpy(&el, &(qureg.deviceStateVec.imag[index]),
sizeof(*(qureg.deviceStateVec.imag)), cudaMemcpyDeviceToHost);
return el;
}
__global__ void statevec_initZeroStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){
long long int index;
// initialise the state to |0000..0000>
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
stateVecReal[index] = 0.0;
stateVecImag[index] = 0.0;
if (index==0){
// zero state |0000..0000> has probability 1
stateVecReal[0] = 1.0;
stateVecImag[0] = 0.0;
}
}
void statevec_initZeroState(Qureg qureg)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_initZeroStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag);
}
__global__ void statevec_initPlusStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){
long long int index;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
qreal normFactor = 1.0/sqrt((qreal)stateVecSize);
stateVecReal[index] = normFactor;
stateVecImag[index] = 0.0;
}
void statevec_initPlusState(Qureg qureg)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_initPlusStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag);
}
__global__ void statevec_initClassicalStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, long long int stateInd){
long long int index;
// initialise the state to |stateInd>
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
stateVecReal[index] = 0.0;
stateVecImag[index] = 0.0;
if (index==stateInd){
// classical state has probability 1
stateVecReal[stateInd] = 1.0;
stateVecImag[stateInd] = 0.0;
}
}
void statevec_initClassicalState(Qureg qureg, long long int stateInd)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_initClassicalStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag, stateInd);
}
__global__ void statevec_initStateDebugKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){
long long int index;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
stateVecReal[index] = (index*2.0)/10.0;
stateVecImag[index] = (index*2.0+1.0)/10.0;
}
void statevec_initStateDebug(Qureg qureg)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_initStateDebugKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
qureg.numAmpsPerChunk,
qureg.deviceStateVec.real,
qureg.deviceStateVec.imag);
}
__global__ void statevec_initStateOfSingleQubitKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, int qubitId, int outcome){
long long int index;
int bit;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
qreal normFactor = 1.0/sqrt((qreal)stateVecSize/2);
bit = extractBit(qubitId, index);
if (bit==outcome) {
stateVecReal[index] = normFactor;
stateVecImag[index] = 0.0;
} else {
stateVecReal[index] = 0.0;
stateVecImag[index] = 0.0;
}
}
void statevec_initStateOfSingleQubit(Qureg *qureg, int qubitId, int outcome)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg->numAmpsPerChunk)/threadsPerCUDABlock);
statevec_initStateOfSingleQubitKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg->numAmpsPerChunk, qureg->deviceStateVec.real, qureg->deviceStateVec.imag, qubitId, outcome);
}
// returns 1 if successful, else 0
int statevec_initStateFromSingleFile(Qureg *qureg, char filename[200], QuESTEnv env){
long long int chunkSize, stateVecSize;
long long int indexInChunk, totalIndex;
chunkSize = qureg->numAmpsPerChunk;
stateVecSize = chunkSize*qureg->numChunks;
qreal *stateVecReal = qureg->stateVec.real;
qreal *stateVecImag = qureg->stateVec.imag;
FILE *fp;
char line[200];
fp = fopen(filename, "r");
if (fp == NULL)
return 0;
indexInChunk = 0; totalIndex = 0;
while (fgets(line, sizeof(char)*200, fp) != NULL && totalIndex<stateVecSize){
if (line[0]!='#'){
int chunkId = totalIndex/chunkSize;
if (chunkId==qureg->chunkId){
# if QuEST_PREC==1
sscanf(line, "%f, %f", &(stateVecReal[indexInChunk]),
&(stateVecImag[indexInChunk]));
# elif QuEST_PREC==2
sscanf(line, "%lf, %lf", &(stateVecReal[indexInChunk]),
&(stateVecImag[indexInChunk]));
# elif QuEST_PREC==4
sscanf(line, "%lf, %lf", &(stateVecReal[indexInChunk]),
&(stateVecImag[indexInChunk]));
# endif
indexInChunk += 1;
}
totalIndex += 1;
}
}
fclose(fp);
copyStateToGPU(*qureg);
// indicate success
return 1;
}
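/* Illustrative note (not in the original source): judging from the sscanf format above, the expected
* file layout is one "real, imag" amplitude pair per line, with '#' beginning a comment line, e.g.
* # single-qubit |+> state
* 0.7071067811865475, 0.0
* 0.7071067811865475, 0.0
*/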
int statevec_compareStates(Qureg mq1, Qureg mq2, qreal precision){
qreal diff;
int chunkSize = mq1.numAmpsPerChunk;
copyStateFromGPU(mq1);
copyStateFromGPU(mq2);
for (int i=0; i<chunkSize; i++){
diff = mq1.stateVec.real[i] - mq2.stateVec.real[i];
if (diff<0) diff *= -1;
if (diff>precision) return 0;
diff = mq1.stateVec.imag[i] - mq2.stateVec.imag[i];
if (diff<0) diff *= -1;
if (diff>precision) return 0;
}
return 1;
}
__global__ void statevec_compactUnitaryKernel (Qureg qureg, const int rotQubit, Complex alpha, Complex beta){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
long long int thisTask; // task based approach for expose loop with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << rotQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
//! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
qreal alphaImag=alpha.imag, alphaReal=alpha.real;
qreal betaImag=beta.imag, betaReal=beta.real;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo]
stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp
- betaReal*stateRealLo - betaImag*stateImagLo;
stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp
- betaReal*stateImagLo + betaImag*stateRealLo;
// state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo]
stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp
+ alphaReal*stateRealLo + alphaImag*stateImagLo;
stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp
+ alphaReal*stateImagLo - alphaImag*stateRealLo;
}
void statevec_compactUnitary(Qureg qureg, const int targetQubit, Complex alpha, Complex beta)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_compactUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, alpha, beta);
}
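/* Illustrative note (not in the original source): alpha and beta parametrise the single-qubit unitary
* U = [[ alpha, -conj(beta) ],
* [ beta, conj(alpha) ]] (assuming |alpha|^2 + |beta|^2 == 1),
* which is exactly the update written out component-wise on each (indexUp, indexLo) pair in the kernel.
*/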
__global__ void statevec_controlledCompactUnitaryKernel (Qureg qureg, const int controlQubit, const int targetQubit, Complex alpha, Complex beta){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
long long int thisTask; // task based approach for expose loop with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
int controlBit;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
//! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
qreal alphaImag=alpha.imag, alphaReal=alpha.real;
qreal betaImag=beta.imag, betaReal=beta.real;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
controlBit = extractBit(controlQubit, indexUp);
if (controlBit){
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo]
stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp
- betaReal*stateRealLo - betaImag*stateImagLo;
stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp
- betaReal*stateImagLo + betaImag*stateRealLo;
// state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo]
stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp
+ alphaReal*stateRealLo + alphaImag*stateImagLo;
stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp
+ alphaReal*stateImagLo - alphaImag*stateRealLo;
}
}
void statevec_controlledCompactUnitary(Qureg qureg, const int controlQubit, const int targetQubit, Complex alpha, Complex beta)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_controlledCompactUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, alpha, beta);
}
__global__ void statevec_unitaryKernel(Qureg qureg, const int targetQubit, ComplexMatrix2 u){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
long long int thisTask; // task based approach for expose loop with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
//! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo]
stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp
+ u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo;
stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp
+ u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo;
// state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo]
stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp
+ u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo;
stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp
+ u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo;
}
void statevec_unitary(Qureg qureg, const int targetQubit, ComplexMatrix2 u)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_unitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, u);
}
__global__ void statevec_controlledUnitaryKernel(Qureg qureg, const int controlQubit, const int targetQubit, ComplexMatrix2 u){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
long long int thisTask; // task based approach for expose loop with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
int controlBit;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
//! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
controlBit = extractBit(controlQubit, indexUp);
if (controlBit){
// state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo]
stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp
+ u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo;
stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp
+ u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo;
// state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo]
stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp
+ u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo;
stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp
+ u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo;
}
}
void statevec_controlledUnitary(Qureg qureg, const int controlQubit, const int targetQubit, ComplexMatrix2 u)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_controlledUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, u);
}
__global__ void statevec_multiControlledUnitaryKernel(Qureg qureg, long long int mask, const int targetQubit, ComplexMatrix2 u){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
long long int thisTask; // task based approach for expose loop with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
//! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
if (mask == (mask & indexUp) ){
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo]
stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp
+ u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo;
stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp
+ u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo;
// state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo]
stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp
+ u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo;
stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp
+ u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo;
}
}
void statevec_multiControlledUnitary(Qureg qureg, int *controlQubits, int numControlQubits, const int targetQubit, ComplexMatrix2 u)
{
int threadsPerCUDABlock, CUDABlocks;
long long int mask=0;
for (int i=0; i<numControlQubits; i++) mask = mask | (1LL<<controlQubits[i]);
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_multiControlledUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, mask, targetQubit, u);
}
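/* Illustrative note (not in the original source): the condition `mask == (mask & indexUp)` requires
* every control bit to be set in the amplitude's index; e.g. controlQubits {0, 2} give mask == 0b101,
* so only basis indices of the form ...1x1 (binary) have the 2x2 unitary applied to their target pair.
*/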
__global__ void statevec_pauliXKernel(Qureg qureg, const int targetQubit){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp, // storage for previous state values
stateImagUp; // (used in updates)
// ----- temp variables
long long int thisTask; // task based approach for expose loop with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
//! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateVecReal[indexUp] = stateVecReal[indexLo];
stateVecImag[indexUp] = stateVecImag[indexLo];
stateVecReal[indexLo] = stateRealUp;
stateVecImag[indexLo] = stateImagUp;
}
void statevec_pauliX(Qureg qureg, const int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_pauliXKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit);
}
__global__ void statevec_pauliYKernel(Qureg qureg, const int targetQubit, const int conjFac){
long long int sizeHalfBlock = 1LL << targetQubit;
long long int sizeBlock = 2LL * sizeHalfBlock;
long long int numTasks = qureg.numAmpsPerChunk >> 1;
long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
long long int thisBlock = thisTask / sizeHalfBlock;
long long int indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
long long int indexLo = indexUp + sizeHalfBlock;
qreal stateRealUp, stateImagUp;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
// update under +-{{0, -i}, {i, 0}}
stateVecReal[indexUp] = conjFac * stateVecImag[indexLo];
stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo];
stateVecReal[indexLo] = conjFac * -stateImagUp;
stateVecImag[indexLo] = conjFac * stateRealUp;
}
void statevec_pauliY(Qureg qureg, const int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_pauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, 1);
}
void statevec_pauliYConj(Qureg qureg, const int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_pauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, -1);
}
__global__ void statevec_controlledPauliYKernel(Qureg qureg, const int controlQubit, const int targetQubit, const int conjFac)
{
long long int index;
long long int sizeBlock, sizeHalfBlock;
long long int stateVecSize;
int controlBit;
qreal stateRealUp, stateImagUp;
long long int thisBlock, indexUp, indexLo;
sizeHalfBlock = 1LL << targetQubit;
sizeBlock = 2LL * sizeHalfBlock;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=(stateVecSize>>1)) return;
thisBlock = index / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + index%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
controlBit = extractBit(controlQubit, indexUp);
if (controlBit){
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
// update under +-{{0, -i}, {i, 0}}
stateVecReal[indexUp] = conjFac * stateVecImag[indexLo];
stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo];
stateVecReal[indexLo] = conjFac * -stateImagUp;
stateVecImag[indexLo] = conjFac * stateRealUp;
}
}
void statevec_controlledPauliY(Qureg qureg, const int controlQubit, const int targetQubit)
{
int conjFactor = 1;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_controlledPauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, conjFactor);
}
void statevec_controlledPauliYConj(Qureg qureg, const int controlQubit, const int targetQubit)
{
int conjFactor = -1;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_controlledPauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, conjFactor);
}
__global__ void statevec_phaseShiftByTermKernel(Qureg qureg, const int targetQubit, qreal cosAngle, qreal sinAngle) {
long long int sizeBlock, sizeHalfBlock;
long long int thisBlock, indexUp,indexLo;
qreal stateRealLo, stateImagLo;
long long int thisTask;
const long long int numTasks = qureg.numAmpsPerChunk >> 1;
sizeHalfBlock = 1LL << targetQubit;
sizeBlock = 2LL * sizeHalfBlock;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
stateVecReal[indexLo] = cosAngle*stateRealLo - sinAngle*stateImagLo;
stateVecImag[indexLo] = sinAngle*stateRealLo + cosAngle*stateImagLo;
}
void statevec_phaseShiftByTerm(Qureg qureg, const int targetQubit, Complex term)
{
qreal cosAngle = term.real;
qreal sinAngle = term.imag;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_phaseShiftByTermKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, cosAngle, sinAngle);
}
__global__ void statevec_controlledPhaseShiftKernel(Qureg qureg, const int idQubit1, const int idQubit2, qreal cosAngle, qreal sinAngle)
{
long long int index;
long long int stateVecSize;
int bit1, bit2;
qreal stateRealLo, stateImagLo;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
bit1 = extractBit (idQubit1, index);
bit2 = extractBit (idQubit2, index);
if (bit1 && bit2) {
stateRealLo = stateVecReal[index];
stateImagLo = stateVecImag[index];
stateVecReal[index] = cosAngle*stateRealLo - sinAngle*stateImagLo;
stateVecImag[index] = sinAngle*stateRealLo + cosAngle*stateImagLo;
}
}
void statevec_controlledPhaseShift(Qureg qureg, const int idQubit1, const int idQubit2, qreal angle)
{
qreal cosAngle = cos(angle);
qreal sinAngle = sin(angle);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); // kernel visits every amplitude
statevec_controlledPhaseShiftKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, idQubit1, idQubit2, cosAngle, sinAngle);
}
__global__ void statevec_multiControlledPhaseShiftKernel(Qureg qureg, long long int mask, qreal cosAngle, qreal sinAngle) {
qreal stateRealLo, stateImagLo;
long long int index;
long long int stateVecSize;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
if (mask == (mask & index) ){
stateRealLo = stateVecReal[index];
stateImagLo = stateVecImag[index];
stateVecReal[index] = cosAngle*stateRealLo - sinAngle*stateImagLo;
stateVecImag[index] = sinAngle*stateRealLo + cosAngle*stateImagLo;
}
}
void statevec_multiControlledPhaseShift(Qureg qureg, int *controlQubits, int numControlQubits, qreal angle)
{
qreal cosAngle = cos(angle);
qreal sinAngle = sin(angle);
long long int mask=0;
for (int i=0; i<numControlQubits; i++)
mask = mask | (1LL<<controlQubits[i]);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_multiControlledPhaseShiftKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, mask, cosAngle, sinAngle);
}
qreal densmatr_calcTotalProb(Qureg qureg) {
// computes the trace using Kahan summation
qreal pTotal=0;
qreal y, t, c;
c = 0;
long long int numCols = 1LL << qureg.numQubitsRepresented;
long long diagIndex;
copyStateFromGPU(qureg);
for (int col=0; col< numCols; col++) {
diagIndex = col*(numCols + 1);
y = qureg.stateVec.real[diagIndex] - c;
t = pTotal + y;
c = ( t - pTotal ) - y; // brackets are important
pTotal = t;
}
return pTotal;
}
qreal statevec_calcTotalProb(Qureg qureg){
/* IJB - implemented using Kahan summation for greater accuracy at a slight floating
point operation overhead. For more details see https://en.wikipedia.org/wiki/Kahan_summation_algorithm */
/* Don't change the bracketing in this routine! */
qreal pTotal=0;
qreal y, t, c;
long long int index;
long long int numAmpsPerRank = qureg.numAmpsPerChunk;
copyStateFromGPU(qureg);
c = 0.0;
for (index=0; index<numAmpsPerRank; index++){
/* Perform pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index]; by Kahan */
// pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index];
y = qureg.stateVec.real[index]*qureg.stateVec.real[index] - c;
t = pTotal + y;
c = ( t - pTotal ) - y;
pTotal = t;
/* Perform pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index]; by Kahan */
//pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index];
y = qureg.stateVec.imag[index]*qureg.stateVec.imag[index] - c;
t = pTotal + y;
c = ( t - pTotal ) - y;
pTotal = t;
}
return pTotal;
}
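/* Illustrative note (not in the original source): Kahan summation keeps a running compensation c so
* that tiny per-amplitude probabilities are not lost when added to a large partial sum. A minimal
* generic sketch of the same pattern, with hypothetical inputs x and n:
* qreal sum = 0, c = 0;
* for (long long int i=0; i<n; i++) {
* qreal y = x[i] - c;
* qreal t = sum + y;
* c = (t - sum) - y;
* sum = t;
* }
*/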
__global__ void statevec_controlledPhaseFlipKernel(Qureg qureg, const int idQubit1, const int idQubit2)
{
long long int index;
long long int stateVecSize;
int bit1, bit2;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
bit1 = extractBit (idQubit1, index);
bit2 = extractBit (idQubit2, index);
if (bit1 && bit2) {
stateVecReal [index] = - stateVecReal [index];
stateVecImag [index] = - stateVecImag [index];
}
}
void statevec_controlledPhaseFlip(Qureg qureg, const int idQubit1, const int idQubit2)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_controlledPhaseFlipKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, idQubit1, idQubit2);
}
__global__ void statevec_multiControlledPhaseFlipKernel(Qureg qureg, long long int mask)
{
long long int index;
long long int stateVecSize;
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=stateVecSize) return;
if (mask == (mask & index) ){
stateVecReal [index] = - stateVecReal [index];
stateVecImag [index] = - stateVecImag [index];
}
}
void statevec_multiControlledPhaseFlip(Qureg qureg, int *controlQubits, int numControlQubits)
{
int threadsPerCUDABlock, CUDABlocks;
long long int mask=0;
for (int i=0; i<numControlQubits; i++) mask = mask | (1LL<<controlQubits[i]);
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_multiControlledPhaseFlipKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, mask);
}
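// The Hadamard kernel below pairs amplitudes whose indices differ only in the target
// qubit's bit: with sizeHalfBlock = 2^targetQubit, indexUp enumerates every basis state
// whose target bit is 0 and indexLo = indexUp + sizeHalfBlock is the same state with the
// target bit set. Each thread mixes one such pair with weight 1/sqrt(2).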
__global__ void statevec_hadamardKernel (Qureg qureg, const int targetQubit){
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
// ----- temp variables
qreal stateRealUp,stateRealLo, // storage for previous state values
stateImagUp,stateImagLo; // (used in updates)
// ----- temp variables
long long int thisTask; // task-based approach to expose loop parallelism with small granularity
const long long int numTasks=qureg.numAmpsPerChunk>>1;
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
// ---------------------------------------------------------------- //
// rotate //
// ---------------------------------------------------------------- //
//! fix -- not necessary for GPU version
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
qreal recRoot2 = 1.0/sqrt(2.0);
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
stateVecReal[indexUp] = recRoot2*(stateRealUp + stateRealLo);
stateVecImag[indexUp] = recRoot2*(stateImagUp + stateImagLo);
stateVecReal[indexLo] = recRoot2*(stateRealUp - stateRealLo);
stateVecImag[indexLo] = recRoot2*(stateImagUp - stateImagLo);
}
void statevec_hadamard(Qureg qureg, const int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_hadamardKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit);
}
__global__ void statevec_controlledNotKernel(Qureg qureg, const int controlQubit, const int targetQubit)
{
long long int index;
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
long long int stateVecSize;
int controlBit;
// ----- temp variables
qreal stateRealUp, // storage for previous state values
stateImagUp; // (used in updates)
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
sizeHalfBlock = 1LL << targetQubit; // size of blocks halved
sizeBlock = 2LL * sizeHalfBlock; // size of blocks
stateVecSize = qureg.numAmpsPerChunk;
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
index = blockIdx.x*blockDim.x + threadIdx.x;
if (index>=(stateVecSize>>1)) return;
thisBlock = index / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + index%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
controlBit = extractBit(controlQubit, indexUp);
if (controlBit){
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateVecReal[indexUp] = stateVecReal[indexLo];
stateVecImag[indexUp] = stateVecImag[indexLo];
stateVecReal[indexLo] = stateRealUp;
stateVecImag[indexLo] = stateImagUp;
}
}
void statevec_controlledNot(Qureg qureg, const int controlQubit, const int targetQubit)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
statevec_controlledNotKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit);
}
__device__ __host__ unsigned int log2Int( unsigned int x )
{
unsigned int ans = 0 ;
while( x>>=1 ) ans++;
return ans ;
}
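// reduceBlock performs a tree reduction over a shared-memory array: on each pass the
// first threadMax threads fold the upper half of the array onto the lower half, halving
// the active range until arrayIn[0] holds the block's total, which thread 0 writes to
// reducedArray[blockIdx.x]. length is assumed to be a power of two.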
__device__ void reduceBlock(qreal *arrayIn, qreal *reducedArray, int length){
int i, l, r;
int threadMax, maxDepth;
threadMax = length/2;
maxDepth = log2Int(length/2);
for (i=0; i<maxDepth+1; i++){
if (threadIdx.x<threadMax){
l = threadIdx.x;
r = l + threadMax;
arrayIn[l] = arrayIn[r] + arrayIn[l];
}
threadMax = threadMax >> 1;
__syncthreads(); // optimise -- use warp shuffle instead
}
if (threadIdx.x==0) reducedArray[blockIdx.x] = arrayIn[0];
}
__global__ void copySharedReduceBlock(qreal*arrayIn, qreal *reducedArray, int length){
extern __shared__ qreal tempReductionArray[];
int blockOffset = blockIdx.x*length;
tempReductionArray[threadIdx.x*2] = arrayIn[blockOffset + threadIdx.x*2];
tempReductionArray[threadIdx.x*2+1] = arrayIn[blockOffset + threadIdx.x*2+1];
__syncthreads();
reduceBlock(tempReductionArray, reducedArray, length);
}
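// copySharedReduceBlock is the later-pass kernel of the multi-level reduction: it is
// launched with length/2 threads per block, each loading two of the previous pass's
// partial sums into shared memory before handing over to reduceBlock, so every launch
// shrinks the array of partial sums by a factor of the block length.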
__global__ void densmatr_findProbabilityOfZeroKernel(
Qureg qureg, const int measureQubit, qreal *reducedArray
) {
// run by each thread
// use of block here refers to contiguous amplitudes where measureQubit = 0,
// (then =1) and NOT the CUDA block, which is the partitioning of CUDA threads
long long int densityDim = 1LL << qureg.numQubitsRepresented;
long long int numTasks = densityDim >> 1;
long long int sizeHalfBlock = 1LL << (measureQubit);
long long int sizeBlock = 2LL * sizeHalfBlock;
long long int thisBlock; // which block this thread is processing
long long int thisTask; // which part of the block this thread is processing
long long int basisIndex; // index of this thread's computational basis state
long long int densityIndex; // " " index of |basis><basis| in the flat density matrix
// array of each thread's collected probability, to be summed
extern __shared__ qreal tempReductionArray[];
// figure out which density-matrix probability this thread is assigned
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
basisIndex = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
densityIndex = (densityDim + 1) * basisIndex;
// record the probability in the CUDA-BLOCK-wide array
qreal prob = qureg.deviceStateVec.real[densityIndex]; // im[densityIndex] assumed ~ 0
tempReductionArray[threadIdx.x] = prob;
// sum the probs collected by this CUDA-BLOCK's threads into a per-CUDA-BLOCK array
__syncthreads();
if (threadIdx.x<blockDim.x/2){
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
}
__global__ void statevec_findProbabilityOfZeroKernel(
Qureg qureg, const int measureQubit, qreal *reducedArray
) {
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
index; // current index for first half block
// ----- temp variables
long long int thisTask; // task-based approach to expose loop parallelism with small granularity
long long int numTasks=qureg.numAmpsPerChunk>>1;
// (good for shared memory parallelism)
extern __shared__ qreal tempReductionArray[];
// ---------------------------------------------------------------- //
// dimensions //
// ---------------------------------------------------------------- //
sizeHalfBlock = 1LL << (measureQubit); // number of state vector elements to sum,
// and then the number to skip
sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries)
// ---------------------------------------------------------------- //
// find probability //
// ---------------------------------------------------------------- //
//
// --- task-based shared-memory parallel implementation
//
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
index = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
qreal realVal, imagVal;
realVal = stateVecReal[index];
imagVal = stateVecImag[index];
tempReductionArray[threadIdx.x] = realVal*realVal + imagVal*imagVal;
__syncthreads();
if (threadIdx.x<blockDim.x/2){
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
}
int getNumReductionLevels(long long int numValuesToReduce, int numReducedPerLevel){
int levels=0;
while (numValuesToReduce){
numValuesToReduce = numValuesToReduce/numReducedPerLevel;
levels++;
}
return levels;
}
void swapDouble(qreal **a, qreal **b){
qreal *temp;
temp = *a;
*a = *b;
*b = temp;
}
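// The host-side loops below reduce in passes: the first kernel launch leaves one partial
// sum per CUDA block in firstLevelReduction, and each later pass folds those with
// copySharedReduceBlock into secondLevelReduction, after which swapDouble swaps the two
// buffers so the freshest partial sums are always read from firstLevelReduction when the
// final scalar is copied back to the host.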
qreal densmatr_findProbabilityOfZero(Qureg qureg, const int measureQubit)
{
long long int densityDim = 1LL << qureg.numQubitsRepresented;
long long int numValuesToReduce = densityDim >> 1; // half of the diagonal has measureQubit=0
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
int firstTime = 1;
while (numValuesToReduce > 1) {
// need less than one CUDA-BLOCK to reduce
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
// otherwise use only full CUDA-BLOCKS
else {
valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
// spawn threads to sum the probs in each block
if (firstTime) {
densmatr_findProbabilityOfZeroKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
qureg, measureQubit, qureg.firstLevelReduction);
firstTime = 0;
// sum the block probs
} else {
cudaDeviceSynchronize();
copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
qureg.firstLevelReduction,
qureg.secondLevelReduction, valuesPerCUDABlock);
cudaDeviceSynchronize();
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
qreal zeroProb;
cudaMemcpy(&zeroProb, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
return zeroProb;
}
qreal statevec_findProbabilityOfZero(Qureg qureg, const int measureQubit)
{
long long int numValuesToReduce = qureg.numAmpsPerChunk>>1;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
qreal stateProb=0;
int firstTime=1;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
while(numValuesToReduce>1){
if (numValuesToReduce<maxReducedPerLevel){
// Need less than one CUDA block to reduce values
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
} else {
// Use full CUDA blocks, with block size constrained by shared mem usage
valuesPerCUDABlock = maxReducedPerLevel;
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
if (firstTime){
statevec_findProbabilityOfZeroKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
qureg, measureQubit, qureg.firstLevelReduction);
firstTime=0;
} else {
cudaDeviceSynchronize();
copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
qureg.firstLevelReduction,
qureg.secondLevelReduction, valuesPerCUDABlock);
cudaDeviceSynchronize();
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
cudaMemcpy(&stateProb, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
return stateProb;
}
qreal statevec_calcProbOfOutcome(Qureg qureg, const int measureQubit, int outcome)
{
qreal outcomeProb = statevec_findProbabilityOfZero(qureg, measureQubit);
if (outcome==1)
outcomeProb = 1.0 - outcomeProb;
return outcomeProb;
}
qreal densmatr_calcProbOfOutcome(Qureg qureg, const int measureQubit, int outcome)
{
qreal outcomeProb = densmatr_findProbabilityOfZero(qureg, measureQubit);
if (outcome==1)
outcomeProb = 1.0 - outcomeProb;
return outcomeProb;
}
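// For <bra|ket> = sum_j conj(b_j) * k_j, the real part is sum_j (bR_j*kR_j + bI_j*kI_j)
// and the imaginary part is sum_j (bR_j*kI_j - bI_j*kR_j); the kernel below computes one
// of those two sums per launch, selected by getRealComp.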
/** computes either a real or imag term in the inner product */
__global__ void statevec_calcInnerProductKernel(
int getRealComp,
qreal* vecReal1, qreal* vecImag1, qreal* vecReal2, qreal* vecImag2,
long long int numTermsToSum, qreal* reducedArray)
{
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= numTermsToSum) return;
// choose whether to calculate the real or imaginary term of the inner product
qreal innerProdTerm;
if (getRealComp)
innerProdTerm = vecReal1[index]*vecReal2[index] + vecImag1[index]*vecImag2[index];
else
innerProdTerm = vecReal1[index]*vecImag2[index] - vecImag1[index]*vecReal2[index];
// array of each thread's collected probability, to be summed
extern __shared__ qreal tempReductionArray[];
tempReductionArray[threadIdx.x] = innerProdTerm;
__syncthreads();
// the first half of the block's threads carry out the reduction
if (threadIdx.x<blockDim.x/2)
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
/** Terrible code which unnecessarily individually computes and sums the real and imaginary components of the
* inner product, so as to not have to worry about keeping the sums separated during reduction.
* Truly disgusting, probably doubles runtime, please fix.
* @TODO could even do the kernel twice, storing real in bra.reduc and imag in ket.reduc?
*/
Complex statevec_calcInnerProduct(Qureg bra, Qureg ket) {
qreal innerProdReal, innerProdImag;
int getRealComp;
long long int numValuesToReduce;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel;
int firstTime;
// compute real component of inner product
getRealComp = 1;
numValuesToReduce = bra.numAmpsPerChunk;
maxReducedPerLevel = REDUCE_SHARED_SIZE;
firstTime = 1;
while (numValuesToReduce > 1) {
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
else {
valuesPerCUDABlock = maxReducedPerLevel;
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
if (firstTime) {
statevec_calcInnerProductKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
getRealComp,
bra.deviceStateVec.real, bra.deviceStateVec.imag,
ket.deviceStateVec.real, ket.deviceStateVec.imag,
numValuesToReduce,
bra.firstLevelReduction);
firstTime = 0;
} else {
cudaDeviceSynchronize();
copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
bra.firstLevelReduction,
bra.secondLevelReduction, valuesPerCUDABlock);
cudaDeviceSynchronize();
swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
cudaMemcpy(&innerProdReal, bra.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
// compute imag component of inner product
getRealComp = 0;
numValuesToReduce = bra.numAmpsPerChunk;
maxReducedPerLevel = REDUCE_SHARED_SIZE;
firstTime = 1;
while (numValuesToReduce > 1) {
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
else {
valuesPerCUDABlock = maxReducedPerLevel;
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
if (firstTime) {
statevec_calcInnerProductKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
getRealComp,
bra.deviceStateVec.real, bra.deviceStateVec.imag,
ket.deviceStateVec.real, ket.deviceStateVec.imag,
numValuesToReduce,
bra.firstLevelReduction);
firstTime = 0;
} else {
cudaDeviceSynchronize();
copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
bra.firstLevelReduction,
bra.secondLevelReduction, valuesPerCUDABlock);
cudaDeviceSynchronize();
swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
cudaMemcpy(&innerProdImag, bra.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
// return complex
Complex innerProd;
innerProd.real = innerProdReal;
innerProd.imag = innerProdImag;
return innerProd;
}
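// The fidelity of a density matrix rho with a pure state |psi> reduces to <psi|rho|psi>.
// Each thread below computes one row r of the product rho|psi>, multiplies it by
// conj(psi_r), and the block reduction sums these (purely real) contributions.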
/** computes one term of (vec^*T) dens * vec */
__global__ void densmatr_calcFidelityKernel(Qureg dens, Qureg vec, long long int dim, qreal* reducedArray) {
// figure out which density matrix row to consider
long long int col;
long long int row = blockIdx.x*blockDim.x + threadIdx.x;
if (row >= dim) return;
qreal* densReal = dens.deviceStateVec.real;
qreal* densImag = dens.deviceStateVec.imag;
qreal* vecReal = vec.deviceStateVec.real;
qreal* vecImag = vec.deviceStateVec.imag;
// compute the row-th element of the product dens*vec
qreal prodReal = 0;
qreal prodImag = 0;
for (col=0LL; col < dim; col++) {
qreal densElemReal = densReal[dim*col + row];
qreal densElemImag = densImag[dim*col + row];
prodReal += densElemReal*vecReal[col] - densElemImag*vecImag[col];
prodImag += densElemReal*vecImag[col] + densElemImag*vecReal[col];
}
// multiply with row-th elem of (vec^*)
qreal termReal = prodImag*vecImag[row] + prodReal*vecReal[row];
// imag of every term should be zero, because each is a valid fidelity calc of an eigenstate
//qreal termImag = prodImag*vecReal[row] - prodReal*vecImag[row];
extern __shared__ qreal tempReductionArray[];
tempReductionArray[threadIdx.x] = termReal;
__syncthreads();
// the first half of the block's threads carry out the reduction
if (threadIdx.x<blockDim.x/2)
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
// @TODO implement
qreal densmatr_calcFidelity(Qureg qureg, Qureg pureState) {
// we're summing one <psi|rho|psi> contribution per row of the density matrix
long long int densityDim = 1LL << qureg.numQubitsRepresented;
long long int numValuesToReduce = densityDim;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
int firstTime = 1;
while (numValuesToReduce > 1) {
// need less than one CUDA-BLOCK to reduce
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
// otherwise use only full CUDA-BLOCKS
else {
valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
// dictates size of reduction array
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
// spawn threads to sum the probs in each block
// store the reduction in the pureState array
if (firstTime) {
densmatr_calcFidelityKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
qureg, pureState, densityDim, pureState.firstLevelReduction);
firstTime = 0;
// sum the block probs
} else {
cudaDeviceSynchronize();
copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
pureState.firstLevelReduction,
pureState.secondLevelReduction, valuesPerCUDABlock);
cudaDeviceSynchronize();
swapDouble(&(pureState.firstLevelReduction), &(pureState.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
qreal fidelity;
cudaMemcpy(&fidelity, pureState.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
return fidelity;
}
__global__ void densmatr_calcPurityKernel(qreal* vecReal, qreal* vecImag, long long int numAmpsToSum, qreal *reducedArray) {
// figure out which density matrix term this thread is assigned
long long int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= numAmpsToSum) return;
qreal term = vecReal[index]*vecReal[index] + vecImag[index]*vecImag[index];
// array of each thread's collected probability, to be summed
extern __shared__ qreal tempReductionArray[];
tempReductionArray[threadIdx.x] = term;
__syncthreads();
// the first half of the block's threads carry out the reduction
if (threadIdx.x<blockDim.x/2)
reduceBlock(tempReductionArray, reducedArray, blockDim.x);
}
/** Computes the trace of the density matrix squared */
qreal densmatr_calcPurity(Qureg qureg) {
// we're summing the square of every term in the density matrix
long long int numValuesToReduce = qureg.numAmpsPerChunk;
int valuesPerCUDABlock, numCUDABlocks, sharedMemSize;
int maxReducedPerLevel = REDUCE_SHARED_SIZE;
int firstTime = 1;
while (numValuesToReduce > 1) {
// need less than one CUDA-BLOCK to reduce
if (numValuesToReduce < maxReducedPerLevel) {
valuesPerCUDABlock = numValuesToReduce;
numCUDABlocks = 1;
}
// otherwise use only full CUDA-BLOCKS
else {
valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory
numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock);
}
// dictates size of reduction array
sharedMemSize = valuesPerCUDABlock*sizeof(qreal);
// spawn threads to sum the probs in each block
if (firstTime) {
densmatr_calcPurityKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>(
qureg.deviceStateVec.real, qureg.deviceStateVec.imag,
numValuesToReduce, qureg.firstLevelReduction);
firstTime = 0;
// sum the block probs
} else {
cudaDeviceSynchronize();
copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>(
qureg.firstLevelReduction,
qureg.secondLevelReduction, valuesPerCUDABlock);
cudaDeviceSynchronize();
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction));
}
numValuesToReduce = numValuesToReduce/maxReducedPerLevel;
}
qreal traceDensSquared;
cudaMemcpy(&traceDensSquared, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost);
return traceDensSquared;
}
__global__ void statevec_collapseToKnownProbOutcomeKernel(Qureg qureg, int measureQubit, int outcome, qreal totalProbability)
{
// ----- sizes
long long int sizeBlock, // size of blocks
sizeHalfBlock; // size of blocks halved
// ----- indices
long long int thisBlock, // current block
index; // current index for first half block
// ----- measured probability
qreal renorm; // probability (returned) value
// ----- temp variables
long long int thisTask; // task-based approach to expose loop parallelism with small granularity
// (good for shared memory parallelism)
long long int numTasks=qureg.numAmpsPerChunk>>1;
// ---------------------------------------------------------------- //
// dimensions //
// ---------------------------------------------------------------- //
sizeHalfBlock = 1LL << (measureQubit); // number of state vector elements to sum,
// and then the number to skip
sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries)
// ---------------------------------------------------------------- //
// find probability //
// ---------------------------------------------------------------- //
//
// --- task-based shared-memory parallel implementation
//
renorm=1/sqrt(totalProbability);
qreal *stateVecReal = qureg.deviceStateVec.real;
qreal *stateVecImag = qureg.deviceStateVec.imag;
thisTask = blockIdx.x*blockDim.x + threadIdx.x;
if (thisTask>=numTasks) return;
thisBlock = thisTask / sizeHalfBlock;
index = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
if (outcome==0){
stateVecReal[index]=stateVecReal[index]*renorm;
stateVecImag[index]=stateVecImag[index]*renorm;
stateVecReal[index+sizeHalfBlock]=0;
stateVecImag[index+sizeHalfBlock]=0;
} else if (outcome==1){
stateVecReal[index]=0;
stateVecImag[index]=0;
stateVecReal[index+sizeHalfBlock]=stateVecReal[index+sizeHalfBlock]*renorm;
stateVecImag[index+sizeHalfBlock]=stateVecImag[index+sizeHalfBlock]*renorm;
}
}
/*
* outcomeProb must accurately be the probability of that qubit outcome in the state-vector, or
* else the state-vector will lose normalisation
*/
void statevec_collapseToKnownProbOutcome(Qureg qureg, const int measureQubit, int outcome, qreal outcomeProb)
{
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
statevec_collapseToKnownProbOutcomeKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, measureQubit, outcome, outcomeProb);
}
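// The part1/part2/part3 masks used by the density-matrix kernels below implement bit
// insertion: bits of the scan index below colBit are kept in place (part1), bits between
// colBit and rowBit are shifted up by one (part2), and the remaining bits are shifted up
// by two (part3), leaving zeros at the column and row bit positions. This enumerates
// exactly the |..0..><..0..| basis indices; adding colBit, rowBit or both reaches the
// |..1..| partners of each.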
/** Maps thread ID to a |..0..><..0..| state and then locates |0><1|, |1><0| and |1><1| */
__global__ void densmatr_collapseToKnownProbOutcomeKernel(
qreal outcomeProb, qreal* vecReal, qreal *vecImag, long long int numBasesToVisit,
long long int part1, long long int part2, long long int part3,
long long int rowBit, long long int colBit, long long int desired, long long int undesired)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numBasesToVisit) return;
long long int base = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2);
// renormalise desired outcome
vecReal[base + desired] /= outcomeProb;
vecImag[base + desired] /= outcomeProb;
// kill undesired outcome
vecReal[base + undesired] = 0;
vecImag[base + undesired] = 0;
// kill |..0..><..1..| states
vecReal[base + colBit] = 0;
vecImag[base + colBit] = 0;
vecReal[base + rowBit] = 0;
vecImag[base + rowBit] = 0;
}
/** This involves finding |...i...><...j...| states and killing those where i!=j */
void densmatr_collapseToKnownProbOutcome(Qureg qureg, const int measureQubit, int outcome, qreal outcomeProb) {
int rowQubit = measureQubit + qureg.numQubitsRepresented;
long long int colBit = 1LL << measureQubit;
long long int rowBit = 1LL << rowQubit; // long long so the shifted masks aren't truncated to int
long long int numBasesToVisit = qureg.numAmpsPerChunk/4;
long long int part1 = colBit -1;
long long int part2 = (rowBit >> 1) - colBit;
long long int part3 = numBasesToVisit - (rowBit >> 1);
long long int desired, undesired;
if (outcome == 0) {
desired = 0;
undesired = colBit | rowBit;
} else {
desired = colBit | rowBit;
undesired = 0;
}
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numBasesToVisit / (qreal) threadsPerCUDABlock);
densmatr_collapseToKnownProbOutcomeKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
outcomeProb, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numBasesToVisit,
part1, part2, part3, rowBit, colBit, desired, undesired);
}
__global__ void densmatr_addDensityMatrixKernel(Qureg combineQureg, qreal otherProb, Qureg otherQureg, long long int numAmpsToVisit) {
long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x;
if (ampInd >= numAmpsToVisit) return;
combineQureg.deviceStateVec.real[ampInd] *= 1-otherProb;
combineQureg.deviceStateVec.imag[ampInd] *= 1-otherProb;
combineQureg.deviceStateVec.real[ampInd] += otherProb*otherQureg.deviceStateVec.real[ampInd];
combineQureg.deviceStateVec.imag[ampInd] += otherProb*otherQureg.deviceStateVec.imag[ampInd];
}
void densmatr_addDensityMatrix(Qureg combineQureg, qreal otherProb, Qureg otherQureg) {
long long int numAmpsToVisit = combineQureg.numAmpsPerChunk;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
densmatr_addDensityMatrixKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
combineQureg, otherProb, otherQureg, numAmpsToVisit
);
}
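// densmatr_addDensityMatrix forms the element-wise convex combination
// rho_combine <- (1 - otherProb) * rho_combine + otherProb * rho_other,
// assuming both quregs are density matrices of the same dimension.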
/** Called once for every 4 amplitudes in density matrix
* Works by establishing the |..0..><..0..| state (for its given index) then
* visiting |..1..><..0..| and |..0..><..1..|. Labels |part1 X pa><rt2 NOT(X) part3|
* From the brain of Simon Benjamin
*/
__global__ void densmatr_oneQubitDephaseKernel(
qreal fac, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3,
long long int colBit, long long int rowBit)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numAmpsToVisit) return;
long long int ampInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2);
vecReal[ampInd + colBit] *= fac;
vecImag[ampInd + colBit] *= fac;
vecReal[ampInd + rowBit] *= fac;
vecImag[ampInd + rowBit] *= fac;
}
void densmatr_oneQubitDephase(Qureg qureg, const int targetQubit, qreal dephase) {
if (dephase == 0)
return;
long long int numAmpsToVisit = qureg.numAmpsPerChunk/4;
int rowQubit = targetQubit + qureg.numQubitsRepresented;
long long int colBit = 1LL << targetQubit;
long long int rowBit = 1LL << rowQubit;
long long int part1 = colBit - 1;
long long int part2 = (rowBit >> 1) - colBit;
long long int part3 = numAmpsToVisit - (rowBit >> 1);
qreal dephFac = 1 - dephase;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
densmatr_oneQubitDephaseKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
dephFac, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit,
part1, part2, part3, colBit, rowBit);
}
/** Called 12 times for every 16 amplitudes in density matrix
* Each sums from the |..0..0..><..0..0..| index to visit either
* |..0..0..><..0..1..|, |..0..0..><..1..0..|, |..0..0..><..1..1..|, |..0..1..><..0..0..|
* etc and so on to |..1..1..><..1..0|. Labels |part1 0 part2 0 par><t3 0 part4 0 part5|.
* From the brain of Simon Benjamin
*/
__global__ void densmatr_twoQubitDephaseKernel(
qreal fac, qreal* vecReal, qreal *vecImag, long long int numBackgroundStates, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3, long long int part4, long long int part5,
long long int colBit1, long long int rowBit1, long long int colBit2, long long int rowBit2)
{
long long int outerInd = blockIdx.x*blockDim.x + threadIdx.x;
if (outerInd >= numAmpsToVisit) return;
// sets meta in 1...14 excluding 5, 10, creating bit string DCBA for |..D..C..><..B..A|
int meta = 1 + (outerInd/numBackgroundStates);
if (meta > 4) meta++;
if (meta > 9) meta++;
long long int shift = rowBit2*((meta>>3)%2) + rowBit1*((meta>>2)%2) + colBit2*((meta>>1)%2) + colBit1*(meta%2);
long long int scanInd = outerInd % numBackgroundStates;
long long int stateInd = (
shift +
(scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4));
vecReal[stateInd] *= fac;
vecImag[stateInd] *= fac;
}
// @TODO is separating these 12 amplitudes really faster than letting every 16th base modify 12 elems?
void densmatr_twoQubitDephase(Qureg qureg, int qubit1, int qubit2, qreal dephase) {
if (dephase == 0)
return;
// assumes qubit2 > qubit1
int rowQubit1 = qubit1 + qureg.numQubitsRepresented;
int rowQubit2 = qubit2 + qureg.numQubitsRepresented;
long long int colBit1 = 1LL << qubit1;
long long int rowBit1 = 1LL << rowQubit1;
long long int colBit2 = 1LL << qubit2;
long long int rowBit2 = 1LL << rowQubit2;
long long int part1 = colBit1 - 1;
long long int part2 = (colBit2 >> 1) - colBit1;
long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1);
long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2);
long long int part5 = (qureg.numAmpsPerChunk/16) - (rowBit2 >> 3);
qreal dephFac = 1 - dephase;
// refers to states |a 0 b 0 c><d 0 e 0 f| (target qubits are fixed)
long long int numBackgroundStates = qureg.numAmpsPerChunk/16;
// 12 of these states experience dephasing
long long int numAmpsToVisit = 12 * numBackgroundStates;
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
densmatr_twoQubitDephaseKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
dephFac, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numBackgroundStates, numAmpsToVisit,
part1, part2, part3, part4, part5, colBit1, rowBit1, colBit2, rowBit2);
}
/** Works like oneQubitDephase but modifies every other element, and elements are averaged in pairs */
__global__ void densmatr_oneQubitDepolariseKernel(
qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3,
long long int bothBits)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numAmpsToVisit) return;
long long int baseInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2);
long long int targetInd = baseInd + bothBits;
qreal realAvDepol = depolLevel * 0.5 * (vecReal[baseInd] + vecReal[targetInd]);
qreal imagAvDepol = depolLevel * 0.5 * (vecImag[baseInd] + vecImag[targetInd]);
vecReal[baseInd] *= 1 - depolLevel;
vecImag[baseInd] *= 1 - depolLevel;
vecReal[targetInd] *= 1 - depolLevel;
vecImag[targetInd] *= 1 - depolLevel;
vecReal[baseInd] += realAvDepol;
vecImag[baseInd] += imagAvDepol;
vecReal[targetInd] += realAvDepol;
vecImag[targetInd] += imagAvDepol;
}
void densmatr_oneQubitDepolarise(Qureg qureg, const int targetQubit, qreal depolLevel) {
if (depolLevel == 0)
return;
densmatr_oneQubitDephase(qureg, targetQubit, depolLevel);
long long int numAmpsToVisit = qureg.numAmpsPerChunk/4;
int rowQubit = targetQubit + qureg.numQubitsRepresented;
long long int colBit = 1LL << targetQubit;
long long int rowBit = 1LL << rowQubit;
long long int bothBits = colBit | rowBit;
long long int part1 = colBit - 1;
long long int part2 = (rowBit >> 1) - colBit;
long long int part3 = numAmpsToVisit - (rowBit >> 1);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
densmatr_oneQubitDepolariseKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
depolLevel, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit,
part1, part2, part3, bothBits);
}
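// The one-qubit depolarising channel above is applied in two stages: a dephase of equal
// strength damps the |0><1| and |1><0| blocks, then the kernel mixes every pair of
// elements whose row and column indices differ only in the target qubit (indices baseInd
// and baseInd + bothBits), pulling each towards their average with weight depolLevel.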
/** Called once for every 16 amplitudes */
__global__ void densmatr_twoQubitDepolariseKernel(
qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit,
long long int part1, long long int part2, long long int part3,
long long int part4, long long int part5,
long long int rowCol1, long long int rowCol2)
{
long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x;
if (scanInd >= numAmpsToVisit) return;
// index of |..0..0..><..0..0|
long long int ind00 = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4);
long long int ind01 = ind00 + rowCol1;
long long int ind10 = ind00 + rowCol2;
long long int ind11 = ind00 + rowCol1 + rowCol2;
qreal realAvDepol = depolLevel * 0.25 * (
vecReal[ind00] + vecReal[ind01] + vecReal[ind10] + vecReal[ind11]);
qreal imagAvDepol = depolLevel * 0.25 * (
vecImag[ind00] + vecImag[ind01] + vecImag[ind10] + vecImag[ind11]);
qreal retain = 1 - depolLevel;
vecReal[ind00] *= retain; vecImag[ind00] *= retain;
vecReal[ind01] *= retain; vecImag[ind01] *= retain;
vecReal[ind10] *= retain; vecImag[ind10] *= retain;
vecReal[ind11] *= retain; vecImag[ind11] *= retain;
vecReal[ind00] += realAvDepol; vecImag[ind00] += imagAvDepol;
vecReal[ind01] += realAvDepol; vecImag[ind01] += imagAvDepol;
vecReal[ind10] += realAvDepol; vecImag[ind10] += imagAvDepol;
vecReal[ind11] += realAvDepol; vecImag[ind11] += imagAvDepol;
}
void densmatr_twoQubitDepolarise(Qureg qureg, int qubit1, int qubit2, qreal depolLevel) {
if (depolLevel == 0)
return;
// assumes qubit2 > qubit1
densmatr_twoQubitDephase(qureg, qubit1, qubit2, depolLevel);
int rowQubit1 = qubit1 + qureg.numQubitsRepresented;
int rowQubit2 = qubit2 + qureg.numQubitsRepresented;
long long int colBit1 = 1LL << qubit1;
long long int rowBit1 = 1LL << rowQubit1;
long long int colBit2 = 1LL << qubit2;
long long int rowBit2 = 1LL << rowQubit2;
long long int rowCol1 = colBit1 | rowBit1;
long long int rowCol2 = colBit2 | rowBit2;
long long int numAmpsToVisit = qureg.numAmpsPerChunk/16;
long long int part1 = colBit1 - 1;
long long int part2 = (colBit2 >> 1) - colBit1;
long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1);
long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2);
long long int part5 = numAmpsToVisit - (rowBit2 >> 3);
int threadsPerCUDABlock, CUDABlocks;
threadsPerCUDABlock = 128;
CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock);
densmatr_twoQubitDepolariseKernel<<<CUDABlocks, threadsPerCUDABlock>>>(
depolLevel, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit,
part1, part2, part3, part4, part5, rowCol1, rowCol2);
}
void seedQuESTDefault(){
// init MT random number generator with three keys -- time, pid and a hash of hostname
// for the MPI version, it is ok that all procs will get the same seed as random numbers will only be
// used by the master process
unsigned long int key[3];
getQuESTDefaultSeedKey(key);
init_by_array(key, 3);
}
#ifdef __cplusplus
}
#endif
|
da991fd75ef9019c27e1a79c8a9b538ab039d346.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> s d c
@author Theo Mary
*/
#include "magma_internal.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
zlascl2_full(int m, int n, const double* D, magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
double mul = D[ind];
A += ind;
if (ind < m) {
for (int j=0; j < n; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
zlascl2_lower(int m, int n, const double* D, magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
int break_d = (ind < n) ? ind : n-1;
double mul = D[ind];
A += ind;
if (ind < m) {
for (int j=0; j <= break_d; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
zlascl2_upper(int m, int n, const double *D, magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
double mul = D[ind];
A += ind;
if (ind < m) {
for (int j=n-1; j >= ind; j--)
A[j*lda] *= mul;
}
}
/***************************************************************************//**
Purpose
-------
ZLASCL2 scales the M by N complex matrix A by the real diagonal matrix dD.
TYPE specifies that A may be full, upper triangular, lower triangular.
Arguments
---------
@param[in]
type magma_type_t
TYPE indicates the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
dD DOUBLE PRECISION vector, dimension (M)
The diagonal matrix containing the scalar factors. Stored as a vector.
@param[in,out]
dA COMPLEX*16 array, dimension (LDDA,N)
The matrix to be scaled by dD. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@see magma_zlascl_diag
@ingroup magma_lascl_diag
*******************************************************************************/
extern "C" void
magmablas_zlascl2(
magma_type_t type, magma_int_t m, magma_int_t n,
magmaDouble_const_ptr dD,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( n < 0 )
*info = -3;
else if ( ldda < max(1,m) )
*info = -5;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 grid( magma_ceildiv( m, NB ) );
dim3 threads( NB );
if (type == MagmaLower) {
hipLaunchKernelGGL(( zlascl2_lower) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dD, dA, ldda);
}
else if (type == MagmaUpper) {
hipLaunchKernelGGL(( zlascl2_upper) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dD, dA, ldda);
}
else if (type == MagmaFull) {
hipLaunchKernelGGL(( zlascl2_full) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dD, dA, ldda);
}
}
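// A minimal host-side sketch of how this routine might be driven (illustrative only;
// dD and dA are assumed to already hold the scale factors and the matrix on the device):
//
// magma_queue_t queue;
// magma_int_t info;
// magma_queue_create( 0, &queue ); // device 0
// magmablas_zlascl2( MagmaFull, m, n, dD, dA, ldda, queue, &info );
// magma_queue_sync( queue );
// magma_queue_destroy( queue );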
| da991fd75ef9019c27e1a79c8a9b538ab039d346.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> s d c
@author Theo Mary
*/
#include "magma_internal.h"
#define NB 64
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
zlascl2_full(int m, int n, const double* D, magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
double mul = D[ind];
A += ind;
if (ind < m) {
for (int j=0; j < n; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
zlascl2_lower(int m, int n, const double* D, magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
int break_d = (ind < n) ? ind : n-1;
double mul = D[ind];
A += ind;
if (ind < m) {
for (int j=0; j <= break_d; j++ )
A[j*lda] *= mul;
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
zlascl2_upper(int m, int n, const double *D, magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
double mul = D[ind];
A += ind;
if (ind < m) {
for (int j=n-1; j >= ind; j--)
A[j*lda] *= mul;
}
}
/***************************************************************************//**
Purpose
-------
ZLASCL2 scales the M by N complex matrix A by the real diagonal matrix dD.
TYPE specifies that A may be full, upper triangular, lower triangular.
Arguments
---------
@param[in]
type magma_type_t
TYPE indicates the storage type of the input matrix A.
= MagmaFull: full matrix.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
dD DOUBLE PRECISION vector, dimension (M)
The diagonal matrix containing the scalar factors. Stored as a vector.
@param[in,out]
dA COMPLEX*16 array, dimension (LDDA,N)
The matrix to be scaled by dD. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@see magma_zlascl_diag
@ingroup magma_lascl_diag
*******************************************************************************/
extern "C" void
magmablas_zlascl2(
magma_type_t type, magma_int_t m, magma_int_t n,
magmaDouble_const_ptr dD,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( n < 0 )
*info = -3;
else if ( ldda < max(1,m) )
*info = -5;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 grid( magma_ceildiv( m, NB ) );
dim3 threads( NB );
if (type == MagmaLower) {
zlascl2_lower <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, dD, dA, ldda);
}
else if (type == MagmaUpper) {
zlascl2_upper <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, dD, dA, ldda);
}
else if (type == MagmaFull) {
zlascl2_full <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, dD, dA, ldda);
}
}
|
5283a5f99d3a1a67588858bcf3c117f63f48ee6c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include <iostream>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
void SelfHintLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
sh_coe_ = iteration_ >= max_step_ ? limit_ : (exp(iteration_ * smoothness_ / max_step_) - 1) / (exp(smoothness_) - 1) * limit_ ;
top[0]->mutable_cpu_data()[0] = sh_coe_;
iteration_ += 1;
}
template <typename Dtype>
__global__ void CreateBuffer(const int n, const Dtype* label_data, const Dtype* bottom_data, Dtype* buffer,
const int channels, const int height, const int width) {
CUDA_KERNEL_LOOP(index, n) {
const int size = height * width;
const int c = index / size;
const int cr = index % size;
const int label = static_cast<int>(label_data[0]);
const Dtype t = bottom_data[index] - bottom_data[label*size + cr];
if (c == label) buffer[index] = 0;
else buffer[index] = (t >= 0) ? t : -t;
}
}
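// The active VERSION 3 branch of SelfHintBackward below blends two gradients for each
// class channel c and spatial position: the plain softmax gradient ((prob[c]-1) on the
// labelled channel, prob[c] elsewhere) spread uniformly over the size = height*width
// positions, and a "self-hint" term that concentrates the gradient where activations are
// large -- weighted by bottom[c][x]/sum(bottom[label]) on the labelled channel and by
// bottom[label][x]*bottom[c][x]/sum(bottom[label]*bottom[c]) on the others. sh_coe,
// ramped up in Forward_gpu, interpolates between the two contributions.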
template <typename Dtype>
__global__ void SelfHintBackward(const int n, const Dtype* prob, const Dtype* label_data, const Dtype* bottom_data, Dtype* bottom_diff,
const int channels, const int height, const int width, const Dtype sh_coe, const Dtype* buffer, const int num) {
CUDA_KERNEL_LOOP(index, n) {
const int size = height * width;
const int c = index / size;
const int label = static_cast<int>(label_data[0]);
/***************** VERSION 0 *****************/
// const int cr = index % size;
// Dtype sum = 0;
// if (c == label)
// bottom_diff[index] = (prob[c] - 1) / size / num *3;
// else
// bottom_diff[index] = prob[c] / size / num * 3;
/***************** VERSION 1 *****************/
// Dtype sum = 0;
// for (int i = 0; i < size; ++i)
// sum += bottom_data[c*size+i];
// if (c == label) {
// if (sum == 0)
// bottom_diff[index] = (prob[c] - 1) / size / num;
// else
// bottom_diff[index] = ((1 - sh_coe) * (prob[c] - 1) / size + sh_coe * (prob[c] - 1) * bottom_data[index] / sum) / num;
// }
// else {
// if (sum == 0)
// bottom_diff[index] = prob[c] / size / num;
// else
// bottom_diff[index] = ((1 - sh_coe) * prob[c] / size + sh_coe * prob[c] * bottom_data[index] / sum) / num;
// }
/***************** VERSION 2 *****************/
// const int cr = index % size;
// Dtype sum = 0;
// for (int i = 0; i < size; ++i)
// sum += bottom_data[label * size + i];
// if (c == label) {
// if (sum == 0)
// bottom_diff[index] = (prob[c] - 1) / size / num;
// else
// bottom_diff[index] = ((1 - sh_coe) * (prob[c] - 1) / size + sh_coe * (prob[c] - 1) * bottom_data[index] / sum) / num;
// }
// else {
// if (sum == 0)
// bottom_diff[index] = prob[c] / size / num;
// else
// bottom_diff[index] = ((1 - sh_coe) * prob[c] / size + sh_coe * prob[c] * bottom_data[label * size + cr] / sum) / num;
// }
/***************** VERSION 3 *****************/
const int cr = index % size;
Dtype sum = 0;
if (c == label) {
for (int i = 0; i < size; ++i)
sum += bottom_data[label * size + i];
if (sum == 0)
bottom_diff[index] = (prob[c] - 1) / size / num;
else
bottom_diff[index] = ((1 - sh_coe) * (prob[c] - 1) / size + sh_coe * (prob[c] - 1) * bottom_data[index] / sum) / num;
}
else {
for (int i = 0; i < size; ++i)
sum += bottom_data[label * size + i] * bottom_data[c*size+i];
if (sum == 0)
bottom_diff[index] = prob[c] / size / num;
else
bottom_diff[index] = ((1 - sh_coe) * prob[c] / size + sh_coe * prob[c] * bottom_data[label * size + cr] * bottom_data[index] / sum) / num;
}
/***************** VERSION 4 *****************/ //TEST FOR ONLY GIVE LABEL MAP
// if (c == label) {
// Dtype sum = 0;
// for (int i = 0; i < size; ++i)
// sum += bottom_data[label * size + i];
// if (sum == 0)
// bottom_diff[index] = (prob[c] - 1) / size / num;
// else
// bottom_diff[index] = ((1 - sh_coe) * (prob[c] - 1) / size + sh_coe * (prob[c] - 1) * bottom_data[index] / sum) / num;
// }
// else {
// bottom_diff[index] = prob[c] / size / num;
// }
/***************** VERSION 5 *****************/
// const int cr = index % size;
// Dtype sum = 0;
// if (c == label) {
// for (int i = 0; i < size; ++i)
// sum += bottom_data[label * size + i];
// if (sum == 0)
// bottom_diff[index] = (prob[c] - 1) / size / num;
// else
// bottom_diff[index] = ((1 - sh_coe) * (prob[c] - 1) / size + sh_coe * (prob[c] - 1) * bottom_data[index] / sum) / num;
// }
// else {
// for (int i = 0; i < size; ++i)
// sum += bottom_data[label * size + i] * bottom_data[c*size+i];
// Dtype sumB = 0;
// for (int i = 0; i < size; ++i)
// sumB += bottom_data[c*size+i];
// if (sum == 0)
// bottom_diff[index] = prob[c] / size / num;
// else
// bottom_diff[index] = ((1 - 2 * sh_coe) * prob[c] / size
// + sh_coe * prob[c] * bottom_data[label * size + cr] * bottom_data[index] / sum
// + sh_coe * prob[c] * bottom_data[index] / sumB) / num;
// }
}
}
template <typename Dtype>
void SelfHintLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const int count = height_ * width_ * channels_;
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* prob_data = bottom[1]->gpu_data();
const Dtype* label = bottom[2]->gpu_data();
const int num = bottom[0]->num();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* buffer;
hipMalloc((void**) &buffer, count*sizeof(Dtype));
for (int n = 0; n < num; ++n) {
// CreateBuffer<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, label + n, bottom_data + n * count, buffer, channels_, height_, width_);
hipLaunchKernelGGL(( SelfHintBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, prob_data + n * channels_, label + n,
bottom_data + n * count, bottom_diff + n * count, channels_, height_, width_, sh_coe_, buffer, num);
}
hipFree(buffer);
}
INSTANTIATE_LAYER_GPU_FUNCS(SelfHintLayer);
} // namespace caffe
| 5283a5f99d3a1a67588858bcf3c117f63f48ee6c.cu | #include <algorithm>
#include <vector>
#include <iostream>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
void SelfHintLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
sh_coe_ = iteration_ >= max_step_ ? limit_ : (exp(iteration_ * smoothness_ / max_step_) - 1) / (exp(smoothness_) - 1) * limit_ ;
top[0]->mutable_cpu_data()[0] = sh_coe_;
iteration_ += 1;
}
template <typename Dtype>
__global__ void CreateBuffer(const int n, const Dtype* label_data, const Dtype* bottom_data, Dtype* buffer,
const int channels, const int height, const int width) {
CUDA_KERNEL_LOOP(index, n) {
const int size = height * width;
const int c = index / size;
const int cr = index % size;
const int label = static_cast<int>(label_data[0]);
const Dtype t = bottom_data[index] - bottom_data[label*size + cr];
if (c == label) buffer[index] = 0;
else buffer[index] = (t >= 0) ? t : -t;
}
}
template <typename Dtype>
__global__ void SelfHintBackward(const int n, const Dtype* prob, const Dtype* label_data, const Dtype* bottom_data, Dtype* bottom_diff,
const int channels, const int height, const int width, const Dtype sh_coe, const Dtype* buffer, const int num) {
CUDA_KERNEL_LOOP(index, n) {
const int size = height * width;
const int c = index / size;
const int label = static_cast<int>(label_data[0]);
/***************** VERSION 0 *****************/
// const int cr = index % size;
// Dtype sum = 0;
// if (c == label)
// bottom_diff[index] = (prob[c] - 1) / size / num *3;
// else
// bottom_diff[index] = prob[c] / size / num * 3;
/***************** VERSION 1 *****************/
// Dtype sum = 0;
// for (int i = 0; i < size; ++i)
// sum += bottom_data[c*size+i];
// if (c == label) {
// if (sum == 0)
// bottom_diff[index] = (prob[c] - 1) / size / num;
// else
// bottom_diff[index] = ((1 - sh_coe) * (prob[c] - 1) / size + sh_coe * (prob[c] - 1) * bottom_data[index] / sum) / num;
// }
// else {
// if (sum == 0)
// bottom_diff[index] = prob[c] / size / num;
// else
// bottom_diff[index] = ((1 - sh_coe) * prob[c] / size + sh_coe * prob[c] * bottom_data[index] / sum) / num;
// }
/***************** VERSION 2 *****************/
// const int cr = index % size;
// Dtype sum = 0;
// for (int i = 0; i < size; ++i)
// sum += bottom_data[label * size + i];
// if (c == label) {
// if (sum == 0)
// bottom_diff[index] = (prob[c] - 1) / size / num;
// else
// bottom_diff[index] = ((1 - sh_coe) * (prob[c] - 1) / size + sh_coe * (prob[c] - 1) * bottom_data[index] / sum) / num;
// }
// else {
// if (sum == 0)
// bottom_diff[index] = prob[c] / size / num;
// else
// bottom_diff[index] = ((1 - sh_coe) * prob[c] / size + sh_coe * prob[c] * bottom_data[label * size + cr] / sum) / num;
// }
/***************** VERSION 3 *****************/
const int cr = index % size;
Dtype sum = 0;
if (c == label) {
for (int i = 0; i < size; ++i)
sum += bottom_data[label * size + i];
if (sum == 0)
bottom_diff[index] = (prob[c] - 1) / size / num;
else
bottom_diff[index] = ((1 - sh_coe) * (prob[c] - 1) / size + sh_coe * (prob[c] - 1) * bottom_data[index] / sum) / num;
}
else {
for (int i = 0; i < size; ++i)
sum += bottom_data[label * size + i] * bottom_data[c*size+i];
if (sum == 0)
bottom_diff[index] = prob[c] / size / num;
else
bottom_diff[index] = ((1 - sh_coe) * prob[c] / size + sh_coe * prob[c] * bottom_data[label * size + cr] * bottom_data[index] / sum) / num;
}
/***************** VERSION 4 *****************/ //TEST FOR ONLY GIVE LABEL MAP
// if (c == label) {
// Dtype sum = 0;
// for (int i = 0; i < size; ++i)
// sum += bottom_data[label * size + i];
// if (sum == 0)
// bottom_diff[index] = (prob[c] - 1) / size / num;
// else
// bottom_diff[index] = ((1 - sh_coe) * (prob[c] - 1) / size + sh_coe * (prob[c] - 1) * bottom_data[index] / sum) / num;
// }
// else {
// bottom_diff[index] = prob[c] / size / num;
// }
/***************** VERSION 5 *****************/
// const int cr = index % size;
// Dtype sum = 0;
// if (c == label) {
// for (int i = 0; i < size; ++i)
// sum += bottom_data[label * size + i];
// if (sum == 0)
// bottom_diff[index] = (prob[c] - 1) / size / num;
// else
// bottom_diff[index] = ((1 - sh_coe) * (prob[c] - 1) / size + sh_coe * (prob[c] - 1) * bottom_data[index] / sum) / num;
// }
// else {
// for (int i = 0; i < size; ++i)
// sum += bottom_data[label * size + i] * bottom_data[c*size+i];
// Dtype sumB = 0;
// for (int i = 0; i < size; ++i)
// sumB += bottom_data[c*size+i];
// if (sum == 0)
// bottom_diff[index] = prob[c] / size / num;
// else
// bottom_diff[index] = ((1 - 2 * sh_coe) * prob[c] / size
// + sh_coe * prob[c] * bottom_data[label * size + cr] * bottom_data[index] / sum
// + sh_coe * prob[c] * bottom_data[index] / sumB) / num;
// }
}
}
template <typename Dtype>
void SelfHintLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const int count = height_ * width_ * channels_;
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* prob_data = bottom[1]->gpu_data();
const Dtype* label = bottom[2]->gpu_data();
const int num = bottom[0]->num();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* buffer;
cudaMalloc((void**) &buffer, count*sizeof(Dtype));
for (int n = 0; n < num; ++n) {
// CreateBuffer<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, label + n, bottom_data + n * count, buffer, channels_, height_, width_);
SelfHintBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, prob_data + n * channels_, label + n,
bottom_data + n * count, bottom_diff + n * count, channels_, height_, width_, sh_coe_, buffer, num);
}
cudaFree(buffer);
}
INSTANTIATE_LAYER_GPU_FUNCS(SelfHintLayer);
} // namespace caffe
|
d4732922c503e235cfeaf1eec0e91f5996c92382.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "gputimer.h"
#define n 0.0002
#define p 0.5
#define G 0.75
#define N 512
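// u, u1 and u2 hold the N x N membrane displacement at the current, previous and
// second-previous time step, flattened row-major; in the usual finite-difference drum
// model p is the (scaled) wave-propagation coefficient, n the damping term and G the
// boundary/corner gain. Only the centre element u1[N*N/2 + N/2] is struck at t = 0.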
__global__ void synthesis(float* u, float* u1, float* u2, int numBlocks, int threadsPerBlock) {
for (int i = threadIdx.x * numBlocks + blockIdx.x; i < N*N; i+= threadsPerBlock*numBlocks) {
// corners: each corner is G*G times the same-step interior update of its nearest
// interior neighbour, mirroring the per-edge offsets used by the border branches below
if (i == 0) {
u[i] = G * G * ((p * (u1[i+1] + u1[i+2*N+1] + u1[i+N] + u1[i+N+2] - 4 * u1[i+N+1]) + 2 * u1[i+N+1] - (1-n) * u2[i+N+1]) / (1+n));
}
else if (i == N-1) {
u[i] = G * G * ((p * (u1[i-1] + u1[i+2*N-1] + u1[i+N-2] + u1[i+N] - 4 * u1[i+N-1]) + 2 * u1[i+N-1] - (1-n) * u2[i+N-1]) / (1+n));
}
else if (i == (N-1) * N) {
u[i] = G * G * ((p * (u1[i-2*N+1] + u1[i+1] + u1[i-N] + u1[i-N+2] - 4 * u1[i-N+1]) + 2 * u1[i-N+1] - (1-n) * u2[i-N+1]) / (1+n));
}
else if (i == (N*N-1)) {
u[i] = G * G * ((p * (u1[i-2*N-1] + u1[i-1] + u1[i-N-2] + u1[i-N] - 4 * u1[i-N-1]) + 2 * u1[i-N-1] - (1-n) * u2[i-N-1]) / (1+n));
}
// borders
else if (i < N) {
u[i] = G * ((p * (u1[i] + u1[i+2*N] + u1[i+N-1] + u1[i+N+1] - 4 * u1[i+N]) + 2 * u1[i+N] - (1-n) * u2[i+N]) / (1+n));
}
else if (i > N * N - N) {
u[i] = G * ((p * (u1[i-2*N] + u1[i] + u1[i-N-1] + u1[i-N+1] - 4 * u1[i-N]) + 2 * u1[i-N] - (1-n) * u2[i-N]) / (1+n));
}
else if (i % N == 0) {
u[i] = G * ((p * (u1[i+1-N] + u1[i+1+N] + u1[i] + u1[i+2] - 4 * u1[i+1]) + 2 * u1[i+1] - (1-n) * u2[i+1]) / (1 + n));
}
else if (i % N == N - 1) {
u[i] = G * ((p * (u1[i-1-N] + u1[i-1+N] + u1[i-2] + u1[i] - 4 * u1[i-1]) + 2 * u1[i-1] - (1-n) * u2[i-1]) / (1+n));
}
// interior
else {
u[i] = (p * (u1[i-N] + u1[i+N] + u1[i-1] + u1[i+1] - 4 * u1[i]) + 2 * u1[i] - (1-n) * u2[i]) / (1+n);
}
}
}
int main(int argc, char* argv[]) {
if(argc<2) {
printf("Not enough arguments.\n");
return -1;
}
int T = atoi(argv[1]);
float* cuda_u, * cuda_u1, * cuda_u2;
hipMallocManaged(&cuda_u, N * N * sizeof(float));
hipMallocManaged(&cuda_u1, N * N * sizeof(float));
hipMallocManaged(&cuda_u2, N * N * sizeof(float));
cuda_u1[((N * N)/ 2 + N / 2)]=1.0; // Drum hit
int numBlocks = 128;
int threadsPerBlock = 1024;
GpuTimer timer;
timer.Start();
for (int iter = 0; iter < T; iter++) {
hipLaunchKernelGGL(( synthesis), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, cuda_u, cuda_u1, cuda_u2, numBlocks, threadsPerBlock);
hipDeviceSynchronize();
// Print result
printf("Iteration %d | u(N/2, N/2): %f\n", iter, cuda_u[(N*(N/2))+N/2]);
// Update u1 and u2
for (int i=0; i<N*N; i++) {
cuda_u2[i]=cuda_u1[i];
cuda_u1[i] = cuda_u[i];
}
hipFree(cuda_u);
hipMallocManaged(&cuda_u, N*N*sizeof(float));
}
timer.Stop();
printf("Time elapsed for %d iterations: %lf\n", T, timer.Elapsed());
hipFree(cuda_u);
hipFree(cuda_u1);
hipFree(cuda_u2);
return 0;
} | d4732922c503e235cfeaf1eec0e91f5996c92382.cu | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "gputimer.h"
#define n 0.0002
#define p 0.5
#define G 0.75
#define N 512
__global__ void synthesis(float* u, float* u1, float* u2, int numBlocks, int threadsPerBlock) {
for (int i = threadIdx.x * numBlocks + blockIdx.x; i < N*N; i+= threadsPerBlock*numBlocks) {
// corners
if (i == 0 || i == N-1 || i == (N-1) * N || i == (N*N-1)) {
u[i] = G * G * ((p * (u1[i+1] + u1[i+2*N+1] + u1[i+N] + u1[i+N+2] - 4 * u1[i+N+1]) + 2 * u1[i+N+1] - (1-n) * u2[i+N+1]) / (1+n));
}
// borders
else if (i < N) {
u[i] = G * ((p * (u1[i] + u1[i+2*N] + u1[i+N-1] + u1[i+N+1] - 4 * u1[i+N]) + 2 * u1[i+N] - (1-n) * u2[i+N]) / (1+n));
}
else if (i > N * N - N) {
u[i] = G * ((p * (u1[i-2*N] + u1[i] + u1[i-N-1] + u1[i-N+1] - 4 * u1[i-N]) + 2 * u1[i-N] - (1-n) * u2[i-N]) / (1+n));
}
else if (i % N == 0) {
u[i] = G * ((p * (u1[i+1-N] + u1[i+1+N] + u1[i] + u1[i+2] - 4 * u1[i+1]) + 2 * u1[i+1] - (1-n) * u2[i+1]) / (1 + n));
}
else if (i % N == N - 1) {
u[i] = G * ((p * (u1[i-1-N] + u1[i-1+N] + u1[i-2] + u1[i] - 4 * u1[i-1]) + 2 * u1[i-1] - (1-n) * u2[i-1]) / (1+n));
}
// interior
else {
u[i] = (p * (u1[i-N] + u1[i+N] + u1[i-1] + u1[i+1] - 4 * u1[i]) + 2 * u1[i] - (1-n) * u2[i]) / (1+n);
}
}
}
int main(int argc, char* argv[]) {
if(argc<2) {
printf("Not enough arguments.\n");
return -1;
}
int T = atoi(argv[1]);
float* cuda_u, * cuda_u1, * cuda_u2;
cudaMallocManaged(&cuda_u, N * N * sizeof(float));
cudaMallocManaged(&cuda_u1, N * N * sizeof(float));
cudaMallocManaged(&cuda_u2, N * N * sizeof(float));
cuda_u1[((N * N)/ 2 + N / 2)]=1.0; // Drum hit
int numBlocks = 128;
int threadsPerBlock = 1024;
GpuTimer timer;
timer.Start();
for (int iter = 0; iter < T; iter++) {
synthesis<<<numBlocks,threadsPerBlock>>>(cuda_u, cuda_u1, cuda_u2, numBlocks, threadsPerBlock);
cudaDeviceSynchronize();
// Print result
printf("Iteration %d | u(N/2, N/2): %f\n", iter, cuda_u[(N*(N/2))+N/2]);
// Update u1 and u2
for (int i=0; i<N*N; i++) {
cuda_u2[i]=cuda_u1[i];
cuda_u1[i] = cuda_u[i];
}
cudaFree(cuda_u);
cudaMallocManaged(&cuda_u, N*N*sizeof(float));
}
timer.Stop();
printf("Time elapsed for %d iterations: %lf\n", T, timer.Elapsed());
cudaFree(cuda_u);
cudaFree(cuda_u1);
cudaFree(cuda_u2);
return 0;
} |
730fc032a054d4958b6e44af5ba00037cf882085.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "TwoStepRATTLENVEGPU.cuh"
#include "hoomd/VectorMath.h"
#include <assert.h>
//! Takes the first half-step forward in the velocity-verlet NVE integration on a group of particles
/*! \param d_pos array of particle positions
\param d_vel array of particle velocities
\param d_accel array of particle accelerations
\param d_image array of particle images
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param box Box dimensions for periodic boundary condition handling
\param deltaT timestep
\param limit If \a limit is true, then the dynamics will be limited so that particles do not move
a distance further than \a limit_val in one step.
\param limit_val Length to limit particle distance movement to
\param zero_force Set to true to always assign an acceleration of 0 to all particles in the group
This kernel must be executed with a 1D grid of any block size such that the number of threads is greater than or
equal to the number of members in the group. The kernel's implementation simply reads one particle in each thread
and updates that particle.
<b>Performance notes:</b>
Particle properties are read via the texture cache to optimize the bandwidth obtained with sparse groups. The writes
in sparse groups will not be coalesced. However, because ParticleGroup sorts the index list the writes will be as
contiguous as possible leading to fewer memory transactions on compute 1.3 hardware and more cache hits on Fermi.
*/
extern "C" __global__ void gpu_rattle_nve_step_one_kernel(Scalar4 *d_pos,
Scalar4 *d_vel,
const Scalar3 *d_accel,
int3 *d_image,
unsigned int *d_group_members,
const unsigned int nwork,
const unsigned int offset,
BoxDim box,
Scalar deltaT,
bool limit,
Scalar limit_val)
{
// determine which particle this thread works on (MEM TRANSFER: 4 bytes)
int work_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (work_idx < nwork)
{
const unsigned int group_idx = work_idx + offset;
unsigned int idx = d_group_members[group_idx];
// do velocity verlet update
// r(t+deltaT) = r(t) + v(t)*deltaT + (1/2)a(t)*deltaT^2
// v(t+deltaT/2) = v(t) + (1/2)a*deltaT
// read the particle's position (MEM TRANSFER: 16 bytes)
Scalar4 postype = d_pos[idx];
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
// read the particle's velocity and acceleration (MEM TRANSFER: 32 bytes)
Scalar4 velmass = d_vel[idx];
Scalar3 vel = make_scalar3(velmass.x, velmass.y, velmass.z);
Scalar3 accel = d_accel[idx];
Scalar deltaT_half = Scalar(1.0/2.0)*deltaT;
Scalar3 half_vel;
half_vel = vel + deltaT_half*accel;
// update the position (FLOPS: 15)
Scalar3 dx = deltaT*half_vel;
// limit the movement of the particles
if (limit)
{
Scalar len = sqrtf(dot(dx, dx));
if (len > limit_val)
dx = dx / len * limit_val;
}
// FLOPS: 3
pos += dx;
// update the velocity (FLOPS: 3)
vel = half_vel;
// read in the particle's image (MEM TRANSFER: 16 bytes)
int3 image = d_image[idx];
// fix the periodic boundary conditions (FLOPS: 15)
box.wrap(pos, image);
// write out the results (MEM_TRANSFER: 48 bytes)
d_pos[idx] = make_scalar4(pos.x, pos.y, pos.z, postype.w);
d_vel[idx] = make_scalar4(vel.x, vel.y, vel.z, velmass.w);
d_image[idx] = image;
}
}
/*! \param d_pos array of particle positions
\param d_vel array of particle velocities
\param d_accel array of particle accelerations
\param d_image array of particle images
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param box Box dimensions for periodic boundary condition handling
\param deltaT timestep
\param limit If \a limit is true, then the dynamics will be limited so that particles do not move
a distance further than \a limit_val in one step.
\param limit_val Length to limit particle distance movement to
\param zero_force Set to true to always assign an acceleration of 0 to all particles in the group
See gpu_rattle_nve_step_one_kernel() for full documentation, this function is just a driver.
*/
hipError_t gpu_rattle_nve_step_one(Scalar4 *d_pos,
Scalar4 *d_vel,
const Scalar3 *d_accel,
int3 *d_image,
unsigned int *d_group_members,
const GPUPartition& gpu_partition,
const BoxDim& box,
Scalar deltaT,
bool limit,
Scalar limit_val,
unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
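// Query the kernel's maximum threads per block once and cache it in a static.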
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)gpu_rattle_nve_step_one_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
// iterate over active GPUs in reverse, to end up on first GPU when returning from this function
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
unsigned int nwork = range.second - range.first;
// setup the grid to run the kernel
dim3 grid( (nwork/run_block_size) + 1, 1, 1);
dim3 threads(run_block_size, 1, 1);
// run the kernel
hipLaunchKernelGGL((gpu_rattle_nve_step_one_kernel), dim3(grid), dim3(threads), 0, 0, d_pos, d_vel, d_accel, d_image, d_group_members, nwork, range.first, box, deltaT, limit, limit_val);
}
return hipSuccess;
}
//! NO_SQUISH angular part of the first half step
/*! \param d_orientation array of particle orientations
\param d_angmom array of particle conjugate quaternions
\param d_inertia array of moments of inertia
\param d_net_torque array of net torques
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param deltaT timestep
*/
extern "C" __global__ void gpu_rattle_nve_angular_step_one_kernel(Scalar4 *d_orientation,
Scalar4 *d_angmom,
const Scalar3 *d_inertia,
const Scalar4 *d_net_torque,
const unsigned int *d_group_members,
const unsigned int nwork,
const unsigned int offset,
Scalar deltaT,
Scalar scale)
{
// determine which particle this thread works on (MEM TRANSFER: 4 bytes)
int work_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (work_idx < nwork)
{
const unsigned int group_idx = work_idx + offset;
unsigned int idx = d_group_members[group_idx];
// read the particle's orientation, conjugate quaternion, moment of inertia and net torque
quat<Scalar> q(d_orientation[idx]);
quat<Scalar> p(d_angmom[idx]);
vec3<Scalar> t(d_net_torque[idx]);
vec3<Scalar> I(d_inertia[idx]);
// rotate torque into principal frame
t = rotate(conj(q),t);
// check for zero moment of inertia
bool x_zero, y_zero, z_zero;
x_zero = (I.x < Scalar(EPSILON)); y_zero = (I.y < Scalar(EPSILON)); z_zero = (I.z < Scalar(EPSILON));
// ignore torque component along an axis for which the moment of inertia is zero
if (x_zero) t.x = Scalar(0.0);
if (y_zero) t.y = Scalar(0.0);
if (z_zero) t.z = Scalar(0.0);
// advance p(t)->p(t+deltaT/2), q(t)->q(t+deltaT)
p += deltaT*q*t;
p = p*scale;
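// Free-rotor update: exact sub-rotations about the principal axes applied in the
// symmetric order z, y, x, y, z (a Trotter-style splitting); each block below
// advances both the angular momentum quaternion p and the orientation q.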
quat<Scalar> p1, p2, p3; // permuted quaternions
quat<Scalar> q1, q2, q3;
Scalar phi1, cphi1, sphi1;
Scalar phi2, cphi2, sphi2;
Scalar phi3, cphi3, sphi3;
if (!z_zero)
{
p3 = quat<Scalar>(-p.v.z,vec3<Scalar>(p.v.y,-p.v.x,p.s));
q3 = quat<Scalar>(-q.v.z,vec3<Scalar>(q.v.y,-q.v.x,q.s));
phi3 = Scalar(1./4.)/I.z*dot(p,q3);
cphi3 = slow::cos(Scalar(1./2.)*deltaT*phi3);
sphi3 = slow::sin(Scalar(1./2.)*deltaT*phi3);
p=cphi3*p+sphi3*p3;
q=cphi3*q+sphi3*q3;
}
if (!y_zero)
{
p2 = quat<Scalar>(-p.v.y,vec3<Scalar>(-p.v.z,p.s,p.v.x));
q2 = quat<Scalar>(-q.v.y,vec3<Scalar>(-q.v.z,q.s,q.v.x));
phi2 = Scalar(1./4.)/I.y*dot(p,q2);
cphi2 = slow::cos(Scalar(1./2.)*deltaT*phi2);
sphi2 = slow::sin(Scalar(1./2.)*deltaT*phi2);
p=cphi2*p+sphi2*p2;
q=cphi2*q+sphi2*q2;
}
if (!x_zero)
{
p1 = quat<Scalar>(-p.v.x,vec3<Scalar>(p.s,p.v.z,-p.v.y));
q1 = quat<Scalar>(-q.v.x,vec3<Scalar>(q.s,q.v.z,-q.v.y));
phi1 = Scalar(1./4.)/I.x*dot(p,q1);
cphi1 = slow::cos(deltaT*phi1);
sphi1 = slow::sin(deltaT*phi1);
p=cphi1*p+sphi1*p1;
q=cphi1*q+sphi1*q1;
}
if (! y_zero)
{
p2 = quat<Scalar>(-p.v.y,vec3<Scalar>(-p.v.z,p.s,p.v.x));
q2 = quat<Scalar>(-q.v.y,vec3<Scalar>(-q.v.z,q.s,q.v.x));
phi2 = Scalar(1./4.)/I.y*dot(p,q2);
cphi2 = slow::cos(Scalar(1./2.)*deltaT*phi2);
sphi2 = slow::sin(Scalar(1./2.)*deltaT*phi2);
p=cphi2*p+sphi2*p2;
q=cphi2*q+sphi2*q2;
}
if (! z_zero)
{
p3 = quat<Scalar>(-p.v.z,vec3<Scalar>(p.v.y,-p.v.x,p.s));
q3 = quat<Scalar>(-q.v.z,vec3<Scalar>(q.v.y,-q.v.x,q.s));
phi3 = Scalar(1./4.)/I.z*dot(p,q3);
cphi3 = slow::cos(Scalar(1./2.)*deltaT*phi3);
sphi3 = slow::sin(Scalar(1./2.)*deltaT*phi3);
p=cphi3*p+sphi3*p3;
q=cphi3*q+sphi3*q3;
}
// renormalize (improves stability)
q = q*(Scalar(1.0)/slow::sqrt(norm2(q)));
d_orientation[idx] = quat_to_scalar4(q);
d_angmom[idx] = quat_to_scalar4(p);
}
}
/*! \param d_orientation array of particle orientations
\param d_angmom array of particle conjugate quaternions
\param d_inertia array of moments of inertia
\param d_net_torque array of net torques
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param deltaT timestep
*/
hipError_t gpu_rattle_nve_angular_step_one(Scalar4 *d_orientation,
Scalar4 *d_angmom,
const Scalar3 *d_inertia,
const Scalar4 *d_net_torque,
unsigned int *d_group_members,
const GPUPartition& gpu_partition,
Scalar deltaT,
Scalar scale,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void *)gpu_rattle_nve_angular_step_one_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
// iterate over active GPUs in reverse, to end up on first GPU when returning from this function
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
unsigned int nwork = range.second - range.first;
// setup the grid to run the kernel
dim3 grid( (nwork/run_block_size) + 1, 1, 1);
dim3 threads(run_block_size, 1, 1);
// run the kernel
hipLaunchKernelGGL((gpu_rattle_nve_angular_step_one_kernel), dim3(grid), dim3(threads), 0, 0, d_orientation, d_angmom, d_inertia, d_net_torque, d_group_members, nwork, range.first, deltaT, scale);
}
return hipSuccess;
}
//! NO_SQUISH angular part of the second half step
/*! \param d_orientation array of particle orientations
\param d_angmom array of particle conjugate quaternions
\param d_inertia array of moments of inertia
\param d_net_torque array of net torques
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param deltaT timestep
*/
extern "C" __global__ void gpu_rattle_nve_angular_step_two_kernel(const Scalar4 *d_orientation,
Scalar4 *d_angmom,
const Scalar3 *d_inertia,
const Scalar4 *d_net_torque,
unsigned int *d_group_members,
const unsigned int nwork,
const unsigned int offset,
Scalar deltaT,
Scalar scale)
{
// determine which particle this thread works on (MEM TRANSFER: 4 bytes)
int work_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (work_idx < nwork)
{
const unsigned int group_idx = work_idx + offset;
unsigned int idx = d_group_members[group_idx];
// read the particle's orientation, conjugate quaternion, moment of inertia and net torque
quat<Scalar> q(d_orientation[idx]);
quat<Scalar> p(d_angmom[idx]);
vec3<Scalar> t(d_net_torque[idx]);
vec3<Scalar> I(d_inertia[idx]);
// rotate torque into principal frame
t = rotate(conj(q),t);
// check for zero moment of inertia
bool x_zero, y_zero, z_zero;
x_zero = (I.x < Scalar(EPSILON)); y_zero = (I.y < Scalar(EPSILON)); z_zero = (I.z < Scalar(EPSILON));
// ignore torque component along an axis for which the moment of inertia is zero
if (x_zero) t.x = Scalar(0.0);
if (y_zero) t.y = Scalar(0.0);
if (z_zero) t.z = Scalar(0.0);
// rescale
p = p*scale;
// advance p(t)->p(t+deltaT/2), q(t)->q(t+deltaT)
p += deltaT*q*t;
d_angmom[idx] = quat_to_scalar4(p);
}
}
/*! \param d_orientation array of particle orientations
\param d_angmom array of particle conjugate quaternions
\param d_inertia array of moments of inertia
\param d_net_torque array of net torques
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param deltaT timestep
*/
hipError_t gpu_rattle_nve_angular_step_two(const Scalar4 *d_orientation,
Scalar4 *d_angmom,
const Scalar3 *d_inertia,
const Scalar4 *d_net_torque,
unsigned int *d_group_members,
const GPUPartition& gpu_partition,
Scalar deltaT,
Scalar scale,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void *)gpu_rattle_nve_angular_step_two_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
// iterate over active GPUs in reverse, to end up on first GPU when returning from this function
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
unsigned int nwork = range.second - range.first;
// setup the grid to run the kernel
dim3 grid( (nwork/run_block_size) + 1, 1, 1);
dim3 threads(run_block_size, 1, 1);
// run the kernel
hipLaunchKernelGGL((gpu_rattle_nve_angular_step_two_kernel), dim3(grid), dim3(threads), 0, 0, d_orientation, d_angmom, d_inertia, d_net_torque, d_group_members, nwork, range.first, deltaT, scale);
}
return hipSuccess;
}
| 730fc032a054d4958b6e44af5ba00037cf882085.cu | #include "hip/hip_runtime.h"
// Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "TwoStepRATTLENVEGPU.cuh"
#include "hoomd/VectorMath.h"
#include <assert.h>
//! Takes the first half-step forward in the velocity-verlet NVE integration on a group of particles
/*! \param d_pos array of particle positions
\param d_vel array of particle velocities
\param d_accel array of particle accelerations
\param d_image array of particle images
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param box Box dimensions for periodic boundary condition handling
\param deltaT timestep
\param limit If \a limit is true, then the dynamics will be limited so that particles do not move
a distance further than \a limit_val in one step.
\param limit_val Length to limit particle distance movement to
\param zero_force Set to true to always assign an acceleration of 0 to all particles in the group
This kernel must be executed with a 1D grid of any block size such that the number of threads is greater than or
equal to the number of members in the group. The kernel's implementation simply reads one particle in each thread
and updates that particle.
<b>Performance notes:</b>
Particle properties are read via the texture cache to optimize the bandwidth obtained with sparse groups. The writes
in sparse groups will not be coalesced. However, because ParticleGroup sorts the index list the writes will be as
contiguous as possible leading to fewer memory transactions on compute 1.3 hardware and more cache hits on Fermi.
*/
extern "C" __global__ void gpu_rattle_nve_step_one_kernel(Scalar4 *d_pos,
Scalar4 *d_vel,
const Scalar3 *d_accel,
int3 *d_image,
unsigned int *d_group_members,
const unsigned int nwork,
const unsigned int offset,
BoxDim box,
Scalar deltaT,
bool limit,
Scalar limit_val)
{
// determine which particle this thread works on (MEM TRANSFER: 4 bytes)
int work_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (work_idx < nwork)
{
const unsigned int group_idx = work_idx + offset;
unsigned int idx = d_group_members[group_idx];
// do velocity verlet update
// r(t+deltaT) = r(t) + v(t)*deltaT + (1/2)a(t)*deltaT^2
// v(t+deltaT/2) = v(t) + (1/2)a*deltaT
// read the particle's position (MEM TRANSFER: 16 bytes)
Scalar4 postype = d_pos[idx];
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
// read the particle's velocity and acceleration (MEM TRANSFER: 32 bytes)
Scalar4 velmass = d_vel[idx];
Scalar3 vel = make_scalar3(velmass.x, velmass.y, velmass.z);
Scalar3 accel = d_accel[idx];
Scalar deltaT_half = Scalar(1.0/2.0)*deltaT;
Scalar3 half_vel;
half_vel = vel + deltaT_half*accel;
// update the position (FLOPS: 15)
Scalar3 dx = deltaT*half_vel;
// limit the movement of the particles
if (limit)
{
Scalar len = sqrtf(dot(dx, dx));
if (len > limit_val)
dx = dx / len * limit_val;
}
// FLOPS: 3
pos += dx;
// update the velocity (FLOPS: 3)
vel = half_vel;
// read in the particle's image (MEM TRANSFER: 16 bytes)
int3 image = d_image[idx];
// fix the periodic boundary conditions (FLOPS: 15)
box.wrap(pos, image);
// write out the results (MEM_TRANSFER: 48 bytes)
d_pos[idx] = make_scalar4(pos.x, pos.y, pos.z, postype.w);
d_vel[idx] = make_scalar4(vel.x, vel.y, vel.z, velmass.w);
d_image[idx] = image;
}
}
/*! \param d_pos array of particle positions
\param d_vel array of particle velocities
\param d_accel array of particle accelerations
\param d_image array of particle images
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param box Box dimensions for periodic boundary condition handling
\param deltaT timestep
\param limit If \a limit is true, then the dynamics will be limited so that particles do not move
a distance further than \a limit_val in one step.
\param limit_val Length to limit particle distance movement to
\param zero_force Set to true to always assign an acceleration of 0 to all particles in the group
See gpu_rattle_nve_step_one_kernel() for full documentation, this function is just a driver.
*/
hipError_t gpu_rattle_nve_step_one(Scalar4 *d_pos,
Scalar4 *d_vel,
const Scalar3 *d_accel,
int3 *d_image,
unsigned int *d_group_members,
const GPUPartition& gpu_partition,
const BoxDim& box,
Scalar deltaT,
bool limit,
Scalar limit_val,
unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)gpu_rattle_nve_step_one_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
// iterate over active GPUs in reverse, to end up on first GPU when returning from this function
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
unsigned int nwork = range.second - range.first;
// setup the grid to run the kernel
dim3 grid( (nwork/run_block_size) + 1, 1, 1);
dim3 threads(run_block_size, 1, 1);
// run the kernel
hipLaunchKernelGGL((gpu_rattle_nve_step_one_kernel), dim3(grid), dim3(threads), 0, 0, d_pos, d_vel, d_accel, d_image, d_group_members, nwork, range.first, box, deltaT, limit, limit_val);
}
return hipSuccess;
}
//! NO_SQUISH angular part of the first half step
/*! \param d_orientation array of particle orientations
\param d_angmom array of particle conjugate quaternions
\param d_inertia array of moments of inertia
\param d_net_torque array of net torques
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param deltaT timestep
*/
extern "C" __global__ void gpu_rattle_nve_angular_step_one_kernel(Scalar4 *d_orientation,
Scalar4 *d_angmom,
const Scalar3 *d_inertia,
const Scalar4 *d_net_torque,
const unsigned int *d_group_members,
const unsigned int nwork,
const unsigned int offset,
Scalar deltaT,
Scalar scale)
{
// determine which particle this thread works on (MEM TRANSFER: 4 bytes)
int work_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (work_idx < nwork)
{
const unsigned int group_idx = work_idx + offset;
unsigned int idx = d_group_members[group_idx];
// read the particle's orientation, conjugate quaternion, moment of inertia and net torque
quat<Scalar> q(d_orientation[idx]);
quat<Scalar> p(d_angmom[idx]);
vec3<Scalar> t(d_net_torque[idx]);
vec3<Scalar> I(d_inertia[idx]);
// rotate torque into principal frame
t = rotate(conj(q),t);
// check for zero moment of inertia
bool x_zero, y_zero, z_zero;
x_zero = (I.x < Scalar(EPSILON)); y_zero = (I.y < Scalar(EPSILON)); z_zero = (I.z < Scalar(EPSILON));
// ignore torque component along an axis for which the moment of inertia is zero
if (x_zero) t.x = Scalar(0.0);
if (y_zero) t.y = Scalar(0.0);
if (z_zero) t.z = Scalar(0.0);
// advance p(t)->p(t+deltaT/2), q(t)->q(t+deltaT)
p += deltaT*q*t;
p = p*scale;
quat<Scalar> p1, p2, p3; // permuted quaternions
quat<Scalar> q1, q2, q3;
Scalar phi1, cphi1, sphi1;
Scalar phi2, cphi2, sphi2;
Scalar phi3, cphi3, sphi3;
if (!z_zero)
{
p3 = quat<Scalar>(-p.v.z,vec3<Scalar>(p.v.y,-p.v.x,p.s));
q3 = quat<Scalar>(-q.v.z,vec3<Scalar>(q.v.y,-q.v.x,q.s));
phi3 = Scalar(1./4.)/I.z*dot(p,q3);
cphi3 = slow::cos(Scalar(1./2.)*deltaT*phi3);
sphi3 = slow::sin(Scalar(1./2.)*deltaT*phi3);
p=cphi3*p+sphi3*p3;
q=cphi3*q+sphi3*q3;
}
if (!y_zero)
{
p2 = quat<Scalar>(-p.v.y,vec3<Scalar>(-p.v.z,p.s,p.v.x));
q2 = quat<Scalar>(-q.v.y,vec3<Scalar>(-q.v.z,q.s,q.v.x));
phi2 = Scalar(1./4.)/I.y*dot(p,q2);
cphi2 = slow::cos(Scalar(1./2.)*deltaT*phi2);
sphi2 = slow::sin(Scalar(1./2.)*deltaT*phi2);
p=cphi2*p+sphi2*p2;
q=cphi2*q+sphi2*q2;
}
if (!x_zero)
{
p1 = quat<Scalar>(-p.v.x,vec3<Scalar>(p.s,p.v.z,-p.v.y));
q1 = quat<Scalar>(-q.v.x,vec3<Scalar>(q.s,q.v.z,-q.v.y));
phi1 = Scalar(1./4.)/I.x*dot(p,q1);
cphi1 = slow::cos(deltaT*phi1);
sphi1 = slow::sin(deltaT*phi1);
p=cphi1*p+sphi1*p1;
q=cphi1*q+sphi1*q1;
}
if (! y_zero)
{
p2 = quat<Scalar>(-p.v.y,vec3<Scalar>(-p.v.z,p.s,p.v.x));
q2 = quat<Scalar>(-q.v.y,vec3<Scalar>(-q.v.z,q.s,q.v.x));
phi2 = Scalar(1./4.)/I.y*dot(p,q2);
cphi2 = slow::cos(Scalar(1./2.)*deltaT*phi2);
sphi2 = slow::sin(Scalar(1./2.)*deltaT*phi2);
p=cphi2*p+sphi2*p2;
q=cphi2*q+sphi2*q2;
}
if (! z_zero)
{
p3 = quat<Scalar>(-p.v.z,vec3<Scalar>(p.v.y,-p.v.x,p.s));
q3 = quat<Scalar>(-q.v.z,vec3<Scalar>(q.v.y,-q.v.x,q.s));
phi3 = Scalar(1./4.)/I.z*dot(p,q3);
cphi3 = slow::cos(Scalar(1./2.)*deltaT*phi3);
sphi3 = slow::sin(Scalar(1./2.)*deltaT*phi3);
p=cphi3*p+sphi3*p3;
q=cphi3*q+sphi3*q3;
}
// renormalize (improves stability)
q = q*(Scalar(1.0)/slow::sqrt(norm2(q)));
d_orientation[idx] = quat_to_scalar4(q);
d_angmom[idx] = quat_to_scalar4(p);
}
}
/*! \param d_orientation array of particle orientations
\param d_angmom array of particle conjugate quaternions
\param d_inertia array of moments of inertia
\param d_net_torque array of net torques
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param deltaT timestep
*/
hipError_t gpu_rattle_nve_angular_step_one(Scalar4 *d_orientation,
Scalar4 *d_angmom,
const Scalar3 *d_inertia,
const Scalar4 *d_net_torque,
unsigned int *d_group_members,
const GPUPartition& gpu_partition,
Scalar deltaT,
Scalar scale,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void *)gpu_rattle_nve_angular_step_one_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
// iterate over active GPUs in reverse, to end up on first GPU when returning from this function
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
unsigned int nwork = range.second - range.first;
// setup the grid to run the kernel
dim3 grid( (nwork/run_block_size) + 1, 1, 1);
dim3 threads(run_block_size, 1, 1);
// run the kernel
hipLaunchKernelGGL((gpu_rattle_nve_angular_step_one_kernel), dim3(grid), dim3(threads), 0, 0, d_orientation, d_angmom, d_inertia, d_net_torque, d_group_members, nwork, range.first, deltaT, scale);
}
return hipSuccess;
}
//! NO_SQUISH angular part of the second half step
/*! \param d_orientation array of particle orientations
\param d_angmom array of particle conjugate quaternions
\param d_inertia array of moments of inertia
\param d_net_torque array of net torques
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param deltaT timestep
*/
extern "C" __global__ void gpu_rattle_nve_angular_step_two_kernel(const Scalar4 *d_orientation,
Scalar4 *d_angmom,
const Scalar3 *d_inertia,
const Scalar4 *d_net_torque,
unsigned int *d_group_members,
const unsigned int nwork,
const unsigned int offset,
Scalar deltaT,
Scalar scale)
{
// determine which particle this thread works on (MEM TRANSFER: 4 bytes)
int work_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (work_idx < nwork)
{
const unsigned int group_idx = work_idx + offset;
unsigned int idx = d_group_members[group_idx];
// read the particle's orientation, conjugate quaternion, moment of inertia and net torque
quat<Scalar> q(d_orientation[idx]);
quat<Scalar> p(d_angmom[idx]);
vec3<Scalar> t(d_net_torque[idx]);
vec3<Scalar> I(d_inertia[idx]);
// rotate torque into principal frame
t = rotate(conj(q),t);
// check for zero moment of inertia
bool x_zero, y_zero, z_zero;
x_zero = (I.x < Scalar(EPSILON)); y_zero = (I.y < Scalar(EPSILON)); z_zero = (I.z < Scalar(EPSILON));
// ignore torque component along an axis for which the moment of inertia is zero
if (x_zero) t.x = Scalar(0.0);
if (y_zero) t.y = Scalar(0.0);
if (z_zero) t.z = Scalar(0.0);
// rescale
p = p*scale;
// advance p(t)->p(t+deltaT/2), q(t)->q(t+deltaT)
p += deltaT*q*t;
d_angmom[idx] = quat_to_scalar4(p);
}
}
/*! \param d_orientation array of particle orientations
\param d_angmom array of particle conjugate quaternions
\param d_inertia array of moments of inertia
\param d_net_torque array of net torques
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param deltaT timestep
*/
hipError_t gpu_rattle_nve_angular_step_two(const Scalar4 *d_orientation,
Scalar4 *d_angmom,
const Scalar3 *d_inertia,
const Scalar4 *d_net_torque,
unsigned int *d_group_members,
const GPUPartition& gpu_partition,
Scalar deltaT,
Scalar scale,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void *)gpu_rattle_nve_angular_step_two_kernel);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
// iterate over active GPUs in reverse, to end up on first GPU when returning from this function
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
unsigned int nwork = range.second - range.first;
// setup the grid to run the kernel
dim3 grid( (nwork/run_block_size) + 1, 1, 1);
dim3 threads(run_block_size, 1, 1);
// run the kernel
hipLaunchKernelGGL((gpu_rattle_nve_angular_step_two_kernel), dim3(grid), dim3(threads), 0, 0, d_orientation, d_angmom, d_inertia, d_net_torque, d_group_members, nwork, range.first, deltaT, scale);
}
return hipSuccess;
}
|
f0899fbbd5b52b9bac0d8acb4c405a317f185ca1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020 NVIDIA CORPORATION.
* Copyright (c) 2018-2020 Chris Choy ([email protected])
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
#include "coordinate_map_functors.cuh"
#include "coordinate_map_gpu.cuh"
#include "gpu.cuh"
#include "kernel_region.hpp"
#include "types.hpp"
#include "utils.hpp"
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/host_vector.h>
#include <torch/extension.h>
namespace minkowski {
using coordinate_type = int32_t;
using index_type = default_types::index_type;
using size_type = default_types::size_type;
/*
* The number of threads must be > coordinate_size
*/
__global__ void kernel_region_iterator_test(
coordinate_type const *__restrict__ p_coordinate,
size_type number_of_coordinates, //
gpu_kernel_region<coordinate_type> kernel,
coordinate_type *__restrict__ p_return_coordinates) {
extern __shared__ coordinate_type sh_coordinate[];
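// Shared-memory layout: 3 * CUDA_NUM_THREADS per-thread coordinate buffers
// (scratch, lower bound, upper bound), followed by the tensor_stride,
// kernel_size and dilation arrays shared by the whole block.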
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
size_type coordinate_size = kernel.coordinate_size();
size_type volume = kernel.volume();
coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size;
coordinate_type *sh_lb =
sh_coordinate + (CUDA_NUM_THREADS + tx) * coordinate_size;
coordinate_type *sh_ub =
sh_coordinate + (2 * CUDA_NUM_THREADS + tx) * coordinate_size;
index_type *sh_index = reinterpret_cast<index_type *>(
sh_coordinate + 3 * CUDA_NUM_THREADS * coordinate_size);
index_type *sh_tensor_stride = sh_index;
index_type *sh_kernel_size = sh_index + coordinate_size;
index_type *sh_dilation = sh_index + 2 * coordinate_size;
if (tx < coordinate_size - 1) {
sh_tensor_stride[tx] = kernel.tensor_stride()[tx];
sh_kernel_size[tx] = kernel.kernel_size()[tx];
sh_dilation[tx] = kernel.dilation()[tx];
}
__syncthreads();
if (x >= number_of_coordinates)
return;
// iterate and copy
index_type out_index = x * kernel.volume();
kernel.set_bounds(&p_coordinate[x * coordinate_size], sh_lb, sh_ub, sh_tmp);
for (auto const &coordinate : kernel) {
for (index_type i = 0; i < coordinate_size; ++i) {
p_return_coordinates[out_index * coordinate_size + i] = coordinate[i];
}
++out_index;
}
}
at::Tensor region_iterator_test(const torch::Tensor &coordinates,
const torch::Tensor &th_kernel_size) {
// Create TensorArgs. These record the names and positions of each tensor as
// parameters.
torch::TensorArg arg_coordinates(coordinates, "coordinates", 0);
torch::TensorArg arg_kernel_size(th_kernel_size, "kernel_size", 1);
torch::CheckedFrom c = "region_iterator_test";
torch::checkContiguous(c, arg_coordinates);
torch::checkContiguous(c, arg_kernel_size);
// must match coordinate_type
torch::checkScalarType(c, arg_coordinates, torch::kInt);
torch::checkScalarType(c, arg_kernel_size, torch::kInt);
torch::checkBackend(c, arg_coordinates.tensor, torch::Backend::CUDA);
torch::checkBackend(c, arg_kernel_size.tensor, torch::Backend::CPU);
torch::checkDim(c, arg_coordinates, 2);
torch::checkDim(c, arg_kernel_size, 1);
auto const N = (index_type)coordinates.size(0);
auto const coordinate_size = (index_type)coordinates.size(1);
coordinate_type *p_coordinate = coordinates.data_ptr<coordinate_type>();
coordinate_type *p_kernel_size = th_kernel_size.data_ptr<coordinate_type>();
default_types::stride_type tensor_stride(coordinate_size - 1);
default_types::stride_type kernel_size(coordinate_size - 1);
default_types::stride_type dilation(coordinate_size - 1);
for (index_type i = 0; i < coordinate_size - 1; ++i) {
tensor_stride[i] = 1;
kernel_size[i] = p_kernel_size[i];
dilation[i] = 1;
}
auto cpu_kernel = cpu_kernel_region<coordinate_type>(
RegionType::HYPER_CUBE, coordinate_size, tensor_stride.data(),
kernel_size.data(), dilation.data());
auto kernel = gpu_kernel_region<coordinate_type>(cpu_kernel.to_gpu());
LOG_DEBUG("initialize vectors");
torch::Tensor out_coordinates = torch::empty(
{N * kernel.volume(), coordinate_size}, coordinates.options());
uint32_t shared_memory_size_in_bytes =
3 * CUDA_NUM_THREADS * coordinate_size * sizeof(coordinate_type) +
3 * coordinate_size * sizeof(index_type);
hipLaunchKernelGGL(( kernel_region_iterator_test), dim3(GET_BLOCKS(N, CUDA_NUM_THREADS)),
dim3(CUDA_NUM_THREADS),
shared_memory_size_in_bytes, 0,
p_coordinate, //
N, //
kernel, //
out_coordinates.data_ptr<coordinate_type>());
LOG_DEBUG("End call");
CUDA_CHECK(hipStreamSynchronize(0));
return out_coordinates;
}
std::tuple<std::pair<cpu_in_maps, cpu_out_maps>, size_type, double>
kernel_map_test(const torch::Tensor &in_coordinates,
const torch::Tensor &out_coordinates,
const torch::Tensor &kernel_size,
uint32_t occupancy, //
uint32_t thread_dim) {
// Create TensorArgs. These record the names and positions of each tensor as
// parameters.
torch::TensorArg arg_in_coordinates(in_coordinates, "coordinates", 0);
torch::TensorArg arg_out_coordinates(out_coordinates, "coordinates", 1);
torch::TensorArg arg_kernel_size(kernel_size, "kernel_size", 2);
torch::CheckedFrom c = "region_iterator_test";
torch::checkContiguous(c, arg_in_coordinates);
torch::checkContiguous(c, arg_out_coordinates);
torch::checkContiguous(c, arg_kernel_size);
// must match coordinate_type
torch::checkScalarType(c, arg_in_coordinates, torch::kInt);
torch::checkScalarType(c, arg_out_coordinates, torch::kInt);
torch::checkScalarType(c, arg_kernel_size, torch::kInt);
torch::checkBackend(c, arg_in_coordinates.tensor, torch::Backend::CUDA);
torch::checkBackend(c, arg_out_coordinates.tensor, torch::Backend::CUDA);
torch::checkBackend(c, arg_kernel_size.tensor, torch::Backend::CPU);
torch::checkDim(c, arg_in_coordinates, 2);
torch::checkDim(c, arg_out_coordinates, 2);
torch::checkDim(c, arg_kernel_size, 1);
auto const N_in = (index_type)in_coordinates.size(0);
auto const D = (index_type)in_coordinates.size(1);
auto const N_out = (index_type)out_coordinates.size(0);
auto const D_out = (index_type)out_coordinates.size(1);
ASSERT(D == D_out, "dimension mismatch");
coordinate_type const *ptr_in = in_coordinates.data_ptr<coordinate_type>();
coordinate_type const *ptr_out = out_coordinates.data_ptr<coordinate_type>();
CoordinateMapGPU<coordinate_type> in_map{N_in, D, occupancy};
CoordinateMapGPU<coordinate_type> out_map{N_out, D, occupancy};
auto in_coordinate_range = coordinate_range<coordinate_type>(N_in, D, ptr_in);
in_map.insert<false>(in_coordinate_range.begin(), // key begin
in_coordinate_range.end()); // key end
auto out_coordinate_range =
coordinate_range<coordinate_type>(N_out, D, ptr_out);
out_map.insert<false>(out_coordinate_range.begin(), // key begin
out_coordinate_range.end()); // key end
LOG_DEBUG("coordinate initialization");
// Kernel region
coordinate_type *p_kernel_size = kernel_size.data_ptr<coordinate_type>();
default_types::stride_type tensor_stride;
default_types::stride_type s_kernel_size;
default_types::stride_type dilation;
for (index_type i = 0; i < D - 1; ++i) {
tensor_stride.push_back(1);
s_kernel_size.push_back(p_kernel_size[i]);
dilation.push_back(1);
}
auto region = cpu_kernel_region<coordinate_type>(
RegionType::HYPER_CUBE, D, tensor_stride.data(), s_kernel_size.data(),
dilation.data());
LOG_DEBUG("cpu_kernel_region initialized with volume", region.volume());
region.to_gpu();
auto gpu_region = gpu_kernel_region<coordinate_type>(region);
LOG_DEBUG("gpu_kernel_region initialization");
timer t;
t.tic();
auto kernel_map = in_map.kernel_map(
out_map, gpu_region, CUDAKernelMapMode::SPEED_OPTIMIZED, thread_dim);
double k_time = t.toc();
const auto volume = region.volume();
LOG_DEBUG("kernel_map done");
cpu_in_maps in_maps(volume);
cpu_in_maps out_maps(volume);
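// Copy each kernel offset's in/out index lists from the device back to host vectors.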
for (index_type i = 0; i < volume; ++i) {
size_type size = kernel_map.kernels.size(i);
LOG_DEBUG("kernel index", i, "/", volume, "with size", size);
in_maps[i].resize(size);
out_maps[i].resize(size);
if (size > 0) {
hipMemcpy(in_maps[i].data(), //
kernel_map.in_maps.begin(i), sizeof(index_type) * size,
hipMemcpyDeviceToHost);
hipMemcpy(out_maps[i].data(), //
kernel_map.out_maps.begin(i), sizeof(index_type) * size,
hipMemcpyDeviceToHost);
}
}
CUDA_CHECK(hipStreamSynchronize(0));
return std::make_tuple(std::make_pair(in_maps, out_maps), out_map.size(),
k_time);
}
} // namespace minkowski
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("region_iterator_test", &minkowski::region_iterator_test,
"Minkowski Engine region iterator test");
m.def("kernel_map_test", &minkowski::kernel_map_test,
"Minkowski Engine kernel map test");
}
| f0899fbbd5b52b9bac0d8acb4c405a317f185ca1.cu | /*
* Copyright (c) 2020 NVIDIA CORPORATION.
* Copyright (c) 2018-2020 Chris Choy ([email protected])
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
#include "coordinate_map_functors.cuh"
#include "coordinate_map_gpu.cuh"
#include "gpu.cuh"
#include "kernel_region.hpp"
#include "types.hpp"
#include "utils.hpp"
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/host_vector.h>
#include <torch/extension.h>
namespace minkowski {
using coordinate_type = int32_t;
using index_type = default_types::index_type;
using size_type = default_types::size_type;
/*
* The number of threads must be > coordinate_size
*/
__global__ void kernel_region_iterator_test(
coordinate_type const *__restrict__ p_coordinate,
size_type number_of_coordinates, //
gpu_kernel_region<coordinate_type> kernel,
coordinate_type *__restrict__ p_return_coordinates) {
extern __shared__ coordinate_type sh_coordinate[];
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
size_type coordinate_size = kernel.coordinate_size();
size_type volume = kernel.volume();
coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size;
coordinate_type *sh_lb =
sh_coordinate + (CUDA_NUM_THREADS + tx) * coordinate_size;
coordinate_type *sh_ub =
sh_coordinate + (2 * CUDA_NUM_THREADS + tx) * coordinate_size;
index_type *sh_index = reinterpret_cast<index_type *>(
sh_coordinate + 3 * CUDA_NUM_THREADS * coordinate_size);
index_type *sh_tensor_stride = sh_index;
index_type *sh_kernel_size = sh_index + coordinate_size;
index_type *sh_dilation = sh_index + 2 * coordinate_size;
if (tx < coordinate_size - 1) {
sh_tensor_stride[tx] = kernel.tensor_stride()[tx];
sh_kernel_size[tx] = kernel.kernel_size()[tx];
sh_dilation[tx] = kernel.dilation()[tx];
}
__syncthreads();
if (x >= number_of_coordinates)
return;
// iterate and copy
index_type out_index = x * kernel.volume();
kernel.set_bounds(&p_coordinate[x * coordinate_size], sh_lb, sh_ub, sh_tmp);
for (auto const &coordinate : kernel) {
for (index_type i = 0; i < coordinate_size; ++i) {
p_return_coordinates[out_index * coordinate_size + i] = coordinate[i];
}
++out_index;
}
}
at::Tensor region_iterator_test(const torch::Tensor &coordinates,
const torch::Tensor &th_kernel_size) {
// Create TensorArgs. These record the names and positions of each tensor as
// parameters.
torch::TensorArg arg_coordinates(coordinates, "coordinates", 0);
torch::TensorArg arg_kernel_size(th_kernel_size, "kernel_size", 1);
torch::CheckedFrom c = "region_iterator_test";
torch::checkContiguous(c, arg_coordinates);
torch::checkContiguous(c, arg_kernel_size);
// must match coordinate_type
torch::checkScalarType(c, arg_coordinates, torch::kInt);
torch::checkScalarType(c, arg_kernel_size, torch::kInt);
torch::checkBackend(c, arg_coordinates.tensor, torch::Backend::CUDA);
torch::checkBackend(c, arg_kernel_size.tensor, torch::Backend::CPU);
torch::checkDim(c, arg_coordinates, 2);
torch::checkDim(c, arg_kernel_size, 1);
auto const N = (index_type)coordinates.size(0);
auto const coordinate_size = (index_type)coordinates.size(1);
coordinate_type *p_coordinate = coordinates.data_ptr<coordinate_type>();
coordinate_type *p_kernel_size = th_kernel_size.data_ptr<coordinate_type>();
default_types::stride_type tensor_stride(coordinate_size - 1);
default_types::stride_type kernel_size(coordinate_size - 1);
default_types::stride_type dilation(coordinate_size - 1);
for (index_type i = 0; i < coordinate_size - 1; ++i) {
tensor_stride[i] = 1;
kernel_size[i] = p_kernel_size[i];
dilation[i] = 1;
}
auto cpu_kernel = cpu_kernel_region<coordinate_type>(
RegionType::HYPER_CUBE, coordinate_size, tensor_stride.data(),
kernel_size.data(), dilation.data());
auto kernel = gpu_kernel_region<coordinate_type>(cpu_kernel.to_gpu());
LOG_DEBUG("initialize vectors");
torch::Tensor out_coordinates = torch::empty(
{N * kernel.volume(), coordinate_size}, coordinates.options());
uint32_t shared_memory_size_in_bytes =
3 * CUDA_NUM_THREADS * coordinate_size * sizeof(coordinate_type) +
3 * coordinate_size * sizeof(index_type);
kernel_region_iterator_test<<<GET_BLOCKS(N, CUDA_NUM_THREADS),
CUDA_NUM_THREADS,
shared_memory_size_in_bytes>>>(
p_coordinate, //
N, //
kernel, //
out_coordinates.data_ptr<coordinate_type>());
LOG_DEBUG("End call");
CUDA_CHECK(cudaStreamSynchronize(0));
return out_coordinates;
}
std::tuple<std::pair<cpu_in_maps, cpu_out_maps>, size_type, double>
kernel_map_test(const torch::Tensor &in_coordinates,
const torch::Tensor &out_coordinates,
const torch::Tensor &kernel_size,
uint32_t occupancy, //
uint32_t thread_dim) {
// Create TensorArgs. These record the names and positions of each tensor as
// parameters.
torch::TensorArg arg_in_coordinates(in_coordinates, "coordinates", 0);
torch::TensorArg arg_out_coordinates(out_coordinates, "coordinates", 1);
torch::TensorArg arg_kernel_size(kernel_size, "kernel_size", 2);
torch::CheckedFrom c = "region_iterator_test";
torch::checkContiguous(c, arg_in_coordinates);
torch::checkContiguous(c, arg_out_coordinates);
torch::checkContiguous(c, arg_kernel_size);
// must match coordinate_type
torch::checkScalarType(c, arg_in_coordinates, torch::kInt);
torch::checkScalarType(c, arg_out_coordinates, torch::kInt);
torch::checkScalarType(c, arg_kernel_size, torch::kInt);
torch::checkBackend(c, arg_in_coordinates.tensor, torch::Backend::CUDA);
torch::checkBackend(c, arg_out_coordinates.tensor, torch::Backend::CUDA);
torch::checkBackend(c, arg_kernel_size.tensor, torch::Backend::CPU);
torch::checkDim(c, arg_in_coordinates, 2);
torch::checkDim(c, arg_out_coordinates, 2);
torch::checkDim(c, arg_kernel_size, 1);
auto const N_in = (index_type)in_coordinates.size(0);
auto const D = (index_type)in_coordinates.size(1);
auto const N_out = (index_type)out_coordinates.size(0);
auto const D_out = (index_type)out_coordinates.size(1);
ASSERT(D == D_out, "dimension mismatch");
coordinate_type const *ptr_in = in_coordinates.data_ptr<coordinate_type>();
coordinate_type const *ptr_out = out_coordinates.data_ptr<coordinate_type>();
CoordinateMapGPU<coordinate_type> in_map{N_in, D, occupancy};
CoordinateMapGPU<coordinate_type> out_map{N_out, D, occupancy};
auto in_coordinate_range = coordinate_range<coordinate_type>(N_in, D, ptr_in);
in_map.insert<false>(in_coordinate_range.begin(), // key begin
in_coordinate_range.end()); // key end
auto out_coordinate_range =
coordinate_range<coordinate_type>(N_out, D, ptr_out);
out_map.insert<false>(out_coordinate_range.begin(), // key begin
out_coordinate_range.end()); // key end
LOG_DEBUG("coordinate initialization");
// Kernel region
coordinate_type *p_kernel_size = kernel_size.data_ptr<coordinate_type>();
default_types::stride_type tensor_stride;
default_types::stride_type s_kernel_size;
default_types::stride_type dilation;
for (index_type i = 0; i < D - 1; ++i) {
tensor_stride.push_back(1);
s_kernel_size.push_back(p_kernel_size[i]);
dilation.push_back(1);
}
auto region = cpu_kernel_region<coordinate_type>(
RegionType::HYPER_CUBE, D, tensor_stride.data(), s_kernel_size.data(),
dilation.data());
LOG_DEBUG("cpu_kernel_region initialized with volume", region.volume());
region.to_gpu();
auto gpu_region = gpu_kernel_region<coordinate_type>(region);
LOG_DEBUG("gpu_kernel_region initialization");
timer t;
t.tic();
auto kernel_map = in_map.kernel_map(
out_map, gpu_region, CUDAKernelMapMode::SPEED_OPTIMIZED, thread_dim);
double k_time = t.toc();
const auto volume = region.volume();
LOG_DEBUG("kernel_map done");
cpu_in_maps in_maps(volume);
cpu_in_maps out_maps(volume);
for (index_type i = 0; i < volume; ++i) {
size_type size = kernel_map.kernels.size(i);
LOG_DEBUG("kernel index", i, "/", volume, "with size", size);
in_maps[i].resize(size);
out_maps[i].resize(size);
if (size > 0) {
cudaMemcpy(in_maps[i].data(), //
kernel_map.in_maps.begin(i), sizeof(index_type) * size,
cudaMemcpyDeviceToHost);
cudaMemcpy(out_maps[i].data(), //
kernel_map.out_maps.begin(i), sizeof(index_type) * size,
cudaMemcpyDeviceToHost);
}
}
CUDA_CHECK(cudaStreamSynchronize(0));
return std::make_tuple(std::make_pair(in_maps, out_maps), out_map.size(),
k_time);
}
} // namespace minkowski
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("region_iterator_test", &minkowski::region_iterator_test,
"Minkowski Engine region iterator test");
m.def("kernel_map_test", &minkowski::kernel_map_test,
"Minkowski Engine kernel map test");
}
|
de690d651423f38ff2b5775532568622005acde9.hip | // !!! This is a file automatically generated by hipify!!!
/*************************************************************************
* Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include "hip/hip_runtime.h"
#include "common.h"
void print_header() {
PRINT("# %10s %12s %6s %6s out-of-place in-place \n", "", "", "", "");
PRINT("# %10s %12s %6s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "size", "count", "type", "root",
"time", "algbw", "busbw", "error", "time", "algbw", "busbw", "error");
PRINT("# %10s %12s %6s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "(B)", "(elements)", "", "",
"(us)", "(GB/s)", "(GB/s)", "", "(us)", "(GB/s)", "(GB/s)", "");
}
void print_line_header (size_t size, size_t count, const char *typeName, const char *opName, int root) {
PRINT("%12li %12li %8s %6i", size, count, typeName, root);
}
void BroadcastGetCollByteCount(size_t *sendcount, size_t *recvcount, size_t *paramcount, size_t *sendInplaceOffset, size_t *recvInplaceOffset, size_t count, int nranks) {
*sendcount = count;
*recvcount = count;
*sendInplaceOffset = 0;
*recvInplaceOffset = 0;
*paramcount = *sendcount;
}
testResult_t BroadcastInitData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int rep, int in_place) {
size_t sendcount = args->sendBytes / wordSize(type);
size_t recvcount = args->expectedBytes / wordSize(type);
for (int i=0; i<args->nGpus; i++) {
int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus + i;
CUDACHECK(hipSetDevice(gpuid));
int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i);
CUDACHECK(hipMemset(args->recvbuffs[i], 0, args->expectedBytes));
void* data = in_place ? args->recvbuffs[i] : args->sendbuffs[i];
if (rank == root) TESTCHECK(InitData(data, sendcount, type, rep, rank));
TESTCHECK(InitData(args->expected[i], recvcount, type, rep, root));
CUDACHECK(hipDeviceSynchronize());
}
return testSuccess;
}
void BroadcastGetBw(size_t count, int typesize, double sec, double* algBw, double* busBw, int nranks) {
double baseBw = (double)(count * typesize) / 1.0E9 / sec;
*algBw = baseBw;
double factor = 1;
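// A broadcast moves each byte over the bus once, so bus bandwidth equals algorithmic bandwidth.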
*busBw = baseBw * factor;
}
testResult_t BroadcastRunColl(void* sendbuff, void* recvbuff, size_t count, ncclDataType_t type, ncclRedOp_t op, int root, ncclComm_t comm, hipStream_t stream) {
int rank;
NCCLCHECK(ncclCommUserRank(comm, &rank));
#if NCCL_MAJOR >= 2 && NCCL_MINOR >= 2
NCCLCHECK(ncclBroadcast(sendbuff, recvbuff, count, type, root, comm, stream));
#else
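// Pre-2.2 NCCL only exposes the in-place ncclBcast: the root broadcasts from
// sendbuff while every other rank receives into recvbuff.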
if (rank == root) {
NCCLCHECK(ncclBcast(sendbuff, count, type, root, comm, stream));
} else {
NCCLCHECK(ncclBcast(recvbuff, count, type, root, comm, stream));
}
#endif
return testSuccess;
}
struct testColl broadcastTest = {
"Broadcast",
BroadcastGetCollByteCount,
BroadcastInitData,
BroadcastGetBw,
BroadcastRunColl
};
void BroadcastGetBuffSize(size_t *sendcount, size_t *recvcount, size_t count, int nranks) {
size_t paramcount, sendInplaceOffset, recvInplaceOffset;
BroadcastGetCollByteCount(sendcount, recvcount, ¶mcount, &sendInplaceOffset, &recvInplaceOffset, count, nranks);
}
testResult_t BroadcastRunTest(struct threadArgs* args, int root, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName) {
args->collTest = &broadcastTest;
ncclDataType_t *run_types;
const char **run_typenames;
int type_count;
int begin_root, end_root;
if ((int)type != -1) {
type_count = 1;
run_types = &type;
run_typenames = &typeName;
} else {
type_count = ncclNumTypes;
run_types = test_types;
run_typenames = test_typenames;
}
if (root != -1) {
begin_root = end_root = root;
} else {
begin_root = 0;
end_root = args->nProcs*args->nThreads*args->nGpus-1;
}
for (int i=0; i<type_count; i++) {
for (int j=begin_root; j<=end_root; j++) {
TESTCHECK(TimeTest(args, run_types[i], run_typenames[i], (ncclRedOp_t)0, "", j));
}
}
return testSuccess;
}
// refer to https://github.com/NVIDIA/nccl-tests/issues/50
#if defined(__APPLE__) && defined(__MACH__)
struct testEngine ncclTestEngine = {
BroadcastGetBuffSize,
BroadcastRunTest
};
#else
struct testEngine broadcastEngine = {
BroadcastGetBuffSize,
BroadcastRunTest
};
#pragma weak ncclTestEngine=broadcastEngine
#endif
| de690d651423f38ff2b5775532568622005acde9.cu | /*************************************************************************
* Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include "cuda_runtime.h"
#include "common.h"
void print_header() {
PRINT("# %10s %12s %6s %6s out-of-place in-place \n", "", "", "", "");
PRINT("# %10s %12s %6s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "size", "count", "type", "root",
"time", "algbw", "busbw", "error", "time", "algbw", "busbw", "error");
PRINT("# %10s %12s %6s %6s %7s %6s %6s %5s %7s %6s %6s %5s\n", "(B)", "(elements)", "", "",
"(us)", "(GB/s)", "(GB/s)", "", "(us)", "(GB/s)", "(GB/s)", "");
}
void print_line_header (size_t size, size_t count, const char *typeName, const char *opName, int root) {
PRINT("%12li %12li %8s %6i", size, count, typeName, root);
}
void BroadcastGetCollByteCount(size_t *sendcount, size_t *recvcount, size_t *paramcount, size_t *sendInplaceOffset, size_t *recvInplaceOffset, size_t count, int nranks) {
*sendcount = count;
*recvcount = count;
*sendInplaceOffset = 0;
*recvInplaceOffset = 0;
*paramcount = *sendcount;
}
testResult_t BroadcastInitData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int rep, int in_place) {
size_t sendcount = args->sendBytes / wordSize(type);
size_t recvcount = args->expectedBytes / wordSize(type);
for (int i=0; i<args->nGpus; i++) {
int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus + i;
CUDACHECK(cudaSetDevice(gpuid));
int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i);
CUDACHECK(cudaMemset(args->recvbuffs[i], 0, args->expectedBytes));
void* data = in_place ? args->recvbuffs[i] : args->sendbuffs[i];
if (rank == root) TESTCHECK(InitData(data, sendcount, type, rep, rank));
TESTCHECK(InitData(args->expected[i], recvcount, type, rep, root));
CUDACHECK(cudaDeviceSynchronize());
}
return testSuccess;
}
void BroadcastGetBw(size_t count, int typesize, double sec, double* algBw, double* busBw, int nranks) {
double baseBw = (double)(count * typesize) / 1.0E9 / sec;
*algBw = baseBw;
double factor = 1;
*busBw = baseBw * factor;
}
testResult_t BroadcastRunColl(void* sendbuff, void* recvbuff, size_t count, ncclDataType_t type, ncclRedOp_t op, int root, ncclComm_t comm, cudaStream_t stream) {
int rank;
NCCLCHECK(ncclCommUserRank(comm, &rank));
#if NCCL_MAJOR >= 2 && NCCL_MINOR >= 2
NCCLCHECK(ncclBroadcast(sendbuff, recvbuff, count, type, root, comm, stream));
#else
if (rank == root) {
NCCLCHECK(ncclBcast(sendbuff, count, type, root, comm, stream));
} else {
NCCLCHECK(ncclBcast(recvbuff, count, type, root, comm, stream));
}
#endif
return testSuccess;
}
struct testColl broadcastTest = {
"Broadcast",
BroadcastGetCollByteCount,
BroadcastInitData,
BroadcastGetBw,
BroadcastRunColl
};
void BroadcastGetBuffSize(size_t *sendcount, size_t *recvcount, size_t count, int nranks) {
size_t paramcount, sendInplaceOffset, recvInplaceOffset;
BroadcastGetCollByteCount(sendcount, recvcount, ¶mcount, &sendInplaceOffset, &recvInplaceOffset, count, nranks);
}
testResult_t BroadcastRunTest(struct threadArgs* args, int root, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName) {
args->collTest = &broadcastTest;
ncclDataType_t *run_types;
const char **run_typenames;
int type_count;
int begin_root, end_root;
if ((int)type != -1) {
type_count = 1;
run_types = &type;
run_typenames = &typeName;
} else {
type_count = ncclNumTypes;
run_types = test_types;
run_typenames = test_typenames;
}
if (root != -1) {
begin_root = end_root = root;
} else {
begin_root = 0;
end_root = args->nProcs*args->nThreads*args->nGpus-1;
}
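// When root is -1, every global rank takes a turn as the broadcast root.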
for (int i=0; i<type_count; i++) {
for (int j=begin_root; j<=end_root; j++) {
TESTCHECK(TimeTest(args, run_types[i], run_typenames[i], (ncclRedOp_t)0, "", j));
}
}
return testSuccess;
}
// refer to https://github.com/NVIDIA/nccl-tests/issues/50
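// macOS (Mach-O) does not support the weak-alias form of #pragma weak, so ncclTestEngine is defined directly on that platform.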
#if defined(__APPLE__) && defined(__MACH__)
struct testEngine ncclTestEngine = {
BroadcastGetBuffSize,
BroadcastRunTest
};
#else
struct testEngine broadcastEngine = {
BroadcastGetBuffSize,
BroadcastRunTest
};
#pragma weak ncclTestEngine=broadcastEngine
#endif
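// Example invocation of the resulting test binary (assuming the standard nccl-tests build and flags): ./build/broadcast_perf -b 8 -e 128M -f 2 -g 4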
|
9f76290f7f98af447bdaebc1e8bc3f01e69d0b63.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <assert.h>
#ifndef THREADS_PER_BLOCK
#define THREADS_PER_BLOCK 1024
#endif
//#define VERBOSE
//#define PROF
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
inline void __cudaSafeCall( hipError_t err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( hipSuccess != err )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
hipError_t err = hipGetLastError();
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
// More careful checking: synchronize so asynchronous kernel errors are caught here.
// However, this will affect performance, so comment it out if not needed.
err = hipDeviceSynchronize();
if( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
#endif
}
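// vc: element-wise device copy; each thread copies one float from dB into dA.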
__global__ void vc(float *dA, float *dB, int N) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < N) {
dA[id] = dB[id];
}
}
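// vcCUDA: C-linkage host wrapper that stages B[start..end] through device memory, runs the vc kernel, and copies the result back into A[start..end].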
extern "C" {
void vcCUDA(float* A, float *B, int start, int end, int GPUN) {
float *dA, *dB;
if (GPUN > 0) {
assert(end - start + 1 == GPUN);
#ifdef VERBOSE
printf("In vcCUDA\n");
printf("\t GPUN: %d\n", GPUN);
printf("\t range: %d..%d\n", start, end);
#endif
#ifdef PROF
hipEvent_t startCudaKernelEvent, endCudaKernelEvent;
CudaSafeCall(hipEventCreate(&startCudaKernelEvent));
CudaSafeCall(hipEventCreate(&endCudaKernelEvent));
#endif
CudaSafeCall(hipMalloc(&dA, sizeof(float) * GPUN));
CudaSafeCall(hipMalloc(&dB, sizeof(float) * GPUN));
CudaSafeCall(hipMemcpy(dB, B + start, sizeof(float) * GPUN, hipMemcpyHostToDevice));
#ifdef PROF
CudaSafeCall(hipEventRecord(startCudaKernelEvent));
#endif
hipLaunchKernelGGL(( vc), dim3(ceil(((float)GPUN)/THREADS_PER_BLOCK)), dim3(THREADS_PER_BLOCK), 0, 0, dA, dB, GPUN);
#ifdef PROF
CudaSafeCall(hipEventRecord(endCudaKernelEvent));
CudaSafeCall(hipEventSynchronize(endCudaKernelEvent));
#endif
CudaCheckError();
CudaSafeCall(hipDeviceSynchronize());
CudaSafeCall(hipMemcpy(A + start, dA, sizeof(float) * GPUN, hipMemcpyDeviceToHost));
#ifdef PROF
float msecKernel;
CudaSafeCall(hipEventElapsedTime(&msecKernel, startCudaKernelEvent, endCudaKernelEvent));
printf("CUDA kernel: %lf msec\n", msecKernel);
#endif
CudaSafeCall(hipFree(dA));
CudaSafeCall(hipFree(dB));
}
}
}
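// Hypothetical usage from host code (not part of the original file):
// float A[1024], B[1024];
// /* ... fill B ... */
// vcCUDA(A, B, 0, 1023, 1024); // copies B[0..1023] into A[0..1023] via the GPU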
| 9f76290f7f98af447bdaebc1e8bc3f01e69d0b63.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <assert.h>
#ifndef THREADS_PER_BLOCK
#define THREADS_PER_BLOCK 1024
#endif
//#define VERBOSE
//#define PROF
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
cudaError err = cudaGetLastError();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
// More careful checking: synchronize so asynchronous kernel errors are caught here.
// However, this will affect performance, so comment it out if not needed.
err = cudaDeviceSynchronize();
if( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
}
__global__ void vc(float *dA, float *dB, int N) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < N) {
dA[id] = dB[id];
}
}
extern "C" {
void vcCUDA(float* A, float *B, int start, int end, int GPUN) {
float *dA, *dB;
if (GPUN > 0) {
assert(end - start + 1 == GPUN);
#ifdef VERBOSE
printf("In vcCUDA\n");
printf("\t GPUN: %d\n", GPUN);
printf("\t range: %d..%d\n", start, end);
#endif
#ifdef PROF
cudaEvent_t startCudaKernelEvent, endCudaKernelEvent;
CudaSafeCall(cudaEventCreate(&startCudaKernelEvent));
CudaSafeCall(cudaEventCreate(&endCudaKernelEvent));
#endif
CudaSafeCall(cudaMalloc(&dA, sizeof(float) * GPUN));
CudaSafeCall(cudaMalloc(&dB, sizeof(float) * GPUN));
CudaSafeCall(cudaMemcpy(dB, B + start, sizeof(float) * GPUN, cudaMemcpyHostToDevice));
#ifdef PROF
CudaSafeCall(cudaEventRecord(startCudaKernelEvent));
#endif
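// One thread per element: the grid is rounded up with ceil(), and the id < N guard in vc() masks off the extra threads in the last block.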
vc<<<ceil(((float)GPUN)/THREADS_PER_BLOCK), THREADS_PER_BLOCK>>>(dA, dB, GPUN);
#ifdef PROF
CudaSafeCall(cudaEventRecord(endCudaKernelEvent));
CudaSafeCall(cudaEventSynchronize(endCudaKernelEvent));
#endif
CudaCheckError();
CudaSafeCall(cudaDeviceSynchronize());
CudaSafeCall(cudaMemcpy(A + start, dA, sizeof(float) * GPUN, cudaMemcpyDeviceToHost));
#ifdef PROF
float msecKernel;
CudaSafeCall(cudaEventElapsedTime(&msecKernel, startCudaKernelEvent, endCudaKernelEvent));
printf("CUDA kernel: %lf msec\n", msecKernel);
#endif
CudaSafeCall(cudaFree(dA));
CudaSafeCall(cudaFree(dB));
}
}
}
|
2a9cf36b290c027c55ec4eced2531acbc93bf33a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "set_permutation.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
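// Benchmark driver: for each matrix size and block shape above, warm up with 10 launches of set_permutation and then time 1000 back-to-back launches.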
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *d_permutation = NULL;
hipMalloc(&d_permutation, XSIZE*YSIZE);
int M = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((set_permutation), dim3(gridBlock), dim3(threadBlock), 0, 0, d_permutation, M);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((set_permutation), dim3(gridBlock), dim3(threadBlock), 0, 0, d_permutation, M);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((set_permutation), dim3(gridBlock), dim3(threadBlock), 0, 0, d_permutation, M);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 2a9cf36b290c027c55ec4eced2531acbc93bf33a.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "set_permutation.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *d_permutation = NULL;
cudaMalloc(&d_permutation, XSIZE*YSIZE);
int M = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
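// cudaFree(0) forces CUDA context initialization before the warm-up and timed launches.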
set_permutation<<<gridBlock,threadBlock>>>(d_permutation,M);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
set_permutation<<<gridBlock,threadBlock>>>(d_permutation,M);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
set_permutation<<<gridBlock,threadBlock>>>(d_permutation,M);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
b412f1a5af531c99f40445b2b4c4c44c39c53bfe.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
hipError_t error = hipGetLastError ();
if (error != hipSuccess) {
printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error));
exit(-1);
}
}
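// hypterm_0: x- and y-direction flux derivatives for flux_0..flux_3, using an 8-point central-difference stencil (coefficients 0.8, 0.2, 0.038, 0.0035) scaled by dxinv0/dxinv1; one grid point per thread.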
__global__ void hypterm_0 (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.z);
double (*flux_0)[308][308] = (double (*)[308][308])flux_in_0;
double (*flux_1)[308][308] = (double (*)[308][308])flux_in_1;
double (*flux_2)[308][308] = (double (*)[308][308])flux_in_2;
double (*flux_3)[308][308] = (double (*)[308][308])flux_in_3;
double (*cons_1)[308][308] = (double (*)[308][308])cons_in_1;
double (*cons_2)[308][308] = (double (*)[308][308])cons_in_2;
double (*cons_3)[308][308] = (double (*)[308][308])cons_in_3;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
double _t_1_ = cons_1[k][j][i+1];
_t_1_ -= cons_1[k][j][i-1];
double _t_0_ = 0.8 * _t_1_;
double _t_2_ = cons_1[k][j][i+2];
_t_2_ -= cons_1[k][j][i-2];
_t_0_ -= 0.2 * _t_2_;
double _t_3_ = cons_1[k][j][i+3];
_t_3_ -= cons_1[k][j][i-3];
_t_0_ += 0.038 * _t_3_;
double _t_4_ = cons_1[k][j][i+4];
_t_4_ -= cons_1[k][j][i-4];
_t_0_ -= 0.0035 * _t_4_;
double flux_0kc0jc0ic0 = _t_0_ * dxinv0;
double _t_12_ = cons_1[k][j][i+1] * q_1[k][j][i+1];
_t_12_ -= cons_1[k][j][i-1] * q_1[k][j][i-1];
_t_12_ += q_4[k][j][i+1];
_t_12_ -= q_4[k][j][i-1];
double _t_11_ = 0.8 * _t_12_;
double _t_13_ = cons_1[k][j][i+2] * q_1[k][j][i+2];
_t_13_ -= cons_1[k][j][i-2] * q_1[k][j][i-2];
_t_13_ += q_4[k][j][i+2];
_t_13_ -= q_4[k][j][i-2];
_t_11_ -= 0.2 * _t_13_;
double _t_14_ = cons_1[k][j][i+3] * q_1[k][j][i+3];
_t_14_ -= cons_1[k][j][i-3] * q_1[k][j][i-3];
_t_14_ += q_4[k][j][i+3];
_t_14_ -= q_4[k][j][i-3];
_t_11_ += 0.038 * _t_14_;
double _t_15_ = cons_1[k][j][i+4] * q_1[k][j][i+4];
_t_15_ -= cons_1[k][j][i-4] * q_1[k][j][i-4];
_t_15_ += q_4[k][j][i+4];
_t_15_ -= q_4[k][j][i-4];
_t_11_ -= 0.0035 * _t_15_;
double flux_1kc0jc0ic0 = _t_11_ * dxinv0;
double _t_23_ = cons_2[k][j][i+1] * q_1[k][j][i+1];
_t_23_ -= cons_2[k][j][i-1] * q_1[k][j][i-1];
double _t_22_ = 0.8 * _t_23_;
double _t_24_ = cons_2[k][j][i+2] * q_1[k][j][i+2];
_t_24_ -= cons_2[k][j][i-2] * q_1[k][j][i-2];
_t_22_ -= 0.2 * _t_24_;
double _t_25_ = cons_2[k][j][i+3] * q_1[k][j][i+3];
_t_25_ -= cons_2[k][j][i-3] * q_1[k][j][i-3];
_t_22_ += 0.038 * _t_25_;
double _t_26_ = cons_2[k][j][i+4] * q_1[k][j][i+4];
_t_26_ -= cons_2[k][j][i-4] * q_1[k][j][i-4];
_t_22_ -= 0.0035 * _t_26_;
double flux_2kc0jc0ic0 = _t_22_ * dxinv0;
double _t_34_ = cons_3[k][j][i+1] * q_1[k][j][i+1];
_t_34_ -= cons_3[k][j][i-1] * q_1[k][j][i-1];
double _t_33_ = 0.8 * _t_34_;
double _t_35_ = cons_3[k][j][i+2] * q_1[k][j][i+2];
_t_35_ -= cons_3[k][j][i-2] * q_1[k][j][i-2];
_t_33_ -= 0.2 * _t_35_;
double _t_36_ = cons_3[k][j][i+3] * q_1[k][j][i+3];
_t_36_ -= cons_3[k][j][i-3] * q_1[k][j][i-3];
_t_33_ += 0.038 * _t_36_;
double _t_37_ = cons_3[k][j][i+4] * q_1[k][j][i+4];
_t_37_ -= cons_3[k][j][i-4] * q_1[k][j][i-4];
_t_33_ -= 0.0035 * _t_37_;
double flux_3kc0jc0ic0 = _t_33_ * dxinv0;
double _t_7_ = cons_2[k][j+1][i];
_t_7_ -= cons_2[k][j-1][i];
double _t_6_ = 0.8 * _t_7_;
double _t_8_ = cons_2[k][j+2][i];
_t_8_ -= cons_2[k][j-2][i];
_t_6_ -= 0.2 * _t_8_;
double _t_9_ = cons_2[k][j+3][i];
_t_9_ -= cons_2[k][j-3][i];
_t_6_ += 0.038 * _t_9_;
double _t_10_ = cons_2[k][j+4][i];
_t_10_ -= cons_2[k][j-4][i];
_t_6_ -= 0.0035 * _t_10_;
flux_0kc0jc0ic0 -= _t_6_ * dxinv1;
double _t_29_ = cons_2[k][j+1][i] * q_2[k][j+1][i];
_t_29_ -= cons_2[k][j-1][i] * q_2[k][j-1][i];
_t_29_ += q_4[k][j+1][i];
_t_29_ -= q_4[k][j-1][i];
double _t_28_ = 0.8 * _t_29_;
double _t_30_ = cons_2[k][j+2][i] * q_2[k][j+2][i];
_t_30_ -= cons_2[k][j-2][i] * q_2[k][j-2][i];
_t_30_ += q_4[k][j+2][i];
_t_30_ -= q_4[k][j-2][i];
_t_28_ -= 0.2 * _t_30_;
double _t_31_ = cons_2[k][j+3][i] * q_2[k][j+3][i];
_t_31_ -= cons_2[k][j-3][i] * q_2[k][j-3][i];
_t_31_ += q_4[k][j+3][i];
_t_31_ -= q_4[k][j-3][i];
_t_28_ += 0.038 * _t_31_;
double _t_32_ = cons_2[k][j+4][i] * q_2[k][j+4][i];
_t_32_ -= cons_2[k][j-4][i] * q_2[k][j-4][i];
_t_32_ += q_4[k][j+4][i];
_t_32_ -= q_4[k][j-4][i];
_t_28_ -= 0.0035 * _t_32_;
flux_2kc0jc0ic0 -= _t_28_ * dxinv1;
double _t_18_ = cons_1[k][j+1][i] * q_2[k][j+1][i];
_t_18_ -= cons_1[k][j-1][i] * q_2[k][j-1][i];
double _t_17_ = 0.8 * _t_18_;
double _t_19_ = cons_1[k][j+2][i] * q_2[k][j+2][i];
_t_19_ -= cons_1[k][j-2][i] * q_2[k][j-2][i];
_t_17_ -= 0.2 * _t_19_;
double _t_20_ = cons_1[k][j+3][i] * q_2[k][j+3][i];
_t_20_ -= cons_1[k][j-3][i] * q_2[k][j-3][i];
_t_17_ += 0.038 * _t_20_;
double _t_21_ = cons_1[k][j+4][i] * q_2[k][j+4][i];
_t_21_ -= cons_1[k][j-4][i] * q_2[k][j-4][i];
_t_17_ -= 0.0035 * _t_21_;
flux_1kc0jc0ic0 -= _t_17_ * dxinv1;
double _t_40_ = cons_3[k][j+1][i] * q_2[k][j+1][i];
_t_40_ -= cons_3[k][j-1][i] * q_2[k][j-1][i];
double _t_39_ = 0.8 * _t_40_;
double _t_41_ = cons_3[k][j+2][i] * q_2[k][j+2][i];
_t_41_ -= cons_3[k][j-2][i] * q_2[k][j-2][i];
_t_39_ -= 0.2 * _t_41_;
double _t_42_ = cons_3[k][j+3][i] * q_2[k][j+3][i];
_t_42_ -= cons_3[k][j-3][i] * q_2[k][j-3][i];
_t_39_ += 0.038 * _t_42_;
double _t_43_ = cons_3[k][j+4][i] * q_2[k][j+4][i];
_t_43_ -= cons_3[k][j-4][i] * q_2[k][j-4][i];
_t_39_ -= 0.0035 * _t_43_;
flux_3kc0jc0ic0 -= _t_39_ * dxinv1;
flux_0[k][j][i] = flux_0kc0jc0ic0;
flux_1[k][j][i] = flux_1kc0jc0ic0;
flux_2[k][j][i] = flux_2kc0jc0ic0;
flux_3[k][j][i] = flux_3kc0jc0ic0;
}
}
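// hypterm_1: adds the z-direction contributions to flux_0..flux_3; each thread updates 4 consecutive k-planes so stencil operands can be reused in registers.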
__global__ void hypterm_1 (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(4*blockdim_k);
int k = max (k0, 0) + (int)(4*threadIdx.z);
double (*flux_0)[308][308] = (double (*)[308][308])flux_in_0;
double (*flux_1)[308][308] = (double (*)[308][308])flux_in_1;
double (*flux_2)[308][308] = (double (*)[308][308])flux_in_2;
double (*flux_3)[308][308] = (double (*)[308][308])flux_in_3;
double (*cons_1)[308][308] = (double (*)[308][308])cons_in_1;
double (*cons_2)[308][308] = (double (*)[308][308])cons_in_2;
double (*cons_3)[308][308] = (double (*)[308][308])cons_in_3;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
double flux0_a, flux1_a, flux2_a, flux3_a;
double flux0_b, flux1_b, flux2_b, flux3_b;
double flux0_c, flux1_c, flux2_c, flux3_c;
double flux0_d, flux1_d, flux2_d, flux3_d;
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
flux0_a = flux_0[k][j][i];
double flux_0kc0jc0ic0 = flux0_a;
double _t_1_ = cons_3[k+1][j][i];
_t_1_ -= cons_3[k-1][j][i];
double _t_0_ = 0.8 * _t_1_;
double _t_2_ = cons_3[k+2][j][i];
_t_2_ -= cons_3[k-2][j][i];
_t_0_ -= 0.2 * _t_2_;
double _t_3_ = cons_3[k+3][j][i];
_t_3_ -= cons_3[k-3][j][i];
_t_0_ += 0.038 * _t_3_;
double _t_4_ = cons_3[k+4][j][i];
_t_4_ -= cons_3[k-4][j][i];
_t_0_ -= 0.0035 * _t_4_;
flux_0kc0jc0ic0 -= _t_0_ * dxinv2;
flux0_b = flux_0[k+1][j][i];
double flux_0kp1jc0ic0 = flux0_b;
double _t_7_ = cons_3[k+3][j][i];
_t_7_ -= cons_3[k-1][j][i];
double _t_5_ = -(0.2 * _t_7_);
double _t_8_ = cons_3[k+4][j][i];
_t_8_ -= cons_3[k-2][j][i];
_t_5_ += 0.038 * _t_8_;
double _t_6_ = cons_3[k+2][j][i];
_t_6_ -= cons_3[k][j][i];
_t_5_ += 0.8 * _t_6_;
double _t_9_ = -(cons_3[k-3][j][i]);
_t_9_ += cons_3[k+5][j][i];
_t_5_ -= 0.0035 * _t_9_;
flux_0kp1jc0ic0 -= _t_5_ * dxinv2;
flux0_c = flux_0[k+2][j][i];
double flux_0kp2jc0ic0 = flux0_c;
double _t_11_ = cons_3[k+3][j][i];
_t_11_ -= cons_3[k+1][j][i];
double _t_10_ = 0.8 * _t_11_;
double _t_12_ = cons_3[k+4][j][i];
_t_12_ -= cons_3[k][j][i];
_t_10_ -= 0.2 * _t_12_;
double _t_13_ = cons_3[k+5][j][i];
_t_13_ -= cons_3[k-1][j][i];
_t_10_ += 0.038 * _t_13_;
double _t_14_ = -(cons_3[k-2][j][i]);
_t_14_ += cons_3[k+6][j][i];
_t_10_ -= 0.0035 * _t_14_;
flux_0kp2jc0ic0 -= _t_10_ * dxinv2;
flux0_d = flux_0[k+3][j][i];
double flux_0kp3jc0ic0 = flux0_d;
double _t_16_ = cons_3[k+4][j][i];
_t_16_ -= cons_3[k+2][j][i];
double _t_15_ = 0.8 * _t_16_;
double _t_17_ = cons_3[k+5][j][i];
_t_17_ -= cons_3[k+1][j][i];
_t_15_ -= 0.2 * _t_17_;
double _t_18_ = cons_3[k+6][j][i];
_t_18_ -= cons_3[k][j][i];
_t_15_ += 0.038 * _t_18_;
double _t_19_ = -(cons_3[k-1][j][i]);
_t_19_ += cons_3[k+7][j][i];
_t_15_ -= 0.0035 * _t_19_;
flux_0kp3jc0ic0 -= _t_15_ * dxinv2;
flux1_a = flux_1[k][j][i];
double flux_1kc0jc0ic0 = flux1_a;
double _t_24_ = -(cons_1[k-4][j][i] * q_3[k-4][j][i]);
_t_24_ += cons_1[k+4][j][i] * q_3[k+4][j][i];
double _t_20_ = -(0.0035 * _t_24_);
double _t_21_ = cons_1[k+1][j][i] * q_3[k+1][j][i];
_t_21_ -= cons_1[k-1][j][i] * q_3[k-1][j][i];
_t_20_ += 0.8 * _t_21_;
double _t_22_ = cons_1[k+2][j][i] * q_3[k+2][j][i];
_t_22_ -= cons_1[k-2][j][i] * q_3[k-2][j][i];
_t_20_ -= 0.2 * _t_22_;
double _t_23_ = cons_1[k+3][j][i] * q_3[k+3][j][i];
_t_23_ -= cons_1[k-3][j][i] * q_3[k-3][j][i];
_t_20_ += 0.038 * _t_23_;
flux_1kc0jc0ic0 -= _t_20_ * dxinv2;
flux1_b = flux_1[k+1][j][i];
double flux_1kp1jc0ic0 = flux1_b;
double _v_15_ = cons_1[k+3][j][i] * q_3[k+3][j][i];
double _v_16_ = cons_1[k-1][j][i] * q_3[k-1][j][i];
double _v_17_ = cons_1[k+4][j][i] * q_3[k+4][j][i];
double _v_18_ = cons_1[k-2][j][i] * q_3[k-2][j][i];
double _v_20_ = cons_1[k-3][j][i] * q_3[k-3][j][i];
double _v_13_ = cons_1[k+2][j][i] * q_3[k+2][j][i];
double _v_23_ = cons_1[k+1][j][i] * q_3[k+1][j][i];
double _v_47_ = cons_2[k-4][j][i] * q_3[k-4][j][i];
double _v_83_ = cons_3[k-4][j][i] * q_3[k-4][j][i];
double _v_82_ = cons_3[k+4][j][i] * q_3[k+4][j][i];
double _v_76_ = cons_3[k+1][j][i] * q_3[k+1][j][i];
double _v_77_ = cons_3[k-1][j][i] * q_3[k-1][j][i];
double _v_78_ = cons_3[k+2][j][i] * q_3[k+2][j][i];
double _v_79_ = cons_3[k-2][j][i] * q_3[k-2][j][i];
double _v_80_ = cons_3[k+3][j][i] * q_3[k+3][j][i];
double _v_81_ = cons_3[k-3][j][i] * q_3[k-3][j][i];
double _t_27_ = _v_15_;
_t_27_ -= _v_16_;
double _t_25_ = -(0.2 * _t_27_);
double _t_28_ = _v_17_;
_t_28_ -= _v_18_;
_t_25_ += 0.038 * _t_28_;
double _t_29_ = -(_v_20_);
_t_29_ += cons_1[k+5][j][i] * q_3[k+5][j][i];
_t_25_ -= 0.0035 * _t_29_;
double _t_26_ = _v_13_;
_t_26_ -= cons_1[k][j][i] * q_3[k][j][i];
_t_25_ += 0.8 * _t_26_;
flux_1kp1jc0ic0 -= _t_25_ * dxinv2;
flux1_c = flux_1[k+2][j][i];
double flux_1kp2jc0ic0 = flux1_c;
double _v_25_ = cons_1[k][j][i] * q_3[k][j][i];
double _v_26_ = cons_1[k+5][j][i] * q_3[k+5][j][i];
double _v_91_ = cons_3[k+5][j][i] * q_3[k+5][j][i];
double _v_86_ = cons_3[k][j][i] * q_3[k][j][i];
double _t_31_ = _v_15_;
_t_31_ -= _v_23_;
double _t_30_ = 0.8 * _t_31_;
double _t_32_ = _v_17_;
_t_32_ -= _v_25_;
_t_30_ -= 0.2 * _t_32_;
double _t_33_ = _v_26_;
_t_33_ -= _v_16_;
_t_30_ += 0.038 * _t_33_;
double _t_34_ = -(_v_18_);
_t_34_ += cons_1[k+6][j][i] * q_3[k+6][j][i];
_t_30_ -= 0.0035 * _t_34_;
flux_1kp2jc0ic0 -= _t_30_ * dxinv2;
flux1_d = flux_1[k+3][j][i];
double flux_1kp3jc0ic0 = flux1_d;
double _v_35_ = cons_1[k+6][j][i] * q_3[k+6][j][i];
double _v_100_ = cons_3[k+6][j][i] * q_3[k+6][j][i];
double _t_36_ = _v_17_;
_t_36_ -= _v_13_;
double _t_35_ = 0.8 * _t_36_;
double _t_37_ = _v_26_;
_t_37_ -= _v_23_;
_t_35_ -= 0.2 * _t_37_;
double _t_38_ = _v_35_;
_t_38_ -= _v_25_;
_t_35_ += 0.038 * _t_38_;
double _t_39_ = -(_v_16_);
_t_39_ += cons_1[k+7][j][i] * q_3[k+7][j][i];
_t_35_ -= 0.0035 * _t_39_;
flux_1kp3jc0ic0 -= _t_35_ * dxinv2;
flux2_a = flux_2[k][j][i];
double flux_2kc0jc0ic0 = flux2_a;
double _v_73_ = cons_2[k+7][j][i] * q_3[k+7][j][i];
double _v_109_ = cons_3[k+7][j][i] * q_3[k+7][j][i];
double _t_44_ = -(_v_47_);
double _v_46_ = cons_2[k+4][j][i] * q_3[k+4][j][i];
_t_44_ += _v_46_;
double _t_40_ = -(0.0035 * _t_44_);
double _v_40_ = cons_2[k+1][j][i] * q_3[k+1][j][i];
double _t_41_ = _v_40_;
double _v_41_ = cons_2[k-1][j][i] * q_3[k-1][j][i];
_t_41_ -= _v_41_;
_t_40_ += 0.8 * _t_41_;
double _v_42_ = cons_2[k+2][j][i] * q_3[k+2][j][i];
double _t_42_ = _v_42_;
double _v_43_ = cons_2[k-2][j][i] * q_3[k-2][j][i];
_t_42_ -= _v_43_;
_t_40_ -= 0.2 * _t_42_;
double _v_44_ = cons_2[k+3][j][i] * q_3[k+3][j][i];
double _t_43_ = _v_44_;
double _v_45_ = cons_2[k-3][j][i] * q_3[k-3][j][i];
_t_43_ -= _v_45_;
_t_40_ += 0.038 * _t_43_;
flux_2kc0jc0ic0 -= _t_40_ * dxinv2;
flux2_b = flux_2[k+1][j][i];
double flux_2kp1jc0ic0 = flux2_b;
double _t_47_ = _v_44_;
_t_47_ -= _v_41_;
double _t_45_ = -(0.2 * _t_47_);
double _t_48_ = _v_46_;
_t_48_ -= _v_43_;
_t_45_ += 0.038 * _t_48_;
double _t_49_ = -(_v_45_);
double _v_55_ = cons_2[k+5][j][i] * q_3[k+5][j][i];
_t_49_ += _v_55_;
_t_45_ -= 0.0035 * _t_49_;
double _t_46_ = _v_42_;
double _v_50_ = cons_2[k][j][i] * q_3[k][j][i];
_t_46_ -= _v_50_;
_t_45_ += 0.8 * _t_46_;
flux_2kp1jc0ic0 -= _t_45_ * dxinv2;
flux2_c = flux_2[k+2][j][i];
double flux_2kp2jc0ic0 = flux2_c;
double _t_51_ = _v_44_;
_t_51_ -= _v_40_;
double _t_50_ = 0.8 * _t_51_;
double _t_52_ = _v_46_;
_t_52_ -= _v_50_;
_t_50_ -= 0.2 * _t_52_;
double _t_53_ = _v_55_;
_t_53_ -= _v_41_;
_t_50_ += 0.038 * _t_53_;
double _t_54_ = -(_v_43_);
double _v_64_ = cons_2[k+6][j][i] * q_3[k+6][j][i];
_t_54_ += _v_64_;
_t_50_ -= 0.0035 * _t_54_;
flux_2kp2jc0ic0 -= _t_50_ * dxinv2;
flux2_d = flux_2[k+3][j][i];
double flux_2kp3jc0ic0 = flux2_d;
double _t_56_ = _v_46_;
_t_56_ -= _v_42_;
double _t_55_ = 0.8 * _t_56_;
double _t_57_ = _v_55_;
_t_57_ -= _v_40_;
_t_55_ -= 0.2 * _t_57_;
double _t_58_ = _v_64_;
_t_58_ -= _v_50_;
_t_55_ += 0.038 * _t_58_;
double _t_59_ = -(_v_41_);
_t_59_ += _v_73_;
_t_55_ -= 0.0035 * _t_59_;
flux_2kp3jc0ic0 -= _t_55_ * dxinv2;
flux3_a = flux_3[k][j][i];
double flux_3kc0jc0ic0 = flux3_a;
double _t_64_ = -(_v_83_);
_t_64_ += _v_82_;
_t_64_ -= q_4[k-4][j][i];
_t_64_ += q_4[k+4][j][i];
double _t_60_ = -(0.0035 * _t_64_);
double _t_61_ = _v_76_;
_t_61_ -= _v_77_;
_t_61_ += q_4[k+1][j][i];
_t_61_ -= q_4[k-1][j][i];
_t_60_ += 0.8 * _t_61_;
double _t_62_ = _v_78_;
_t_62_ -= _v_79_;
_t_62_ += q_4[k+2][j][i];
_t_62_ -= q_4[k-2][j][i];
_t_60_ -= 0.2 * _t_62_;
double _t_63_ = _v_80_;
_t_63_ -= _v_81_;
_t_63_ += q_4[k+3][j][i];
_t_63_ -= q_4[k-3][j][i];
_t_60_ += 0.038 * _t_63_;
flux_3kc0jc0ic0 -= _t_60_ * dxinv2;
flux3_b = flux_3[k+1][j][i];
double flux_3kp1jc0ic0 = flux3_b;
double _t_69_ = -(q_4[k-3][j][i]);
_t_69_ -= _v_81_;
_t_69_ += _v_91_;
_t_69_ += q_4[k+5][j][i];
double _t_65_ = -(0.0035 * _t_69_);
double _t_67_ = _v_80_;
_t_67_ -= _v_77_;
_t_67_ += q_4[k+3][j][i];
_t_67_ -= q_4[k-1][j][i];
_t_65_ -= 0.2 * _t_67_;
double _t_68_ = _v_82_;
_t_68_ -= _v_79_;
_t_68_ += q_4[k+4][j][i];
_t_68_ -= q_4[k-2][j][i];
_t_65_ += 0.038 * _t_68_;
double _t_66_ = _v_78_;
_t_66_ -= _v_86_;
_t_66_ += q_4[k+2][j][i];
_t_66_ -= q_4[k][j][i];
_t_65_ += 0.8 * _t_66_;
flux_3kp1jc0ic0 -= _t_65_ * dxinv2;
flux3_c = flux_3[k+2][j][i];
double flux_3kp2jc0ic0 = flux3_c;
double _t_71_ = q_4[k+3][j][i];
_t_71_ -= q_4[k+1][j][i];
double _t_74_ = -(q_4[k-2][j][i]);
double _t_72_ = q_4[k+4][j][i];
_t_72_ -= q_4[k][j][i];
double _t_73_ = q_4[k+5][j][i];
_t_73_ -= q_4[k-1][j][i];
double _t_76_ = q_4[k+4][j][i];
_t_76_ -= q_4[k+2][j][i];
double _t_77_ = q_4[k+5][j][i];
_t_77_ -= q_4[k+1][j][i];
double _t_78_ = -(q_4[k][j][i]);
double _t_79_ = -(q_4[k-1][j][i]);
_t_71_ += _v_80_;
_t_71_ -= _v_76_;
double _t_70_ = 0.8 * _t_71_;
_t_74_ -= _v_79_;
_t_74_ += _v_100_;
_t_74_ += q_4[k+6][j][i];
_t_78_ += q_4[k+6][j][i];
_t_70_ -= 0.0035 * _t_74_;
_t_72_ += _v_82_;
_t_72_ -= _v_86_;
_t_70_ -= 0.2 * _t_72_;
_t_73_ += _v_91_;
_t_73_ -= _v_77_;
_t_70_ += 0.038 * _t_73_;
flux_3kp2jc0ic0 -= _t_70_ * dxinv2;
flux3_d = flux_3[k+3][j][i];
double flux_3kp3jc0ic0 = flux3_d;
_t_76_ += _v_82_;
_t_76_ -= _v_78_;
double _t_75_ = 0.8 * _t_76_;
_t_77_ += _v_91_;
_t_77_ -= _v_76_;
_t_75_ -= 0.2 * _t_77_;
_t_78_ += _v_100_;
_t_78_ -= _v_86_;
_t_75_ += 0.038 * _t_78_;
_t_79_ += _v_109_;
_t_79_ -= _v_77_;
_t_79_ += q_4[k+7][j][i];
_t_75_ -= 0.0035 * _t_79_;
flux_3kp3jc0ic0 -= _t_75_ * dxinv2;
flux_0[k][j][i] = flux_0kc0jc0ic0;
flux_0[k+1][j][i] = flux_0kp1jc0ic0;
flux_0[k+2][j][i] = flux_0kp2jc0ic0;
flux_0[k+3][j][i] = flux_0kp3jc0ic0;
flux_1[k][j][i] = flux_1kc0jc0ic0;
flux_1[k+1][j][i] = flux_1kp1jc0ic0;
flux_1[k+2][j][i] = flux_1kp2jc0ic0;
flux_1[k+3][j][i] = flux_1kp3jc0ic0;
flux_2[k][j][i] = flux_2kc0jc0ic0;
flux_2[k+1][j][i] = flux_2kp1jc0ic0;
flux_2[k+2][j][i] = flux_2kp2jc0ic0;
flux_2[k+3][j][i] = flux_2kp3jc0ic0;
flux_3[k][j][i] = flux_3kc0jc0ic0;
flux_3[k+1][j][i] = flux_3kp1jc0ic0;
flux_3[k+2][j][i] = flux_3kp2jc0ic0;
flux_3[k+3][j][i] = flux_3kp3jc0ic0;
}
}
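// hypterm_2: computes the energy flux flux_4 in all three directions from cons_4 and q_1..q_4; each thread handles 2 consecutive k-planes.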
__global__ void hypterm_2 (double * __restrict__ flux_in_4, double * __restrict__ cons_in_4, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(2*blockdim_k);
int k = max (k0, 0) + (int)(2*threadIdx.z);
double (*flux_4)[308][308] = (double (*)[308][308])flux_in_4;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
double (*cons_4)[308][308] = (double (*)[308][308])cons_in_4;
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
flux_4[k][j][i] = ((0.8*(cons_4[k][j][i+1]*q_1[k][j][i+1]-cons_4[k][j][i-1]*q_1[k][j][i-1]+(q_4[k][j][i+1]*q_1[k][j][i+1]-q_4[k][j][i-1]*q_1[k][j][i-1]))-0.2*(cons_4[k][j][i+2]*q_1[k][j][i+2]-cons_4[k][j][i-2]*q_1[k][j][i-2]+(q_4[k][j][i+2]*q_1[k][j][i+2]-q_4[k][j][i-2]*q_1[k][j][i-2]))+0.038*(cons_4[k][j][i+3]*q_1[k][j][i+3]-cons_4[k][j][i-3]*q_1[k][j][i-3]+(q_4[k][j][i+3]*q_1[k][j][i+3]-q_4[k][j][i-3]*q_1[k][j][i-3]))-0.0035*(cons_4[k][j][i+4]*q_1[k][j][i+4]-cons_4[k][j][i-4]*q_1[k][j][i-4]+(q_4[k][j][i+4]*q_1[k][j][i+4]-q_4[k][j][i-4]*q_1[k][j][i-4])))*dxinv0);
flux_4[k+1][j][i] = ((0.8*(cons_4[k+1][j][i+1]*q_1[k+1][j][i+1]-cons_4[k+1][j][i-1]*q_1[k+1][j][i-1]+(q_4[k+1][j][i+1]*q_1[k+1][j][i+1]-q_4[k+1][j][i-1]*q_1[k+1][j][i-1]))-0.2*(cons_4[k+1][j][i+2]*q_1[k+1][j][i+2]-cons_4[k+1][j][i-2]*q_1[k+1][j][i-2]+(q_4[k+1][j][i+2]*q_1[k+1][j][i+2]-q_4[k+1][j][i-2]*q_1[k+1][j][i-2]))+0.038*(cons_4[k+1][j][i+3]*q_1[k+1][j][i+3]-cons_4[k+1][j][i-3]*q_1[k+1][j][i-3]+(q_4[k+1][j][i+3]*q_1[k+1][j][i+3]-q_4[k+1][j][i-3]*q_1[k+1][j][i-3]))-0.0035*(cons_4[k+1][j][i+4]*q_1[k+1][j][i+4]-cons_4[k+1][j][i-4]*q_1[k+1][j][i-4]+(q_4[k+1][j][i+4]*q_1[k+1][j][i+4]-q_4[k+1][j][i-4]*q_1[k+1][j][i-4])))*dxinv0);
flux_4[k][j][i] -= (0.8*(cons_4[k][j+1][i]*q_2[k][j+1][i]-cons_4[k][j-1][i]*q_2[k][j-1][i]+(q_4[k][j+1][i]*q_2[k][j+1][i]-q_4[k][j-1][i]*q_2[k][j-1][i]))-0.2*(cons_4[k][j+2][i]*q_2[k][j+2][i]-cons_4[k][j-2][i]*q_2[k][j-2][i]+(q_4[k][j+2][i]*q_2[k][j+2][i]-q_4[k][j-2][i]*q_2[k][j-2][i]))+0.038*(cons_4[k][j+3][i]*q_2[k][j+3][i]-cons_4[k][j-3][i]*q_2[k][j-3][i]+(q_4[k][j+3][i]*q_2[k][j+3][i]-q_4[k][j-3][i]*q_2[k][j-3][i]))-0.0035*(cons_4[k][j+4][i]*q_2[k][j+4][i]-cons_4[k][j-4][i]*q_2[k][j-4][i]+(q_4[k][j+4][i]*q_2[k][j+4][i]-q_4[k][j-4][i]*q_2[k][j-4][i])))*dxinv1;
flux_4[k+1][j][i] -= (0.8*(cons_4[k+1][j+1][i]*q_2[k+1][j+1][i]-cons_4[k+1][j-1][i]*q_2[k+1][j-1][i]+(q_4[k+1][j+1][i]*q_2[k+1][j+1][i]-q_4[k+1][j-1][i]*q_2[k+1][j-1][i]))-0.2*(cons_4[k+1][j+2][i]*q_2[k+1][j+2][i]-cons_4[k+1][j-2][i]*q_2[k+1][j-2][i]+(q_4[k+1][j+2][i]*q_2[k+1][j+2][i]-q_4[k+1][j-2][i]*q_2[k+1][j-2][i]))+0.038*(cons_4[k+1][j+3][i]*q_2[k+1][j+3][i]-cons_4[k+1][j-3][i]*q_2[k+1][j-3][i]+(q_4[k+1][j+3][i]*q_2[k+1][j+3][i]-q_4[k+1][j-3][i]*q_2[k+1][j-3][i]))-0.0035*(cons_4[k+1][j+4][i]*q_2[k+1][j+4][i]-cons_4[k+1][j-4][i]*q_2[k+1][j-4][i]+(q_4[k+1][j+4][i]*q_2[k+1][j+4][i]-q_4[k+1][j-4][i]*q_2[k+1][j-4][i])))*dxinv1;
flux_4[k][j][i] -= (0.8*(cons_4[k+1][j][i]*q_3[k+1][j][i]-cons_4[k-1][j][i]*q_3[k-1][j][i]+(q_4[k+1][j][i]*q_3[k+1][j][i]-q_4[k-1][j][i]*q_3[k-1][j][i]))-0.2*(cons_4[k+2][j][i]*q_3[k+2][j][i]-cons_4[k-2][j][i]*q_3[k-2][j][i]+(q_4[k+2][j][i]*q_3[k+2][j][i]-q_4[k-2][j][i]*q_3[k-2][j][i]))+0.038*(cons_4[k+3][j][i]*q_3[k+3][j][i]-cons_4[k-3][j][i]*q_3[k-3][j][i]+(q_4[k+3][j][i]*q_3[k+3][j][i]-q_4[k-3][j][i]*q_3[k-3][j][i]))-0.0035*(cons_4[k+4][j][i]*q_3[k+4][j][i]-cons_4[k-4][j][i]*q_3[k-4][j][i]+(q_4[k+4][j][i]*q_3[k+4][j][i]-q_4[k-4][j][i]*q_3[k-4][j][i])))*dxinv2;
flux_4[k+1][j][i] -= (0.8*(cons_4[k+1+1][j][i]*q_3[k+1+1][j][i]-cons_4[k+1-1][j][i]*q_3[k+1-1][j][i]+(q_4[k+1+1][j][i]*q_3[k+1+1][j][i]-q_4[k+1-1][j][i]*q_3[k+1-1][j][i]))-0.2*(cons_4[k+1+2][j][i]*q_3[k+1+2][j][i]-cons_4[k+1-2][j][i]*q_3[k+1-2][j][i]+(q_4[k+1+2][j][i]*q_3[k+1+2][j][i]-q_4[k+1-2][j][i]*q_3[k+1-2][j][i]))+0.038*(cons_4[k+1+3][j][i]*q_3[k+1+3][j][i]-cons_4[k+1-3][j][i]*q_3[k+1-3][j][i]+(q_4[k+1+3][j][i]*q_3[k+1+3][j][i]-q_4[k+1-3][j][i]*q_3[k+1-3][j][i]))-0.0035*(cons_4[k+1+4][j][i]*q_3[k+1+4][j][i]-cons_4[k+1-4][j][i]*q_3[k+1-4][j][i]+(q_4[k+1+4][j][i]*q_3[k+1+4][j][i]-q_4[k+1-4][j][i]*q_3[k+1-4][j][i])))*dxinv2;
}
}
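// host_code: copies the inputs to the device, launches hypterm_0/1/2 with 16x4x4 thread blocks (dxinv0 is negated at the call sites), and copies the five flux arrays back to the host.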
extern "C" void host_code (double *h_flux_0, double *h_flux_1, double *h_flux_2, double *h_flux_3, double *h_flux_4, double *h_cons_1, double *h_cons_2, double *h_cons_3, double *h_cons_4, double *h_q_1, double *h_q_2, double *h_q_3, double *h_q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
double *flux_0;
hipMalloc (&flux_0, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_0\n");
hipMemcpy (flux_0, h_flux_0, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_1;
hipMalloc (&flux_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_1\n");
hipMemcpy (flux_1, h_flux_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_2;
hipMalloc (&flux_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_2\n");
hipMemcpy (flux_2, h_flux_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_3;
hipMalloc (&flux_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_3\n");
hipMemcpy (flux_3, h_flux_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_4;
hipMalloc (&flux_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_4\n");
hipMemcpy (flux_4, h_flux_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_1;
hipMalloc (&cons_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_1\n");
hipMemcpy (cons_1, h_cons_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_2;
hipMalloc (&cons_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_2\n");
hipMemcpy (cons_2, h_cons_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_3;
hipMalloc (&cons_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_3\n");
hipMemcpy (cons_3, h_cons_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_4;
hipMalloc (&cons_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_4\n");
hipMemcpy (cons_4, h_cons_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_1;
hipMalloc (&q_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_1\n");
hipMemcpy (q_1, h_q_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_2;
hipMalloc (&q_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_2\n");
hipMemcpy (q_2, h_q_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_3;
hipMalloc (&q_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_3\n");
hipMemcpy (q_3, h_q_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_4;
hipMalloc (&q_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_4\n");
hipMemcpy (q_4, h_q_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
dim3 blockconfig (16, 4, 4);
dim3 gridconfig_0 (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, blockconfig.z));
hipLaunchKernelGGL(( hypterm_0) , dim3(gridconfig_0), dim3(blockconfig), 0, 0, flux_0, flux_1, flux_2, flux_3, cons_1, cons_2, cons_3, q_1, q_2, q_3, q_4, -dxinv0, dxinv1, dxinv2, L, M, N);
dim3 gridconfig_1 (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, 4*blockconfig.z));
hipLaunchKernelGGL(( hypterm_1) , dim3(gridconfig_1), dim3(blockconfig), 0, 0, flux_0, flux_1, flux_2, flux_3, cons_1, cons_2, cons_3, q_1, q_2, q_3, q_4, -dxinv0, dxinv1, dxinv2, L, M, N);
dim3 gridconfig_2 (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, 2*blockconfig.z));
hipLaunchKernelGGL(( hypterm_2) , dim3(gridconfig_2), dim3(blockconfig), 0, 0, flux_4, cons_4, q_1, q_2, q_3, q_4, -dxinv0, dxinv1, dxinv2, L, M, N);
hipMemcpy (h_flux_0, flux_0, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_1, flux_1, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_3, flux_3, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_4, flux_4, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_2, flux_2, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
}
| b412f1a5af531c99f40445b2b4c4c44c39c53bfe.cu | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
cudaError_t error = cudaGetLastError ();
if (error != cudaSuccess) {
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
exit(-1);
}
}
__global__ void hypterm_0 (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.z);
double (*flux_0)[308][308] = (double (*)[308][308])flux_in_0;
double (*flux_1)[308][308] = (double (*)[308][308])flux_in_1;
double (*flux_2)[308][308] = (double (*)[308][308])flux_in_2;
double (*flux_3)[308][308] = (double (*)[308][308])flux_in_3;
double (*cons_1)[308][308] = (double (*)[308][308])cons_in_1;
double (*cons_2)[308][308] = (double (*)[308][308])cons_in_2;
double (*cons_3)[308][308] = (double (*)[308][308])cons_in_3;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
double _t_1_ = cons_1[k][j][i+1];
_t_1_ -= cons_1[k][j][i-1];
double _t_0_ = 0.8 * _t_1_;
double _t_2_ = cons_1[k][j][i+2];
_t_2_ -= cons_1[k][j][i-2];
_t_0_ -= 0.2 * _t_2_;
double _t_3_ = cons_1[k][j][i+3];
_t_3_ -= cons_1[k][j][i-3];
_t_0_ += 0.038 * _t_3_;
double _t_4_ = cons_1[k][j][i+4];
_t_4_ -= cons_1[k][j][i-4];
_t_0_ -= 0.0035 * _t_4_;
double flux_0kc0jc0ic0 = _t_0_ * dxinv0;
double _t_12_ = cons_1[k][j][i+1] * q_1[k][j][i+1];
_t_12_ -= cons_1[k][j][i-1] * q_1[k][j][i-1];
_t_12_ += q_4[k][j][i+1];
_t_12_ -= q_4[k][j][i-1];
double _t_11_ = 0.8 * _t_12_;
double _t_13_ = cons_1[k][j][i+2] * q_1[k][j][i+2];
_t_13_ -= cons_1[k][j][i-2] * q_1[k][j][i-2];
_t_13_ += q_4[k][j][i+2];
_t_13_ -= q_4[k][j][i-2];
_t_11_ -= 0.2 * _t_13_;
double _t_14_ = cons_1[k][j][i+3] * q_1[k][j][i+3];
_t_14_ -= cons_1[k][j][i-3] * q_1[k][j][i-3];
_t_14_ += q_4[k][j][i+3];
_t_14_ -= q_4[k][j][i-3];
_t_11_ += 0.038 * _t_14_;
double _t_15_ = cons_1[k][j][i+4] * q_1[k][j][i+4];
_t_15_ -= cons_1[k][j][i-4] * q_1[k][j][i-4];
_t_15_ += q_4[k][j][i+4];
_t_15_ -= q_4[k][j][i-4];
_t_11_ -= 0.0035 * _t_15_;
double flux_1kc0jc0ic0 = _t_11_ * dxinv0;
double _t_23_ = cons_2[k][j][i+1] * q_1[k][j][i+1];
_t_23_ -= cons_2[k][j][i-1] * q_1[k][j][i-1];
double _t_22_ = 0.8 * _t_23_;
double _t_24_ = cons_2[k][j][i+2] * q_1[k][j][i+2];
_t_24_ -= cons_2[k][j][i-2] * q_1[k][j][i-2];
_t_22_ -= 0.2 * _t_24_;
double _t_25_ = cons_2[k][j][i+3] * q_1[k][j][i+3];
_t_25_ -= cons_2[k][j][i-3] * q_1[k][j][i-3];
_t_22_ += 0.038 * _t_25_;
double _t_26_ = cons_2[k][j][i+4] * q_1[k][j][i+4];
_t_26_ -= cons_2[k][j][i-4] * q_1[k][j][i-4];
_t_22_ -= 0.0035 * _t_26_;
double flux_2kc0jc0ic0 = _t_22_ * dxinv0;
double _t_34_ = cons_3[k][j][i+1] * q_1[k][j][i+1];
_t_34_ -= cons_3[k][j][i-1] * q_1[k][j][i-1];
double _t_33_ = 0.8 * _t_34_;
double _t_35_ = cons_3[k][j][i+2] * q_1[k][j][i+2];
_t_35_ -= cons_3[k][j][i-2] * q_1[k][j][i-2];
_t_33_ -= 0.2 * _t_35_;
double _t_36_ = cons_3[k][j][i+3] * q_1[k][j][i+3];
_t_36_ -= cons_3[k][j][i-3] * q_1[k][j][i-3];
_t_33_ += 0.038 * _t_36_;
double _t_37_ = cons_3[k][j][i+4] * q_1[k][j][i+4];
_t_37_ -= cons_3[k][j][i-4] * q_1[k][j][i-4];
_t_33_ -= 0.0035 * _t_37_;
double flux_3kc0jc0ic0 = _t_33_ * dxinv0;
double _t_7_ = cons_2[k][j+1][i];
_t_7_ -= cons_2[k][j-1][i];
double _t_6_ = 0.8 * _t_7_;
double _t_8_ = cons_2[k][j+2][i];
_t_8_ -= cons_2[k][j-2][i];
_t_6_ -= 0.2 * _t_8_;
double _t_9_ = cons_2[k][j+3][i];
_t_9_ -= cons_2[k][j-3][i];
_t_6_ += 0.038 * _t_9_;
double _t_10_ = cons_2[k][j+4][i];
_t_10_ -= cons_2[k][j-4][i];
_t_6_ -= 0.0035 * _t_10_;
flux_0kc0jc0ic0 -= _t_6_ * dxinv1;
double _t_29_ = cons_2[k][j+1][i] * q_2[k][j+1][i];
_t_29_ -= cons_2[k][j-1][i] * q_2[k][j-1][i];
_t_29_ += q_4[k][j+1][i];
_t_29_ -= q_4[k][j-1][i];
double _t_28_ = 0.8 * _t_29_;
double _t_30_ = cons_2[k][j+2][i] * q_2[k][j+2][i];
_t_30_ -= cons_2[k][j-2][i] * q_2[k][j-2][i];
_t_30_ += q_4[k][j+2][i];
_t_30_ -= q_4[k][j-2][i];
_t_28_ -= 0.2 * _t_30_;
double _t_31_ = cons_2[k][j+3][i] * q_2[k][j+3][i];
_t_31_ -= cons_2[k][j-3][i] * q_2[k][j-3][i];
_t_31_ += q_4[k][j+3][i];
_t_31_ -= q_4[k][j-3][i];
_t_28_ += 0.038 * _t_31_;
double _t_32_ = cons_2[k][j+4][i] * q_2[k][j+4][i];
_t_32_ -= cons_2[k][j-4][i] * q_2[k][j-4][i];
_t_32_ += q_4[k][j+4][i];
_t_32_ -= q_4[k][j-4][i];
_t_28_ -= 0.0035 * _t_32_;
flux_2kc0jc0ic0 -= _t_28_ * dxinv1;
double _t_18_ = cons_1[k][j+1][i] * q_2[k][j+1][i];
_t_18_ -= cons_1[k][j-1][i] * q_2[k][j-1][i];
double _t_17_ = 0.8 * _t_18_;
double _t_19_ = cons_1[k][j+2][i] * q_2[k][j+2][i];
_t_19_ -= cons_1[k][j-2][i] * q_2[k][j-2][i];
_t_17_ -= 0.2 * _t_19_;
double _t_20_ = cons_1[k][j+3][i] * q_2[k][j+3][i];
_t_20_ -= cons_1[k][j-3][i] * q_2[k][j-3][i];
_t_17_ += 0.038 * _t_20_;
double _t_21_ = cons_1[k][j+4][i] * q_2[k][j+4][i];
_t_21_ -= cons_1[k][j-4][i] * q_2[k][j-4][i];
_t_17_ -= 0.0035 * _t_21_;
flux_1kc0jc0ic0 -= _t_17_ * dxinv1;
double _t_40_ = cons_3[k][j+1][i] * q_2[k][j+1][i];
_t_40_ -= cons_3[k][j-1][i] * q_2[k][j-1][i];
double _t_39_ = 0.8 * _t_40_;
double _t_41_ = cons_3[k][j+2][i] * q_2[k][j+2][i];
_t_41_ -= cons_3[k][j-2][i] * q_2[k][j-2][i];
_t_39_ -= 0.2 * _t_41_;
double _t_42_ = cons_3[k][j+3][i] * q_2[k][j+3][i];
_t_42_ -= cons_3[k][j-3][i] * q_2[k][j-3][i];
_t_39_ += 0.038 * _t_42_;
double _t_43_ = cons_3[k][j+4][i] * q_2[k][j+4][i];
_t_43_ -= cons_3[k][j-4][i] * q_2[k][j-4][i];
_t_39_ -= 0.0035 * _t_43_;
flux_3kc0jc0ic0 -= _t_39_ * dxinv1;
flux_0[k][j][i] = flux_0kc0jc0ic0;
flux_1[k][j][i] = flux_1kc0jc0ic0;
flux_2[k][j][i] = flux_2kc0jc0ic0;
flux_3[k][j][i] = flux_3kc0jc0ic0;
}
}
__global__ void hypterm_1 (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(4*blockdim_k);
int k = max (k0, 0) + (int)(4*threadIdx.z);
double (*flux_0)[308][308] = (double (*)[308][308])flux_in_0;
double (*flux_1)[308][308] = (double (*)[308][308])flux_in_1;
double (*flux_2)[308][308] = (double (*)[308][308])flux_in_2;
double (*flux_3)[308][308] = (double (*)[308][308])flux_in_3;
double (*cons_1)[308][308] = (double (*)[308][308])cons_in_1;
double (*cons_2)[308][308] = (double (*)[308][308])cons_in_2;
double (*cons_3)[308][308] = (double (*)[308][308])cons_in_3;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
double flux0_a, flux1_a, flux2_a, flux3_a;
double flux0_b, flux1_b, flux2_b, flux3_b;
double flux0_c, flux1_c, flux2_c, flux3_c;
double flux0_d, flux1_d, flux2_d, flux3_d;
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
flux0_a = flux_0[k][j][i];
double flux_0kc0jc0ic0 = flux0_a;
double _t_1_ = cons_3[k+1][j][i];
_t_1_ -= cons_3[k-1][j][i];
double _t_0_ = 0.8 * _t_1_;
double _t_2_ = cons_3[k+2][j][i];
_t_2_ -= cons_3[k-2][j][i];
_t_0_ -= 0.2 * _t_2_;
double _t_3_ = cons_3[k+3][j][i];
_t_3_ -= cons_3[k-3][j][i];
_t_0_ += 0.038 * _t_3_;
double _t_4_ = cons_3[k+4][j][i];
_t_4_ -= cons_3[k-4][j][i];
_t_0_ -= 0.0035 * _t_4_;
flux_0kc0jc0ic0 -= _t_0_ * dxinv2;
flux0_b = flux_0[k+1][j][i];
double flux_0kp1jc0ic0 = flux0_b;
double _t_7_ = cons_3[k+3][j][i];
_t_7_ -= cons_3[k-1][j][i];
double _t_5_ = -(0.2 * _t_7_);
double _t_8_ = cons_3[k+4][j][i];
_t_8_ -= cons_3[k-2][j][i];
_t_5_ += 0.038 * _t_8_;
double _t_6_ = cons_3[k+2][j][i];
_t_6_ -= cons_3[k][j][i];
_t_5_ += 0.8 * _t_6_;
double _t_9_ = -(cons_3[k-3][j][i]);
_t_9_ += cons_3[k+5][j][i];
_t_5_ -= 0.0035 * _t_9_;
flux_0kp1jc0ic0 -= _t_5_ * dxinv2;
flux0_c = flux_0[k+2][j][i];
double flux_0kp2jc0ic0 = flux0_c;
double _t_11_ = cons_3[k+3][j][i];
_t_11_ -= cons_3[k+1][j][i];
double _t_10_ = 0.8 * _t_11_;
double _t_12_ = cons_3[k+4][j][i];
_t_12_ -= cons_3[k][j][i];
_t_10_ -= 0.2 * _t_12_;
double _t_13_ = cons_3[k+5][j][i];
_t_13_ -= cons_3[k-1][j][i];
_t_10_ += 0.038 * _t_13_;
double _t_14_ = -(cons_3[k-2][j][i]);
_t_14_ += cons_3[k+6][j][i];
_t_10_ -= 0.0035 * _t_14_;
flux_0kp2jc0ic0 -= _t_10_ * dxinv2;
flux0_d = flux_0[k+3][j][i];
double flux_0kp3jc0ic0 = flux0_d;
double _t_16_ = cons_3[k+4][j][i];
_t_16_ -= cons_3[k+2][j][i];
double _t_15_ = 0.8 * _t_16_;
double _t_17_ = cons_3[k+5][j][i];
_t_17_ -= cons_3[k+1][j][i];
_t_15_ -= 0.2 * _t_17_;
double _t_18_ = cons_3[k+6][j][i];
_t_18_ -= cons_3[k][j][i];
_t_15_ += 0.038 * _t_18_;
double _t_19_ = -(cons_3[k-1][j][i]);
_t_19_ += cons_3[k+7][j][i];
_t_15_ -= 0.0035 * _t_19_;
flux_0kp3jc0ic0 -= _t_15_ * dxinv2;
flux1_a = flux_1[k][j][i];
double flux_1kc0jc0ic0 = flux1_a;
double _t_24_ = -(cons_1[k-4][j][i] * q_3[k-4][j][i]);
_t_24_ += cons_1[k+4][j][i] * q_3[k+4][j][i];
double _t_20_ = -(0.0035 * _t_24_);
double _t_21_ = cons_1[k+1][j][i] * q_3[k+1][j][i];
_t_21_ -= cons_1[k-1][j][i] * q_3[k-1][j][i];
_t_20_ += 0.8 * _t_21_;
double _t_22_ = cons_1[k+2][j][i] * q_3[k+2][j][i];
_t_22_ -= cons_1[k-2][j][i] * q_3[k-2][j][i];
_t_20_ -= 0.2 * _t_22_;
double _t_23_ = cons_1[k+3][j][i] * q_3[k+3][j][i];
_t_23_ -= cons_1[k-3][j][i] * q_3[k-3][j][i];
_t_20_ += 0.038 * _t_23_;
flux_1kc0jc0ic0 -= _t_20_ * dxinv2;
flux1_b = flux_1[k+1][j][i];
double flux_1kp1jc0ic0 = flux1_b;
double _v_15_ = cons_1[k+3][j][i] * q_3[k+3][j][i];
double _v_16_ = cons_1[k-1][j][i] * q_3[k-1][j][i];
double _v_17_ = cons_1[k+4][j][i] * q_3[k+4][j][i];
double _v_18_ = cons_1[k-2][j][i] * q_3[k-2][j][i];
double _v_20_ = cons_1[k-3][j][i] * q_3[k-3][j][i];
double _v_13_ = cons_1[k+2][j][i] * q_3[k+2][j][i];
double _v_23_ = cons_1[k+1][j][i] * q_3[k+1][j][i];
double _v_47_ = cons_2[k-4][j][i] * q_3[k-4][j][i];
double _v_83_ = cons_3[k-4][j][i] * q_3[k-4][j][i];
double _v_82_ = cons_3[k+4][j][i] * q_3[k+4][j][i];
double _v_76_ = cons_3[k+1][j][i] * q_3[k+1][j][i];
double _v_77_ = cons_3[k-1][j][i] * q_3[k-1][j][i];
double _v_78_ = cons_3[k+2][j][i] * q_3[k+2][j][i];
double _v_79_ = cons_3[k-2][j][i] * q_3[k-2][j][i];
double _v_80_ = cons_3[k+3][j][i] * q_3[k+3][j][i];
double _v_81_ = cons_3[k-3][j][i] * q_3[k-3][j][i];
double _t_27_ = _v_15_;
_t_27_ -= _v_16_;
double _t_25_ = -(0.2 * _t_27_);
double _t_28_ = _v_17_;
_t_28_ -= _v_18_;
_t_25_ += 0.038 * _t_28_;
double _t_29_ = -(_v_20_);
_t_29_ += cons_1[k+5][j][i] * q_3[k+5][j][i];
_t_25_ -= 0.0035 * _t_29_;
double _t_26_ = _v_13_;
_t_26_ -= cons_1[k][j][i] * q_3[k][j][i];
_t_25_ += 0.8 * _t_26_;
flux_1kp1jc0ic0 -= _t_25_ * dxinv2;
flux1_c = flux_1[k+2][j][i];
double flux_1kp2jc0ic0 = flux1_c;
double _v_25_ = cons_1[k][j][i] * q_3[k][j][i];
double _v_26_ = cons_1[k+5][j][i] * q_3[k+5][j][i];
double _v_91_ = cons_3[k+5][j][i] * q_3[k+5][j][i];
double _v_86_ = cons_3[k][j][i] * q_3[k][j][i];
double _t_31_ = _v_15_;
_t_31_ -= _v_23_;
double _t_30_ = 0.8 * _t_31_;
double _t_32_ = _v_17_;
_t_32_ -= _v_25_;
_t_30_ -= 0.2 * _t_32_;
double _t_33_ = _v_26_;
_t_33_ -= _v_16_;
_t_30_ += 0.038 * _t_33_;
double _t_34_ = -(_v_18_);
_t_34_ += cons_1[k+6][j][i] * q_3[k+6][j][i];
_t_30_ -= 0.0035 * _t_34_;
flux_1kp2jc0ic0 -= _t_30_ * dxinv2;
flux1_d = flux_1[k+3][j][i];
double flux_1kp3jc0ic0 = flux1_d;
double _v_35_ = cons_1[k+6][j][i] * q_3[k+6][j][i];
double _v_100_ = cons_3[k+6][j][i] * q_3[k+6][j][i];
double _t_36_ = _v_17_;
_t_36_ -= _v_13_;
double _t_35_ = 0.8 * _t_36_;
double _t_37_ = _v_26_;
_t_37_ -= _v_23_;
_t_35_ -= 0.2 * _t_37_;
double _t_38_ = _v_35_;
_t_38_ -= _v_25_;
_t_35_ += 0.038 * _t_38_;
double _t_39_ = -(_v_16_);
_t_39_ += cons_1[k+7][j][i] * q_3[k+7][j][i];
_t_35_ -= 0.0035 * _t_39_;
flux_1kp3jc0ic0 -= _t_35_ * dxinv2;
flux2_a = flux_2[k][j][i];
double flux_2kc0jc0ic0 = flux2_a;
double _v_73_ = cons_2[k+7][j][i] * q_3[k+7][j][i];
double _v_109_ = cons_3[k+7][j][i] * q_3[k+7][j][i];
double _t_44_ = -(_v_47_);
double _v_46_ = cons_2[k+4][j][i] * q_3[k+4][j][i];
_t_44_ += _v_46_;
double _t_40_ = -(0.0035 * _t_44_);
double _v_40_ = cons_2[k+1][j][i] * q_3[k+1][j][i];
double _t_41_ = _v_40_;
double _v_41_ = cons_2[k-1][j][i] * q_3[k-1][j][i];
_t_41_ -= _v_41_;
_t_40_ += 0.8 * _t_41_;
double _v_42_ = cons_2[k+2][j][i] * q_3[k+2][j][i];
double _t_42_ = _v_42_;
double _v_43_ = cons_2[k-2][j][i] * q_3[k-2][j][i];
_t_42_ -= _v_43_;
_t_40_ -= 0.2 * _t_42_;
double _v_44_ = cons_2[k+3][j][i] * q_3[k+3][j][i];
double _t_43_ = _v_44_;
double _v_45_ = cons_2[k-3][j][i] * q_3[k-3][j][i];
_t_43_ -= _v_45_;
_t_40_ += 0.038 * _t_43_;
flux_2kc0jc0ic0 -= _t_40_ * dxinv2;
flux2_b = flux_2[k+1][j][i];
double flux_2kp1jc0ic0 = flux2_b;
double _t_47_ = _v_44_;
_t_47_ -= _v_41_;
double _t_45_ = -(0.2 * _t_47_);
double _t_48_ = _v_46_;
_t_48_ -= _v_43_;
_t_45_ += 0.038 * _t_48_;
double _t_49_ = -(_v_45_);
double _v_55_ = cons_2[k+5][j][i] * q_3[k+5][j][i];
_t_49_ += _v_55_;
_t_45_ -= 0.0035 * _t_49_;
double _t_46_ = _v_42_;
double _v_50_ = cons_2[k][j][i] * q_3[k][j][i];
_t_46_ -= _v_50_;
_t_45_ += 0.8 * _t_46_;
flux_2kp1jc0ic0 -= _t_45_ * dxinv2;
flux2_c = flux_2[k+2][j][i];
double flux_2kp2jc0ic0 = flux2_c;
double _t_51_ = _v_44_;
_t_51_ -= _v_40_;
double _t_50_ = 0.8 * _t_51_;
double _t_52_ = _v_46_;
_t_52_ -= _v_50_;
_t_50_ -= 0.2 * _t_52_;
double _t_53_ = _v_55_;
_t_53_ -= _v_41_;
_t_50_ += 0.038 * _t_53_;
double _t_54_ = -(_v_43_);
double _v_64_ = cons_2[k+6][j][i] * q_3[k+6][j][i];
_t_54_ += _v_64_;
_t_50_ -= 0.0035 * _t_54_;
flux_2kp2jc0ic0 -= _t_50_ * dxinv2;
flux2_d = flux_2[k+3][j][i];
double flux_2kp3jc0ic0 = flux2_d;
double _t_56_ = _v_46_;
_t_56_ -= _v_42_;
double _t_55_ = 0.8 * _t_56_;
double _t_57_ = _v_55_;
_t_57_ -= _v_40_;
_t_55_ -= 0.2 * _t_57_;
double _t_58_ = _v_64_;
_t_58_ -= _v_50_;
_t_55_ += 0.038 * _t_58_;
double _t_59_ = -(_v_41_);
_t_59_ += _v_73_;
_t_55_ -= 0.0035 * _t_59_;
flux_2kp3jc0ic0 -= _t_55_ * dxinv2;
flux3_a = flux_3[k][j][i];
double flux_3kc0jc0ic0 = flux3_a;
double _t_64_ = -(_v_83_);
_t_64_ += _v_82_;
_t_64_ -= q_4[k-4][j][i];
_t_64_ += q_4[k+4][j][i];
double _t_60_ = -(0.0035 * _t_64_);
double _t_61_ = _v_76_;
_t_61_ -= _v_77_;
_t_61_ += q_4[k+1][j][i];
_t_61_ -= q_4[k-1][j][i];
_t_60_ += 0.8 * _t_61_;
double _t_62_ = _v_78_;
_t_62_ -= _v_79_;
_t_62_ += q_4[k+2][j][i];
_t_62_ -= q_4[k-2][j][i];
_t_60_ -= 0.2 * _t_62_;
double _t_63_ = _v_80_;
_t_63_ -= _v_81_;
_t_63_ += q_4[k+3][j][i];
_t_63_ -= q_4[k-3][j][i];
_t_60_ += 0.038 * _t_63_;
flux_3kc0jc0ic0 -= _t_60_ * dxinv2;
flux3_b = flux_3[k+1][j][i];
double flux_3kp1jc0ic0 = flux3_b;
double _t_69_ = -(q_4[k-3][j][i]);
_t_69_ -= _v_81_;
_t_69_ += _v_91_;
_t_69_ += q_4[k+5][j][i];
double _t_65_ = -(0.0035 * _t_69_);
double _t_67_ = _v_80_;
_t_67_ -= _v_77_;
_t_67_ += q_4[k+3][j][i];
_t_67_ -= q_4[k-1][j][i];
_t_65_ -= 0.2 * _t_67_;
double _t_68_ = _v_82_;
_t_68_ -= _v_79_;
_t_68_ += q_4[k+4][j][i];
_t_68_ -= q_4[k-2][j][i];
_t_65_ += 0.038 * _t_68_;
double _t_66_ = _v_78_;
_t_66_ -= _v_86_;
_t_66_ += q_4[k+2][j][i];
_t_66_ -= q_4[k][j][i];
_t_65_ += 0.8 * _t_66_;
flux_3kp1jc0ic0 -= _t_65_ * dxinv2;
flux3_c = flux_3[k+2][j][i];
double flux_3kp2jc0ic0 = flux3_c;
double _t_71_ = q_4[k+3][j][i];
_t_71_ -= q_4[k+1][j][i];
double _t_74_ = -(q_4[k-2][j][i]);
double _t_72_ = q_4[k+4][j][i];
_t_72_ -= q_4[k][j][i];
double _t_73_ = q_4[k+5][j][i];
_t_73_ -= q_4[k-1][j][i];
double _t_76_ = q_4[k+4][j][i];
_t_76_ -= q_4[k+2][j][i];
double _t_77_ = q_4[k+5][j][i];
_t_77_ -= q_4[k+1][j][i];
double _t_78_ = -(q_4[k][j][i]);
double _t_79_ = -(q_4[k-1][j][i]);
_t_71_ += _v_80_;
_t_71_ -= _v_76_;
double _t_70_ = 0.8 * _t_71_;
_t_74_ -= _v_79_;
_t_74_ += _v_100_;
_t_74_ += q_4[k+6][j][i];
_t_78_ += q_4[k+6][j][i];
_t_70_ -= 0.0035 * _t_74_;
_t_72_ += _v_82_;
_t_72_ -= _v_86_;
_t_70_ -= 0.2 * _t_72_;
_t_73_ += _v_91_;
_t_73_ -= _v_77_;
_t_70_ += 0.038 * _t_73_;
flux_3kp2jc0ic0 -= _t_70_ * dxinv2;
flux3_d = flux_3[k+3][j][i];
double flux_3kp3jc0ic0 = flux3_d;
_t_76_ += _v_82_;
_t_76_ -= _v_78_;
double _t_75_ = 0.8 * _t_76_;
_t_77_ += _v_91_;
_t_77_ -= _v_76_;
_t_75_ -= 0.2 * _t_77_;
_t_78_ += _v_100_;
_t_78_ -= _v_86_;
_t_75_ += 0.038 * _t_78_;
_t_79_ += _v_109_;
_t_79_ -= _v_77_;
_t_79_ += q_4[k+7][j][i];
_t_75_ -= 0.0035 * _t_79_;
flux_3kp3jc0ic0 -= _t_75_ * dxinv2;
flux_0[k][j][i] = flux_0kc0jc0ic0;
flux_0[k+1][j][i] = flux_0kp1jc0ic0;
flux_0[k+2][j][i] = flux_0kp2jc0ic0;
flux_0[k+3][j][i] = flux_0kp3jc0ic0;
flux_1[k][j][i] = flux_1kc0jc0ic0;
flux_1[k+1][j][i] = flux_1kp1jc0ic0;
flux_1[k+2][j][i] = flux_1kp2jc0ic0;
flux_1[k+3][j][i] = flux_1kp3jc0ic0;
flux_2[k][j][i] = flux_2kc0jc0ic0;
flux_2[k+1][j][i] = flux_2kp1jc0ic0;
flux_2[k+2][j][i] = flux_2kp2jc0ic0;
flux_2[k+3][j][i] = flux_2kp3jc0ic0;
flux_3[k][j][i] = flux_3kc0jc0ic0;
flux_3[k+1][j][i] = flux_3kp1jc0ic0;
flux_3[k+2][j][i] = flux_3kp2jc0ic0;
flux_3[k+3][j][i] = flux_3kp3jc0ic0;
}
}
__global__ void hypterm_2 (double * __restrict__ flux_in_4, double * __restrict__ cons_in_4, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(2*blockdim_k);
int k = max (k0, 0) + (int)(2*threadIdx.z);
double (*flux_4)[308][308] = (double (*)[308][308])flux_in_4;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
double (*cons_4)[308][308] = (double (*)[308][308])cons_in_4;
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
flux_4[k][j][i] = ((0.8*(cons_4[k][j][i+1]*q_1[k][j][i+1]-cons_4[k][j][i-1]*q_1[k][j][i-1]+(q_4[k][j][i+1]*q_1[k][j][i+1]-q_4[k][j][i-1]*q_1[k][j][i-1]))-0.2*(cons_4[k][j][i+2]*q_1[k][j][i+2]-cons_4[k][j][i-2]*q_1[k][j][i-2]+(q_4[k][j][i+2]*q_1[k][j][i+2]-q_4[k][j][i-2]*q_1[k][j][i-2]))+0.038*(cons_4[k][j][i+3]*q_1[k][j][i+3]-cons_4[k][j][i-3]*q_1[k][j][i-3]+(q_4[k][j][i+3]*q_1[k][j][i+3]-q_4[k][j][i-3]*q_1[k][j][i-3]))-0.0035*(cons_4[k][j][i+4]*q_1[k][j][i+4]-cons_4[k][j][i-4]*q_1[k][j][i-4]+(q_4[k][j][i+4]*q_1[k][j][i+4]-q_4[k][j][i-4]*q_1[k][j][i-4])))*dxinv0);
flux_4[k+1][j][i] = ((0.8*(cons_4[k+1][j][i+1]*q_1[k+1][j][i+1]-cons_4[k+1][j][i-1]*q_1[k+1][j][i-1]+(q_4[k+1][j][i+1]*q_1[k+1][j][i+1]-q_4[k+1][j][i-1]*q_1[k+1][j][i-1]))-0.2*(cons_4[k+1][j][i+2]*q_1[k+1][j][i+2]-cons_4[k+1][j][i-2]*q_1[k+1][j][i-2]+(q_4[k+1][j][i+2]*q_1[k+1][j][i+2]-q_4[k+1][j][i-2]*q_1[k+1][j][i-2]))+0.038*(cons_4[k+1][j][i+3]*q_1[k+1][j][i+3]-cons_4[k+1][j][i-3]*q_1[k+1][j][i-3]+(q_4[k+1][j][i+3]*q_1[k+1][j][i+3]-q_4[k+1][j][i-3]*q_1[k+1][j][i-3]))-0.0035*(cons_4[k+1][j][i+4]*q_1[k+1][j][i+4]-cons_4[k+1][j][i-4]*q_1[k+1][j][i-4]+(q_4[k+1][j][i+4]*q_1[k+1][j][i+4]-q_4[k+1][j][i-4]*q_1[k+1][j][i-4])))*dxinv0);
flux_4[k][j][i] -= (0.8*(cons_4[k][j+1][i]*q_2[k][j+1][i]-cons_4[k][j-1][i]*q_2[k][j-1][i]+(q_4[k][j+1][i]*q_2[k][j+1][i]-q_4[k][j-1][i]*q_2[k][j-1][i]))-0.2*(cons_4[k][j+2][i]*q_2[k][j+2][i]-cons_4[k][j-2][i]*q_2[k][j-2][i]+(q_4[k][j+2][i]*q_2[k][j+2][i]-q_4[k][j-2][i]*q_2[k][j-2][i]))+0.038*(cons_4[k][j+3][i]*q_2[k][j+3][i]-cons_4[k][j-3][i]*q_2[k][j-3][i]+(q_4[k][j+3][i]*q_2[k][j+3][i]-q_4[k][j-3][i]*q_2[k][j-3][i]))-0.0035*(cons_4[k][j+4][i]*q_2[k][j+4][i]-cons_4[k][j-4][i]*q_2[k][j-4][i]+(q_4[k][j+4][i]*q_2[k][j+4][i]-q_4[k][j-4][i]*q_2[k][j-4][i])))*dxinv1;
flux_4[k+1][j][i] -= (0.8*(cons_4[k+1][j+1][i]*q_2[k+1][j+1][i]-cons_4[k+1][j-1][i]*q_2[k+1][j-1][i]+(q_4[k+1][j+1][i]*q_2[k+1][j+1][i]-q_4[k+1][j-1][i]*q_2[k+1][j-1][i]))-0.2*(cons_4[k+1][j+2][i]*q_2[k+1][j+2][i]-cons_4[k+1][j-2][i]*q_2[k+1][j-2][i]+(q_4[k+1][j+2][i]*q_2[k+1][j+2][i]-q_4[k+1][j-2][i]*q_2[k+1][j-2][i]))+0.038*(cons_4[k+1][j+3][i]*q_2[k+1][j+3][i]-cons_4[k+1][j-3][i]*q_2[k+1][j-3][i]+(q_4[k+1][j+3][i]*q_2[k+1][j+3][i]-q_4[k+1][j-3][i]*q_2[k+1][j-3][i]))-0.0035*(cons_4[k+1][j+4][i]*q_2[k+1][j+4][i]-cons_4[k+1][j-4][i]*q_2[k+1][j-4][i]+(q_4[k+1][j+4][i]*q_2[k+1][j+4][i]-q_4[k+1][j-4][i]*q_2[k+1][j-4][i])))*dxinv1;
flux_4[k][j][i] -= (0.8*(cons_4[k+1][j][i]*q_3[k+1][j][i]-cons_4[k-1][j][i]*q_3[k-1][j][i]+(q_4[k+1][j][i]*q_3[k+1][j][i]-q_4[k-1][j][i]*q_3[k-1][j][i]))-0.2*(cons_4[k+2][j][i]*q_3[k+2][j][i]-cons_4[k-2][j][i]*q_3[k-2][j][i]+(q_4[k+2][j][i]*q_3[k+2][j][i]-q_4[k-2][j][i]*q_3[k-2][j][i]))+0.038*(cons_4[k+3][j][i]*q_3[k+3][j][i]-cons_4[k-3][j][i]*q_3[k-3][j][i]+(q_4[k+3][j][i]*q_3[k+3][j][i]-q_4[k-3][j][i]*q_3[k-3][j][i]))-0.0035*(cons_4[k+4][j][i]*q_3[k+4][j][i]-cons_4[k-4][j][i]*q_3[k-4][j][i]+(q_4[k+4][j][i]*q_3[k+4][j][i]-q_4[k-4][j][i]*q_3[k-4][j][i])))*dxinv2;
flux_4[k+1][j][i] -= (0.8*(cons_4[k+1+1][j][i]*q_3[k+1+1][j][i]-cons_4[k+1-1][j][i]*q_3[k+1-1][j][i]+(q_4[k+1+1][j][i]*q_3[k+1+1][j][i]-q_4[k+1-1][j][i]*q_3[k+1-1][j][i]))-0.2*(cons_4[k+1+2][j][i]*q_3[k+1+2][j][i]-cons_4[k+1-2][j][i]*q_3[k+1-2][j][i]+(q_4[k+1+2][j][i]*q_3[k+1+2][j][i]-q_4[k+1-2][j][i]*q_3[k+1-2][j][i]))+0.038*(cons_4[k+1+3][j][i]*q_3[k+1+3][j][i]-cons_4[k+1-3][j][i]*q_3[k+1-3][j][i]+(q_4[k+1+3][j][i]*q_3[k+1+3][j][i]-q_4[k+1-3][j][i]*q_3[k+1-3][j][i]))-0.0035*(cons_4[k+1+4][j][i]*q_3[k+1+4][j][i]-cons_4[k+1-4][j][i]*q_3[k+1-4][j][i]+(q_4[k+1+4][j][i]*q_3[k+1+4][j][i]-q_4[k+1-4][j][i]*q_3[k+1-4][j][i])))*dxinv2;
}
}
extern "C" void host_code (double *h_flux_0, double *h_flux_1, double *h_flux_2, double *h_flux_3, double *h_flux_4, double *h_cons_1, double *h_cons_2, double *h_cons_3, double *h_cons_4, double *h_q_1, double *h_q_2, double *h_q_3, double *h_q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
double *flux_0;
cudaMalloc (&flux_0, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_0\n");
cudaMemcpy (flux_0, h_flux_0, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_1;
cudaMalloc (&flux_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_1\n");
cudaMemcpy (flux_1, h_flux_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_2;
cudaMalloc (&flux_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_2\n");
cudaMemcpy (flux_2, h_flux_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_3;
cudaMalloc (&flux_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_3\n");
cudaMemcpy (flux_3, h_flux_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_4;
cudaMalloc (&flux_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_4\n");
cudaMemcpy (flux_4, h_flux_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_1;
cudaMalloc (&cons_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_1\n");
cudaMemcpy (cons_1, h_cons_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_2;
cudaMalloc (&cons_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_2\n");
cudaMemcpy (cons_2, h_cons_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_3;
cudaMalloc (&cons_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_3\n");
cudaMemcpy (cons_3, h_cons_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_4;
cudaMalloc (&cons_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_4\n");
cudaMemcpy (cons_4, h_cons_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_1;
cudaMalloc (&q_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_1\n");
cudaMemcpy (q_1, h_q_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_2;
cudaMalloc (&q_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_2\n");
cudaMemcpy (q_2, h_q_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_3;
cudaMalloc (&q_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_3\n");
cudaMemcpy (q_3, h_q_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_4;
cudaMalloc (&q_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_4\n");
cudaMemcpy (q_4, h_q_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
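// Launch configuration: 16x4x4 thread blocks. 'ceil' is taken here to be an integer
// ceiling-division helper assumed to be defined earlier in this generated file (it is
// invoked with two arguments, unlike the math-library ceil). The grid z-extent is shrunk
// by 4x for hypterm_1 and 2x for hypterm_2 because those kernels presumably process 4 and
// 2 k-planes per thread (hypterm_2 visibly writes k and k+1). Device buffers are not
// freed before returning.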
dim3 blockconfig (16, 4, 4);
dim3 gridconfig_0 (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, blockconfig.z));
hypterm_0 <<<gridconfig_0, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, cons_1, cons_2, cons_3, q_1, q_2, q_3, q_4, -dxinv0, dxinv1, dxinv2, L, M, N);
dim3 gridconfig_1 (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, 4*blockconfig.z));
hypterm_1 <<<gridconfig_1, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, cons_1, cons_2, cons_3, q_1, q_2, q_3, q_4, -dxinv0, dxinv1, dxinv2, L, M, N);
dim3 gridconfig_2 (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, 2*blockconfig.z));
hypterm_2 <<<gridconfig_2, blockconfig>>> (flux_4, cons_4, q_1, q_2, q_3, q_4, -dxinv0, dxinv1, dxinv2, L, M, N);
cudaMemcpy (h_flux_0, flux_0, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_1, flux_1, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_3, flux_3, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_4, flux_4, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_2, flux_2, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
}
|
26c415ffe292d075ed08dcac6f26beba103f5419.hip | // !!! This is a file automatically generated by hipify!!!
#include "omp.h"
#include "stdio.h"
#include <math.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
/*__global__ void
calcularPi( float *sum, int operaciones, int t)
{
int i = ((blockDim.x * blockIdx.x + threadIdx.x));
if (i < t){
sum[i] = 0;
if (i % 2 == 0){
for(int j = 0; j < operaciones; j=j+2 ){
if ((i+j) == 0 ) j++;
sum[i] += 1.0/(i + j);
j =j+2;
sum[i] -= 1.0/(i + j);
}
}else{
for(int j = 0; j < operaciones; j=j+2 ){
sum[i] -= 1.0/(i + j);
j =j+2;
sum[i] += 1.0/(i + j);
}
}
}
}*/
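// Each term 2/((4n+1)(4n+3)) equals 1/(4n+1) - 1/(4n+3), so the infinite sum is the
// Leibniz series for pi/4; the host multiplies the accumulated total by 4. Note that as
// written thread i sums the terms n = i .. i+op-1, so neighbouring threads cover
// overlapping index ranges; a disjoint split such as n = i*op + j may have been the intent.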
__global__ void
calcularPi( float *sum, int operaciones, int t)
{
int i = ((blockDim.x * blockIdx.x + threadIdx.x));
int total = t;
int op = operaciones;
if (i < total ){
sum[i] = 0;
for(int j = 0; j < op; j++ ){
sum[i] += 2.0/((4.0*(i+j)+1)*(4.0*(i+j)+3));
}
}
}
int main(void)
{
// declare the number of threads according to the GPU
hipError_t err = hipSuccess;
int dev = 0;
size_t size = sizeof(float);
hipSetDevice(dev);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
int threadsPerBlock = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
threadsPerBlock = threadsPerBlock*2;
int blocksPerGrid = deviceProp.multiProcessorCount;
float numIt = 1e10;
int hilosTotales = blocksPerGrid*threadsPerBlock;
int operacionPorHilo;
size_t size_pi = sizeof(float) * hilosTotales;
operacionPorHilo = (numIt > hilosTotales ) ? (int)(ceil(numIt/hilosTotales) ) : 1;
float h_pi = 0.0;
float *h_sum = (float*)malloc(size_pi);
float *d_sum = NULL;
err = hipMalloc((void **)&d_sum, size_pi);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device d_sum (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_sum, h_sum, size_pi, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector pi from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
printf("Operaciones por Hilo %d\n",operacionPorHilo);
hipLaunchKernelGGL(( calcularPi), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_sum, operacionPorHilo, hilosTotales);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch calcularPi kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_sum, d_sum, size_pi, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy h_sum from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
for(int i = 0 ; i < hilosTotales; i ++){
h_pi += h_sum[i];
}
h_pi = h_pi * 4;
printf("valor de pi %.10f\n",h_pi );
return 0;
} | 26c415ffe292d075ed08dcac6f26beba103f5419.cu | #include "omp.h"
#include "stdio.h"
#include <math.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
/*__global__ void
calcularPi( float *sum, int operaciones, int t)
{
int i = ((blockDim.x * blockIdx.x + threadIdx.x));
if (i < t){
sum[i] = 0;
if (i % 2 == 0){
for(int j = 0; j < operaciones; j=j+2 ){
if ((i+j) == 0 ) j++;
sum[i] += 1.0/(i + j);
j =j+2;
sum[i] -= 1.0/(i + j);
}
}else{
for(int j = 0; j < operaciones; j=j+2 ){
sum[i] -= 1.0/(i + j);
j =j+2;
sum[i] += 1.0/(i + j);
}
}
}
}*/
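// Each term 2/((4n+1)(4n+3)) equals 1/(4n+1) - 1/(4n+3), so the infinite sum is the
// Leibniz series for pi/4; the host multiplies the accumulated total by 4. Note that as
// written thread i sums the terms n = i .. i+op-1, so neighbouring threads cover
// overlapping index ranges; a disjoint split such as n = i*op + j may have been the intent.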
__global__ void
calcularPi( float *sum, int operaciones, int t)
{
int i = ((blockDim.x * blockIdx.x + threadIdx.x));
int total = t;
int op = operaciones;
if (i < total ){
sum[i] = 0;
for(int j = 0; j < op; j++ ){
sum[i] += 2.0/((4.0*(i+j)+1)*(4.0*(i+j)+3));
}
}
}
int main(void)
{
// declare the number of threads according to the GPU
cudaError_t err = cudaSuccess;
int dev = 0;
size_t size = sizeof(float);
cudaSetDevice(dev);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
int threadsPerBlock = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
threadsPerBlock = threadsPerBlock*2;
int blocksPerGrid = deviceProp.multiProcessorCount;
float numIt = 1e10;
int hilosTotales = blocksPerGrid*threadsPerBlock;
int operacionPorHilo;
size_t size_pi = sizeof(float) * hilosTotales;
operacionPorHilo = (numIt > hilosTotales ) ? (int)(ceil(numIt/hilosTotales) ) : 1;
float h_pi = 0.0;
float *h_sum = (float*)malloc(size_pi);
float *d_sum = NULL;
err = cudaMalloc((void **)&d_sum, size_pi);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device d_sum (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_sum, h_sum, size_pi, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector pi from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
printf("Operaciones por Hilo %d\n",operacionPorHilo);
calcularPi<<<blocksPerGrid, threadsPerBlock>>>(d_sum, operacionPorHilo, hilosTotales);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch calcularPi kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_sum, d_sum, size_pi, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy h_sum from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
for(int i = 0 ; i < hilosTotales; i ++){
h_pi += h_sum[i];
}
h_pi = h_pi * 4;
printf("valor de pi %.10f\n",h_pi );
return 0;
} |
f966b6ea6dd61a4df066c06748ad75987faf24bc.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMathPointwise.cu"
#else
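// This macro expands to a unary elementwise operator: it defines a functor that applies
// CFUNC to each element and a THCTensor_(NAME) entry point that runs it in place when
// self_ == src (pointwiseApply1) or into a resized destination otherwise (pointwiseApply2).
// REAL only serves to make the generated functor name unique.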
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \
struct Tensor_##NAME##_##REAL##_Op { \
__device__ __forceinline__ void operator()(real* out, real* in) const { \
*out = CFUNC(*in); \
} \
\
__device__ __forceinline__ void operator()(real* v) const { \
*v = CFUNC(*v); \
} \
}; \
\
void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); \
if (self_ == src) { \
if (!THC_pointwiseApply1(state, self_, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} else { \
THCTensor_(resizeAs)(state, self_, src); \
\
if (!THC_pointwiseApply2(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} \
\
THCudaCheck(hipGetLastError()); \
}
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL)
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log, THCNumerics<real>::log, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(lgamma, THCNumerics<real>::lgamma, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log10, THCNumerics<real>::log10, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, THCNumerics<real>::log1p, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log2, THCNumerics<real>::log2, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( exp, THCNumerics<real>::exp, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(expm1, THCNumerics<real>::expm1, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cos, THCNumerics<real>::cos, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sin, THCNumerics<real>::sin, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<real>::sqrt, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(rsqrt, THCNumerics<real>::rsqrt, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( ceil, THCNumerics<real>::ceil, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(floor, THCNumerics<real>::floor, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(trunc, THCNumerics<real>::trunc, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( acos, THCNumerics<real>::acos, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics<real>::cosh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( asin, THCNumerics<real>::asin, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sinh, THCNumerics<real>::sinh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tan, THCNumerics<real>::tan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<real>::atan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<real>::tanh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erf, THCNumerics<real>::erf, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(erfinv, THCNumerics<real>::erfinv,Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( round, THCNumerics<real>::round, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( frac, THCNumerics<real>::frac, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cinv, THCNumerics<real>::cinv, Real)
#endif
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( neg, THCNumerics<real>::neg, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( abs, THCNumerics<real>::abs, Real)
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC
void THCTensor_(sign)(THCState* state, THCTensor* self_, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1(state, self_, TensorSignOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2(state, self_, src, TensorSignOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(clamp)(THCState *state, THCTensor *self_, THCTensor *src, real min_value,
real max_value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1(state, self_, TensorClampOp<real>(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2(state, self_, src, TensorClampOp<real>(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(cross)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, x, y));
int i;
int nd = THCTensor_(nDimension)(state, x);
ptrdiff_t nelem = THCTensor_(nElement)(state, x);
THArgCheck(nd == THCTensor_(nDimension)(state, y), 1, "tensors must have same number of dimensions");
for (i = 0; i < nd; i++) {
THArgCheck(THCTensor_(size)(state, x, i) == THCTensor_(size)(state, y, i), 1, "dimension %i of x and y does not match", i);
if (dimension < 0 && THCTensor_(size)(state, x, i) == 3) {
dimension = i;
}
}
THArgCheck(dimension >= 0 && dimension < nd, 3, "dimension %d out of range", dimension+1);
THArgCheck(THCTensor_(size)(state, x, dimension) == 3, 3,
"dimension %d does not have size 3", dimension+1);
THCTensor_(resizeAs)(state, self, x);
int64_t sx = THCTensor_(stride)(state, x, dimension);
int64_t sy = THCTensor_(stride)(state, y, dimension);
int64_t so = THCTensor_(stride)(state, self, dimension);
THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1);
THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1);
THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1);
if (!THC_pointwiseApply3(state, nself, nx, ny, TensorCrossOp<real>(sx, sy, so))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCTensor_(free)(state, nx);
THCTensor_(free)(state, ny);
THCTensor_(free)(state, nself);
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(atan2)(THCState *state, THCTensor *self_, THCTensor *tx, THCTensor *ty)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, tx, ty));
THArgCheck(THCTensor_(nElement)(state, tx) ==
THCTensor_(nElement)(state, ty), 3, "sizes do not match");
THCTensor_(resizeAs)(state, self_, tx);
if (!THC_pointwiseApply3(state, self_, tx, ty, TensorATan2Op<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(sigmoid)(THCState* state, THCTensor* self_, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1(state, self_, TensorSigmoidOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2(state, self_, src, TensorSigmoidOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(digamma)(THCState* state, THCTensor* self_, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ != src) {
THCTensor_(resizeAs)(state, self_, src);
}
if (!THC_pointwiseApply2(state, self_, src, TensorDigammaOp<real, accreal>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(polygamma)(THCState* state, THCTensor* self_, int64_t n, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ != src) {
THCTensor_(resizeAs)(state, self_, src);
}
switch (n) {
case 0:
if (!THC_pointwiseApply2(state, self_, src, TensorDigammaOp<real, accreal>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
break;
case 1:
if (!THC_pointwiseApply2(state, self_, src, TensorTrigammaOp<real, accreal>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
break;
default:
THError("polygamma(n,x) is not implemented for n>=2");
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(lerp)(THCState *state, THCTensor *result, THCTensor *a, THCTensor *b, real w)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, result, a, b));
THArgCheck(THCTensor_(nElement)(state, a) ==
THCTensor_(nElement)(state, b), 3, "sizes do not match");
THCTensor_(resizeAs)(state, result, a);
if (!THC_pointwiseApply3(state, result, a, b, TensorLerpOp<real>(w))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
#endif
THC_API void
THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, real value, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
if (value == ScalarConvert<int, real>::to(1)) {
// self += src2
if (!THC_pointwiseApply2(state, self_, src2, TensorAddOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self += value * src2
if (!THC_pointwiseApply2(state, self_, src2, TensorCAddOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
if (value == ScalarConvert<int, real>::to(1)) {
// self = src1 + src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self = src1 + value * src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorCAddOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, real value, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
if (value == ScalarConvert<int, real>::to(1)) {
// self -= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorSubOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self += -value * src2
if (!THC_pointwiseApply2(state, self_, src2,
TensorCAddOp<real>(
ScalarNegate<real>::to(value)))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
if (value == ScalarConvert<int, real>::to(1)) {
// self = src1 - src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorSubOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self = src1 - value * src2
if (!THC_pointwiseApply3(state, self_, src1, src2,
TensorCAddOp<real>(
ScalarNegate<real>::to(value)))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self *= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorMulOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 * src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorMulOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(cpow)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self = pow(self, src2)
if (!THC_pointwiseApply2(state, self_, src2, TensorCPowOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = pow(src1, src2)
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorCPowOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
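// pow dispatches on the exponent: 1, 2 and 3 (plus -1 and -2 for floating-point types)
// pick specialised TensorPowOp instantiations, while every other value falls through to
// the generic path, where the -3 template argument apparently acts as a "call pow()"
// sentinel rather than a literal exponent.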
void THCTensor_(pow)(THCState *state, THCTensor *self_, THCTensor *src, real value) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(1))) {
if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, 1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(2))) {
if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, 2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(3))) {
if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, 3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(-1))) {
if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, -1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(-2))) {
if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, -2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#endif
} else {
// fallback implementation using pow
if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, -3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(1))) {
if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, 1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(2))) {
if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, 2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(3))) {
if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, 3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(-1))) {
if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, -1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(-2))) {
if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, -2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#endif
} else {
// fallback implementation using pow
if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, -3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(tpow)(THCState *state, THCTensor *self_, real value, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1(state, self_, TensorTPowOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2(state, self_, src, TensorTPowOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self /= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorDivOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 / src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorDivOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(clshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF)
return THError("clshift not supported for torch.CudaHalfTensor");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self <<= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorLShiftOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 << src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorLShiftOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
THC_API void
THCTensor_(crshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF)
return THError("crshift not supported for torch.CudaHalfTensor");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self >>= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorRShiftOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 >> src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorRShiftOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
THC_API void
THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2(state, self, src2, TensorMaxOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3(state, self, src1, src2, TensorMaxOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THC_API void
THCTensor_(cmin)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2(state, self, src2, TensorMinOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3(state, self, src1, src2, TensorMinOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THC_API void
THCTensor_(cremainder)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2(state, self, src2, TensorCRemainderOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3(state, self, src1, src2, TensorCRemainderOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THC_API void
THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2(state, self, src2, TensorCFmodOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3(state, self, src1, src2, TensorCFmodOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THC_API void
THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (self == src) {
if (!THC_pointwiseApply1(state, self, TensorMaxValueOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src);
if (!THC_pointwiseApply2(state, self, src, TensorMaxValueOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THC_API void
THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (self == src) {
if (!THC_pointwiseApply1(state, self, TensorMinValueOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src);
if (!THC_pointwiseApply2(state, self, src, TensorMinValueOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THC_API void
THCTensor_(addcmul)(THCState *state, THCTensor *self_, THCTensor *t, real value, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, self_, t, src1, src2));
if(self_ != t)
{
THCTensor_(resizeAs)(state, self_, t);
THCTensor_(copy)(state, self_, t);
}
else
{
THArgCheck(THCTensor_(nElement)(state, self_) == THCTensor_(nElement)(state, src1),
1, "sizes do not match");
}
THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2),
3, "sizes do not match");
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddCMulOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(addcdiv)(THCState *state, THCTensor *self_, THCTensor *t, real value, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, self_, t, src1, src2));
if(self_ != t)
{
THCTensor_(resizeAs)(state, self_, t);
THCTensor_(copy)(state, self_, t);
}
else
{
THArgCheck(THCTensor_(nElement)(state, self_) == THCTensor_(nElement)(state, src1),
1, "sizes do not match");
}
THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2),
3, "sizes do not match");
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddCDivOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(cbitand)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
return THError("cbitand is only supported for integer type tensors");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self &= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorBitAndOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 & src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorBitAndOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
THC_API void
THCTensor_(cbitor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
return THError("cbitor is only supported for integer type tensors");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self |= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorBitOrOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 | src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorBitOrOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
THC_API void
THCTensor_(cbitxor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
return THError("cbitor is only supported for integer type tensors");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self ^= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorBitXorOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 ^ src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorBitXorOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
#endif
| f966b6ea6dd61a4df066c06748ad75987faf24bc.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMathPointwise.cu"
#else
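// This macro expands to a unary elementwise operator: it defines a functor that applies
// CFUNC to each element and a THCTensor_(NAME) entry point that runs it in place when
// self_ == src (pointwiseApply1) or into a resized destination otherwise (pointwiseApply2).
// REAL only serves to make the generated functor name unique.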
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \
struct Tensor_##NAME##_##REAL##_Op { \
__device__ __forceinline__ void operator()(real* out, real* in) const { \
*out = CFUNC(*in); \
} \
\
__device__ __forceinline__ void operator()(real* v) const { \
*v = CFUNC(*v); \
} \
}; \
\
void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); \
if (self_ == src) { \
if (!THC_pointwiseApply1(state, self_, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} else { \
THCTensor_(resizeAs)(state, self_, src); \
\
if (!THC_pointwiseApply2(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} \
\
THCudaCheck(cudaGetLastError()); \
}
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL)
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log, THCNumerics<real>::log, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(lgamma, THCNumerics<real>::lgamma, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log10, THCNumerics<real>::log10, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, THCNumerics<real>::log1p, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log2, THCNumerics<real>::log2, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( exp, THCNumerics<real>::exp, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(expm1, THCNumerics<real>::expm1, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cos, THCNumerics<real>::cos, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sin, THCNumerics<real>::sin, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<real>::sqrt, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(rsqrt, THCNumerics<real>::rsqrt, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( ceil, THCNumerics<real>::ceil, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(floor, THCNumerics<real>::floor, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(trunc, THCNumerics<real>::trunc, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( acos, THCNumerics<real>::acos, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics<real>::cosh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( asin, THCNumerics<real>::asin, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sinh, THCNumerics<real>::sinh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tan, THCNumerics<real>::tan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<real>::atan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<real>::tanh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erf, THCNumerics<real>::erf, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(erfinv, THCNumerics<real>::erfinv,Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( round, THCNumerics<real>::round, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( frac, THCNumerics<real>::frac, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cinv, THCNumerics<real>::cinv, Real)
#endif
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( neg, THCNumerics<real>::neg, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( abs, THCNumerics<real>::abs, Real)
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC
void THCTensor_(sign)(THCState* state, THCTensor* self_, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1(state, self_, TensorSignOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2(state, self_, src, TensorSignOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(clamp)(THCState *state, THCTensor *self_, THCTensor *src, real min_value,
real max_value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1(state, self_, TensorClampOp<real>(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2(state, self_, src, TensorClampOp<real>(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(cross)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, x, y));
int i;
int nd = THCTensor_(nDimension)(state, x);
ptrdiff_t nelem = THCTensor_(nElement)(state, x);
THArgCheck(nd == THCTensor_(nDimension)(state, y), 1, "tensors must have same number of dimensions");
for (i = 0; i < nd; i++) {
THArgCheck(THCTensor_(size)(state, x, i) == THCTensor_(size)(state, y, i), 1, "dimension %i of x and y does not match", i);
if (dimension < 0 && THCTensor_(size)(state, x, i) == 3) {
dimension = i;
}
}
THArgCheck(dimension >= 0 && dimension < nd, 3, "dimension %d out of range", dimension+1);
THArgCheck(THCTensor_(size)(state, x, dimension) == 3, 3,
"dimension %d does not have size 3", dimension+1);
THCTensor_(resizeAs)(state, self, x);
int64_t sx = THCTensor_(stride)(state, x, dimension);
int64_t sy = THCTensor_(stride)(state, y, dimension);
int64_t so = THCTensor_(stride)(state, self, dimension);
THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1);
THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1);
THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1);
if (!THC_pointwiseApply3(state, nself, nx, ny, TensorCrossOp<real>(sx, sy, so))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCTensor_(free)(state, nx);
THCTensor_(free)(state, ny);
THCTensor_(free)(state, nself);
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(atan2)(THCState *state, THCTensor *self_, THCTensor *tx, THCTensor *ty)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, tx, ty));
THArgCheck(THCTensor_(nElement)(state, tx) ==
THCTensor_(nElement)(state, ty), 3, "sizes do not match");
THCTensor_(resizeAs)(state, self_, tx);
if (!THC_pointwiseApply3(state, self_, tx, ty, TensorATan2Op<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(sigmoid)(THCState* state, THCTensor* self_, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1(state, self_, TensorSigmoidOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2(state, self_, src, TensorSigmoidOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(digamma)(THCState* state, THCTensor* self_, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ != src) {
THCTensor_(resizeAs)(state, self_, src);
}
if (!THC_pointwiseApply2(state, self_, src, TensorDigammaOp<real, accreal>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(polygamma)(THCState* state, THCTensor* self_, int64_t n, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ != src) {
THCTensor_(resizeAs)(state, self_, src);
}
switch (n) {
case 0:
if (!THC_pointwiseApply2(state, self_, src, TensorDigammaOp<real, accreal>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
break;
case 1:
if (!THC_pointwiseApply2(state, self_, src, TensorTrigammaOp<real, accreal>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
break;
default:
THError("polygamma(n,x) is not implemented for n>=2");
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(lerp)(THCState *state, THCTensor *result, THCTensor *a, THCTensor *b, real w)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, result, a, b));
THArgCheck(THCTensor_(nElement)(state, a) ==
THCTensor_(nElement)(state, b), 3, "sizes do not match");
THCTensor_(resizeAs)(state, result, a);
if (!THC_pointwiseApply3(state, result, a, b, TensorLerpOp<real>(w))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
#endif
THC_API void
THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, real value, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
if (value == ScalarConvert<int, real>::to(1)) {
// self += src2
if (!THC_pointwiseApply2(state, self_, src2, TensorAddOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self += value * src2
if (!THC_pointwiseApply2(state, self_, src2, TensorCAddOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
if (value == ScalarConvert<int, real>::to(1)) {
// self = src1 + src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self = src1 + value * src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorCAddOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, real value, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
if (value == ScalarConvert<int, real>::to(1)) {
// self -= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorSubOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self += -value * src2
if (!THC_pointwiseApply2(state, self_, src2,
TensorCAddOp<real>(
ScalarNegate<real>::to(value)))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
if (value == ScalarConvert<int, real>::to(1)) {
// self = src1 - src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorSubOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
// self = src1 - value * src2
if (!THC_pointwiseApply3(state, self_, src1, src2,
TensorCAddOp<real>(
ScalarNegate<real>::to(value)))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self *= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorMulOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 * src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorMulOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(cpow)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self = pow(self, src2)
if (!THC_pointwiseApply2(state, self_, src2, TensorCPowOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = pow(src1, src2)
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorCPowOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
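// pow dispatches on the exponent: 1, 2 and 3 (plus -1 and -2 for floating-point types)
// pick specialised TensorPowOp instantiations, while every other value falls through to
// the generic path, where the -3 template argument apparently acts as a "call pow()"
// sentinel rather than a literal exponent.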
void THCTensor_(pow)(THCState *state, THCTensor *self_, THCTensor *src, real value) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(1))) {
if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, 1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(2))) {
if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, 2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(3))) {
if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, 3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(-1))) {
if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, -1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(-2))) {
if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, -2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#endif
} else {
// fallback implementation using pow
if (!THC_pointwiseApply1(state, self_, TensorPowOp<real, -3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(1))) {
if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, 1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(2))) {
if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, 2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(3))) {
if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, 3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(-1))) {
if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, -1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<real>::eq(value, ScalarConvert<int, real>::to(-2))) {
if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, -2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#endif
} else {
// fallback implementation using pow
if (!THC_pointwiseApply2(state, self_, src, TensorPowOp<real, -3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(tpow)(THCState *state, THCTensor *self_, real value, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1(state, self_, TensorTPowOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2(state, self_, src, TensorTPowOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self /= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorDivOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 / src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorDivOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(clshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF)
return THError("clshift not supported for torch.CudaHalfTensor");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self <<= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorLShiftOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 << src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorLShiftOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
THC_API void
THCTensor_(crshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF)
return THError("crshift not supported for torch.CudaHalfTensor");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self >>= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorRShiftOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 >> src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorRShiftOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
THC_API void
THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2(state, self, src2, TensorMaxOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3(state, self, src1, src2, TensorMaxOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THC_API void
THCTensor_(cmin)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2(state, self, src2, TensorMinOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3(state, self, src1, src2, TensorMinOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THC_API void
THCTensor_(cremainder)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2(state, self, src2, TensorCRemainderOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3(state, self, src1, src2, TensorCRemainderOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THC_API void
THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2(state, self, src2, TensorCFmodOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3(state, self, src1, src2, TensorCFmodOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THC_API void
THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (self == src) {
if (!THC_pointwiseApply1(state, self, TensorMaxValueOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src);
if (!THC_pointwiseApply2(state, self, src, TensorMaxValueOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THC_API void
THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (self == src) {
if (!THC_pointwiseApply1(state, self, TensorMinValueOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src);
if (!THC_pointwiseApply2(state, self, src, TensorMinValueOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THC_API void
THCTensor_(addcmul)(THCState *state, THCTensor *self_, THCTensor *t, real value, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, self_, t, src1, src2));
if(self_ != t)
{
THCTensor_(resizeAs)(state, self_, t);
THCTensor_(copy)(state, self_, t);
}
else
{
THArgCheck(THCTensor_(nElement)(state, self_) == THCTensor_(nElement)(state, src1),
1, "sizes do not match");
}
THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2),
3, "sizes do not match");
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddCMulOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(addcdiv)(THCState *state, THCTensor *self_, THCTensor *t, real value, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, self_, t, src1, src2));
if(self_ != t)
{
THCTensor_(resizeAs)(state, self_, t);
THCTensor_(copy)(state, self_, t);
}
else
{
THArgCheck(THCTensor_(nElement)(state, self_) == THCTensor_(nElement)(state, src1),
1, "sizes do not match");
}
THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2),
3, "sizes do not match");
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorAddCDivOp<real>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(cbitand)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
return THError("cbitand is only supported for integer type tensors");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
    // self &= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorBitAndOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
    // self = src1 & src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorBitAndOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
THC_API void
THCTensor_(cbitor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
return THError("cbitor is only supported for integer type tensors");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
    // self |= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorBitOrOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
    // self = src1 | src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorBitOrOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
THC_API void
THCTensor_(cbitxor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
  return THError("cbitxor is only supported for integer type tensors");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
    // self ^= src2
if (!THC_pointwiseApply2(state, self_, src2, TensorBitXorOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
    // self = src1 ^ src2
if (!THC_pointwiseApply3(state, self_, src1, src2, TensorBitXorOp<real>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
#endif
|
10c70a367d83d92318114a642b0fa075b4a6b2dd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//******************************************
// operators
// based on min-app code written by Oliver Fuhrer, MeteoSwiss
// modified by Ben Cumming, CSCS
//
// implements
// *****************************************
// Description: Contains simple operators which can be used on 3d-meshes
#include <mpi.h>
#include "cuda_helpers.h"
#include "data.h"
#include "operators.h"
#include "stats.h"
namespace operators {
// POD type holding information for device
struct DiffusionParams {
int nx;
int ny;
double alpha;
double dxs;
double *x_old;
double *bndN;
double *bndE;
double *bndS;
double *bndW;
};
// copy of global parameters for kernels to use directly
__device__
DiffusionParams params;
// copies the global parameters to the device
// must be called once at setup, before any of the stencil kernels are executed
void setup_params_on_device(
int nx, int ny,
double alpha, double dxs)
{
auto p = DiffusionParams {
nx,
ny,
alpha,
dxs,
data::x_old.device_data(),
data::bndN.device_data(),
data::bndE.device_data(),
data::bndS.device_data(),
data::bndW.device_data()
};
cuda_api_call(
hipMemcpyToSymbol(params, &p, sizeof(DiffusionParams))
);
}
namespace kernels {
__global__
void stencil_interior(double* S, const double *U) {
auto nx = params.nx;
auto ny = params.ny;
auto i = threadIdx.x + blockDim.x*blockIdx.x;
auto j = threadIdx.y + blockDim.y*blockIdx.y;
auto pos = i + j * nx;
    // stencil is applied to interior grid points, i.e. (i,j) such that
// i \in [1, nx-1)
// j \in [1, ny-1)
auto is_interior = i<(nx-1) && j<(ny-1) && (i>0 && j>0);
if(is_interior) {
S[pos] = -(4. + params.alpha) * U[pos] // central point
+ U[pos-1] + U[pos+1] // east and west
+ U[pos-nx] + U[pos+nx] // north and south
+ params.alpha * params.x_old[pos]
+ params.dxs * U[pos] * (1.0 - U[pos]);
}
}
__global__
void stencil_east_west(double* S, const double *U) {
auto j = threadIdx.x + blockDim.x*blockIdx.x;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
auto find_pos = [&nx] (size_t i, size_t j) {
return i + j * nx;
};
if(j>0 && j<ny-1) {
// EAST : i = nx-1
auto pos = find_pos(nx-1, j);
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + U[pos-nx] + U[pos+nx]
+ alpha*params.x_old[pos] + params.bndE[j]
+ dxs * U[pos] * (1.0 - U[pos]);
// WEST : i = 0
pos = find_pos(0, j);
S[pos] = -(4. + alpha) * U[pos]
               + U[pos+1] + U[pos-nx] + U[pos+nx]
+ alpha * params.x_old[pos] + params.bndW[j]
+ dxs * U[pos] * (1.0 - U[pos]);
}
}
__global__
void stencil_north_south(double* S, const double *U) {
auto i = threadIdx.x + blockDim.x*blockIdx.x;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
if(i>0 && i<nx-1) {
// NORTH : j = ny -1
auto pos = i + nx*(ny-1);
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + U[pos+1] + U[pos-nx]
+ alpha*params.x_old[pos] + params.bndN[i]
+ dxs * U[pos] * (1.0 - U[pos]);
// SOUTH : j = 0
pos = i;
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + U[pos+1] + U[pos+nx]
+ alpha * params.x_old[pos] + params.bndS[i]
+ dxs * U[pos] * (1.0 - U[pos]);
}
}
__global__
void stencil_corners(double* S, const double* U) {
auto i = threadIdx.x + blockDim.x*blockIdx.x;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
auto find_pos = [&nx] (size_t i, size_t j) {
return i + j * nx;
};
// only 1 thread executes this kernel
if(i==0) {
// NORTH-EAST
auto pos = find_pos(nx-1, ny-1);
S[pos] = -(4. + alpha) * U[pos] // central point
+ U[pos-1] + params.bndE[ny-1] // east and west
+ U[pos-nx] + params.bndN[nx-1] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// SOUTH-EAST
pos = find_pos(nx-1, 0);
S[pos] = -(4. + alpha) * U[pos] // central point
+ U[pos-1] + params.bndE[0] // east and west
+ params.bndS[nx-1]+ U[pos+nx] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// SOUTH-WEST
pos = find_pos(0, 0);
S[pos] = -(4. + alpha) * U[pos] // central point
+ params.bndW[0] + U[pos+1] // east and west
+ params.bndS[0] + U[pos+nx] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// NORTH-WEST
pos = find_pos(0, ny-1);
S[pos] = -(4. + alpha) * U[pos] // central point
+ params.bndW[nx-1]+ U[pos+1] // east and west
+ U[pos-nx] + params.bndN[0] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
}
}
}
// This function will copy a 1D strip from a 2D field to a 1D buffer.
// It is used to copy the values from along the edge of a field to
// a flat buffer for sending to MPI neighbors.
void pack_buffer(data::Field const& from, data::Field &buffer, int startx, int starty, int stride) {
int nx = from.xdim();
int ny = from.ydim();
int pos = startx + starty*nx;
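    // A strided BLAS copy gathers either a row (stride 1) or a column (stride nx)
    // of the 2D field directly on the device into the contiguous send buffer.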
auto status = hipblasDcopy(
cublas_handle(), buffer.length(),
from.device_data() + pos, stride,
buffer.device_data(), 1
);
if(status != HIPBLAS_STATUS_SUCCESS) {
std::cerr << "error : cublas copy for boundary condition" << std::endl;
exit(-1);
}
}
// Exchange that performs MPI send/recv from/to host memory, and copies
// results from and to the GPU.
void exchange_rdma(data::Field const& U) {
using data::domain;
using data::bndE;
using data::bndW;
using data::bndN;
using data::bndS;
using data::buffE;
using data::buffW;
using data::buffN;
using data::buffS;
using data::x_old;
int nx = domain.nx;
int ny = domain.ny;
// NOTE TO TEACHERS:
//
// Synchronization of the pack, Isend and Irecv operations is very important for
// RDMA communication.
// Students will get subtle bugs in the application if they aren't careful.
//
// The Problem:
// The Cray MPI uses internal CUDA streams and RDMA to copy from the buffX on
// the other MPI rank into the bndX field on this GPU.
// The miniapp launches all kernels on the default stream, so the bndX field
// may be updated by the MPI_Irecv call at the same time the bndX field
// is being read by a kernel running a stencil from the previous iteration.
//
// The Solution: There are two possible solutions:
// option 1. A single call to hipDeviceSynchronize() before the first MPI_Irecv will
// ensure that all kernels that depend on the current values in bndX have
// finished executing. This is the simplest and most reliable method.
// option 2. Call the pack_buffer() function before the MPI_Irecv() for each boundary.
// The reason that this works is as a result of a subtle interaction.
// pack_buffer() uses cublas to perform the copy.
// Cublas calls are blocking, i.e. the host waits until the GPU work is finished,
// and they are performed in CUDA stream 0. These two side effects mean
// that all operations from previous steps will be completed before the call
// Irecv can start filling bndX.
// If we were using a kernel we wrote ourselves to perform the pack, the problem
// would still persist, because the kernel would not block on the host side,
// so I don't consider this to be a very robust solution.
//
// This issue often doesn't affect 1 MPI rank, and usually isn't triggered with 2 MPI
// ranks. However, with 4 MPI ranks and large domains (512x512 and greater), the solution
// won't converge, and it will happen at different points on each run.
// If students get to this point, get them to set the CUDA_LAUNCH_BLOCKING=1 environment
// variable, and the problem will go away.
// Then work with them to isolate the issue by placing hipDeviceSynchronize() calls in
// the code. I would suggest that they put a hipDeviceSynchronize() at the top of
// the diffusion() function, where it will fix the problem. Then get them to zero in on
// exactly where it has to be placed.
int num_requests = 0;
MPI_Status status[8];
MPI_Request requests[8];
hipDeviceSynchronize();
if(domain.neighbour_north>=0) {
pack_buffer(U, buffN, 0, ny-1, 1);
MPI_Isend(buffN.device_data(), nx, MPI_DOUBLE, domain.neighbour_north, 0,
MPI_COMM_WORLD, &requests[num_requests++]);
MPI_Irecv(bndN.device_data(), nx, MPI_DOUBLE, domain.neighbour_north, 1,
MPI_COMM_WORLD, &requests[num_requests++]);
}
if(domain.neighbour_south>=0) {
pack_buffer(U, buffS, 0, 0, 1);
MPI_Isend(buffS.device_data(), nx, MPI_DOUBLE, domain.neighbour_south, 1,
MPI_COMM_WORLD, &requests[num_requests++]);
MPI_Irecv(bndS.device_data(), nx, MPI_DOUBLE, domain.neighbour_south, 0,
MPI_COMM_WORLD, &requests[num_requests++]);
}
if(domain.neighbour_east>=0) {
pack_buffer(U, buffE, nx-1, 0, nx);
MPI_Isend(buffE.device_data(), ny, MPI_DOUBLE, domain.neighbour_east, 2,
MPI_COMM_WORLD, &requests[num_requests++]);
MPI_Irecv(bndE.device_data(), ny, MPI_DOUBLE, domain.neighbour_east, 3,
MPI_COMM_WORLD, &requests[num_requests++]);
}
if(domain.neighbour_west>=0) {
pack_buffer(U, buffW, 0, 0, nx);
MPI_Isend(buffW.device_data(), ny, MPI_DOUBLE, domain.neighbour_west, 3,
MPI_COMM_WORLD, &requests[num_requests++]);
MPI_Irecv(bndW.device_data(), ny, MPI_DOUBLE, domain.neighbour_west, 2,
MPI_COMM_WORLD, &requests[num_requests++]);
}
MPI_Waitall(num_requests, requests, status);
}
// overlap communication with computation by splitting the exchange
void start_exchange_rdma(data::Field const& U, MPI_Request requests[], int& num_requests) {
using data::domain;
using data::bndE;
using data::bndW;
using data::bndN;
using data::bndS;
using data::buffE;
using data::buffW;
using data::buffN;
using data::buffS;
int nx = domain.nx;
int ny = domain.ny;
num_requests = 0;
hipDeviceSynchronize();
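    // The per-neighbour bodies below are left empty in this version of the code.
    // A sketch, mirroring exchange_rdma() above, would call pack_buffer() for the
    // boundary, then post MPI_Isend/MPI_Irecv into requests[num_requests++],
    // leaving the MPI_Waitall to wait_exchange_rdma().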
if(domain.neighbour_north>=0) {
}
if(domain.neighbour_south>=0) {
}
if(domain.neighbour_east>=0) {
}
if(domain.neighbour_west>=0) {
}
}
void wait_exchange_rdma(MPI_Request requests[], int num_requests) {
using data::domain;
using data::bndE;
using data::bndW;
using data::bndN;
using data::bndS;
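    // Left unimplemented here; a sketch would simply wait on the requests posted
    // by start_exchange_rdma(), e.g.
    //   MPI_Status status[8];
    //   MPI_Waitall(num_requests, requests, status);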
}
void diffusion(data::Field const& U, data::Field &S)
{
using data::options;
using data::domain;
using data::bndE;
using data::bndW;
using data::bndN;
using data::bndS;
using data::buffE;
using data::buffW;
using data::buffN;
using data::buffS;
using data::x_old;
double dxs = 1000. * (options.dx * options.dx);
double alpha = options.alpha;
int nx = domain.nx;
int ny = domain.ny;
static bool is_initialized = false;
if (!is_initialized) {
setup_params_on_device(nx, ny, alpha, dxs);
is_initialized = true;
}
//do exchange
exchange_rdma(U);
// apply stencil to the interior grid points
auto calculate_grid_dim = [] (size_t n, size_t block_dim) {
return (n+block_dim-1)/block_dim;
};
dim3 block_dim(8, 8); // use 8x8 thread block dimensions
dim3 grid_dim(
calculate_grid_dim(nx, block_dim.x),
calculate_grid_dim(ny, block_dim.y));
hipLaunchKernelGGL(( kernels::stencil_interior), dim3(grid_dim), dim3(block_dim), 0, 0, S.device_data(), U.device_data());
// apply stencil at boundaries
auto bnd_grid_dim_y = calculate_grid_dim(ny, 64);
hipLaunchKernelGGL(( kernels::stencil_east_west), dim3(bnd_grid_dim_y), dim3(64), 0, 0, S.device_data(), U.device_data());
auto bnd_grid_dim_x = calculate_grid_dim(nx, 64);
hipLaunchKernelGGL(( kernels::stencil_north_south), dim3(bnd_grid_dim_x), dim3(64), 0, 0, S.device_data(), U.device_data());
hipLaunchKernelGGL(( kernels::stencil_corners), dim3(1), dim3(1), 0, 0, S.device_data(), U.device_data());
}
} // namespace operators
| 10c70a367d83d92318114a642b0fa075b4a6b2dd.cu | //******************************************
// operators
// based on min-app code written by Oliver Fuhrer, MeteoSwiss
// modified by Ben Cumming, CSCS
//
// implements
// *****************************************
// Description: Contains simple operators which can be used on 3d-meshes
#include <mpi.h>
#include "cuda_helpers.h"
#include "data.h"
#include "operators.h"
#include "stats.h"
namespace operators {
// POD type holding information for device
struct DiffusionParams {
int nx;
int ny;
double alpha;
double dxs;
double *x_old;
double *bndN;
double *bndE;
double *bndS;
double *bndW;
};
// copy of global parameters for kernels to use directly
__device__
DiffusionParams params;
// copies the global parameters to the device
// must be called once at setup, before any of the stencil kernels are executed
void setup_params_on_device(
int nx, int ny,
double alpha, double dxs)
{
auto p = DiffusionParams {
nx,
ny,
alpha,
dxs,
data::x_old.device_data(),
data::bndN.device_data(),
data::bndE.device_data(),
data::bndS.device_data(),
data::bndW.device_data()
};
cuda_api_call(
cudaMemcpyToSymbol(params, &p, sizeof(DiffusionParams))
);
}
namespace kernels {
__global__
void stencil_interior(double* S, const double *U) {
auto nx = params.nx;
auto ny = params.ny;
auto i = threadIdx.x + blockDim.x*blockIdx.x;
auto j = threadIdx.y + blockDim.y*blockIdx.y;
auto pos = i + j * nx;
    // stencil is applied to interior grid points, i.e. (i,j) such that
// i \in [1, nx-1)
// j \in [1, ny-1)
auto is_interior = i<(nx-1) && j<(ny-1) && (i>0 && j>0);
if(is_interior) {
S[pos] = -(4. + params.alpha) * U[pos] // central point
+ U[pos-1] + U[pos+1] // east and west
+ U[pos-nx] + U[pos+nx] // north and south
+ params.alpha * params.x_old[pos]
+ params.dxs * U[pos] * (1.0 - U[pos]);
}
}
__global__
void stencil_east_west(double* S, const double *U) {
auto j = threadIdx.x + blockDim.x*blockIdx.x;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
auto find_pos = [&nx] (size_t i, size_t j) {
return i + j * nx;
};
if(j>0 && j<ny-1) {
// EAST : i = nx-1
auto pos = find_pos(nx-1, j);
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + U[pos-nx] + U[pos+nx]
+ alpha*params.x_old[pos] + params.bndE[j]
+ dxs * U[pos] * (1.0 - U[pos]);
// WEST : i = 0
pos = find_pos(0, j);
S[pos] = -(4. + alpha) * U[pos]
               + U[pos+1] + U[pos-nx] + U[pos+nx]
+ alpha * params.x_old[pos] + params.bndW[j]
+ dxs * U[pos] * (1.0 - U[pos]);
}
}
__global__
void stencil_north_south(double* S, const double *U) {
auto i = threadIdx.x + blockDim.x*blockIdx.x;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
if(i>0 && i<nx-1) {
// NORTH : j = ny -1
auto pos = i + nx*(ny-1);
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + U[pos+1] + U[pos-nx]
+ alpha*params.x_old[pos] + params.bndN[i]
+ dxs * U[pos] * (1.0 - U[pos]);
// SOUTH : j = 0
pos = i;
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + U[pos+1] + U[pos+nx]
+ alpha * params.x_old[pos] + params.bndS[i]
+ dxs * U[pos] * (1.0 - U[pos]);
}
}
__global__
void stencil_corners(double* S, const double* U) {
auto i = threadIdx.x + blockDim.x*blockIdx.x;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
auto find_pos = [&nx] (size_t i, size_t j) {
return i + j * nx;
};
// only 1 thread executes this kernel
if(i==0) {
// NORTH-EAST
auto pos = find_pos(nx-1, ny-1);
S[pos] = -(4. + alpha) * U[pos] // central point
+ U[pos-1] + params.bndE[ny-1] // east and west
+ U[pos-nx] + params.bndN[nx-1] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// SOUTH-EAST
pos = find_pos(nx-1, 0);
S[pos] = -(4. + alpha) * U[pos] // central point
+ U[pos-1] + params.bndE[0] // east and west
+ params.bndS[nx-1]+ U[pos+nx] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// SOUTH-WEST
pos = find_pos(0, 0);
S[pos] = -(4. + alpha) * U[pos] // central point
+ params.bndW[0] + U[pos+1] // east and west
+ params.bndS[0] + U[pos+nx] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// NORTH-WEST
pos = find_pos(0, ny-1);
S[pos] = -(4. + alpha) * U[pos] // central point
+ params.bndW[nx-1]+ U[pos+1] // east and west
+ U[pos-nx] + params.bndN[0] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
}
}
}
// This function will copy a 1D strip from a 2D field to a 1D buffer.
// It is used to copy the values from along the edge of a field to
// a flat buffer for sending to MPI neighbors.
void pack_buffer(data::Field const& from, data::Field &buffer, int startx, int starty, int stride) {
int nx = from.xdim();
int ny = from.ydim();
int pos = startx + starty*nx;
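    // A strided BLAS copy gathers either a row (stride 1) or a column (stride nx)
    // of the 2D field directly on the device into the contiguous send buffer.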
auto status = cublasDcopy(
cublas_handle(), buffer.length(),
from.device_data() + pos, stride,
buffer.device_data(), 1
);
if(status != CUBLAS_STATUS_SUCCESS) {
std::cerr << "error : cublas copy for boundary condition" << std::endl;
exit(-1);
}
}
// Exchange that performs MPI send/recv from/to host memory, and copies
// results from and to the GPU.
void exchange_rdma(data::Field const& U) {
using data::domain;
using data::bndE;
using data::bndW;
using data::bndN;
using data::bndS;
using data::buffE;
using data::buffW;
using data::buffN;
using data::buffS;
using data::x_old;
int nx = domain.nx;
int ny = domain.ny;
// NOTE TO TEACHERS:
//
// Synchronization of the pack, Isend and Irecv operations is very important for
// RDMA communication.
// Students will get subtle bugs in the application if they aren't careful.
//
// The Problem:
// The Cray MPI uses internal CUDA streams and RDMA to copy from the buffX on
// the other MPI rank into the bndX field on this GPU.
// The miniapp launches all kernels on the default stream, so the bndX field
// may be updated by the MPI_Irecv call at the same time the bndX field
// is being read by a kernel running a stencil from the previous iteration.
//
// The Solution: There are two possible solutions:
// option 1. A single call to cudaDeviceSynchronize() before the first MPI_Irecv will
// ensure that all kernels that depend on the current values in bndX have
// finished executing. This is the simplest and most reliable method.
// option 2. Call the pack_buffer() function before the MPI_Irecv() for each boundary.
// The reason that this works is as a result of a subtle interaction.
// pack_buffer() uses cublas to perform the copy.
// Cublas calls are blocking, i.e. the host waits until the GPU work is finished,
// and they are performed in CUDA stream 0. These two side effects mean
// that all operations from previous steps will be completed before the call
// Irecv can start filling bndX.
// If we were using a kernel we wrote ourselves to perform the pack, the problem
// would still persist, because the kernel would not block on the host side,
// so I don't consider this to be a very robust solution.
//
// This issue often doesn't affect 1 MPI rank, and usually isn't triggered with 2 MPI
// ranks. However, with 4 MPI ranks and large domains (512x512 and greater), the solution
// won't converge, and it will happen at different points on each run.
// If students get to this point, get them to set the CUDA_LAUNCH_BLOCKING=1 environment
// variable, and the problem will go away.
// Then work with them to isolate the issue by placing cudaDeviceSynchronize() calls in
// the code. I would suggest that they put a cudaDeviceSynchronize() at the top of
// the diffusion() function, where it will fix the problem. Then get them to zero in on
// exactly where it has to be placed.
int num_requests = 0;
MPI_Status status[8];
MPI_Request requests[8];
cudaDeviceSynchronize();
if(domain.neighbour_north>=0) {
pack_buffer(U, buffN, 0, ny-1, 1);
MPI_Isend(buffN.device_data(), nx, MPI_DOUBLE, domain.neighbour_north, 0,
MPI_COMM_WORLD, &requests[num_requests++]);
MPI_Irecv(bndN.device_data(), nx, MPI_DOUBLE, domain.neighbour_north, 1,
MPI_COMM_WORLD, &requests[num_requests++]);
}
if(domain.neighbour_south>=0) {
pack_buffer(U, buffS, 0, 0, 1);
MPI_Isend(buffS.device_data(), nx, MPI_DOUBLE, domain.neighbour_south, 1,
MPI_COMM_WORLD, &requests[num_requests++]);
MPI_Irecv(bndS.device_data(), nx, MPI_DOUBLE, domain.neighbour_south, 0,
MPI_COMM_WORLD, &requests[num_requests++]);
}
if(domain.neighbour_east>=0) {
pack_buffer(U, buffE, nx-1, 0, nx);
MPI_Isend(buffE.device_data(), ny, MPI_DOUBLE, domain.neighbour_east, 2,
MPI_COMM_WORLD, &requests[num_requests++]);
MPI_Irecv(bndE.device_data(), ny, MPI_DOUBLE, domain.neighbour_east, 3,
MPI_COMM_WORLD, &requests[num_requests++]);
}
if(domain.neighbour_west>=0) {
pack_buffer(U, buffW, 0, 0, nx);
MPI_Isend(buffW.device_data(), ny, MPI_DOUBLE, domain.neighbour_west, 3,
MPI_COMM_WORLD, &requests[num_requests++]);
MPI_Irecv(bndW.device_data(), ny, MPI_DOUBLE, domain.neighbour_west, 2,
MPI_COMM_WORLD, &requests[num_requests++]);
}
MPI_Waitall(num_requests, requests, status);
}
// overlap communication with computation by splitting the exchange
void start_exchange_rdma(data::Field const& U, MPI_Request requests[], int& num_requests) {
using data::domain;
using data::bndE;
using data::bndW;
using data::bndN;
using data::bndS;
using data::buffE;
using data::buffW;
using data::buffN;
using data::buffS;
int nx = domain.nx;
int ny = domain.ny;
num_requests = 0;
cudaDeviceSynchronize();
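    // The per-neighbour bodies below are left empty in this version of the code.
    // A sketch, mirroring exchange_rdma() above, would call pack_buffer() for the
    // boundary, then post MPI_Isend/MPI_Irecv into requests[num_requests++],
    // leaving the MPI_Waitall to wait_exchange_rdma().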
if(domain.neighbour_north>=0) {
}
if(domain.neighbour_south>=0) {
}
if(domain.neighbour_east>=0) {
}
if(domain.neighbour_west>=0) {
}
}
void wait_exchange_rdma(MPI_Request requests[], int num_requests) {
using data::domain;
using data::bndE;
using data::bndW;
using data::bndN;
using data::bndS;
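    // Left unimplemented here; a sketch would simply wait on the requests posted
    // by start_exchange_rdma(), e.g.
    //   MPI_Status status[8];
    //   MPI_Waitall(num_requests, requests, status);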
}
void diffusion(data::Field const& U, data::Field &S)
{
using data::options;
using data::domain;
using data::bndE;
using data::bndW;
using data::bndN;
using data::bndS;
using data::buffE;
using data::buffW;
using data::buffN;
using data::buffS;
using data::x_old;
double dxs = 1000. * (options.dx * options.dx);
double alpha = options.alpha;
int nx = domain.nx;
int ny = domain.ny;
static bool is_initialized = false;
if (!is_initialized) {
setup_params_on_device(nx, ny, alpha, dxs);
is_initialized = true;
}
//do exchange
exchange_rdma(U);
// apply stencil to the interior grid points
auto calculate_grid_dim = [] (size_t n, size_t block_dim) {
return (n+block_dim-1)/block_dim;
};
dim3 block_dim(8, 8); // use 8x8 thread block dimensions
dim3 grid_dim(
calculate_grid_dim(nx, block_dim.x),
calculate_grid_dim(ny, block_dim.y));
kernels::stencil_interior<<<grid_dim, block_dim>>>(S.device_data(), U.device_data());
// apply stencil at boundaries
auto bnd_grid_dim_y = calculate_grid_dim(ny, 64);
kernels::stencil_east_west<<<bnd_grid_dim_y, 64>>>(S.device_data(), U.device_data());
auto bnd_grid_dim_x = calculate_grid_dim(nx, 64);
kernels::stencil_north_south<<<bnd_grid_dim_x, 64>>>(S.device_data(), U.device_data());
kernels::stencil_corners<<<1, 1>>>(S.device_data(), U.device_data());
}
} // namespace operators
|
c9a2bbc7c9fcf1a5e2f7868e6a0fe70b5da5c7d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "DepthwiseConvOp.h"
#include "paddle/math/BaseMatrix.h"
namespace paddle {
// CUDA kernel to compute the depthwise convolution forward pass
template <class T>
__global__ void ConvolutionDepthwiseForward(const int nthreads,
const T* const inputData,
const T* const filterData,
const int batchSize,
const int outputChannels,
const int outputHeight,
const int outputWidth,
const int inputChannels,
const int inputHeight,
const int inputWidth,
const int filterMultiplier,
const int filterHeight,
const int filterWidth,
const int strideH,
const int strideW,
const int paddingH,
const int paddingW,
T* const outputData) {
int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x;
if (index < nthreads) {
const int batch = index / outputChannels / outputHeight / outputWidth;
const int c_out = (index / outputHeight / outputWidth) % outputChannels;
const int h_out = (index / outputWidth) % outputHeight;
const int w_out = index % outputWidth;
const int c_in = c_out / filterMultiplier;
const T* weight = filterData + c_out * filterHeight * filterWidth;
T value = 0;
const int h_in_start = -paddingH + h_out * strideH;
const int w_in_start = -paddingW + w_out * strideW;
const int h_in_end = -paddingH + h_out * strideH + filterHeight - 1;
const int w_in_end = -paddingW + w_out * strideW + filterWidth - 1;
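    // Fast path: the whole filter window lies inside the input, so the per-tap
    // bounds checks in the else-branch below can be skipped.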
if ((h_in_start >= 0) && (h_in_end < inputHeight) && (w_in_start >= 0) &&
(w_in_end < inputWidth)) {
for (int kh = 0; kh < filterHeight; ++kh) {
for (int kw = 0; kw < filterWidth; ++kw) {
const int h_in = -paddingH + h_out * strideH + kh;
const int w_in = -paddingW + w_out * strideW + kw;
const int offset =
((batch * inputChannels + c_in) * inputHeight + h_in) *
inputWidth +
w_in;
value += (*weight) * inputData[offset];
++weight;
}
}
} else {
for (int kh = 0; kh < filterHeight; ++kh) {
for (int kw = 0; kw < filterWidth; ++kw) {
const int h_in = -paddingH + h_out * strideH + kh;
const int w_in = -paddingW + w_out * strideW + kw;
if ((h_in >= 0) && (h_in < inputHeight) && (w_in >= 0) &&
(w_in < inputWidth)) {
const int offset =
((batch * inputChannels + c_in) * inputHeight + h_in) *
inputWidth +
w_in;
value += (*weight) * inputData[offset];
}
++weight;
}
}
}
outputData[index] = value;
}
}
// CUDA kernel to compute the depthwise convolution backprop w.r.t input.
template <class T>
__global__ void ConvolutionDepthwiseInputBackward(const int nthreads,
const T* const top_diff,
const T* const weight_data,
const int num,
const int outputChannels,
const int outputHeight,
const int outputWidth,
const int inputChannels,
const int inputHeight,
const int inputWidth,
const int filterMultiplier,
const int filterHeight,
const int filterWidth,
const int strideH,
const int strideW,
const int paddingH,
const int paddingW,
T* const bottom_diff) {
int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x;
if (index < nthreads) {
const int batch = index / inputChannels / inputHeight / inputWidth;
const int c_in = (index / inputHeight / inputWidth) % inputChannels;
const int h_in = (index / inputWidth) % inputHeight;
const int w_in = index % inputWidth;
const int c_out_start = c_in * filterMultiplier;
int h_out_start = (h_in - filterHeight + paddingH + strideH) / strideH;
h_out_start = 0 > h_out_start ? 0 : h_out_start;
int h_out_end = (h_in + paddingH) / strideH;
h_out_end = outputHeight - 1 < h_out_end ? outputHeight - 1 : h_out_end;
int w_out_start = (w_in - filterWidth + paddingW + strideW) / strideW;
w_out_start = 0 > w_out_start ? 0 : w_out_start;
int w_out_end = (w_in + paddingW) / strideW;
w_out_end = outputWidth - 1 < w_out_end ? outputWidth - 1 : w_out_end;
T value = 0;
for (int c_out = c_out_start; c_out < c_out_start + filterMultiplier;
c_out++) {
for (int h_out = h_out_start; h_out <= h_out_end; ++h_out) {
const int filter_h = h_in + paddingH - h_out * strideH;
for (int w_out = w_out_start; w_out <= w_out_end; ++w_out) {
const int filter_w = w_in + paddingW - w_out * strideW;
const int filter_offset = c_out * filterHeight * filterWidth +
filter_h * filterWidth + filter_w;
const int top_diff_offset =
((batch * outputChannels + c_out) * outputHeight + h_out) *
outputWidth +
w_out;
value += top_diff[top_diff_offset] * weight_data[filter_offset];
}
}
}
bottom_diff[index] += value;
}
}
// CUDA kernel to compute the depthwise convolution backprop w.r.t filter.
template <class T>
__global__ void ConvolutionDepthwiseFilterBackward(const int num_i,
const int nthreads,
const T* const top_diff,
const T* const inputData,
const int num,
const int outputChannels,
const int outputHeight,
const int outputWidth,
const int inputChannels,
const int inputHeight,
const int inputWidth,
const int filterMultiplier,
const int filterHeight,
const int filterWidth,
const int strideH,
const int strideW,
const int paddingH,
const int paddingW,
T* const buffer_data) {
int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x;
if (index < nthreads) {
const int h_out = (index / outputWidth) % outputHeight;
const int w_out = index % outputWidth;
const int kh =
(index / filterWidth / outputHeight / outputWidth) % filterHeight;
const int kw = (index / outputHeight / outputWidth) % filterWidth;
const int h_in = -paddingH + h_out * strideH + kh;
const int w_in = -paddingW + w_out * strideW + kw;
if ((h_in >= 0) && (h_in < inputHeight) && (w_in >= 0) &&
(w_in < inputWidth)) {
const int c_out =
index / (filterHeight * filterWidth * outputHeight * outputWidth);
const int c_in = c_out / filterMultiplier;
const int batch = num_i;
const int top_offset =
((batch * outputChannels + c_out) * outputHeight + h_out) *
outputWidth +
w_out;
const int bottom_offset =
((batch * inputChannels + c_in) * inputHeight + h_in) * inputWidth +
w_in;
buffer_data[index] = top_diff[top_offset] * inputData[bottom_offset];
} else {
buffer_data[index] = 0;
}
}
}
template <class T>
class DepthwiseConvFunctor<DEVICE_TYPE_GPU, T> {
public:
void operator()(const T* inputData,
const T* filterData,
int batchSize,
int outputChannels,
int outputHeight,
int outputWidth,
int inputChannels,
int inputHeight,
int inputWidth,
int filterMultiplier,
int filterHeight,
int filterWidth,
int strideH,
int strideW,
int paddingH,
int paddingW,
T* outputData) {
int outputSize = batchSize * outputChannels * outputHeight * outputWidth;
size_t blocks = (outputSize + 1024 - 1) / 1024;
size_t blockX = 512;
size_t blockY = (blocks + 512 - 1) / 512;
dim3 threads(1024, 1);
dim3 grid(blockX, blockY);
hipLaunchKernelGGL(( ConvolutionDepthwiseForward<T>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT,
outputSize,
inputData,
filterData,
batchSize,
outputChannels,
outputHeight,
outputWidth,
inputChannels,
inputHeight,
inputWidth,
filterMultiplier,
filterHeight,
filterWidth,
strideH,
strideW,
paddingH,
paddingW,
outputData);
}
};
template <class T>
class DepthwiseConvGradInputFunctor<DEVICE_TYPE_GPU, T> {
public:
void operator()(const T* outputGrad,
const T* filterData,
int batchSize,
int outputChannels,
int outputHeight,
int outputWidth,
int inputChannels,
int inputHeight,
int inputWidth,
int filterMultiplier,
int filterHeight,
int filterWidth,
int strideH,
int strideW,
int paddingH,
int paddingW,
T* inputGrad) {
int inputSize = batchSize * inputChannels * inputHeight * inputWidth;
size_t blocks = (inputSize + 1024 - 1) / 1024;
size_t blockX = 512;
size_t blockY = (blocks + 512 - 1) / 512;
dim3 threads(1024, 1);
dim3 grid(blockX, blockY);
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(( ConvolutionDepthwiseInputBackward<T>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT, inputSize,
outputGrad,
filterData,
batchSize,
outputChannels,
outputHeight,
outputWidth,
inputChannels,
inputHeight,
inputWidth,
filterMultiplier,
filterHeight,
filterWidth,
strideH,
strideW,
paddingH,
paddingW,
inputGrad);
}
};
template <class T>
class DepthwiseConvGradFilterFunctor<DEVICE_TYPE_GPU, T> {
public:
void operator()(const T* outputGrad,
const T* inputData,
int batchSize,
int outputChannels,
int outputHeight,
int outputWidth,
int inputChannels,
int inputHeight,
int inputWidth,
int filterMultiplier,
int filterHeight,
int filterWidth,
int strideH,
int strideW,
int paddingH,
int paddingW,
T* colData,
T* filterGrad) {
int colDataSize = outputChannels * filterHeight * filterWidth *
outputHeight * outputWidth;
size_t blocks = (colDataSize + 1024 - 1) / 1024;
size_t blockX = 512;
size_t blockY = (blocks + 512 - 1) / 512;
dim3 threads(1024, 1);
dim3 grid(blockX, blockY);
BaseMatrix filterGradMatrix(outputChannels * filterHeight * filterWidth,
1,
filterGrad,
false,
true);
for (int i = 0; i < batchSize; i++) {
hipLaunchKernelGGL(( ConvolutionDepthwiseFilterBackward<
T>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT, i,
colDataSize,
outputGrad,
inputData,
batchSize,
outputChannels,
outputHeight,
outputWidth,
inputChannels,
inputHeight,
inputWidth,
filterMultiplier,
filterHeight,
filterWidth,
strideH,
strideW,
paddingH,
paddingW,
colData);
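      // The kernel above fills colData with the per-position products
      // outputGrad * input for every (filter element, output position) pair;
      // sumRows below reduces over the output positions (and accumulates across
      // the batch loop) to form the filter gradient.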
int K = outputHeight * outputWidth;
int M = colDataSize / K;
BaseMatrix colMatrix(M, K, colData, false, true);
filterGradMatrix.sumRows(colMatrix, (T)1.0, (T)1.0);
}
}
};
#ifdef PADDLE_TYPE_DOUBLE
template class DepthwiseConvGradInputFunctor<DEVICE_TYPE_GPU, double>;
template class DepthwiseConvFunctor<DEVICE_TYPE_GPU, double>;
template class DepthwiseConvGradFilterFunctor<DEVICE_TYPE_GPU, double>;
#else
template class DepthwiseConvGradInputFunctor<DEVICE_TYPE_GPU, float>;
template class DepthwiseConvFunctor<DEVICE_TYPE_GPU, float>;
template class DepthwiseConvGradFilterFunctor<DEVICE_TYPE_GPU, float>;
#endif
} // namespace paddle
| c9a2bbc7c9fcf1a5e2f7868e6a0fe70b5da5c7d4.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "DepthwiseConvOp.h"
#include "paddle/math/BaseMatrix.h"
namespace paddle {
// CUDA kernel to compute the depthwise convolution forward pass
template <class T>
__global__ void ConvolutionDepthwiseForward(const int nthreads,
const T* const inputData,
const T* const filterData,
const int batchSize,
const int outputChannels,
const int outputHeight,
const int outputWidth,
const int inputChannels,
const int inputHeight,
const int inputWidth,
const int filterMultiplier,
const int filterHeight,
const int filterWidth,
const int strideH,
const int strideW,
const int paddingH,
const int paddingW,
T* const outputData) {
int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x;
if (index < nthreads) {
const int batch = index / outputChannels / outputHeight / outputWidth;
const int c_out = (index / outputHeight / outputWidth) % outputChannels;
const int h_out = (index / outputWidth) % outputHeight;
const int w_out = index % outputWidth;
const int c_in = c_out / filterMultiplier;
const T* weight = filterData + c_out * filterHeight * filterWidth;
T value = 0;
const int h_in_start = -paddingH + h_out * strideH;
const int w_in_start = -paddingW + w_out * strideW;
const int h_in_end = -paddingH + h_out * strideH + filterHeight - 1;
const int w_in_end = -paddingW + w_out * strideW + filterWidth - 1;
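    // Fast path: the whole filter window lies inside the input, so the per-tap
    // bounds checks in the else-branch below can be skipped.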
if ((h_in_start >= 0) && (h_in_end < inputHeight) && (w_in_start >= 0) &&
(w_in_end < inputWidth)) {
for (int kh = 0; kh < filterHeight; ++kh) {
for (int kw = 0; kw < filterWidth; ++kw) {
const int h_in = -paddingH + h_out * strideH + kh;
const int w_in = -paddingW + w_out * strideW + kw;
const int offset =
((batch * inputChannels + c_in) * inputHeight + h_in) *
inputWidth +
w_in;
value += (*weight) * inputData[offset];
++weight;
}
}
} else {
for (int kh = 0; kh < filterHeight; ++kh) {
for (int kw = 0; kw < filterWidth; ++kw) {
const int h_in = -paddingH + h_out * strideH + kh;
const int w_in = -paddingW + w_out * strideW + kw;
if ((h_in >= 0) && (h_in < inputHeight) && (w_in >= 0) &&
(w_in < inputWidth)) {
const int offset =
((batch * inputChannels + c_in) * inputHeight + h_in) *
inputWidth +
w_in;
value += (*weight) * inputData[offset];
}
++weight;
}
}
}
outputData[index] = value;
}
}
// CUDA kernel to compute the depthwise convolution backprop w.r.t input.
template <class T>
__global__ void ConvolutionDepthwiseInputBackward(const int nthreads,
const T* const top_diff,
const T* const weight_data,
const int num,
const int outputChannels,
const int outputHeight,
const int outputWidth,
const int inputChannels,
const int inputHeight,
const int inputWidth,
const int filterMultiplier,
const int filterHeight,
const int filterWidth,
const int strideH,
const int strideW,
const int paddingH,
const int paddingW,
T* const bottom_diff) {
int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x;
if (index < nthreads) {
const int batch = index / inputChannels / inputHeight / inputWidth;
const int c_in = (index / inputHeight / inputWidth) % inputChannels;
const int h_in = (index / inputWidth) % inputHeight;
const int w_in = index % inputWidth;
const int c_out_start = c_in * filterMultiplier;
int h_out_start = (h_in - filterHeight + paddingH + strideH) / strideH;
h_out_start = 0 > h_out_start ? 0 : h_out_start;
int h_out_end = (h_in + paddingH) / strideH;
h_out_end = outputHeight - 1 < h_out_end ? outputHeight - 1 : h_out_end;
int w_out_start = (w_in - filterWidth + paddingW + strideW) / strideW;
w_out_start = 0 > w_out_start ? 0 : w_out_start;
int w_out_end = (w_in + paddingW) / strideW;
w_out_end = outputWidth - 1 < w_out_end ? outputWidth - 1 : w_out_end;
T value = 0;
for (int c_out = c_out_start; c_out < c_out_start + filterMultiplier;
c_out++) {
for (int h_out = h_out_start; h_out <= h_out_end; ++h_out) {
const int filter_h = h_in + paddingH - h_out * strideH;
for (int w_out = w_out_start; w_out <= w_out_end; ++w_out) {
const int filter_w = w_in + paddingW - w_out * strideW;
const int filter_offset = c_out * filterHeight * filterWidth +
filter_h * filterWidth + filter_w;
const int top_diff_offset =
((batch * outputChannels + c_out) * outputHeight + h_out) *
outputWidth +
w_out;
value += top_diff[top_diff_offset] * weight_data[filter_offset];
}
}
}
bottom_diff[index] += value;
}
}
// CUDA kernel to compute the depthwise convolution backprop w.r.t filter.
template <class T>
__global__ void ConvolutionDepthwiseFilterBackward(const int num_i,
const int nthreads,
const T* const top_diff,
const T* const inputData,
const int num,
const int outputChannels,
const int outputHeight,
const int outputWidth,
const int inputChannels,
const int inputHeight,
const int inputWidth,
const int filterMultiplier,
const int filterHeight,
const int filterWidth,
const int strideH,
const int strideW,
const int paddingH,
const int paddingW,
T* const buffer_data) {
int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x;
if (index < nthreads) {
const int h_out = (index / outputWidth) % outputHeight;
const int w_out = index % outputWidth;
const int kh =
(index / filterWidth / outputHeight / outputWidth) % filterHeight;
const int kw = (index / outputHeight / outputWidth) % filterWidth;
const int h_in = -paddingH + h_out * strideH + kh;
const int w_in = -paddingW + w_out * strideW + kw;
if ((h_in >= 0) && (h_in < inputHeight) && (w_in >= 0) &&
(w_in < inputWidth)) {
const int c_out =
index / (filterHeight * filterWidth * outputHeight * outputWidth);
const int c_in = c_out / filterMultiplier;
const int batch = num_i;
const int top_offset =
((batch * outputChannels + c_out) * outputHeight + h_out) *
outputWidth +
w_out;
const int bottom_offset =
((batch * inputChannels + c_in) * inputHeight + h_in) * inputWidth +
w_in;
buffer_data[index] = top_diff[top_offset] * inputData[bottom_offset];
} else {
buffer_data[index] = 0;
}
}
}
template <class T>
class DepthwiseConvFunctor<DEVICE_TYPE_GPU, T> {
public:
void operator()(const T* inputData,
const T* filterData,
int batchSize,
int outputChannels,
int outputHeight,
int outputWidth,
int inputChannels,
int inputHeight,
int inputWidth,
int filterMultiplier,
int filterHeight,
int filterWidth,
int strideH,
int strideW,
int paddingH,
int paddingW,
T* outputData) {
int outputSize = batchSize * outputChannels * outputHeight * outputWidth;
size_t blocks = (outputSize + 1024 - 1) / 1024;
size_t blockX = 512;
size_t blockY = (blocks + 512 - 1) / 512;
dim3 threads(1024, 1);
dim3 grid(blockX, blockY);
ConvolutionDepthwiseForward<T><<<grid, threads, 0, STREAM_DEFAULT>>>(
outputSize,
inputData,
filterData,
batchSize,
outputChannels,
outputHeight,
outputWidth,
inputChannels,
inputHeight,
inputWidth,
filterMultiplier,
filterHeight,
filterWidth,
strideH,
strideW,
paddingH,
paddingW,
outputData);
}
};
template <class T>
class DepthwiseConvGradInputFunctor<DEVICE_TYPE_GPU, T> {
public:
void operator()(const T* outputGrad,
const T* filterData,
int batchSize,
int outputChannels,
int outputHeight,
int outputWidth,
int inputChannels,
int inputHeight,
int inputWidth,
int filterMultiplier,
int filterHeight,
int filterWidth,
int strideH,
int strideW,
int paddingH,
int paddingW,
T* inputGrad) {
int inputSize = batchSize * inputChannels * inputHeight * inputWidth;
size_t blocks = (inputSize + 1024 - 1) / 1024;
size_t blockX = 512;
size_t blockY = (blocks + 512 - 1) / 512;
dim3 threads(1024, 1);
dim3 grid(blockX, blockY);
ConvolutionDepthwiseInputBackward<T>
// NOLINT_NEXT_LINE(whitespace/operators)
<<<grid, threads, 0, STREAM_DEFAULT>>>(inputSize,
outputGrad,
filterData,
batchSize,
outputChannels,
outputHeight,
outputWidth,
inputChannels,
inputHeight,
inputWidth,
filterMultiplier,
filterHeight,
filterWidth,
strideH,
strideW,
paddingH,
paddingW,
inputGrad);
}
};
template <class T>
class DepthwiseConvGradFilterFunctor<DEVICE_TYPE_GPU, T> {
public:
void operator()(const T* outputGrad,
const T* inputData,
int batchSize,
int outputChannels,
int outputHeight,
int outputWidth,
int inputChannels,
int inputHeight,
int inputWidth,
int filterMultiplier,
int filterHeight,
int filterWidth,
int strideH,
int strideW,
int paddingH,
int paddingW,
T* colData,
T* filterGrad) {
int colDataSize = outputChannels * filterHeight * filterWidth *
outputHeight * outputWidth;
size_t blocks = (colDataSize + 1024 - 1) / 1024;
size_t blockX = 512;
size_t blockY = (blocks + 512 - 1) / 512;
dim3 threads(1024, 1);
dim3 grid(blockX, blockY);
BaseMatrix filterGradMatrix(outputChannels * filterHeight * filterWidth,
1,
filterGrad,
false,
true);
for (int i = 0; i < batchSize; i++) {
ConvolutionDepthwiseFilterBackward<
T><<<grid, threads, 0, STREAM_DEFAULT>>>(i,
colDataSize,
outputGrad,
inputData,
batchSize,
outputChannels,
outputHeight,
outputWidth,
inputChannels,
inputHeight,
inputWidth,
filterMultiplier,
filterHeight,
filterWidth,
strideH,
strideW,
paddingH,
paddingW,
colData);
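      // The kernel above fills colData with the per-position products
      // outputGrad * input for every (filter element, output position) pair;
      // sumRows below reduces over the output positions (and accumulates across
      // the batch loop) to form the filter gradient.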
int K = outputHeight * outputWidth;
int M = colDataSize / K;
BaseMatrix colMatrix(M, K, colData, false, true);
filterGradMatrix.sumRows(colMatrix, (T)1.0, (T)1.0);
}
}
};
#ifdef PADDLE_TYPE_DOUBLE
template class DepthwiseConvGradInputFunctor<DEVICE_TYPE_GPU, double>;
template class DepthwiseConvFunctor<DEVICE_TYPE_GPU, double>;
template class DepthwiseConvGradFilterFunctor<DEVICE_TYPE_GPU, double>;
#else
template class DepthwiseConvGradInputFunctor<DEVICE_TYPE_GPU, float>;
template class DepthwiseConvFunctor<DEVICE_TYPE_GPU, float>;
template class DepthwiseConvGradFilterFunctor<DEVICE_TYPE_GPU, float>;
#endif
} // namespace paddle
|
27676ef637b08a627ac47ab04d2e72f6323bfe95.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_tea_leaf_cheby_init_kernel;
int xdim0_tea_leaf_cheby_init_kernel_h = -1;
int ydim0_tea_leaf_cheby_init_kernel_h = -1;
__constant__ int xdim1_tea_leaf_cheby_init_kernel;
int xdim1_tea_leaf_cheby_init_kernel_h = -1;
int ydim1_tea_leaf_cheby_init_kernel_h = -1;
__constant__ int xdim2_tea_leaf_cheby_init_kernel;
int xdim2_tea_leaf_cheby_init_kernel_h = -1;
int ydim2_tea_leaf_cheby_init_kernel_h = -1;
__constant__ int xdim3_tea_leaf_cheby_init_kernel;
int xdim3_tea_leaf_cheby_init_kernel_h = -1;
int ydim3_tea_leaf_cheby_init_kernel_h = -1;
__constant__ int xdim4_tea_leaf_cheby_init_kernel;
int xdim4_tea_leaf_cheby_init_kernel_h = -1;
int ydim4_tea_leaf_cheby_init_kernel_h = -1;
__constant__ int xdim5_tea_leaf_cheby_init_kernel;
int xdim5_tea_leaf_cheby_init_kernel_h = -1;
int ydim5_tea_leaf_cheby_init_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#define OPS_ACC0(x,y) (x+xdim0_tea_leaf_cheby_init_kernel*(y))
#define OPS_ACC1(x,y) (x+xdim1_tea_leaf_cheby_init_kernel*(y))
#define OPS_ACC2(x,y) (x+xdim2_tea_leaf_cheby_init_kernel*(y))
#define OPS_ACC3(x,y) (x+xdim3_tea_leaf_cheby_init_kernel*(y))
#define OPS_ACC4(x,y) (x+xdim4_tea_leaf_cheby_init_kernel*(y))
#define OPS_ACC5(x,y) (x+xdim5_tea_leaf_cheby_init_kernel*(y))
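// OPS_ACCn(x,y) maps a relative (x,y) stencil offset onto a flat 1D index using
// the per-argument row pitch xdimN_tea_leaf_cheby_init_kernel that the host stub
// copies into constant memory before launching the kernel.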
//user function
__device__
void tea_leaf_cheby_init_kernel_gpu(double *w, double *r, const double *Kx, const double *Ky,
const double *u,const double *u0,const double *rx,const double *ry) {
w[OPS_ACC0(0,0)] = (1.0
+ (*ry)*(Ky[OPS_ACC3(0, 1)] + Ky[OPS_ACC3(0,0)])
+ (*rx)*(Kx[OPS_ACC2(1, 0)] + Kx[OPS_ACC2(0,0)]))*u[OPS_ACC4(0,0)]
- (*ry)*(Ky[OPS_ACC3(0, 1)] *u[OPS_ACC4(0, 1)] + Ky[OPS_ACC3(0,0)]*u[OPS_ACC4(0, -1)])
- (*rx)*(Kx[OPS_ACC2(1, 0)] *u[OPS_ACC4(1, 0)] + Kx[OPS_ACC2(0,0)]*u[OPS_ACC4(-1, 0)]);
r[OPS_ACC1(0,0)] = u0[OPS_ACC5(0,0)] - w[OPS_ACC0(0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
__global__ void ops_tea_leaf_cheby_init_kernel(
double* __restrict arg0,
double* __restrict arg1,
const double* __restrict arg2,
const double* __restrict arg3,
const double* __restrict arg4,
const double* __restrict arg5,
const double arg6,
const double arg7,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_tea_leaf_cheby_init_kernel;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_tea_leaf_cheby_init_kernel;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_tea_leaf_cheby_init_kernel;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_tea_leaf_cheby_init_kernel;
arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_tea_leaf_cheby_init_kernel;
arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_tea_leaf_cheby_init_kernel;
if (idx_x < size0 && idx_y < size1) {
tea_leaf_cheby_init_kernel_gpu(arg0, arg1, arg2, arg3,
arg4, arg5, &arg6, &arg7);
}
}
// host stub function
void ops_par_loop_tea_leaf_cheby_init_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
//Timing
double t1,t2,c1,c2;
ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args,8,range,23)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(23,"tea_leaf_cheby_init_kernel");
OPS_kernels[23].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<2; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
int xdim5 = args[5].dat->size[0];
if (xdim0 != xdim0_tea_leaf_cheby_init_kernel_h || xdim1 != xdim1_tea_leaf_cheby_init_kernel_h || xdim2 != xdim2_tea_leaf_cheby_init_kernel_h || xdim3 != xdim3_tea_leaf_cheby_init_kernel_h || xdim4 != xdim4_tea_leaf_cheby_init_kernel_h || xdim5 != xdim5_tea_leaf_cheby_init_kernel_h) {
hipMemcpyToSymbol( xdim0_tea_leaf_cheby_init_kernel, &xdim0, sizeof(int) );
xdim0_tea_leaf_cheby_init_kernel_h = xdim0;
hipMemcpyToSymbol( xdim1_tea_leaf_cheby_init_kernel, &xdim1, sizeof(int) );
xdim1_tea_leaf_cheby_init_kernel_h = xdim1;
hipMemcpyToSymbol( xdim2_tea_leaf_cheby_init_kernel, &xdim2, sizeof(int) );
xdim2_tea_leaf_cheby_init_kernel_h = xdim2;
hipMemcpyToSymbol( xdim3_tea_leaf_cheby_init_kernel, &xdim3, sizeof(int) );
xdim3_tea_leaf_cheby_init_kernel_h = xdim3;
hipMemcpyToSymbol( xdim4_tea_leaf_cheby_init_kernel, &xdim4, sizeof(int) );
xdim4_tea_leaf_cheby_init_kernel_h = xdim4;
hipMemcpyToSymbol( xdim5_tea_leaf_cheby_init_kernel, &xdim5, sizeof(int) );
xdim5_tea_leaf_cheby_init_kernel_h = xdim5;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
int dat4 = args[4].dat->elem_size;
int dat5 = args[5].dat->elem_size;
char *p_a[8];
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 *
(start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 *
(start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 *
(start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]);
p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d];
#endif
int base4 = dat4 * 1 *
(start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]);
p_a[4] = (char *)args[4].data_d + base4;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d];
#endif
int base5 = dat5 * 1 *
(start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]);
p_a[5] = (char *)args[5].data_d + base5;
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args,8,range);
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[23].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_tea_leaf_cheby_init_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
*(double *)arg6.data, *(double *)arg7.data,x_size, y_size);
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[23].time += t1-t2;
}
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[23].mpi_time += t2-t1;
OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
| 27676ef637b08a627ac47ab04d2e72f6323bfe95.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_tea_leaf_cheby_init_kernel;
int xdim0_tea_leaf_cheby_init_kernel_h = -1;
int ydim0_tea_leaf_cheby_init_kernel_h = -1;
__constant__ int xdim1_tea_leaf_cheby_init_kernel;
int xdim1_tea_leaf_cheby_init_kernel_h = -1;
int ydim1_tea_leaf_cheby_init_kernel_h = -1;
__constant__ int xdim2_tea_leaf_cheby_init_kernel;
int xdim2_tea_leaf_cheby_init_kernel_h = -1;
int ydim2_tea_leaf_cheby_init_kernel_h = -1;
__constant__ int xdim3_tea_leaf_cheby_init_kernel;
int xdim3_tea_leaf_cheby_init_kernel_h = -1;
int ydim3_tea_leaf_cheby_init_kernel_h = -1;
__constant__ int xdim4_tea_leaf_cheby_init_kernel;
int xdim4_tea_leaf_cheby_init_kernel_h = -1;
int ydim4_tea_leaf_cheby_init_kernel_h = -1;
__constant__ int xdim5_tea_leaf_cheby_init_kernel;
int xdim5_tea_leaf_cheby_init_kernel_h = -1;
int ydim5_tea_leaf_cheby_init_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#define OPS_ACC0(x,y) (x+xdim0_tea_leaf_cheby_init_kernel*(y))
#define OPS_ACC1(x,y) (x+xdim1_tea_leaf_cheby_init_kernel*(y))
#define OPS_ACC2(x,y) (x+xdim2_tea_leaf_cheby_init_kernel*(y))
#define OPS_ACC3(x,y) (x+xdim3_tea_leaf_cheby_init_kernel*(y))
#define OPS_ACC4(x,y) (x+xdim4_tea_leaf_cheby_init_kernel*(y))
#define OPS_ACC5(x,y) (x+xdim5_tea_leaf_cheby_init_kernel*(y))
//user function
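// The user function below appears to apply the 5-point tea_leaf operator to u:
// w = (1 + ry*(Ky(0,1)+Ky(0,0)) + rx*(Kx(1,0)+Kx(0,0)))*u minus the four
// neighbour contributions, and r = u0 - w is the residual that seeds the
// Chebyshev iteration. Kx/Ky are the face coefficients and rx/ry their
// scalings; their exact physical meaning is not defined in this file.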
__device__
void tea_leaf_cheby_init_kernel_gpu(double *w, double *r, const double *Kx, const double *Ky,
const double *u,const double *u0,const double *rx,const double *ry) {
w[OPS_ACC0(0,0)] = (1.0
+ (*ry)*(Ky[OPS_ACC3(0, 1)] + Ky[OPS_ACC3(0,0)])
+ (*rx)*(Kx[OPS_ACC2(1, 0)] + Kx[OPS_ACC2(0,0)]))*u[OPS_ACC4(0,0)]
- (*ry)*(Ky[OPS_ACC3(0, 1)] *u[OPS_ACC4(0, 1)] + Ky[OPS_ACC3(0,0)]*u[OPS_ACC4(0, -1)])
- (*rx)*(Kx[OPS_ACC2(1, 0)] *u[OPS_ACC4(1, 0)] + Kx[OPS_ACC2(0,0)]*u[OPS_ACC4(-1, 0)]);
r[OPS_ACC1(0,0)] = u0[OPS_ACC5(0,0)] - w[OPS_ACC0(0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
__global__ void ops_tea_leaf_cheby_init_kernel(
double* __restrict arg0,
double* __restrict arg1,
const double* __restrict arg2,
const double* __restrict arg3,
const double* __restrict arg4,
const double* __restrict arg5,
const double arg6,
const double arg7,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_tea_leaf_cheby_init_kernel;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_tea_leaf_cheby_init_kernel;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_tea_leaf_cheby_init_kernel;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_tea_leaf_cheby_init_kernel;
arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_tea_leaf_cheby_init_kernel;
arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_tea_leaf_cheby_init_kernel;
if (idx_x < size0 && idx_y < size1) {
tea_leaf_cheby_init_kernel_gpu(arg0, arg1, arg2, arg3,
arg4, arg5, &arg6, &arg7);
}
}
// host stub function
void ops_par_loop_tea_leaf_cheby_init_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
//Timing
double t1,t2,c1,c2;
ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args,8,range,23)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(23,"tea_leaf_cheby_init_kernel");
OPS_kernels[23].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<2; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
int xdim5 = args[5].dat->size[0];
if (xdim0 != xdim0_tea_leaf_cheby_init_kernel_h || xdim1 != xdim1_tea_leaf_cheby_init_kernel_h || xdim2 != xdim2_tea_leaf_cheby_init_kernel_h || xdim3 != xdim3_tea_leaf_cheby_init_kernel_h || xdim4 != xdim4_tea_leaf_cheby_init_kernel_h || xdim5 != xdim5_tea_leaf_cheby_init_kernel_h) {
cudaMemcpyToSymbol( xdim0_tea_leaf_cheby_init_kernel, &xdim0, sizeof(int) );
xdim0_tea_leaf_cheby_init_kernel_h = xdim0;
cudaMemcpyToSymbol( xdim1_tea_leaf_cheby_init_kernel, &xdim1, sizeof(int) );
xdim1_tea_leaf_cheby_init_kernel_h = xdim1;
cudaMemcpyToSymbol( xdim2_tea_leaf_cheby_init_kernel, &xdim2, sizeof(int) );
xdim2_tea_leaf_cheby_init_kernel_h = xdim2;
cudaMemcpyToSymbol( xdim3_tea_leaf_cheby_init_kernel, &xdim3, sizeof(int) );
xdim3_tea_leaf_cheby_init_kernel_h = xdim3;
cudaMemcpyToSymbol( xdim4_tea_leaf_cheby_init_kernel, &xdim4, sizeof(int) );
xdim4_tea_leaf_cheby_init_kernel_h = xdim4;
cudaMemcpyToSymbol( xdim5_tea_leaf_cheby_init_kernel, &xdim5, sizeof(int) );
xdim5_tea_leaf_cheby_init_kernel_h = xdim5;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
int dat4 = args[4].dat->elem_size;
int dat5 = args[5].dat->elem_size;
char *p_a[8];
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 *
(start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 *
(start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 *
(start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]);
p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d];
#endif
int base4 = dat4 * 1 *
(start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]);
p_a[4] = (char *)args[4].data_d + base4;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d];
#endif
int base5 = dat5 * 1 *
(start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]);
p_a[5] = (char *)args[5].data_d + base5;
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args,8,range);
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[23].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
ops_tea_leaf_cheby_init_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
*(double *)arg6.data, *(double *)arg7.data,x_size, y_size);
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[23].time += t1-t2;
}
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[23].mpi_time += t2-t1;
OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[23].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
|
8f382e61ae8e5d4ea0af4d2c4eeb5d0705720acb.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
#include<sys/time.h>
#include <hip/hip_cooperative_groups.h>
#define N 512
namespace cg = cooperative_groups;
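// A sketch of what decompose does (assuming it is launched as
// decompose<<<N, N>>>, one block per row and one thread per column, as in
// main below): at elimination step `iteration`, every row with index >=
// iteration computes the multiplier p = A[row][iteration-1] /
// A[iteration-1][iteration-1], subtracts p times the pivot row from its own
// row, and then stores p back into column iteration-1. L (below the diagonal)
// and U (on and above it) are therefore built in place, with no pivoting.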
__global__ void decompose(float *A, float *pivots, int iteration)
{
int blockID = blockIdx.x;
int threadId = threadIdx.x;
float p = 0;
if(blockID >= iteration){
p = A[blockIdx.x * N + iteration - 1]/A[(iteration - 1)*N + iteration - 1];
A[blockID*N + threadId] -= p * A[(iteration-1)*N + threadId];
A[blockID*N + iteration-1] = p;
}
}
void printA(float *A){
for(int i=0;i<N;i++){
for(int j=0;j<N;j++)
printf(" %8.2f ", A[i*N + j]);
printf("\n");
}
}
int main(int argc, char *argv[]){
float *A;
float *pivots;
float *dev_a, *dev_pivots;
int *devItr;
A=(float *)malloc(sizeof(float)*N*N);
hipEvent_t start, stop;
float time;
float totalTime=0;
hipMalloc ( (void**)&dev_a, N*N* sizeof (float) );
hipMalloc ( (void**)&dev_pivots, N*sizeof (float) );
hipMalloc ( (void**)&devItr, sizeof (int) );
pivots=(float *)malloc(sizeof(float)*N);
for(int i=0;i<N*N;i++)
    A[i] = (float)(rand()%100);
hipMemcpy(dev_a, A, N*N*sizeof(float), hipMemcpyHostToDevice);
/*for(int i=0;i<N;i++){
for(int j=0;j<N;j++)
printf(" %6.2f ", A[i*N + j]);
printf("\n");
}
printf("\n\n");*/
for(int i=1;i<N;i++)
pivots[i] = A[(i)*N]/A[0];
hipMemcpy(dev_pivots, pivots, N*sizeof(float), hipMemcpyHostToDevice);
hipEventCreate(&start);
hipEventCreate(&stop);
for(int i=1;i<N;i++) {
hipEventRecord(start, 0);
hipLaunchKernelGGL(( decompose), dim3(N),dim3(N), 0, 0, dev_a,dev_pivots,i);
hipEventRecord(stop, 0);
hipDeviceSynchronize();
//printf("\n");
hipMemcpy(A, dev_a, N*N*sizeof(float), hipMemcpyDeviceToHost);
hipEventElapsedTime(&time, start, stop);
totalTime += time;
}
printf("\n \n GPU kernel execution time = %f ms\n",totalTime);
}
| 8f382e61ae8e5d4ea0af4d2c4eeb5d0705720acb.cu | #include <cuda.h>
#include <stdio.h>
#include <math.h>
#include<sys/time.h>
#include <cooperative_groups.h>
#define N 512
namespace cg = cooperative_groups;
__global__ void decompose(float *A, float *pivots, int iteration)
{
int blockID = blockIdx.x;
int threadId = threadIdx.x;
float p = 0;
if(blockID >= iteration){
p = A[blockIdx.x * N + iteration - 1]/A[(iteration - 1)*N + iteration - 1];
A[blockID*N + threadId] -= p * A[(iteration-1)*N + threadId];
A[blockID*N + iteration-1] = p;
}
}
void printA(float *A){
for(int i=0;i<N;i++){
for(int j=0;j<N;j++)
printf(" %8.2f ", A[i*N + j]);
printf("\n");
}
}
int main(int argc, char *argv[]){
float *A;
float *pivots;
float *dev_a, *dev_pivots;
int *devItr;
A=(float *)malloc(sizeof(float)*N*N);
cudaEvent_t start, stop;
float time;
float totalTime=0;
cudaMalloc ( (void**)&dev_a, N*N* sizeof (float) );
cudaMalloc ( (void**)&dev_pivots, N*sizeof (float) );
cudaMalloc ( (void**)&devItr, sizeof (int) );
pivots=(float *)malloc(sizeof(float)*N);
for(int i=0;i<N*N;i++)
    A[i] = (float)(rand()%100);
cudaMemcpy(dev_a, A, N*N*sizeof(float), cudaMemcpyHostToDevice);
/*for(int i=0;i<N;i++){
for(int j=0;j<N;j++)
printf(" %6.2f ", A[i*N + j]);
printf("\n");
}
printf("\n\n");*/
for(int i=1;i<N;i++)
pivots[i] = A[(i)*N]/A[0];
cudaMemcpy(dev_pivots, pivots, N*sizeof(float), cudaMemcpyHostToDevice);
cudaEventCreate(&start);
cudaEventCreate(&stop);
for(int i=1;i<N;i++) {
cudaEventRecord(start, 0);
decompose<<<N,N>>>(dev_a,dev_pivots,i);
cudaEventRecord(stop, 0);
cudaThreadSynchronize();
//printf("\n");
cudaMemcpy(A, dev_a, N*N*sizeof(float), cudaMemcpyDeviceToHost);
cudaEventElapsedTime(&time, start, stop);
totalTime += time;
}
printf("\n \n GPU kernel execution time = %f ms\n",totalTime);
}
|
5c75d56c94389d13e2f0e583eb711890ae824567.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/PinnedMemoryAllocator.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/hip/MiscUtils.h>
#include <ATen/native/Resize.h>
#include <ATen/native/BatchLinearAlgebra.h>
#include <ATen/native/hip/BatchLinearAlgebraLib.h>
#include <ATen/native/cpu/zmath.h>
#include <THH/THH.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma_types.h>
#include <magma_v2.h>
const bool use_magma_ = true;
#else
const bool use_magma_ = false;
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info);
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info);
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n);
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info);
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info);
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n);
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2);
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSyevd(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaEig(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, scalar_t *A, magma_int_t lda,
scalar_t *w, scalar_t *VL, magma_int_t ldvl,
scalar_t *VR, magma_int_t ldvr, scalar_t *work, magma_int_t lwork,
value_t *rwork,
magma_int_t *info);
template<class scalar_t, class value_t=scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, value_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
value_t* rwork,
magma_int_t* iwork, magma_int_t* info);
template<class scalar_t>
void magmaLuSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaLuSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaGels(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb,
scalar_t* hwork, magma_int_t lwork, magma_int_t* info);
template<>
void magmaSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesv_gpu(n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesv_gpu(n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<double>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_zgesv_batched(n, nrhs,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<float>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_cgesv_batched(n, nrhs,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetrf_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetrf_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zgetrf_nopiv_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cgetrf_nopiv_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
return magma_get_dgetri_nb(n);
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
return magma_get_sgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<double>>(
magma_int_t n) {
return magma_get_zgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<float>>(
magma_int_t n) {
return magma_get_cgetri_nb(n);
}
template<>
void magmaGetri<double>(
magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetri<float>(
magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetri<c10::complex<double>>(
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
magma_int_t* ipiv,
c10::complex<double>* dwork,
magma_int_t lwork,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetri_gpu(
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaDoubleComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetri<c10::complex<float>>(
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
magma_int_t* ipiv,
c10::complex<float>* dwork,
magma_int_t lwork,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetri_gpu(
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaFloatComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<double>>(
magma_int_t n,
c10::complex<double>** dA_array,
magma_int_t ldda,
magma_int_t** ipiv_array,
c10::complex<double>** dinvA_array,
magma_int_t lddia,
magma_int_t* info_array,
magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetri_outofplace_batched(
n,
reinterpret_cast<magmaDoubleComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<float>>(
magma_int_t n,
c10::complex<float>** dA_array,
magma_int_t ldda,
magma_int_t** ipiv_array,
c10::complex<float>** dinvA_array,
magma_int_t lddia,
magma_int_t* info_array,
magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetri_outofplace_batched(
n,
reinterpret_cast<magmaFloatComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaFloatComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrs_gpu(uplo, n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrs_gpu(uplo, n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda,
reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_zpotrs_batched(uplo, n, nrhs,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_cpotrs_batched(uplo, n, nrhs,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<double>(
magma_uplo_t uplo, magma_int_t n, double* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<float>(
magma_uplo_t uplo, magma_int_t n, float* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<double>(
magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<float>(
magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<double>** dA_array, magma_int_t ldda, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmaDoubleComplex alpha({1, 0});
magmablas_ztrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<float>** dA_array, magma_int_t ldda, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmaFloatComplex alpha({1, 0});
magmablas_ctrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
return magma_get_dgeqrf_nb(m, n);
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
return magma_get_sgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<double>>(
magma_int_t m,
magma_int_t n) {
return magma_get_zgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<float>>(
magma_int_t m,
magma_int_t n) {
return magma_get_cgeqrf_nb(m, n);
}
template<>
void magmaGeqrf<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGeqrf<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGeqrf<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_zgeqrf_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
info);
} else {
magma_zgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGeqrf<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_cgeqrf_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
info);
} else {
magma_cgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaOrgqr<double>(
magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaOrgqr<float>(
magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaOrgqr<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaOrgqr<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSyevd<double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSyevd<float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSyevd<c10::complex<double>, double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA),
ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSyevd<c10::complex<float>, float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA),
ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaEig<double>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
double *A, magma_int_t lda,
double *w,
double *VL, magma_int_t ldvl,
double *VR, magma_int_t ldvr,
double *work, magma_int_t lwork,
double *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
  // magma [sd]geev wants two separate output arrays: wr and wi for the
  // real and imaginary parts
double *wr = w;
double *wi = w + n;
(void)rwork; // unused
magma_dgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaEig<float>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
float *A, magma_int_t lda,
float *w,
float *VL, magma_int_t ldvl,
float *VR, magma_int_t ldvr,
float *work, magma_int_t lwork,
float *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
float *wr = w;
float *wi = w + n;
(void)rwork; // unused
magma_sgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaEig<c10::complex<double>, double>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
c10::complex<double> *A, magma_int_t lda,
c10::complex<double> *w,
c10::complex<double> *VL, magma_int_t ldvl,
c10::complex<double> *VR, magma_int_t ldvr,
c10::complex<double> *work, magma_int_t lwork,
double *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
magma_zgeev(jobvl, jobvr, n,
reinterpret_cast<magmaDoubleComplex*>(A), lda,
reinterpret_cast<magmaDoubleComplex*>(w),
reinterpret_cast<magmaDoubleComplex*>(VL), ldvl,
reinterpret_cast<magmaDoubleComplex*>(VR), ldvr,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaEig<c10::complex<float>, float>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
c10::complex<float> *A, magma_int_t lda,
c10::complex<float> *w,
c10::complex<float> *VL, magma_int_t ldvl,
c10::complex<float> *VR, magma_int_t ldvr,
c10::complex<float> *work, magma_int_t lwork,
float *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
magma_cgeev(jobvl, jobvr, n,
reinterpret_cast<magmaFloatComplex*>(A), lda,
reinterpret_cast<magmaFloatComplex*>(w),
reinterpret_cast<magmaFloatComplex*>(VL), ldvl,
reinterpret_cast<magmaFloatComplex*>(VR), ldvr,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
magma_int_t lda, double* s, double* U, magma_int_t ldu,
double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
magma_int_t lda, float* s, float* U, magma_int_t ldu,
float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
float* rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<c10::complex<float>, float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<float>* A,
magma_int_t lda, float* s, c10::complex<float>* U, magma_int_t ldu,
c10::complex<float>* VT, magma_int_t ldvt, c10::complex<float>* work, magma_int_t lwork,
float *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesdd(jobz, m, n, reinterpret_cast<magmaFloatComplex*>(A), lda, s,
reinterpret_cast<magmaFloatComplex*>(U), ldu,
reinterpret_cast<magmaFloatComplex*>(VT), ldvt,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<c10::complex<double>, double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<double>* A,
magma_int_t lda, double* s, c10::complex<double>* U, magma_int_t ldu,
c10::complex<double>* VT, magma_int_t ldvt, c10::complex<double>* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesdd(jobz, m, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, s,
reinterpret_cast<magmaDoubleComplex*>(U), ldu,
reinterpret_cast<magmaDoubleComplex*>(VT), ldvt,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv,
c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv,
c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
double** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
float** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_zgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_cgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGels<float>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
float* dA, magma_int_t ldda, float* dB, magma_int_t lddb,
float* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgels_gpu(trans, m, n, nrhs,
dA, ldda, dB, lddb,
hwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGels<double>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
double* dA, magma_int_t ldda, double* dB, magma_int_t lddb,
double* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgels_gpu(trans, m, n, nrhs,
dA, ldda, dB, lddb,
hwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGels<c10::complex<float>>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* dB, magma_int_t lddb,
c10::complex<float>* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgels_gpu(trans, m, n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda,
reinterpret_cast<magmaFloatComplex*>(dB), lddb,
reinterpret_cast<magmaFloatComplex*>(hwork), lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGels<c10::complex<double>>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* dB, magma_int_t lddb,
c10::complex<double>* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgels_gpu(trans, m, n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb,
reinterpret_cast<magmaDoubleComplex*>(hwork), lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
namespace {
/*
MAGMA can return errors both as a return value and in the info argument.
The return value and info should always be identical.
In general, the meaning is as given in this table.
Predefined error codes are large negative numbers. Using the symbolic
constants below is preferred, but the numeric values can be found in
include/magma_types.h.
Info | Description
----------- | -----------
info = 0 (MAGMA_SUCCESS) | Successful exit
info < 0, but small | For info = -i, the i-th argument had an illegal value
info > 0 | Function-specific error such as singular matrix
MAGMA_ERR_DEVICE_ALLOC | Could not allocate GPU device memory
MAGMA_ERR_HOST_ALLOC | Could not allocate CPU host memory
MAGMA_ERR_ILLEGAL_VALUE | An argument had an illegal value (deprecated; instead it should return -i to say the i-th argument was bad)
MAGMA_ERR_INVALID_PTR | Can't free pointer
MAGMA_ERR_NOT_IMPLEMENTED | Function or option not implemented
MAGMA_ERR_NOT_SUPPORTED | Function or option not supported on the current architecture
*/
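// For example, info = -3 means the third argument passed to the MAGMA routine
// was illegal, while for an LU factorization such as getrf an info = k > 0
// typically indicates that U(k,k) is exactly zero, i.e. the matrix is singular.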
void checkMagmaInternalError(magma_int_t info, const std::string& magma_function_name) {
// if info > 0 the error is function-specific, do nothing in this case
TORCH_CHECK(info >= 0,
"MAGMA error: ",
magma_strerror(info),
", info = ", info,
", when calling ", magma_function_name);
}
} // anonymous namespace
#endif // USE_MAGMA
#define ALLOCATE_ARRAY(name, type, size) \
auto storage_##name = pin_memory<type>(size); \
name = static_cast<type*>(storage_##name.data());
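// For reference, ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n)
// expands roughly to
//   auto storage_ipiv_data = pin_memory<magma_int_t>(batch_size * n);
//   ipiv_data = static_cast<magma_int_t*>(storage_ipiv_data.data());
// so each scratch array lives in pinned host memory whose lifetime is tied to
// the enclosing scope.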
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, Tensor& infos_out) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t lda = ::max(magma_int_t{1}, n);
if (b.dim() == 2) {
auto ipiv = at::empty({n}, at::kInt);
// magmaSolve requires infos tensor to live on CPU
Tensor infos = at::empty(infos_out.sizes(), infos_out.options().device(kCPU));
magmaSolve<scalar_t>(n, nrhs, A_data, lda, ipiv.data_ptr<magma_int_t>(),
b_data, lda, infos.data_ptr<magma_int_t>());
infos_out.copy_(infos);
} else {
auto infos_data = infos_out.data_ptr<magma_int_t>();
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
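// For example: batch_size = 140000 yields 2 full mini-batches of 65535 solves
// plus a remainder of 8930 handled after the loop.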
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &infos_data[mini_idx];
magmaSolveBatched<scalar_t>(
n, nrhs, A_array_cur, lda, ipiv_array_cur, b_array_cur, lda,
info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaSolveBatched<scalar_t>(
n, nrhs, &A_array[mini_idx], lda, &ipiv_array[mini_idx], &b_array[mini_idx], lda,
&infos_data[mini_idx], batch_size % batch_limit, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
// infos might not get filled for empty inputs therefore at::zeros is used instead of at::empty
auto infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "solve_cuda", [&]{
apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "solve_cuda");
} else {
singleCheckErrors(infos.item().toInt(), "solve_cuda");
}
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// This is a type dispatching helper function for 'apply_solve'
Tensor& _linalg_solve_out_helper_cuda(Tensor& result, Tensor& input, Tensor& infos) {
// 'result' and 'input' should be in column major order (it should be checked before calling this function)
// the content of 'result', 'input' and 'infos' is overwritten by 'apply_solve'
// 'result' should contain data of 'other' tensor (right-hand-side of the linear system of equations)
// 'input' should contain the data of the original 'input' tensor (left-hand side of the linear system)
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_solve_out_cpu", [&]{
apply_solve<scalar_t>(result, input, infos);
});
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of the n-by-n matrix 'self'; the result is saved to 'self_inv'.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
'infos_lu' is for holding magmaLU errors, and 'infos_getri' is for holding magmaGetri errors
For more information see MAGMA's documentation for GETRI and GETRF routines.
*/
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
auto infos_lu_data = infos_lu.data_ptr<magma_int_t>();
auto infos_getri_data = infos_getri.data_ptr<magma_int_t>();
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
// MAGMA does not work with batch_size == 0, let's return early in this case
if (batch_size == 0) {
return;
}
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * lda);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
// magmaLuBatched leaves ipiv_data values unwritten for singular matrices.
// Initialize to avoid memory access violations inside magma kernels (gh-51930).
std::fill_n(ipiv_data, batch_size * n, 1);
MAGMAQueue magma_queue(self.get_device());
magmaLuBatched<scalar_t>(
n, n, self_array, lda, ipiv_array, infos_lu_data,
batch_size, magma_queue);
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur_getri = &infos_getri_data[mini_idx];
magmaGetriBatched<scalar_t>(
n, self_array_cur, lda, ipiv_array_cur, self_inv_array_cur,
lda, info_array_cur_getri, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaGetriBatched<scalar_t>(
n, &self_array[mini_idx], lda, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
lda, &infos_getri_data[mini_idx], batch_size % batch_limit, magma_queue);
}
#endif
}
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
// magmaLu and magmaGetri require the infos tensor to live on CPU
infos_lu = infos_lu.to(at::kCPU);
infos_getri = infos_getri.to(at::kCPU);
Tensor ipiv = at::empty({lda}, at::kInt);
Tensor dwork = at::empty({lwork}, self.options());
magmaLu<scalar_t>(n, n, self_data, lda, ipiv.data_ptr<magma_int_t>(), infos_lu.data_ptr<magma_int_t>());
magmaGetri<scalar_t>(
n, self_data, lda, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, infos_getri.data_ptr<magma_int_t>());
#endif
}
Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
auto self_inv_working_copy = cloneBatchedColumnMajor(self);
if (self.dim() > 2) {
auto infos_lu = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
auto infos_getri = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_batched_inverse<scalar_t>(
self_working_copy, self_inv_working_copy, infos_lu, infos_getri);
});
batchCheckErrors(infos_lu, "inverse_cuda");
batchCheckErrors(infos_getri, "inverse_cuda");
} else {
// magmaLu and magmaGetri require the infos tensor to live on CPU
auto infos_lu = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
auto infos_getri = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_single_inverse<scalar_t>(self_inv_working_copy, infos_lu, infos_getri);
});
singleCheckErrors(infos_lu.item().toInt(), "inverse_cuda");
singleCheckErrors(infos_getri.item().toInt(), "inverse_cuda");
}
return self_inv_working_copy;
}
Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER
if ((self.dim() == 2) || (/* self.dim() > 2 && */ batchCount(self) <= 2) || !use_magma_) {
return _inverse_helper_cuda_lib(self); // cusolver or cublas
} else {
return _inverse_helper_cuda_legacy(self); // magma-cuda
}
#else
return _inverse_helper_cuda_legacy(self); // magma-cuda
#endif
}
// This is a type dispatching helper function for 'apply_batched_inverse' and 'singleCheckErrors'
Tensor& _linalg_inv_out_helper_cuda_legacy(Tensor& result, Tensor& infos_lu, Tensor& infos_getri) {
// assuming result is in column major order and contains the matrices to invert
if (result.dim() > 2) {
auto input_working_copy = cloneBatchedColumnMajor(result);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_batched_inverse<scalar_t>(
input_working_copy, result, infos_lu, infos_getri);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_single_inverse<scalar_t>(result, infos_lu, infos_getri);
});
}
return result;
}
// This is a MAGMA/cuSOLVER dispatching helper function
Tensor& _linalg_inv_out_helper_cuda(Tensor &result, Tensor& infos_lu, Tensor& infos_getri) {
// This function calculates the inverse matrix in-place
// result should be in column major order and contain matrices to invert
#ifdef USE_CUSOLVER
if ((result.dim() == 2) || (/* result.dim() > 2 && */ batchCount(result) <= 2) || !use_magma_) {
return _linalg_inv_out_helper_cuda_lib(result, infos_lu, infos_getri); // cusolver or cublas
} else {
return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda
}
#else
return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda
#endif
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp = 0;
if (b.dim() == 2) {
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, lda,
b_data, lda, &info_tmp);
info = info_tmp;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array_cur, lda, b_array_cur, lda,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, &A_array[mini_idx], lda, &b_array[mini_idx], lda,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _cholesky_solve_helper_cuda_magma(const Tensor& self, const Tensor& A, bool upper) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info);
});
TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
return self_working_copy;
}
// Todo: cusolverDn<T>potrsBatched only supports nrhs == 1 and does not have good performance.
// Batched cholesky_solve is dispatched to magma.
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
#ifdef USE_CUSOLVER
if (batchCount(self) == 1 || !use_magma_) {
return _cholesky_solve_helper_cuda_cusolver(self, A, upper);
} else {
return _cholesky_solve_helper_cuda_magma(self, A, upper);
}
#else
return _cholesky_solve_helper_cuda_magma(self, A, upper);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky(const Tensor& self, bool upper, const Tensor& info) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.linalg.cholesky on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
auto lda = std::max<magma_int_t>(1, n);
if (self.dim() == 2) {
// magmaCholesky requires info to be on CPU
magma_int_t info_cpu = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, lda, &info_cpu);
info.fill_(info_cpu);
} else {
TORCH_INTERNAL_ASSERT(info.is_cuda());
auto info_data = info.data_ptr<magma_int_t>();
// magmaCholeskyBatched supports only upper=false
uplo = MagmaLower;
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
scalar_t** self_array;
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
// Compute as many batches of 262140 as possible
// 262140 is the size of the largest batch of matrices that can be run without
// violating the maximum kernel configuration
// For complex input the batch limit is 65535 (determined experimentally, see https://github.com/pytorch/pytorch/pull/47047#discussion_r516086923 for more information)
int64_t batch_limit = self.is_complex() ? 65535 : 262140;
for (int64_t mini_idx = 0; mini_idx < batch_size; mini_idx += batch_limit) {
int64_t nbatches = std::min(batch_limit, batch_size - mini_idx);
scalar_t** self_array_cur = &self_array[mini_idx];
magma_int_t* info_array_cur = &info_data[mini_idx];
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array_cur, lda, info_array_cur, nbatches, magma_queue);
}
}
#endif
}
void cholesky_helper_magma(const Tensor& input, bool upper, const Tensor& info) {
Tensor result = input;
if (input.dim() > 2) {
// MAGMA's batched cholesky operator has an off-by-one error causing IMA
// (see https://github.com/pytorch/pytorch/issues/42666). This code is based
// on the #cloneBatchedColumnMajor function however it pads the input with
// one extra element utilizing the fact that the resize_as_ method preserves
// the storage even if it's larger than the new sizes. This way if MAGMA
// reads off bounds it will still be valid user memory.
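// For example (illustration only): a (2, 3, 3) input allocates 19 elements; the first
// 18 back the batch of matrices and the extra trailing element absorbs the overread.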
result = at::empty(input.numel() + 1, input.options());
result.resize_as_(input).transpose_(-2, -1);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(result.transpose(-2, -1).is_contiguous());
// batched MAGMA doesn't support upper=true
// we transpose and conjugate the input as a workaround
result.copy_(upper ? input.conj().transpose(-2, -1) : input);
}
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
input.scalar_type(), "cholesky_cuda", [&] {
apply_cholesky<scalar_t>(result, upper, info);
});
if (input.dim() > 2) {
// if upper=true we need to transpose and conjugate the result tensor
// because the cholesky decomposition is stored in the lower triangular part
if (upper) {
input.copy_(result.conj().transpose(-2, -1));
} else {
input.copy_(result);
}
}
}
// Todo: cusolverDnXpotrfBatched has some numerical issue and is not used
// here. Batched cholesky is dispatched to magma.
// We will switch to cusolverDnXpotrfBatched after the issue is fixed.
// See https://github.com/pytorch/pytorch/issues/53879.
static void cholesky_kernel(const Tensor& input, const Tensor& info, bool upper) {
#ifdef USE_CUSOLVER
if (batchCount(input) == 1 || !use_magma_) {
cholesky_helper_cusolver(input, upper, info);
} else {
cholesky_helper_magma(input, upper, info);
}
#else
cholesky_helper_magma(input, upper, info);
#endif // USE_CUSOLVER
}
REGISTER_DISPATCH(cholesky_stub, &cholesky_kernel)
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of an n-by-n symmetric (Hermitian) positive-definite matrix 'input' using the Cholesky solver
This is an in-place routine, content of 'input' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
MAGMA requires 'infos' to reside in CPU memory.
For more information see MAGMA's documentation for POTRS routine.
*/
template <typename scalar_t>
static void apply_cholesky_inverse(Tensor& input, Tensor& infos, bool upper) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "cholesky_inverse: MAGMA library not found in compilation. Please rebuild with MAGMA.");
#else
// magmaCholeskyInverse (magma_dpotri_gpu) is slow because internally
// it transfers data several times between GPU and CPU and calls lapack routine on CPU
// using magmaCholeskySolveBatched is a lot faster
// note that magmaCholeskySolve is also slow
// 'input' is modified in-place, so we need to clone it and replace it with
// identity matrices before calling apply_cholesky_solve
auto input_working_copy = cloneBatchedColumnMajor(input);
// the 'input' tensor has to be a batch of identity matrices (the right-hand side of the solve)
input.fill_(0);
input.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
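// With 'input' now holding identity matrices, the Cholesky solve below computes X
// from A X = I, i.e. X = A^{-1}, via two triangular solves with the factor per matrix.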
Tensor result_u, input_u;
if (input.dim() == 2) {
// unsqueezing here so that the batched version is used
result_u = input.unsqueeze(0);
input_u = input_working_copy.unsqueeze(0);
} else {
result_u = input;
input_u = input_working_copy;
}
// magma's potrs_batched doesn't take matrix-wise array of ints as an 'info' argument
// it returns a single 'magma_int_t'
// if info = 0 the operation is successful, if info = -i, the i-th parameter had an illegal value.
int64_t info_tmp = 0;
apply_cholesky_solve<scalar_t>(result_u, input_u, upper, info_tmp);
infos.fill_(info_tmp);
#endif
}
// This is a type dispatching helper function for 'apply_cholesky_inverse'
Tensor& cholesky_inverse_kernel_impl_magma(Tensor &result, Tensor& infos, bool upper) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "cholesky_inverse_out_cuda", [&]{
apply_cholesky_inverse<scalar_t>(result, infos, upper);
});
return result;
}
Tensor& cholesky_inverse_kernel_impl(Tensor &result, Tensor& infos, bool upper) {
// This function calculates the inverse matrix in-place
// result should be in column major order and contain matrices to invert
// the content of result is overwritten by 'apply_cholesky_inverse'
#ifdef USE_CUSOLVER
if (batchCount(result) == 1 || !use_magma_) {
return cholesky_inverse_kernel_impl_cusolver(result, infos, upper);
} else {
return cholesky_inverse_kernel_impl_magma(result, infos, upper);
}
#else
return cholesky_inverse_kernel_impl_magma(result, infos, upper);
#endif
}
REGISTER_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the LU decomposition of an m-by-n matrix or batch of matrices in the 'input' tensor.
This is an in-place routine, content of 'input', 'pivots', and 'infos' is overwritten.
This is a "looped" variant for calling single input MAGMA function on batched input.
Args:
* `input` - [in] the input matrix for LU decomposition
[out] the LU decomposition
* `pivots` - [out] the pivot indices
* `infos` - [out] error codes, positive values indicate singular matrices
* `compute_pivots` - controls whether LU is computed with or without pivoting
For further details, please see the MAGMA documentation for magma_dgetrf_gpu.
*/
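// Recall the LAPACK/MAGMA convention: getrf computes P * A = L * U with unit-diagonal L,
// and pivots[i] records (1-based) the row that row i was interchanged with.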
template <typename scalar_t>
static void apply_lu_looped_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
// magmaLu and magmaLuNoPiv require infos and pivots tensor to be on CPU
// the data is later copied back to the appropriate output tensor
Tensor infos_cpu = at::empty_like(infos, infos.options().device(kCPU).pinned_memory(true));
auto input_data = input.data_ptr<scalar_t>();
auto infos_data = infos_cpu.data_ptr<magma_int_t>();
auto input_matrix_stride = matrixStride(input);
auto pivots_stride = pivots.size(-1);
auto batch_size = batchCount(input);
magma_int_t m = magma_int_cast(input.size(-2), "m");
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto leading_dimension = std::max<magma_int_t>(1, m);
if (compute_pivots) {
Tensor pivots_cpu = at::empty_like(pivots, pivots.options().device(kCPU).pinned_memory(true));
auto pivots_data = pivots_cpu.data_ptr<magma_int_t>();
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
int* pivots_working_ptr = &pivots_data[i * pivots_stride];
int* infos_working_ptr = &infos_data[i];
magmaLu<scalar_t>(m, n, input_working_ptr, leading_dimension, pivots_working_ptr, infos_working_ptr);
}
pivots.copy_(pivots_cpu, /*non_blocking=*/true);
} else {
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
int* infos_working_ptr = &infos_data[i];
magmaLuNoPiv<scalar_t>(m, n, input_working_ptr, leading_dimension, infos_working_ptr);
}
// fill the pivots tensor with indices using 1-based (Fortran) indexing
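// e.g. for min(m, n) == 3 the pivots become [1, 2, 3], i.e. the identity permutation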
auto k = std::min(m, n);
Tensor pivots_tmp = at::arange(1, k + 1, input.options().dtype(at::kInt)).expand_as(pivots);
pivots.copy_(pivots_tmp);
}
infos.copy_(infos_cpu, /*non_blocking=*/true);
#endif
}
/*
Computes the LU decomposition of an m-by-n matrix or batch of matrices in the 'input' tensor.
This is an in-place routine, content of 'input', 'pivots', and 'infos' is overwritten.
This is a specialized batched variant, it is expected to be faster than the "looped" version only for small inputs.
Args:
* `input` - [in] the input matrix for LU decomposition
[out] the LU decomposition
* `pivots` - [out] the pivot indices
* `infos` - [out] error codes, positive values indicate singular matrices
* `compute_pivots` - controls whether LU is computed with or without pivoting
For further details, please see the MAGMA documentation for magma_dgetrf_batched.
*/
template <typename scalar_t>
static void apply_lu_batched_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
auto input_data = input.data_ptr<scalar_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
auto input_matrix_stride = matrixStride(input);
magma_int_t batch_size = magma_int_cast(batchCount(input), "batchCount");
// magmaLuBatched doesn't work with zero batch dimensions
// it gives CUDA error: invalid configuration argument
if (batch_size == 0) {
infos.fill_(0);
return;
}
magma_int_t m = magma_int_cast(input.size(-2), "m");
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto leading_dimension = std::max<magma_int_t>(1, m);
scalar_t** input_array;
ALLOCATE_ARRAY(input_array, scalar_t*, batch_size);
// Set up array of pointers to matrices
for (int64_t i = 0; i < batch_size; i++) {
input_array[i] = &input_data[i * input_matrix_stride];
}
MAGMAQueue magma_queue(input.get_device());
if (compute_pivots) {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto pivots_stride = pivots.size(-1);
magma_int_t** pivots_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
}
magmaLuBatched<scalar_t>(m, n, input_array, leading_dimension, pivots_array, infos_data, batch_size, magma_queue);
} else {
magmaLuNoPivBatched<scalar_t>(m, n, input_array, leading_dimension, infos_data, batch_size, magma_queue);
// fill the pivots tensor with indices using 1-based (Fortran) indexing
auto k = std::min(m, n);
Tensor pivots_tmp = at::arange(1, k + 1, input.options().dtype(at::kInt)).expand_as(pivots);
pivots.copy_(pivots_tmp);
}
#endif
}
static void lu_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
// TODO: compare performance and use the best performing option based on input's sizes
if (input.dim() == 2) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "lu_magma", [&]{
apply_lu_looped_magma<scalar_t>(input, pivots, infos, compute_pivots);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "lu_magma", [&]{
apply_lu_batched_magma<scalar_t>(input, pivots, infos, compute_pivots);
});
}
}
REGISTER_DISPATCH(lu_stub, &lu_magma);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_triangular_solve_batched(Tensor& A, Tensor& b, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
trans = conjugate_transpose ? MagmaConjTrans : trans;
magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t m = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t n = magma_int_cast(A.size(-1), "A.size(-1)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
// magma returns early if m <= 0 || n <= 0 for magmaTriangularSolveBatched
// magmaTriangularSolve calls cuBLAS, which prints
// ** On entry to DTRSM parameter number 9 had an illegal value
// so let's use a proper lda parameter here
magma_int_t lda = std::max<magma_int_t>(1, m);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit;
int64_t mini_idx; // this is outside the loop because it is used for the case batch_size % batch_limit != 0
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, A_array_cur,
lda, b_array_cur, lda, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, &A_array[mini_idx],
lda, &b_array[mini_idx], lda, batch_size % batch_limit, magma_queue);
}
#endif
}
void triangular_solve_batched_magma(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
(void)infos; // unused
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve_batched<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular);
});
}
void triangular_solve_kernel(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
// For batches smaller than 8 and matrix sizes larger than 64x64 the cuBLAS for-loop is faster than the batched version
if (batchCount(A) <= 8 && A.size(-1) >= 64) {
triangular_solve_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
} else {
#ifndef USE_MAGMA
triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
#else
// cuBLAS batched is faster than MAGMA batched up until 512x512, after that MAGMA is faster
if (A.size(-1) <= 512) {
triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
} else {
triangular_solve_batched_magma(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
}
#endif // USE_MAGMA
}
}
REGISTER_DISPATCH(triangular_solve_stub, &triangular_solve_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ orgqr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tensor& orgqr_kernel_impl(Tensor& result, const Tensor& tau) {
// TODO: It is possible to implement efficient batched orgqr for small tau (tau.size(-1) <= 32)
// using MAGMA, however it fails on Windows because of some illegal memory reads inside MAGMA.
// See discussions in https://github.com/pytorch/pytorch/pull/51348 for comparison of cuSOLVER-MAGMA
// and Windows failure.
// For reference here is the MAGMA-based implementation: https://gist.github.com/IvanYashchuk/2db50002c9d3c1462ff769e6410ad983
#if defined(USE_CUSOLVER)
return orgqr_helper_cusolver(result, tau); // cusolver
#else
TORCH_CHECK(false, "Calling torch.orgqr on a CUDA tensor requires compiling ",
"PyTorch with cuSOLVER. Please use PyTorch built with cuSOLVER support.");
#endif
}
REGISTER_DISPATCH(orgqr_stub, &orgqr_kernel_impl);
void ormqr_kernel(const Tensor& input, const Tensor& tau, const Tensor& other, bool left, bool transpose) {
#if defined(USE_CUSOLVER)
ormqr_cusolver(input, tau, other, left, transpose);
#else
TORCH_CHECK(false,
"Calling torch.ormqr on a CUDA tensor requires compiling ",
"PyTorch with cuSOLVER. Please use PyTorch built with cuSOLVER support.");
#endif
}
REGISTER_DISPATCH(ormqr_stub, &ormqr_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_geqrf(const Tensor& input, const Tensor& tau) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.geqrf on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
magma_int_t m = magma_int_cast(input.size(-2), "m");
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto input_data = input.data_ptr<scalar_t>();
auto input_matrix_stride = matrixStride(input);
auto tau_stride = tau.size(-1);
auto batch_size = batchCount(input);
auto lda = std::max<int>(1, m);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau_cpu = at::empty(tau.sizes(), tau.options().device(at::kCPU).pinned_memory(true));
scalar_t* tau_data = tau_cpu.data_ptr<scalar_t>();
scalar_t* work_data = nullptr; // workspace is not needed for geqrf2_gpu
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
scalar_t* tau_working_ptr = &tau_data[i * tau_stride];
// now compute the actual QR and tau
// MAGMA's geqrf2_gpu function is used, this version has LAPACK-compliant arguments.
magmaGeqrf<scalar_t>(m, n, input_working_ptr, lda, tau_working_ptr, work_data, &info, /*is_v2=*/true);
checkMagmaInternalError(info, "geqrf");
}
tau.copy_(tau_cpu, /*non_blocking=*/true);
#endif
}
// This is a type dispatching helper function for 'apply_geqrf'
void geqrf_magma(const Tensor& input, const Tensor& tau) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "geqrf_magma", [&]{
apply_geqrf<scalar_t>(input, tau);
});
}
// This is a backend library dispatching helper function for calling looped batch implementation
void geqrf_looped(const Tensor& input, const Tensor& tau) {
#if defined(USE_CUSOLVER)
return geqrf_cusolver(input, tau);
#else
return geqrf_magma(input, tau);
#endif
}
// This is a backend library dispatching helper function for calling specialized batched implementation
void geqrf_batched(const Tensor& input, const Tensor& tau) {
#ifdef CUDART_VERSION
// if cuBLAS is available
return geqrf_batched_cublas(input, tau);
#else
// TODO: implement MAGMA-based path using magma_zgeqrf_expert_batched
return geqrf_looped(input, tau);
#endif
}
void geqrf_kernel(const Tensor& input, const Tensor& tau) {
// if the number of rows is smaller than 32, the batched path is always faster for batch size > 1
// for a larger number of rows, the number-of-batches condition below decides which path is used
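// For example: a 128-row input takes the batched path once batchCount(input) >= 8
// (std::max<int64_t>(2, 128 / 16)), while a 512-row input always takes the looped path.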
if (input.size(-2) <= 256 && batchCount(input) >= std::max<int64_t>(2, input.size(-2) / 16)) {
return geqrf_batched(input, tau);
} else {
return geqrf_looped(input, tau);
}
}
REGISTER_DISPATCH(geqrf_stub, &geqrf_kernel);
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t q_size_minus_2, int64_t r_size_minus_1, int64_t n_columns,
bool compute_q) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_int_t m = magma_int_cast(q_size_minus_2, "Q.size(-2)");
magma_int_t n = magma_int_cast(r_size_minus_1, "R.size(-1)");
auto r_data = R.data_ptr<scalar_t>();
auto r_matrix_stride = matrixStride(R);
magma_int_t k = m < n ? m : n;
magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
int64_t batch_size = batchCount(R);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
scalar_t* tau_data = tau.data_ptr<scalar_t>();
scalar_t* work_data = work.data_ptr<scalar_t>();
// This phase computes R (the raw version)
// This uses MAGMA's ?geqrf2_gpu function
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
checkMagmaInternalError(info, "geqrf");
}
if (!compute_q) {
// this is for mode='r'
return;
}
// This phase computes Q (the raw version)
// We need to perform ?geqrf_gpu again due to this bug in MAGMA:
// - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
// Refer to the below link for more details:
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
auto q_data = Q.data_ptr<scalar_t>();
auto q_matrix_stride = matrixStride(Q);
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
checkMagmaInternalError(info, "geqrf");
magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
checkMagmaInternalError(info, "orgqr");
}
#endif
}
std::tuple<Tensor, Tensor> linalg_qr_helper_magma(const Tensor& self, std::string mode) {
bool compute_q, reduced;
std::tie(compute_q, reduced) = _parse_qr_mode(mode);
// Setup input geometry and inputs for apply_qr
std::vector<int64_t> q_sizes, q_strides;
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, reduced);
Tensor q_working_copy, r_working_copy;
// If there are no elements, then we simply return a pair of tensors of required dimensions
if (self.numel() == 0) {
int64_t n = self.size(-1);
auto r_shape = self.sizes().vec();
r_shape.end()[-2] = n_columns_q;
r_shape.end()[-1] = n;
r_working_copy = at::empty(r_shape, self.options());
if (compute_q) {
auto q_shape = q_sizes;
q_shape.end()[-1] = n_columns_q;
q_working_copy = at::zeros(q_shape, self.options());
q_working_copy.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
} else {
q_working_copy = at::empty({0}, self.options());
}
return std::make_tuple(q_working_copy, r_working_copy);
}
if (compute_q) {
q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
} else {
q_working_copy = at::empty({0}, self.options());
}
r_working_copy = cloneBatchedColumnMajor(self);
int64_t m = q_sizes[self.dim() - 2];
int64_t n = r_working_copy.size(-1);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "qr_cuda", [&]{
apply_qr<scalar_t>(q_working_copy, r_working_copy, m, n, n_columns_q, compute_q);
});
if (compute_q) {
q_working_copy = q_working_copy.narrow(-1, 0, n_columns_q);
}
r_working_copy = r_working_copy.narrow(-2, 0, n_columns_q).triu();
return std::make_tuple(q_working_copy, r_working_copy);
}
std::tuple<Tensor, Tensor> _linalg_qr_helper_cuda(const Tensor& input, std::string mode) {
#if defined(USE_CUSOLVER)
// _linalg_qr_helper_default is a generic function that is implemented using
// geqrf_stub and orgqr_stub. It dispatches to cuSOLVER for CUDA inputs if USE_CUSOLVER is defined
return _linalg_qr_helper_default(input, mode);
#else
return linalg_qr_helper_magma(input, mode);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_magma_eigh(Tensor& values, Tensor& vectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.linalg.eigh/eigvalsh on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == kCPU);
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_vec_t jobz = compute_eigenvectors ? MagmaVec : MagmaNoVec;
magma_int_t n = magma_int_cast(vectors.size(-1), "n");
auto lda = std::max<magma_int_t>(1, n);
auto batch_size = batchCount(vectors);
auto vectors_stride = matrixStride(vectors);
auto values_stride = values.size(-1);
auto vectors_data = vectors.data_ptr<scalar_t>();
auto values_data = values.data_ptr<value_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
scalar_t* wA;
ALLOCATE_ARRAY(wA, scalar_t, lda * lda);
// Run once, first to get the optimum work sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
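// Passing lwork/lrwork/liwork == -1 requests a workspace-size query: the routine only
// writes the optimal sizes into wkopt/rwkopt/iwkopt without computing the decomposition.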
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
magma_int_t lrwork = -1;
value_t rwkopt;
magmaSyevd<scalar_t, value_t>(jobz, uplo, n, vectors_data, lda, values_data,
wA, lda, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, infos_data);
scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(std::max<int64_t>(1, real_impl<scalar_t, value_t>(wkopt)), "work_size");
liwork = magma_int_cast(std::max<int64_t>(1, iwkopt), "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
value_t* rwork = nullptr;
c10::Storage storage_rwork;
if (vectors.is_complex()) {
lrwork = magma_int_cast(std::max<int64_t>(1, rwkopt), "rwork_size");
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride];
value_t* values_working_ptr = &values_data[i * values_stride];
magma_int_t* info_working_ptr = &infos_data[i];
magmaSyevd<scalar_t, value_t>(jobz, uplo, n, vectors_working_ptr, lda, values_working_ptr,
wA, lda, work, lwork, rwork, lrwork, iwork, liwork, info_working_ptr);
// The current behaviour for Linear Algebra functions to raise an error if something goes wrong
// or input doesn't satisfy some requirement
// therefore return early since further computations will be wasted anyway
if (*info_working_ptr != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
Tensor infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt).device(at::kCPU));
auto eigvals_shape = IntArrayRef(self.sizes().data(), self.dim()-1); // self.shape[:-1]
ScalarType real_dtype = toValueType(self.scalar_type());
// magmaSyevd uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
// The driver routine magma_(d/s)syev_gpu accepts a tensor on the CPU for eigenvalues.
// The data is later moved to the appropriate device.
// In the case where self.numel() == 0, we just return an empty tensor of
// the required dimensions on the CUDA device (to avoid the unnecessary "to(at::kCUDA)")
auto eigvals_working_copy = self.numel() == 0
? at::empty(eigvals_shape, self.options().dtype(real_dtype))
: at::empty(eigvals_shape, self.options().dtype(real_dtype).device(at::kCPU));
if (self.numel() == 0) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
}
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{
apply_magma_eigh<scalar_t>(eigvals_working_copy, self_working_copy, infos, upper, eigenvectors);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "symeig_cuda");
} else {
singleCheckErrors(infos.item().toInt(), "symeig_cuda");
}
if (eigenvectors) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
} else {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eigh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// This is a type dispatch function for 'apply_magma_eigh'
// For small inputs result is computed on CPU
void linalg_eigh_magma(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
// MAGMA just calls LAPACK for eigenvectors.size(-1) <= 128
// See https://bitbucket.org/icl/magma/src/e6fdca447bd402693e8b0b950a898b6879bbcc41/src/zheevd_gpu.cpp?at=master#lines-258
// in addition, lda is ignored, breaking 0x0 inputs
if (eigenvectors.size(-1) > 128) {
// MAGMA requires eigenvalues and infos tensors to reside on CPU
Tensor eigenvalues_cpu = eigenvalues.to(kCPU);
infos = infos.to(kCPU);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
eigenvectors.scalar_type(), "linalg_eigh_cpu", [&] {
apply_magma_eigh<scalar_t>(
eigenvalues_cpu, eigenvectors, infos, upper, compute_eigenvectors);
});
// Transfer computed by MAGMA results from CPU to GPU
eigenvalues.copy_(eigenvalues_cpu);
} else { // eigenvectors.size(-1) <= 128
// transfer to CPU, compute the result and copy back to GPU
// this is faster than going through MAGMA that does the same
Tensor eigenvalues_cpu = at::empty_like(eigenvalues, eigenvalues.options().device(kCPU));
if (compute_eigenvectors) {
Tensor eigenvectors_cpu = at::empty_like(eigenvectors, eigenvectors.options().device(kCPU));
at::linalg_eigh_out(eigenvalues_cpu, eigenvectors_cpu, eigenvectors.to(kCPU), upper ? "U" : "L");
eigenvectors.copy_(eigenvectors_cpu);
} else {
at::linalg_eigvalsh_out(eigenvalues_cpu, eigenvectors.to(kCPU), upper ? "U" : "L");
}
eigenvalues.copy_(eigenvalues_cpu);
}
}
void linalg_eigh_kernel(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
#if defined(USE_CUSOLVER)
linalg_eigh_cusolver(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
#else
linalg_eigh_magma(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
#endif
}
REGISTER_DISPATCH(linalg_eigh_stub, &linalg_eigh_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// magmaEig uses a hybrid CPU-GPU algorithm, which takes and returns CPU
// memory. So, we accept a GPU tensor, copy it to CPU memory, and later copy
// the returned values from CPU to GPU. See also magmaSymeig, which uses a
// similar approach.
template <typename scalar_t>
static void apply_eig(const Tensor& self, bool eigenvectors, Tensor& out_eigvals, Tensor& out_eigvecs,
int64_t *info_ptr) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
"Either transfer the tensor to the CPU before calling torch.eig or recompile with MAGMA.");
#else
TORCH_INTERNAL_ASSERT(self.device() == at::kCPU, "Internal error: apply_eig needs a CPU tensor");
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_vec_t jobvr = eigenvectors ? MagmaVec : MagmaNoVec;
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto self_data = self.data_ptr<scalar_t>();
auto out_eigvals_data = out_eigvals.data_ptr<scalar_t>();
scalar_t *wr = out_eigvals_data;
scalar_t *vr_data = NULL;
magma_int_t ldvr = 1;
if (jobvr == MagmaVec)
{
vr_data = out_eigvecs.data_ptr<scalar_t>();
ldvr = n;
}
value_t *rwork_data = nullptr;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
ALLOCATE_ARRAY(rwork_data, value_t, n*2);
}
if (n > 0) {
// call magmaEig once to get the optimal size of work_data
scalar_t wkopt;
magma_int_t info;
magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, &wkopt, -1, rwork_data, &info);
magma_int_t lwork = static_cast<magma_int_t>(real_impl<scalar_t, value_t>(wkopt));
// call it a 2nd time to do the actual work
scalar_t *work_data = nullptr;
ALLOCATE_ARRAY(work_data, scalar_t, lwork);
magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, work_data, lwork, rwork_data, &info);
*info_ptr = info;
}
#endif
}
/*
* Internal helper; like eig_cuda but:
* 1. assume that self is a square matrix of side "n"
* 2. return CPU tensors (because this is what magmaEig returns), which will be copied to GPU memory
* by the caller
*/
std::tuple<Tensor, Tensor> eig_kernel_impl(const Tensor& self, bool& eigenvectors) {
int64_t n = self.size(-1);
// copy self to pinned CPU memory
auto self_working_copy = at::empty_strided(
{n, n}, // square matrix
{1, n}, // column-ordered, as magmaEig expects
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
// tensors holding the results. We use empty_strided to make them column-ordered
auto options = self.options().device(at::kCPU).memory_format(LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor out_eigvals;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
out_eigvals = at::empty({n}, options);
} else {
out_eigvals = at::empty_strided({n, 2}, {1, n}, options);
}
auto out_eigvecs = eigenvectors
? at::empty_strided({n, n}, {1, n}, options)
: Tensor();
int64_t info;
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "eig_cuda", [&]{
apply_eig<scalar_t>(self_working_copy, eigenvectors, out_eigvals, out_eigvecs, &info);
});
singleCheckErrors(info, "eig_cuda");
return std::tuple<Tensor, Tensor>(out_eigvals, out_eigvecs);
}
REGISTER_DISPATCH(eig_stub, &eig_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the eigenvalues and eigenvectors of n-by-n matrix 'input'.
This is an in-place routine, content of 'input', 'values', 'vectors' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
For more information see MAGMA's documentation for GEEV routine.
*/
template <typename scalar_t>
void apply_linalg_eig(Tensor& values, Tensor& vectors, Tensor& input, Tensor& infos, bool compute_eigenvectors) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.linalg.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
"Either transfer the tensor to the CPU before calling torch.linalg.eig or recompile with MAGMA.");
#else
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.device() == at::kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == at::kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == at::kCPU);
if (compute_eigenvectors) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(vectors.device() == at::kCPU);
}
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_vec_t jobvr = compute_eigenvectors ? MagmaVec : MagmaNoVec;
magma_vec_t jobvl = MagmaNoVec; // only right eigenvectors are computed
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto lda = std::max<magma_int_t>(1, n);
auto batch_size = batchCount(input);
auto input_matrix_stride = matrixStride(input);
auto values_stride = values.size(-1);
auto input_data = input.data_ptr<scalar_t>();
auto values_data = values.data_ptr<scalar_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
auto rvectors_data = compute_eigenvectors ? vectors.data_ptr<scalar_t>() : nullptr;
scalar_t* lvectors_data = nullptr; // only right eigenvectors are computed
int64_t ldvr = compute_eigenvectors ? lda : 1;
int64_t ldvl = 1;
Tensor rwork;
value_t* rwork_data = nullptr;
if (input.is_complex()) {
ScalarType real_dtype = toValueType(input.scalar_type());
rwork = at::empty({lda * 2}, input.options().dtype(real_dtype));
rwork_data = rwork.data_ptr<value_t>();
}
// call magmaEig once to get the optimal size of work_data
scalar_t work_query;
magmaEig<scalar_t, value_t>(jobvl, jobvr, n, input_data, lda, values_data,
lvectors_data, ldvl, rvectors_data, ldvr, &work_query, -1, rwork_data, &infos_data[0]);
magma_int_t lwork = std::max<magma_int_t>(1, static_cast<magma_int_t>(real_impl<scalar_t, value_t>(work_query)));
Tensor work = at::empty({lwork}, input.dtype());
auto work_data = work.data_ptr<scalar_t>();
for (auto i = decltype(batch_size){0}; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
scalar_t* values_working_ptr = &values_data[i * values_stride];
scalar_t* rvectors_working_ptr = compute_eigenvectors ? &rvectors_data[i * input_matrix_stride] : nullptr;
int* info_working_ptr = &infos_data[i];
magmaEig<scalar_t, value_t>(jobvl, jobvr, n, input_working_ptr, lda, values_working_ptr,
lvectors_data, ldvl, rvectors_working_ptr, ldvr, work_data, lwork, rwork_data, info_working_ptr);
}
#endif
}
// This is a type dispatching helper function for 'apply_linalg_eig'
void linalg_eig_kernel(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, const Tensor& input, bool compute_eigenvectors) {
// This function calculates the non-symmetric eigendecomposition in-place
// tensors should be in batched column major memory format
// the content of eigenvalues, eigenvectors and infos is overwritten by 'apply_linalg_eig'
// apply_linalg_eig modifies the provided input matrix in-place, therefore we need a copy
// MAGMA doesn't have a GPU interface for the eigendecomposition, which forces us to transfer 'input' to the CPU
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.is_cuda());
Tensor input_working_copy = at::empty(input.sizes(), input.options().device(kCPU));
input_working_copy.transpose_(-2, -1); // make input_working_copy to have Fortran contiguous memory layout
input_working_copy.copy_(input);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "linalg_eig_out_cuda", [&]{
apply_linalg_eig<scalar_t>(eigenvalues, eigenvectors, input_working_copy, infos, compute_eigenvectors);
});
}
REGISTER_DISPATCH(linalg_eig_stub, &linalg_eig_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
auto batchsize = batchCount(self);
magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
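// jobz follows the gesdd convention: 'A' computes all columns of U and rows of VT,
// 'S' only the leading min(m, n) of them, and 'N' computes singular values only.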
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto lda = std::max<magma_int_t>(1, m);
auto ldvt = std::max<magma_int_t>(1, n);
auto mn = std::min(m, n);
c10::Storage storage_rwork;
value_t* rwork = nullptr;
magma_int_t* iwork;
ALLOCATE_ARRAY(iwork, magma_int_t, 8 * mn);
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
auto lrwork = computeLRWorkDim(jobchar, m, n);
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
magma_int_t info = 0;
// Run once, first to get the optimum work size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt = 1; // MAGMA might not set the value for the optimal workspace therefore use 1 as the default value
magmaSvd<scalar_t, value_t>(jobz, m, n, self_data, lda, S_data, U_data, lda, VT_data, ldvt, &wkopt, lwork, rwork, iwork, &info);
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
scalar_t* work;
ALLOCATE_ARRAY(work, scalar_t, lwork);
for (int64_t i = 0; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_stride];
value_t* S_working_ptr = &S_data[i * S_stride];
scalar_t* U_working_ptr = &U_data[i * U_stride];
scalar_t* VT_working_ptr = &VT_data[i * VT_stride];
// Compute S, U (optionally), VT (optionally)
magmaSvd<scalar_t, value_t>(jobz, m, n, self_working_ptr, lda,
S_working_ptr, U_working_ptr, lda, VT_working_ptr, ldvt, work, lwork, rwork, iwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_legacy(const Tensor& self, bool some, bool compute_uv) {
std::vector<int64_t> infos(batchCount(self), 0);
int64_t m = self.size(-2), n = self.size(-1);
int64_t k = std::min(m, n);
char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);
// The input matrix, U, S and VT have to reside in pinned memory.
// Additionally, the input and U have to be in column major format.
// _create_U_S_VT takes care of a part of these requirements (for U, S and VT)
// For the input matrix, these requirements are taken care of below.
// Specify strides
auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
self_col_major_strides[self.dim() - 2] = 1;
self_col_major_strides[self.dim() - 1] = m;
// Create strided tensor in pinned memory
auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda", [&] {
apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "svd_cuda");
} else {
singleCheckErrors(infos[0], "svd_cuda");
}
U_working_copy = same_stride_to(U_working_copy, self.options());
S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
VT_working_copy = same_stride_to(VT_working_copy, self.options());
if (!compute_uv) {
VT_working_copy.zero_();
U_working_copy.zero_();
}
if (some) {
VT_working_copy = VT_working_copy.narrow(-2, 0, k);
}
// so far we have computed VT, but torch.svd returns V instead. Adjust accordingly.
// Note that the 'apply_svd' routine returns VT = V^T (for real inputs) or VT = V^H (for complex inputs), not V.
VT_working_copy = VT_working_copy.conj();
VT_working_copy.transpose_(-2, -1);
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
#ifdef USE_CUSOLVER
return _svd_helper_cuda_lib(self, some, compute_uv);
#else
return _svd_helper_cuda_legacy(self, some, compute_uv);
#endif
}
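// Illustrative usage sketch (assumed Python-level call, not part of this file): torch.svd on a
// CUDA tensor eventually reaches _svd_helper_cuda above, e.g.
//   A = torch.randn(3, 4, 5, device='cuda')   # batch of 3 matrices, m = 4, n = 5
//   U, S, V = torch.svd(A)                    # some=True => U: (3, 4, 4), S: (3, 4), V: (3, 5, 4)
// The shapes assume the reduced (some=True) decomposition and are given only to illustrate the
// helper's contract.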
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Solves the matrix equation A X = B
X and B are n-by-nrhs matrices; A is represented by its LU factorization.
This is an in-place routine; the content of `b` is overwritten.
This is a "looped" variant that calls the single-input MAGMA function once per matrix of the batched input.
Args:
* `b` - [in] the right hand side matrix B
[out] the solution matrix X
* `lu` - [in] the LU factorization of matrix A (see at::_lu_with_info)
* `pivots` - [in] the pivot indices (see at::_lu_with_info)
For further details, please see the MAGMA documentation for magma_dgetrs_gpu.
*/
template <typename scalar_t>
static void apply_lu_solve_looped_magma(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu_solve on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
// MAGMA requires pivots to be a CPU tensor
Tensor pivots_cpu = pivots.cpu();
auto pivots_data = pivots_cpu.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots_cpu.size(-1);
auto batch_size = batchCount(b);
magma_int_t n = magma_int_cast(lu.size(-2), "n");
magma_int_t nrhs = magma_int_cast(b.size(-1), "nrhs");
auto leading_dimension = std::max<magma_int_t>(1, n);
int info = 0;
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* b_working_ptr = &b_data[i * b_stride];
scalar_t* lu_working_ptr = &lu_data[i * lu_stride];
int* pivots_working_ptr = &pivots_data[i * pivots_stride];
magmaLuSolve<scalar_t>(n, nrhs, lu_working_ptr, leading_dimension, pivots_working_ptr, b_working_ptr, leading_dimension, &info);
// info from magmaLuSolve only reports if the i-th parameter is wrong
// so we don't need to check it all the time
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info == 0);
}
#endif
}
/*
Solves the matrix equation A X = B
X and B are n-by-nrhs matrices; A is represented by its LU factorization.
This is an in-place routine; the content of `b` is overwritten.
This is a specialized batched variant; it is expected to be faster than the "looped" version only for small inputs.
Args:
* `b` - [in] the right hand side matrix B
[out] the solution matrix X
* `lu` - [in] the LU factorization of matrix A (see at::_lu_with_info)
* `pivots` - [in] the pivot indices (see at::_lu_with_info)
For further details, please see the MAGMA documentation for magma_dgetrs_batched.
*/
template <typename scalar_t>
static void apply_lu_solve_batched_magma(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu_solve on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(lu.size(-2), "n");
magma_int_t nrhs = magma_int_cast(b.size(-1), "nrhs");
auto leading_dimension = std::max<magma_int_t>(1, n);
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots.size(-1);
magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");
magma_int_t** pivots_array;
scalar_t** lu_array;
scalar_t** b_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
b_array[i] = &b_data[i * b_stride];
lu_array[i] = &lu_data[i * lu_stride];
}
MAGMAQueue magma_queue(b.get_device());
// Compute the result in batches of 65535
// that is the maximum allowed number for batch_size in MAGMA
constexpr int64_t batch_limit = 65535;
for (int64_t mini_idx = 0; mini_idx < batch_size; mini_idx += batch_limit) {
int64_t nbatches = ::min(batch_limit, batch_size - mini_idx);
scalar_t** lu_array_cur = &lu_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** pivots_array_cur = &pivots_array[mini_idx];
int info;
magmaLuSolveBatched<scalar_t>(
n, nrhs, lu_array_cur, leading_dimension,
pivots_array_cur, b_array_cur, leading_dimension,
info, nbatches, magma_queue);
// info from magmaLuSolveBatched only reports if the i-th parameter is wrong
// so we don't need to check it all the time
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info == 0);
}
#endif
}
static void lu_solve_magma(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
// TODO: compare performance and use the best performing option based on lu's sizes
if (b.dim() == 2) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(b.scalar_type(), "lu_solve_magma", [&]{
apply_lu_solve_looped_magma<scalar_t>(b, lu, pivots);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(b.scalar_type(), "lu_solve_magma", [&]{
apply_lu_solve_batched_magma<scalar_t>(b, lu, pivots);
});
}
}
REGISTER_DISPATCH(lu_solve_stub, &lu_solve_magma);
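// Illustrative usage sketch (assumed Python-level call): lu_solve_stub is what torch.lu_solve
// reaches for CUDA tensors, e.g.
//   A = torch.randn(4, 3, 3, device='cuda')
//   b = torch.randn(4, 3, 2, device='cuda')
//   LU, pivots = torch.lu(A)
//   x = torch.lu_solve(b, LU, pivots)   # b.dim() > 2, so the batched MAGMA path above is taken
// This restates the dispatch in lu_solve_magma; it does not add behavior.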
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lstsq ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_gels(const Tensor& a, Tensor& b, Tensor& infos) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "torch.linalg.lstsq: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto trans = MagmaNoTrans;
auto m = magma_int_cast(a.size(-2), "m");
auto n = magma_int_cast(a.size(-1), "n");
TORCH_CHECK(
m >= n,
"torch.linalg.lstsq: only overdetermined systems (input.size(-2) >= input.size(-1)) are allowed on CUDA");
auto nrhs = magma_int_cast(b.size(-1), "nrhs");
auto ldda = std::max<magma_int_t>(1, m);
auto lddb = std::max<magma_int_t>(1, ::max(m, n));
auto nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
auto lwork = (m - n + nb) * (nrhs + nb) + nrhs * nb;
Tensor hwork = at::empty({static_cast<int64_t>(lwork)}, a.scalar_type());
auto* hwork_ptr = hwork.data_ptr<scalar_t>();
// MAGMA requires infos tensor to live on CPU
infos = infos.to(at::kCPU);
auto infos_data = infos.data_ptr<magma_int_t>();
batch_iterator_with_broadcasting<scalar_t>(a, b,
[&](scalar_t* a_working_ptr, scalar_t* b_working_ptr,
int64_t a_linear_batch_idx) {
magma_int_t* infos_working_ptr = &infos_data[a_linear_batch_idx];
magmaGels<scalar_t>(trans, m, n, nrhs,
a_working_ptr, ldda, b_working_ptr, lddb,
hwork_ptr, lwork, infos_working_ptr);
}
);
#endif
}
void gels_magma(const Tensor& a, Tensor& b, Tensor& infos) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(a.scalar_type(), "gels_magma", [&] {
apply_gels<scalar_t>(a, b, infos);
});
}
void linalg_lstsq_gels(const Tensor& A, const Tensor& B, const Tensor& infos) {
// The steps for using the QR decomposition for solving least squares problems
// are outlined here https://en.wikipedia.org/wiki/QR_decomposition#Using_for_solution_to_linear_inverse_problems
auto m = A.size(-2);
auto n = A.size(-1);
auto mn = ::min(m, n);
// explicitly broadcast the batch dimensions of A
// TODO: revisit this later to use batch_iterator_with_broadcasting in triangular_solve
IntArrayRef A_batch_sizes(A.sizes().data(), A.dim() - 2);
IntArrayRef B_batch_sizes(B.sizes().data(), B.dim() - 2);
std::vector<int64_t> expand_batch_portion = at::infer_size(A_batch_sizes, B_batch_sizes);
auto tau_shape = A.sizes().vec();
tau_shape.pop_back();
tau_shape.back() = mn;
Tensor tau = at::empty(tau_shape, A.options());
if (m >= n) {
// Step 1: compute QR factorization using geqrf
geqrf_kernel(A, tau);
// explicitly broadcast the batch dimensions of A
// we do it after geqrf so that we don't do redundant computations for the same input
auto A_expand_batch = expand_batch_portion;
A_expand_batch.insert(A_expand_batch.end(), {A.size(-2), A.size(-1)});
Tensor A_expanded = A.expand({A_expand_batch});
bool is_fortran_contiguous = A_expanded.transpose(-2, -1).is_contiguous();
Tensor A_broadcasted = is_fortran_contiguous ? A_expanded : cloneBatchedColumnMajor(A_expanded);
auto tau_expand_batch = expand_batch_portion;
tau_expand_batch.push_back(tau.size(-1));
Tensor tau_broadcasted = tau.expand({tau_expand_batch}).contiguous();
// Step 2: B <- Q^H B
ormqr_kernel(A_broadcasted, tau_broadcasted, B, /*left=*/true, /*transpose=*/true);
// Step 3: solve R X = B
bool upper = true;
bool transpose = false;
bool conjugate_transpose = false;
bool unitriangular = false;
triangular_solve_kernel(
const_cast<Tensor&>(A_broadcasted),
const_cast<Tensor&>(B),
const_cast<Tensor&>(infos),
upper, transpose, conjugate_transpose, unitriangular);
} else { // underdetermined case
Tensor Ah = cloneBatchedColumnMajor(A.conj().transpose(-2, -1));
// Step 1: compute QR factorization of conjugate transpose of A using geqrf
geqrf_kernel(Ah, tau);
// explicitly broadcast the batch dimensions of A
// we do it after geqrf so that we don't do redundant computations for the same input
auto A_expand_batch = expand_batch_portion;
A_expand_batch.insert(A_expand_batch.end(), {Ah.size(-2), Ah.size(-1)});
Tensor Ah_expanded = Ah.expand({A_expand_batch});
bool is_fortran_contiguous = Ah_expanded.transpose(-2, -1).is_contiguous();
Tensor Ah_broadcasted = is_fortran_contiguous ? Ah_expanded : cloneBatchedColumnMajor(Ah_expanded);
// Step 2: R^H Z = B
bool upper = true;
bool transpose = true;
bool conjugate_transpose = true;
bool unitriangular = false;
triangular_solve_kernel(
const_cast<Tensor&>(Ah_broadcasted),
const_cast<Tensor&>(B),
const_cast<Tensor&>(infos),
upper, transpose, conjugate_transpose, unitriangular);
// B matrix has the size max(m, n) x nrhs
// triangular_solve_kernel writes its output into the first m rows of B leaving the rest untouched
// we need to set the rest of the rows to zero so that the multiplication from step 3 is correct
B.narrow(-2, m, n - m).zero_();
auto tau_expand_batch = expand_batch_portion;
tau_expand_batch.push_back(tau.size(-1));
Tensor tau_broadcasted = tau.expand({tau_expand_batch}).contiguous();
// Step 3: X <- Q Z
ormqr_kernel(Ah_broadcasted, tau_broadcasted, B, /*left=*/true, /*transpose=*/false);
}
}
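// For reference, the algebra behind the two branches above:
//   m >= n (overdetermined):   A = Q R   =>  argmin_X ||A X - B|| = R^{-1} Q^H B,
//     realized as B <- Q^H B (ormqr) followed by the triangular solve R X = B.
//   m < n  (underdetermined):  A^H = Q R  =>  the minimum-norm solution is X = Q R^{-H} B,
//     realized as the triangular solve R^H Z = B followed by X <- Q Z (ormqr).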
void gels_looped(const Tensor& a, Tensor& b, Tensor& infos) {
#if defined(USE_CUSOLVER)
// linalg_lstsq_gels is a generic function that is implemented using
// geqrf_stub, ormqr_stub, and triangular_solve_stub
// It dispatches to cuSOLVER for CUDA inputs if USE_CUSOLVER is defined
return linalg_lstsq_gels(a, b, infos);
#else
return gels_magma(a, b, infos);
#endif
}
void lstsq_kernel(const Tensor& a, Tensor& b, Tensor& /*rank*/, Tensor& /*singular_values*/, Tensor& infos, double /*rcond*/, std::string /*driver_name*/) {
auto m = a.size(-2);
auto n = a.size(-1);
// first handle the underdetermined case (m < n)
// this case is not supported by MAGMA or cuBLAS
if (m < n) {
#if defined(USE_CUSOLVER)
linalg_lstsq_gels(a, b, infos);
#else
TORCH_CHECK(
false,
"torch.linalg.lstsq: only overdetermined systems (input.size(-2) >= input.size(-1)) are allowed on CUDA. ",
"Please rebuild with cuSOLVER.");
#endif
} else { // m >= n
#ifndef USE_MAGMA
// MAGMA is not available, so we can use either cuBLAS or cuSOLVER here
// the batched vs looped dispatch is implemented based on the following performance results
// https://github.com/pytorch/pytorch/pull/54725#issuecomment-832234456
if (m <= 256 && batchCount(b) >= std::max<int64_t>(2, m / 16)) {
// if CUDART_VERSION is defined then cuBLAS is available
#ifdef CUDART_VERSION
gels_batched_cublas(a, b, infos);
#else
// this would call either cuSOLVER or MAGMA;
// if MAGMA is called, a runtime error is thrown about MAGMA not being found in compilation
gels_looped(a, b, infos);
#endif // CUDART_VERSION
} else {
gels_looped(a, b, infos);
}
#else
// if both MAGMA and cuSOLVER are available this would call cuSOLVER
// MAGMA is called if cuSOLVER is not available
gels_looped(a, b, infos);
#endif // USE_MAGMA
}
}
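// Worked example of the heuristic above for builds without MAGMA (sizes are assumed, purely
// illustrative): m = 64 with a batch of 8 satisfies m <= 256 and 8 >= max(2, 64 / 16) = 4, so the
// batched cuBLAS path is taken when CUDART_VERSION is defined; m = 512 fails the size check and
// falls back to gels_looped.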
REGISTER_DISPATCH(lstsq_stub, &lstsq_kernel);
}} // namespace at::native
#undef ALLOCATE_ARRAY
| 5c75d56c94389d13e2f0e583eb711890ae824567.cu | #include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/PinnedMemoryAllocator.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/cuda/MiscUtils.h>
#include <ATen/native/Resize.h>
#include <ATen/native/BatchLinearAlgebra.h>
#include <ATen/native/cuda/BatchLinearAlgebraLib.h>
#include <ATen/native/cpu/zmath.h>
#include <THC/THC.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma_types.h>
#include <magma_v2.h>
const bool use_magma_ = true;
#else
const bool use_magma_ = false;
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info);
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info);
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n);
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info);
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info);
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n);
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2);
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSyevd(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaEig(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, scalar_t *A, magma_int_t lda,
scalar_t *w, scalar_t *VL, magma_int_t ldvl,
scalar_t *VR, magma_int_t ldvr, scalar_t *work, magma_int_t lwork,
value_t *rwork,
magma_int_t *info);
template<class scalar_t, class value_t=scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, value_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
value_t* rwork,
magma_int_t* iwork, magma_int_t* info);
template<class scalar_t>
void magmaLuSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaLuSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaGels(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb,
scalar_t* hwork, magma_int_t lwork, magma_int_t* info);
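// The generic declarations above are given explicit specializations below, one per supported
// dtype. Complex tensors arrive as c10::complex<float>/c10::complex<double> and are
// reinterpret_cast to the layout-compatible magmaFloatComplex/magmaDoubleComplex before the
// underlying MAGMA routines are called.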
template<>
void magmaSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolve<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesv_gpu(n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolve<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesv_gpu(n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<double>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_zgesv_batched(n, nrhs,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<float>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_cgesv_batched(n, nrhs,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetrf_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetrf_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zgetrf_nopiv_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cgetrf_nopiv_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
return magma_get_dgetri_nb(n);
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
return magma_get_sgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<double>>(
magma_int_t n) {
return magma_get_zgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<float>>(
magma_int_t n) {
return magma_get_cgetri_nb(n);
}
template<>
void magmaGetri<double>(
magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetri<float>(
magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetri<c10::complex<double>>(
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
magma_int_t* ipiv,
c10::complex<double>* dwork,
magma_int_t lwork,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetri_gpu(
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaDoubleComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetri<c10::complex<float>>(
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
magma_int_t* ipiv,
c10::complex<float>* dwork,
magma_int_t lwork,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetri_gpu(
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaFloatComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<double>>(
magma_int_t n,
c10::complex<double>** dA_array,
magma_int_t ldda,
magma_int_t** ipiv_array,
c10::complex<double>** dinvA_array,
magma_int_t lddia,
magma_int_t* info_array,
magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetri_outofplace_batched(
n,
reinterpret_cast<magmaDoubleComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<float>>(
magma_int_t n,
c10::complex<float>** dA_array,
magma_int_t ldda,
magma_int_t** ipiv_array,
c10::complex<float>** dinvA_array,
magma_int_t lddia,
magma_int_t* info_array,
magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetri_outofplace_batched(
n,
reinterpret_cast<magmaFloatComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaFloatComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrs_gpu(uplo, n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrs_gpu(uplo, n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda,
reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_zpotrs_batched(uplo, n, nrhs,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_cpotrs_batched(uplo, n, nrhs,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<double>(
magma_uplo_t uplo, magma_int_t n, double* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<float>(
magma_uplo_t uplo, magma_int_t n, float* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<double>(
magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<float>(
magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<double>** dA_array, magma_int_t ldda, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmaDoubleComplex alpha({1, 0});
magmablas_ztrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<float>** dA_array, magma_int_t ldda, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmaFloatComplex alpha({1, 0});
magmablas_ctrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
return magma_get_dgeqrf_nb(m, n);
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
return magma_get_sgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<double>>(
magma_int_t m,
magma_int_t n) {
return magma_get_zgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<float>>(
magma_int_t m,
magma_int_t n) {
return magma_get_cgeqrf_nb(m, n);
}
template<>
void magmaGeqrf<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGeqrf<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGeqrf<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_zgeqrf_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
info);
} else {
magma_zgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGeqrf<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_cgeqrf_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
info);
} else {
magma_cgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaOrgqr<double>(
magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaOrgqr<float>(
magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaOrgqr<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaOrgqr<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSyevd<double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSyevd<float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSyevd<c10::complex<double>, double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA),
ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSyevd<c10::complex<float>, float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA),
ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaEig<double>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
double *A, magma_int_t lda,
double *w,
double *VL, magma_int_t ldvl,
double *VR, magma_int_t ldvr,
double *work, magma_int_t lwork,
double *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
// magma [sd]geev wants two separate output arrays: wr and wi for the real
// and imaginary parts
double *wr = w;
double *wi = w + n;
(void)rwork; // unused
magma_dgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaEig<float>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
float *A, magma_int_t lda,
float *w,
float *VL, magma_int_t ldvl,
float *VR, magma_int_t ldvr,
float *work, magma_int_t lwork,
float *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
float *wr = w;
float *wi = w + n;
(void)rwork; // unused
magma_sgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaEig<c10::complex<double>, double>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
c10::complex<double> *A, magma_int_t lda,
c10::complex<double> *w,
c10::complex<double> *VL, magma_int_t ldvl,
c10::complex<double> *VR, magma_int_t ldvr,
c10::complex<double> *work, magma_int_t lwork,
double *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
magma_zgeev(jobvl, jobvr, n,
reinterpret_cast<magmaDoubleComplex*>(A), lda,
reinterpret_cast<magmaDoubleComplex*>(w),
reinterpret_cast<magmaDoubleComplex*>(VL), ldvl,
reinterpret_cast<magmaDoubleComplex*>(VR), ldvr,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaEig<c10::complex<float>, float>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
c10::complex<float> *A, magma_int_t lda,
c10::complex<float> *w,
c10::complex<float> *VL, magma_int_t ldvl,
c10::complex<float> *VR, magma_int_t ldvr,
c10::complex<float> *work, magma_int_t lwork,
float *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
magma_cgeev(jobvl, jobvr, n,
reinterpret_cast<magmaFloatComplex*>(A), lda,
reinterpret_cast<magmaFloatComplex*>(w),
reinterpret_cast<magmaFloatComplex*>(VL), ldvl,
reinterpret_cast<magmaFloatComplex*>(VR), ldvr,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
magma_int_t lda, double* s, double* U, magma_int_t ldu,
double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
magma_int_t lda, float* s, float* U, magma_int_t ldu,
float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
float* rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<c10::complex<float>, float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<float>* A,
magma_int_t lda, float* s, c10::complex<float>* U, magma_int_t ldu,
c10::complex<float>* VT, magma_int_t ldvt, c10::complex<float>* work, magma_int_t lwork,
float *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesdd(jobz, m, n, reinterpret_cast<magmaFloatComplex*>(A), lda, s,
reinterpret_cast<magmaFloatComplex*>(U), ldu,
reinterpret_cast<magmaFloatComplex*>(VT), ldvt,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<c10::complex<double>, double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<double>* A,
magma_int_t lda, double* s, c10::complex<double>* U, magma_int_t ldu,
c10::complex<double>* VT, magma_int_t ldvt, c10::complex<double>* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesdd(jobz, m, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, s,
reinterpret_cast<magmaDoubleComplex*>(U), ldu,
reinterpret_cast<magmaDoubleComplex*>(VT), ldvt,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv,
c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv,
c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
double** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
float** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_zgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_cgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGels<float>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
float* dA, magma_int_t ldda, float* dB, magma_int_t lddb,
float* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgels_gpu(trans, m, n, nrhs,
dA, ldda, dB, lddb,
hwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGels<double>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
double* dA, magma_int_t ldda, double* dB, magma_int_t lddb,
double* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgels_gpu(trans, m, n, nrhs,
dA, ldda, dB, lddb,
hwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGels<c10::complex<float>>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* dB, magma_int_t lddb,
c10::complex<float>* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgels_gpu(trans, m, n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda,
reinterpret_cast<magmaFloatComplex*>(dB), lddb,
reinterpret_cast<magmaFloatComplex*>(hwork), lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGels<c10::complex<double>>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* dB, magma_int_t lddb,
c10::complex<double>* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgels_gpu(trans, m, n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb,
reinterpret_cast<magmaDoubleComplex*>(hwork), lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
namespace {
/*
MAGMA can return errors both as a return value and in the info argument.
The return value and info should always be identical.
In general, the meaning is as given in this table.
Predefined error codes are large negative numbers. Using the symbolic
constants below is preferred, but the numeric values can be found in
include/magma_types.h.
Info | Description
----------- | -----------
info = 0 (MAGMA_SUCCESS) | Successful exit
info < 0, but small | For info = -i, the i-th argument had an illegal value
info > 0 | Function-specific error such as singular matrix
MAGMA_ERR_DEVICE_ALLOC | Could not allocate GPU device memory
MAGMA_ERR_HOST_ALLOC | Could not allocate CPU host memory
MAGMA_ERR_ILLEGAL_VALUE | An argument had an illegal value (deprecated; instead it should return -i to say the i-th argument was bad)
MAGMA_ERR_INVALID_PTR | Can't free pointer
MAGMA_ERR_NOT_IMPLEMENTED | Function or option not implemented
MAGMA_ERR_NOT_SUPPORTED | Function or option not supported on the current architecture
*/
void checkMagmaInternalError(magma_int_t info, const std::string& magma_function_name) {
// if info > 0 the error is function-specific, do nothing in this case
TORCH_CHECK(info >= 0,
"MAGMA error: ",
magma_strerror(info),
", info = ", info,
", when calling ", magma_function_name);
}
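// Illustrative behavior (assumed call, e.g. from a wrapper in this file):
// checkMagmaInternalError(-3, "magma_dgesdd") raises a TORCH_CHECK error naming the function and
// info = -3 (the 3rd argument had an illegal value), while a positive, function-specific info
// (e.g. a singular factor) passes through here and is instead surfaced by
// batchCheckErrors/singleCheckErrors at the call sites.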
} // anonymous namespace
#endif // USE_MAGMA
#define ALLOCATE_ARRAY(name, type, size) \
auto storage_##name = pin_memory<type>(size); \
name = static_cast<type*>(storage_##name.data());
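// For example, ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n) expands to
//   auto storage_ipiv_data = pin_memory<magma_int_t>(batch_size * n);
//   ipiv_data = static_cast<magma_int_t*>(storage_ipiv_data.data());
// i.e. the buffer lives in pinned host memory and is kept alive by storage_ipiv_data for the
// enclosing scope.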
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, Tensor& infos_out) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t lda = std::max(magma_int_t{1}, n);
if (b.dim() == 2) {
auto ipiv = at::empty({n}, at::kInt);
// magmaSolve requires infos tensor to live on CPU
Tensor infos = at::empty(infos_out.sizes(), infos_out.options().device(kCPU));
magmaSolve<scalar_t>(n, nrhs, A_data, lda, ipiv.data_ptr<magma_int_t>(),
b_data, lda, infos.data_ptr<magma_int_t>());
infos_out.copy_(infos);
} else {
auto infos_data = infos_out.data_ptr<magma_int_t>();
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 as possible
    // The number of "mini"-batches is floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
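    // Worked example (illustrative numbers): with batch_size = 100000 and
    // batch_limit = 65535, mini_batches = 1, so the loop below covers indices [0, 65535)
    // and the remainder branch handles the remaining 100000 % 65535 = 34465 solves.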
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &infos_data[mini_idx];
magmaSolveBatched<scalar_t>(
n, nrhs, A_array_cur, lda, ipiv_array_cur, b_array_cur, lda,
info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaSolveBatched<scalar_t>(
n, nrhs, &A_array[mini_idx], lda, &ipiv_array[mini_idx], &b_array[mini_idx], lda,
&infos_data[mini_idx], batch_size % batch_limit, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
// infos might not get filled for empty inputs therefore at::zeros is used instead of at::empty
auto infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "solve_cuda", [&]{
apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "solve_cuda");
} else {
singleCheckErrors(infos.item().toInt(), "solve_cuda");
}
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// This is a type dispatching helper function for 'apply_solve'
Tensor& _linalg_solve_out_helper_cuda(Tensor& result, Tensor& input, Tensor& infos) {
// 'result' and 'input' should be in column major order (it should be checked before calling this function)
// the content of 'result', 'input' and 'infos' is overwritten by 'apply_solve'
// 'result' should contain data of 'other' tensor (right-hand-side of the linear system of equations)
  // 'input' should contain data of the original 'input' tensor (left-hand-side of the linear system)
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_solve_out_cpu", [&]{
apply_solve<scalar_t>(result, input, infos);
});
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of the n-by-n matrix 'self'; the result is saved to 'self_inv'.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
'infos_lu' is for holding magmaLU errors, and 'infos_getri' is for holding magmaGetri errors
For more information see MAGMA's documentation for GETRI and GETRF routines.
*/
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
auto infos_lu_data = infos_lu.data_ptr<magma_int_t>();
auto infos_getri_data = infos_getri.data_ptr<magma_int_t>();
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
// MAGMA does not work with batch_size == 0, let's return early in this case
if (batch_size == 0) {
return;
}
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * lda);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
// magmaLuBatched leaves ipiv_data values unwritten for singular matrices.
// Initialize to avoid memory access violations inside magma kernels (gh-51930).
std::fill_n(ipiv_data, batch_size * n, 1);
MAGMAQueue magma_queue(self.get_device());
magmaLuBatched<scalar_t>(
n, n, self_array, lda, ipiv_array, infos_lu_data,
batch_size, magma_queue);
constexpr int64_t batch_limit = 65535;
  // Compute as many batches of 65535 as possible
  // The number of "mini"-batches is floor(batch_size / batch_limit)
  // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur_getri = &infos_getri_data[mini_idx];
magmaGetriBatched<scalar_t>(
n, self_array_cur, lda, ipiv_array_cur, self_inv_array_cur,
lda, info_array_cur_getri, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaGetriBatched<scalar_t>(
n, &self_array[mini_idx], lda, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
lda, &infos_getri_data[mini_idx], batch_size % batch_limit, magma_queue);
}
#endif
}
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
  // magmaLu and magmaGetri require the infos tensor to live on CPU
infos_lu = infos_lu.to(at::kCPU);
infos_getri = infos_getri.to(at::kCPU);
Tensor ipiv = at::empty({lda}, at::kInt);
Tensor dwork = at::empty({lwork}, self.options());
magmaLu<scalar_t>(n, n, self_data, lda, ipiv.data_ptr<magma_int_t>(), infos_lu.data_ptr<magma_int_t>());
magmaGetri<scalar_t>(
n, self_data, lda, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, infos_getri.data_ptr<magma_int_t>());
#endif
}
Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
auto self_inv_working_copy = cloneBatchedColumnMajor(self);
if (self.dim() > 2) {
auto infos_lu = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
auto infos_getri = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_batched_inverse<scalar_t>(
self_working_copy, self_inv_working_copy, infos_lu, infos_getri);
});
batchCheckErrors(infos_lu, "inverse_cuda");
batchCheckErrors(infos_getri, "inverse_cuda");
} else {
    // magmaLu and magmaGetri require the infos tensor to live on CPU
auto infos_lu = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
auto infos_getri = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_single_inverse<scalar_t>(self_inv_working_copy, infos_lu, infos_getri);
});
singleCheckErrors(infos_lu.item().toInt(), "inverse_cuda");
singleCheckErrors(infos_getri.item().toInt(), "inverse_cuda");
}
return self_inv_working_copy;
}
Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER
if ((self.dim() == 2) || (/* self.dim() > 2 && */ batchCount(self) <= 2) || !use_magma_) {
return _inverse_helper_cuda_lib(self); // cusolver or cublas
} else {
return _inverse_helper_cuda_legacy(self); // magma-cuda
}
#else
return _inverse_helper_cuda_legacy(self); // magma-cuda
#endif
}
// This is a type dispatching helper function for 'apply_batched_inverse' and 'singleCheckErrors'
Tensor& _linalg_inv_out_helper_cuda_legacy(Tensor& result, Tensor& infos_lu, Tensor& infos_getri) {
// assuming result is in column major order and contains the matrices to invert
if (result.dim() > 2) {
auto input_working_copy = cloneBatchedColumnMajor(result);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_batched_inverse<scalar_t>(
input_working_copy, result, infos_lu, infos_getri);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_single_inverse<scalar_t>(result, infos_lu, infos_getri);
});
}
return result;
}
// This is a MAGMA/cuSOLVER dispatching helper function
Tensor& _linalg_inv_out_helper_cuda(Tensor &result, Tensor& infos_lu, Tensor& infos_getri) {
// This function calculates the inverse matrix in-place
// result should be in column major order and contain matrices to invert
#ifdef USE_CUSOLVER
if ((result.dim() == 2) || (/* result.dim() > 2 && */ batchCount(result) <= 2) || !use_magma_) {
return _linalg_inv_out_helper_cuda_lib(result, infos_lu, infos_getri); // cusolver or cublas
} else {
return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda
}
#else
return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda
#endif
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp = 0;
if (b.dim() == 2) {
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, lda,
b_data, lda, &info_tmp);
info = info_tmp;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 as possible
    // The number of "mini"-batches is floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array_cur, lda, b_array_cur, lda,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, &A_array[mini_idx], lda, &b_array[mini_idx], lda,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _cholesky_solve_helper_cuda_magma(const Tensor& self, const Tensor& A, bool upper) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info);
});
TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
return self_working_copy;
}
// TODO: cusolverDn<T>potrsBatched only supports nrhs == 1 and does not have good performance.
// Batched cholesky_solve is dispatched to magma.
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
#ifdef USE_CUSOLVER
if (batchCount(self) == 1 || !use_magma_) {
return _cholesky_solve_helper_cuda_cusolver(self, A, upper);
} else {
return _cholesky_solve_helper_cuda_magma(self, A, upper);
}
#else
return _cholesky_solve_helper_cuda_magma(self, A, upper);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky(const Tensor& self, bool upper, const Tensor& info) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.linalg.cholesky on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
auto lda = std::max<magma_int_t>(1, n);
if (self.dim() == 2) {
// magmaCholesky requires info to be on CPU
magma_int_t info_cpu = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, lda, &info_cpu);
info.fill_(info_cpu);
} else {
TORCH_INTERNAL_ASSERT(info.is_cuda());
auto info_data = info.data_ptr<magma_int_t>();
// magmaCholeskyBatched supports only upper=false
uplo = MagmaLower;
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
scalar_t** self_array;
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
    // Compute as many batches of 262140 as possible
    // 262140 is the size of the largest batch of matrices that can be run
    // without violating the maximum kernel configuration
// For complex input the batch limit is 65535 (determined experimentally, see https://github.com/pytorch/pytorch/pull/47047#discussion_r516086923 for more information)
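    // e.g. (illustrative): a real-valued batch of 300000 matrices is processed in two
    // chunks (262140 + 37860), while a complex batch of the same size needs
    // ceil(300000 / 65535) = 5 chunks.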
int64_t batch_limit = self.is_complex() ? 65535 : 262140;
for (int64_t mini_idx = 0; mini_idx < batch_size; mini_idx += batch_limit) {
int64_t nbatches = std::min(batch_limit, batch_size - mini_idx);
scalar_t** self_array_cur = &self_array[mini_idx];
magma_int_t* info_array_cur = &info_data[mini_idx];
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array_cur, lda, info_array_cur, nbatches, magma_queue);
}
}
#endif
}
void cholesky_helper_magma(const Tensor& input, bool upper, const Tensor& info) {
Tensor result = input;
if (input.dim() > 2) {
// MAGMA's batched cholesky operator has an off-by-one error causing IMA
// (see https://github.com/pytorch/pytorch/issues/42666). This code is based
    // on the #cloneBatchedColumnMajor function; however, it pads the input with
    // one extra element, utilizing the fact that the resize_as_ method preserves
// the storage even if it's larger than the new sizes. This way if MAGMA
// reads off bounds it will still be valid user memory.
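    // For example (hypothetical shapes): a 2 x 3 x 3 input allocates 2*3*3 + 1 = 19
    // elements, so a one-element overread past the last matrix still lands in memory
    // owned by this tensor.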
result = at::empty(input.numel() + 1, input.options());
result.resize_as_(input).transpose_(-2, -1);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(result.transpose(-2, -1).is_contiguous());
// batched MAGMA doesn't support upper=true
// we transpose and conjugate the input as a workaround
result.copy_(upper ? input.conj().transpose(-2, -1) : input);
}
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
input.scalar_type(), "cholesky_cuda", [&] {
apply_cholesky<scalar_t>(result, upper, info);
});
if (input.dim() > 2) {
    // if upper=true we need to transpose and conjugate the result tensor
// because the cholesky decomposition is stored in the lower triangular part
if (upper) {
input.copy_(result.conj().transpose(-2, -1));
} else {
input.copy_(result);
}
}
}
// TODO: cusolverDnXpotrfBatched has some numerical issue and is not used
// here. Batched cholesky is dispatched to magma.
// We will switch to cusolverDnXpotrfBatched after the issue is fixed.
// See https://github.com/pytorch/pytorch/issues/53879.
static void cholesky_kernel(const Tensor& input, const Tensor& info, bool upper) {
#ifdef USE_CUSOLVER
if (batchCount(input) == 1 || !use_magma_) {
cholesky_helper_cusolver(input, upper, info);
} else {
cholesky_helper_magma(input, upper, info);
}
#else
cholesky_helper_magma(input, upper, info);
#endif // USE_CUSOLVER
}
REGISTER_DISPATCH(cholesky_stub, &cholesky_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of an n-by-n symmetric (Hermitian) positive-definite matrix 'input' using the Cholesky solver
This is an in-place routine, content of 'input' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
MAGMA requires 'infos' to reside in CPU memory.
For more information see MAGMA's documentation for POTRS routine.
*/
template <typename scalar_t>
static void apply_cholesky_inverse(Tensor& input, Tensor& infos, bool upper) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "cholesky_inverse: MAGMA library not found in compilation. Please rebuild with MAGMA.");
#else
  // magmaCholeskyInverse (magma_dpotri_gpu) is slow because internally
  // it transfers data several times between GPU and CPU and calls a LAPACK routine on the CPU
  // using magmaCholeskySolveBatched is a lot faster
  // note that magmaCholeskySolve is also slow
  // 'input' is modified in-place, so we need to clone it and replace it with a diagonal matrix
  // for apply_cholesky_solve
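  // Concretely: given the Cholesky factorization A = L * L^H (or A = U^H * U), solving
  // A * X = I with cholesky_solve yields X = A^{-1}, which is why the right-hand side
  // prepared below is a batch of identity matrices (ones on the diagonal, zeros elsewhere).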
auto input_working_copy = cloneBatchedColumnMajor(input);
  // the 'input' tensor has to be a batch of diagonal matrices
input.fill_(0);
input.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
Tensor result_u, input_u;
if (input.dim() == 2) {
// unsqueezing here so that the batched version is used
result_u = input.unsqueeze(0);
input_u = input_working_copy.unsqueeze(0);
} else {
result_u = input;
input_u = input_working_copy;
}
// magma's potrs_batched doesn't take matrix-wise array of ints as an 'info' argument
// it returns a single 'magma_int_t'
// if info = 0 the operation is successful, if info = -i, the i-th parameter had an illegal value.
int64_t info_tmp = 0;
apply_cholesky_solve<scalar_t>(result_u, input_u, upper, info_tmp);
infos.fill_(info_tmp);
#endif
}
// This is a type dispatching helper function for 'apply_cholesky_inverse'
Tensor& cholesky_inverse_kernel_impl_magma(Tensor &result, Tensor& infos, bool upper) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "cholesky_inverse_out_cuda", [&]{
apply_cholesky_inverse<scalar_t>(result, infos, upper);
});
return result;
}
Tensor& cholesky_inverse_kernel_impl(Tensor &result, Tensor& infos, bool upper) {
// This function calculates the inverse matrix in-place
// result should be in column major order and contain matrices to invert
// the content of result is overwritten by 'apply_cholesky_inverse'
#ifdef USE_CUSOLVER
if (batchCount(result) == 1 || !use_magma_) {
return cholesky_inverse_kernel_impl_cusolver(result, infos, upper);
} else {
return cholesky_inverse_kernel_impl_magma(result, infos, upper);
}
#else
return cholesky_inverse_kernel_impl_magma(result, infos, upper);
#endif
}
REGISTER_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the LU decomposition of an m×n matrix or batch of matrices in the 'input' tensor.
This is an in-place routine, content of 'input', 'pivots', and 'infos' is overwritten.
This is a "looped" variant for calling the single-input MAGMA function on batched input.
Args:
* `input` - [in] the input matrix for LU decomposition
[out] the LU decomposition
* `pivots` - [out] the pivot indices
* `infos` - [out] error codes, positive values indicate singular matrices
* `compute_pivots` - controls whether LU is computed with or without pivoting
For further details, please see the MAGMA documentation for magma_dgetrf_gpu.
*/
template <typename scalar_t>
static void apply_lu_looped_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
  // magmaLu and magmaLuNoPiv require the infos and pivots tensors to be on CPU
// the data is later copied back to the appropriate output tensor
Tensor infos_cpu = at::empty_like(infos, infos.options().device(kCPU).pinned_memory(true));
auto input_data = input.data_ptr<scalar_t>();
auto infos_data = infos_cpu.data_ptr<magma_int_t>();
auto input_matrix_stride = matrixStride(input);
auto pivots_stride = pivots.size(-1);
auto batch_size = batchCount(input);
magma_int_t m = magma_int_cast(input.size(-2), "m");
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto leading_dimension = std::max<magma_int_t>(1, m);
if (compute_pivots) {
Tensor pivots_cpu = at::empty_like(pivots, pivots.options().device(kCPU).pinned_memory(true));
auto pivots_data = pivots_cpu.data_ptr<magma_int_t>();
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
int* pivots_working_ptr = &pivots_data[i * pivots_stride];
int* infos_working_ptr = &infos_data[i];
magmaLu<scalar_t>(m, n, input_working_ptr, leading_dimension, pivots_working_ptr, infos_working_ptr);
}
pivots.copy_(pivots_cpu, /*non_blocking=*/true);
} else {
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
int* infos_working_ptr = &infos_data[i];
magmaLuNoPiv<scalar_t>(m, n, input_working_ptr, leading_dimension, infos_working_ptr);
}
// fill the pivots tensor with indices using 1-based (Fortran) indexing
auto k = std::min(m, n);
Tensor pivots_tmp = at::arange(1, k + 1, input.options().dtype(at::kInt)).expand_as(pivots);
pivots.copy_(pivots_tmp);
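      // (Illustration: without pivoting no row interchanges are recorded, so for
      // k = min(m, n) = 3 the pivots are simply [1, 2, 3].)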
}
infos.copy_(infos_cpu, /*non_blocking=*/true);
#endif
}
/*
Computes the LU decomposition of an m×n matrix or batch of matrices in the 'input' tensor.
This is an in-place routine, content of 'input', 'pivots', and 'infos' is overwritten.
This is a specialized batched variant; it is expected to be faster than the "looped" version only for small inputs.
Args:
* `input` - [in] the input matrix for LU decomposition
[out] the LU decomposition
* `pivots` - [out] the pivot indices
* `infos` - [out] error codes, positive values indicate singular matrices
* `compute_pivots` - controls whether LU is computed with or without pivoting
For further details, please see the MAGMA documentation for magma_dgetrf_batched.
*/
template <typename scalar_t>
static void apply_lu_batched_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
auto input_data = input.data_ptr<scalar_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
auto input_matrix_stride = matrixStride(input);
magma_int_t batch_size = magma_int_cast(batchCount(input), "batchCount");
// magmaLuBatched doesn't work with zero batch dimensions
// it gives CUDA error: invalid configuration argument
if (batch_size == 0) {
infos.fill_(0);
return;
}
magma_int_t m = magma_int_cast(input.size(-2), "m");
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto leading_dimension = std::max<magma_int_t>(1, m);
scalar_t** input_array;
ALLOCATE_ARRAY(input_array, scalar_t*, batch_size);
// Set up array of pointers to matrices
for (int64_t i = 0; i < batch_size; i++) {
input_array[i] = &input_data[i * input_matrix_stride];
}
MAGMAQueue magma_queue(input.get_device());
if (compute_pivots) {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto pivots_stride = pivots.size(-1);
magma_int_t** pivots_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
}
magmaLuBatched<scalar_t>(m, n, input_array, leading_dimension, pivots_array, infos_data, batch_size, magma_queue);
} else {
magmaLuNoPivBatched<scalar_t>(m, n, input_array, leading_dimension, infos_data, batch_size, magma_queue);
// fill the pivots tensor with indices using 1-based (Fortran) indexing
auto k = std::min(m, n);
Tensor pivots_tmp = at::arange(1, k + 1, input.options().dtype(at::kInt)).expand_as(pivots);
pivots.copy_(pivots_tmp);
}
#endif
}
static void lu_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
// TODO: compare performance and use the best performing option based on input's sizes
if (input.dim() == 2) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "lu_magma", [&]{
apply_lu_looped_magma<scalar_t>(input, pivots, infos, compute_pivots);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "lu_magma", [&]{
apply_lu_batched_magma<scalar_t>(input, pivots, infos, compute_pivots);
});
}
}
REGISTER_DISPATCH(lu_stub, &lu_magma);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_triangular_solve_batched(Tensor& A, Tensor& b, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
trans = conjugate_transpose ? MagmaConjTrans : trans;
magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t m = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t n = magma_int_cast(A.size(-1), "A.size(-1)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
// magma returns early if m <= 0 || n <= 0 for magmaTriangularSolveBatched
// magmaTriangularSolve is calling cuBLAS and it prints
// ** On entry to DTRSM parameter number 9 had an illegal value
// so let's use proper lda parameter here
magma_int_t lda = std::max<magma_int_t>(1, m);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
  // Compute as many batches of 65535 as possible
  // The number of "mini"-batches is floor(batch_size / batch_limit)
  // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit;
int64_t mini_idx; // this is outside the loop because it is used for the case batch_size % batch_limit != 0
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, A_array_cur,
lda, b_array_cur, lda, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, &A_array[mini_idx],
lda, &b_array[mini_idx], lda, batch_size % batch_limit, magma_queue);
}
#endif
}
void triangular_solve_batched_magma(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
(void)infos; // unused
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve_batched<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular);
});
}
void triangular_solve_kernel(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
  // For batch sizes up to 8 and matrix sizes of at least 64x64 the cuBLAS for-loop is faster than the batched version
if (batchCount(A) <= 8 && A.size(-1) >= 64) {
triangular_solve_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
} else {
#ifndef USE_MAGMA
triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
#else
// cuBLAS batched is faster than MAGMA batched up until 512x512, after that MAGMA is faster
if (A.size(-1) <= 512) {
triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
} else {
triangular_solve_batched_magma(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
}
#endif // USE_MAGMA
}
}
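// Dispatch examples (illustrative only): a single 1024x1024 triangular solve
// (batchCount == 1 <= 8 and size 1024 >= 64) takes the cuBLAS for-loop path above;
// a batch of 64 matrices of size 256x256 takes the batched cuBLAS path (256 <= 512);
// and the same batch at 1024x1024 goes to batched MAGMA when MAGMA is available.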
REGISTER_DISPATCH(triangular_solve_stub, &triangular_solve_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ orgqr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tensor& orgqr_kernel_impl(Tensor& result, const Tensor& tau) {
// TODO: It is possible to implement efficient batched orgqr for small tau (tau.size(-1) <= 32)
// using MAGMA, however it fails on Windows because of some illegal memory reads inside MAGMA.
// See discussions in https://github.com/pytorch/pytorch/pull/51348 for comparison of cuSOLVER-MAGMA
// and Windows failure.
// For reference here is the MAGMA-based implementation: https://gist.github.com/IvanYashchuk/2db50002c9d3c1462ff769e6410ad983
#if defined(USE_CUSOLVER)
return orgqr_helper_cusolver(result, tau); // cusolver
#else
TORCH_CHECK(false, "Calling torch.orgqr on a CUDA tensor requires compiling ",
"PyTorch with cuSOLVER. Please use PyTorch built with cuSOLVER support.");
#endif
}
REGISTER_DISPATCH(orgqr_stub, &orgqr_kernel_impl);
void ormqr_kernel(const Tensor& input, const Tensor& tau, const Tensor& other, bool left, bool transpose) {
#if defined(USE_CUSOLVER)
ormqr_cusolver(input, tau, other, left, transpose);
#else
TORCH_CHECK(false,
"Calling torch.ormqr on a CUDA tensor requires compiling ",
"PyTorch with cuSOLVER. Please use PyTorch built with cuSOLVER support.");
#endif
}
REGISTER_DISPATCH(ormqr_stub, &ormqr_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_geqrf(const Tensor& input, const Tensor& tau) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.geqrf on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
magma_int_t m = magma_int_cast(input.size(-2), "m");
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto input_data = input.data_ptr<scalar_t>();
auto input_matrix_stride = matrixStride(input);
auto tau_stride = tau.size(-1);
auto batch_size = batchCount(input);
auto lda = std::max<int>(1, m);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau_cpu = at::empty(tau.sizes(), tau.options().device(at::kCPU).pinned_memory(true));
scalar_t* tau_data = tau_cpu.data_ptr<scalar_t>();
scalar_t* work_data = nullptr; // workspace is not needed for geqrf2_gpu
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
scalar_t* tau_working_ptr = &tau_data[i * tau_stride];
// now compute the actual QR and tau
    // MAGMA's geqrf2_gpu function is used; this version has LAPACK-compliant arguments.
magmaGeqrf<scalar_t>(m, n, input_working_ptr, lda, tau_working_ptr, work_data, &info, /*is_v2=*/true);
checkMagmaInternalError(info, "geqrf");
}
tau.copy_(tau_cpu, /*non_blocking=*/true);
#endif
}
// This is a type dispatching helper function for 'apply_geqrf'
void geqrf_magma(const Tensor& input, const Tensor& tau) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "geqrf_magma", [&]{
apply_geqrf<scalar_t>(input, tau);
});
}
// This is a backend library dispatching helper function for calling looped batch implementation
void geqrf_looped(const Tensor& input, const Tensor& tau) {
#if defined(USE_CUSOLVER)
return geqrf_cusolver(input, tau);
#else
return geqrf_magma(input, tau);
#endif
}
// This is a backend library dispatching helper function for calling specialized batched implementation
void geqrf_batched(const Tensor& input, const Tensor& tau) {
#ifdef CUDART_VERSION
// if cuBLAS is available
return geqrf_batched_cublas(input, tau);
#else
// TODO: implement MAGMA-based path using magma_zgeqrf_expert_batched
return geqrf_looped(input, tau);
#endif
}
void geqrf_kernel(const Tensor& input, const Tensor& tau) {
  // If the number of rows is smaller than 32, batched is always faster for batch size > 1;
  // for a larger number of rows the number-of-batches condition below decides which path is used.
if (input.size(-2) <= 256 && batchCount(input) >= std::max<int64_t>(2, input.size(-2) / 16)) {
return geqrf_batched(input, tau);
} else {
return geqrf_looped(input, tau);
}
}
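// Dispatch examples (illustrative only): a batch of 16 matrices with 128 rows uses the
// batched path (128 <= 256 and 16 >= max(2, 128 / 16) = 8), whereas a single matrix
// (batchCount == 1 < 2) or any input with more than 256 rows falls back to the looped path.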
REGISTER_DISPATCH(geqrf_stub, &geqrf_kernel);
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t q_size_minus_2, int64_t r_size_minus_1, int64_t n_columns,
bool compute_q) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_int_t m = magma_int_cast(q_size_minus_2, "Q.size(-2)");
magma_int_t n = magma_int_cast(r_size_minus_1, "R.size(-1)");
auto r_data = R.data_ptr<scalar_t>();
auto r_matrix_stride = matrixStride(R);
magma_int_t k = m < n ? m : n;
magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
int64_t batch_size = batchCount(R);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
scalar_t* tau_data = tau.data_ptr<scalar_t>();
scalar_t* work_data = work.data_ptr<scalar_t>();
// This phase computes R (the raw version)
// This uses MAGMA's ?geqrf2_gpu function
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
checkMagmaInternalError(info, "geqrf");
}
if (!compute_q) {
// this is for mode='r'
return;
}
// This phase computes Q (the raw version)
  // We need to perform ?geqrf_gpu again due to this bug in MAGMA:
// - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
// Refer to the below link for more details:
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
auto q_data = Q.data_ptr<scalar_t>();
auto q_matrix_stride = matrixStride(Q);
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
checkMagmaInternalError(info, "geqrf");
magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
checkMagmaInternalError(info, "orgqr");
}
#endif
}
std::tuple<Tensor, Tensor> linalg_qr_helper_magma(const Tensor& self, std::string mode) {
bool compute_q, reduced;
std::tie(compute_q, reduced) = _parse_qr_mode(mode);
// Setup input geometry and inputs for apply_qr
std::vector<int64_t> q_sizes, q_strides;
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, reduced);
Tensor q_working_copy, r_working_copy;
// If there are no elements, then we simply return a pair of tensors of required dimensions
if (self.numel() == 0) {
int64_t n = self.size(-1);
auto r_shape = self.sizes().vec();
r_shape.end()[-2] = n_columns_q;
r_shape.end()[-1] = n;
r_working_copy = at::empty(r_shape, self.options());
if (compute_q) {
auto q_shape = q_sizes;
q_shape.end()[-1] = n_columns_q;
q_working_copy = at::zeros(q_shape, self.options());
q_working_copy.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
} else {
q_working_copy = at::empty({0}, self.options());
}
return std::make_tuple(q_working_copy, r_working_copy);
}
if (compute_q) {
q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
} else {
q_working_copy = at::empty({0}, self.options());
}
r_working_copy = cloneBatchedColumnMajor(self);
int64_t m = q_sizes[self.dim() - 2];
int64_t n = r_working_copy.size(-1);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "qr_cuda", [&]{
apply_qr<scalar_t>(q_working_copy, r_working_copy, m, n, n_columns_q, compute_q);
});
if (compute_q) {
q_working_copy = q_working_copy.narrow(-1, 0, n_columns_q);
}
r_working_copy = r_working_copy.narrow(-2, 0, n_columns_q).triu();
return std::make_tuple(q_working_copy, r_working_copy);
}
std::tuple<Tensor, Tensor> _linalg_qr_helper_cuda(const Tensor& input, std::string mode) {
#if defined(USE_CUSOLVER)
// _linalg_qr_helper_default is a generic function that is implemented using
// geqrf_stub and orgqr_stub. It dispatches to cuSOLVER for CUDA inputs if USE_CUSOLVER is defined
return _linalg_qr_helper_default(input, mode);
#else
return linalg_qr_helper_magma(input, mode);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_magma_eigh(Tensor& values, Tensor& vectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.linalg.eigh/eigvalsh on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == kCPU);
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_vec_t jobz = compute_eigenvectors ? MagmaVec : MagmaNoVec;
magma_int_t n = magma_int_cast(vectors.size(-1), "n");
auto lda = std::max<magma_int_t>(1, n);
auto batch_size = batchCount(vectors);
auto vectors_stride = matrixStride(vectors);
auto values_stride = values.size(-1);
auto vectors_data = vectors.data_ptr<scalar_t>();
auto values_data = values.data_ptr<value_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
scalar_t* wA;
ALLOCATE_ARRAY(wA, scalar_t, lda * lda);
// Run once, first to get the optimum work sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
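  // (The query follows the standard LAPACK/MAGMA convention: calling the routine with
  // lwork = -1 performs no computation and instead writes the optimal workspace sizes
  // into wkopt/rwkopt/iwkopt.)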
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
magma_int_t lrwork = -1;
value_t rwkopt;
magmaSyevd<scalar_t, value_t>(jobz, uplo, n, vectors_data, lda, values_data,
wA, lda, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, infos_data);
scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(std::max<int64_t>(1, real_impl<scalar_t, value_t>(wkopt)), "work_size");
liwork = magma_int_cast(std::max<int64_t>(1, iwkopt), "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
value_t* rwork = nullptr;
c10::Storage storage_rwork;
if (vectors.is_complex()) {
lrwork = magma_int_cast(std::max<int64_t>(1, rwkopt), "rwork_size");
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride];
value_t* values_working_ptr = &values_data[i * values_stride];
magma_int_t* info_working_ptr = &infos_data[i];
magmaSyevd<scalar_t, value_t>(jobz, uplo, n, vectors_working_ptr, lda, values_working_ptr,
wA, lda, work, lwork, rwork, lrwork, iwork, liwork, info_working_ptr);
    // The current behaviour for linear algebra functions is to raise an error if something goes wrong
    // or the input doesn't satisfy some requirement,
    // therefore return early since further computations will be wasted anyway
if (*info_working_ptr != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
Tensor infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt).device(at::kCPU));
auto eigvals_shape = IntArrayRef(self.sizes().data(), self.dim()-1); // self.shape[:-1]
ScalarType real_dtype = toValueType(self.scalar_type());
// magmaSyevd uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
  // The driver routine magma_(d/s)syev_gpu accepts a tensor on the CPU for eigenvalues.
// The data is later moved to the appropriate device.
  // In the case where self.numel() == 0, we just return an empty tensor of
  // the appropriate dimensions on the CUDA device (to avoid the unnecessary "to(at::kCUDA)")
auto eigvals_working_copy = self.numel() == 0
? at::empty(eigvals_shape, self.options().dtype(real_dtype))
: at::empty(eigvals_shape, self.options().dtype(real_dtype).device(at::kCPU));
if (self.numel() == 0) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
}
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{
apply_magma_eigh<scalar_t>(eigvals_working_copy, self_working_copy, infos, upper, eigenvectors);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "symeig_cuda");
} else {
singleCheckErrors(infos.item().toInt(), "symeig_cuda");
}
if (eigenvectors) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
} else {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eigh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// This is a type dispatch function for 'apply_magma_eigh'
// For small inputs result is computed on CPU
void linalg_eigh_magma(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
// MAGMA just calls LAPACK for eigenvectors.size(-1) <= 128
// See https://bitbucket.org/icl/magma/src/e6fdca447bd402693e8b0b950a898b6879bbcc41/src/zheevd_gpu.cpp?at=master#lines-258
// in addition lda is ignored breaking 0x0 inputs
if (eigenvectors.size(-1) > 128) {
// MAGMA requires eigenvalues and infos tensors to reside on CPU
Tensor eigenvalues_cpu = eigenvalues.to(kCPU);
infos = infos.to(kCPU);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
eigenvectors.scalar_type(), "linalg_eigh_cpu", [&] {
apply_magma_eigh<scalar_t>(
eigenvalues_cpu, eigenvectors, infos, upper, compute_eigenvectors);
});
// Transfer computed by MAGMA results from CPU to GPU
eigenvalues.copy_(eigenvalues_cpu);
} else { // eigenvectors.size(-1) <= 128
// transfer to CPU, compute the result and copy back to GPU
// this is faster than going through MAGMA that does the same
Tensor eigenvalues_cpu = at::empty_like(eigenvalues, eigenvalues.options().device(kCPU));
if (compute_eigenvectors) {
Tensor eigenvectors_cpu = at::empty_like(eigenvectors, eigenvectors.options().device(kCPU));
at::linalg_eigh_out(eigenvalues_cpu, eigenvectors_cpu, eigenvectors.to(kCPU), upper ? "U" : "L");
eigenvectors.copy_(eigenvectors_cpu);
} else {
at::linalg_eigvalsh_out(eigenvalues_cpu, eigenvectors.to(kCPU), upper ? "U" : "L");
}
eigenvalues.copy_(eigenvalues_cpu);
}
}
void linalg_eigh_kernel(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
#if defined(USE_CUSOLVER)
linalg_eigh_cusolver(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
#else
linalg_eigh_magma(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
#endif
}
REGISTER_DISPATCH(linalg_eigh_stub, &linalg_eigh_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// magmaEig uses a hybrid CPU-GPU algorithm, which takes and return CPU
// memory. So, we accept a GPU tensor, copy it to CPU memory, and later copy
// the returned values from CPU to GPU. See also magmaSymeig, which uses a
// similar approach.
template <typename scalar_t>
static void apply_eig(const Tensor& self, bool eigenvectors, Tensor& out_eigvals, Tensor& out_eigvecs,
int64_t *info_ptr) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
"Either transfer the tensor to the CPU before calling torch.eig or recompile with MAGMA.");
#else
TORCH_INTERNAL_ASSERT(self.device() == at::kCPU, "Internal error: apply_eig needs a CPU tensor");
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_vec_t jobvr = eigenvectors ? MagmaVec : MagmaNoVec;
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto self_data = self.data_ptr<scalar_t>();
auto out_eigvals_data = out_eigvals.data_ptr<scalar_t>();
scalar_t *wr = out_eigvals_data;
scalar_t *vr_data = NULL;
magma_int_t ldvr = 1;
if (jobvr == MagmaVec)
{
vr_data = out_eigvecs.data_ptr<scalar_t>();
ldvr = n;
}
value_t *rwork_data = nullptr;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
ALLOCATE_ARRAY(rwork_data, value_t, n*2);
}
if (n > 0) {
// call magmaEig once to get the optimal size of work_data
scalar_t wkopt;
magma_int_t info;
magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, &wkopt, -1, rwork_data, &info);
magma_int_t lwork = static_cast<magma_int_t>(real_impl<scalar_t, value_t>(wkopt));
    // call it a second time to do the actual work
scalar_t *work_data = nullptr;
ALLOCATE_ARRAY(work_data, scalar_t, lwork);
magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, work_data, lwork, rwork_data, &info);
*info_ptr = info;
}
#endif
}
/*
* Internal helper; like eig_cuda but:
* 1. assume that self is a square matrix of side "n"
* 2. return CPU tensors (because this is what magmaEig returns), which will be copied to GPU memory
* by the caller
*/
std::tuple<Tensor, Tensor> eig_kernel_impl(const Tensor& self, bool& eigenvectors) {
int64_t n = self.size(-1);
// copy self to pinned CPU memory
auto self_working_copy = at::empty_strided(
{n, n}, // square matrix
{1, n}, // column-ordered, as magmaEig expects
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
// tensors holding the results. We use empty_strided to make them column-ordered
auto options = self.options().device(at::kCPU).memory_format(LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor out_eigvals;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
out_eigvals = at::empty({n}, options);
} else {
out_eigvals = at::empty_strided({n, 2}, {1, n}, options);
}
auto out_eigvecs = eigenvectors
? at::empty_strided({n, n}, {1, n}, options)
: Tensor();
int64_t info;
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "eig_cuda", [&]{
apply_eig<scalar_t>(self_working_copy, eigenvectors, out_eigvals, out_eigvecs, &info);
});
singleCheckErrors(info, "eig_cuda");
return std::tuple<Tensor, Tensor>(out_eigvals, out_eigvecs);
}
REGISTER_DISPATCH(eig_stub, &eig_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the eigenvalues and eigenvectors of n-by-n matrix 'input'.
This is an in-place routine, content of 'input', 'values', 'vectors' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
For more information see MAGMA's documentation for GEEV routine.
*/
template <typename scalar_t>
void apply_linalg_eig(Tensor& values, Tensor& vectors, Tensor& input, Tensor& infos, bool compute_eigenvectors) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.linalg.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
"Either transfer the tensor to the CPU before calling torch.linalg.eig or recompile with MAGMA.");
#else
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.device() == at::kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == at::kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == at::kCPU);
if (compute_eigenvectors) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(vectors.device() == at::kCPU);
}
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_vec_t jobvr = compute_eigenvectors ? MagmaVec : MagmaNoVec;
magma_vec_t jobvl = MagmaNoVec; // only right eigenvectors are computed
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto lda = std::max<magma_int_t>(1, n);
auto batch_size = batchCount(input);
auto input_matrix_stride = matrixStride(input);
auto values_stride = values.size(-1);
auto input_data = input.data_ptr<scalar_t>();
auto values_data = values.data_ptr<scalar_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
auto rvectors_data = compute_eigenvectors ? vectors.data_ptr<scalar_t>() : nullptr;
scalar_t* lvectors_data = nullptr; // only right eigenvectors are computed
int64_t ldvr = compute_eigenvectors ? lda : 1;
int64_t ldvl = 1;
Tensor rwork;
value_t* rwork_data = nullptr;
if (input.is_complex()) {
ScalarType real_dtype = toValueType(input.scalar_type());
rwork = at::empty({lda * 2}, input.options().dtype(real_dtype));
rwork_data = rwork.data_ptr<value_t>();
}
// call magmaEig once to get the optimal size of work_data
scalar_t work_query;
magmaEig<scalar_t, value_t>(jobvl, jobvr, n, input_data, lda, values_data,
lvectors_data, ldvl, rvectors_data, ldvr, &work_query, -1, rwork_data, &infos_data[0]);
magma_int_t lwork = std::max<magma_int_t>(1, static_cast<magma_int_t>(real_impl<scalar_t, value_t>(work_query)));
Tensor work = at::empty({lwork}, input.dtype());
auto work_data = work.data_ptr<scalar_t>();
for (auto i = decltype(batch_size){0}; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
scalar_t* values_working_ptr = &values_data[i * values_stride];
scalar_t* rvectors_working_ptr = compute_eigenvectors ? &rvectors_data[i * input_matrix_stride] : nullptr;
int* info_working_ptr = &infos_data[i];
magmaEig<scalar_t, value_t>(jobvl, jobvr, n, input_working_ptr, lda, values_working_ptr,
lvectors_data, ldvl, rvectors_working_ptr, ldvr, work_data, lwork, rwork_data, info_working_ptr);
}
#endif
}
// This is a type dispatching helper function for 'apply_linalg_eig'
void linalg_eig_kernel(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, const Tensor& input, bool compute_eigenvectors) {
// This function calculates the non-symmetric eigendecomposition in-place
// tensors should be in batched column major memory format
// the content of eigenvalues, eigenvectors and infos is overwritten by 'apply_linalg_eig'
// apply_linalg_eig modifies the provided input matrix in-place, therefore we need a copy
// MAGMA doesn't have GPU interface for the eigendecomposition and it forces us to transfer 'input' to CPU
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.is_cuda());
Tensor input_working_copy = at::empty(input.sizes(), input.options().device(kCPU));
  input_working_copy.transpose_(-2, -1);  // make input_working_copy have a Fortran-contiguous memory layout
input_working_copy.copy_(input);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "linalg_eig_out_cuda", [&]{
apply_linalg_eig<scalar_t>(eigenvalues, eigenvectors, input_working_copy, infos, compute_eigenvectors);
});
}
REGISTER_DISPATCH(linalg_eig_stub, &linalg_eig_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
auto batchsize = batchCount(self);
magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto lda = std::max<magma_int_t>(1, m);
auto ldvt = std::max<magma_int_t>(1, n);
auto mn = std::min(m, n);
c10::Storage storage_rwork;
value_t* rwork = nullptr;
magma_int_t* iwork;
ALLOCATE_ARRAY(iwork, magma_int_t, 8 * mn);
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
auto lrwork = computeLRWorkDim(jobchar, m, n);
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
magma_int_t info = 0;
// Run once, first to get the optimum work size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt = 1; // MAGMA might not set the value for the optimal workspace therefore use 1 as the default value
magmaSvd<scalar_t, value_t>(jobz, m, n, self_data, lda, S_data, U_data, lda, VT_data, ldvt, &wkopt, lwork, rwork, iwork, &info);
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
scalar_t* work;
ALLOCATE_ARRAY(work, scalar_t, lwork);
for (int64_t i = 0; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_stride];
value_t* S_working_ptr = &S_data[i * S_stride];
scalar_t* U_working_ptr = &U_data[i * U_stride];
scalar_t* VT_working_ptr = &VT_data[i * VT_stride];
// Compute S, U (optionally), VT (optionally)
magmaSvd<scalar_t, value_t>(jobz, m, n, self_working_ptr, lda,
S_working_ptr, U_working_ptr, lda, VT_working_ptr, ldvt, work, lwork, rwork, iwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_legacy(const Tensor& self, bool some, bool compute_uv) {
std::vector<int64_t> infos(batchCount(self), 0);
int64_t m = self.size(-2), n = self.size(-1);
int64_t k = std::min(m, n);
char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);
// The input matrix, U, S and VT have to reside in pinned memory.
// Additionally, the input and U have to be in column major format.
// _create_U_S_VT takes care of a part of these requirements (for U, S and VT)
  // For the input matrix, these requirements are taken care of below.
// Specify strides
auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
self_col_major_strides[self.dim() - 2] = 1;
self_col_major_strides[self.dim() - 1] = m;
// Create strided tensor in pinned memory
auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda", [&] {
apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "svd_cuda");
} else {
singleCheckErrors(infos[0], "svd_cuda");
}
U_working_copy = same_stride_to(U_working_copy, self.options());
S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
VT_working_copy = same_stride_to(VT_working_copy, self.options());
if (!compute_uv) {
VT_working_copy.zero_();
U_working_copy.zero_();
}
if (some) {
VT_working_copy = VT_working_copy.narrow(-2, 0, k);
}
// so far we have computed VT, but torch.svd returns V instead. Adjust accordingly.
// Note that the 'apply_svd' routine returns VT = V^T (for real inputs) or VT = V^H (for complex inputs), not V.
VT_working_copy = VT_working_copy.conj();
VT_working_copy.transpose_(-2, -1);
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
#ifdef USE_CUSOLVER
return _svd_helper_cuda_lib(self, some, compute_uv);
#else
return _svd_helper_cuda_legacy(self, some, compute_uv);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Solves the matrix equation A X = B,
where X and B are n-by-nrhs matrices and A is represented by its LU factorization.
This is an in-place routine: the content of `b` is overwritten.
This is a "looped" variant that calls the single-input MAGMA function once per matrix in the batch.
Args:
* `b` - [in] the right hand side matrix B
[out] the solution matrix X
* `lu` - [in] the LU factorization of matrix A (see at::_lu_with_info)
* `pivots` - [in] the pivot indices (see at::_lu_with_info)
For further details, please see the MAGMA documentation for magma_dgetrs_gpu.
*/
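/*
Rough sketch (assuming the LAPACK-style factorization A = P L U returned by getrf) of what each
magmaLuSolve / getrs call below does:
  1. apply the row interchanges recorded in `pivots` to the right-hand side
  2. forward substitution with the unit lower-triangular factor L
  3. back substitution with the upper-triangular factor U
The solution X overwrites B, which is why the routine is in-place.
*/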
template <typename scalar_t>
static void apply_lu_solve_looped_magma(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu_solve on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
// MAGMA requires pivots to be a CPU tensor
Tensor pivots_cpu = pivots.cpu();
auto pivots_data = pivots_cpu.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots_cpu.size(-1);
auto batch_size = batchCount(b);
magma_int_t n = magma_int_cast(lu.size(-2), "n");
magma_int_t nrhs = magma_int_cast(b.size(-1), "nrhs");
auto leading_dimension = std::max<magma_int_t>(1, n);
int info = 0;
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* b_working_ptr = &b_data[i * b_stride];
scalar_t* lu_working_ptr = &lu_data[i * lu_stride];
int* pivots_working_ptr = &pivots_data[i * pivots_stride];
magmaLuSolve<scalar_t>(n, nrhs, lu_working_ptr, leading_dimension, pivots_working_ptr, b_working_ptr, leading_dimension, &info);
// info from magmaLuSolve only reports if the i-th parameter is wrong
// so we don't need to check it all the time
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info == 0);
}
#endif
}
/*
Solves the matrix equation A X = B,
where X and B are n-by-nrhs matrices and A is represented by its LU factorization.
This is an in-place routine: the content of `b` is overwritten.
This is a specialized batched variant; it is expected to be faster than the "looped" version only for small inputs.
Args:
* `b` - [in] the right hand side matrix B
[out] the solution matrix X
* `lu` - [in] the LU factorization of matrix A (see at::_lu_with_info)
* `pivots` - [in] the pivot indices (see at::_lu_with_info)
For further details, please see the MAGMA documentation for magma_dgetrs_batched.
*/
template <typename scalar_t>
static void apply_lu_solve_batched_magma(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu_solve on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(lu.size(-2), "n");
magma_int_t nrhs = magma_int_cast(b.size(-1), "nrhs");
auto leading_dimension = std::max<magma_int_t>(1, n);
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots.size(-1);
magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");
magma_int_t** pivots_array;
scalar_t** lu_array;
scalar_t** b_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
b_array[i] = &b_data[i * b_stride];
lu_array[i] = &lu_data[i * lu_stride];
}
MAGMAQueue magma_queue(b.get_device());
// Compute the result in batches of at most 65535 matrices;
// 65535 is the maximum allowed value of batch_size in MAGMA
constexpr int64_t batch_limit = 65535;
for (int64_t mini_idx = 0; mini_idx < batch_size; mini_idx += batch_limit) {
int64_t nbatches = std::min(batch_limit, batch_size - mini_idx);
scalar_t** lu_array_cur = &lu_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** pivots_array_cur = &pivots_array[mini_idx];
int info;
magmaLuSolveBatched<scalar_t>(
n, nrhs, lu_array_cur, leading_dimension,
pivots_array_cur, b_array_cur, leading_dimension,
info, nbatches, magma_queue);
// info from magmaLuSolveBatched only reports if the i-th parameter is wrong
// so we don't need to check it all the time
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info == 0);
}
#endif
}
static void lu_solve_magma(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
// TODO: compare performance and use the best performing option based on lu's sizes
if (b.dim() == 2) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(b.scalar_type(), "lu_solve_magma", [&]{
apply_lu_solve_looped_magma<scalar_t>(b, lu, pivots);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(b.scalar_type(), "lu_solve_magma", [&]{
apply_lu_solve_batched_magma<scalar_t>(b, lu, pivots);
});
}
}
REGISTER_DISPATCH(lu_solve_stub, &lu_solve_magma);
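// Python-level sketch of the op this dispatch ultimately backs (API names as of this code's
// vintage; treat as illustrative):
//   LU, pivots = torch.lu(A)           # LU factorization with partial pivoting
//   X = torch.lu_solve(B, LU, pivots)  # solves A @ X = B using that factorization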
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lstsq ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_gels(const Tensor& a, Tensor& b, Tensor& infos) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "torch.linalg.lstsq: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto trans = MagmaNoTrans;
auto m = magma_int_cast(a.size(-2), "m");
auto n = magma_int_cast(a.size(-1), "n");
TORCH_CHECK(
m >= n,
"torch.linalg.lstsq: only overdetermined systems (input.size(-2) >= input.size(-1)) are allowed on CUDA");
auto nrhs = magma_int_cast(b.size(-1), "nrhs");
auto ldda = std::max<magma_int_t>(1, m);
auto lddb = std::max<magma_int_t>(1, std::max(m, n));
auto nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
auto lwork = (m - n + nb) * (nrhs + nb) + nrhs * nb;
Tensor hwork = at::empty({static_cast<int64_t>(lwork)}, a.scalar_type());
auto* hwork_ptr = hwork.data_ptr<scalar_t>();
// MAGMA requires infos tensor to live on CPU
infos = infos.to(at::kCPU);
auto infos_data = infos.data_ptr<magma_int_t>();
batch_iterator_with_broadcasting<scalar_t>(a, b,
[&](scalar_t* a_working_ptr, scalar_t* b_working_ptr,
int64_t a_linear_batch_idx) {
magma_int_t* infos_working_ptr = &infos_data[a_linear_batch_idx];
magmaGels<scalar_t>(trans, m, n, nrhs,
a_working_ptr, ldda, b_working_ptr, lddb,
hwork_ptr, lwork, infos_working_ptr);
}
);
#endif
}
void gels_magma(const Tensor& a, Tensor& b, Tensor& infos) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(a.scalar_type(), "gels_magma", [&] {
apply_gels<scalar_t>(a, b, infos);
});
}
void linalg_lstsq_gels(const Tensor& A, const Tensor& B, const Tensor& infos) {
// The steps for using the QR decomposition for solving least squares problems
// are outlined here https://en.wikipedia.org/wiki/QR_decomposition#Using_for_solution_to_linear_inverse_problems
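// Worked sketch of the two branches below (Q^H denotes the conjugate transpose of Q):
//   m >= n (overdetermined):  factor A = Q R, then the least-squares solution is
//     X = R^{-1} (Q^H B)                    -> Steps 1-3: geqrf, ormqr, triangular solve
//   m < n (underdetermined):  factor A^H = Q R, so A = R^H Q^H, and the minimum-norm solution is
//     X = Q Z  where  R^H Z = B             -> Steps 1-3: geqrf on A^H, triangular solve, ormqr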
auto m = A.size(-2);
auto n = A.size(-1);
auto mn = std::min(m, n);
// explicitly broadcast the batch dimensions of A
// TODO: revisit this later to use batch_iterator_with_broadcasting in triangular_solve
IntArrayRef A_batch_sizes(A.sizes().data(), A.dim() - 2);
IntArrayRef B_batch_sizes(B.sizes().data(), B.dim() - 2);
std::vector<int64_t> expand_batch_portion = at::infer_size(A_batch_sizes, B_batch_sizes);
auto tau_shape = A.sizes().vec();
tau_shape.pop_back();
tau_shape.back() = mn;
Tensor tau = at::empty(tau_shape, A.options());
if (m >= n) {
// Step 1: compute QR factorization using geqrf
geqrf_kernel(A, tau);
// explicitly broadcast the batch dimensions of A
// we do it after geqrf so that we don't do redundant computations for the same input
auto A_expand_batch = expand_batch_portion;
A_expand_batch.insert(A_expand_batch.end(), {A.size(-2), A.size(-1)});
Tensor A_expanded = A.expand({A_expand_batch});
bool is_fortran_contiguous = A_expanded.transpose(-2, -1).is_contiguous();
Tensor A_broadcasted = is_fortran_contiguous ? A_expanded : cloneBatchedColumnMajor(A_expanded);
auto tau_expand_batch = expand_batch_portion;
tau_expand_batch.push_back(tau.size(-1));
Tensor tau_broadcasted = tau.expand({tau_expand_batch}).contiguous();
// Step 2: B <- Q^H B
ormqr_kernel(A_broadcasted, tau_broadcasted, B, /*left=*/true, /*transpose=*/true);
// Step 3: solve R X = B
bool upper = true;
bool transpose = false;
bool conjugate_transpose = false;
bool unitriangular = false;
triangular_solve_kernel(
const_cast<Tensor&>(A_broadcasted),
const_cast<Tensor&>(B),
const_cast<Tensor&>(infos),
upper, transpose, conjugate_transpose, unitriangular);
} else { // underdetermined case
Tensor Ah = cloneBatchedColumnMajor(A.conj().transpose(-2, -1));
// Step 1: compute QR factorization of conjugate transpose of A using geqrf
geqrf_kernel(Ah, tau);
// explicitly broadcast the batch dimensions of A
// we do it after geqrf so that we don't do redundant computations for the same input
auto A_expand_batch = expand_batch_portion;
A_expand_batch.insert(A_expand_batch.end(), {Ah.size(-2), Ah.size(-1)});
Tensor Ah_expanded = Ah.expand({A_expand_batch});
bool is_fortran_contiguous = Ah_expanded.transpose(-2, -1).is_contiguous();
Tensor Ah_broadcasted = is_fortran_contiguous ? Ah_expanded : cloneBatchedColumnMajor(Ah_expanded);
// Step 2: R^H Z = B
bool upper = true;
bool transpose = true;
bool conjugate_transpose = true;
bool unitriangular = false;
triangular_solve_kernel(
const_cast<Tensor&>(Ah_broadcasted),
const_cast<Tensor&>(B),
const_cast<Tensor&>(infos),
upper, transpose, conjugate_transpose, unitriangular);
// B matrix has the size max(m, n) x nrhs
// triangular_solve_kernel writes its output into the first m rows of B, leaving the rest untouched
// we need to set the rest of the rows to zero so that the multiplication from step 3 is correct
B.narrow(-2, m, n - m).zero_();
auto tau_expand_batch = expand_batch_portion;
tau_expand_batch.push_back(tau.size(-1));
Tensor tau_broadcasted = tau.expand({tau_expand_batch}).contiguous();
// Step 3: X <- Q Z
ormqr_kernel(Ah_broadcasted, tau_broadcasted, B, /*left=*/true, /*transpose=*/false);
}
}
void gels_looped(const Tensor& a, Tensor& b, Tensor& infos) {
#if defined(USE_CUSOLVER)
// linalg_lstsq_gels is a generic function that is implemented using
// geqrf_stub, ormqr_stub, and triangular_solve_stub
// It dispatches to cuSOLVER for CUDA inputs if USE_CUSOLVER is defined
return linalg_lstsq_gels(a, b, infos);
#else
return gels_magma(a, b, infos);
#endif
}
void lstsq_kernel(const Tensor& a, Tensor& b, Tensor& /*rank*/, Tensor& /*singular_values*/, Tensor& infos, double /*rcond*/, std::string /*driver_name*/) {
auto m = a.size(-2);
auto n = a.size(-1);
// first handle the underdetermined case (m < n)
// this case is not supported by MAGMA or cuBLAS
if (m < n) {
#if defined(USE_CUSOLVER)
linalg_lstsq_gels(a, b, infos);
#else
TORCH_CHECK(
false,
"torch.linalg.lstsq: only overdetermined systems (input.size(-2) >= input.size(-1)) are allowed on CUDA. ",
"Please rebuild with cuSOLVER.");
#endif
} else { // m >= n
#ifndef USE_MAGMA
// MAGMA is not available, so we can use either cuBLAS or cuSOLVER here
// the batched vs looped dispatch is implemented based on the following performance results
// https://github.com/pytorch/pytorch/pull/54725#issuecomment-832234456
if (m <= 256 && batchCount(b) >= std::max<int64_t>(2, m / 16)) {
// if CUDART_VERSION is defined then cuBLAS is available
#ifdef CUDART_VERSION
gels_batched_cublas(a, b, infos);
#else
// this would call either cuSOLVER or MAGMA;
// if MAGMA is called, a runtime error is thrown about MAGMA not being found in the compilation
gels_looped(a, b, infos);
#endif // CUDART_VERSION
} else {
gels_looped(a, b, infos);
}
#else
// if both MAGMA and cuSOLVER are available this would call cuSOLVER
// MAGMA is called if cuSOLVER is not available
gels_looped(a, b, infos);
#endif // USE_MAGMA
}
}
REGISTER_DISPATCH(lstsq_stub, &lstsq_kernel);
}} // namespace at::native
#undef ALLOCATE_ARRAY
|
b52a566a7d6096a02b6803cbf1a58828f65f4a86.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
#include "vec3.h"
#include "ray.h"
#include "marchobject.h"
#include "world.h"
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
void check_cuda(hipError_t result, char const *const func, const char *const file, int const line) {
if (result) {
std::cout << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
file << ":" << line << " '" << func << "' \n";
// Make sure we call CUDA Device Reset before exiting
hipDeviceReset();
exit(99);
}
}
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
/**
* Host main routine
*/
//__device__ bool raymarchSingle(ray& r, int ignore, int pass, int cnt, World* world, marchobject** objects, int* culled, int tid)//, hitable **world) {
__device__ bool raymarchSingle(ray& r, int ignore, int pass, int cnt, World* world, marchobject** objects)//, hitable **world) {
{
//marchobject::Init();
vec3 isp;
float shadow = 1;
float t = 1;
int winner = -1;
int culled[24];
// ray culledr[16];
int len = 0;
vec3 isp1, isp2;
float t1, t2;
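// Bounding-sphere culling: keep only the objects (at most 24) whose bounding sphere the ray can
// intersect, so the march loop below evaluates distance estimates for nearby candidates only.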
for (int i=0;i<world->length;i++) {
if (len>=24) break;
if (i!=ignore)
if (r.IntersectSphere(objects[i]->pos*-1,vec3(1,1,1)*objects[i]->bbRadius,isp1,isp2,t1,t2)) {
culled[len] = i;
len++;
}
}
ray rwinner;
r.curPos = r.org;
// world->length=7;
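// Sphere tracing: each iteration evaluates every culled object's distance estimate at the current
// point and advances t by the smallest one; a hit is registered when that distance drops below
// the t-scaled precision threshold `precis`.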
for (int i=0;i<cnt;i++) {
float precis = 0.004*t;
float keep=1000.0;
//ray.m_curStep =t; //(ray.m_origin-m_objects[j]->m_position).length();
//ray.setCurrent(t);
r.curPos = r.point_at_parameter(t);
// r.point_at_parameter(t);
int w= -1;
// for (int j=0;j<world->length;j++) {//marchobject* ro: culled) {
for (int l=0;l<len;l++) {//marchobject* ro: culled) {
int j = culled[l];
ray rotr = r.Rotate(objects[j]->rotMat,objects[j]->pos);
float keep2 = objects[j]->intersect(rotr);
if (keep2<keep) {
keep = keep2;
w = j;
}
if (keep2<precis) {
winner = w;
i=cnt;
if (pass==2) {
return true;
}
rwinner = rotr;
break;
}
}
t=t+keep;
}
if (winner!=-1) {
//Ray rotated = winner->m_localRay[tid];//ray.Rotate(winner->m_rotmat, winner->m_position);
ray rotated = rwinner;
//ray.m_currentPos = isp;
// exit(1);
isp = rotated.curPos;
r.intensity = vec3(1,0,0);
vec3 normal = objects[winner]->CalcMarchNormal(rotated.curPos);
normal = objects[winner]->rotMatInv*normal;
vec3 tt(1,2,-213.123);
vec3 tangent =vec3::cross(tt,normal).normalized();
vec3 bi = vec3::cross(tangent,normal).normalized();
// normal = objects[winner]->GetPerturbedNormal(isp,normal,tangent);
// ray.m_reflect = 0;
vec3 reflectionDir = r.dir-normal*2*dot(r.dir, normal);;
vec3 lp = r.curPos;//-winner->m_localPos;
// ray.m_z=10000;
objects[winner]->CalculateLight(&r,normal,tangent,lp,world->light0,reflectionDir,objects,0);
// objects[winner]->reflectivity = 0.9;
// Reflections
if (objects[winner]->reflectivity>0 && r.reflect>0) {
if (objects[winner]->glossiness==1)
{
ray nxt(lp,reflectionDir);
nxt.reflect=r.reflect-1;
raymarchSingle(nxt, winner, 1, 24,world, objects);
r.intensity = r.intensity*(1-objects[winner]->reflectivity) + objects[winner]->reflectivity*nxt.intensity;
}
else {
// Doesn't work
shadow=1;
len=0;
vec3 in = vec3(0,0,0);
for (int j=0;j<shadow;j++) {
vec3 disp = vec3(world->rnd[3*j+0]%1024-512,world->rnd[3*j+1]%1024-512,world->rnd[3*j+2]%1024-512).normalized();
disp = (disp*3 + reflectionDir.normalized()).normalized();
ray nxt(lp,disp);
nxt.reflect=0;
raymarchSingle(nxt, winner, 1, 24,world, objects);
in+=nxt.intensity/(float)shadow;
}
r.intensity = r.intensity*(1-objects[winner]->reflectivity) + objects[winner]->reflectivity*in;
}
shadow = 1;
}
if (pass==0) {
ray shadowRay(lp,world->light0);
if (raymarchSingle(shadowRay, winner, 2,32,world,objects)) {
shadow*=0.5;
}
}
r.intensity*=shadow;
return true;
}
world->sky(r);
return false;
}
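// create_world constructs the concrete marchobject subclasses on the device from the POD copies
// uploaded by the host. Building them inside a kernel (instead of memcpy-ing host objects) keeps
// their polymorphic calls (intersect, CalcMarchNormal, ...) valid in device code, since objects
// with virtual functions must be constructed on the device for their vtables to point at device code.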
__global__ void create_world(marchobject* objects, marchobject** objectsI, int cnt) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
for (int i=0;i<cnt;i++) {
objectsI[i] = nullptr;
if (objects[i].type==0) {
mo_sphere* s = new mo_sphere();
*(objectsI+i) = s;
*s = (mo_sphere&)objects[i];
}
if (objects[i].type==1) {
mo_plane* s = new mo_plane();
*(objectsI+i) = s;
*s = (mo_plane&)objects[i];
// s->pos = vec3(0,4,0);
}
if (objects[i].type==2) {
mo_box* s = new mo_box();
*(objectsI+i) = s;
*s = (mo_box&)objects[i];
s->box = objects[i].p2;
// s->pos = vec3(0,4,0);
}
if (objects[i].type==3) {
mo_torus* s = new mo_torus();
*(objectsI+i) = s;
*s = (mo_torus&)objects[i];
// s->pos = vec3(0,4,0);
}
if (objects[i].type==4) {
mo_cylinder* s = new mo_cylinder();
*(objectsI+i) = s;
*s = (mo_cylinder&)objects[i];
// s->pos = vec3(0,4,0);
}
}
/* *(d_list) = new sphere(vec3(0,0,-1), 0.5);
*(d_list+1) = new sphere(vec3(0,-100.5,-1), 100);
*d_world = new hitable_list(d_list,2);*/
}
}
__global__ void delete_world(marchobject* objects, marchobject** objectsI, int cnt) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
for (int i=0;i<cnt;i++) {
if (objectsI[i]!=nullptr)
delete objectsI[i];
}
}
}
__global__ void renderImage(unsigned char *fb, int max_x, int max_y,vec3 lower_left_corner, vec3 horizontal, vec3 vertical, vec3 origin, World *world, marchobject** objects)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i >= max_x) || (j >= max_y)) return;
int pixel_index = j*max_x + i;
float u = float(i) / float(max_x);
float v = float(j) / float(max_y);
ray r(origin, (lower_left_corner + u*horizontal + v*vertical).normalized());
// ray r(origin, (u*horizontal + v*vertical).normalized());
r.reflect = 2;
// vec3 col(0,0,0);
// fb[pixel_index] = color(r, world);
r.intensity = vec3(0,0,0);
if (raymarchSingle(r,-1,0,90,world, objects)) {
// col = r.intensity;
}
vec3 in = r.intensity.clamp();
fb[3*pixel_index] = in.x()*255;
fb[3*pixel_index+1] = in.y()*255;
fb[3*pixel_index+2] = in.z()*255;
// raymarchSingle(const ray& r, int pass, int cnt, World* world)//, hitable **world)
}
/*__global__ void renderImage(float *fb, int max_x, int max_y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i >= max_x) || (j >= max_y)) return;
int pixel_index = j*max_x*3 + i*3;
fb[pixel_index + 0] = float(i) / max_x;
fb[pixel_index + 1] = float(j) / max_y;
fb[pixel_index + 2] = 0.2;
}
*/
unsigned char *fb = nullptr;
unsigned char* RaytraceImage(int nx, int ny, int* img, World* w) {
int num_pixels = nx*ny;
size_t fb_size = num_pixels*3;
// allocate FB
if (fb==nullptr)
checkCudaErrors(hipMallocManaged((void **)&fb, fb_size));
World* world;
int bytesw = (sizeof(World));
int bytesm = (w->length*(sizeof(marchobject)));
marchobject* objects;
marchobject** objectsI;
checkCudaErrors(hipMallocManaged((void **)&world, bytesw));
checkCudaErrors(hipMallocManaged((void **)&objects, bytesm));
checkCudaErrors(hipMallocManaged((void **)&objectsI, bytesm));
/* world->length = w->length;
for (int i=0;i<w->length;i++)
world->objects[i] = w->objects[i];*/
hipMemcpy(world,w,bytesw,hipMemcpyHostToDevice);
checkCudaErrors(hipGetLastError());
hipMemcpy(objects, w->objects, bytesm, hipMemcpyHostToDevice);
checkCudaErrors(hipGetLastError());
int tx = 8;
int ty = 8;
dim3 blocks(nx/tx+1,ny/ty+1);
dim3 threads(tx,ty);
hipLaunchKernelGGL(( create_world), dim3(1),dim3(1), 0, 0, objects,objectsI, world->length);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
hipLaunchKernelGGL(( renderImage), dim3(blocks), dim3(threads), 0, 0, fb, nx, ny,
w->lower_left_corner,
w->horizontal,
w->vertical,
w->origin, world, objectsI);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
hipLaunchKernelGGL(( delete_world), dim3(1),dim3(1), 0, 0, objects,objectsI, world->length);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipFree(objects));
checkCudaErrors(hipFree(world));
return fb;
}
void TestCuda()
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// Print the vector length to be used, and compute its size
int numElements = 50000;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the device input vector A
float *d_A = NULL;
err = hipMalloc((void **)&d_A, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = hipMalloc((void **)&d_B, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = hipMalloc((void **)&d_C, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
// Free device global memory
err = hipFree(d_A);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_B);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_C);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
// Reset the device and exit
err = hipDeviceReset();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
system("pause");
}
| b52a566a7d6096a02b6803cbf1a58828f65f4a86.cu |
#include <stdio.h>
#include <iostream>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include "vec3.h"
#include "ray.h"
#include "marchobject.h"
#include "world.h"
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
void check_cuda(cudaError_t result, char const *const func, const char *const file, int const line) {
if (result) {
std::cout << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
file << ":" << line << " '" << func << "' \n";
// Make sure we call CUDA Device Reset before exiting
cudaDeviceReset();
exit(99);
}
}
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
/**
* Host main routine
*/
//__device__ bool raymarchSingle(ray& r, int ignore, int pass, int cnt, World* world, marchobject** objects, int* culled, int tid)//, hitable **world) {
__device__ bool raymarchSingle(ray& r, int ignore, int pass, int cnt, World* world, marchobject** objects)//, hitable **world) {
{
//marchobject::Init();
vec3 isp;
float shadow = 1;
float t = 1;
int winner = -1;
int culled[24];
// ray culledr[16];
int len = 0;
vec3 isp1, isp2;
float t1, t2;
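// Bounding-sphere culling: keep only the objects (at most 24) whose bounding sphere the ray can
// intersect, so the march loop below evaluates distance estimates for nearby candidates only.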
for (int i=0;i<world->length;i++) {
if (len>=24) break;
if (i!=ignore)
if (r.IntersectSphere(objects[i]->pos*-1,vec3(1,1,1)*objects[i]->bbRadius,isp1,isp2,t1,t2)) {
culled[len] = i;
len++;
}
}
ray rwinner;
r.curPos = r.org;
// world->length=7;
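// Sphere tracing: each iteration evaluates every culled object's distance estimate at the current
// point and advances t by the smallest one; a hit is registered when that distance drops below
// the t-scaled precision threshold `precis`.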
for (int i=0;i<cnt;i++) {
float precis = 0.004*t;
float keep=1000.0;
//ray.m_curStep =t; //(ray.m_origin-m_objects[j]->m_position).length();
//ray.setCurrent(t);
r.curPos = r.point_at_parameter(t);
// r.point_at_parameter(t);
int w= -1;
// for (int j=0;j<world->length;j++) {//marchobject* ro: culled) {
for (int l=0;l<len;l++) {//marchobject* ro: culled) {
int j = culled[l];
ray rotr = r.Rotate(objects[j]->rotMat,objects[j]->pos);
float keep2 = objects[j]->intersect(rotr);
if (keep2<keep) {
keep = keep2;
w = j;
}
if (keep2<precis) {
winner = w;
i=cnt;
if (pass==2) {
return true;
}
rwinner = rotr;
break;
}
}
t=t+keep;
}
if (winner!=-1) {
//Ray rotated = winner->m_localRay[tid];//ray.Rotate(winner->m_rotmat, winner->m_position);
ray rotated = rwinner;
//ray.m_currentPos = isp;
// exit(1);
isp = rotated.curPos;
r.intensity = vec3(1,0,0);
vec3 normal = objects[winner]->CalcMarchNormal(rotated.curPos);
normal = objects[winner]->rotMatInv*normal;
vec3 tt(1,2,-213.123);
vec3 tangent =vec3::cross(tt,normal).normalized();
vec3 bi = vec3::cross(tangent,normal).normalized();
// normal = objects[winner]->GetPerturbedNormal(isp,normal,tangent);
// ray.m_reflect = 0;
vec3 reflectionDir = r.dir-normal*2*dot(r.dir, normal);;
vec3 lp = r.curPos;//-winner->m_localPos;
// ray.m_z=10000;
objects[winner]->CalculateLight(&r,normal,tangent,lp,world->light0,reflectionDir,objects,0);
// objects[winner]->reflectivity = 0.9;
// Reflections
if (objects[winner]->reflectivity>0 && r.reflect>0) {
if (objects[winner]->glossiness==1)
{
ray nxt(lp,reflectionDir);
nxt.reflect=r.reflect-1;
raymarchSingle(nxt, winner, 1, 24,world, objects);
r.intensity = r.intensity*(1-objects[winner]->reflectivity) + objects[winner]->reflectivity*nxt.intensity;
}
else {
// Doesn't work
shadow=1;
len=0;
vec3 in = vec3(0,0,0);
for (int j=0;j<shadow;j++) {
vec3 disp = vec3(world->rnd[3*j+0]%1024-512,world->rnd[3*j+1]%1024-512,world->rnd[3*j+2]%1024-512).normalized();
disp = (disp*3 + reflectionDir.normalized()).normalized();
ray nxt(lp,disp);
nxt.reflect=0;
raymarchSingle(nxt, winner, 1, 24,world, objects);
in+=nxt.intensity/(float)shadow;
}
r.intensity = r.intensity*(1-objects[winner]->reflectivity) + objects[winner]->reflectivity*in;
}
shadow = 1;
}
if (pass==0) {
ray shadowRay(lp,world->light0);
if (raymarchSingle(shadowRay, winner, 2,32,world,objects)) {
shadow*=0.5;
}
}
r.intensity*=shadow;
return true;
}
world->sky(r);
return false;
}
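// create_world constructs the concrete marchobject subclasses on the device from the POD copies
// uploaded by the host. Building them inside a kernel (instead of memcpy-ing host objects) keeps
// their polymorphic calls (intersect, CalcMarchNormal, ...) valid in device code, since objects
// with virtual functions must be constructed on the device for their vtables to point at device code.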
__global__ void create_world(marchobject* objects, marchobject** objectsI, int cnt) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
for (int i=0;i<cnt;i++) {
objectsI[i] = nullptr;
if (objects[i].type==0) {
mo_sphere* s = new mo_sphere();
*(objectsI+i) = s;
*s = (mo_sphere&)objects[i];
}
if (objects[i].type==1) {
mo_plane* s = new mo_plane();
*(objectsI+i) = s;
*s = (mo_plane&)objects[i];
// s->pos = vec3(0,4,0);
}
if (objects[i].type==2) {
mo_box* s = new mo_box();
*(objectsI+i) = s;
*s = (mo_box&)objects[i];
s->box = objects[i].p2;
// s->pos = vec3(0,4,0);
}
if (objects[i].type==3) {
mo_torus* s = new mo_torus();
*(objectsI+i) = s;
*s = (mo_torus&)objects[i];
// s->pos = vec3(0,4,0);
}
if (objects[i].type==4) {
mo_cylinder* s = new mo_cylinder();
*(objectsI+i) = s;
*s = (mo_cylinder&)objects[i];
// s->pos = vec3(0,4,0);
}
}
/* *(d_list) = new sphere(vec3(0,0,-1), 0.5);
*(d_list+1) = new sphere(vec3(0,-100.5,-1), 100);
*d_world = new hitable_list(d_list,2);*/
}
}
__global__ void delete_world(marchobject* objects, marchobject** objectsI, int cnt) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
for (int i=0;i<cnt;i++) {
if (objectsI[i]!=nullptr)
delete objectsI[i];
}
}
}
__global__ void renderImage(unsigned char *fb, int max_x, int max_y,vec3 lower_left_corner, vec3 horizontal, vec3 vertical, vec3 origin, World *world, marchobject** objects)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i >= max_x) || (j >= max_y)) return;
int pixel_index = j*max_x + i;
float u = float(i) / float(max_x);
float v = float(j) / float(max_y);
ray r(origin, (lower_left_corner + u*horizontal + v*vertical).normalized());
// ray r(origin, (u*horizontal + v*vertical).normalized());
r.reflect = 2;
// vec3 col(0,0,0);
// fb[pixel_index] = color(r, world);
r.intensity = vec3(0,0,0);
if (raymarchSingle(r,-1,0,90,world, objects)) {
// col = r.intensity;
}
vec3 in = r.intensity.clamp();
fb[3*pixel_index] = in.x()*255;
fb[3*pixel_index+1] = in.y()*255;
fb[3*pixel_index+2] = in.z()*255;
// raymarchSingle(const ray& r, int pass, int cnt, World* world)//, hitable **world)
}
/*__global__ void renderImage(float *fb, int max_x, int max_y) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i >= max_x) || (j >= max_y)) return;
int pixel_index = j*max_x*3 + i*3;
fb[pixel_index + 0] = float(i) / max_x;
fb[pixel_index + 1] = float(j) / max_y;
fb[pixel_index + 2] = 0.2;
}
*/
unsigned char *fb = nullptr;
unsigned char* RaytraceImage(int nx, int ny, int* img, World* w) {
int num_pixels = nx*ny;
size_t fb_size = num_pixels*3;
// allocate FB
if (fb==nullptr)
checkCudaErrors(cudaMallocManaged((void **)&fb, fb_size));
World* world;
int bytesw = (sizeof(World));
int bytesm = (w->length*(sizeof(marchobject)));
marchobject* objects;
marchobject** objectsI;
checkCudaErrors(cudaMallocManaged((void **)&world, bytesw));
checkCudaErrors(cudaMallocManaged((void **)&objects, bytesm));
checkCudaErrors(cudaMallocManaged((void **)&objectsI, bytesm));
/* world->length = w->length;
for (int i=0;i<w->length;i++)
world->objects[i] = w->objects[i];*/
cudaMemcpy(world,w,bytesw,cudaMemcpyHostToDevice);
checkCudaErrors(cudaGetLastError());
cudaMemcpy(objects, w->objects, bytesm, cudaMemcpyHostToDevice);
checkCudaErrors(cudaGetLastError());
int tx = 8;
int ty = 8;
dim3 blocks(nx/tx+1,ny/ty+1);
dim3 threads(tx,ty);
create_world<<<1,1>>>(objects,objectsI, world->length);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
renderImage<<<blocks, threads>>>(fb, nx, ny,
w->lower_left_corner,
w->horizontal,
w->vertical,
w->origin, world, objectsI);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
delete_world<<<1,1>>>(objects,objectsI, world->length);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaFree(objects));
checkCudaErrors(cudaFree(world));
return fb;
}
void TestCuda()
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// Print the vector length to be used, and compute its size
int numElements = 50000;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the device input vector A
float *d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = cudaMalloc((void **)&d_B, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = cudaMalloc((void **)&d_C, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
// Free device global memory
err = cudaFree(d_A);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_B);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_C);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
// Reset the device and exit
err = cudaDeviceReset();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
system("pause");
}
|
c95d118fbd6607891f39bb9a7ed4e8ce202b8e7c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define WARPS_PER_GROUP (THREAD_BLOCK_SIZE/TILE_SIZE)
typedef struct {
real3 pos, force, torque, inducedDipole, inducedDipolePolar, sphericalDipole;
real q;
float thole, damp;
#ifdef INCLUDE_QUADRUPOLES
real sphericalQuadrupole[5];
#endif
} AtomData;
inline __device__ void loadAtomData(AtomData& data, int atom, const real4* __restrict__ posq, const real* __restrict__ sphericalDipole,
const real* __restrict__ sphericalQuadrupole, const real* __restrict__ inducedDipole, const real* __restrict__ inducedDipolePolar,
const float2* __restrict__ dampingAndThole) {
real4 atomPosq = posq[atom];
data.pos = make_real3(atomPosq.x, atomPosq.y, atomPosq.z);
data.q = atomPosq.w;
data.sphericalDipole.x = sphericalDipole[atom*3];
data.sphericalDipole.y = sphericalDipole[atom*3+1];
data.sphericalDipole.z = sphericalDipole[atom*3+2];
#ifdef INCLUDE_QUADRUPOLES
data.sphericalQuadrupole[0] = sphericalQuadrupole[atom*5];
data.sphericalQuadrupole[1] = sphericalQuadrupole[atom*5+1];
data.sphericalQuadrupole[2] = sphericalQuadrupole[atom*5+2];
data.sphericalQuadrupole[3] = sphericalQuadrupole[atom*5+3];
data.sphericalQuadrupole[4] = sphericalQuadrupole[atom*5+4];
#endif
data.inducedDipole.x = inducedDipole[atom*3];
data.inducedDipole.y = inducedDipole[atom*3+1];
data.inducedDipole.z = inducedDipole[atom*3+2];
data.inducedDipolePolar.x = inducedDipolePolar[atom*3];
data.inducedDipolePolar.y = inducedDipolePolar[atom*3+1];
data.inducedDipolePolar.z = inducedDipolePolar[atom*3+2];
float2 temp = dampingAndThole[atom];
data.damp = temp.x;
data.thole = temp.y;
}
__device__ real computeDScaleFactor(unsigned int polarizationGroup, int index) {
return (polarizationGroup & 1<<index ? 0 : 1);
}
__device__ float computeMScaleFactor(uint2 covalent, int index) {
int mask = 1<<index;
bool x = (covalent.x & mask);
bool y = (covalent.y & mask);
return (x ? (y ? 0.0f : 0.4f) : (y ? 0.8f : 1.0f));
}
__device__ float computePScaleFactor(uint2 covalent, unsigned int polarizationGroup, int index) {
int mask = 1<<index;
bool x = (covalent.x & mask);
bool y = (covalent.y & mask);
bool p = (polarizationGroup & mask);
return (x && y ? 0.0f : (x && p ? 0.5f : 1.0f));
}
__device__ void computeOneInteraction(AtomData& atom1, AtomData& atom2, bool hasExclusions, float dScale, float pScale, float mScale, float forceFactor,
mixed& energy, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ) {
// Compute the displacement.
real3 delta;
delta.x = atom2.pos.x - atom1.pos.x;
delta.y = atom2.pos.y - atom1.pos.y;
delta.z = atom2.pos.z - atom1.pos.z;
APPLY_PERIODIC_TO_DELTA(delta)
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
if (r2 > CUTOFF_SQUARED)
return;
real rInv = RSQRT(r2);
real r = r2*rInv;
// Rotate the various dipoles and quadrupoles.
real qiRotationMatrix[3][3];
buildQIRotationMatrix(delta, rInv, qiRotationMatrix);
real3 qiUindI = 0.5f*make_real3(qiRotationMatrix[0][1]*atom1.inducedDipole.x + qiRotationMatrix[0][2]*atom1.inducedDipole.y + qiRotationMatrix[0][0]*atom1.inducedDipole.z,
qiRotationMatrix[1][1]*atom1.inducedDipole.x + qiRotationMatrix[1][2]*atom1.inducedDipole.y + qiRotationMatrix[1][0]*atom1.inducedDipole.z,
qiRotationMatrix[2][1]*atom1.inducedDipole.x + qiRotationMatrix[2][2]*atom1.inducedDipole.y + qiRotationMatrix[2][0]*atom1.inducedDipole.z);
real3 qiUindJ = 0.5f*make_real3(qiRotationMatrix[0][1]*atom2.inducedDipole.x + qiRotationMatrix[0][2]*atom2.inducedDipole.y + qiRotationMatrix[0][0]*atom2.inducedDipole.z,
qiRotationMatrix[1][1]*atom2.inducedDipole.x + qiRotationMatrix[1][2]*atom2.inducedDipole.y + qiRotationMatrix[1][0]*atom2.inducedDipole.z,
qiRotationMatrix[2][1]*atom2.inducedDipole.x + qiRotationMatrix[2][2]*atom2.inducedDipole.y + qiRotationMatrix[2][0]*atom2.inducedDipole.z);
real3 qiUinpI = 0.5f*make_real3(qiRotationMatrix[0][1]*atom1.inducedDipolePolar.x + qiRotationMatrix[0][2]*atom1.inducedDipolePolar.y + qiRotationMatrix[0][0]*atom1.inducedDipolePolar.z,
qiRotationMatrix[1][1]*atom1.inducedDipolePolar.x + qiRotationMatrix[1][2]*atom1.inducedDipolePolar.y + qiRotationMatrix[1][0]*atom1.inducedDipolePolar.z,
qiRotationMatrix[2][1]*atom1.inducedDipolePolar.x + qiRotationMatrix[2][2]*atom1.inducedDipolePolar.y + qiRotationMatrix[2][0]*atom1.inducedDipolePolar.z);
real3 qiUinpJ = 0.5f*make_real3(qiRotationMatrix[0][1]*atom2.inducedDipolePolar.x + qiRotationMatrix[0][2]*atom2.inducedDipolePolar.y + qiRotationMatrix[0][0]*atom2.inducedDipolePolar.z,
qiRotationMatrix[1][1]*atom2.inducedDipolePolar.x + qiRotationMatrix[1][2]*atom2.inducedDipolePolar.y + qiRotationMatrix[1][0]*atom2.inducedDipolePolar.z,
qiRotationMatrix[2][1]*atom2.inducedDipolePolar.x + qiRotationMatrix[2][2]*atom2.inducedDipolePolar.y + qiRotationMatrix[2][0]*atom2.inducedDipolePolar.z);
real3 rotatedDipole1 = rotateDipole(atom1.sphericalDipole, qiRotationMatrix);
real3 rotatedDipole2 = rotateDipole(atom2.sphericalDipole, qiRotationMatrix);
real rotatedQuadrupole1[] = {0, 0, 0, 0, 0};
real rotatedQuadrupole2[] = {0, 0, 0, 0, 0};
#ifdef INCLUDE_QUADRUPOLES
rotateQuadupoles(qiRotationMatrix, atom1.sphericalQuadrupole, atom2.sphericalQuadrupole, rotatedQuadrupole1, rotatedQuadrupole2);
#endif
// The field derivatives at I due to permanent and induced moments on J, and vice-versa.
// Also, their derivatives w.r.t. R, which are needed for force calculations
real Vij[9], Vji[9], VjiR[9], VijR[9];
// The field derivatives at I due to only permanent moments on J, and vice-versa.
real Vijp[3], Vijd[3], Vjip[3], Vjid[3];
real rInvVec[7], alphaRVec[8], bVec[5];
// The rInvVec array is defined such that the ith element is R^-i, with the
// dielectric constant folded in, to avoid conversions later.
rInvVec[1] = rInv;
for (int i = 2; i < 7; ++i)
rInvVec[i] = rInvVec[i-1] * rInv;
// The alphaRVec array is defined such that the ith element is (alpha R)^i,
// where kappa (alpha in OpenMM parlance) is the Ewald attenuation parameter.
real ralpha = EWALD_ALPHA*r;
real exp2a = EXP(-(ralpha*ralpha));
#ifdef USE_DOUBLE_PRECISION
const real erfAlphaR = erf(ralpha);
#else
// This approximation for erfc is from Abramowitz and Stegun (1964) p. 299. They cite the following as
// the original source: C. Hastings, Jr., Approximations for Digital Computers (1955). It has a maximum
// error of 1.5e-7.
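// Written out, with t = 1/(1 + 0.3275911*x) and x = alpha*r, the next two lines compute
// erf(x) ~ 1 - (0.254829592*t - 0.284496736*t^2 + 1.421413741*t^3 - 1.453152027*t^4 + 1.061405429*t^5) * exp(-x^2)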
const real t = RECIP(1.0f+0.3275911f*ralpha);
const real erfAlphaR = 1-(0.254829592f+(-0.284496736f+(1.421413741f+(-1.453152027f+1.061405429f*t)*t)*t)*t)*t*exp2a;
#endif
alphaRVec[1] = ralpha;
for (int i = 2; i < 8; ++i)
alphaRVec[i] = alphaRVec[i-1]*ralpha;
real X = 2*exp2a/SQRT_PI;
int doubleFactorial = 1, facCount = 1;
real tmp = alphaRVec[1];
bVec[1] = -erfAlphaR;
for (int i = 2; i < 5; ++i) {
bVec[i] = bVec[i-1] + tmp * X / (real)(doubleFactorial);
facCount = facCount + 2;
doubleFactorial = doubleFactorial * facCount;
tmp *= 2*alphaRVec[2];
}
real dmp = atom1.damp*atom2.damp;
real a = min(atom1.thole, atom2.thole);
real u = fabs(dmp) > 1.0e-5f ? r/dmp : 1e10f;
real au3 = a*u*u*u;
real expau3 = au3 < 50 ? EXP(-au3) : 0;
real a2u6 = au3*au3;
real a3u9 = a2u6*au3;
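// The quantities above implement Thole-style short-range damping: u = r/dmp with dmp = damp_i*damp_j,
// a = min(thole_i, thole_j), and au3 = a*u^3. Each factor below has the form 1 - exp(-a*u^3)*(polynomial
// in a*u^3) and smoothly attenuates the corresponding permanent/induced interaction at short range.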
// Thole damping factors for energies
real thole_c = 1 - expau3;
real thole_d0 = 1 - expau3*(1 + 1.5f*au3);
real thole_d1 = 1 - expau3;
real thole_q0 = 1 - expau3*(1 + au3 + a2u6);
real thole_q1 = 1 - expau3*(1 + au3);
// Thole damping factors for derivatives
real dthole_c = 1 - expau3*(1 + 1.5f*au3);
real dthole_d0 = 1 - expau3*(1 + au3 + 1.5f*a2u6);
real dthole_d1 = 1 - expau3*(1 + au3);
real dthole_q0 = 1 - expau3*(1 + au3 + 0.25f*a2u6 + 0.75f*a3u9);
real dthole_q1 = 1 - expau3*(1 + au3 + 0.75f*a2u6);
// Now we compute the (attenuated) Coulomb operator and its derivatives, contracted with
// permanent moments and induced dipoles. Note that the coefficient of the permanent force
// terms is half of the expected value; this is because we compute the interaction of I with
// the sum of induced and permanent moments on J, as well as the interaction of J with I's
// permanent and induced moments; doing so double counts the permanent-permanent interaction.
real ePermCoef, dPermCoef, eUIndCoef, dUIndCoef, eUInpCoef, dUInpCoef;
// C-C terms (m=0)
ePermCoef = rInvVec[1]*(mScale + bVec[2] - alphaRVec[1]*X);
dPermCoef = -0.5f*(mScale + bVec[2])*rInvVec[2];
Vij[0] = ePermCoef*atom2.q;
Vji[0] = ePermCoef*atom1.q;
VijR[0] = dPermCoef*atom2.q;
VjiR[0] = dPermCoef*atom1.q;
// C-D and C-Uind terms (m=0)
ePermCoef = rInvVec[2]*(mScale + bVec[2]);
eUIndCoef = rInvVec[2]*(pScale*thole_c + bVec[2]);
eUInpCoef = rInvVec[2]*(dScale*thole_c + bVec[2]);
dPermCoef = -rInvVec[3]*(mScale + bVec[2] + alphaRVec[3]*X);
dUIndCoef = -2*rInvVec[3]*(pScale*dthole_c + bVec[2] + alphaRVec[3]*X);
dUInpCoef = -2*rInvVec[3]*(dScale*dthole_c + bVec[2] + alphaRVec[3]*X);
Vij[0] += -(ePermCoef*rotatedDipole2.x + eUIndCoef*qiUindJ.x + eUInpCoef*qiUinpJ.x);
Vji[1] = -(ePermCoef*atom1.q);
VijR[0] += -(dPermCoef*rotatedDipole2.x + dUIndCoef*qiUindJ.x + dUInpCoef*qiUinpJ.x);
VjiR[1] = -(dPermCoef*atom1.q);
Vjip[0] = -(eUInpCoef*atom1.q);
Vjid[0] = -(eUIndCoef*atom1.q);
// D-C and Uind-C terms (m=0)
Vij[1] = ePermCoef*atom2.q;
Vji[0] += ePermCoef*rotatedDipole1.x + eUIndCoef*qiUindI.x + eUInpCoef*qiUinpI.x;
VijR[1] = dPermCoef*atom2.q;
VjiR[0] += dPermCoef*rotatedDipole1.x + dUIndCoef*qiUindI.x + dUInpCoef*qiUinpI.x;
Vijp[0] = eUInpCoef*atom2.q;
Vijd[0] = eUIndCoef*atom2.q;
// D-D and D-Uind terms (m=0)
const real twoThirds = (real) 2/3;
ePermCoef = -twoThirds*rInvVec[3]*(3*(mScale + bVec[3]) + alphaRVec[3]*X);
eUIndCoef = -twoThirds*rInvVec[3]*(3*(pScale*thole_d0 + bVec[3]) + alphaRVec[3]*X);
eUInpCoef = -twoThirds*rInvVec[3]*(3*(dScale*thole_d0 + bVec[3]) + alphaRVec[3]*X);
dPermCoef = rInvVec[4]*(3*(mScale + bVec[3]) + 2*alphaRVec[5]*X);
dUIndCoef = rInvVec[4]*(6*(pScale*dthole_d0 + bVec[3]) + 4*alphaRVec[5]*X);
dUInpCoef = rInvVec[4]*(6*(dScale*dthole_d0 + bVec[3]) + 4*alphaRVec[5]*X);
Vij[1] += ePermCoef*rotatedDipole2.x + eUIndCoef*qiUindJ.x + eUInpCoef*qiUinpJ.x;
Vji[1] += ePermCoef*rotatedDipole1.x + eUIndCoef*qiUindI.x + eUInpCoef*qiUinpI.x;
VijR[1] += dPermCoef*rotatedDipole2.x + dUIndCoef*qiUindJ.x + dUInpCoef*qiUinpJ.x;
VjiR[1] += dPermCoef*rotatedDipole1.x + dUIndCoef*qiUindI.x + dUInpCoef*qiUinpI.x;
Vijp[0] += eUInpCoef*rotatedDipole2.x;
Vijd[0] += eUIndCoef*rotatedDipole2.x;
Vjip[0] += eUInpCoef*rotatedDipole1.x;
Vjid[0] += eUIndCoef*rotatedDipole1.x;
// D-D and D-Uind terms (m=1)
ePermCoef = rInvVec[3]*(mScale + bVec[3] - twoThirds*alphaRVec[3]*X);
eUIndCoef = rInvVec[3]*(pScale*thole_d1 + bVec[3] - twoThirds*alphaRVec[3]*X);
eUInpCoef = rInvVec[3]*(dScale*thole_d1 + bVec[3] - twoThirds*alphaRVec[3]*X);
dPermCoef = -1.5f*rInvVec[4]*(mScale + bVec[3]);
dUIndCoef = -3*rInvVec[4]*(pScale*dthole_d1 + bVec[3]);
dUInpCoef = -3*rInvVec[4]*(dScale*dthole_d1 + bVec[3]);
Vij[2] = ePermCoef*rotatedDipole2.y + eUIndCoef*qiUindJ.y + eUInpCoef*qiUinpJ.y;
Vji[2] = ePermCoef*rotatedDipole1.y + eUIndCoef*qiUindI.y + eUInpCoef*qiUinpI.y;
VijR[2] = dPermCoef*rotatedDipole2.y + dUIndCoef*qiUindJ.y + dUInpCoef*qiUinpJ.y;
VjiR[2] = dPermCoef*rotatedDipole1.y + dUIndCoef*qiUindI.y + dUInpCoef*qiUinpI.y;
Vij[3] = ePermCoef*rotatedDipole2.z + eUIndCoef*qiUindJ.z + eUInpCoef*qiUinpJ.z;
Vji[3] = ePermCoef*rotatedDipole1.z + eUIndCoef*qiUindI.z + eUInpCoef*qiUinpI.z;
VijR[3] = dPermCoef*rotatedDipole2.z + dUIndCoef*qiUindJ.z + dUInpCoef*qiUinpJ.z;
VjiR[3] = dPermCoef*rotatedDipole1.z + dUIndCoef*qiUindI.z + dUInpCoef*qiUinpI.z;
Vijp[1] = eUInpCoef*rotatedDipole2.y;
Vijd[1] = eUIndCoef*rotatedDipole2.y;
Vjip[1] = eUInpCoef*rotatedDipole1.y;
Vjid[1] = eUIndCoef*rotatedDipole1.y;
Vijp[2] = eUInpCoef*rotatedDipole2.z;
Vijd[2] = eUIndCoef*rotatedDipole2.z;
Vjip[2] = eUInpCoef*rotatedDipole1.z;
Vjid[2] = eUIndCoef*rotatedDipole1.z;
// C-Q terms (m=0)
ePermCoef = (mScale + bVec[3])*rInvVec[3];
dPermCoef = -((real) 1/3)*rInvVec[4]*(4.5f*(mScale + bVec[3]) + 2*alphaRVec[5]*X);
Vij[0] += ePermCoef*rotatedQuadrupole2[0];
Vji[4] = ePermCoef*atom1.q;
VijR[0] += dPermCoef*rotatedQuadrupole2[0];
VjiR[4] = dPermCoef*atom1.q;
// Q-C terms (m=0)
Vij[4] = ePermCoef*atom2.q;
Vji[0] += ePermCoef*rotatedQuadrupole1[0];
VijR[4] = dPermCoef*atom2.q;
VjiR[0] += dPermCoef*rotatedQuadrupole1[0];
// D-Q and Uind-Q terms (m=0)
const real fourThirds = (real) 4/3;
ePermCoef = rInvVec[4]*(3*(mScale + bVec[3]) + fourThirds*alphaRVec[5]*X);
eUIndCoef = rInvVec[4]*(3*(pScale*thole_q0 + bVec[3]) + fourThirds*alphaRVec[5]*X);
eUInpCoef = rInvVec[4]*(3*(dScale*thole_q0 + bVec[3]) + fourThirds*alphaRVec[5]*X);
dPermCoef = -fourThirds*rInvVec[5]*(4.5f*(mScale + bVec[3]) + (1 + alphaRVec[2])*alphaRVec[5]*X);
dUIndCoef = -fourThirds*rInvVec[5]*(9*(pScale*dthole_q0 + bVec[3]) + 2*(1 + alphaRVec[2])*alphaRVec[5]*X);
dUInpCoef = -fourThirds*rInvVec[5]*(9*(dScale*dthole_q0 + bVec[3]) + 2*(1 + alphaRVec[2])*alphaRVec[5]*X);
Vij[1] += ePermCoef*rotatedQuadrupole2[0];
Vji[4] += ePermCoef*rotatedDipole1.x + eUIndCoef*qiUindI.x + eUInpCoef*qiUinpI.x;
VijR[1] += dPermCoef*rotatedQuadrupole2[0];
VjiR[4] += dPermCoef*rotatedDipole1.x + dUIndCoef*qiUindI.x + dUInpCoef*qiUinpI.x;
Vijp[0] += eUInpCoef*rotatedQuadrupole2[0];
Vijd[0] += eUIndCoef*rotatedQuadrupole2[0];
// Q-D and Q-Uind terms (m=0)
Vij[4] += -(ePermCoef*rotatedDipole2.x + eUIndCoef*qiUindJ.x + eUInpCoef*qiUinpJ.x);
Vji[1] += -(ePermCoef*rotatedQuadrupole1[0]);
VijR[4] += -(dPermCoef*rotatedDipole2.x + dUIndCoef*qiUindJ.x + dUInpCoef*qiUinpJ.x);
VjiR[1] += -(dPermCoef*rotatedQuadrupole1[0]);
Vjip[0] += -(eUInpCoef*rotatedQuadrupole1[0]);
Vjid[0] += -(eUIndCoef*rotatedQuadrupole1[0]);
// D-Q and Uind-Q terms (m=1)
const real sqrtThree = SQRT((real) 3);
ePermCoef = -sqrtThree*rInvVec[4]*(mScale + bVec[3]);
eUIndCoef = -sqrtThree*rInvVec[4]*(pScale*thole_q1 + bVec[3]);
eUInpCoef = -sqrtThree*rInvVec[4]*(dScale*thole_q1 + bVec[3]);
const real fourSqrtOneThird = 4/sqrt((real) 3);
dPermCoef = fourSqrtOneThird*rInvVec[5]*(1.5f*(mScale + bVec[3]) + 0.5f*alphaRVec[5]*X);
dUIndCoef = fourSqrtOneThird*rInvVec[5]*(3*(pScale*dthole_q1 + bVec[3]) + alphaRVec[5]*X);
dUInpCoef = fourSqrtOneThird*rInvVec[5]*(3*(dScale*dthole_q1 + bVec[3]) + alphaRVec[5]*X);
Vij[2] += ePermCoef*rotatedQuadrupole2[1];
Vji[5] = ePermCoef*rotatedDipole1.y + eUIndCoef*qiUindI.y + eUInpCoef*qiUinpI.y;
VijR[2] += dPermCoef*rotatedQuadrupole2[1];
VjiR[5] = dPermCoef*rotatedDipole1.y + dUIndCoef*qiUindI.y + dUInpCoef*qiUinpI.y;
Vij[3] += ePermCoef*rotatedQuadrupole2[2];
Vji[6] = ePermCoef*rotatedDipole1.z + eUIndCoef*qiUindI.z + eUInpCoef*qiUinpI.z;
VijR[3] += dPermCoef*rotatedQuadrupole2[2];
VjiR[6] = dPermCoef*rotatedDipole1.z + dUIndCoef*qiUindI.z + dUInpCoef*qiUinpI.z;
Vijp[1] += eUInpCoef*rotatedQuadrupole2[1];
Vijd[1] += eUIndCoef*rotatedQuadrupole2[1];
Vijp[2] += eUInpCoef*rotatedQuadrupole2[2];
Vijd[2] += eUIndCoef*rotatedQuadrupole2[2];
// Q-D and Q-Uind terms (m=1)
Vij[5] = -(ePermCoef*rotatedDipole2.y + eUIndCoef*qiUindJ.y + eUInpCoef*qiUinpJ.y);
Vji[2] += -(ePermCoef*rotatedQuadrupole1[1]);
VijR[5] = -(dPermCoef*rotatedDipole2.y + dUIndCoef*qiUindJ.y + dUInpCoef*qiUinpJ.y);
VjiR[2] += -(dPermCoef*rotatedQuadrupole1[1]);
Vij[6] = -(ePermCoef*rotatedDipole2.z + eUIndCoef*qiUindJ.z + eUInpCoef*qiUinpJ.z);
Vji[3] += -(ePermCoef*rotatedQuadrupole1[2]);
VijR[6] = -(dPermCoef*rotatedDipole2.z + dUIndCoef*qiUindJ.z + dUInpCoef*qiUinpJ.z);
VjiR[3] += -(dPermCoef*rotatedQuadrupole1[2]);
Vjip[1] += -(eUInpCoef*rotatedQuadrupole1[1]);
Vjid[1] += -(eUIndCoef*rotatedQuadrupole1[1]);
Vjip[2] += -(eUInpCoef*rotatedQuadrupole1[2]);
Vjid[2] += -(eUIndCoef*rotatedQuadrupole1[2]);
// Q-Q terms (m=0)
ePermCoef = rInvVec[5]*(6*(mScale + bVec[4]) + ((real) 4/45)*(-3 + 10*alphaRVec[2])*alphaRVec[5]*X);
dPermCoef = -rInvVec[6]*(135*(mScale + bVec[4]) + 4*(1 + 2*alphaRVec[2])*alphaRVec[7]*X)/9;
Vij[4] += ePermCoef*rotatedQuadrupole2[0];
Vji[4] += ePermCoef*rotatedQuadrupole1[0];
VijR[4] += dPermCoef*rotatedQuadrupole2[0];
VjiR[4] += dPermCoef*rotatedQuadrupole1[0];
// Q-Q terms (m=1)
const real fourOverFifteen = (real) 4/15;
ePermCoef = -fourOverFifteen*rInvVec[5]*(15*(mScale + bVec[4]) + alphaRVec[5]*X);
dPermCoef = rInvVec[6]*(10*(mScale + bVec[4]) + fourThirds*alphaRVec[7]*X);
Vij[5] += ePermCoef*rotatedQuadrupole2[1];
Vji[5] += ePermCoef*rotatedQuadrupole1[1];
VijR[5] += dPermCoef*rotatedQuadrupole2[1];
VjiR[5] += dPermCoef*rotatedQuadrupole1[1];
Vij[6] += ePermCoef*rotatedQuadrupole2[2];
Vji[6] += ePermCoef*rotatedQuadrupole1[2];
VijR[6] += dPermCoef*rotatedQuadrupole2[2];
VjiR[6] += dPermCoef*rotatedQuadrupole1[2];
// Q-Q terms (m=2)
ePermCoef = rInvVec[5]*(mScale + bVec[4] - fourOverFifteen*alphaRVec[5]*X);
dPermCoef = -2.5f*(mScale + bVec[4])*rInvVec[6];
Vij[7] = ePermCoef*rotatedQuadrupole2[3];
Vji[7] = ePermCoef*rotatedQuadrupole1[3];
VijR[7] = dPermCoef*rotatedQuadrupole2[3];
VjiR[7] = dPermCoef*rotatedQuadrupole1[3];
Vij[8] = ePermCoef*rotatedQuadrupole2[4];
Vji[8] = ePermCoef*rotatedQuadrupole1[4];
VijR[8] = dPermCoef*rotatedQuadrupole2[4];
VjiR[8] = dPermCoef*rotatedQuadrupole1[4];
// Evaluate the energies, forces and torques due to permanent+induced moments
// interacting with just the permanent moments.
energy += forceFactor*0.5f*(
atom1.q*Vij[0] + rotatedDipole1.x*Vij[1] + rotatedDipole1.y*Vij[2] + rotatedDipole1.z*Vij[3] + rotatedQuadrupole1[0]*Vij[4] + rotatedQuadrupole1[1]*Vij[5] + rotatedQuadrupole1[2]*Vij[6] + rotatedQuadrupole1[3]*Vij[7] + rotatedQuadrupole1[4]*Vij[8] +
atom2.q*Vji[0] + rotatedDipole2.x*Vji[1] + rotatedDipole2.y*Vji[2] + rotatedDipole2.z*Vji[3] + rotatedQuadrupole2[0]*Vji[4] + rotatedQuadrupole2[1]*Vji[5] + rotatedQuadrupole2[2]*Vji[6] + rotatedQuadrupole2[3]*Vji[7] + rotatedQuadrupole2[4]*Vji[8]);
real fIZ = atom1.q*VijR[0] + rotatedDipole1.x*VijR[1] + rotatedDipole1.y*VijR[2] + rotatedDipole1.z*VijR[3] + rotatedQuadrupole1[0]*VijR[4] + rotatedQuadrupole1[1]*VijR[5] + rotatedQuadrupole1[2]*VijR[6] + rotatedQuadrupole1[3]*VijR[7] + rotatedQuadrupole1[4]*VijR[8];
real fJZ = atom2.q*VjiR[0] + rotatedDipole2.x*VjiR[1] + rotatedDipole2.y*VjiR[2] + rotatedDipole2.z*VjiR[3] + rotatedQuadrupole2[0]*VjiR[4] + rotatedQuadrupole2[1]*VjiR[5] + rotatedQuadrupole2[2]*VjiR[6] + rotatedQuadrupole2[3]*VjiR[7] + rotatedQuadrupole2[4]*VjiR[8];
real EIX = rotatedDipole1.z*Vij[1] - rotatedDipole1.x*Vij[3] + sqrtThree*rotatedQuadrupole1[2]*Vij[4] + rotatedQuadrupole1[4]*Vij[5] - (sqrtThree*rotatedQuadrupole1[0]+rotatedQuadrupole1[3])*Vij[6] + rotatedQuadrupole1[2]*Vij[7] - rotatedQuadrupole1[1]*Vij[8];
real EIY = -rotatedDipole1.y*Vij[1] + rotatedDipole1.x*Vij[2] - sqrtThree*rotatedQuadrupole1[1]*Vij[4] + (sqrtThree*rotatedQuadrupole1[0]-rotatedQuadrupole1[3])*Vij[5] - rotatedQuadrupole1[4]*Vij[6] + rotatedQuadrupole1[1]*Vij[7] + rotatedQuadrupole1[2]*Vij[8];
real EIZ = -rotatedDipole1.z*Vij[2] + rotatedDipole1.y*Vij[3] - rotatedQuadrupole1[2]*Vij[5] + rotatedQuadrupole1[1]*Vij[6] - 2*rotatedQuadrupole1[4]*Vij[7] + 2*rotatedQuadrupole1[3]*Vij[8];
real EJX = rotatedDipole2.z*Vji[1] - rotatedDipole2.x*Vji[3] + sqrtThree*rotatedQuadrupole2[2]*Vji[4] + rotatedQuadrupole2[4]*Vji[5] - (sqrtThree*rotatedQuadrupole2[0]+rotatedQuadrupole2[3])*Vji[6] + rotatedQuadrupole2[2]*Vji[7] - rotatedQuadrupole2[1]*Vji[8];
real EJY = -rotatedDipole2.y*Vji[1] + rotatedDipole2.x*Vji[2] - sqrtThree*rotatedQuadrupole2[1]*Vji[4] + (sqrtThree*rotatedQuadrupole2[0]-rotatedQuadrupole2[3])*Vji[5] - rotatedQuadrupole2[4]*Vji[6] + rotatedQuadrupole2[1]*Vji[7] + rotatedQuadrupole2[2]*Vji[8];
real EJZ = -rotatedDipole2.z*Vji[2] + rotatedDipole2.y*Vji[3] - rotatedQuadrupole2[2]*Vji[5] + rotatedQuadrupole2[1]*Vji[6] - 2*rotatedQuadrupole2[4]*Vji[7] + 2*rotatedQuadrupole2[3]*Vji[8];
// Define the torque intermediates for the induced dipoles. These are simply the induced dipole torque
// intermediates dotted with the field due to permanent moments only, at each center. We inline the
// induced dipole torque intermediates here, for simplicity. N.B. There are no torques on the dipoles
// themselves, so we accumulate the torque intermediates into separate variables to allow them to be
// used only in the force calculation.
//
// The torque about the x axis (needed to obtain the y force on the induced dipoles, below)
// qiUindIx[0] = qiQUindI[2]; qiUindIx[1] = 0; qiUindIx[2] = -qiQUindI[0]
real iEIX = qiUinpI.z*Vijp[0] + qiUindI.z*Vijd[0] - qiUinpI.x*Vijp[2] - qiUindI.x*Vijd[2];
real iEJX = qiUinpJ.z*Vjip[0] + qiUindJ.z*Vjid[0] - qiUinpJ.x*Vjip[2] - qiUindJ.x*Vjid[2];
// The torque about the y axis (needed to obtain the x force on the induced dipoles, below)
// qiUindIy[0] = -qiQUindI[1]; qiUindIy[1] = qiQUindI[0]; qiUindIy[2] = 0
real iEIY = qiUinpI.x*Vijp[1] + qiUindI.x*Vijd[1] - qiUinpI.y*Vijp[0] - qiUindI.y*Vijd[0];
real iEJY = qiUinpJ.x*Vjip[1] + qiUindJ.x*Vjid[1] - qiUinpJ.y*Vjip[0] - qiUindJ.y*Vjid[0];
#ifdef MUTUAL_POLARIZATION
// Uind-Uind terms (m=0)
real eCoef = -fourThirds*rInvVec[3]*(3*(thole_d0 + bVec[3]) + alphaRVec[3]*X);
real dCoef = rInvVec[4]*(6*(dthole_d0 + bVec[3]) + 4*alphaRVec[5]*X);
iEIX += eCoef*(qiUinpI.z*qiUindJ.x + qiUindI.z*qiUinpJ.x);
iEJX += eCoef*(qiUinpJ.z*qiUindI.x + qiUindJ.z*qiUinpI.x);
iEIY -= eCoef*(qiUinpI.y*qiUindJ.x + qiUindI.y*qiUinpJ.x);
iEJY -= eCoef*(qiUinpJ.y*qiUindI.x + qiUindJ.y*qiUinpI.x);
fIZ += dCoef*(qiUinpI.x*qiUindJ.x + qiUindI.x*qiUinpJ.x);
fIZ += dCoef*(qiUinpJ.x*qiUindI.x + qiUindJ.x*qiUinpI.x);
// Uind-Uind terms (m=1)
eCoef = 2*rInvVec[3]*(thole_d1 + bVec[3] - twoThirds*alphaRVec[3]*X);
dCoef = -3*rInvVec[4]*(dthole_d1 + bVec[3]);
iEIX -= eCoef*(qiUinpI.x*qiUindJ.z + qiUindI.x*qiUinpJ.z);
iEJX -= eCoef*(qiUinpJ.x*qiUindI.z + qiUindJ.x*qiUinpI.z);
iEIY += eCoef*(qiUinpI.x*qiUindJ.y + qiUindI.x*qiUinpJ.y);
iEJY += eCoef*(qiUinpJ.x*qiUindI.y + qiUindJ.x*qiUinpI.y);
fIZ += dCoef*(qiUinpI.y*qiUindJ.y + qiUindI.y*qiUinpJ.y + qiUinpI.z*qiUindJ.z + qiUindI.z*qiUinpJ.z);
fIZ += dCoef*(qiUinpJ.y*qiUindI.y + qiUindJ.y*qiUinpI.y + qiUinpJ.z*qiUindI.z + qiUindJ.z*qiUinpI.z);
#endif
// The quasi-internal frame forces and torques. Note that the induced torque intermediates are
// used in the force expression, but not in the torques; the induced dipoles are isotropic.
real qiForce[3] = {rInv*(EIY+EJY+iEIY+iEJY), -rInv*(EIX+EJX+iEIX+iEJX), -(fJZ+fIZ)};
real qiTorqueI[3] = {-EIX, -EIY, -EIZ};
real qiTorqueJ[3] = {-EJX, -EJY, -EJZ};
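    // Rotate the quasi-internal frame force and torques back into the lab frame; the index pattern below
    // applies the transpose of qiRotationMatrix together with the axis reordering chosen when the matrix
    // was built in buildQIRotationMatrix.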
real3 force = make_real3(qiRotationMatrix[1][1]*qiForce[0] + qiRotationMatrix[2][1]*qiForce[1] + qiRotationMatrix[0][1]*qiForce[2],
qiRotationMatrix[1][2]*qiForce[0] + qiRotationMatrix[2][2]*qiForce[1] + qiRotationMatrix[0][2]*qiForce[2],
qiRotationMatrix[1][0]*qiForce[0] + qiRotationMatrix[2][0]*qiForce[1] + qiRotationMatrix[0][0]*qiForce[2]);
atom1.force += force;
atom1.torque += make_real3(qiRotationMatrix[1][1]*qiTorqueI[0] + qiRotationMatrix[2][1]*qiTorqueI[1] + qiRotationMatrix[0][1]*qiTorqueI[2],
qiRotationMatrix[1][2]*qiTorqueI[0] + qiRotationMatrix[2][2]*qiTorqueI[1] + qiRotationMatrix[0][2]*qiTorqueI[2],
qiRotationMatrix[1][0]*qiTorqueI[0] + qiRotationMatrix[2][0]*qiTorqueI[1] + qiRotationMatrix[0][0]*qiTorqueI[2]);
if (forceFactor == 1) {
atom2.force -= force;
atom2.torque += make_real3(qiRotationMatrix[1][1]*qiTorqueJ[0] + qiRotationMatrix[2][1]*qiTorqueJ[1] + qiRotationMatrix[0][1]*qiTorqueJ[2],
qiRotationMatrix[1][2]*qiTorqueJ[0] + qiRotationMatrix[2][2]*qiTorqueJ[1] + qiRotationMatrix[0][2]*qiTorqueJ[2],
qiRotationMatrix[1][0]*qiTorqueJ[0] + qiRotationMatrix[2][0]*qiTorqueJ[1] + qiRotationMatrix[0][0]*qiTorqueJ[2]);
}
}
/**
* Compute the self energy and self torque.
*/
__device__ void computeSelfEnergyAndTorque(AtomData& atom1, mixed& energy) {
real cii = atom1.q*atom1.q;
real3 dipole = make_real3(atom1.sphericalDipole.y, atom1.sphericalDipole.z, atom1.sphericalDipole.x);
real dii = dot(dipole, dipole+(atom1.inducedDipole+atom1.inducedDipolePolar)*0.5f);
#ifdef INCLUDE_QUADRUPOLES
real qii = (atom1.sphericalQuadrupole[0]*atom1.sphericalQuadrupole[0] +
atom1.sphericalQuadrupole[1]*atom1.sphericalQuadrupole[1] +
atom1.sphericalQuadrupole[2]*atom1.sphericalQuadrupole[2] +
atom1.sphericalQuadrupole[3]*atom1.sphericalQuadrupole[3] +
atom1.sphericalQuadrupole[4]*atom1.sphericalQuadrupole[4]);
#else
real qii = 0;
#endif
real prefac = -EWALD_ALPHA/SQRT_PI;
real a2 = EWALD_ALPHA*EWALD_ALPHA;
real a4 = a2*a2;
energy += prefac*(cii + ((real)2/3)*a2*dii + ((real) 4/15)*a4*qii);
// self-torque for PME
real3 ui = atom1.inducedDipole+atom1.inducedDipolePolar;
atom1.torque += ((2/(real) 3)*(EWALD_ALPHA*EWALD_ALPHA*EWALD_ALPHA)/SQRT_PI)*cross(dipole, ui);
}
/**
* Compute electrostatic interactions.
*/
extern "C" __global__ void computeElectrostatics(
unsigned long long* __restrict__ forceBuffers, unsigned long long* __restrict__ torqueBuffers, mixed* __restrict__ energyBuffer,
const real4* __restrict__ posq, const uint2* __restrict__ covalentFlags, const unsigned int* __restrict__ polarizationGroupFlags,
const ushort2* __restrict__ exclusionTiles, unsigned int startTileIndex, unsigned int numTileIndices,
#ifdef USE_CUTOFF
const int* __restrict__ tiles, const unsigned int* __restrict__ interactionCount, real4 periodicBoxSize, real4 invPeriodicBoxSize,
real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, unsigned int maxTiles, const real4* __restrict__ blockCenter,
const unsigned int* __restrict__ interactingAtoms,
#endif
const real* __restrict__ sphericalDipole, const real* __restrict__ sphericalQuadrupole, const real* __restrict__ inducedDipole,
const real* __restrict__ inducedDipolePolar, const float2* __restrict__ dampingAndThole) {
const unsigned int totalWarps = (blockDim.x*gridDim.x)/TILE_SIZE;
const unsigned int warp = (blockIdx.x*blockDim.x+threadIdx.x)/TILE_SIZE;
const unsigned int tgx = threadIdx.x & (TILE_SIZE-1);
const unsigned int tbx = threadIdx.x - tgx;
mixed energy = 0;
__shared__ AtomData localData[THREAD_BLOCK_SIZE];
// First loop: process tiles that contain exclusions.
const unsigned int firstExclusionTile = FIRST_EXCLUSION_TILE+warp*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
const unsigned int lastExclusionTile = FIRST_EXCLUSION_TILE+(warp+1)*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
for (int pos = firstExclusionTile; pos < lastExclusionTile; pos++) {
const ushort2 tileIndices = exclusionTiles[pos];
const unsigned int x = tileIndices.x;
const unsigned int y = tileIndices.y;
AtomData data;
unsigned int atom1 = x*TILE_SIZE + tgx;
loadAtomData(data, atom1, posq, sphericalDipole, sphericalQuadrupole, inducedDipole, inducedDipolePolar, dampingAndThole);
data.force = make_real3(0);
data.torque = make_real3(0);
uint2 covalent = covalentFlags[pos*TILE_SIZE+tgx];
unsigned int polarizationGroup = polarizationGroupFlags[pos*TILE_SIZE+tgx];
if (x == y) {
// This tile is on the diagonal.
localData[threadIdx.x].pos = data.pos;
localData[threadIdx.x].q = data.q;
localData[threadIdx.x].sphericalDipole = data.sphericalDipole;
#ifdef INCLUDE_QUADRUPOLES
localData[threadIdx.x].sphericalQuadrupole[0] = data.sphericalQuadrupole[0];
localData[threadIdx.x].sphericalQuadrupole[1] = data.sphericalQuadrupole[1];
localData[threadIdx.x].sphericalQuadrupole[2] = data.sphericalQuadrupole[2];
localData[threadIdx.x].sphericalQuadrupole[3] = data.sphericalQuadrupole[3];
localData[threadIdx.x].sphericalQuadrupole[4] = data.sphericalQuadrupole[4];
#endif
localData[threadIdx.x].inducedDipole = data.inducedDipole;
localData[threadIdx.x].inducedDipolePolar = data.inducedDipolePolar;
localData[threadIdx.x].thole = data.thole;
localData[threadIdx.x].damp = data.damp;
// Compute forces.
for (unsigned int j = 0; j < TILE_SIZE; j++) {
int atom2 = y*TILE_SIZE+j;
if (atom1 != atom2 && atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) {
float d = computeDScaleFactor(polarizationGroup, j);
float p = computePScaleFactor(covalent, polarizationGroup, j);
float m = computeMScaleFactor(covalent, j);
computeOneInteraction(data, localData[tbx+j], true, d, p, m, 0.5f, energy, periodicBoxSize, invPeriodicBoxSize, periodicBoxVecX, periodicBoxVecY, periodicBoxVecZ);
}
}
if (atom1 < NUM_ATOMS)
computeSelfEnergyAndTorque(data, energy);
data.force *= -ENERGY_SCALE_FACTOR;
data.torque *= ENERGY_SCALE_FACTOR;
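            // Forces and torques are accumulated in 64-bit fixed point: each component is scaled by 2^32
            // and added with an unsigned long long atomicAdd, which keeps the summation order-independent.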
atomicAdd(&forceBuffers[atom1], static_cast<unsigned long long>((long long) (data.force.x*0x100000000)));
atomicAdd(&forceBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.y*0x100000000)));
atomicAdd(&forceBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.z*0x100000000)));
atomicAdd(&torqueBuffers[atom1], static_cast<unsigned long long>((long long) (data.torque.x*0x100000000)));
atomicAdd(&torqueBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.torque.y*0x100000000)));
atomicAdd(&torqueBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.torque.z*0x100000000)));
}
else {
// This is an off-diagonal tile.
unsigned int j = y*TILE_SIZE + tgx;
loadAtomData(localData[threadIdx.x], j, posq, sphericalDipole, sphericalQuadrupole, inducedDipole, inducedDipolePolar, dampingAndThole);
localData[threadIdx.x].force = make_real3(0);
localData[threadIdx.x].torque = make_real3(0);
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = y*TILE_SIZE+tj;
if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) {
float d = computeDScaleFactor(polarizationGroup, tj);
float p = computePScaleFactor(covalent, polarizationGroup, tj);
float m = computeMScaleFactor(covalent, tj);
computeOneInteraction(data, localData[tbx+tj], true, d, p, m, 1, energy, periodicBoxSize, invPeriodicBoxSize, periodicBoxVecX, periodicBoxVecY, periodicBoxVecZ);
}
tj = (tj + 1) & (TILE_SIZE - 1);
}
data.force *= -ENERGY_SCALE_FACTOR;
data.torque *= ENERGY_SCALE_FACTOR;
localData[threadIdx.x].force *= -ENERGY_SCALE_FACTOR;
localData[threadIdx.x].torque *= ENERGY_SCALE_FACTOR;
unsigned int offset = x*TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (data.force.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.z*0x100000000)));
atomicAdd(&torqueBuffers[offset], static_cast<unsigned long long>((long long) (data.torque.x*0x100000000)));
atomicAdd(&torqueBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.torque.y*0x100000000)));
atomicAdd(&torqueBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.torque.z*0x100000000)));
offset = y*TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.z*0x100000000)));
atomicAdd(&torqueBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].torque.x*0x100000000)));
atomicAdd(&torqueBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].torque.y*0x100000000)));
atomicAdd(&torqueBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].torque.z*0x100000000)));
}
}
// Second loop: tiles without exclusions, either from the neighbor list (with cutoff) or just enumerating all
// of them (no cutoff).
#ifdef USE_CUTOFF
const unsigned int numTiles = interactionCount[0];
if (numTiles > maxTiles)
return; // There wasn't enough memory for the neighbor list.
int pos = (int) (numTiles > maxTiles ? startTileIndex+warp*(long long)numTileIndices/totalWarps : warp*(long long)numTiles/totalWarps);
int end = (int) (numTiles > maxTiles ? startTileIndex+(warp+1)*(long long)numTileIndices/totalWarps : (warp+1)*(long long)numTiles/totalWarps);
#else
const unsigned int numTiles = numTileIndices;
int pos = (int) (startTileIndex+warp*(long long)numTiles/totalWarps);
int end = (int) (startTileIndex+(warp+1)*(long long)numTiles/totalWarps);
#endif
int skipBase = 0;
int currentSkipIndex = tbx;
__shared__ int atomIndices[THREAD_BLOCK_SIZE];
__shared__ volatile int skipTiles[THREAD_BLOCK_SIZE];
skipTiles[threadIdx.x] = -1;
while (pos < end) {
bool includeTile = true;
// Extract the coordinates of this tile.
int x, y;
#ifdef USE_CUTOFF
x = tiles[pos];
#else
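        // Decode the flattened upper-triangle tile index, pos = x + y*NUM_BLOCKS - y*(y+1)/2 with x >= y,
        // by inverting the triangular-number relation for y and then solving for x.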
y = (int) floor(NUM_BLOCKS+0.5f-SQRT((NUM_BLOCKS+0.5f)*(NUM_BLOCKS+0.5f)-2*pos));
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
if (x < y || x >= NUM_BLOCKS) { // Occasionally happens due to roundoff error.
y += (x < y ? -1 : 1);
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
}
// Skip over tiles that have exclusions, since they were already processed.
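        // Each warp cooperatively loads TILE_SIZE flattened exclusion-tile indices into skipTiles, then
        // advances currentSkipIndex past entries below pos; the tile is skipped when an entry equals pos.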
while (skipTiles[tbx+TILE_SIZE-1] < pos) {
if (skipBase+tgx < NUM_TILES_WITH_EXCLUSIONS) {
ushort2 tile = exclusionTiles[skipBase+tgx];
skipTiles[threadIdx.x] = tile.x + tile.y*NUM_BLOCKS - tile.y*(tile.y+1)/2;
}
else
skipTiles[threadIdx.x] = end;
skipBase += TILE_SIZE;
currentSkipIndex = tbx;
}
while (skipTiles[currentSkipIndex] < pos)
currentSkipIndex++;
includeTile = (skipTiles[currentSkipIndex] != pos);
#endif
if (includeTile) {
unsigned int atom1 = x*TILE_SIZE + tgx;
// Load atom data for this tile.
AtomData data;
loadAtomData(data, atom1, posq, sphericalDipole, sphericalQuadrupole, inducedDipole, inducedDipolePolar, dampingAndThole);
data.force = make_real3(0);
data.torque = make_real3(0);
#ifdef USE_CUTOFF
unsigned int j = interactingAtoms[pos*TILE_SIZE+tgx];
#else
unsigned int j = y*TILE_SIZE + tgx;
#endif
atomIndices[threadIdx.x] = j;
loadAtomData(localData[threadIdx.x], j, posq, sphericalDipole, sphericalQuadrupole, inducedDipole, inducedDipolePolar, dampingAndThole);
localData[threadIdx.x].force = make_real3(0);
localData[threadIdx.x].torque = make_real3(0);
// Compute forces.
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = atomIndices[tbx+tj];
if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) {
computeOneInteraction(data, localData[tbx+tj], false, 1, 1, 1, 1, energy, periodicBoxSize, invPeriodicBoxSize, periodicBoxVecX, periodicBoxVecY, periodicBoxVecZ);
}
tj = (tj + 1) & (TILE_SIZE - 1);
}
data.force *= -ENERGY_SCALE_FACTOR;
data.torque *= ENERGY_SCALE_FACTOR;
localData[threadIdx.x].force *= -ENERGY_SCALE_FACTOR;
localData[threadIdx.x].torque *= ENERGY_SCALE_FACTOR;
// Write results.
unsigned int offset = x*TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (data.force.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.z*0x100000000)));
atomicAdd(&torqueBuffers[offset], static_cast<unsigned long long>((long long) (data.torque.x*0x100000000)));
atomicAdd(&torqueBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.torque.y*0x100000000)));
atomicAdd(&torqueBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.torque.z*0x100000000)));
#ifdef USE_CUTOFF
offset = atomIndices[threadIdx.x];
#else
offset = y*TILE_SIZE + tgx;
#endif
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.z*0x100000000)));
atomicAdd(&torqueBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].torque.x*0x100000000)));
atomicAdd(&torqueBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].torque.y*0x100000000)));
atomicAdd(&torqueBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].torque.z*0x100000000)));
}
pos++;
}
energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy*ENERGY_SCALE_FACTOR;
}
| c95d118fbd6607891f39bb9a7ed4e8ce202b8e7c.cu | #define WARPS_PER_GROUP (THREAD_BLOCK_SIZE/TILE_SIZE)
typedef struct {
real3 pos, force, torque, inducedDipole, inducedDipolePolar, sphericalDipole;
real q;
float thole, damp;
#ifdef INCLUDE_QUADRUPOLES
real sphericalQuadrupole[5];
#endif
} AtomData;
inline __device__ void loadAtomData(AtomData& data, int atom, const real4* __restrict__ posq, const real* __restrict__ sphericalDipole,
const real* __restrict__ sphericalQuadrupole, const real* __restrict__ inducedDipole, const real* __restrict__ inducedDipolePolar,
const float2* __restrict__ dampingAndThole) {
real4 atomPosq = posq[atom];
data.pos = make_real3(atomPosq.x, atomPosq.y, atomPosq.z);
data.q = atomPosq.w;
data.sphericalDipole.x = sphericalDipole[atom*3];
data.sphericalDipole.y = sphericalDipole[atom*3+1];
data.sphericalDipole.z = sphericalDipole[atom*3+2];
#ifdef INCLUDE_QUADRUPOLES
data.sphericalQuadrupole[0] = sphericalQuadrupole[atom*5];
data.sphericalQuadrupole[1] = sphericalQuadrupole[atom*5+1];
data.sphericalQuadrupole[2] = sphericalQuadrupole[atom*5+2];
data.sphericalQuadrupole[3] = sphericalQuadrupole[atom*5+3];
data.sphericalQuadrupole[4] = sphericalQuadrupole[atom*5+4];
#endif
data.inducedDipole.x = inducedDipole[atom*3];
data.inducedDipole.y = inducedDipole[atom*3+1];
data.inducedDipole.z = inducedDipole[atom*3+2];
data.inducedDipolePolar.x = inducedDipolePolar[atom*3];
data.inducedDipolePolar.y = inducedDipolePolar[atom*3+1];
data.inducedDipolePolar.z = inducedDipolePolar[atom*3+2];
float2 temp = dampingAndThole[atom];
data.damp = temp.x;
data.thole = temp.y;
}
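// Scale factors for pairwise AMOEBA interactions. The covalent and polarization-group masks carry one
// bit per atom of the tile: the m-scale maps the two covalent bits to 0, 0.4, 0.8 or 1 for the permanent
// multipole term, while the d- and p-scales attenuate the induced-dipole (polarization) contributions.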
__device__ real computeDScaleFactor(unsigned int polarizationGroup, int index) {
return (polarizationGroup & 1<<index ? 0 : 1);
}
__device__ float computeMScaleFactor(uint2 covalent, int index) {
int mask = 1<<index;
bool x = (covalent.x & mask);
bool y = (covalent.y & mask);
return (x ? (y ? 0.0f : 0.4f) : (y ? 0.8f : 1.0f));
}
__device__ float computePScaleFactor(uint2 covalent, unsigned int polarizationGroup, int index) {
int mask = 1<<index;
bool x = (covalent.x & mask);
bool y = (covalent.y & mask);
bool p = (polarizationGroup & mask);
return (x && y ? 0.0f : (x && p ? 0.5f : 1.0f));
}
__device__ void computeOneInteraction(AtomData& atom1, AtomData& atom2, bool hasExclusions, float dScale, float pScale, float mScale, float forceFactor,
mixed& energy, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ) {
// Compute the displacement.
real3 delta;
delta.x = atom2.pos.x - atom1.pos.x;
delta.y = atom2.pos.y - atom1.pos.y;
delta.z = atom2.pos.z - atom1.pos.z;
APPLY_PERIODIC_TO_DELTA(delta)
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
if (r2 > CUTOFF_SQUARED)
return;
real rInv = RSQRT(r2);
real r = r2*rInv;
// Rotate the various dipoles and quadrupoles.
real qiRotationMatrix[3][3];
buildQIRotationMatrix(delta, rInv, qiRotationMatrix);
real3 qiUindI = 0.5f*make_real3(qiRotationMatrix[0][1]*atom1.inducedDipole.x + qiRotationMatrix[0][2]*atom1.inducedDipole.y + qiRotationMatrix[0][0]*atom1.inducedDipole.z,
qiRotationMatrix[1][1]*atom1.inducedDipole.x + qiRotationMatrix[1][2]*atom1.inducedDipole.y + qiRotationMatrix[1][0]*atom1.inducedDipole.z,
qiRotationMatrix[2][1]*atom1.inducedDipole.x + qiRotationMatrix[2][2]*atom1.inducedDipole.y + qiRotationMatrix[2][0]*atom1.inducedDipole.z);
real3 qiUindJ = 0.5f*make_real3(qiRotationMatrix[0][1]*atom2.inducedDipole.x + qiRotationMatrix[0][2]*atom2.inducedDipole.y + qiRotationMatrix[0][0]*atom2.inducedDipole.z,
qiRotationMatrix[1][1]*atom2.inducedDipole.x + qiRotationMatrix[1][2]*atom2.inducedDipole.y + qiRotationMatrix[1][0]*atom2.inducedDipole.z,
qiRotationMatrix[2][1]*atom2.inducedDipole.x + qiRotationMatrix[2][2]*atom2.inducedDipole.y + qiRotationMatrix[2][0]*atom2.inducedDipole.z);
real3 qiUinpI = 0.5f*make_real3(qiRotationMatrix[0][1]*atom1.inducedDipolePolar.x + qiRotationMatrix[0][2]*atom1.inducedDipolePolar.y + qiRotationMatrix[0][0]*atom1.inducedDipolePolar.z,
qiRotationMatrix[1][1]*atom1.inducedDipolePolar.x + qiRotationMatrix[1][2]*atom1.inducedDipolePolar.y + qiRotationMatrix[1][0]*atom1.inducedDipolePolar.z,
qiRotationMatrix[2][1]*atom1.inducedDipolePolar.x + qiRotationMatrix[2][2]*atom1.inducedDipolePolar.y + qiRotationMatrix[2][0]*atom1.inducedDipolePolar.z);
real3 qiUinpJ = 0.5f*make_real3(qiRotationMatrix[0][1]*atom2.inducedDipolePolar.x + qiRotationMatrix[0][2]*atom2.inducedDipolePolar.y + qiRotationMatrix[0][0]*atom2.inducedDipolePolar.z,
qiRotationMatrix[1][1]*atom2.inducedDipolePolar.x + qiRotationMatrix[1][2]*atom2.inducedDipolePolar.y + qiRotationMatrix[1][0]*atom2.inducedDipolePolar.z,
qiRotationMatrix[2][1]*atom2.inducedDipolePolar.x + qiRotationMatrix[2][2]*atom2.inducedDipolePolar.y + qiRotationMatrix[2][0]*atom2.inducedDipolePolar.z);
real3 rotatedDipole1 = rotateDipole(atom1.sphericalDipole, qiRotationMatrix);
real3 rotatedDipole2 = rotateDipole(atom2.sphericalDipole, qiRotationMatrix);
real rotatedQuadrupole1[] = {0, 0, 0, 0, 0};
real rotatedQuadrupole2[] = {0, 0, 0, 0, 0};
#ifdef INCLUDE_QUADRUPOLES
rotateQuadupoles(qiRotationMatrix, atom1.sphericalQuadrupole, atom2.sphericalQuadrupole, rotatedQuadrupole1, rotatedQuadrupole2);
#endif
// The field derivatives at I due to permanent and induced moments on J, and vice-versa.
// Also, their derivatives w.r.t. R, which are needed for force calculations
real Vij[9], Vji[9], VjiR[9], VijR[9];
// The field derivatives at I due to only permanent moments on J, and vice-versa.
real Vijp[3], Vijd[3], Vjip[3], Vjid[3];
real rInvVec[7], alphaRVec[8], bVec[5];
// The rInvVec array is defined such that the ith element is R^-i, with the
    // dielectric constant folded in, to avoid conversions later.
rInvVec[1] = rInv;
for (int i = 2; i < 7; ++i)
rInvVec[i] = rInvVec[i-1] * rInv;
    // The alphaRVec array is defined such that the ith element is (alpha R)^i,
// where kappa (alpha in OpenMM parlance) is the Ewald attenuation parameter.
real ralpha = EWALD_ALPHA*r;
real exp2a = EXP(-(ralpha*ralpha));
#ifdef USE_DOUBLE_PRECISION
const real erfAlphaR = erf(ralpha);
#else
// This approximation for erfc is from Abramowitz and Stegun (1964) p. 299. They cite the following as
// the original source: C. Hastings, Jr., Approximations for Digital Computers (1955). It has a maximum
// error of 1.5e-7.
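    // Written out, the approximation is erfc(x) ~= (a1*t + a2*t^2 + a3*t^3 + a4*t^4 + a5*t^5)*exp(-x^2)
    // with t = 1/(1 + 0.3275911*x) and erf(x) = 1 - erfc(x); exp2a below holds exp(-x^2).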
const real t = RECIP(1.0f+0.3275911f*ralpha);
const real erfAlphaR = 1-(0.254829592f+(-0.284496736f+(1.421413741f+(-1.453152027f+1.061405429f*t)*t)*t)*t)*t*exp2a;
#endif
alphaRVec[1] = ralpha;
for (int i = 2; i < 8; ++i)
alphaRVec[i] = alphaRVec[i-1]*ralpha;
real X = 2*exp2a/SQRT_PI;
int doubleFactorial = 1, facCount = 1;
real tmp = alphaRVec[1];
bVec[1] = -erfAlphaR;
for (int i = 2; i < 5; ++i) {
bVec[i] = bVec[i-1] + tmp * X / (real)(doubleFactorial);
facCount = facCount + 2;
doubleFactorial = doubleFactorial * facCount;
tmp *= 2*alphaRVec[2];
}
real dmp = atom1.damp*atom2.damp;
real a = min(atom1.thole, atom2.thole);
real u = fabs(dmp) > 1.0e-5f ? r/dmp : 1e10f;
real au3 = a*u*u*u;
real expau3 = au3 < 50 ? EXP(-au3) : 0;
real a2u6 = au3*au3;
real a3u9 = a2u6*au3;
// Thole damping factors for energies
real thole_c = 1 - expau3;
real thole_d0 = 1 - expau3*(1 + 1.5f*au3);
real thole_d1 = 1 - expau3;
real thole_q0 = 1 - expau3*(1 + au3 + a2u6);
real thole_q1 = 1 - expau3*(1 + au3);
// Thole damping factors for derivatives
real dthole_c = 1 - expau3*(1 + 1.5f*au3);
real dthole_d0 = 1 - expau3*(1 + au3 + 1.5f*a2u6);
real dthole_d1 = 1 - expau3*(1 + au3);
real dthole_q0 = 1 - expau3*(1 + au3 + 0.25f*a2u6 + 0.75f*a3u9);
real dthole_q1 = 1 - expau3*(1 + au3 + 0.75f*a2u6);
// Now we compute the (attenuated) Coulomb operator and its derivatives, contracted with
// permanent moments and induced dipoles. Note that the coefficient of the permanent force
// terms is half of the expected value; this is because we compute the interaction of I with
// the sum of induced and permanent moments on J, as well as the interaction of J with I's
// permanent and induced moments; doing so double counts the permanent-permanent interaction.
real ePermCoef, dPermCoef, eUIndCoef, dUIndCoef, eUInpCoef, dUInpCoef;
// C-C terms (m=0)
ePermCoef = rInvVec[1]*(mScale + bVec[2] - alphaRVec[1]*X);
dPermCoef = -0.5f*(mScale + bVec[2])*rInvVec[2];
Vij[0] = ePermCoef*atom2.q;
Vji[0] = ePermCoef*atom1.q;
VijR[0] = dPermCoef*atom2.q;
VjiR[0] = dPermCoef*atom1.q;
// C-D and C-Uind terms (m=0)
ePermCoef = rInvVec[2]*(mScale + bVec[2]);
eUIndCoef = rInvVec[2]*(pScale*thole_c + bVec[2]);
eUInpCoef = rInvVec[2]*(dScale*thole_c + bVec[2]);
dPermCoef = -rInvVec[3]*(mScale + bVec[2] + alphaRVec[3]*X);
dUIndCoef = -2*rInvVec[3]*(pScale*dthole_c + bVec[2] + alphaRVec[3]*X);
dUInpCoef = -2*rInvVec[3]*(dScale*dthole_c + bVec[2] + alphaRVec[3]*X);
Vij[0] += -(ePermCoef*rotatedDipole2.x + eUIndCoef*qiUindJ.x + eUInpCoef*qiUinpJ.x);
Vji[1] = -(ePermCoef*atom1.q);
VijR[0] += -(dPermCoef*rotatedDipole2.x + dUIndCoef*qiUindJ.x + dUInpCoef*qiUinpJ.x);
VjiR[1] = -(dPermCoef*atom1.q);
Vjip[0] = -(eUInpCoef*atom1.q);
Vjid[0] = -(eUIndCoef*atom1.q);
// D-C and Uind-C terms (m=0)
Vij[1] = ePermCoef*atom2.q;
Vji[0] += ePermCoef*rotatedDipole1.x + eUIndCoef*qiUindI.x + eUInpCoef*qiUinpI.x;
VijR[1] = dPermCoef*atom2.q;
VjiR[0] += dPermCoef*rotatedDipole1.x + dUIndCoef*qiUindI.x + dUInpCoef*qiUinpI.x;
Vijp[0] = eUInpCoef*atom2.q;
Vijd[0] = eUIndCoef*atom2.q;
// D-D and D-Uind terms (m=0)
const real twoThirds = (real) 2/3;
ePermCoef = -twoThirds*rInvVec[3]*(3*(mScale + bVec[3]) + alphaRVec[3]*X);
eUIndCoef = -twoThirds*rInvVec[3]*(3*(pScale*thole_d0 + bVec[3]) + alphaRVec[3]*X);
eUInpCoef = -twoThirds*rInvVec[3]*(3*(dScale*thole_d0 + bVec[3]) + alphaRVec[3]*X);
dPermCoef = rInvVec[4]*(3*(mScale + bVec[3]) + 2*alphaRVec[5]*X);
dUIndCoef = rInvVec[4]*(6*(pScale*dthole_d0 + bVec[3]) + 4*alphaRVec[5]*X);
dUInpCoef = rInvVec[4]*(6*(dScale*dthole_d0 + bVec[3]) + 4*alphaRVec[5]*X);
Vij[1] += ePermCoef*rotatedDipole2.x + eUIndCoef*qiUindJ.x + eUInpCoef*qiUinpJ.x;
Vji[1] += ePermCoef*rotatedDipole1.x + eUIndCoef*qiUindI.x + eUInpCoef*qiUinpI.x;
VijR[1] += dPermCoef*rotatedDipole2.x + dUIndCoef*qiUindJ.x + dUInpCoef*qiUinpJ.x;
VjiR[1] += dPermCoef*rotatedDipole1.x + dUIndCoef*qiUindI.x + dUInpCoef*qiUinpI.x;
Vijp[0] += eUInpCoef*rotatedDipole2.x;
Vijd[0] += eUIndCoef*rotatedDipole2.x;
Vjip[0] += eUInpCoef*rotatedDipole1.x;
Vjid[0] += eUIndCoef*rotatedDipole1.x;
// D-D and D-Uind terms (m=1)
ePermCoef = rInvVec[3]*(mScale + bVec[3] - twoThirds*alphaRVec[3]*X);
eUIndCoef = rInvVec[3]*(pScale*thole_d1 + bVec[3] - twoThirds*alphaRVec[3]*X);
eUInpCoef = rInvVec[3]*(dScale*thole_d1 + bVec[3] - twoThirds*alphaRVec[3]*X);
dPermCoef = -1.5f*rInvVec[4]*(mScale + bVec[3]);
dUIndCoef = -3*rInvVec[4]*(pScale*dthole_d1 + bVec[3]);
dUInpCoef = -3*rInvVec[4]*(dScale*dthole_d1 + bVec[3]);
Vij[2] = ePermCoef*rotatedDipole2.y + eUIndCoef*qiUindJ.y + eUInpCoef*qiUinpJ.y;
Vji[2] = ePermCoef*rotatedDipole1.y + eUIndCoef*qiUindI.y + eUInpCoef*qiUinpI.y;
VijR[2] = dPermCoef*rotatedDipole2.y + dUIndCoef*qiUindJ.y + dUInpCoef*qiUinpJ.y;
VjiR[2] = dPermCoef*rotatedDipole1.y + dUIndCoef*qiUindI.y + dUInpCoef*qiUinpI.y;
Vij[3] = ePermCoef*rotatedDipole2.z + eUIndCoef*qiUindJ.z + eUInpCoef*qiUinpJ.z;
Vji[3] = ePermCoef*rotatedDipole1.z + eUIndCoef*qiUindI.z + eUInpCoef*qiUinpI.z;
VijR[3] = dPermCoef*rotatedDipole2.z + dUIndCoef*qiUindJ.z + dUInpCoef*qiUinpJ.z;
VjiR[3] = dPermCoef*rotatedDipole1.z + dUIndCoef*qiUindI.z + dUInpCoef*qiUinpI.z;
Vijp[1] = eUInpCoef*rotatedDipole2.y;
Vijd[1] = eUIndCoef*rotatedDipole2.y;
Vjip[1] = eUInpCoef*rotatedDipole1.y;
Vjid[1] = eUIndCoef*rotatedDipole1.y;
Vijp[2] = eUInpCoef*rotatedDipole2.z;
Vijd[2] = eUIndCoef*rotatedDipole2.z;
Vjip[2] = eUInpCoef*rotatedDipole1.z;
Vjid[2] = eUIndCoef*rotatedDipole1.z;
// C-Q terms (m=0)
ePermCoef = (mScale + bVec[3])*rInvVec[3];
dPermCoef = -((real) 1/3)*rInvVec[4]*(4.5f*(mScale + bVec[3]) + 2*alphaRVec[5]*X);
Vij[0] += ePermCoef*rotatedQuadrupole2[0];
Vji[4] = ePermCoef*atom1.q;
VijR[0] += dPermCoef*rotatedQuadrupole2[0];
VjiR[4] = dPermCoef*atom1.q;
// Q-C terms (m=0)
Vij[4] = ePermCoef*atom2.q;
Vji[0] += ePermCoef*rotatedQuadrupole1[0];
VijR[4] = dPermCoef*atom2.q;
VjiR[0] += dPermCoef*rotatedQuadrupole1[0];
// D-Q and Uind-Q terms (m=0)
const real fourThirds = (real) 4/3;
ePermCoef = rInvVec[4]*(3*(mScale + bVec[3]) + fourThirds*alphaRVec[5]*X);
eUIndCoef = rInvVec[4]*(3*(pScale*thole_q0 + bVec[3]) + fourThirds*alphaRVec[5]*X);
eUInpCoef = rInvVec[4]*(3*(dScale*thole_q0 + bVec[3]) + fourThirds*alphaRVec[5]*X);
dPermCoef = -fourThirds*rInvVec[5]*(4.5f*(mScale + bVec[3]) + (1 + alphaRVec[2])*alphaRVec[5]*X);
dUIndCoef = -fourThirds*rInvVec[5]*(9*(pScale*dthole_q0 + bVec[3]) + 2*(1 + alphaRVec[2])*alphaRVec[5]*X);
dUInpCoef = -fourThirds*rInvVec[5]*(9*(dScale*dthole_q0 + bVec[3]) + 2*(1 + alphaRVec[2])*alphaRVec[5]*X);
Vij[1] += ePermCoef*rotatedQuadrupole2[0];
Vji[4] += ePermCoef*rotatedDipole1.x + eUIndCoef*qiUindI.x + eUInpCoef*qiUinpI.x;
VijR[1] += dPermCoef*rotatedQuadrupole2[0];
VjiR[4] += dPermCoef*rotatedDipole1.x + dUIndCoef*qiUindI.x + dUInpCoef*qiUinpI.x;
Vijp[0] += eUInpCoef*rotatedQuadrupole2[0];
Vijd[0] += eUIndCoef*rotatedQuadrupole2[0];
// Q-D and Q-Uind terms (m=0)
Vij[4] += -(ePermCoef*rotatedDipole2.x + eUIndCoef*qiUindJ.x + eUInpCoef*qiUinpJ.x);
Vji[1] += -(ePermCoef*rotatedQuadrupole1[0]);
VijR[4] += -(dPermCoef*rotatedDipole2.x + dUIndCoef*qiUindJ.x + dUInpCoef*qiUinpJ.x);
VjiR[1] += -(dPermCoef*rotatedQuadrupole1[0]);
Vjip[0] += -(eUInpCoef*rotatedQuadrupole1[0]);
Vjid[0] += -(eUIndCoef*rotatedQuadrupole1[0]);
// D-Q and Uind-Q terms (m=1)
const real sqrtThree = SQRT((real) 3);
ePermCoef = -sqrtThree*rInvVec[4]*(mScale + bVec[3]);
eUIndCoef = -sqrtThree*rInvVec[4]*(pScale*thole_q1 + bVec[3]);
eUInpCoef = -sqrtThree*rInvVec[4]*(dScale*thole_q1 + bVec[3]);
const real fourSqrtOneThird = 4/sqrt((real) 3);
dPermCoef = fourSqrtOneThird*rInvVec[5]*(1.5f*(mScale + bVec[3]) + 0.5f*alphaRVec[5]*X);
dUIndCoef = fourSqrtOneThird*rInvVec[5]*(3*(pScale*dthole_q1 + bVec[3]) + alphaRVec[5]*X);
dUInpCoef = fourSqrtOneThird*rInvVec[5]*(3*(dScale*dthole_q1 + bVec[3]) + alphaRVec[5]*X);
Vij[2] += ePermCoef*rotatedQuadrupole2[1];
Vji[5] = ePermCoef*rotatedDipole1.y + eUIndCoef*qiUindI.y + eUInpCoef*qiUinpI.y;
VijR[2] += dPermCoef*rotatedQuadrupole2[1];
VjiR[5] = dPermCoef*rotatedDipole1.y + dUIndCoef*qiUindI.y + dUInpCoef*qiUinpI.y;
Vij[3] += ePermCoef*rotatedQuadrupole2[2];
Vji[6] = ePermCoef*rotatedDipole1.z + eUIndCoef*qiUindI.z + eUInpCoef*qiUinpI.z;
VijR[3] += dPermCoef*rotatedQuadrupole2[2];
VjiR[6] = dPermCoef*rotatedDipole1.z + dUIndCoef*qiUindI.z + dUInpCoef*qiUinpI.z;
Vijp[1] += eUInpCoef*rotatedQuadrupole2[1];
Vijd[1] += eUIndCoef*rotatedQuadrupole2[1];
Vijp[2] += eUInpCoef*rotatedQuadrupole2[2];
Vijd[2] += eUIndCoef*rotatedQuadrupole2[2];
// D-Q and Uind-Q terms (m=1)
Vij[5] = -(ePermCoef*rotatedDipole2.y + eUIndCoef*qiUindJ.y + eUInpCoef*qiUinpJ.y);
Vji[2] += -(ePermCoef*rotatedQuadrupole1[1]);
VijR[5] = -(dPermCoef*rotatedDipole2.y + dUIndCoef*qiUindJ.y + dUInpCoef*qiUinpJ.y);
VjiR[2] += -(dPermCoef*rotatedQuadrupole1[1]);
Vij[6] = -(ePermCoef*rotatedDipole2.z + eUIndCoef*qiUindJ.z + eUInpCoef*qiUinpJ.z);
Vji[3] += -(ePermCoef*rotatedQuadrupole1[2]);
VijR[6] = -(dPermCoef*rotatedDipole2.z + dUIndCoef*qiUindJ.z + dUInpCoef*qiUinpJ.z);
VjiR[3] += -(dPermCoef*rotatedQuadrupole1[2]);
Vjip[1] += -(eUInpCoef*rotatedQuadrupole1[1]);
Vjid[1] += -(eUIndCoef*rotatedQuadrupole1[1]);
Vjip[2] += -(eUInpCoef*rotatedQuadrupole1[2]);
Vjid[2] += -(eUIndCoef*rotatedQuadrupole1[2]);
// Q-Q terms (m=0)
ePermCoef = rInvVec[5]*(6*(mScale + bVec[4]) + ((real) 4/45)*(-3 + 10*alphaRVec[2])*alphaRVec[5]*X);
dPermCoef = -rInvVec[6]*(135*(mScale + bVec[4]) + 4*(1 + 2*alphaRVec[2])*alphaRVec[7]*X)/9;
Vij[4] += ePermCoef*rotatedQuadrupole2[0];
Vji[4] += ePermCoef*rotatedQuadrupole1[0];
VijR[4] += dPermCoef*rotatedQuadrupole2[0];
VjiR[4] += dPermCoef*rotatedQuadrupole1[0];
// Q-Q terms (m=1)
const real fourOverFifteen = (real) 4/15;
ePermCoef = -fourOverFifteen*rInvVec[5]*(15*(mScale + bVec[4]) + alphaRVec[5]*X);
dPermCoef = rInvVec[6]*(10*(mScale + bVec[4]) + fourThirds*alphaRVec[7]*X);
Vij[5] += ePermCoef*rotatedQuadrupole2[1];
Vji[5] += ePermCoef*rotatedQuadrupole1[1];
VijR[5] += dPermCoef*rotatedQuadrupole2[1];
VjiR[5] += dPermCoef*rotatedQuadrupole1[1];
Vij[6] += ePermCoef*rotatedQuadrupole2[2];
Vji[6] += ePermCoef*rotatedQuadrupole1[2];
VijR[6] += dPermCoef*rotatedQuadrupole2[2];
VjiR[6] += dPermCoef*rotatedQuadrupole1[2];
// Q-Q terms (m=2)
ePermCoef = rInvVec[5]*(mScale + bVec[4] - fourOverFifteen*alphaRVec[5]*X);
dPermCoef = -2.5f*(mScale + bVec[4])*rInvVec[6];
Vij[7] = ePermCoef*rotatedQuadrupole2[3];
Vji[7] = ePermCoef*rotatedQuadrupole1[3];
VijR[7] = dPermCoef*rotatedQuadrupole2[3];
VjiR[7] = dPermCoef*rotatedQuadrupole1[3];
Vij[8] = ePermCoef*rotatedQuadrupole2[4];
Vji[8] = ePermCoef*rotatedQuadrupole1[4];
VijR[8] = dPermCoef*rotatedQuadrupole2[4];
VjiR[8] = dPermCoef*rotatedQuadrupole1[4];
// Evaluate the energies, forces and torques due to permanent+induced moments
// interacting with just the permanent moments.
energy += forceFactor*0.5f*(
atom1.q*Vij[0] + rotatedDipole1.x*Vij[1] + rotatedDipole1.y*Vij[2] + rotatedDipole1.z*Vij[3] + rotatedQuadrupole1[0]*Vij[4] + rotatedQuadrupole1[1]*Vij[5] + rotatedQuadrupole1[2]*Vij[6] + rotatedQuadrupole1[3]*Vij[7] + rotatedQuadrupole1[4]*Vij[8] +
atom2.q*Vji[0] + rotatedDipole2.x*Vji[1] + rotatedDipole2.y*Vji[2] + rotatedDipole2.z*Vji[3] + rotatedQuadrupole2[0]*Vji[4] + rotatedQuadrupole2[1]*Vji[5] + rotatedQuadrupole2[2]*Vji[6] + rotatedQuadrupole2[3]*Vji[7] + rotatedQuadrupole2[4]*Vji[8]);
real fIZ = atom1.q*VijR[0] + rotatedDipole1.x*VijR[1] + rotatedDipole1.y*VijR[2] + rotatedDipole1.z*VijR[3] + rotatedQuadrupole1[0]*VijR[4] + rotatedQuadrupole1[1]*VijR[5] + rotatedQuadrupole1[2]*VijR[6] + rotatedQuadrupole1[3]*VijR[7] + rotatedQuadrupole1[4]*VijR[8];
real fJZ = atom2.q*VjiR[0] + rotatedDipole2.x*VjiR[1] + rotatedDipole2.y*VjiR[2] + rotatedDipole2.z*VjiR[3] + rotatedQuadrupole2[0]*VjiR[4] + rotatedQuadrupole2[1]*VjiR[5] + rotatedQuadrupole2[2]*VjiR[6] + rotatedQuadrupole2[3]*VjiR[7] + rotatedQuadrupole2[4]*VjiR[8];
real EIX = rotatedDipole1.z*Vij[1] - rotatedDipole1.x*Vij[3] + sqrtThree*rotatedQuadrupole1[2]*Vij[4] + rotatedQuadrupole1[4]*Vij[5] - (sqrtThree*rotatedQuadrupole1[0]+rotatedQuadrupole1[3])*Vij[6] + rotatedQuadrupole1[2]*Vij[7] - rotatedQuadrupole1[1]*Vij[8];
real EIY = -rotatedDipole1.y*Vij[1] + rotatedDipole1.x*Vij[2] - sqrtThree*rotatedQuadrupole1[1]*Vij[4] + (sqrtThree*rotatedQuadrupole1[0]-rotatedQuadrupole1[3])*Vij[5] - rotatedQuadrupole1[4]*Vij[6] + rotatedQuadrupole1[1]*Vij[7] + rotatedQuadrupole1[2]*Vij[8];
real EIZ = -rotatedDipole1.z*Vij[2] + rotatedDipole1.y*Vij[3] - rotatedQuadrupole1[2]*Vij[5] + rotatedQuadrupole1[1]*Vij[6] - 2*rotatedQuadrupole1[4]*Vij[7] + 2*rotatedQuadrupole1[3]*Vij[8];
real EJX = rotatedDipole2.z*Vji[1] - rotatedDipole2.x*Vji[3] + sqrtThree*rotatedQuadrupole2[2]*Vji[4] + rotatedQuadrupole2[4]*Vji[5] - (sqrtThree*rotatedQuadrupole2[0]+rotatedQuadrupole2[3])*Vji[6] + rotatedQuadrupole2[2]*Vji[7] - rotatedQuadrupole2[1]*Vji[8];
real EJY = -rotatedDipole2.y*Vji[1] + rotatedDipole2.x*Vji[2] - sqrtThree*rotatedQuadrupole2[1]*Vji[4] + (sqrtThree*rotatedQuadrupole2[0]-rotatedQuadrupole2[3])*Vji[5] - rotatedQuadrupole2[4]*Vji[6] + rotatedQuadrupole2[1]*Vji[7] + rotatedQuadrupole2[2]*Vji[8];
real EJZ = -rotatedDipole2.z*Vji[2] + rotatedDipole2.y*Vji[3] - rotatedQuadrupole2[2]*Vji[5] + rotatedQuadrupole2[1]*Vji[6] - 2*rotatedQuadrupole2[4]*Vji[7] + 2*rotatedQuadrupole2[3]*Vji[8];
// Define the torque intermediates for the induced dipoles. These are simply the induced dipole torque
// intermediates dotted with the field due to permanent moments only, at each center. We inline the
// induced dipole torque intermediates here, for simplicity. N.B. There are no torques on the dipoles
// themselves, so we accumulate the torque intermediates into separate variables to allow them to be
// used only in the force calculation.
//
// The torque about the x axis (needed to obtain the y force on the induced dipoles, below)
// qiUindIx[0] = qiQUindI[2]; qiUindIx[1] = 0; qiUindIx[2] = -qiQUindI[0]
real iEIX = qiUinpI.z*Vijp[0] + qiUindI.z*Vijd[0] - qiUinpI.x*Vijp[2] - qiUindI.x*Vijd[2];
real iEJX = qiUinpJ.z*Vjip[0] + qiUindJ.z*Vjid[0] - qiUinpJ.x*Vjip[2] - qiUindJ.x*Vjid[2];
// The torque about the y axis (needed to obtain the x force on the induced dipoles, below)
// qiUindIy[0] = -qiQUindI[1]; qiUindIy[1] = qiQUindI[0]; qiUindIy[2] = 0
real iEIY = qiUinpI.x*Vijp[1] + qiUindI.x*Vijd[1] - qiUinpI.y*Vijp[0] - qiUindI.y*Vijd[0];
real iEJY = qiUinpJ.x*Vjip[1] + qiUindJ.x*Vjid[1] - qiUinpJ.y*Vjip[0] - qiUindJ.y*Vjid[0];
#ifdef MUTUAL_POLARIZATION
// Uind-Uind terms (m=0)
real eCoef = -fourThirds*rInvVec[3]*(3*(thole_d0 + bVec[3]) + alphaRVec[3]*X);
real dCoef = rInvVec[4]*(6*(dthole_d0 + bVec[3]) + 4*alphaRVec[5]*X);
iEIX += eCoef*(qiUinpI.z*qiUindJ.x + qiUindI.z*qiUinpJ.x);
iEJX += eCoef*(qiUinpJ.z*qiUindI.x + qiUindJ.z*qiUinpI.x);
iEIY -= eCoef*(qiUinpI.y*qiUindJ.x + qiUindI.y*qiUinpJ.x);
iEJY -= eCoef*(qiUinpJ.y*qiUindI.x + qiUindJ.y*qiUinpI.x);
fIZ += dCoef*(qiUinpI.x*qiUindJ.x + qiUindI.x*qiUinpJ.x);
fIZ += dCoef*(qiUinpJ.x*qiUindI.x + qiUindJ.x*qiUinpI.x);
// Uind-Uind terms (m=1)
eCoef = 2*rInvVec[3]*(thole_d1 + bVec[3] - twoThirds*alphaRVec[3]*X);
dCoef = -3*rInvVec[4]*(dthole_d1 + bVec[3]);
iEIX -= eCoef*(qiUinpI.x*qiUindJ.z + qiUindI.x*qiUinpJ.z);
iEJX -= eCoef*(qiUinpJ.x*qiUindI.z + qiUindJ.x*qiUinpI.z);
iEIY += eCoef*(qiUinpI.x*qiUindJ.y + qiUindI.x*qiUinpJ.y);
iEJY += eCoef*(qiUinpJ.x*qiUindI.y + qiUindJ.x*qiUinpI.y);
fIZ += dCoef*(qiUinpI.y*qiUindJ.y + qiUindI.y*qiUinpJ.y + qiUinpI.z*qiUindJ.z + qiUindI.z*qiUinpJ.z);
fIZ += dCoef*(qiUinpJ.y*qiUindI.y + qiUindJ.y*qiUinpI.y + qiUinpJ.z*qiUindI.z + qiUindJ.z*qiUinpI.z);
#endif
// The quasi-internal frame forces and torques. Note that the induced torque intermediates are
// used in the force expression, but not in the torques; the induced dipoles are isotropic.
real qiForce[3] = {rInv*(EIY+EJY+iEIY+iEJY), -rInv*(EIX+EJX+iEIX+iEJX), -(fJZ+fIZ)};
real qiTorqueI[3] = {-EIX, -EIY, -EIZ};
real qiTorqueJ[3] = {-EJX, -EJY, -EJZ};
real3 force = make_real3(qiRotationMatrix[1][1]*qiForce[0] + qiRotationMatrix[2][1]*qiForce[1] + qiRotationMatrix[0][1]*qiForce[2],
qiRotationMatrix[1][2]*qiForce[0] + qiRotationMatrix[2][2]*qiForce[1] + qiRotationMatrix[0][2]*qiForce[2],
qiRotationMatrix[1][0]*qiForce[0] + qiRotationMatrix[2][0]*qiForce[1] + qiRotationMatrix[0][0]*qiForce[2]);
atom1.force += force;
atom1.torque += make_real3(qiRotationMatrix[1][1]*qiTorqueI[0] + qiRotationMatrix[2][1]*qiTorqueI[1] + qiRotationMatrix[0][1]*qiTorqueI[2],
qiRotationMatrix[1][2]*qiTorqueI[0] + qiRotationMatrix[2][2]*qiTorqueI[1] + qiRotationMatrix[0][2]*qiTorqueI[2],
qiRotationMatrix[1][0]*qiTorqueI[0] + qiRotationMatrix[2][0]*qiTorqueI[1] + qiRotationMatrix[0][0]*qiTorqueI[2]);
if (forceFactor == 1) {
atom2.force -= force;
atom2.torque += make_real3(qiRotationMatrix[1][1]*qiTorqueJ[0] + qiRotationMatrix[2][1]*qiTorqueJ[1] + qiRotationMatrix[0][1]*qiTorqueJ[2],
qiRotationMatrix[1][2]*qiTorqueJ[0] + qiRotationMatrix[2][2]*qiTorqueJ[1] + qiRotationMatrix[0][2]*qiTorqueJ[2],
qiRotationMatrix[1][0]*qiTorqueJ[0] + qiRotationMatrix[2][0]*qiTorqueJ[1] + qiRotationMatrix[0][0]*qiTorqueJ[2]);
}
}
/**
* Compute the self energy and self torque.
*/
__device__ void computeSelfEnergyAndTorque(AtomData& atom1, mixed& energy) {
real cii = atom1.q*atom1.q;
real3 dipole = make_real3(atom1.sphericalDipole.y, atom1.sphericalDipole.z, atom1.sphericalDipole.x);
real dii = dot(dipole, dipole+(atom1.inducedDipole+atom1.inducedDipolePolar)*0.5f);
#ifdef INCLUDE_QUADRUPOLES
real qii = (atom1.sphericalQuadrupole[0]*atom1.sphericalQuadrupole[0] +
atom1.sphericalQuadrupole[1]*atom1.sphericalQuadrupole[1] +
atom1.sphericalQuadrupole[2]*atom1.sphericalQuadrupole[2] +
atom1.sphericalQuadrupole[3]*atom1.sphericalQuadrupole[3] +
atom1.sphericalQuadrupole[4]*atom1.sphericalQuadrupole[4]);
#else
real qii = 0;
#endif
real prefac = -EWALD_ALPHA/SQRT_PI;
real a2 = EWALD_ALPHA*EWALD_ALPHA;
real a4 = a2*a2;
energy += prefac*(cii + ((real)2/3)*a2*dii + ((real) 4/15)*a4*qii);
// self-torque for PME
real3 ui = atom1.inducedDipole+atom1.inducedDipolePolar;
atom1.torque += ((2/(real) 3)*(EWALD_ALPHA*EWALD_ALPHA*EWALD_ALPHA)/SQRT_PI)*cross(dipole, ui);
}
/**
* Compute electrostatic interactions.
*/
extern "C" __global__ void computeElectrostatics(
unsigned long long* __restrict__ forceBuffers, unsigned long long* __restrict__ torqueBuffers, mixed* __restrict__ energyBuffer,
const real4* __restrict__ posq, const uint2* __restrict__ covalentFlags, const unsigned int* __restrict__ polarizationGroupFlags,
const ushort2* __restrict__ exclusionTiles, unsigned int startTileIndex, unsigned int numTileIndices,
#ifdef USE_CUTOFF
const int* __restrict__ tiles, const unsigned int* __restrict__ interactionCount, real4 periodicBoxSize, real4 invPeriodicBoxSize,
real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, unsigned int maxTiles, const real4* __restrict__ blockCenter,
const unsigned int* __restrict__ interactingAtoms,
#endif
const real* __restrict__ sphericalDipole, const real* __restrict__ sphericalQuadrupole, const real* __restrict__ inducedDipole,
const real* __restrict__ inducedDipolePolar, const float2* __restrict__ dampingAndThole) {
const unsigned int totalWarps = (blockDim.x*gridDim.x)/TILE_SIZE;
const unsigned int warp = (blockIdx.x*blockDim.x+threadIdx.x)/TILE_SIZE;
const unsigned int tgx = threadIdx.x & (TILE_SIZE-1);
const unsigned int tbx = threadIdx.x - tgx;
mixed energy = 0;
__shared__ AtomData localData[THREAD_BLOCK_SIZE];
// First loop: process tiles that contain exclusions.
const unsigned int firstExclusionTile = FIRST_EXCLUSION_TILE+warp*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
const unsigned int lastExclusionTile = FIRST_EXCLUSION_TILE+(warp+1)*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
for (int pos = firstExclusionTile; pos < lastExclusionTile; pos++) {
const ushort2 tileIndices = exclusionTiles[pos];
const unsigned int x = tileIndices.x;
const unsigned int y = tileIndices.y;
AtomData data;
unsigned int atom1 = x*TILE_SIZE + tgx;
loadAtomData(data, atom1, posq, sphericalDipole, sphericalQuadrupole, inducedDipole, inducedDipolePolar, dampingAndThole);
data.force = make_real3(0);
data.torque = make_real3(0);
uint2 covalent = covalentFlags[pos*TILE_SIZE+tgx];
unsigned int polarizationGroup = polarizationGroupFlags[pos*TILE_SIZE+tgx];
if (x == y) {
// This tile is on the diagonal.
localData[threadIdx.x].pos = data.pos;
localData[threadIdx.x].q = data.q;
localData[threadIdx.x].sphericalDipole = data.sphericalDipole;
#ifdef INCLUDE_QUADRUPOLES
localData[threadIdx.x].sphericalQuadrupole[0] = data.sphericalQuadrupole[0];
localData[threadIdx.x].sphericalQuadrupole[1] = data.sphericalQuadrupole[1];
localData[threadIdx.x].sphericalQuadrupole[2] = data.sphericalQuadrupole[2];
localData[threadIdx.x].sphericalQuadrupole[3] = data.sphericalQuadrupole[3];
localData[threadIdx.x].sphericalQuadrupole[4] = data.sphericalQuadrupole[4];
#endif
localData[threadIdx.x].inducedDipole = data.inducedDipole;
localData[threadIdx.x].inducedDipolePolar = data.inducedDipolePolar;
localData[threadIdx.x].thole = data.thole;
localData[threadIdx.x].damp = data.damp;
// Compute forces.
for (unsigned int j = 0; j < TILE_SIZE; j++) {
int atom2 = y*TILE_SIZE+j;
if (atom1 != atom2 && atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) {
float d = computeDScaleFactor(polarizationGroup, j);
float p = computePScaleFactor(covalent, polarizationGroup, j);
float m = computeMScaleFactor(covalent, j);
computeOneInteraction(data, localData[tbx+j], true, d, p, m, 0.5f, energy, periodicBoxSize, invPeriodicBoxSize, periodicBoxVecX, periodicBoxVecY, periodicBoxVecZ);
}
}
if (atom1 < NUM_ATOMS)
computeSelfEnergyAndTorque(data, energy);
data.force *= -ENERGY_SCALE_FACTOR;
data.torque *= ENERGY_SCALE_FACTOR;
atomicAdd(&forceBuffers[atom1], static_cast<unsigned long long>((long long) (data.force.x*0x100000000)));
atomicAdd(&forceBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.y*0x100000000)));
atomicAdd(&forceBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.z*0x100000000)));
atomicAdd(&torqueBuffers[atom1], static_cast<unsigned long long>((long long) (data.torque.x*0x100000000)));
atomicAdd(&torqueBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.torque.y*0x100000000)));
atomicAdd(&torqueBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.torque.z*0x100000000)));
}
else {
// This is an off-diagonal tile.
unsigned int j = y*TILE_SIZE + tgx;
loadAtomData(localData[threadIdx.x], j, posq, sphericalDipole, sphericalQuadrupole, inducedDipole, inducedDipolePolar, dampingAndThole);
localData[threadIdx.x].force = make_real3(0);
localData[threadIdx.x].torque = make_real3(0);
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = y*TILE_SIZE+tj;
if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) {
float d = computeDScaleFactor(polarizationGroup, tj);
float p = computePScaleFactor(covalent, polarizationGroup, tj);
float m = computeMScaleFactor(covalent, tj);
computeOneInteraction(data, localData[tbx+tj], true, d, p, m, 1, energy, periodicBoxSize, invPeriodicBoxSize, periodicBoxVecX, periodicBoxVecY, periodicBoxVecZ);
}
tj = (tj + 1) & (TILE_SIZE - 1);
}
data.force *= -ENERGY_SCALE_FACTOR;
data.torque *= ENERGY_SCALE_FACTOR;
localData[threadIdx.x].force *= -ENERGY_SCALE_FACTOR;
localData[threadIdx.x].torque *= ENERGY_SCALE_FACTOR;
unsigned int offset = x*TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (data.force.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.z*0x100000000)));
atomicAdd(&torqueBuffers[offset], static_cast<unsigned long long>((long long) (data.torque.x*0x100000000)));
atomicAdd(&torqueBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.torque.y*0x100000000)));
atomicAdd(&torqueBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.torque.z*0x100000000)));
offset = y*TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.z*0x100000000)));
atomicAdd(&torqueBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].torque.x*0x100000000)));
atomicAdd(&torqueBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].torque.y*0x100000000)));
atomicAdd(&torqueBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].torque.z*0x100000000)));
}
}
// Second loop: tiles without exclusions, either from the neighbor list (with cutoff) or just enumerating all
// of them (no cutoff).
#ifdef USE_CUTOFF
const unsigned int numTiles = interactionCount[0];
if (numTiles > maxTiles)
return; // There wasn't enough memory for the neighbor list.
int pos = (int) (numTiles > maxTiles ? startTileIndex+warp*(long long)numTileIndices/totalWarps : warp*(long long)numTiles/totalWarps);
int end = (int) (numTiles > maxTiles ? startTileIndex+(warp+1)*(long long)numTileIndices/totalWarps : (warp+1)*(long long)numTiles/totalWarps);
#else
const unsigned int numTiles = numTileIndices;
int pos = (int) (startTileIndex+warp*(long long)numTiles/totalWarps);
int end = (int) (startTileIndex+(warp+1)*(long long)numTiles/totalWarps);
#endif
int skipBase = 0;
int currentSkipIndex = tbx;
__shared__ int atomIndices[THREAD_BLOCK_SIZE];
__shared__ volatile int skipTiles[THREAD_BLOCK_SIZE];
skipTiles[threadIdx.x] = -1;
while (pos < end) {
bool includeTile = true;
// Extract the coordinates of this tile.
int x, y;
#ifdef USE_CUTOFF
x = tiles[pos];
#else
y = (int) floor(NUM_BLOCKS+0.5f-SQRT((NUM_BLOCKS+0.5f)*(NUM_BLOCKS+0.5f)-2*pos));
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
if (x < y || x >= NUM_BLOCKS) { // Occasionally happens due to roundoff error.
y += (x < y ? -1 : 1);
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
}
// Skip over tiles that have exclusions, since they were already processed.
while (skipTiles[tbx+TILE_SIZE-1] < pos) {
if (skipBase+tgx < NUM_TILES_WITH_EXCLUSIONS) {
ushort2 tile = exclusionTiles[skipBase+tgx];
skipTiles[threadIdx.x] = tile.x + tile.y*NUM_BLOCKS - tile.y*(tile.y+1)/2;
}
else
skipTiles[threadIdx.x] = end;
skipBase += TILE_SIZE;
currentSkipIndex = tbx;
}
while (skipTiles[currentSkipIndex] < pos)
currentSkipIndex++;
includeTile = (skipTiles[currentSkipIndex] != pos);
#endif
if (includeTile) {
unsigned int atom1 = x*TILE_SIZE + tgx;
// Load atom data for this tile.
AtomData data;
loadAtomData(data, atom1, posq, sphericalDipole, sphericalQuadrupole, inducedDipole, inducedDipolePolar, dampingAndThole);
data.force = make_real3(0);
data.torque = make_real3(0);
#ifdef USE_CUTOFF
unsigned int j = interactingAtoms[pos*TILE_SIZE+tgx];
#else
unsigned int j = y*TILE_SIZE + tgx;
#endif
atomIndices[threadIdx.x] = j;
loadAtomData(localData[threadIdx.x], j, posq, sphericalDipole, sphericalQuadrupole, inducedDipole, inducedDipolePolar, dampingAndThole);
localData[threadIdx.x].force = make_real3(0);
localData[threadIdx.x].torque = make_real3(0);
// Compute forces.
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = atomIndices[tbx+tj];
if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) {
computeOneInteraction(data, localData[tbx+tj], false, 1, 1, 1, 1, energy, periodicBoxSize, invPeriodicBoxSize, periodicBoxVecX, periodicBoxVecY, periodicBoxVecZ);
}
tj = (tj + 1) & (TILE_SIZE - 1);
}
data.force *= -ENERGY_SCALE_FACTOR;
data.torque *= ENERGY_SCALE_FACTOR;
localData[threadIdx.x].force *= -ENERGY_SCALE_FACTOR;
localData[threadIdx.x].torque *= ENERGY_SCALE_FACTOR;
// Write results.
unsigned int offset = x*TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (data.force.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.force.z*0x100000000)));
atomicAdd(&torqueBuffers[offset], static_cast<unsigned long long>((long long) (data.torque.x*0x100000000)));
atomicAdd(&torqueBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.torque.y*0x100000000)));
atomicAdd(&torqueBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (data.torque.z*0x100000000)));
#ifdef USE_CUTOFF
offset = atomIndices[threadIdx.x];
#else
offset = y*TILE_SIZE + tgx;
#endif
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].force.z*0x100000000)));
atomicAdd(&torqueBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].torque.x*0x100000000)));
atomicAdd(&torqueBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].torque.y*0x100000000)));
atomicAdd(&torqueBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].torque.z*0x100000000)));
}
pos++;
}
energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy*ENERGY_SCALE_FACTOR;
}
|
bb19b01c5b0eb06b9a21ad08092b6332e55b12ee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifndef _SCAN_BEST_KERNEL_H_
#define _SCAN_BEST_KERNEL_H_
#define NUM_BANKS 16
#define LOG_NUM_BANKS 4
// Define this to more rigorously avoid bank conflicts, even at the lower (root) levels of the tree
//#define ZERO_BANK_CONFLICTS
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS + (index) >> (2 * LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#endif
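// Example with NUM_BANKS = 16: CONFLICT_FREE_OFFSET(i) = i >> 4, so shared-memory element i is stored at
// index i + i/16 -- element 15 stays at 15, element 16 moves to 17, element 32 to 34 -- which spreads
// accesses that would otherwise hit the same bank across different banks.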
#ifdef CHECK_BANK_CONFLICTS
#define TEMP(index) cutilBankChecker(temp, index)
#else
#define TEMP(index) temp[index]
#endif
///////////////////////////////////////////////////////////////////////////////
// Work-efficient compute implementation of scan, one thread per 2 elements
// Work-efficient: O(log(n)) steps, and O(n) adds.
// Also shared storage efficient: Uses n + n/NUM_BANKS shared memory -- no ping-ponging
// Also avoids most bank conflicts using single-element offsets every NUM_BANKS elements.
//
// In addition, If ZERO_BANK_CONFLICTS is defined, uses
// n + n/NUM_BANKS + n/(NUM_BANKS*NUM_BANKS)
// shared memory. If ZERO_BANK_CONFLICTS is defined, avoids ALL bank conflicts using
// single-element offsets every NUM_BANKS elements, plus additional single-element offsets
// after every NUM_BANKS^2 elements.
//
// Uses a balanced tree type algorithm. See Blelloch, 1990 "Prefix Sums
// and Their Applications", or Prins and Chatterjee PRAM course notes:
// http://www.cs.unc.edu/~prins/Classes/203/Handouts/pram.pdf
//
// This work-efficient version is based on the algorithm presented in Guy Blelloch's
// excellent paper "Prefix sums and their applications".
// http://www-2.cs.cmu.edu/afs/cs.cmu.edu/project/scandal/public/papers/CMU-CS-90-190.html
//
// Pro: Work Efficient, very few bank conflicts (or zero if ZERO_BANK_CONFLICTS is defined)
// Con: More instructions to compute bank-conflict-free shared memory addressing,
// and slightly more shared memory storage used.
//
// @param g_odata output data in global memory
// @param g_idata input data in global memory
// @param n input number of elements to scan from input data
__global__ void scan_best(float *g_odata, float *g_idata, int n)
{
// Dynamically allocated shared memory for scan kernels
extern __shared__ float temp[];
int thid = threadIdx.x;
int ai = thid;
int bi = thid + (n/2);
// compute spacing to avoid bank conflicts
int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
// Cache the computational window in shared memory
TEMP(ai + bankOffsetA) = g_idata[ai];
TEMP(bi + bankOffsetB) = g_idata[bi];
int offset = 1;
// build the sum in place up the tree
for (int d = n/2; d > 0; d >>= 1)
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
TEMP(bi) += TEMP(ai);
}
offset *= 2;
}
// scan back down the tree
// clear the last element
if (thid == 0)
{
int index = n - 1;
index += CONFLICT_FREE_OFFSET(index);
TEMP(index) = 0;
}
// traverse down the tree building the scan in place
for (int d = 1; d < n; d *= 2)
{
offset /= 2;
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
float t = TEMP(ai);
TEMP(ai) = TEMP(bi);
TEMP(bi) += t;
}
}
__syncthreads();
// write results to global memory
g_odata[ai] = TEMP(ai + bankOffsetA);
g_odata[bi] = TEMP(bi + bankOffsetB);
}
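// Editor's sketch (not part of the original sample): a minimal host-side
// launch of scan_best. The kernel scans a single block of n elements with
// n/2 threads and needs dynamic shared memory for at least n + n/NUM_BANKS
// floats (slightly more when ZERO_BANK_CONFLICTS is defined). The helper
// name and n = 512 are illustrative; d_odata/d_idata are device pointers of
// n floats each, error checking is omitted, and hip/hip_runtime.h is assumed
// to be included at the top of this hipified header.
inline void scan_best_example_launch(float *d_odata, float *d_idata)
{
    const int n = 512;  // power of two, processed by a single block
    const size_t smem = (n + n / NUM_BANKS) * sizeof(float);
    hipLaunchKernelGGL(scan_best, dim3(1), dim3(n / 2), smem, 0,
                       d_odata, d_idata, n);
    hipDeviceSynchronize();
}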
#endif // #ifndef _SCAN_BEST_KERNEL_H_
| bb19b01c5b0eb06b9a21ad08092b6332e55b12ee.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifndef _SCAN_BEST_KERNEL_H_
#define _SCAN_BEST_KERNEL_H_
#define NUM_BANKS 16
#define LOG_NUM_BANKS 4
// Define this to more rigorously avoid bank conflicts, even at the lower (root) levels of the tree
//#define ZERO_BANK_CONFLICTS
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS + (index) >> (2 * LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#endif
#ifdef CHECK_BANK_CONFLICTS
#define TEMP(index) cutilBankChecker(temp, index)
#else
#define TEMP(index) temp[index]
#endif
///////////////////////////////////////////////////////////////////////////////
// Work-efficient compute implementation of scan, one thread per 2 elements
// Work-efficient: O(log(n)) steps, and O(n) adds.
// Also shared storage efficient: Uses n + n/NUM_BANKS shared memory -- no ping-ponging
// Also avoids most bank conflicts using single-element offsets every NUM_BANKS elements.
//
// In addition, If ZERO_BANK_CONFLICTS is defined, uses
// n + n/NUM_BANKS + n/(NUM_BANKS*NUM_BANKS)
// shared memory. If ZERO_BANK_CONFLICTS is defined, avoids ALL bank conflicts using
// single-element offsets every NUM_BANKS elements, plus additional single-element offsets
// after every NUM_BANKS^2 elements.
//
// Uses a balanced tree type algorithm. See Blelloch, 1990 "Prefix Sums
// and Their Applications", or Prins and Chatterjee PRAM course notes:
// http://www.cs.unc.edu/~prins/Classes/203/Handouts/pram.pdf
//
// This work-efficient version is based on the algorithm presented in Guy Blelloch's
// Excellent paper "Prefix sums and their applications".
// http://www-2.cs.cmu.edu/afs/cs.cmu.edu/project/scandal/public/papers/CMU-CS-90-190.html
//
// Pro: Work Efficient, very few bank conflicts (or zero if ZERO_BANK_CONFLICTS is defined)
// Con: More instructions to compute bank-conflict-free shared memory addressing,
// and slightly more shared memory storage used.
//
// @param g_odata output data in global memory
// @param g_idata input data in global memory
// @param n input number of elements to scan from input data
__global__ void scan_best(float *g_odata, float *g_idata, int n)
{
// Dynamically allocated shared memory for scan kernels
extern __shared__ float temp[];
int thid = threadIdx.x;
int ai = thid;
int bi = thid + (n/2);
// compute spacing to avoid bank conflicts
int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
// Cache the computational window in shared memory
TEMP(ai + bankOffsetA) = g_idata[ai];
TEMP(bi + bankOffsetB) = g_idata[bi];
int offset = 1;
// build the sum in place up the tree
for (int d = n/2; d > 0; d >>= 1)
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
TEMP(bi) += TEMP(ai);
}
offset *= 2;
}
// scan back down the tree
// clear the last element
if (thid == 0)
{
int index = n - 1;
index += CONFLICT_FREE_OFFSET(index);
TEMP(index) = 0;
}
// traverse down the tree building the scan in place
for (int d = 1; d < n; d *= 2)
{
offset /= 2;
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
float t = TEMP(ai);
TEMP(ai) = TEMP(bi);
TEMP(bi) += t;
}
}
__syncthreads();
// write results to global memory
g_odata[ai] = TEMP(ai + bankOffsetA);
g_odata[bi] = TEMP(bi + bankOffsetB);
}
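// Editor's note (worked example, not in the original sample): the up-sweep /
// down-sweep phases for n = 8, ignoring the bank-conflict offsets (they only
// change where values live in shared memory, not the arithmetic):
//   input:          [3 1 7 0 4 1 6 3]
//   up-sweep d=4:   [3 4 7 7 4 5 6 9]      pairwise sums
//   up-sweep d=2:   [3 4 7 11 4 5 6 14]
//   up-sweep d=1:   [3 4 7 11 4 5 6 25]    total ends up in the last slot
//   clear last:     [3 4 7 11 4 5 6 0]
//   down-sweep d=1: [3 4 7 0 4 5 6 11]
//   down-sweep d=2: [3 0 7 4 4 11 6 16]
//   down-sweep d=4: [0 3 4 11 11 15 16 22] = exclusive prefix sum of the input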
#endif // #ifndef _SCAN_BEST_KERNEL_H_
|
fa6bf4eba4b9335aa14dccd176903af6f99e7e56.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Illinois Open Source License
University of Illinois/NCSA
Open Source License
Copyright 2009, University of Illinois. All rights reserved.
Developed by:
Innovative Systems Lab
National Center for Supercomputing Applications
http://www.ncsa.uiuc.edu/AboutUs/Directorates/ISL.html
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal with the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimers.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimers in the documentation and/or other materials provided with the distribution.
* Neither the names of Innovative Systems Lab and National Center for Supercomputing Applications, nor the names of its contributors may be used to endorse or promote products derived from this Software without specific prior written permission.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE.
*/
// Angular correlation function kernel
// Takes two sets of cartesians in g_idata1, g_idata2,
// computes dot products for all pairs, uses waterfall search
// to determine the appropriate bin for each dot product,
// and outputs bins in g_odata (packed 4 bins to 1 unsigned int)
// The problem is treated as a grid of dot products.
// Each thread block has 128 threads, and calculates the dot
// products for a 128x128 sub-grid.
#ifndef _ACF_KERNEL_H_
#define _ACF_KERNEL_H_
#define LOG2_GRID_SIZE 14
#include "model_io.cu"
#include "histogram_kernel.cu"
__device__ __constant__ double binbounds[NUMBINS-1];
// Similar to ACF kernel, but takes advantage of symmetry to cut computations down by half.
// Obviously, due to symmetry, it needs only one input set.
__global__ void ACFKernelSymm(cartesian g_idata1, unsigned int* g_odata)
{
extern __shared__ double3 sdata[];
int tx = (blockIdx.x<<7) + threadIdx.x;
int by = (blockIdx.y<<7);
if(blockIdx.x < blockIdx.y) { // All elements computed by block are above the main diagonal
by <<= (LOG2_GRID_SIZE - 2);
by += tx;
#pragma unroll
for(int i=0; i<128; i+=4) {
g_odata[by+(i<<(LOG2_GRID_SIZE - 2))] = 2088533116; // (124<<24) + (124<<16) + (124<<8) + (124);
}
}
else if(blockIdx.x > blockIdx.y) { // All elements computed by block are below the main diagonal
double temp;
unsigned int temp2;
double3 vec1, vec2;
vec1.x = g_idata1.x[tx];
vec1.y = g_idata1.y[tx];
vec1.z = g_idata1.z[tx];
sdata[threadIdx.x].x = g_idata1.x[by+threadIdx.x];
sdata[threadIdx.x].y = g_idata1.y[by+threadIdx.x];
sdata[threadIdx.x].z = g_idata1.z[by+threadIdx.x];
__syncthreads();
by <<= (LOG2_GRID_SIZE - 2);
by += tx;
#pragma unroll
for(int i=0; i<128; i+=4) {
temp2 = 0;
#pragma unroll
for(int j=0; j<4; j++) {
vec2 = sdata[i+j];
temp = vec1.x*vec2.x + vec1.y*vec2.y + vec1.z*vec2.z;
if(temp < binbounds[30]) temp2 += (124<<(j<<3));
else if(temp < binbounds[29]) temp2 += (120<<(j<<3));
else if(temp < binbounds[28]) temp2 += (116<<(j<<3));
else if(temp < binbounds[27]) temp2 += (112<<(j<<3));
else if(temp < binbounds[26]) temp2 += (108<<(j<<3));
else if(temp < binbounds[25]) temp2 += (104<<(j<<3));
else if(temp < binbounds[24]) temp2 += (100<<(j<<3));
else if(temp < binbounds[23]) temp2 += (96<<(j<<3));
else if(temp < binbounds[22]) temp2 += (92<<(j<<3));
else if(temp < binbounds[21]) temp2 += (88<<(j<<3));
else if(temp < binbounds[20]) temp2 += (84<<(j<<3));
else if(temp < binbounds[19]) temp2 += (80<<(j<<3));
else if(temp < binbounds[18]) temp2 += (76<<(j<<3));
else if(temp < binbounds[17]) temp2 += (72<<(j<<3));
else if(temp < binbounds[16]) temp2 += (68<<(j<<3));
else if(temp < binbounds[15]) temp2 += (64<<(j<<3));
else if(temp < binbounds[14]) temp2 += (60<<(j<<3));
else if(temp < binbounds[13]) temp2 += (56<<(j<<3));
else if(temp < binbounds[12]) temp2 += (52<<(j<<3));
else if(temp < binbounds[11]) temp2 += (48<<(j<<3));
else if(temp < binbounds[10]) temp2 += (44<<(j<<3));
else if(temp < binbounds[9]) temp2 += (40<<(j<<3));
else if(temp < binbounds[8]) temp2 += (36<<(j<<3));
else if(temp < binbounds[7]) temp2 += (32<<(j<<3));
else if(temp < binbounds[6]) temp2 += (28<<(j<<3));
else if(temp < binbounds[5]) temp2 += (24<<(j<<3));
else if(temp < binbounds[4]) temp2 += (20<<(j<<3));
else if(temp < binbounds[3]) temp2 += (16<<(j<<3));
else if(temp < binbounds[2]) temp2 += (12<<(j<<3));
else if(temp < binbounds[1]) temp2 += (8<<(j<<3));
else if(temp < binbounds[0]) temp2 += (4<<(j<<3));
else temp2 += (0<<(j<<3));
}
g_odata[by+(i<<(LOG2_GRID_SIZE - 2))] = temp2;
}
}
else { // blockIdx.x == blockIdx.y, so half of this block's dot products can be skipped
double temp;
unsigned int temp2;
double3 vec1, vec2;
vec1.x = g_idata1.x[tx];
vec1.y = g_idata1.y[tx];
vec1.z = g_idata1.z[tx];
sdata[threadIdx.x].x = g_idata1.x[by+threadIdx.x];
sdata[threadIdx.x].y = g_idata1.y[by+threadIdx.x];
sdata[threadIdx.x].z = g_idata1.z[by+threadIdx.x];
__syncthreads();
by <<= (LOG2_GRID_SIZE - 2);
by += tx;
#pragma unroll
for(int i=0; i<128; i+=4) {
temp2 = 0;
#pragma unroll
for(int j=0; j<4; j++) {
if(threadIdx.x <= i+j) temp2 += (124<<(j<<3));
else {
vec2 = sdata[i+j];
temp = vec1.x*vec2.x + vec1.y*vec2.y + vec1.z*vec2.z;
if(temp < binbounds[30]) temp2 += (124<<(j<<3));
else if(temp < binbounds[29]) temp2 += (120<<(j<<3));
else if(temp < binbounds[28]) temp2 += (116<<(j<<3));
else if(temp < binbounds[27]) temp2 += (112<<(j<<3));
else if(temp < binbounds[26]) temp2 += (108<<(j<<3));
else if(temp < binbounds[25]) temp2 += (104<<(j<<3));
else if(temp < binbounds[24]) temp2 += (100<<(j<<3));
else if(temp < binbounds[23]) temp2 += (96<<(j<<3));
else if(temp < binbounds[22]) temp2 += (92<<(j<<3));
else if(temp < binbounds[21]) temp2 += (88<<(j<<3));
else if(temp < binbounds[20]) temp2 += (84<<(j<<3));
else if(temp < binbounds[19]) temp2 += (80<<(j<<3));
else if(temp < binbounds[18]) temp2 += (76<<(j<<3));
else if(temp < binbounds[17]) temp2 += (72<<(j<<3));
else if(temp < binbounds[16]) temp2 += (68<<(j<<3));
else if(temp < binbounds[15]) temp2 += (64<<(j<<3));
else if(temp < binbounds[14]) temp2 += (60<<(j<<3));
else if(temp < binbounds[13]) temp2 += (56<<(j<<3));
else if(temp < binbounds[12]) temp2 += (52<<(j<<3));
else if(temp < binbounds[11]) temp2 += (48<<(j<<3));
else if(temp < binbounds[10]) temp2 += (44<<(j<<3));
else if(temp < binbounds[9]) temp2 += (40<<(j<<3));
else if(temp < binbounds[8]) temp2 += (36<<(j<<3));
else if(temp < binbounds[7]) temp2 += (32<<(j<<3));
else if(temp < binbounds[6]) temp2 += (28<<(j<<3));
else if(temp < binbounds[5]) temp2 += (24<<(j<<3));
else if(temp < binbounds[4]) temp2 += (20<<(j<<3));
else if(temp < binbounds[3]) temp2 += (16<<(j<<3));
else if(temp < binbounds[2]) temp2 += (12<<(j<<3));
else if(temp < binbounds[1]) temp2 += (8<<(j<<3));
else if(temp < binbounds[0]) temp2 += (4<<(j<<3));
else temp2 += (0<<(j<<3));
}
}
g_odata[by+(i<<(LOG2_GRID_SIZE - 2))] = temp2;
}
}
}
__global__ void ACFKernel(cartesian g_idata1, cartesian g_idata2, unsigned int* g_odata)
{
// Shared memory used to store vectors from g_idata2
extern __shared__ double3 sdata[];
double temp;
unsigned int temp2;
double3 vec1, vec2;
// tx is the "x position" in the grid
int tx = (blockIdx.x<<7) + threadIdx.x;
// "y position" depends on i (see below), this is just y block
int by = (blockIdx.y<<7);
// Is coalesced, as cartesians are aligned properly and there are no conflicts.
vec1.x = g_idata2.x[tx];
vec1.y = g_idata2.y[tx];
vec1.z = g_idata2.z[tx];
// Then reads one unique vector from global to shared per thread, the "shared vectors".
// Is coalesced for the same reason.
sdata[threadIdx.x].x = g_idata1.x[by+threadIdx.x];
sdata[threadIdx.x].y = g_idata1.y[by+threadIdx.x];
sdata[threadIdx.x].z = g_idata1.z[by+threadIdx.x];
// Each thread will compute the dot product of its assigned vector with every shared vector.
// Ensure all reads are finished before using them for any calculations
__syncthreads();
// Simplify some notation later on.
by <<= (LOG2_GRID_SIZE - 2);
by += tx;
// Unrolling offers significant speed-up
#pragma unroll
for(int i=0; i<128; i+=4) { // Iterate through 128 vectors in sdata
temp2 = 0;
#pragma unroll
for(int j=0; j<4; j++) { // 4 vectors per 1 int output
// sdata broadcasts sdata[i+j] to all threads in a block; so unnecessary bank conflicts are avoided.
vec2 = sdata[i+j];
temp = vec1.x*vec2.x + vec1.y*vec2.y + vec1.z*vec2.z;
// This follows the form (binNum << (elementNum << 3)).
// binNum is the bin we are assigning, elementNum is j, and by summing we pack four bin assignments to one int.
if(temp < binbounds[30]) temp2 += (124<<(j<<3));
else if(temp < binbounds[29]) temp2 += (120<<(j<<3));
else if(temp < binbounds[28]) temp2 += (116<<(j<<3));
else if(temp < binbounds[27]) temp2 += (112<<(j<<3));
else if(temp < binbounds[26]) temp2 += (108<<(j<<3));
else if(temp < binbounds[25]) temp2 += (104<<(j<<3));
else if(temp < binbounds[24]) temp2 += (100<<(j<<3));
else if(temp < binbounds[23]) temp2 += (96<<(j<<3));
else if(temp < binbounds[22]) temp2 += (92<<(j<<3));
else if(temp < binbounds[21]) temp2 += (88<<(j<<3));
else if(temp < binbounds[20]) temp2 += (84<<(j<<3));
else if(temp < binbounds[19]) temp2 += (80<<(j<<3));
else if(temp < binbounds[18]) temp2 += (76<<(j<<3));
else if(temp < binbounds[17]) temp2 += (72<<(j<<3));
else if(temp < binbounds[16]) temp2 += (68<<(j<<3));
else if(temp < binbounds[15]) temp2 += (64<<(j<<3));
else if(temp < binbounds[14]) temp2 += (60<<(j<<3));
else if(temp < binbounds[13]) temp2 += (56<<(j<<3));
else if(temp < binbounds[12]) temp2 += (52<<(j<<3));
else if(temp < binbounds[11]) temp2 += (48<<(j<<3));
else if(temp < binbounds[10]) temp2 += (44<<(j<<3));
else if(temp < binbounds[9]) temp2 += (40<<(j<<3));
else if(temp < binbounds[8]) temp2 += (36<<(j<<3));
else if(temp < binbounds[7]) temp2 += (32<<(j<<3));
else if(temp < binbounds[6]) temp2 += (28<<(j<<3));
else if(temp < binbounds[5]) temp2 += (24<<(j<<3));
else if(temp < binbounds[4]) temp2 += (20<<(j<<3));
else if(temp < binbounds[3]) temp2 += (16<<(j<<3));
else if(temp < binbounds[2]) temp2 += (12<<(j<<3));
else if(temp < binbounds[1]) temp2 += (8<<(j<<3));
else if(temp < binbounds[0]) temp2 += (4<<(j<<3));
else temp2 += (0<<(j<<3));
}
g_odata[by+(i<<(LOG2_GRID_SIZE - 2))] = temp2;
}
}
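// Editor's sketch (not part of the original source): decoding one packed
// output word on the host. Each byte holds 4 * binIndex (0, 4, ..., 124),
// consistent with the 31 thresholds in binbounds, i.e. 32 bins; the helper
// name below is invented for illustration.
inline void unpack_bins(unsigned int packed, int bin_index[4])
{
    for (int j = 0; j < 4; ++j)
        bin_index[j] = (int)((packed >> (j << 3)) & 0xFF) >> 2;  // byte value / 4
    // e.g. packed == 2088533116 (all bytes 124) decodes to {31, 31, 31, 31}
}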
#endif
| fa6bf4eba4b9335aa14dccd176903af6f99e7e56.cu | /*
Illinois Open Source License
University of Illinois/NCSA
Open Source License
Copyright © 2009, University of Illinois. All rights reserved.
Developed by:
Innovative Systems Lab
National Center for Supercomputing Applications
http://www.ncsa.uiuc.edu/AboutUs/Directorates/ISL.html
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal with the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimers.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimers in the documentation and/or other materials provided with the distribution.
* Neither the names of Innovative Systems Lab and National Center for Supercomputing Applications, nor the names of its contributors may be used to endorse or promote products derived from this Software without specific prior written permission.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE.
*/
// Angular correlation function kernel
// Takes two sets of cartesians in g_idata1, g_idata2,
// computes dot products for all pairs, uses waterfall search
// to determine the appropriate bin for each dot product,
// and outputs bins in g_odata (packed 4 bins to 1 unsigned int)
// The problem is treated as a grid of dot products.
// Each thread block has 128 threads, and calculates the dot
// products for a 128x128 sub-grid.
#ifndef _ACF_KERNEL_H_
#define _ACF_KERNEL_H_
#define LOG2_GRID_SIZE 14
#include "model_io.cu"
#include "histogram_kernel.cu"
__device__ __constant__ double binbounds[NUMBINS-1];
// Similar to ACF kernel, but takes advantage of symmetry to cut computations down by half.
// Obviously, due to symmetry, it needs only one input set.
__global__ void ACFKernelSymm(cartesian g_idata1, unsigned int* g_odata)
{
extern __shared__ double3 sdata[];
int tx = (blockIdx.x<<7) + threadIdx.x;
int by = (blockIdx.y<<7);
if(blockIdx.x < blockIdx.y) { // All elements computed by block are above the main diagonal
by <<= (LOG2_GRID_SIZE - 2);
by += tx;
#pragma unroll
for(int i=0; i<128; i+=4) {
g_odata[by+(i<<(LOG2_GRID_SIZE - 2))] = 2088533116; // (124<<24) + (124<<16) + (124<<8) + (124);
}
}
else if(blockIdx.x > blockIdx.y) { // All elements computed by block are below the main diagonal
double temp;
unsigned int temp2;
double3 vec1, vec2;
vec1.x = g_idata1.x[tx];
vec1.y = g_idata1.y[tx];
vec1.z = g_idata1.z[tx];
sdata[threadIdx.x].x = g_idata1.x[by+threadIdx.x];
sdata[threadIdx.x].y = g_idata1.y[by+threadIdx.x];
sdata[threadIdx.x].z = g_idata1.z[by+threadIdx.x];
__syncthreads();
by <<= (LOG2_GRID_SIZE - 2);
by += tx;
#pragma unroll
for(int i=0; i<128; i+=4) {
temp2 = 0;
#pragma unroll
for(int j=0; j<4; j++) {
vec2 = sdata[i+j];
temp = vec1.x*vec2.x + vec1.y*vec2.y + vec1.z*vec2.z;
if(temp < binbounds[30]) temp2 += (124<<(j<<3));
else if(temp < binbounds[29]) temp2 += (120<<(j<<3));
else if(temp < binbounds[28]) temp2 += (116<<(j<<3));
else if(temp < binbounds[27]) temp2 += (112<<(j<<3));
else if(temp < binbounds[26]) temp2 += (108<<(j<<3));
else if(temp < binbounds[25]) temp2 += (104<<(j<<3));
else if(temp < binbounds[24]) temp2 += (100<<(j<<3));
else if(temp < binbounds[23]) temp2 += (96<<(j<<3));
else if(temp < binbounds[22]) temp2 += (92<<(j<<3));
else if(temp < binbounds[21]) temp2 += (88<<(j<<3));
else if(temp < binbounds[20]) temp2 += (84<<(j<<3));
else if(temp < binbounds[19]) temp2 += (80<<(j<<3));
else if(temp < binbounds[18]) temp2 += (76<<(j<<3));
else if(temp < binbounds[17]) temp2 += (72<<(j<<3));
else if(temp < binbounds[16]) temp2 += (68<<(j<<3));
else if(temp < binbounds[15]) temp2 += (64<<(j<<3));
else if(temp < binbounds[14]) temp2 += (60<<(j<<3));
else if(temp < binbounds[13]) temp2 += (56<<(j<<3));
else if(temp < binbounds[12]) temp2 += (52<<(j<<3));
else if(temp < binbounds[11]) temp2 += (48<<(j<<3));
else if(temp < binbounds[10]) temp2 += (44<<(j<<3));
else if(temp < binbounds[9]) temp2 += (40<<(j<<3));
else if(temp < binbounds[8]) temp2 += (36<<(j<<3));
else if(temp < binbounds[7]) temp2 += (32<<(j<<3));
else if(temp < binbounds[6]) temp2 += (28<<(j<<3));
else if(temp < binbounds[5]) temp2 += (24<<(j<<3));
else if(temp < binbounds[4]) temp2 += (20<<(j<<3));
else if(temp < binbounds[3]) temp2 += (16<<(j<<3));
else if(temp < binbounds[2]) temp2 += (12<<(j<<3));
else if(temp < binbounds[1]) temp2 += (8<<(j<<3));
else if(temp < binbounds[0]) temp2 += (4<<(j<<3));
else temp2 += (0<<(j<<3));
}
g_odata[by+(i<<(LOG2_GRID_SIZE - 2))] = temp2;
}
}
else { // blockIdx.x == blockIdx.y, so half of this block's dot products can be skipped
double temp;
unsigned int temp2;
double3 vec1, vec2;
vec1.x = g_idata1.x[tx];
vec1.y = g_idata1.y[tx];
vec1.z = g_idata1.z[tx];
sdata[threadIdx.x].x = g_idata1.x[by+threadIdx.x];
sdata[threadIdx.x].y = g_idata1.y[by+threadIdx.x];
sdata[threadIdx.x].z = g_idata1.z[by+threadIdx.x];
__syncthreads();
by <<= (LOG2_GRID_SIZE - 2);
by += tx;
#pragma unroll
for(int i=0; i<128; i+=4) {
temp2 = 0;
#pragma unroll
for(int j=0; j<4; j++) {
if(threadIdx.x <= i+j) temp2 += (124<<(j<<3));
else {
vec2 = sdata[i+j];
temp = vec1.x*vec2.x + vec1.y*vec2.y + vec1.z*vec2.z;
if(temp < binbounds[30]) temp2 += (124<<(j<<3));
else if(temp < binbounds[29]) temp2 += (120<<(j<<3));
else if(temp < binbounds[28]) temp2 += (116<<(j<<3));
else if(temp < binbounds[27]) temp2 += (112<<(j<<3));
else if(temp < binbounds[26]) temp2 += (108<<(j<<3));
else if(temp < binbounds[25]) temp2 += (104<<(j<<3));
else if(temp < binbounds[24]) temp2 += (100<<(j<<3));
else if(temp < binbounds[23]) temp2 += (96<<(j<<3));
else if(temp < binbounds[22]) temp2 += (92<<(j<<3));
else if(temp < binbounds[21]) temp2 += (88<<(j<<3));
else if(temp < binbounds[20]) temp2 += (84<<(j<<3));
else if(temp < binbounds[19]) temp2 += (80<<(j<<3));
else if(temp < binbounds[18]) temp2 += (76<<(j<<3));
else if(temp < binbounds[17]) temp2 += (72<<(j<<3));
else if(temp < binbounds[16]) temp2 += (68<<(j<<3));
else if(temp < binbounds[15]) temp2 += (64<<(j<<3));
else if(temp < binbounds[14]) temp2 += (60<<(j<<3));
else if(temp < binbounds[13]) temp2 += (56<<(j<<3));
else if(temp < binbounds[12]) temp2 += (52<<(j<<3));
else if(temp < binbounds[11]) temp2 += (48<<(j<<3));
else if(temp < binbounds[10]) temp2 += (44<<(j<<3));
else if(temp < binbounds[9]) temp2 += (40<<(j<<3));
else if(temp < binbounds[8]) temp2 += (36<<(j<<3));
else if(temp < binbounds[7]) temp2 += (32<<(j<<3));
else if(temp < binbounds[6]) temp2 += (28<<(j<<3));
else if(temp < binbounds[5]) temp2 += (24<<(j<<3));
else if(temp < binbounds[4]) temp2 += (20<<(j<<3));
else if(temp < binbounds[3]) temp2 += (16<<(j<<3));
else if(temp < binbounds[2]) temp2 += (12<<(j<<3));
else if(temp < binbounds[1]) temp2 += (8<<(j<<3));
else if(temp < binbounds[0]) temp2 += (4<<(j<<3));
else temp2 += (0<<(j<<3));
}
}
g_odata[by+(i<<(LOG2_GRID_SIZE - 2))] = temp2;
}
}
}
__global__ void ACFKernel(cartesian g_idata1, cartesian g_idata2, unsigned int* g_odata)
{
// Shared memory used to store vectors from g_idata2
extern __shared__ double3 sdata[];
double temp;
unsigned int temp2;
double3 vec1, vec2;
// tx is the "x position" in the grid
int tx = (blockIdx.x<<7) + threadIdx.x;
// "y position" depends on i (see below), this is just y block
int by = (blockIdx.y<<7);
// Is coalesced, as cartesians are aligned properly and there are no conflicts.
vec1.x = g_idata2.x[tx];
vec1.y = g_idata2.y[tx];
vec1.z = g_idata2.z[tx];
// Then reads one unique vector from global to shared per thread, the "shared vectors".
// Is coalesced for the same reason.
sdata[threadIdx.x].x = g_idata1.x[by+threadIdx.x];
sdata[threadIdx.x].y = g_idata1.y[by+threadIdx.x];
sdata[threadIdx.x].z = g_idata1.z[by+threadIdx.x];
// Each thread will compute the dot product of its assigned vector with every shared vector.
// Ensure all reads are finished before using them for any calculations
__syncthreads();
// Simplify some notation later on.
by <<= (LOG2_GRID_SIZE - 2);
by += tx;
// Unrolling offers significant speed-up
#pragma unroll
for(int i=0; i<128; i+=4) { // Iterate through 128 vectors in sdata
temp2 = 0;
#pragma unroll
for(int j=0; j<4; j++) { // 4 vectors per 1 int output
// sdata broadcasts sdata[i+j] to all threads in a block; so unnecessary bank conflicts are avoided.
vec2 = sdata[i+j];
temp = vec1.x*vec2.x + vec1.y*vec2.y + vec1.z*vec2.z;
// This follows the form (binNum << (elementNum << 3)).
// binNum is the bin we are assigning, elementNum is j, and by summing we pack four bin assignments to one int.
if(temp < binbounds[30]) temp2 += (124<<(j<<3));
else if(temp < binbounds[29]) temp2 += (120<<(j<<3));
else if(temp < binbounds[28]) temp2 += (116<<(j<<3));
else if(temp < binbounds[27]) temp2 += (112<<(j<<3));
else if(temp < binbounds[26]) temp2 += (108<<(j<<3));
else if(temp < binbounds[25]) temp2 += (104<<(j<<3));
else if(temp < binbounds[24]) temp2 += (100<<(j<<3));
else if(temp < binbounds[23]) temp2 += (96<<(j<<3));
else if(temp < binbounds[22]) temp2 += (92<<(j<<3));
else if(temp < binbounds[21]) temp2 += (88<<(j<<3));
else if(temp < binbounds[20]) temp2 += (84<<(j<<3));
else if(temp < binbounds[19]) temp2 += (80<<(j<<3));
else if(temp < binbounds[18]) temp2 += (76<<(j<<3));
else if(temp < binbounds[17]) temp2 += (72<<(j<<3));
else if(temp < binbounds[16]) temp2 += (68<<(j<<3));
else if(temp < binbounds[15]) temp2 += (64<<(j<<3));
else if(temp < binbounds[14]) temp2 += (60<<(j<<3));
else if(temp < binbounds[13]) temp2 += (56<<(j<<3));
else if(temp < binbounds[12]) temp2 += (52<<(j<<3));
else if(temp < binbounds[11]) temp2 += (48<<(j<<3));
else if(temp < binbounds[10]) temp2 += (44<<(j<<3));
else if(temp < binbounds[9]) temp2 += (40<<(j<<3));
else if(temp < binbounds[8]) temp2 += (36<<(j<<3));
else if(temp < binbounds[7]) temp2 += (32<<(j<<3));
else if(temp < binbounds[6]) temp2 += (28<<(j<<3));
else if(temp < binbounds[5]) temp2 += (24<<(j<<3));
else if(temp < binbounds[4]) temp2 += (20<<(j<<3));
else if(temp < binbounds[3]) temp2 += (16<<(j<<3));
else if(temp < binbounds[2]) temp2 += (12<<(j<<3));
else if(temp < binbounds[1]) temp2 += (8<<(j<<3));
else if(temp < binbounds[0]) temp2 += (4<<(j<<3));
else temp2 += (0<<(j<<3));
}
g_odata[by+(i<<(LOG2_GRID_SIZE - 2))] = temp2;
}
}
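// Editor's note (illustrative; the real host code lives elsewhere in this
// project): with LOG2_GRID_SIZE = 14 each set holds 2^14 = 16384 points, so a
// full launch uses a 128 x 128 grid of 128-thread blocks and
// 128 * sizeof(double3) bytes of dynamic shared memory, e.g.
//
//   dim3 grid(128, 128);
//   ACFKernel<<<grid, 128, 128 * sizeof(double3)>>>(d_set1, d_set2, d_bins);
//
// where d_bins holds (16384 * 16384) / 4 unsigned ints (four packed bins per
// word) and cartesian is the struct-of-arrays type from model_io.cu with
// separate x, y, z device pointers, as used by the kernels above.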
#endif
|
b83e6a92181c7706c26d6208645a341c57771aad.hip | // !!! This is a file automatically generated by hipify!!!
/*
* File: gpu.cpp
* Author: aliendo
*
 * Created on December 26, 2013, 11:23 AM
*/
#include "gpu.h"
#include <mpi.h>
gpu::gpu() {
setDeviceCount();
if (present){
if (deviceCount == 0) {
gpu(false);
} else {
setDeviceProperties();
}
} else {
deviceCount = 0;
name = new string[1];
name[0]=" ";
major = new int[1];
major[0]=0;
minor = new int[1];
minor[0]=0;
totalGlobalMem = new unsigned int[1];
totalGlobalMem[0]=0;
multiProcessorCount = new int[1];
multiProcessorCount[0]=0;
numCores = new int[1];
numCores[0]=0;
totalConstMem = new unsigned int[1];
totalConstMem[0]=0;
sharedMemPerBlock = new unsigned int[1];
sharedMemPerBlock[0]=0;
regsPerBlock = new int[1];
regsPerBlock[0]=0;
warpSize = new int[1];
warpSize[0]=0;
maxThreadsPerBlock = new int[1];
maxThreadsPerBlock[0]=0;
maxThreadsDim0 = new int[1];
maxThreadsDim0[0]=0;
maxThreadsDim1 = new int[1];
maxThreadsDim1[0]=0;
maxThreadsDim2 = new int[1];
maxThreadsDim2[0]=0;
maxGridSize0 = new int[1];
maxGridSize0[0]=0;
maxGridSize1 = new int[1];
maxGridSize1[0]=0;
maxGridSize2 = new int[1];
maxGridSize2[0]=0;
memPitch = new unsigned int[1];
memPitch[0]=0;
textureAlignment = new unsigned int[1];
textureAlignment[0]=0;
clockRate = new float[1];
clockRate[0]=0;
deviceOverlap = new bool[1];
deviceOverlap[0]=0;
}
setNatr();
setValueatr();
setNameatr();
}
gpu::gpu(bool verify) {
if (!verify){
present=false;
deviceCount = 0;
name = new string[1];
name[0]=" ";
major = new int[1];
major[0]=0;
minor = new int[1];
minor[0]=0;
totalGlobalMem = new unsigned int[1];
totalGlobalMem[0]=0;
multiProcessorCount = new int[1];
multiProcessorCount[0]=0;
numCores = new int[1];
numCores[0]=0;
totalConstMem = new unsigned int[1];
totalConstMem[0]=0;
sharedMemPerBlock = new unsigned int[1];
sharedMemPerBlock[0]=0;
regsPerBlock = new int[1];
regsPerBlock[0]=0;
warpSize = new int[1];
warpSize[0]=0;
maxThreadsPerBlock = new int[1];
maxThreadsPerBlock[0]=0;
maxThreadsDim0 = new int[1];
maxThreadsDim0[0]=0;
maxThreadsDim1 = new int[1];
maxThreadsDim1[0]=0;
maxThreadsDim2 = new int[1];
maxThreadsDim2[0]=0;
maxGridSize0 = new int[1];
maxGridSize0[0]=0;
maxGridSize1 = new int[1];
maxGridSize1[0]=0;
maxGridSize2 = new int[1];
maxGridSize2[0]=0;
memPitch = new unsigned int[1];
memPitch[0]=0;
textureAlignment = new unsigned int[1];
textureAlignment[0]=0;
clockRate = new float[1];
clockRate[0]=0;
deviceOverlap = new bool[1];
deviceOverlap[0]=0;
} else {
gpu();
}
}
gpu::gpu(void *buf, int size){
int offset=0,aux,nelem=1;
int auxsize;
MPI::INT.Unpack(buf,size,&aux,1,offset,MPI::COMM_WORLD);
if(aux==1) present=true;
else present=false;
MPI::INT.Unpack(buf,size,&deviceCount,1,offset,MPI::COMM_WORLD);
if(deviceCount!=0) nelem=deviceCount;
name = new string[nelem];
major = new int[nelem];
minor = new int[nelem];
totalGlobalMem = new unsigned int[nelem];
multiProcessorCount = new int[nelem];
numCores = new int[nelem];
totalConstMem = new unsigned int[nelem];
sharedMemPerBlock = new unsigned int[nelem];
regsPerBlock = new int[nelem];
warpSize = new int[nelem];
maxThreadsPerBlock = new int[nelem];
maxThreadsDim0 = new int[nelem];
maxThreadsDim1 = new int[nelem];
maxThreadsDim2 = new int[nelem];
maxGridSize0 = new int[nelem];
maxGridSize1 = new int[nelem];
maxGridSize2 = new int[nelem];
memPitch = new unsigned int[nelem];
textureAlignment = new unsigned int[nelem];
clockRate = new float[nelem];
deviceOverlap = new bool[nelem];
MPI::INT.Unpack(buf,size,&auxsize,1,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,name,auxsize,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,major,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,minor,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,totalGlobalMem,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,multiProcessorCount,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,numCores,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,totalConstMem,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,sharedMemPerBlock,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,regsPerBlock,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,warpSize,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,maxThreadsPerBlock,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,maxThreadsDim0,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,maxThreadsDim1,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,maxThreadsDim2,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,maxGridSize0,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,maxGridSize1,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,maxGridSize2,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,memPitch,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,textureAlignment,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,clockRate,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,deviceOverlap,nelem,offset,MPI::COMM_WORLD);
}
void gpu::pack(void *buf, int size){
int offset=0,aux,nelem=1;
int auxsize=name[0].length();
if(present) aux=1;
else aux=0;
MPI::INT.Pack(&aux,1,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(&deviceCount,1,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(&auxsize,1,buf,size,offset,MPI::COMM_WORLD);
if(deviceCount!=0) nelem=deviceCount;
MPI::INT.Pack(name,auxsize*nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(major,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(minor,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(totalGlobalMem,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(multiProcessorCount,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(numCores,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(totalConstMem,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(sharedMemPerBlock,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(regsPerBlock,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(warpSize,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(maxThreadsPerBlock,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(maxThreadsDim0,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(maxThreadsDim1,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(maxThreadsDim2,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(maxGridSize0,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(maxGridSize1,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(maxGridSize2,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(memPitch,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(textureAlignment,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(clockRate,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(deviceOverlap,nelem,buf,size,offset,MPI::COMM_WORLD);
}
void gpu::complete(){
setNatr();
setValueatr();
setNameatr();
}
void gpu::setPresent(){
int auxsystem;
auxsystem=system("which nvcc > nul 2>&1");
if (auxsystem==0)
present=true;
else
present=false;
}
bool gpu::getPresent(){
return present;
}
void gpu::setDeviceProperties(){
#if CUDA
int dev;
hipDeviceProp_t deviceProp;
name = new string[deviceCount];
major = new int[deviceCount];
minor = new int[deviceCount];
totalGlobalMem = new unsigned int[deviceCount];
multiProcessorCount = new int[deviceCount];
numCores = new int[deviceCount];
totalConstMem = new unsigned int[deviceCount];
sharedMemPerBlock = new unsigned int[deviceCount];
regsPerBlock = new int[deviceCount];
warpSize = new int[deviceCount];
maxThreadsPerBlock = new int[deviceCount];
maxThreadsDim0 = new int[deviceCount];
maxThreadsDim1 = new int[deviceCount];
maxThreadsDim2 = new int[deviceCount];
maxGridSize0 = new int[deviceCount];
maxGridSize1 = new int[deviceCount];
maxGridSize2 = new int[deviceCount];
memPitch = new unsigned int[deviceCount];
textureAlignment = new unsigned int[deviceCount];
clockRate = new float[deviceCount];
deviceOverlap = new bool[deviceCount];
for (dev = 0; dev < deviceCount; ++dev) {
hipGetDeviceProperties(&deviceProp, dev);
if (dev == 0) {
if (deviceProp.major == 9999 && deviceProp.minor == 9999){
gpu(false);
}
}
name[dev]=deviceProp.name;
major[dev]=deviceProp.major;
minor[dev]=deviceProp.minor;
totalGlobalMem[dev]=(unsigned int)deviceProp.totalGlobalMem;
#if CUDART_VERSION >= 2000
multiProcessorCount[dev]=deviceProp.multiProcessorCount;
numCores[dev]=8 * deviceProp.multiProcessorCount;
#else
multiProcessorCount[dev]=0;
numCores[dev]=0;
#endif
totalConstMem[dev]=(unsigned int)deviceProp.totalConstMem;
sharedMemPerBlock[dev]=(unsigned int)deviceProp.sharedMemPerBlock;
regsPerBlock[dev]=deviceProp.regsPerBlock;
warpSize[dev]=deviceProp.warpSize;
maxThreadsPerBlock[dev]=deviceProp.maxThreadsPerBlock;
maxThreadsDim0[dev]=deviceProp.maxThreadsDim[0];
maxThreadsDim1[dev]=deviceProp.maxThreadsDim[1];
maxThreadsDim2[dev]=deviceProp.maxThreadsDim[2];
maxGridSize0[dev]=deviceProp.maxGridSize[0];
maxGridSize1[dev]=deviceProp.maxGridSize[1];
maxGridSize2[dev]=deviceProp.maxGridSize[2];
memPitch[dev]=(unsigned int)deviceProp.memPitch;
textureAlignment[dev]=(unsigned int)deviceProp.textureAlignment;
clockRate[dev]=deviceProp.clockRate * 1e-6f;
#if CUDART_VERSION >= 2000
deviceOverlap[dev]=deviceProp.deviceOverlap;
#else
deviceOverlap[dev]=false;
#endif
}
#endif
}
void gpu::gpuCopy(gpu aCopiar){
int dev;
present=aCopiar.present;
if(present){
deviceCount=aCopiar.deviceCount;
for (dev = 0; dev < deviceCount; ++dev) {
name[dev]=aCopiar.name[dev];
major[dev]=aCopiar.major[dev];
minor[dev]=aCopiar.minor[dev];
totalGlobalMem[dev]=aCopiar.totalGlobalMem[dev];
multiProcessorCount[dev]=aCopiar.multiProcessorCount[dev];
numCores[dev]=aCopiar.numCores[dev];
totalConstMem[dev]=aCopiar.totalConstMem[dev];
sharedMemPerBlock[dev]=aCopiar.sharedMemPerBlock[dev];
regsPerBlock[dev]=aCopiar.regsPerBlock[dev];
warpSize[dev]=aCopiar.warpSize[dev];
maxThreadsPerBlock[dev]=aCopiar.maxThreadsPerBlock[dev];
maxThreadsDim0[dev]=aCopiar.maxThreadsDim0[dev];
maxThreadsDim1[dev]=aCopiar.maxThreadsDim1[dev];
maxThreadsDim2[dev]=aCopiar.maxThreadsDim2[dev];
maxGridSize0[dev]=aCopiar.maxGridSize0[dev];
maxGridSize1[dev]=aCopiar.maxGridSize1[dev];
maxGridSize2[dev]=aCopiar.maxGridSize2[dev];
memPitch[dev]=aCopiar.memPitch[dev];
textureAlignment[dev]=aCopiar.textureAlignment[dev];
clockRate[dev]=aCopiar.clockRate[dev];
deviceOverlap[dev]=aCopiar.deviceOverlap[dev];
}
}
complete();
}
// To discover the class
void gpu::setNatr(){
if (present){
natr=23;
} else {
natr=1;
}
}
void gpu::setValueatr(){
valueatr = new string[natr];
stringstream auxss;
if(present){
auxss << present << ",";
valueatr[0]=auxss.str();
auxss.str(string());
auxss << deviceCount << ",";
valueatr[1]=auxss.str();
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << name[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[2]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << major[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[3]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << minor[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[4]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << totalGlobalMem[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[5]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << multiProcessorCount[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[6]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << numCores[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[7]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << totalConstMem[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[8]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << sharedMemPerBlock[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[9]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << regsPerBlock[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[10]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << warpSize[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[11]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << maxThreadsPerBlock[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[12]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << maxThreadsDim0[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[13]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << maxThreadsDim1[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[14]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << maxThreadsDim2[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[15]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << maxGridSize0[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[16]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << maxGridSize1[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[17]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << maxGridSize2[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[18]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << memPitch[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[19]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << textureAlignment[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[20]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << clockRate[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[21]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << deviceOverlap[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[22]=auxss.str().append("]");
auxss.str(string());
} else {
auxss << present ;
valueatr[0]=auxss.str();
auxss.str(string());
}
}
void gpu::setNameatr(){
nameatr = new string[natr];
nameatr[0]="present";
if(present){
nameatr[1]="deviceCount";
nameatr[2]="name";
nameatr[3]="major";
nameatr[4]="minor";
nameatr[5]="totalGlobalMem";
nameatr[6]="multiProcessorCount";
nameatr[7]="numCores";
nameatr[8]="totalConstMem";
nameatr[9]="sharedMemPerBlock";
nameatr[10]="regsPerBlock";
nameatr[11]="warpSize";
nameatr[12]="maxThreadsPerBlock";
nameatr[13]="maxThreadsDim0";
nameatr[14]="maxThreadsDim1";
nameatr[15]="maxThreadsDim2";
nameatr[16]="maxGridSize0";
nameatr[17]="maxGridSize1";
nameatr[18]="maxGridSize2";
nameatr[19]="memPitch";
nameatr[20]="textureAlignment";
nameatr[21]="clockRate";
nameatr[22]="deviceOverlap";
}
}
int gpu::getNatr(){
return natr;
}
string gpu::getValueatr(int n){
if (n<getNatr())
return valueatr[n];
else
exit(EXIT_FAILURE);
}
string gpu::getNameatr(int n){
if (n<getNatr())
return nameatr[n];
else
exit(EXIT_FAILURE);
}
/*gpu::gpu(const gpu& orig) {
}
gpu::~gpu() {
}*/
void gpu::setDeviceCount(){
#if CUDA
hipGetDeviceCount(&deviceCount);
if(deviceCount==0) present=false;
else present=true;
#else
deviceCount=0;
present=false;
#endif
}
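// Editor's sketch (not part of the original source): typical use of this
// class in an MPI program. The 64 KiB buffer size is an arbitrary
// illustration, not a value taken from the project.
//
//   gpu localGpu;                      // probes the devices on this rank
//   char buf[65536];
//   localGpu.pack(buf, sizeof(buf));
//   MPI::COMM_WORLD.Send(buf, sizeof(buf), MPI::PACKED, 0, 0);
//
//   // on the receiving rank:
//   //   MPI::COMM_WORLD.Recv(buf, sizeof(buf), MPI::PACKED, source, 0);
//   //   gpu remoteGpu(buf, sizeof(buf));
//   //   remoteGpu.complete();         // rebuilds the name/value attribute tables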
| b83e6a92181c7706c26d6208645a341c57771aad.cu | /*
* File: gpu.cpp
* Author: aliendo
*
 * Created on December 26, 2013, 11:23 AM
*/
#include "gpu.h"
#include <mpi.h>
gpu::gpu() {
setDeviceCount();
if (present){
if (deviceCount == 0) {
gpu(false);
} else {
setDeviceProperties();
}
} else {
deviceCount = 0;
name = new string[1];
name[0]=" ";
major = new int[1];
major[0]=0;
minor = new int[1];
minor[0]=0;
totalGlobalMem = new unsigned int[1];
totalGlobalMem[0]=0;
multiProcessorCount = new int[1];
multiProcessorCount[0]=0;
numCores = new int[1];
numCores[0]=0;
totalConstMem = new unsigned int[1];
totalConstMem[0]=0;
sharedMemPerBlock = new unsigned int[1];
sharedMemPerBlock[0]=0;
regsPerBlock = new int[1];
regsPerBlock[0]=0;
warpSize = new int[1];
warpSize[0]=0;
maxThreadsPerBlock = new int[1];
maxThreadsPerBlock[0]=0;
maxThreadsDim0 = new int[1];
maxThreadsDim0[0]=0;
maxThreadsDim1 = new int[1];
maxThreadsDim1[0]=0;
maxThreadsDim2 = new int[1];
maxThreadsDim2[0]=0;
maxGridSize0 = new int[1];
maxGridSize0[0]=0;
maxGridSize1 = new int[1];
maxGridSize1[0]=0;
maxGridSize2 = new int[1];
maxGridSize2[0]=0;
memPitch = new unsigned int[1];
memPitch[0]=0;
textureAlignment = new unsigned int[1];
textureAlignment[0]=0;
clockRate = new float[1];
clockRate[0]=0;
deviceOverlap = new bool[1];
deviceOverlap[0]=0;
}
setNatr();
setValueatr();
setNameatr();
}
gpu::gpu(bool verify) {
if (!verify){
present=false;
deviceCount = 0;
name = new string[1];
name[0]=" ";
major = new int[1];
major[0]=0;
minor = new int[1];
minor[0]=0;
totalGlobalMem = new unsigned int[1];
totalGlobalMem[0]=0;
multiProcessorCount = new int[1];
multiProcessorCount[0]=0;
numCores = new int[1];
numCores[0]=0;
totalConstMem = new unsigned int[1];
totalConstMem[0]=0;
sharedMemPerBlock = new unsigned int[1];
sharedMemPerBlock[0]=0;
regsPerBlock = new int[1];
regsPerBlock[0]=0;
warpSize = new int[1];
warpSize[0]=0;
maxThreadsPerBlock = new int[1];
maxThreadsPerBlock[0]=0;
maxThreadsDim0 = new int[1];
maxThreadsDim0[0]=0;
maxThreadsDim1 = new int[1];
maxThreadsDim1[0]=0;
maxThreadsDim2 = new int[1];
maxThreadsDim2[0]=0;
maxGridSize0 = new int[1];
maxGridSize0[0]=0;
maxGridSize1 = new int[1];
maxGridSize1[0]=0;
maxGridSize2 = new int[1];
maxGridSize2[0]=0;
memPitch = new unsigned int[1];
memPitch[0]=0;
textureAlignment = new unsigned int[1];
textureAlignment[0]=0;
clockRate = new float[1];
clockRate[0]=0;
deviceOverlap = new bool[1];
deviceOverlap[0]=0;
} else {
gpu();
}
}
gpu::gpu(void *buf, int size){
int offset=0,aux,nelem=1;
int auxsize;
MPI::INT.Unpack(buf,size,&aux,1,offset,MPI::COMM_WORLD);
if(aux==1) present=true;
else present=false;
MPI::INT.Unpack(buf,size,&deviceCount,1,offset,MPI::COMM_WORLD);
if(deviceCount!=0) nelem=deviceCount;
name = new string[nelem];
major = new int[nelem];
minor = new int[nelem];
totalGlobalMem = new unsigned int[nelem];
multiProcessorCount = new int[nelem];
numCores = new int[nelem];
totalConstMem = new unsigned int[nelem];
sharedMemPerBlock = new unsigned int[nelem];
regsPerBlock = new int[nelem];
warpSize = new int[nelem];
maxThreadsPerBlock = new int[nelem];
maxThreadsDim0 = new int[nelem];
maxThreadsDim1 = new int[nelem];
maxThreadsDim2 = new int[nelem];
maxGridSize0 = new int[nelem];
maxGridSize1 = new int[nelem];
maxGridSize2 = new int[nelem];
memPitch = new unsigned int[nelem];
textureAlignment = new unsigned int[nelem];
clockRate = new float[nelem];
deviceOverlap = new bool[nelem];
MPI::INT.Unpack(buf,size,&auxsize,1,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,name,auxsize,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,major,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,minor,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,totalGlobalMem,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,multiProcessorCount,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,numCores,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,totalConstMem,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,sharedMemPerBlock,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,regsPerBlock,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,warpSize,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,maxThreadsPerBlock,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,maxThreadsDim0,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,maxThreadsDim1,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,maxThreadsDim2,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,maxGridSize0,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,maxGridSize1,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,maxGridSize2,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,memPitch,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,textureAlignment,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,clockRate,nelem,offset,MPI::COMM_WORLD);
MPI::INT.Unpack(buf,size,deviceOverlap,nelem,offset,MPI::COMM_WORLD);
}
void gpu::pack(void *buf, int size){
int offset=0,aux,nelem=1;
int auxsize=name[0].length();
if(present) aux=1;
else aux=0;
MPI::INT.Pack(&aux,1,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(&deviceCount,1,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(&auxsize,1,buf,size,offset,MPI::COMM_WORLD);
if(deviceCount!=0) nelem=deviceCount;
MPI::INT.Pack(name,auxsize*nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(major,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(minor,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(totalGlobalMem,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(multiProcessorCount,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(numCores,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(totalConstMem,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(sharedMemPerBlock,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(regsPerBlock,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(warpSize,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(maxThreadsPerBlock,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(maxThreadsDim0,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(maxThreadsDim1,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(maxThreadsDim2,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(maxGridSize0,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(maxGridSize1,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(maxGridSize2,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(memPitch,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(textureAlignment,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(clockRate,nelem,buf,size,offset,MPI::COMM_WORLD);
MPI::INT.Pack(deviceOverlap,nelem,buf,size,offset,MPI::COMM_WORLD);
}
void gpu::complete(){
setNatr();
setValueatr();
setNameatr();
}
void gpu::setPresent(){
int auxsystem;
auxsystem=system("which nvcc > nul 2>&1");
if (auxsystem==0)
present=true;
else
present=false;
}
bool gpu::getPresent(){
return present;
}
void gpu::setDeviceProperties(){
#if CUDA
int dev;
cudaDeviceProp deviceProp;
name = new string[deviceCount];
major = new int[deviceCount];
minor = new int[deviceCount];
totalGlobalMem = new unsigned int[deviceCount];
multiProcessorCount = new int[deviceCount];
numCores = new int[deviceCount];
totalConstMem = new unsigned int[deviceCount];
sharedMemPerBlock = new unsigned int[deviceCount];
regsPerBlock = new int[deviceCount];
warpSize = new int[deviceCount];
maxThreadsPerBlock = new int[deviceCount];
maxThreadsDim0 = new int[deviceCount];
maxThreadsDim1 = new int[deviceCount];
maxThreadsDim2 = new int[deviceCount];
maxGridSize0 = new int[deviceCount];
maxGridSize1 = new int[deviceCount];
maxGridSize2 = new int[deviceCount];
memPitch = new unsigned int[deviceCount];
textureAlignment = new unsigned int[deviceCount];
clockRate = new float[deviceCount];
deviceOverlap = new bool[deviceCount];
for (dev = 0; dev < deviceCount; ++dev) {
cudaGetDeviceProperties(&deviceProp, dev);
if (dev == 0) {
if (deviceProp.major == 9999 && deviceProp.minor == 9999){
gpu(false);
}
}
name[dev]=deviceProp.name;
major[dev]=deviceProp.major;
minor[dev]=deviceProp.minor;
totalGlobalMem[dev]=(unsigned int)deviceProp.totalGlobalMem;
#if CUDART_VERSION >= 2000
multiProcessorCount[dev]=deviceProp.multiProcessorCount;
numCores[dev]=8 * deviceProp.multiProcessorCount;
#else
multiProcessorCount[dev]=0;
numCores[dev]=0;
#endif
totalConstMem[dev]=(unsigned int)deviceProp.totalConstMem;
sharedMemPerBlock[dev]=(unsigned int)deviceProp.sharedMemPerBlock;
regsPerBlock[dev]=deviceProp.regsPerBlock;
warpSize[dev]=deviceProp.warpSize;
maxThreadsPerBlock[dev]=deviceProp.maxThreadsPerBlock;
maxThreadsDim0[dev]=deviceProp.maxThreadsDim[0];
maxThreadsDim1[dev]=deviceProp.maxThreadsDim[1];
maxThreadsDim2[dev]=deviceProp.maxThreadsDim[2];
maxGridSize0[dev]=deviceProp.maxGridSize[0];
maxGridSize1[dev]=deviceProp.maxGridSize[1];
maxGridSize2[dev]=deviceProp.maxGridSize[2];
memPitch[dev]=(unsigned int)deviceProp.memPitch;
textureAlignment[dev]=(unsigned int)deviceProp.textureAlignment;
clockRate[dev]=deviceProp.clockRate * 1e-6f;
#if CUDART_VERSION >= 2000
deviceOverlap[dev]=deviceProp.deviceOverlap;
#else
deviceOverlap[dev]=false;
#endif
}
#endif
}
void gpu::gpuCopy(gpu aCopiar){
int dev;
present=aCopiar.present;
if(present){
deviceCount=aCopiar.deviceCount;
for (dev = 0; dev < deviceCount; ++dev) {
name[dev]=aCopiar.name[dev];
major[dev]=aCopiar.major[dev];
minor[dev]=aCopiar.minor[dev];
totalGlobalMem[dev]=aCopiar.totalGlobalMem[dev];
multiProcessorCount[dev]=aCopiar.multiProcessorCount[dev];
numCores[dev]=aCopiar.numCores[dev];
totalConstMem[dev]=aCopiar.totalConstMem[dev];
sharedMemPerBlock[dev]=aCopiar.sharedMemPerBlock[dev];
regsPerBlock[dev]=aCopiar.regsPerBlock[dev];
warpSize[dev]=aCopiar.warpSize[dev];
maxThreadsPerBlock[dev]=aCopiar.maxThreadsPerBlock[dev];
maxThreadsDim0[dev]=aCopiar.maxThreadsDim0[dev];
maxThreadsDim1[dev]=aCopiar.maxThreadsDim1[dev];
maxThreadsDim2[dev]=aCopiar.maxThreadsDim2[dev];
maxGridSize0[dev]=aCopiar.maxGridSize0[dev];
maxGridSize1[dev]=aCopiar.maxGridSize1[dev];
maxGridSize2[dev]=aCopiar.maxGridSize2[dev];
memPitch[dev]=aCopiar.memPitch[dev];
textureAlignment[dev]=aCopiar.textureAlignment[dev];
clockRate[dev]=aCopiar.clockRate[dev];
deviceOverlap[dev]=aCopiar.deviceOverlap[dev];
}
}
complete();
}
// To discover the class
void gpu::setNatr(){
if (present){
natr=23;
} else {
natr=1;
}
}
void gpu::setValueatr(){
valueatr = new string[natr];
stringstream auxss;
if(present){
auxss << present << ",";
valueatr[0]=auxss.str();
auxss.str(string());
auxss << deviceCount << ",";
valueatr[1]=auxss.str();
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << name[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[2]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << major[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[3]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << minor[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[4]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << totalGlobalMem[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[5]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << multiProcessorCount[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[6]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << numCores[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[7]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << totalConstMem[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[8]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << sharedMemPerBlock[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[9]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << regsPerBlock[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[10]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << warpSize[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[11]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << maxThreadsPerBlock[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[12]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << maxThreadsDim0[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[13]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << maxThreadsDim1[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[14]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << maxThreadsDim2[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[15]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << maxGridSize0[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[16]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << maxGridSize1[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[17]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << maxGridSize2[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[18]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << memPitch[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[19]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << textureAlignment[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[20]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << clockRate[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[21]=auxss.str().append("],");
auxss.str(string());
auxss << "[";
for (int i=0; i<deviceCount;i++){
auxss << deviceOverlap[i];
if(i!=deviceCount-1)
auxss << ",";
}
valueatr[22]=auxss.str().append("]");
auxss.str(string());
} else {
auxss << present ;
valueatr[0]=auxss.str();
auxss.str(string());
}
}
void gpu::setNameatr(){
nameatr = new string[natr];
nameatr[0]="present";
if(present){
nameatr[1]="deviceCount";
nameatr[2]="name";
nameatr[3]="major";
nameatr[4]="minor";
nameatr[5]="totalGlobalMem";
nameatr[6]="multiProcessorCount";
nameatr[7]="numCores";
nameatr[8]="totalConstMem";
nameatr[9]="sharedMemPerBlock";
nameatr[10]="regsPerBlock";
nameatr[11]="warpSize";
nameatr[12]="maxThreadsPerBlock";
nameatr[13]="maxThreadsDim0";
nameatr[14]="maxThreadsDim1";
nameatr[15]="maxThreadsDim2";
nameatr[16]="maxGridSize0";
nameatr[17]="maxGridSize1";
nameatr[18]="maxGridSize2";
nameatr[19]="memPitch";
nameatr[20]="textureAlignment";
nameatr[21]="clockRate";
nameatr[22]="deviceOverlap";
}
}
int gpu::getNatr(){
return natr;
}
string gpu::getValueatr(int n){
if (n<getNatr())
return valueatr[n];
else
exit(EXIT_FAILURE);
}
string gpu::getNameatr(int n){
if (n<getNatr())
return nameatr[n];
else
exit(EXIT_FAILURE);
}
/*gpu::gpu(const gpu& orig) {
}
gpu::~gpu() {
}*/
void gpu::setDeviceCount(){
#if CUDA
cudaGetDeviceCount(&deviceCount);
if(deviceCount==0) present=false;
else present=true;
#else
deviceCount=0;
present=false;
#endif
}
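// Minimal usage sketch (hypothetical; assumes the gpu constructor ends up calling
// setNatr()/setNameatr()/setValueatr() and that <iostream> is available -- neither
// is shown in this excerpt):
//
//   gpu g;
//   for (int i = 0; i < g.getNatr(); i++)
//       std::cout << g.getNameatr(i) << " = " << g.getValueatr(i) << std::endl;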
|
1e841b321537414063f0ceac8bc37ddadc71b827.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2021 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifdef USE_CUDA_EXP
#include <LightGBM/cuda/cuda_tree.hpp>
namespace LightGBM {
__device__ void SetDecisionTypeCUDA(int8_t* decision_type, bool input, int8_t mask) {
if (input) {
(*decision_type) |= mask;
} else {
(*decision_type) &= (127 - mask);
}
}
__device__ void SetMissingTypeCUDA(int8_t* decision_type, int8_t input) {
(*decision_type) &= 3;
(*decision_type) |= (input << 2);
}
__device__ bool GetDecisionTypeCUDA(int8_t decision_type, int8_t mask) {
return (decision_type & mask) > 0;
}
__device__ int8_t GetMissingTypeCUDA(int8_t decision_type) {
return (decision_type >> 2) & 3;
}
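// Bit layout implied by the helpers above: SetMissingTypeCUDA keeps only the two
// low bits, so the flag masks (categorical / default-left) live in bits 0-1 while
// bits 2-3 store the missing type. Example: starting from decision_type = 0,
// SetMissingTypeCUDA(&decision_type, 2) yields 0b1000, and
// GetMissingTypeCUDA(decision_type) recovers (0b1000 >> 2) & 3 == 2.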
__device__ bool IsZeroCUDA(double fval) {
return (fval >= -kZeroThreshold && fval <= kZeroThreshold);
}
template<typename T>
__device__ bool FindInBitsetCUDA(const uint32_t* bits, int n, T pos) {
int i1 = pos / 32;
if (i1 >= n) {
return false;
}
int i2 = pos % 32;
return (bits[i1] >> i2) & 1;
}
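// Example: FindInBitsetCUDA(bits, n, 37) reads bit 5 (37 % 32) of word 1 (37 / 32);
// n is the bitset length in 32-bit words, so positions past the end return false.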
__global__ void SplitKernel( // split information
const int leaf_index,
const int real_feature_index,
const double real_threshold,
const MissingType missing_type,
const CUDASplitInfo* cuda_split_info,
// tree structure
const int num_leaves,
int* leaf_parent,
int* leaf_depth,
int* left_child,
int* right_child,
int* split_feature_inner,
int* split_feature,
float* split_gain,
double* internal_weight,
double* internal_value,
data_size_t* internal_count,
double* leaf_weight,
double* leaf_value,
data_size_t* leaf_count,
int8_t* decision_type,
uint32_t* threshold_in_bin,
double* threshold) {
const int new_node_index = num_leaves - 1;
const int thread_index = static_cast<int>(threadIdx.x + blockIdx.x * blockDim.x);
const int parent_index = leaf_parent[leaf_index];
if (thread_index == 0) {
if (parent_index >= 0) {
// if cur node is left child
if (left_child[parent_index] == ~leaf_index) {
left_child[parent_index] = new_node_index;
} else {
right_child[parent_index] = new_node_index;
}
}
left_child[new_node_index] = ~leaf_index;
right_child[new_node_index] = ~num_leaves;
leaf_parent[leaf_index] = new_node_index;
leaf_parent[num_leaves] = new_node_index;
} else if (thread_index == 1) {
// add new node
split_feature_inner[new_node_index] = cuda_split_info->inner_feature_index;
} else if (thread_index == 2) {
split_feature[new_node_index] = real_feature_index;
} else if (thread_index == 3) {
split_gain[new_node_index] = static_cast<float>(cuda_split_info->gain);
} else if (thread_index == 4) {
// save current leaf value to internal node before change
internal_weight[new_node_index] = leaf_weight[leaf_index];
leaf_weight[leaf_index] = cuda_split_info->left_sum_hessians;
} else if (thread_index == 5) {
internal_value[new_node_index] = leaf_value[leaf_index];
leaf_value[leaf_index] = isnan(cuda_split_info->left_value) ? 0.0f : cuda_split_info->left_value;
} else if (thread_index == 6) {
internal_count[new_node_index] = cuda_split_info->left_count + cuda_split_info->right_count;
} else if (thread_index == 7) {
leaf_count[leaf_index] = cuda_split_info->left_count;
} else if (thread_index == 8) {
leaf_value[num_leaves] = isnan(cuda_split_info->right_value) ? 0.0f : cuda_split_info->right_value;
} else if (thread_index == 9) {
leaf_weight[num_leaves] = cuda_split_info->right_sum_hessians;
} else if (thread_index == 10) {
leaf_count[num_leaves] = cuda_split_info->right_count;
} else if (thread_index == 11) {
// update leaf depth
leaf_depth[num_leaves] = leaf_depth[leaf_index] + 1;
leaf_depth[leaf_index]++;
} else if (thread_index == 12) {
decision_type[new_node_index] = 0;
SetDecisionTypeCUDA(&decision_type[new_node_index], false, kCategoricalMask);
SetDecisionTypeCUDA(&decision_type[new_node_index], cuda_split_info->default_left, kDefaultLeftMask);
SetMissingTypeCUDA(&decision_type[new_node_index], static_cast<int8_t>(missing_type));
} else if (thread_index == 13) {
threshold_in_bin[new_node_index] = cuda_split_info->threshold;
} else if (thread_index == 14) {
threshold[new_node_index] = real_threshold;
}
}
void CUDATree::LaunchSplitKernel(const int leaf_index,
const int real_feature_index,
const double real_threshold,
const MissingType missing_type,
const CUDASplitInfo* cuda_split_info) {
hipLaunchKernelGGL(( SplitKernel), dim3(3), dim3(5), 0, cuda_stream_,
// split information
leaf_index,
real_feature_index,
real_threshold,
missing_type,
cuda_split_info,
// tree structure
num_leaves_,
cuda_leaf_parent_,
cuda_leaf_depth_,
cuda_left_child_,
cuda_right_child_,
cuda_split_feature_inner_,
cuda_split_feature_,
cuda_split_gain_,
cuda_internal_weight_,
cuda_internal_value_,
cuda_internal_count_,
cuda_leaf_weight_,
cuda_leaf_value_,
cuda_leaf_count_,
cuda_decision_type_,
cuda_threshold_in_bin_,
cuda_threshold_);
}
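// Note: the 3-block x 5-thread launch above provides 15 threads, one per branch of
// the thread_index chain in SplitKernel (indices 0..14); each thread writes an
// independent field of the new internal node and the new right leaf.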
__global__ void SplitCategoricalKernel( // split information
const int leaf_index,
const int real_feature_index,
const MissingType missing_type,
const CUDASplitInfo* cuda_split_info,
// tree structure
const int num_leaves,
int* leaf_parent,
int* leaf_depth,
int* left_child,
int* right_child,
int* split_feature_inner,
int* split_feature,
float* split_gain,
double* internal_weight,
double* internal_value,
data_size_t* internal_count,
double* leaf_weight,
double* leaf_value,
data_size_t* leaf_count,
int8_t* decision_type,
uint32_t* threshold_in_bin,
double* threshold,
size_t cuda_bitset_len,
size_t cuda_bitset_inner_len,
int num_cat,
int* cuda_cat_boundaries,
int* cuda_cat_boundaries_inner) {
const int new_node_index = num_leaves - 1;
const int thread_index = static_cast<int>(threadIdx.x + blockIdx.x * blockDim.x);
const int parent_index = leaf_parent[leaf_index];
if (thread_index == 0) {
if (parent_index >= 0) {
// if cur node is left child
if (left_child[parent_index] == ~leaf_index) {
left_child[parent_index] = new_node_index;
} else {
right_child[parent_index] = new_node_index;
}
}
left_child[new_node_index] = ~leaf_index;
right_child[new_node_index] = ~num_leaves;
leaf_parent[leaf_index] = new_node_index;
leaf_parent[num_leaves] = new_node_index;
} else if (thread_index == 1) {
// add new node
split_feature_inner[new_node_index] = cuda_split_info->inner_feature_index;
} else if (thread_index == 2) {
split_feature[new_node_index] = real_feature_index;
} else if (thread_index == 3) {
split_gain[new_node_index] = static_cast<float>(cuda_split_info->gain);
} else if (thread_index == 4) {
// save current leaf value to internal node before change
internal_weight[new_node_index] = leaf_weight[leaf_index];
leaf_weight[leaf_index] = cuda_split_info->left_sum_hessians;
} else if (thread_index == 5) {
internal_value[new_node_index] = leaf_value[leaf_index];
leaf_value[leaf_index] = isnan(cuda_split_info->left_value) ? 0.0f : cuda_split_info->left_value;
} else if (thread_index == 6) {
internal_count[new_node_index] = cuda_split_info->left_count + cuda_split_info->right_count;
} else if (thread_index == 7) {
leaf_count[leaf_index] = cuda_split_info->left_count;
} else if (thread_index == 8) {
leaf_value[num_leaves] = isnan(cuda_split_info->right_value) ? 0.0f : cuda_split_info->right_value;
} else if (thread_index == 9) {
leaf_weight[num_leaves] = cuda_split_info->right_sum_hessians;
} else if (thread_index == 10) {
leaf_count[num_leaves] = cuda_split_info->right_count;
} else if (thread_index == 11) {
// update leaf depth
leaf_depth[num_leaves] = leaf_depth[leaf_index] + 1;
leaf_depth[leaf_index]++;
} else if (thread_index == 12) {
decision_type[new_node_index] = 0;
SetDecisionTypeCUDA(&decision_type[new_node_index], true, kCategoricalMask);
SetMissingTypeCUDA(&decision_type[new_node_index], static_cast<int8_t>(missing_type));
} else if (thread_index == 13) {
threshold_in_bin[new_node_index] = num_cat;
} else if (thread_index == 14) {
threshold[new_node_index] = num_cat;
} else if (thread_index == 15) {
if (num_cat == 0) {
cuda_cat_boundaries[num_cat] = 0;
}
cuda_cat_boundaries[num_cat + 1] = cuda_cat_boundaries[num_cat] + cuda_bitset_len;
} else if (thread_index == 16) {
if (num_cat == 0) {
cuda_cat_boundaries_inner[num_cat] = 0;
}
cuda_cat_boundaries_inner[num_cat + 1] = cuda_cat_boundaries_inner[num_cat] + cuda_bitset_inner_len;
}
}
void CUDATree::LaunchSplitCategoricalKernel(const int leaf_index,
const int real_feature_index,
const MissingType missing_type,
const CUDASplitInfo* cuda_split_info,
size_t cuda_bitset_len,
size_t cuda_bitset_inner_len) {
hipLaunchKernelGGL(( SplitCategoricalKernel), dim3(3), dim3(6), 0, cuda_stream_,
// split information
leaf_index,
real_feature_index,
missing_type,
cuda_split_info,
// tree structure
num_leaves_,
cuda_leaf_parent_,
cuda_leaf_depth_,
cuda_left_child_,
cuda_right_child_,
cuda_split_feature_inner_,
cuda_split_feature_,
cuda_split_gain_,
cuda_internal_weight_,
cuda_internal_value_,
cuda_internal_count_,
cuda_leaf_weight_,
cuda_leaf_value_,
cuda_leaf_count_,
cuda_decision_type_,
cuda_threshold_in_bin_,
cuda_threshold_,
cuda_bitset_len,
cuda_bitset_inner_len,
num_cat_,
cuda_cat_boundaries_.RawData(),
cuda_cat_boundaries_inner_.RawData());
}
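// Note: SplitCategoricalKernel is launched with 3 blocks of 6 threads (18 threads)
// to cover thread_index values 0..16; the one remaining thread simply falls through
// the if/else chain and does nothing.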
__global__ void ShrinkageKernel(const double rate, double* cuda_leaf_value, const int num_leaves) {
const int leaf_index = static_cast<int>(blockIdx.x * blockDim.x + threadIdx.x);
if (leaf_index < num_leaves) {
cuda_leaf_value[leaf_index] *= rate;
}
}
void CUDATree::LaunchShrinkageKernel(const double rate) {
const int num_threads_per_block = 1024;
const int num_blocks = (num_leaves_ + num_threads_per_block - 1) / num_threads_per_block;
hipLaunchKernelGGL(( ShrinkageKernel), dim3(num_blocks), dim3(num_threads_per_block), 0, 0, rate, cuda_leaf_value_, num_leaves_);
}
__global__ void AddBiasKernel(const double val, double* cuda_leaf_value, const int num_leaves) {
const int leaf_index = static_cast<int>(blockIdx.x * blockDim.x + threadIdx.x);
if (leaf_index < num_leaves) {
cuda_leaf_value[leaf_index] += val;
}
}
void CUDATree::LaunchAddBiasKernel(const double val) {
const int num_threads_per_block = 1024;
const int num_blocks = (num_leaves_ + num_threads_per_block - 1) / num_threads_per_block;
hipLaunchKernelGGL(( AddBiasKernel), dim3(num_blocks), dim3(num_threads_per_block), 0, 0, val, cuda_leaf_value_, num_leaves_);
}
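// Leaf encoding used below: SplitKernel stores children of internal nodes as
// non-negative indices and leaves as bitwise complements (left_child = ~leaf_index),
// so the traversal in AddPredictionToScoreKernel loops while node >= 0 and recovers
// the leaf id with ~node once it exits.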
template <bool USE_INDICES>
__global__ void AddPredictionToScoreKernel(
// dataset information
const data_size_t num_data,
void* const* cuda_data_by_column,
const uint8_t* cuda_column_bit_type,
const uint32_t* cuda_feature_min_bin,
const uint32_t* cuda_feature_max_bin,
const uint32_t* cuda_feature_offset,
const uint32_t* cuda_feature_default_bin,
const uint32_t* cuda_feature_most_freq_bin,
const int* cuda_feature_to_column,
const data_size_t* cuda_used_indices,
// tree information
const uint32_t* cuda_threshold_in_bin,
const int8_t* cuda_decision_type,
const int* cuda_split_feature_inner,
const int* cuda_left_child,
const int* cuda_right_child,
const double* cuda_leaf_value,
const uint32_t* cuda_bitset_inner,
const int* cuda_cat_boundaries_inner,
// output
double* score) {
const data_size_t inner_data_index = static_cast<data_size_t>(threadIdx.x + blockIdx.x * blockDim.x);
if (inner_data_index < num_data) {
const data_size_t data_index = USE_INDICES ? cuda_used_indices[inner_data_index] : inner_data_index;
int node = 0;
while (node >= 0) {
const int split_feature_inner = cuda_split_feature_inner[node];
const int column = cuda_feature_to_column[split_feature_inner];
const uint32_t default_bin = cuda_feature_default_bin[split_feature_inner];
const uint32_t most_freq_bin = cuda_feature_most_freq_bin[split_feature_inner];
const uint32_t max_bin = cuda_feature_max_bin[split_feature_inner];
const uint32_t min_bin = cuda_feature_min_bin[split_feature_inner];
const uint32_t offset = cuda_feature_offset[split_feature_inner];
const uint8_t column_bit_type = cuda_column_bit_type[column];
uint32_t bin = 0;
if (column_bit_type == 8) {
bin = static_cast<uint32_t>((reinterpret_cast<const uint8_t*>(cuda_data_by_column[column]))[data_index]);
} else if (column_bit_type == 16) {
bin = static_cast<uint32_t>((reinterpret_cast<const uint16_t*>(cuda_data_by_column[column]))[data_index]);
} else if (column_bit_type == 32) {
bin = static_cast<uint32_t>((reinterpret_cast<const uint32_t*>(cuda_data_by_column[column]))[data_index]);
}
if (bin >= min_bin && bin <= max_bin) {
bin = bin - min_bin + offset;
} else {
bin = most_freq_bin;
}
const int8_t decision_type = cuda_decision_type[node];
if (GetDecisionTypeCUDA(decision_type, kCategoricalMask)) {
int cat_idx = static_cast<int>(cuda_threshold_in_bin[node]);
if (FindInBitsetCUDA(cuda_bitset_inner + cuda_cat_boundaries_inner[cat_idx],
cuda_cat_boundaries_inner[cat_idx + 1] - cuda_cat_boundaries_inner[cat_idx], bin)) {
node = cuda_left_child[node];
} else {
node = cuda_right_child[node];
}
} else {
const uint32_t threshold_in_bin = cuda_threshold_in_bin[node];
const int8_t missing_type = GetMissingTypeCUDA(decision_type);
const bool default_left = ((decision_type & kDefaultLeftMask) > 0);
if ((missing_type == 1 && bin == default_bin) || (missing_type == 2 && bin == max_bin)) {
if (default_left) {
node = cuda_left_child[node];
} else {
node = cuda_right_child[node];
}
} else {
if (bin <= threshold_in_bin) {
node = cuda_left_child[node];
} else {
node = cuda_right_child[node];
}
}
}
}
score[data_index] += cuda_leaf_value[~node];
}
}
void CUDATree::LaunchAddPredictionToScoreKernel(
const Dataset* data,
const data_size_t* used_data_indices,
data_size_t num_data,
double* score) const {
const CUDAColumnData* cuda_column_data = data->cuda_column_data();
const int num_blocks = (num_data + num_threads_per_block_add_prediction_to_score_ - 1) / num_threads_per_block_add_prediction_to_score_;
if (used_data_indices == nullptr) {
hipLaunchKernelGGL(( AddPredictionToScoreKernel<false>), dim3(num_blocks), dim3(num_threads_per_block_add_prediction_to_score_), 0, 0,
// dataset information
num_data,
cuda_column_data->cuda_data_by_column(),
cuda_column_data->cuda_column_bit_type(),
cuda_column_data->cuda_feature_min_bin(),
cuda_column_data->cuda_feature_max_bin(),
cuda_column_data->cuda_feature_offset(),
cuda_column_data->cuda_feature_default_bin(),
cuda_column_data->cuda_feature_most_freq_bin(),
cuda_column_data->cuda_feature_to_column(),
nullptr,
// tree information
cuda_threshold_in_bin_,
cuda_decision_type_,
cuda_split_feature_inner_,
cuda_left_child_,
cuda_right_child_,
cuda_leaf_value_,
cuda_bitset_inner_.RawDataReadOnly(),
cuda_cat_boundaries_inner_.RawDataReadOnly(),
// output
score);
} else {
hipLaunchKernelGGL(( AddPredictionToScoreKernel<true>), dim3(num_blocks), dim3(num_threads_per_block_add_prediction_to_score_), 0, 0,
// dataset information
num_data,
cuda_column_data->cuda_data_by_column(),
cuda_column_data->cuda_column_bit_type(),
cuda_column_data->cuda_feature_min_bin(),
cuda_column_data->cuda_feature_max_bin(),
cuda_column_data->cuda_feature_offset(),
cuda_column_data->cuda_feature_default_bin(),
cuda_column_data->cuda_feature_most_freq_bin(),
cuda_column_data->cuda_feature_to_column(),
used_data_indices,
// tree information
cuda_threshold_in_bin_,
cuda_decision_type_,
cuda_split_feature_inner_,
cuda_left_child_,
cuda_right_child_,
cuda_leaf_value_,
cuda_bitset_inner_.RawDataReadOnly(),
cuda_cat_boundaries_inner_.RawDataReadOnly(),
// output
score);
}
SynchronizeCUDADevice(__FILE__, __LINE__);
}
} // namespace LightGBM
#endif // USE_CUDA_EXP
| 1e841b321537414063f0ceac8bc37ddadc71b827.cu | /*!
* Copyright (c) 2021 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifdef USE_CUDA_EXP
#include <LightGBM/cuda/cuda_tree.hpp>
namespace LightGBM {
__device__ void SetDecisionTypeCUDA(int8_t* decision_type, bool input, int8_t mask) {
if (input) {
(*decision_type) |= mask;
} else {
(*decision_type) &= (127 - mask);
}
}
__device__ void SetMissingTypeCUDA(int8_t* decision_type, int8_t input) {
(*decision_type) &= 3;
(*decision_type) |= (input << 2);
}
__device__ bool GetDecisionTypeCUDA(int8_t decision_type, int8_t mask) {
return (decision_type & mask) > 0;
}
__device__ int8_t GetMissingTypeCUDA(int8_t decision_type) {
return (decision_type >> 2) & 3;
}
__device__ bool IsZeroCUDA(double fval) {
return (fval >= -kZeroThreshold && fval <= kZeroThreshold);
}
template<typename T>
__device__ bool FindInBitsetCUDA(const uint32_t* bits, int n, T pos) {
int i1 = pos / 32;
if (i1 >= n) {
return false;
}
int i2 = pos % 32;
return (bits[i1] >> i2) & 1;
}
__global__ void SplitKernel( // split information
const int leaf_index,
const int real_feature_index,
const double real_threshold,
const MissingType missing_type,
const CUDASplitInfo* cuda_split_info,
// tree structure
const int num_leaves,
int* leaf_parent,
int* leaf_depth,
int* left_child,
int* right_child,
int* split_feature_inner,
int* split_feature,
float* split_gain,
double* internal_weight,
double* internal_value,
data_size_t* internal_count,
double* leaf_weight,
double* leaf_value,
data_size_t* leaf_count,
int8_t* decision_type,
uint32_t* threshold_in_bin,
double* threshold) {
const int new_node_index = num_leaves - 1;
const int thread_index = static_cast<int>(threadIdx.x + blockIdx.x * blockDim.x);
const int parent_index = leaf_parent[leaf_index];
if (thread_index == 0) {
if (parent_index >= 0) {
// if cur node is left child
if (left_child[parent_index] == ~leaf_index) {
left_child[parent_index] = new_node_index;
} else {
right_child[parent_index] = new_node_index;
}
}
left_child[new_node_index] = ~leaf_index;
right_child[new_node_index] = ~num_leaves;
leaf_parent[leaf_index] = new_node_index;
leaf_parent[num_leaves] = new_node_index;
} else if (thread_index == 1) {
// add new node
split_feature_inner[new_node_index] = cuda_split_info->inner_feature_index;
} else if (thread_index == 2) {
split_feature[new_node_index] = real_feature_index;
} else if (thread_index == 3) {
split_gain[new_node_index] = static_cast<float>(cuda_split_info->gain);
} else if (thread_index == 4) {
// save current leaf value to internal node before change
internal_weight[new_node_index] = leaf_weight[leaf_index];
leaf_weight[leaf_index] = cuda_split_info->left_sum_hessians;
} else if (thread_index == 5) {
internal_value[new_node_index] = leaf_value[leaf_index];
leaf_value[leaf_index] = isnan(cuda_split_info->left_value) ? 0.0f : cuda_split_info->left_value;
} else if (thread_index == 6) {
internal_count[new_node_index] = cuda_split_info->left_count + cuda_split_info->right_count;
} else if (thread_index == 7) {
leaf_count[leaf_index] = cuda_split_info->left_count;
} else if (thread_index == 8) {
leaf_value[num_leaves] = isnan(cuda_split_info->right_value) ? 0.0f : cuda_split_info->right_value;
} else if (thread_index == 9) {
leaf_weight[num_leaves] = cuda_split_info->right_sum_hessians;
} else if (thread_index == 10) {
leaf_count[num_leaves] = cuda_split_info->right_count;
} else if (thread_index == 11) {
// update leaf depth
leaf_depth[num_leaves] = leaf_depth[leaf_index] + 1;
leaf_depth[leaf_index]++;
} else if (thread_index == 12) {
decision_type[new_node_index] = 0;
SetDecisionTypeCUDA(&decision_type[new_node_index], false, kCategoricalMask);
SetDecisionTypeCUDA(&decision_type[new_node_index], cuda_split_info->default_left, kDefaultLeftMask);
SetMissingTypeCUDA(&decision_type[new_node_index], static_cast<int8_t>(missing_type));
} else if (thread_index == 13) {
threshold_in_bin[new_node_index] = cuda_split_info->threshold;
} else if (thread_index == 14) {
threshold[new_node_index] = real_threshold;
}
}
void CUDATree::LaunchSplitKernel(const int leaf_index,
const int real_feature_index,
const double real_threshold,
const MissingType missing_type,
const CUDASplitInfo* cuda_split_info) {
SplitKernel<<<3, 5, 0, cuda_stream_>>>(
// split information
leaf_index,
real_feature_index,
real_threshold,
missing_type,
cuda_split_info,
// tree structure
num_leaves_,
cuda_leaf_parent_,
cuda_leaf_depth_,
cuda_left_child_,
cuda_right_child_,
cuda_split_feature_inner_,
cuda_split_feature_,
cuda_split_gain_,
cuda_internal_weight_,
cuda_internal_value_,
cuda_internal_count_,
cuda_leaf_weight_,
cuda_leaf_value_,
cuda_leaf_count_,
cuda_decision_type_,
cuda_threshold_in_bin_,
cuda_threshold_);
}
__global__ void SplitCategoricalKernel( // split information
const int leaf_index,
const int real_feature_index,
const MissingType missing_type,
const CUDASplitInfo* cuda_split_info,
// tree structure
const int num_leaves,
int* leaf_parent,
int* leaf_depth,
int* left_child,
int* right_child,
int* split_feature_inner,
int* split_feature,
float* split_gain,
double* internal_weight,
double* internal_value,
data_size_t* internal_count,
double* leaf_weight,
double* leaf_value,
data_size_t* leaf_count,
int8_t* decision_type,
uint32_t* threshold_in_bin,
double* threshold,
size_t cuda_bitset_len,
size_t cuda_bitset_inner_len,
int num_cat,
int* cuda_cat_boundaries,
int* cuda_cat_boundaries_inner) {
const int new_node_index = num_leaves - 1;
const int thread_index = static_cast<int>(threadIdx.x + blockIdx.x * blockDim.x);
const int parent_index = leaf_parent[leaf_index];
if (thread_index == 0) {
if (parent_index >= 0) {
// if cur node is left child
if (left_child[parent_index] == ~leaf_index) {
left_child[parent_index] = new_node_index;
} else {
right_child[parent_index] = new_node_index;
}
}
left_child[new_node_index] = ~leaf_index;
right_child[new_node_index] = ~num_leaves;
leaf_parent[leaf_index] = new_node_index;
leaf_parent[num_leaves] = new_node_index;
} else if (thread_index == 1) {
// add new node
split_feature_inner[new_node_index] = cuda_split_info->inner_feature_index;
} else if (thread_index == 2) {
split_feature[new_node_index] = real_feature_index;
} else if (thread_index == 3) {
split_gain[new_node_index] = static_cast<float>(cuda_split_info->gain);
} else if (thread_index == 4) {
// save current leaf value to internal node before change
internal_weight[new_node_index] = leaf_weight[leaf_index];
leaf_weight[leaf_index] = cuda_split_info->left_sum_hessians;
} else if (thread_index == 5) {
internal_value[new_node_index] = leaf_value[leaf_index];
leaf_value[leaf_index] = isnan(cuda_split_info->left_value) ? 0.0f : cuda_split_info->left_value;
} else if (thread_index == 6) {
internal_count[new_node_index] = cuda_split_info->left_count + cuda_split_info->right_count;
} else if (thread_index == 7) {
leaf_count[leaf_index] = cuda_split_info->left_count;
} else if (thread_index == 8) {
leaf_value[num_leaves] = isnan(cuda_split_info->right_value) ? 0.0f : cuda_split_info->right_value;
} else if (thread_index == 9) {
leaf_weight[num_leaves] = cuda_split_info->right_sum_hessians;
} else if (thread_index == 10) {
leaf_count[num_leaves] = cuda_split_info->right_count;
} else if (thread_index == 11) {
// update leaf depth
leaf_depth[num_leaves] = leaf_depth[leaf_index] + 1;
leaf_depth[leaf_index]++;
} else if (thread_index == 12) {
decision_type[new_node_index] = 0;
SetDecisionTypeCUDA(&decision_type[new_node_index], true, kCategoricalMask);
SetMissingTypeCUDA(&decision_type[new_node_index], static_cast<int8_t>(missing_type));
} else if (thread_index == 13) {
threshold_in_bin[new_node_index] = num_cat;
} else if (thread_index == 14) {
threshold[new_node_index] = num_cat;
} else if (thread_index == 15) {
if (num_cat == 0) {
cuda_cat_boundaries[num_cat] = 0;
}
cuda_cat_boundaries[num_cat + 1] = cuda_cat_boundaries[num_cat] + cuda_bitset_len;
} else if (thread_index == 16) {
if (num_cat == 0) {
cuda_cat_boundaries_inner[num_cat] = 0;
}
cuda_cat_boundaries_inner[num_cat + 1] = cuda_cat_boundaries_inner[num_cat] + cuda_bitset_inner_len;
}
}
void CUDATree::LaunchSplitCategoricalKernel(const int leaf_index,
const int real_feature_index,
const MissingType missing_type,
const CUDASplitInfo* cuda_split_info,
size_t cuda_bitset_len,
size_t cuda_bitset_inner_len) {
SplitCategoricalKernel<<<3, 6, 0, cuda_stream_>>>(
// split information
leaf_index,
real_feature_index,
missing_type,
cuda_split_info,
// tree structure
num_leaves_,
cuda_leaf_parent_,
cuda_leaf_depth_,
cuda_left_child_,
cuda_right_child_,
cuda_split_feature_inner_,
cuda_split_feature_,
cuda_split_gain_,
cuda_internal_weight_,
cuda_internal_value_,
cuda_internal_count_,
cuda_leaf_weight_,
cuda_leaf_value_,
cuda_leaf_count_,
cuda_decision_type_,
cuda_threshold_in_bin_,
cuda_threshold_,
cuda_bitset_len,
cuda_bitset_inner_len,
num_cat_,
cuda_cat_boundaries_.RawData(),
cuda_cat_boundaries_inner_.RawData());
}
__global__ void ShrinkageKernel(const double rate, double* cuda_leaf_value, const int num_leaves) {
const int leaf_index = static_cast<int>(blockIdx.x * blockDim.x + threadIdx.x);
if (leaf_index < num_leaves) {
cuda_leaf_value[leaf_index] *= rate;
}
}
void CUDATree::LaunchShrinkageKernel(const double rate) {
const int num_threads_per_block = 1024;
const int num_blocks = (num_leaves_ + num_threads_per_block - 1) / num_threads_per_block;
ShrinkageKernel<<<num_blocks, num_threads_per_block>>>(rate, cuda_leaf_value_, num_leaves_);
}
__global__ void AddBiasKernel(const double val, double* cuda_leaf_value, const int num_leaves) {
const int leaf_index = static_cast<int>(blockIdx.x * blockDim.x + threadIdx.x);
if (leaf_index < num_leaves) {
cuda_leaf_value[leaf_index] += val;
}
}
void CUDATree::LaunchAddBiasKernel(const double val) {
const int num_threads_per_block = 1024;
const int num_blocks = (num_leaves_ + num_threads_per_block - 1) / num_threads_per_block;
AddBiasKernel<<<num_blocks, num_threads_per_block>>>(val, cuda_leaf_value_, num_leaves_);
}
template <bool USE_INDICES>
__global__ void AddPredictionToScoreKernel(
// dataset information
const data_size_t num_data,
void* const* cuda_data_by_column,
const uint8_t* cuda_column_bit_type,
const uint32_t* cuda_feature_min_bin,
const uint32_t* cuda_feature_max_bin,
const uint32_t* cuda_feature_offset,
const uint32_t* cuda_feature_default_bin,
const uint32_t* cuda_feature_most_freq_bin,
const int* cuda_feature_to_column,
const data_size_t* cuda_used_indices,
// tree information
const uint32_t* cuda_threshold_in_bin,
const int8_t* cuda_decision_type,
const int* cuda_split_feature_inner,
const int* cuda_left_child,
const int* cuda_right_child,
const double* cuda_leaf_value,
const uint32_t* cuda_bitset_inner,
const int* cuda_cat_boundaries_inner,
// output
double* score) {
const data_size_t inner_data_index = static_cast<data_size_t>(threadIdx.x + blockIdx.x * blockDim.x);
if (inner_data_index < num_data) {
const data_size_t data_index = USE_INDICES ? cuda_used_indices[inner_data_index] : inner_data_index;
int node = 0;
while (node >= 0) {
const int split_feature_inner = cuda_split_feature_inner[node];
const int column = cuda_feature_to_column[split_feature_inner];
const uint32_t default_bin = cuda_feature_default_bin[split_feature_inner];
const uint32_t most_freq_bin = cuda_feature_most_freq_bin[split_feature_inner];
const uint32_t max_bin = cuda_feature_max_bin[split_feature_inner];
const uint32_t min_bin = cuda_feature_min_bin[split_feature_inner];
const uint32_t offset = cuda_feature_offset[split_feature_inner];
const uint8_t column_bit_type = cuda_column_bit_type[column];
uint32_t bin = 0;
if (column_bit_type == 8) {
bin = static_cast<uint32_t>((reinterpret_cast<const uint8_t*>(cuda_data_by_column[column]))[data_index]);
} else if (column_bit_type == 16) {
bin = static_cast<uint32_t>((reinterpret_cast<const uint16_t*>(cuda_data_by_column[column]))[data_index]);
} else if (column_bit_type == 32) {
bin = static_cast<uint32_t>((reinterpret_cast<const uint32_t*>(cuda_data_by_column[column]))[data_index]);
}
if (bin >= min_bin && bin <= max_bin) {
bin = bin - min_bin + offset;
} else {
bin = most_freq_bin;
}
const int8_t decision_type = cuda_decision_type[node];
if (GetDecisionTypeCUDA(decision_type, kCategoricalMask)) {
int cat_idx = static_cast<int>(cuda_threshold_in_bin[node]);
if (FindInBitsetCUDA(cuda_bitset_inner + cuda_cat_boundaries_inner[cat_idx],
cuda_cat_boundaries_inner[cat_idx + 1] - cuda_cat_boundaries_inner[cat_idx], bin)) {
node = cuda_left_child[node];
} else {
node = cuda_right_child[node];
}
} else {
const uint32_t threshold_in_bin = cuda_threshold_in_bin[node];
const int8_t missing_type = GetMissingTypeCUDA(decision_type);
const bool default_left = ((decision_type & kDefaultLeftMask) > 0);
if ((missing_type == 1 && bin == default_bin) || (missing_type == 2 && bin == max_bin)) {
if (default_left) {
node = cuda_left_child[node];
} else {
node = cuda_right_child[node];
}
} else {
if (bin <= threshold_in_bin) {
node = cuda_left_child[node];
} else {
node = cuda_right_child[node];
}
}
}
}
score[data_index] += cuda_leaf_value[~node];
}
}
void CUDATree::LaunchAddPredictionToScoreKernel(
const Dataset* data,
const data_size_t* used_data_indices,
data_size_t num_data,
double* score) const {
const CUDAColumnData* cuda_column_data = data->cuda_column_data();
const int num_blocks = (num_data + num_threads_per_block_add_prediction_to_score_ - 1) / num_threads_per_block_add_prediction_to_score_;
if (used_data_indices == nullptr) {
AddPredictionToScoreKernel<false><<<num_blocks, num_threads_per_block_add_prediction_to_score_>>>(
// dataset information
num_data,
cuda_column_data->cuda_data_by_column(),
cuda_column_data->cuda_column_bit_type(),
cuda_column_data->cuda_feature_min_bin(),
cuda_column_data->cuda_feature_max_bin(),
cuda_column_data->cuda_feature_offset(),
cuda_column_data->cuda_feature_default_bin(),
cuda_column_data->cuda_feature_most_freq_bin(),
cuda_column_data->cuda_feature_to_column(),
nullptr,
// tree information
cuda_threshold_in_bin_,
cuda_decision_type_,
cuda_split_feature_inner_,
cuda_left_child_,
cuda_right_child_,
cuda_leaf_value_,
cuda_bitset_inner_.RawDataReadOnly(),
cuda_cat_boundaries_inner_.RawDataReadOnly(),
// output
score);
} else {
AddPredictionToScoreKernel<true><<<num_blocks, num_threads_per_block_add_prediction_to_score_>>>(
// dataset information
num_data,
cuda_column_data->cuda_data_by_column(),
cuda_column_data->cuda_column_bit_type(),
cuda_column_data->cuda_feature_min_bin(),
cuda_column_data->cuda_feature_max_bin(),
cuda_column_data->cuda_feature_offset(),
cuda_column_data->cuda_feature_default_bin(),
cuda_column_data->cuda_feature_most_freq_bin(),
cuda_column_data->cuda_feature_to_column(),
used_data_indices,
// tree information
cuda_threshold_in_bin_,
cuda_decision_type_,
cuda_split_feature_inner_,
cuda_left_child_,
cuda_right_child_,
cuda_leaf_value_,
cuda_bitset_inner_.RawDataReadOnly(),
cuda_cat_boundaries_inner_.RawDataReadOnly(),
// output
score);
}
SynchronizeCUDADevice(__FILE__, __LINE__);
}
} // namespace LightGBM
#endif // USE_CUDA_EXP
|
f66b9488795cc5c0c024c42fb0b3ea6bd6298ec7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <helper_cuda.h>
#define NUM_THREADS 16
#define NUM_BLOCKS 16
__global__
void hello_world()
{
// For 1D problems the Y components are trivial (threadIdx.y = 0, blockDim.y = gridDim.y = 1), but the general formulas below remain valid
int threadsPerBlock = blockDim.x * blockDim.y;
int threadNumInBlock = threadIdx.x + blockDim.x * threadIdx.y;
int blockNumInGrid = blockIdx.x + gridDim.x * blockIdx.y;
int globalThreadId = blockNumInGrid * threadsPerBlock + threadNumInBlock;
int totalNumberOfThreads = gridDim.x*gridDim.y*blockDim.x*blockDim.y;
if(globalThreadId==100)
{
int *a = (int*) 0x10000;
*a = 0;
}
printf("Hello world! I'm thread %d out of %d in block %d. My global thread id is %d out of %d.\n",threadNumInBlock,threadsPerBlock,blockNumInGrid, globalThreadId,totalNumberOfThreads);
}
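// Worked example for this 16x16 1D launch: blockDim.y and gridDim.y are 1, so
// threadsPerBlock = 16, totalNumberOfThreads = 256 and
// globalThreadId = blockIdx.x * 16 + threadIdx.x. Global id 100 is therefore thread 4
// of block 6, and that single thread writes through the invalid pointer above --
// presumably to demonstrate fault detection with tools such as compute-sanitizer.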
int main(int argc,char** argv)
{
hipLaunchKernelGGL(( hello_world), dim3(NUM_BLOCKS),dim3(NUM_THREADS), 0, 0, );
checkCudaErrors(hipDeviceSynchronize());
return 0;
} | f66b9488795cc5c0c024c42fb0b3ea6bd6298ec7.cu | #include <stdio.h>
#include <helper_cuda.h>
#define NUM_THREADS 16
#define NUM_BLOCKS 16
__global__
void hello_world()
{
// For 1D problems the Y components are trivial (threadIdx.y = 0, blockDim.y = gridDim.y = 1), but the general formulas below remain valid
int threadsPerBlock = blockDim.x * blockDim.y;
int threadNumInBlock = threadIdx.x + blockDim.x * threadIdx.y;
int blockNumInGrid = blockIdx.x + gridDim.x * blockIdx.y;
int globalThreadId = blockNumInGrid * threadsPerBlock + threadNumInBlock;
int totalNumberOfThreads = gridDim.x*gridDim.y*blockDim.x*blockDim.y;
if(globalThreadId==100)
{
int *a = (int*) 0x10000;
*a = 0;
}
printf("Hello world! I'm thread %d out of %d in block %d. My global thread id is %d out of %d.\n",threadNumInBlock,threadsPerBlock,blockNumInGrid, globalThreadId,totalNumberOfThreads);
}
int main(int argc,char** argv)
{
hello_world<<<NUM_BLOCKS,NUM_THREADS>>>();
checkCudaErrors(cudaDeviceSynchronize());
return 0;
} |
f372c9d2ec059887778902e448c0a31b6da4b9fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <chrono>
#include "utils.h"
#include "kernels.hip"
int main(int argc, char **argv) {
Option option;
checkOption(argc, argv, option);
std::vector<Read> reads;
bool fail = readFile(reads, option);
if (fail) return 1;
int readsCount = reads.size();
int* h_lengths = (int*) malloc (sizeof(int) * readsCount);
long* h_offsets = (long*) malloc (sizeof(long) * (1 + readsCount));
h_offsets[0] = 0;
for (int i = 0; i < readsCount; i++) { // copy data for lengths and offsets
int length = reads[i].data.size();
h_lengths[i] = length;
h_offsets[i+1] = h_offsets[i] + length/16*16+16;
}
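// Note: each read is padded to a multiple of 16 characters (length/16*16 + 16),
// which appears to match the packing done later by kernel_compressData:
// d_compressed is sized at one 32-bit word per 16 bases.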
long total_length = h_offsets[readsCount];
char* h_reads = (char*) malloc (sizeof(char) * total_length);
for (int i = 0; i < readsCount; i++) { // copy data for reads
memcpy(&h_reads[h_offsets[i]], reads[i].data.c_str(), h_lengths[i]*sizeof(char));
}
auto t1 = std::chrono::high_resolution_clock::now();
int *d_lengths;
hipMalloc((void**)&d_lengths, readsCount * sizeof(int));
hipMemcpy(d_lengths, h_lengths, readsCount * sizeof(int), hipMemcpyHostToDevice);
long *d_offsets;
hipMalloc((void**)&d_offsets, (1+readsCount) * sizeof(long));
hipMemcpy(d_offsets, h_offsets, (1+readsCount) * sizeof(long), hipMemcpyHostToDevice);
char *d_reads;
hipMalloc((void**)&d_reads, total_length * sizeof(char));
hipMemcpy(d_reads, h_reads, total_length * sizeof(char), hipMemcpyHostToDevice);
dim3 baseToNum_grid(128);
dim3 baseToNum_block(128);
hipLaunchKernelGGL(kernel_baseToNumber, dim3(baseToNum_grid), dim3(baseToNum_block), 0, 0, d_reads, total_length);
unsigned int *d_compressed;
hipMalloc((void**)&d_compressed, (total_length / 16) * sizeof(int));
int *d_gaps;
hipMalloc((void**)&d_gaps, readsCount * sizeof(int));
dim3 compress_grid((readsCount+127)/128);
dim3 compress_block(128);
hipLaunchKernelGGL(kernel_compressData, dim3(compress_grid), dim3(compress_block), 0, 0,
d_lengths,
d_offsets,
d_reads,
d_compressed,
d_gaps,
readsCount);
//createIndex(data, option);
unsigned short* h_indexs = (unsigned short*) malloc (sizeof(unsigned short) * total_length);
long* h_words = (long*) malloc (sizeof(long) * readsCount);
unsigned short *d_indexs;
hipMalloc((void**)&d_indexs, total_length * sizeof(unsigned short));
unsigned short *d_orders;
hipMalloc((void**)&d_orders, total_length * sizeof(unsigned short));
long *d_words;
hipMalloc((void**)&d_words, readsCount * sizeof(long));
int *d_magicBase;
hipMalloc((void**)&d_magicBase, (readsCount * 4) * sizeof(int));
int wordLength = option.wordLength;
dim3 index_grid ((readsCount+127)/128);
dim3 index_block (128);
switch (wordLength) {
case 4:
hipLaunchKernelGGL(kernel_createIndex4, dim3(index_grid), dim3(index_block), 0, 0,
d_reads,
d_lengths,
d_offsets,
d_indexs,
d_orders,
d_words,
d_magicBase,
readsCount);
break;
case 5:
hipLaunchKernelGGL(kernel_createIndex5, dim3(index_grid), dim3(index_block), 0, 0,
d_reads,
d_lengths,
d_offsets,
d_indexs,
d_orders,
d_words,
d_magicBase,
readsCount);
break;
case 6:
hipLaunchKernelGGL(kernel_createIndex6, dim3(index_grid), dim3(index_block), 0, 0,
d_reads,
d_lengths,
d_offsets,
d_indexs,
d_orders,
d_words,
d_magicBase,
readsCount);
break;
case 7:
hipLaunchKernelGGL(kernel_createIndex7, dim3(index_grid), dim3(index_block), 0, 0,
d_reads,
d_lengths,
d_offsets,
d_indexs,
d_orders,
d_words,
d_magicBase,
readsCount);
break;
}
// createCutoff(data, option);
float threshold = option.threshold;
int *d_wordCutoff;
hipMalloc((void**)&d_wordCutoff, sizeof(int) * readsCount);
hipLaunchKernelGGL(kernel_createCutoff, dim3(index_grid), dim3(index_block), 0, 0,
threshold,
wordLength,
d_lengths,
d_words,
d_wordCutoff,
readsCount);
// sd_ortIndex(data);
hipMemcpy(h_indexs, d_indexs, sizeof(unsigned short) * total_length, hipMemcpyDeviceToHost);
hipMemcpy(h_offsets, d_offsets, sizeof(long) * (1+readsCount), hipMemcpyDeviceToHost);
hipMemcpy(h_words, d_words, sizeof(long) * readsCount, hipMemcpyDeviceToHost);
for (int i = 0; i< readsCount; i++) {
int start = h_offsets[i];
int length = h_words[i];
std::sort(&h_indexs[start], &h_indexs[start]+length);
}
// mergeIndex(data);
hipMemcpy(d_indexs, h_indexs, sizeof(unsigned short) * total_length, hipMemcpyHostToDevice);
hipLaunchKernelGGL(kernel_mergeIndex, dim3(index_grid), dim3(index_block), 0, 0,
d_offsets,
d_indexs,
d_orders,
d_words,
readsCount);
int* h_cluster = (int*) malloc (sizeof(int) * readsCount);
for (int i = 0; i < readsCount; i++) {
h_cluster[i] = -1;
}
int *d_cluster;
hipMalloc((void**)&d_cluster, sizeof(int) * readsCount);
hipMemcpy(d_cluster, h_cluster, sizeof(int) * readsCount, hipMemcpyHostToDevice);
unsigned short* table = (unsigned short*) malloc (sizeof(unsigned short) * 65536);
memset(table, 0, 65536*sizeof(unsigned short)); // fill zero
unsigned short *d_table;
hipMalloc((void**)&d_table, 65536*sizeof(unsigned short));
hipMemcpy(d_table, table, 65536*sizeof(unsigned short), hipMemcpyHostToDevice);
int r = -1; // a shorthand for representative
dim3 makeTable_grid(128);
dim3 makeTable_block(128);
dim3 cleanTable_grid(128);
dim3 cleanTable_block(128);
dim3 magic_grid((readsCount+127)/128);
dim3 magic_block(128);
dim3 filter_grid(readsCount);
dim3 filter_block(128);
dim3 align_grid((readsCount+127)/128);
dim3 align_block(128);
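// Greedy clustering loop: each iteration promotes the next unclustered read r to a
// representative, builds its word-index table, applies the cheap filters
// (kernel_magic on base composition, kernel_filter on shared short words), aligns
// the surviving candidates against r (kernel_align), and clears the table for the
// next round. Kernel roles are inferred from their names and arguments; the kernel
// bodies live in kernels.hip.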
while (r < readsCount) { // clustering
updateRepresentative(d_cluster, &r, readsCount); // update representative
if (r >= readsCount-1) { // complete
break;
}
//std::cout << r << "/" << readsCount << std::endl;
hipLaunchKernelGGL(kernel_makeTable, dim3(makeTable_grid), dim3(makeTable_block), 0, 0,
d_offsets,
d_indexs,
d_orders,
d_words,
d_table,
r);
hipLaunchKernelGGL(kernel_magic, dim3(magic_grid), dim3(magic_block), 0, 0,
threshold,
d_lengths,
d_magicBase,
d_cluster,
r,
readsCount);
hipLaunchKernelGGL(kernel_filter, dim3(filter_grid), dim3(filter_block), 0, 0,
threshold,
wordLength,
d_lengths,
d_offsets,
d_indexs,
d_orders,
d_words,
d_wordCutoff,
d_cluster,
d_table,
readsCount);
hipLaunchKernelGGL(kernel_align, dim3(align_grid), dim3(align_block), 0, 0,
threshold,
d_lengths,
d_offsets,
d_compressed,
d_gaps,
r,
d_cluster,
readsCount);
hipLaunchKernelGGL(kernel_cleanTable, dim3(cleanTable_grid), dim3(cleanTable_block), 0, 0,
d_offsets,
d_indexs,
d_orders,
d_words,
d_table,
r);
}
hipMemcpy(h_cluster, d_cluster, sizeof(int) * readsCount, hipMemcpyDeviceToHost);
auto t2 = std::chrono::high_resolution_clock::now();
double total_time = std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count();
printf("Device offload time %lf secs \n", total_time / 1.0e6);
std::ofstream file(option.outputFile.c_str());
int sum = 0;
for (int i = 0; i < readsCount; i++) {
if (h_cluster[i] == i) {
file << reads[i].name << std::endl;
file << reads[i].data << std::endl;
sum++;
}
}
file.close();
std::cout << "cluster count: " << sum << std::endl;
free(h_lengths);
free(h_offsets);
free(h_reads);
free(h_indexs);
free(h_words);
free(h_cluster);
free(table);
hipFree(d_lengths);
hipFree(d_offsets);
hipFree(d_reads);
hipFree(d_compressed);
hipFree(d_gaps);
hipFree(d_indexs);
hipFree(d_orders);
hipFree(d_words);
hipFree(d_magicBase);
hipFree(d_wordCutoff);
hipFree(d_cluster);
hipFree(d_table);
return 0;
}
| f372c9d2ec059887778902e448c0a31b6da4b9fe.cu | #include <chrono>
#include "utils.h"
#include "kernels.cu"
int main(int argc, char **argv) {
Option option;
checkOption(argc, argv, option);
std::vector<Read> reads;
bool fail = readFile(reads, option);
if (fail) return 1;
int readsCount = reads.size();
int* h_lengths = (int*) malloc (sizeof(int) * readsCount);
long* h_offsets = (long*) malloc (sizeof(long) * (1 + readsCount));
h_offsets[0] = 0;
for (int i = 0; i < readsCount; i++) { // copy data for lengths and offsets
int length = reads[i].data.size();
h_lengths[i] = length;
h_offsets[i+1] = h_offsets[i] + length/16*16+16;
}
long total_length = h_offsets[readsCount];
char* h_reads = (char*) malloc (sizeof(char) * total_length);
for (int i = 0; i < readsCount; i++) { // copy data for reads
memcpy(&h_reads[h_offsets[i]], reads[i].data.c_str(), h_lengths[i]*sizeof(char));
}
auto t1 = std::chrono::high_resolution_clock::now();
int *d_lengths;
hipMalloc((void**)&d_lengths, readsCount * sizeof(int));
hipMemcpy(d_lengths, h_lengths, readsCount * sizeof(int), hipMemcpyHostToDevice);
long *d_offsets;
hipMalloc((void**)&d_offsets, (1+readsCount) * sizeof(long));
hipMemcpy(d_offsets, h_offsets, (1+readsCount) * sizeof(long), hipMemcpyHostToDevice);
char *d_reads;
hipMalloc((void**)&d_reads, total_length * sizeof(char));
hipMemcpy(d_reads, h_reads, total_length * sizeof(char), hipMemcpyHostToDevice);
dim3 baseToNum_grid(128);
dim3 baseToNum_block(128);
hipLaunchKernelGGL(kernel_baseToNumber, dim3(baseToNum_grid), dim3(baseToNum_block), 0, 0, d_reads, total_length);
unsigned int *d_compressed;
hipMalloc((void**)&d_compressed, (total_length / 16) * sizeof(int));
int *d_gaps;
hipMalloc((void**)&d_gaps, readsCount * sizeof(int));
dim3 compress_grid((readsCount+127)/128);
dim3 compress_block(128);
hipLaunchKernelGGL(kernel_compressData, dim3(compress_grid), dim3(compress_block), 0, 0,
d_lengths,
d_offsets,
d_reads,
d_compressed,
d_gaps,
readsCount);
//createIndex(data, option);
unsigned short* h_indexs = (unsigned short*) malloc (sizeof(unsigned short) * total_length);
long* h_words = (long*) malloc (sizeof(long) * readsCount);
unsigned short *d_indexs;
hipMalloc((void**)&d_indexs, total_length * sizeof(unsigned short));
unsigned short *d_orders;
hipMalloc((void**)&d_orders, total_length * sizeof(unsigned short));
long *d_words;
hipMalloc((void**)&d_words, readsCount * sizeof(long));
int *d_magicBase;
hipMalloc((void**)&d_magicBase, (readsCount * 4) * sizeof(int));
int wordLength = option.wordLength;
dim3 index_grid ((readsCount+127)/128);
dim3 index_block (128);
switch (wordLength) {
case 4:
hipLaunchKernelGGL(kernel_createIndex4, dim3(index_grid), dim3(index_block), 0, 0,
d_reads,
d_lengths,
d_offsets,
d_indexs,
d_orders,
d_words,
d_magicBase,
readsCount);
break;
case 5:
hipLaunchKernelGGL(kernel_createIndex5, dim3(index_grid), dim3(index_block), 0, 0,
d_reads,
d_lengths,
d_offsets,
d_indexs,
d_orders,
d_words,
d_magicBase,
readsCount);
break;
case 6:
hipLaunchKernelGGL(kernel_createIndex6, dim3(index_grid), dim3(index_block), 0, 0,
d_reads,
d_lengths,
d_offsets,
d_indexs,
d_orders,
d_words,
d_magicBase,
readsCount);
break;
case 7:
hipLaunchKernelGGL(kernel_createIndex7, dim3(index_grid), dim3(index_block), 0, 0,
d_reads,
d_lengths,
d_offsets,
d_indexs,
d_orders,
d_words,
d_magicBase,
readsCount);
break;
}
// createCutoff(data, option);
float threshold = option.threshold;
int *d_wordCutoff;
hipMalloc((void**)&d_wordCutoff, sizeof(int) * readsCount);
hipLaunchKernelGGL(kernel_createCutoff, dim3(index_grid), dim3(index_block), 0, 0,
threshold,
wordLength,
d_lengths,
d_words,
d_wordCutoff,
readsCount);
// sd_ortIndex(data);
hipMemcpy(h_indexs, d_indexs, sizeof(unsigned short) * total_length, hipMemcpyDeviceToHost);
hipMemcpy(h_offsets, d_offsets, sizeof(long) * (1+readsCount), hipMemcpyDeviceToHost);
hipMemcpy(h_words, d_words, sizeof(long) * readsCount, hipMemcpyDeviceToHost);
for (int i = 0; i< readsCount; i++) {
int start = h_offsets[i];
int length = h_words[i];
std::sort(&h_indexs[start], &h_indexs[start]+length);
}
// mergeIndex(data);
hipMemcpy(d_indexs, h_indexs, sizeof(unsigned short) * total_length, hipMemcpyHostToDevice);
hipLaunchKernelGGL(kernel_mergeIndex, dim3(index_grid), dim3(index_block), 0, 0,
d_offsets,
d_indexs,
d_orders,
d_words,
readsCount);
int* h_cluster = (int*) malloc (sizeof(int) * readsCount);
for (int i = 0; i < readsCount; i++) {
h_cluster[i] = -1;
}
int *d_cluster;
hipMalloc((void**)&d_cluster, sizeof(int) * readsCount);
hipMemcpy(d_cluster, h_cluster, sizeof(int) * readsCount, hipMemcpyHostToDevice);
unsigned short* table = (unsigned short*) malloc (sizeof(unsigned short) * 65536);
memset(table, 0, 65536*sizeof(unsigned short)); // fill zero
unsigned short *d_table;
hipMalloc((void**)&d_table, 65536*sizeof(unsigned short));
hipMemcpy(d_table, table, 65536*sizeof(unsigned short), hipMemcpyHostToDevice);
int r = -1; // a shorthand for representative
dim3 makeTable_grid(128);
dim3 makeTable_block(128);
dim3 cleanTable_grid(128);
dim3 cleanTable_block(128);
dim3 magic_grid((readsCount+127)/128);
dim3 magic_block(128);
dim3 filter_grid(readsCount);
dim3 filter_block(128);
dim3 align_grid((readsCount+127)/128);
dim3 align_block(128);
while (r < readsCount) { // clustering
updateRepresentative(d_cluster, &r, readsCount); // update representative
if (r >= readsCount-1) { // complete
break;
}
//std::cout << r << "/" << readsCount << std::endl;
hipLaunchKernelGGL(kernel_makeTable, dim3(makeTable_grid), dim3(makeTable_block), 0, 0,
d_offsets,
d_indexs,
d_orders,
d_words,
d_table,
r);
hipLaunchKernelGGL(kernel_magic, dim3(magic_grid), dim3(magic_block), 0, 0,
threshold,
d_lengths,
d_magicBase,
d_cluster,
r,
readsCount);
hipLaunchKernelGGL(kernel_filter, dim3(filter_grid), dim3(filter_block), 0, 0,
threshold,
wordLength,
d_lengths,
d_offsets,
d_indexs,
d_orders,
d_words,
d_wordCutoff,
d_cluster,
d_table,
readsCount);
hipLaunchKernelGGL(kernel_align, dim3(align_grid), dim3(align_block), 0, 0,
threshold,
d_lengths,
d_offsets,
d_compressed,
d_gaps,
r,
d_cluster,
readsCount);
hipLaunchKernelGGL(kernel_cleanTable, dim3(cleanTable_grid), dim3(cleanTable_block), 0, 0,
d_offsets,
d_indexs,
d_orders,
d_words,
d_table,
r);
}
hipMemcpy(h_cluster, d_cluster, sizeof(int) * readsCount, hipMemcpyDeviceToHost);
auto t2 = std::chrono::high_resolution_clock::now();
double total_time = std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count();
printf("Device offload time %lf secs \n", total_time / 1.0e6);
std::ofstream file(option.outputFile.c_str());
int sum = 0;
for (int i = 0; i < readsCount; i++) {
if (h_cluster[i] == i) {
file << reads[i].name << std::endl;
file << reads[i].data << std::endl;
sum++;
}
}
file.close();
std::cout << "cluster count: " << sum << std::endl;
free(h_lengths);
free(h_offsets);
free(h_reads);
free(h_indexs);
free(h_words);
free(h_cluster);
free(table);
hipFree(d_lengths);
hipFree(d_offsets);
hipFree(d_reads);
hipFree(d_compressed);
hipFree(d_gaps);
hipFree(d_indexs);
hipFree(d_orders);
hipFree(d_words);
hipFree(d_magicBase);
hipFree(d_wordCutoff);
hipFree(d_cluster);
hipFree(d_table);
return 0;
}
|
4c834b15c1ebcb38cb58ec68e3f9ea9d55b3a54a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "convolution_1x1_layer_updater_cuda.h"
#include "util_cuda.h"
#include "neural_network_cublas_exception.h"
#include "neural_network_cuda_exception.h"
#include "neural_network_cudnn_exception.h"
#include "../convolution_layer.h"
namespace nnforge
{
namespace cuda
{
convolution_1x1_layer_updater_cuda::convolution_1x1_layer_updater_cuda()
: output_data_desc(0)
, bias_desc(0)
{
cudnn_safe_call(cudnnCreateTensorDescriptor(&output_data_desc));
cudnn_safe_call(cudnnCreateTensorDescriptor(&bias_desc));
}
convolution_1x1_layer_updater_cuda::~convolution_1x1_layer_updater_cuda()
{
cudnnDestroyTensorDescriptor(output_data_desc);
cudnnDestroyTensorDescriptor(bias_desc);
}
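// A 1x1 convolution reduces to a feature_map_count x feature_map_count matrix
// product applied at every spatial position. enqueue_test below therefore
// transposes the input into feature-map-major order, performs one SGEMM over all
// (entry x spatial position) columns, transposes the result back, and then adds
// the bias through cuDNN.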
void convolution_1x1_layer_updater_cuda::enqueue_test(
unsigned int offset_input_entry_id,
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count,
bool force_deterministic)
{
{
cuda_util::transpose(
*cuda_config,
(const float *)(*input_neurons_buffer) + input_elem_count_per_entry * offset_input_entry_id,
*additional_buffers[0],
input_elem_count_per_feature_map,
input_configuration_specific.feature_map_count,
entry_count,
stream_id);
cublas_safe_call(hipblasSetStream(cuda_config->get_cublas_handle(), stream_id));
float alpha = 1.0F;
float beta = 0.0F;
cublas_safe_call(hipblasSgemm(
cuda_config->get_cublas_handle(),
HIPBLAS_OP_T,
HIPBLAS_OP_N,
output_configuration_specific.feature_map_count,
entry_count * input_elem_count_per_feature_map,
input_configuration_specific.feature_map_count,
&alpha,
*data[0],
input_configuration_specific.feature_map_count,
*additional_buffers[0],
input_configuration_specific.feature_map_count,
&beta,
*additional_buffers[1],
output_configuration_specific.feature_map_count));
cuda_util::transpose(
*cuda_config,
*additional_buffers[1],
*output_neurons_buffer,
output_configuration_specific.feature_map_count,
output_elem_count_per_feature_map,
entry_count,
stream_id);
}
// Add bias
{
cudnn_safe_call(cudnnSetStream(cuda_config->get_cudnn_handle(), stream_id));
cudnn_safe_call(cudnnSetTensor4dDescriptor(
output_data_desc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
entry_count,
output_configuration_specific.feature_map_count,
1,
output_elem_count_per_feature_map));
float alpha = 1.0F;
float beta = 1.0F;
cudnn_safe_call(cudnnAddTensor(
cuda_config->get_cudnn_handle(),
CUDNN_ADD_SAME_C,
&alpha,
bias_desc,
*data[1],
&beta,
output_data_desc,
*output_neurons_buffer));
}
}
void convolution_1x1_layer_updater_cuda::enqueue_backprop(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count,
bool force_deterministic)
{
cublas_safe_call(hipblasSetStream(cuda_config->get_cublas_handle(), stream_id));
float alpha = 1.0F;
float beta = 0.0F;
cublas_safe_call(hipblasSgemm(
cuda_config->get_cublas_handle(),
HIPBLAS_OP_N,
HIPBLAS_OP_N,
input_configuration_specific.feature_map_count,
entry_count * input_elem_count_per_feature_map,
output_configuration_specific.feature_map_count,
&alpha,
*data[0],
input_configuration_specific.feature_map_count,
*additional_buffers[1],
output_configuration_specific.feature_map_count,
&beta,
*additional_buffers[0],
input_configuration_specific.feature_map_count));
cuda_util::transpose(
*cuda_config,
*additional_buffers[0],
*input_errors_buffer,
input_configuration_specific.feature_map_count,
input_elem_count_per_feature_map,
entry_count,
stream_id);
}
void convolution_1x1_layer_updater_cuda::enqueue_update_weights(
unsigned int offset_input_entry_id,
hipStream_t stream_id,
const std::vector<cuda_linear_buffer_device_smart_ptr>& gradient,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count,
bool force_deterministic)
{
// Update weights
{
cuda_util::transpose(
*cuda_config,
*output_errors_buffer,
*additional_buffers[1],
output_elem_count_per_feature_map,
output_configuration_specific.feature_map_count,
entry_count,
stream_id);
cublas_safe_call(hipblasSetStream(cuda_config->get_cublas_handle(), stream_id));
float alpha = 1.0F;
float beta = 1.0F;
cublas_safe_call(hipblasSgemm(
cuda_config->get_cublas_handle(),
HIPBLAS_OP_N,
HIPBLAS_OP_T,
input_configuration_specific.feature_map_count,
output_configuration_specific.feature_map_count,
entry_count * input_elem_count_per_feature_map,
&alpha,
*additional_buffers[0],
input_configuration_specific.feature_map_count,
*additional_buffers[1],
output_configuration_specific.feature_map_count,
&beta,
*gradient[0],
input_configuration_specific.feature_map_count));
}
// Update bias
{
cudnn_safe_call(cudnnSetStream(cuda_config->get_cudnn_handle(), stream_id));
cudnn_safe_call(cudnnSetTensor4dDescriptor(
output_data_desc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
entry_count,
output_configuration_specific.feature_map_count,
1,
output_elem_count_per_feature_map));
float alpha = 1.0F;
float beta = 1.0F;
cudnn_safe_call(cudnnConvolutionBackwardBias(
cuda_config->get_cudnn_handle(),
&alpha,
output_data_desc,
*output_errors_buffer,
&beta,
bias_desc,
*gradient[1]));
}
}
void convolution_1x1_layer_updater_cuda::updater_configured()
{
cudnn_safe_call(cudnnSetTensor4dDescriptor(
bias_desc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
1,
output_configuration_specific.feature_map_count,
1,
1));
}
std::vector<size_t> convolution_1x1_layer_updater_cuda::get_sizes_of_additional_buffers_per_entry() const
{
std::vector<size_t> res;
res.push_back(input_elem_count_per_entry * sizeof(float));
res.push_back(output_elem_count_per_entry * sizeof(float));
return res;
}
bool convolution_1x1_layer_updater_cuda::is_in_place_backprop() const
{
return false;
}
}
}
| 4c834b15c1ebcb38cb58ec68e3f9ea9d55b3a54a.cu | /*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "convolution_1x1_layer_updater_cuda.h"
#include "util_cuda.h"
#include "neural_network_cublas_exception.h"
#include "neural_network_cuda_exception.h"
#include "neural_network_cudnn_exception.h"
#include "../convolution_layer.h"
namespace nnforge
{
namespace cuda
{
convolution_1x1_layer_updater_cuda::convolution_1x1_layer_updater_cuda()
: output_data_desc(0)
, bias_desc(0)
{
cudnn_safe_call(cudnnCreateTensorDescriptor(&output_data_desc));
cudnn_safe_call(cudnnCreateTensorDescriptor(&bias_desc));
}
convolution_1x1_layer_updater_cuda::~convolution_1x1_layer_updater_cuda()
{
cudnnDestroyTensorDescriptor(output_data_desc);
cudnnDestroyTensorDescriptor(bias_desc);
}
void convolution_1x1_layer_updater_cuda::enqueue_test(
unsigned int offset_input_entry_id,
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count,
bool force_deterministic)
{
{
cuda_util::transpose(
*cuda_config,
(const float *)(*input_neurons_buffer) + input_elem_count_per_entry * offset_input_entry_id,
*additional_buffers[0],
input_elem_count_per_feature_map,
input_configuration_specific.feature_map_count,
entry_count,
stream_id);
cublas_safe_call(cublasSetStream(cuda_config->get_cublas_handle(), stream_id));
float alpha = 1.0F;
float beta = 0.0F;
cublas_safe_call(cublasSgemm(
cuda_config->get_cublas_handle(),
CUBLAS_OP_T,
CUBLAS_OP_N,
output_configuration_specific.feature_map_count,
entry_count * input_elem_count_per_feature_map,
input_configuration_specific.feature_map_count,
&alpha,
*data[0],
input_configuration_specific.feature_map_count,
*additional_buffers[0],
input_configuration_specific.feature_map_count,
&beta,
*additional_buffers[1],
output_configuration_specific.feature_map_count));
cuda_util::transpose(
*cuda_config,
*additional_buffers[1],
*output_neurons_buffer,
output_configuration_specific.feature_map_count,
output_elem_count_per_feature_map,
entry_count,
stream_id);
}
// Add bias
{
cudnn_safe_call(cudnnSetStream(cuda_config->get_cudnn_handle(), stream_id));
cudnn_safe_call(cudnnSetTensor4dDescriptor(
output_data_desc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
entry_count,
output_configuration_specific.feature_map_count,
1,
output_elem_count_per_feature_map));
float alpha = 1.0F;
float beta = 1.0F;
cudnn_safe_call(cudnnAddTensor(
cuda_config->get_cudnn_handle(),
CUDNN_ADD_SAME_C,
&alpha,
bias_desc,
*data[1],
&beta,
output_data_desc,
*output_neurons_buffer));
}
}
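// In effect the forward pass above computes, for every entry e and pixel p,
//   output[e][:, p] = W^T * input[e][:, p], followed by a per-feature-map bias add,
// where W is the (input_fm x output_fm) matrix stored in data[0] (shapes inferred from the
// cuBLAS arguments); the two transposes only rearrange the NCHW buffers into feature-map-major
// columns so that a single SGEMM covers all (entry, pixel) pairs.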
void convolution_1x1_layer_updater_cuda::enqueue_backprop(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count,
bool force_deterministic)
{
cublas_safe_call(cublasSetStream(cuda_config->get_cublas_handle(), stream_id));
float alpha = 1.0F;
float beta = 0.0F;
cublas_safe_call(cublasSgemm(
cuda_config->get_cublas_handle(),
CUBLAS_OP_N,
CUBLAS_OP_N,
input_configuration_specific.feature_map_count,
entry_count * input_elem_count_per_feature_map,
output_configuration_specific.feature_map_count,
&alpha,
*data[0],
input_configuration_specific.feature_map_count,
*additional_buffers[1],
output_configuration_specific.feature_map_count,
&beta,
*additional_buffers[0],
input_configuration_specific.feature_map_count));
cuda_util::transpose(
*cuda_config,
*additional_buffers[0],
*input_errors_buffer,
input_configuration_specific.feature_map_count,
input_elem_count_per_feature_map,
entry_count,
stream_id);
}
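// Backprop shape sketch (inferred from the arguments): the SGEMM forms
//   additional_buffers[0] (input_fm x entry*pixels) = W (input_fm x output_fm) * additional_buffers[1] (output_fm x entry*pixels),
// where additional_buffers[1] is the feature-map-major staging copy of the output errors
// (the same buffer enqueue_update_weights fills via its transpose); the final transpose then
// writes the result back to NCHW order in input_errors_buffer.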
void convolution_1x1_layer_updater_cuda::enqueue_update_weights(
unsigned int offset_input_entry_id,
cudaStream_t stream_id,
const std::vector<cuda_linear_buffer_device_smart_ptr>& gradient,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count,
bool force_deterministic)
{
// Update weights
{
cuda_util::transpose(
*cuda_config,
*output_errors_buffer,
*additional_buffers[1],
output_elem_count_per_feature_map,
output_configuration_specific.feature_map_count,
entry_count,
stream_id);
cublas_safe_call(cublasSetStream(cuda_config->get_cublas_handle(), stream_id));
float alpha = 1.0F;
float beta = 1.0F;
cublas_safe_call(cublasSgemm(
cuda_config->get_cublas_handle(),
CUBLAS_OP_N,
CUBLAS_OP_T,
input_configuration_specific.feature_map_count,
output_configuration_specific.feature_map_count,
entry_count * input_elem_count_per_feature_map,
&alpha,
*additional_buffers[0],
input_configuration_specific.feature_map_count,
*additional_buffers[1],
output_configuration_specific.feature_map_count,
&beta,
*gradient[0],
input_configuration_specific.feature_map_count));
}
// Update bias
{
cudnn_safe_call(cudnnSetStream(cuda_config->get_cudnn_handle(), stream_id));
cudnn_safe_call(cudnnSetTensor4dDescriptor(
output_data_desc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
entry_count,
output_configuration_specific.feature_map_count,
1,
output_elem_count_per_feature_map));
float alpha = 1.0F;
float beta = 1.0F;
cudnn_safe_call(cudnnConvolutionBackwardBias(
cuda_config->get_cudnn_handle(),
&alpha,
output_data_desc,
*output_errors_buffer,
&beta,
bias_desc,
*gradient[1]));
}
}
void convolution_1x1_layer_updater_cuda::updater_configured()
{
cudnn_safe_call(cudnnSetTensor4dDescriptor(
bias_desc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
1,
output_configuration_specific.feature_map_count,
1,
1));
}
std::vector<size_t> convolution_1x1_layer_updater_cuda::get_sizes_of_additional_buffers_per_entry() const
{
std::vector<size_t> res;
res.push_back(input_elem_count_per_entry * sizeof(float));
res.push_back(output_elem_count_per_entry * sizeof(float));
return res;
}
bool convolution_1x1_layer_updater_cuda::is_in_place_backprop() const
{
return false;
}
}
}
|
b4190516393258fe79b2911762e3c5b7d95a19ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cudaconv2_extra.cuh>
/*
* Block size: 16x16.
* blockIdx.x determines case in batches of 16*imgsPerThread.
* blockIdx.y determines 4x4 image region in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines pixel.
*
* hidActs: (numFilters, mpOutputsX, mpOutputsX, numImages) -- max-pooled conv activations
* hidSwitches: same layout; for each pooled output, the index of the conv module that won the max
* filters: (numColors, filterPixels, numFilters) if conv
* (numModulesY, numModulesX, numColors, filterPixels, numFilters) otherwise
* targets: (numColors, imgSizeY, imgSizeX, numImages)
*
* Each block reconstructs one 4x4 pixel region from 16*imgsPerThread cases.
*
* Number of filters must be divisible by 16.
* Number of images must be divisible by 16*imgsPerThread if checkCaseBounds is false.
* 16 * imgsPerThread must be divisible by 32.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*/
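/*
 * Index-mapping example (illustrative numbers, not from the original source): with imgSizeX = 32
 * the image is tiled into DIVUP(32, 4) = 8 regions per row, so blockIdx.y = 10 selects the 4x4
 * region with top-left pixel (y, x) = (4, 8) (region row 10/8 = 1, region column 10%8 = 2), and
 * threadIdx.y = 6 picks pixel (6/4, 6%4) = (1, 2) inside it, i.e. image pixel (5, 10);
 * threadIdx.x indexes into the 16*imgsPerThread cases handled by the block (imgsPerThread per
 * thread, strided by 16).
 */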
template <int imgsPerThread, int numColors, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_color2(const float* hidActs, const float*_hidSwitches, const float* filters, float* targets,
const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeY, const int imgSizeX,
const int paddingStart, const int moduleStride,
const int mpSizeX, const int mpStart, const int mpStride, const int mpOutputsX,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[numColors*16][16 + 1];
__shared__ float shHidActs[16][16*imgsPerThread];
const int* hidSwitches = (const int *) _hidSwitches;
const int blockCaseIdx = blockIdx.x * 16*imgsPerThread;
const int numRegionsX = DIVUP(imgSizeX, 4);
const int blockRegionIdx = blockIdx.y;
const int blockRegionIdxX = blockRegionIdx % numRegionsX;
const int blockRegionIdxY = blockRegionIdx / numRegionsX;
const int blockRegionLeft = blockRegionIdxX * 4;
const int blockRegionTop = blockRegionIdxY * 4;
const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
const int pxY = blockRegionTop + pxYInRegion;
const int pxX = blockRegionLeft + pxXInRegion;
const int pxIdx = pxY * imgSizeX + pxX;
const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX;
const int numModules = numModulesY * numModulesX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeX * imgSizeY;
const int tidx = threadIdx.y * 16 + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
const int mp_numModules = mpOutputsX * mpOutputsX;
hidActs += blockCaseIdx + loadY * numImages * mp_numModules + loadX;
hidSwitches += blockCaseIdx + loadY * numImages * mp_numModules + loadX;
filters += threadIdx.x;
targets += pxIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[numColors][imgsPerThread];
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockRegionTop - paddingStart < filterSize ? 0
: 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
const int startX = blockRegionLeft - paddingStart < filterSize ? 0
: 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);
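// Example of these bounds (illustrative numbers only): with paddingStart = 0, moduleStride = 1,
// filterSize = 5 and blockRegionTop = 10, startY = 1 + (10 - 5) / 1 = 6 and
// endY = min(numModulesY, 1 + 13) = 14, i.e. exactly the modules whose 5-wide window overlaps
// image rows 10..13 of this region.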
float* shilterLoad = &shFilters[threadIdx.y][threadIdx.x];
float* shHidActLoad = &shHidActs[loadY][loadX];
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInModuleY = pxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInModuleX = pxX - moduleLeft;
const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;
const int convTargetY = moduleIdx / numModulesX;
const int convTargetX = moduleIdx % numModulesX;
const int loopStartY = MAX(0, DIVUP(convTargetY - mpStart - (mpSizeX-1), mpStride));
const int loopStartX = MAX(0, DIVUP(convTargetX - mpStart - (mpSizeX-1), mpStride));
const int loopEndY = MIN(mpOutputsX-1, (convTargetY - mpStart) / mpStride);
const int loopEndX = MIN(mpOutputsX-1, (convTargetX - mpStart) / mpStride);
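// The loop bounds above enumerate every max-pooling output whose mpSizeX-wide window contains
// conv module (convTargetY, convTargetX); the gather below then only takes an activation when the
// stored switch says this module actually won that max. Illustrative numbers: mpStart = 0,
// mpSizeX = 3, mpStride = 2, convTargetY = 5 gives loopStartY = DIVUP(5 - 2, 2) = 2 and
// loopEndY = 5 / 2 = 2; pooling row 2 covers conv rows 4..6.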
for (int f = 0; f < numFilters; f += 16) { // multiply with 16 filters at a time
// Now the threads split up into half-warps, and each half-warp decides if it's interested.
// const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
// to load the moduleIdx + f module
#pragma unroll
for (int i = 0; i < imgsPerThread * 16; i += 32) {
if (!checkCaseBounds || blockCaseIdx + i + loadX < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = 0;
#pragma unroll
for (int y = loopStartY; y <= loopEndY; y++) {
#pragma unroll
for (int x = loopStartX; x <= loopEndX; x++) {
const int mpOutIdx = (y * mpOutputsX + x) * numImages + i + (f+j) * numImages * mp_numModules;
shHidActLoad[j * 16 * imgsPerThread + i] += hidSwitches[mpOutIdx] == moduleIdx? hidActs[mpOutIdx] : 0;
//hLoad[j * numModules * numImages + i];
}
}
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = 0;
}
}
}
if (isPxInImg && isPxInModule) {
// This half-warp is interested, so it's going to load the weights from this module to its pixel.
// Not fully coalesced read :(
// But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
: &filters[(moduleIdx * numColors * filterPixels + pxIdxInModule) * numFilters + f];
#pragma unroll
for (int c = 0; c < numColors; c++) {
shilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
if (isPxInImg && isPxInModule) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
}
}
}
}
__syncthreads();
}
}
}
// Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
if (isPxInImg) {
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i];
}
}
}
}
}
}
/*
* Block size: 16x16.
* blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/colorsPerThread
* blockIdx.y determines 4x4 image region in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines pixel.
*
* hidActs: (numFilters, mpOutputsX, mpOutputsX, numImages) -- max-pooled conv activations
* hidSwitches: same layout; for each pooled output, the index of the conv module that won the max
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgSizeY, imgSizeX, numImages)
*
* Each block reconstructs one 4x4 pixel region from 16*imgsPerThread cases.
*
* numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false.
* 16 * imgsPerThread must be divisible by 32.
* numImageColors/numGroups must be divisible by colorsPerThread.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are 4-16 color channels.
*/
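/*
 * Decoding example for the fused blockIdx.x (illustrative numbers only): with numImages = 256 and
 * imgsPerThread = 4 there are DIVUP(256, 16*4) = 4 image blocks, so blockIdx.x = 9 means image
 * block 9 % 4 = 1 (cases 64..127) and color block 9 / 4 = 2, i.e. image colors
 * 2*colorsPerThread .. 3*colorsPerThread - 1.
 */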
template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_mediumcolor2(const float* hidActs, const float* _hidSwitches, const float* filters, float* targets,
const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart,
const int moduleStride, const int numImgColors, const int numGroups,
const int mpSizeX, const int mpStart, const int mpStride, const int mpOutputsX,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread*16][16 + 1];
__shared__ float shHidActs[16][16*imgsPerThread];
const int* hidSwitches = (const int*) _hidSwitches;
const int numImgBlocks = DIVUP(numImages,16*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int numRegionsX = DIVUP(imgSizeX, 4);
const int blockRegionIdx = blockIdx.y;
const int blockRegionIdxX = blockRegionIdx % numRegionsX;
const int blockRegionIdxY = blockRegionIdx / numRegionsX;
const int blockRegionLeft = blockRegionIdxX * 4;
const int blockRegionTop = blockRegionIdxY * 4;
const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
const int pxY = blockRegionTop + pxYInRegion;
const int pxX = blockRegionLeft + pxXInRegion;
const int pxIdx = pxY * imgSizeX + pxX;
const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX;
const uint numModules = numModulesY * numModulesX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int tidx = threadIdx.y * 16 + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
const int mp_numModules = mpOutputsX * mpOutputsX;
hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * mp_numModules + loadX;
hidSwitches += blockCaseIdx + (blockFilterIdx + loadY) * numImages * mp_numModules + loadX;
filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x;
targets += imgColorIdx * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockRegionTop - paddingStart < filterSize ? 0
: 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
const int startX = blockRegionLeft - paddingStart < filterSize ? 0
: 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
float* shHidActLoad = &shHidActs[loadY][loadX];
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInModuleY = pxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInModuleX = pxX - moduleLeft;
const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;
const int convTargetY = moduleIdx / numModulesX;
const int convTargetX = moduleIdx % numModulesX;
const int loopStartY = MAX(0, DIVUP(convTargetY - mpStart - (mpSizeX-1), mpStride));
const int loopStartX = MAX(0, DIVUP(convTargetX - mpStart - (mpSizeX-1), mpStride));
const int loopEndY = MIN(mpOutputsX-1, (convTargetY - mpStart) / mpStride);
const int loopEndX = MIN(mpOutputsX-1, (convTargetX - mpStart) / mpStride);
for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
// Now the threads split up into half-warps, and each half-warp decides if it's interested.
//const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * 16; i += 32) {
if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = 0;
#pragma unroll
for (int y = loopStartY; y <= loopEndY; y++) {
#pragma unroll
for (int x = loopStartX; x <= loopEndX; x++) {
const int mpOutIdx = (y * mpOutputsX + x) * numImages + i + (f+j) * numImages * mp_numModules;
shHidActLoad[j * 16 * imgsPerThread + i] += hidSwitches[mpOutIdx] == moduleIdx? hidActs[mpOutIdx] : 0;
}
}
// shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = 0;
}
}
}
if (isPxInImg && isPxInModule) {
// This half-warp is interested, so it's going to load the weights from this module to its pixel.
// Not fully coalesced read :(
// But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
if (isPxInImg && isPxInModule) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
}
}
}
}
__syncthreads();
}
}
}
// Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
if (isPxInImg) {
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i];
}
}
}
}
}
}
/*
* Block size: B_YxB_X.
* blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread)
* blockIdx.y determines image pixel in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines color.
*
* hidActs: (numFilters, mpOutputsX, mpOutputsX, numImages) -- max-pooled conv activations
* hidSwitches: same layout; for each pooled output, the index of the conv module that won the max
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgSizeY, imgSizeX, numImages)
*
* Each block reconstructs B_Y*colorsPerThread colors of one pixel from B_X*imgsPerThread cases.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false.
* numFiltersPerGroup must be divisible by 16.
*
* B_X * imgsPerThread must be divisible by 32.
* numFilterColors must be divisible by B_Y*colorsPerThread.
* B_X*B_Y must be divisible by 32.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are >= 16 color channels.
*/
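/*
 * Shared-memory footprint sketch (derived from the declarations below): shFilters holds
 * colorsPerThread*B_Y rows of 16 (+1 padding) filter weights and shHidActs holds 16 rows of
 * B_X*imgsPerThread gathered activations, so e.g. B_Y = 4, B_X = 32, colorsPerThread = 4,
 * imgsPerThread = 4 uses about (16*17 + 16*128) * 4 bytes, roughly 9 KB per block.
 */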
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void conv_img_acts_manycolor2(const float* hidActs, const float* _hidSwitches, const float* filters, float* targets,
const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
const int numImgColors, const int numGroups,
int mpSizeX, int mpStart, int mpStride, int mpOutputsX,
const float scaleTargets, const float scaleOutputs) {
const int *hidSwitches = (const int *) _hidSwitches;
__shared__ float shFilters[colorsPerThread*B_Y][16 + 1]; // TODO: perhaps reconsider this 16
__shared__ float shHidActs[16][B_X*imgsPerThread];
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int blockPixelIdx = blockIdx.y;
const int blockPixelIdxX = blockPixelIdx % imgSizeX;
const int blockPixelIdxY = blockPixelIdx / imgSizeX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
const int filtersLoadY = tidx / 16, filtersLoadX = tidx % 16;
// const int numModules = numModulesY * numModulesX;
const int mp_numModules = mpOutputsX * mpOutputsX;
hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * mp_numModules + hidActLoadX;
hidSwitches += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * mp_numModules + hidActLoadX;
// hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;
const int convTargetY = moduleIdx / numModulesX;
const int convTargetX = moduleIdx % numModulesX;
const int loopStartY = MAX(0, DIVUP(convTargetY - mpStart - (mpSizeX-1), mpStride));
const int loopStartX = MAX(0, DIVUP(convTargetX - mpStart - (mpSizeX-1), mpStride));
const int loopEndY = MIN(mpOutputsX-1, (convTargetY - mpStart) / mpStride);
const int loopEndX = MIN(mpOutputsX-1, (convTargetX - mpStart) / mpStride);
for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
// const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * B_X; i += 32) {
if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * B_X * imgsPerThread + i] = 0;
#pragma unroll
for (int y = loopStartY; y <= loopEndY; y++) {
#pragma unroll
for (int x = loopStartX; x <= loopEndX; x++) {
const int mpOutIdx = (y * mpOutputsX + x) * numImages + i + (f+j) * numImages * mp_numModules;
shHidActLoad[j * B_X * imgsPerThread + i] += hidSwitches[mpOutIdx] == moduleIdx? hidActs[mpOutIdx] : 0;
//shHidActLoad[j * B_X * imgsPerThread + i] += hidSwitches[mpOutIdx];
}
}
// shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * B_X * imgsPerThread + i] = 0;
}
}
}
const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
#pragma unroll
for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/16) {
if ((colorsPerThread*B_Y) % (B_X*B_Y/16) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X];
}
}
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
}
}
}
}
}
void _imgActs2(NVMatrix& hidActs, NVMatrix& hidSwitches, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
int mpSizeX, int mpStart, int mpStride, int mpOutputsX,
float scaleTargets, float scaleOutput, bool conv) {
int numFilterColors = numImgColors / numGroups;
int numImages = hidActs.getNumCols();
int numFilters = filters.getNumCols();
int numModules = numModulesY * numModulesY; // module grid is assumed square here, so numModulesX (derived below) equals numModulesY
int filterModuleMult = conv ? 1 : numModules;
int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
int filterSize = sqrt(filterPixels);
int imgPixels = imgSizeY * imgSizeX;
int numModulesX = numModules / numModulesY;
int numOutputs = mpOutputsX * mpOutputsX;
assert(numImgColors % numGroups == 0);
assert(numFilters % (16*numGroups) == 0);
assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0)));
assert(numGroups == 1 || numFilterColors % 4 == 0);
assert(filterPixels == filterSize * filterSize);
assert(hidActs.getNumRows() == numOutputs * numFilters);
assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
assert(numModules == numModulesY * numModulesX);
assert(hidActs.isContiguous());
assert(filters.isContiguous());
assert(!hidActs.isTrans());
assert(!filters.isTrans());
assert(!targets.isTrans());
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0);
assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
assert(moduleStride <= filterSize);
assert(targets.isContiguous()); // no stride support here!
dim3 blocks;
dim3 threads(16,16);
int colorsPerThread;
int imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
if (numFilterColors % 8 == 0) {
threads = dim3(32, 4);
colorsPerThread = numFilterColors % 16 == 0 ? 4 : 2;
imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
assert(numFilterColors % (threads.y * colorsPerThread) == 0);
blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels);
} else if (numFilterColors > 3) {
colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2;
blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread) * (numImgColors / colorsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4));
} else {
blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4));
}
bool checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0;
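// Launch-configuration example (illustrative numbers only): numImgColors = 32, numGroups = 1,
// numImages = 128, imgSizeX = imgSizeY = 24 takes the first branch above, giving
// threads = (32, 4), colorsPerThread = 4, imgsPerThread = 4, checkCaseBounds = false and
// blocks = (DIVUP(128, 32*4) * 32/(4*4), 24*24) = (2, 576): one block per image pixel for each
// (image-block, color-block) pair.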
if (scaleTargets == 0) { // do not scale or use targets matrix
targets.resize(numImgColors*imgPixels, numImages);
} else {
assert(targets.getNumRows() == numImgColors * imgPixels);
assert(targets.getNumCols() == numImages);
}
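// The dispatch ladder below only picks the compile-time template parameters
// <imgsPerThread, colorsPerThread, scale, checkCaseBounds, conv> and the matching kernel
// (manycolor2 / mediumcolor2 / color2); every branch launches the same gather-through-switches
// image-gradient computation.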
if (conv) { // convolutional units
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 4, 4, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor2<4, 32, 4, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 4, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor2<4, 32, 4, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 4, 4, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor2<4, 32, 4, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 4, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor2<4, 32, 4, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 2, 4, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor2<4, 32, 2, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 2, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor2<4, 32, 2, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 2, 4, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor2<4, 32, 2, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 2, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor2<4, 32, 2, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 1, 4, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor2<4, 32, 1, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 1, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor2<4, 32, 1, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 1, 4, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor2<4, 32, 1, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 1, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor2<4, 32, 1, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
}
} else if (numFilterColors > 3) {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor2<8, 4, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor2<8, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor2<8, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor2<8, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor2<8, 4, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor2<8, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor2<8, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor2<8, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor2<4, 4, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor2<4, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor2<4, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor2<4, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor2<4, 4, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor2<4, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor2<4, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor2<4, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor2<2, 4, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor2<2, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor2<2, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor2<2, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor2<2, 4, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor2<2, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor2<2, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor2<2, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
}
} else {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color2<8, 1, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<8, 1, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color2<8, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<8, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color2<8, 3, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<8, 3, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color2<8, 1, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<8, 1, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color2<8, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<8, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color2<8, 3, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<8, 3, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color2<4, 1, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<4, 1, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color2<4, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<4, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color2<4, 3, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<4, 3, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color2<4, 1, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<4, 1, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color2<4, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<4, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color2<4, 3, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<4, 3, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color2<2, 1, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<2, 1, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color2<2, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<2, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color2<2, 3, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<2, 3, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color2<2, 1, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<2, 1, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color2<2, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<2, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color2<2, 3, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<2, 3, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 4, 4, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor2<4, 32, 4, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 4, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor2<4, 32, 4, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 4, 4, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor2<4, 32, 4, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 4, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor2<4, 32, 4, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 2, 4, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor2<4, 32, 2, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 2, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor2<4, 32, 2, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 2, 4, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor2<4, 32, 2, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 2, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor2<4, 32, 2, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 1, 4, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor2<4, 32, 1, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 1, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor2<4, 32, 1, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 1, 4, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor2<4, 32, 1, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 1, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor2<4, 32, 1, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
}
} else if (numFilterColors > 3) {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor2<8, 4, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor2<8, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor2<8, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor2<8, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor2<8, 4, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor2<8, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor2<8, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor2<8, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor2<4, 4, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor2<4, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor2<4, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor2<4, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor2<4, 4, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor2<4, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor2<4, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor2<4, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor2<2, 4, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor2<2, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor2<2, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor2<2, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor2<2, 4, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor2<2, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor2<2, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor2<2, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
}
} else {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color2<8, 1, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<8, 1, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color2<8, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<8, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color2<8, 3, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<8, 3, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color2<8, 1, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<8, 1, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color2<8, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<8, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color2<8, 3, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<8, 3, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color2<4, 1, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<4, 1, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color2<4, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<4, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color2<4, 3, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<4, 3, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color2<4, 1, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<4, 1, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color2<4, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<4, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color2<4, 3, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<4, 3, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color2<2, 1, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<2, 1, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color2<2, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<2, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color2<2, 3, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<2, 3, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color2<2, 1, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<2, 1, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color2<2, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<2, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color2<2, 3, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color2<2, 3, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
}
}
}
}
getLastCudaError("imgActs: kernel execution failed");
}
void convImgActs2(NVMatrix& hidActs, NVMatrix& hidSwitches, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
int mpSizeX, int mpStart, int mpStride, int mpOutputsX) {
_imgActs2(hidActs, hidSwitches, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, 0, 1, true);
}
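// Minimal usage sketch for the wrappers above (hypothetical NVMatrix objects and sizes, assuming
// 5x5 filters, a square 28x28 module grid and a 3x3, stride-2 max-pooling layer on top of the conv):
//   convImgActs2(hidActs, hidSwitches, filters, targets,
//                /*imgSizeY=*/32, /*imgSizeX=*/32, /*numModulesY=*/28, /*paddingStart=*/0,
//                /*moduleStride=*/1, /*numImgColors=*/3, /*numGroups=*/1,
//                /*mpSizeX=*/3, /*mpStart=*/0, /*mpStride=*/2, /*mpOutputsX=*/14);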
void convImgActs2(NVMatrix& hidActs, NVMatrix& hidSwitches, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
int mpSizeX, int mpStart, int mpStride, int mpOutputsX,
float scaleTargets, float scaleOutput) {
    _imgActs2(hidActs, hidSwitches, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput, true);
} | b4190516393258fe79b2911762e3c5b7d95a19ca.cu | #include <cudaconv2_extra.cuh>
/*
* Block size: 16x16.
* blockIdx.x determines case in batches of 16*imgsPerThread.
* blockIdx.y determines 4x4 image region in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines pixel.
*
 * hidActs:     (numFilters, mpOutputsX, mpOutputsX, numImages) -- max-pooled hidden acts
 * hidSwitches: (numFilters, mpOutputsX, mpOutputsX, numImages) -- for each pooled output,
 *              the index of the conv module it was pooled from
* filters: (numColors, filterPixels, numFilters) if conv
* (numModulesY, numModulesX, numColors, filterPixels, numFilters) otherwise
* targets: (numColors, imgSizeY, imgSizeX, numImages)
*
* Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases.
*
* Number of filters must be divisible by 16.
* Number of images must be divisible by 16*imgsPerThread if checkCaseBounds is false.
* 16 * imgsPerThread must be divisible by 32.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*/
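/*
 * Compared to the plain img_acts kernel, this variant also takes the max-pooling switch
 * matrix (hidSwitches): a pooled hidden activation is accumulated into a pixel only when
 * its switch equals the index of the conv module currently being processed, so the kernel
 * propagates back through the pooling and the convolution in a single pass.
 *
 * Illustrative grid shape (values not taken from this file): with imgSizeY = imgSizeX = 32
 * and imgsPerThread = 4, the host code below launches gridDim.y = DIVUP(32,4) * DIVUP(32,4) = 64
 * blocks of 4x4-pixel regions and gridDim.x = DIVUP(numImages, 16*4) blocks of images.
 */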
template <int imgsPerThread, int numColors, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_color2(const float* hidActs, const float*_hidSwitches, const float* filters, float* targets,
const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeY, const int imgSizeX,
const int paddingStart, const int moduleStride,
const int mpSizeX, const int mpStart, const int mpStride, const int mpOutputsX,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[numColors*16][16 + 1];
__shared__ float shHidActs[16][16*imgsPerThread];
const int* hidSwitches = (const int *) _hidSwitches;
const int blockCaseIdx = blockIdx.x * 16*imgsPerThread;
const int numRegionsX = DIVUP(imgSizeX, 4);
const int blockRegionIdx = blockIdx.y;
const int blockRegionIdxX = blockRegionIdx % numRegionsX;
const int blockRegionIdxY = blockRegionIdx / numRegionsX;
const int blockRegionLeft = blockRegionIdxX * 4;
const int blockRegionTop = blockRegionIdxY * 4;
const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
const int pxY = blockRegionTop + pxYInRegion;
const int pxX = blockRegionLeft + pxXInRegion;
const int pxIdx = pxY * imgSizeX + pxX;
const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX;
const int numModules = numModulesY * numModulesX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeX * imgSizeY;
const int tidx = threadIdx.y * 16 + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
const int mp_numModules = mpOutputsX * mpOutputsX;
hidActs += blockCaseIdx + loadY * numImages * mp_numModules + loadX;
hidSwitches += blockCaseIdx + loadY * numImages * mp_numModules + loadX;
filters += threadIdx.x;
targets += pxIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[numColors][imgsPerThread];
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockRegionTop - paddingStart < filterSize ? 0
: 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
const int startX = blockRegionLeft - paddingStart < filterSize ? 0
: 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);
    float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
float* shHidActLoad = &shHidActs[loadY][loadX];
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInModuleY = pxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInModuleX = pxX - moduleLeft;
const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;
const int convTargetY = moduleIdx / numModulesX;
const int convTargetX = moduleIdx % numModulesX;
const int loopStartY = MAX(0, DIVUP(convTargetY - mpStart - (mpSizeX-1), mpStride));
const int loopStartX = MAX(0, DIVUP(convTargetX - mpStart - (mpSizeX-1), mpStride));
const int loopEndY = MIN(mpOutputsX-1, (convTargetY - mpStart) / mpStride);
const int loopEndX = MIN(mpOutputsX-1, (convTargetX - mpStart) / mpStride);
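            // [loopStartY, loopEndY] x [loopStartX, loopEndX] is the range of max-pooling outputs
            // whose mpSizeX x mpSizeX window (starting at mpStart, stepping by mpStride) covers the
            // conv module (convTargetY, convTargetX). In the load loop below, a pooled activation is
            // accumulated only when its switch equals moduleIdx, i.e. this module produced it.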
for (int f = 0; f < numFilters; f += 16) { // multiply with 16 filters at a time
// Now the threads split up into half-warps, and each half-warp decides if it's interested.
// const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
// to load the moduleIdx + f module
#pragma unroll
for (int i = 0; i < imgsPerThread * 16; i += 32) {
if (!checkCaseBounds || blockCaseIdx + i + loadX < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = 0;
#pragma unroll
for (int y = loopStartY; y <= loopEndY; y++) {
#pragma unroll
for (int x = loopStartX; x <= loopEndX; x++) {
const int mpOutIdx = (y * mpOutputsX + x) * numImages + i + (f+j) * numImages * mp_numModules;
shHidActLoad[j * 16 * imgsPerThread + i] += hidSwitches[mpOutIdx] == moduleIdx? hidActs[mpOutIdx] : 0;
//hLoad[j * numModules * numImages + i];
}
}
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = 0;
}
}
}
if (isPxInImg && isPxInModule) {
// This half-warp is interested, so it's going to load the weights from this module to its pixel.
// Not fully coalesced read :(
// But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
: &filters[(moduleIdx * numColors * filterPixels + pxIdxInModule) * numFilters + f];
#pragma unroll
for (int c = 0; c < numColors; c++) {
                    shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
if (isPxInImg && isPxInModule) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
}
}
}
}
__syncthreads();
}
}
}
// Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
if (isPxInImg) {
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i];
}
}
}
}
}
}
/*
* Block size: 16x16.
* blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/colorsPerThread
* blockIdx.y determines 4x4 image region in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines pixel.
*
 * hidActs:     (numFilters, mpOutputsX, mpOutputsX, numImages) -- max-pooled hidden acts
 * hidSwitches: (numFilters, mpOutputsX, mpOutputsX, numImages) -- for each pooled output,
 *              the index of the conv module it was pooled from
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgSizeY, imgSizeX, numImages)
*
* Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases.
*
* numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false.
* 16 * imgsPerThread must be divisible by 32.
* numImageColors/numGroups must be divisible by colorsPerThread.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are 4-16 color channels.
*/
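/*
 * Same switch-based routing as img_acts_color2 above: a pooled activation contributes only
 * where hidSwitches matches the current conv module index.
 *
 * Illustrative instantiation (values not taken from this file): numImgColors = 4, numGroups = 1,
 * numImages = 128, imgSizeY = imgSizeX = 32 gives imgsPerThread = 8 and colorsPerThread = 4,
 * i.e. img_acts_mediumcolor2<8, 4, ...> on a grid of
 * (DIVUP(128, 16*8) * (4/4), DIVUP(32,4) * DIVUP(32,4)) = (1, 64) blocks of 16x16 threads.
 */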
template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_mediumcolor2(const float* hidActs, const float* _hidSwitches, const float* filters, float* targets,
const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart,
const int moduleStride, const int numImgColors, const int numGroups,
const int mpSizeX, const int mpStart, const int mpStride, const int mpOutputsX,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread*16][16 + 1];
__shared__ float shHidActs[16][16*imgsPerThread];
const int* hidSwitches = (const int*) _hidSwitches;
const int numImgBlocks = DIVUP(numImages,16*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int numRegionsX = DIVUP(imgSizeX, 4);
const int blockRegionIdx = blockIdx.y;
const int blockRegionIdxX = blockRegionIdx % numRegionsX;
const int blockRegionIdxY = blockRegionIdx / numRegionsX;
const int blockRegionLeft = blockRegionIdxX * 4;
const int blockRegionTop = blockRegionIdxY * 4;
const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
const int pxY = blockRegionTop + pxYInRegion;
const int pxX = blockRegionLeft + pxXInRegion;
const int pxIdx = pxY * imgSizeX + pxX;
const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX;
const uint numModules = numModulesY * numModulesX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int tidx = threadIdx.y * 16 + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
const int mp_numModules = mpOutputsX * mpOutputsX;
hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * mp_numModules + loadX;
hidSwitches += blockCaseIdx + (blockFilterIdx + loadY) * numImages * mp_numModules + loadX;
filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x;
targets += imgColorIdx * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockRegionTop - paddingStart < filterSize ? 0
: 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
const int startX = blockRegionLeft - paddingStart < filterSize ? 0
: 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
float* shHidActLoad = &shHidActs[loadY][loadX];
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInModuleY = pxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInModuleX = pxX - moduleLeft;
const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;
const int convTargetY = moduleIdx / numModulesX;
const int convTargetX = moduleIdx % numModulesX;
const int loopStartY = MAX(0, DIVUP(convTargetY - mpStart - (mpSizeX-1), mpStride));
const int loopStartX = MAX(0, DIVUP(convTargetX - mpStart - (mpSizeX-1), mpStride));
const int loopEndY = MIN(mpOutputsX-1, (convTargetY - mpStart) / mpStride);
const int loopEndX = MIN(mpOutputsX-1, (convTargetX - mpStart) / mpStride);
            for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
// Now the threads split up into half-warps, and each half-warp decides if it's interested.
//const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * 16; i += 32) {
if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = 0;
#pragma unroll
for (int y = loopStartY; y <= loopEndY; y++) {
#pragma unroll
for (int x = loopStartX; x <= loopEndX; x++) {
const int mpOutIdx = (y * mpOutputsX + x) * numImages + i + (f+j) * numImages * mp_numModules;
shHidActLoad[j * 16 * imgsPerThread + i] += hidSwitches[mpOutIdx] == moduleIdx? hidActs[mpOutIdx] : 0;
}
}
// shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = 0;
}
}
}
if (isPxInImg && isPxInModule) {
// This half-warp is interested, so it's going to load the weights from this module to its pixel.
// Not fully coalesced read :(
// But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
if (isPxInImg && isPxInModule) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
}
}
}
}
__syncthreads();
}
}
}
// Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
if (isPxInImg) {
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i];
}
}
}
}
}
}
/*
* Block size: B_YxB_X.
* blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread)
* blockIdx.y determines image pixel in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines color.
*
 * hidActs:     (numFilters, mpOutputsX, mpOutputsX, numImages) -- max-pooled hidden acts
 * hidSwitches: (numFilters, mpOutputsX, mpOutputsX, numImages) -- for each pooled output,
 *              the index of the conv module it was pooled from
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgSizeY, imgSizeX, numImages)
*
* Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false.
* numFiltersPerGroup must be divisible by 16.
*
* B_X * imgsPerThread must be divisible by 32.
* numFilterColors must be divisible by B_Y*colorsPerThread.
* B_X*B_Y must be divisible by 32.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are >= 16 color channels.
*/
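/*
 * Same switch-based routing as the kernels above. For the <B_Y=4, B_X=32, imgsPerThread=4,
 * colorsPerThread=4> instantiation chosen by the host code, each block handles one image pixel,
 * 4*4 = 16 colors and 32*4 = 128 images; tidx runs over B_Y*B_X = 128 threads, so
 * hidActLoadY = tidx/32 is in [0,4) and filtersLoadY = tidx/16 is in [0,8), which gives the
 * 32-case hidAct loads and 16-filter weight loads described above.
 */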
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void conv_img_acts_manycolor2(const float* hidActs, const float* _hidSwitches, const float* filters, float* targets,
const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
const int numImgColors, const int numGroups,
int mpSizeX, int mpStart, int mpStride, int mpOutputsX,
const float scaleTargets, const float scaleOutputs) {
const int *hidSwitches = (const int *) _hidSwitches;
__shared__ float shFilters[colorsPerThread*B_Y][16 + 1]; // TODO: perhaps reconsider this 16
__shared__ float shHidActs[16][B_X*imgsPerThread];
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int blockPixelIdx = blockIdx.y;
const int blockPixelIdxX = blockPixelIdx % imgSizeX;
const int blockPixelIdxY = blockPixelIdx / imgSizeX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
const int filtersLoadY = tidx / 16, filtersLoadX = tidx % 16;
// const int numModules = numModulesY * numModulesX;
const int mp_numModules = mpOutputsX * mpOutputsX;
hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * mp_numModules + hidActLoadX;
hidSwitches += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * mp_numModules + hidActLoadX;
// hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;
const int convTargetY = moduleIdx / numModulesX;
const int convTargetX = moduleIdx % numModulesX;
const int loopStartY = MAX(0, DIVUP(convTargetY - mpStart - (mpSizeX-1), mpStride));
const int loopStartX = MAX(0, DIVUP(convTargetX - mpStart - (mpSizeX-1), mpStride));
const int loopEndY = MIN(mpOutputsX-1, (convTargetY - mpStart) / mpStride);
const int loopEndX = MIN(mpOutputsX-1, (convTargetX - mpStart) / mpStride);
for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
// const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * B_X; i += 32) {
if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * B_X * imgsPerThread + i] = 0;
#pragma unroll
for (int y = loopStartY; y <= loopEndY; y++) {
#pragma unroll
for (int x = loopStartX; x <= loopEndX; x++) {
const int mpOutIdx = (y * mpOutputsX + x) * numImages + i + (f+j) * numImages * mp_numModules;
shHidActLoad[j * B_X * imgsPerThread + i] += hidSwitches[mpOutIdx] == moduleIdx? hidActs[mpOutIdx] : 0;
//shHidActLoad[j * B_X * imgsPerThread + i] += hidSwitches[mpOutIdx];
}
}
// shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * B_X * imgsPerThread + i] = 0;
}
}
}
const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
#pragma unroll
for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/16) {
if ((colorsPerThread*B_Y) % (B_X*B_Y/16) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X];
}
}
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
}
}
}
}
}
void _imgActs2(NVMatrix& hidActs, NVMatrix& hidSwitches, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
int mpSizeX, int mpStart, int mpStride, int mpOutputsX,
float scaleTargets, float scaleOutput, bool conv) {
int numFilterColors = numImgColors / numGroups;
int numImages = hidActs.getNumCols();
int numFilters = filters.getNumCols();
    int numModules = numModulesY * numModulesY; // the module grid is assumed square here: only numModulesY is passed in, so numModulesX == numModulesY
int filterModuleMult = conv ? 1 : numModules;
int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
int filterSize = sqrt(filterPixels);
int imgPixels = imgSizeY * imgSizeX;
int numModulesX = numModules / numModulesY;
int numOutputs = mpOutputsX * mpOutputsX;
assert(numImgColors % numGroups == 0);
assert(numFilters % (16*numGroups) == 0);
assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0)));
assert(numGroups == 1 || numFilterColors % 4 == 0);
assert(filterPixels == filterSize * filterSize);
assert(hidActs.getNumRows() == numOutputs * numFilters);
assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
assert(numModules == numModulesY * numModulesX);
assert(hidActs.isContiguous());
assert(filters.isContiguous());
assert(!hidActs.isTrans());
assert(!filters.isTrans());
assert(!targets.isTrans());
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0);
assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
assert(moduleStride <= filterSize);
assert(targets.isContiguous()); // no stride support here!
dim3 blocks;
dim3 threads(16,16);
int colorsPerThread;
int imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
if (numFilterColors % 8 == 0) {
threads = dim3(32, 4);
colorsPerThread = numFilterColors % 16 == 0 ? 4 : 2;
imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
assert(numFilterColors % (threads.y * colorsPerThread) == 0);
blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels);
} else if (numFilterColors > 3) {
colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2;
blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread) * (numImgColors / colorsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4));
} else {
blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4));
}
bool checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0;
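    // Illustrative configuration (values not taken from this file): numImgColors = 32, numGroups = 1,
    // numImages = 128 gives numFilterColors = 32, imgsPerThread = 4, colorsPerThread = 4,
    // threads = (32, 4), blocks = (DIVUP(128, 128) * (32/16), imgPixels) = (2, imgPixels) and
    // checkCaseBounds = false, so with scaleTargets == 0 the dispatch below selects
    // conv_img_acts_manycolor2<4, 32, 4, 4, false, false, true>.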
if (scaleTargets == 0) { // do not scale or use targets matrix
targets.resize(numImgColors*imgPixels, numImages);
} else {
assert(targets.getNumRows() == numImgColors * imgPixels);
assert(targets.getNumCols() == numImages);
}
if (conv) { // convolutional units
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 4, 4, false, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor2<4, 32, 4, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 4, 2, false, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor2<4, 32, 4, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 4, 4, false, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor2<4, 32, 4, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 4, 2, false, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor2<4, 32, 4, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 2, 4, false, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor2<4, 32, 2, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 2, 2, false, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor2<4, 32, 2, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 2, 4, false, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor2<4, 32, 2, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 2, 2, false, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor2<4, 32, 2, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 1, 4, false, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor2<4, 32, 1, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 1, 2, false, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor2<4, 32, 1, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 1, 4, false, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor2<4, 32, 1, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 1, 2, false, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor2<4, 32, 1, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
}
} else if (numFilterColors > 3) {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor2<8, 4, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor2<8, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor2<8, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor2<8, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor2<8, 4, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor2<8, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor2<8, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor2<8, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor2<4, 4, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor2<4, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor2<4, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor2<4, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor2<4, 4, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor2<4, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor2<4, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor2<4, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor2<2, 4, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor2<2, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor2<2, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor2<2, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor2<2, 4, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor2<2, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor2<2, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor2<2, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
}
} else {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color2<8, 1, false, true, true>, cudaFuncCachePreferShared);
img_acts_color2<8, 1, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color2<8, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_color2<8, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color2<8, 3, false, true, true>, cudaFuncCachePreferShared);
img_acts_color2<8, 3, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color2<8, 1, false, false, true>, cudaFuncCachePreferShared);
img_acts_color2<8, 1, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color2<8, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_color2<8, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color2<8, 3, false, false, true>, cudaFuncCachePreferShared);
img_acts_color2<8, 3, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color2<4, 1, false, true, true>, cudaFuncCachePreferShared);
img_acts_color2<4, 1, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color2<4, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_color2<4, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color2<4, 3, false, true, true>, cudaFuncCachePreferShared);
img_acts_color2<4, 3, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color2<4, 1, false, false, true>, cudaFuncCachePreferShared);
img_acts_color2<4, 1, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color2<4, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_color2<4, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color2<4, 3, false, false, true>, cudaFuncCachePreferShared);
img_acts_color2<4, 3, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color2<2, 1, false, true, true>, cudaFuncCachePreferShared);
img_acts_color2<2, 1, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color2<2, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_color2<2, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color2<2, 3, false, true, true>, cudaFuncCachePreferShared);
img_acts_color2<2, 3, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color2<2, 1, false, false, true>, cudaFuncCachePreferShared);
img_acts_color2<2, 1, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color2<2, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_color2<2, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color2<2, 3, false, false, true>, cudaFuncCachePreferShared);
img_acts_color2<2, 3, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 4, 4, true, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor2<4, 32, 4, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 4, 2, true, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor2<4, 32, 4, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 4, 4, true, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor2<4, 32, 4, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 4, 2, true, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor2<4, 32, 4, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 2, 4, true, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor2<4, 32, 2, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 2, 2, true, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor2<4, 32, 2, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 2, 4, true, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor2<4, 32, 2, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 2, 2, true, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor2<4, 32, 2, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 1, 4, true, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor2<4, 32, 1, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 1, 2, true, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor2<4, 32, 1, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 1, 4, true, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor2<4, 32, 1, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor2<4, 32, 1, 2, true, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor2<4, 32, 1, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
}
} else if (numFilterColors > 3) {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor2<8, 4, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor2<8, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor2<8, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor2<8, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor2<8, 4, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor2<8, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor2<8, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor2<8, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor2<4, 4, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor2<4, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor2<4, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor2<4, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor2<4, 4, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor2<4, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor2<4, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor2<4, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor2<2, 4, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor2<2, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor2<2, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor2<2, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor2<2, 4, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor2<2, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor2<2, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor2<2, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
}
} else {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color2<8, 1, true, true, true>, cudaFuncCachePreferShared);
img_acts_color2<8, 1, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color2<8, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_color2<8, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color2<8, 3, true, true, true>, cudaFuncCachePreferShared);
img_acts_color2<8, 3, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color2<8, 1, true, false, true>, cudaFuncCachePreferShared);
img_acts_color2<8, 1, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color2<8, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_color2<8, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color2<8, 3, true, false, true>, cudaFuncCachePreferShared);
img_acts_color2<8, 3, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color2<4, 1, true, true, true>, cudaFuncCachePreferShared);
img_acts_color2<4, 1, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color2<4, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_color2<4, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color2<4, 3, true, true, true>, cudaFuncCachePreferShared);
img_acts_color2<4, 3, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color2<4, 1, true, false, true>, cudaFuncCachePreferShared);
img_acts_color2<4, 1, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color2<4, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_color2<4, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color2<4, 3, true, false, true>, cudaFuncCachePreferShared);
img_acts_color2<4, 3, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color2<2, 1, true, true, true>, cudaFuncCachePreferShared);
img_acts_color2<2, 1, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color2<2, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_color2<2, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color2<2, 3, true, true, true>, cudaFuncCachePreferShared);
img_acts_color2<2, 3, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color2<2, 1, true, false, true>, cudaFuncCachePreferShared);
img_acts_color2<2, 1, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color2<2, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_color2<2, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color2<2, 3, true, false, true>, cudaFuncCachePreferShared);
img_acts_color2<2, 3, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), hidSwitches.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput);
}
}
}
}
}
}
getLastCudaError("imgActs: kernel execution failed");
}
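// The convImgActs2 overloads below are thin wrappers around _imgActs2: the first uses the
// default scaling (scaleTargets = 0, scaleOutput = 1), the second forwards caller-supplied
// scale factors; both select the convolutional (conv = true) code path.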
void convImgActs2(NVMatrix& hidActs, NVMatrix& hidSwitches, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
int mpSizeX, int mpStart, int mpStride, int mpOutputsX) {
_imgActs2(hidActs, hidSwitches, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, 0, 1, true);
}
void convImgActs2(NVMatrix& hidActs, NVMatrix& hidSwitches, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
int mpSizeX, int mpStart, int mpStride, int mpOutputsX,
float scaleTargets, float scaleOutput) {
_imgActs2(hidActs, hidSwitches, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, mpSizeX, mpStart, mpStride, mpOutputsX, scaleTargets, scaleOutput, true);
} |
56209ffc396b63d6bc6b0e41fe9fcfa3138dccc7.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "callOperation.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
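// Auto-generated benchmark harness: argv[1] selects how many entries of matrices_ to sweep
// (no argument or bounds checking is done). For each matrix size and each of the 20 block
// shapes in blocks_, the grid is sized by rounding XSIZE/YSIZE up to a multiple of the block
// dimensions, callOperation is warmed up with 10 launches, timed over 1000 launches, and the
// result is printed as [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].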
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *niz = NULL;
hipMalloc(&niz, XSIZE*YSIZE);
int *res = NULL;
hipMalloc(&res, XSIZE*YSIZE);
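// Note: hipMalloc takes a byte count, so niz and res are XSIZE*YSIZE bytes each, while n
// below is XSIZE*YSIZE elements; whether that is enough depends on how callOperation
// (defined in callOperation.hip, not shown here) indexes these buffers.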
int k = 1;
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(callOperation, dim3(gridBlock), dim3(threadBlock), 0, 0, niz, res, k, n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(callOperation, dim3(gridBlock), dim3(threadBlock), 0, 0, niz, res, k, n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(callOperation, dim3(gridBlock), dim3(threadBlock), 0, 0, niz, res, k, n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 56209ffc396b63d6bc6b0e41fe9fcfa3138dccc7.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "callOperation.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
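// Auto-generated benchmark harness: argv[1] selects how many entries of matrices_ to sweep
// (no argument or bounds checking is done). For each matrix size and each of the 20 block
// shapes in blocks_, the grid is sized by rounding XSIZE/YSIZE up to a multiple of the block
// dimensions, callOperation is warmed up with 10 launches, timed over 1000 launches, and the
// result is printed as [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].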
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *niz = NULL;
cudaMalloc(&niz, XSIZE*YSIZE);
int *res = NULL;
cudaMalloc(&res, XSIZE*YSIZE);
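// Note: cudaMalloc takes a byte count, so niz and res are XSIZE*YSIZE bytes each, while n
// below is XSIZE*YSIZE elements; whether that is enough depends on how callOperation
// (defined in callOperation.cu, not shown here) indexes these buffers.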
int k = 1;
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
callOperation<<<gridBlock,threadBlock>>>(niz,res,k,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
callOperation<<<gridBlock,threadBlock>>>(niz,res,k,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
callOperation<<<gridBlock,threadBlock>>>(niz,res,k,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
fdabe74934d23a9977047d8c1dbd06c0a0817bda.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) OpenMMLab. All rights reserved.
#include <cstdint>
namespace mmdeploy {
namespace operation {
namespace cuda {
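// transpose converts an interleaved HxWxC image (channels adjacent per pixel) into planar
// CxHxW output: each thread handles one (x, y) pixel and loops over the channels, writing
// each channel value into its own height*width plane of dst.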
template <typename T>
__global__ void transpose(const T* src, int height, int width, int channels, int src_width_stride,
T* dst, int dst_channel_stride) {
auto x = blockIdx.x * blockDim.x + threadIdx.x;
auto y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
for (auto c = 0; c < channels; ++c) {
dst[c * dst_channel_stride + y * width + x] = src[y * src_width_stride + x * channels + c];
}
}
template <typename T>
void Transpose(const T* src, int height, int width, int channels, T* dst, hipStream_t stream) {
const dim3 thread_block(32, 8);
const dim3 block_num((width + thread_block.x - 1) / thread_block.x,
(height + thread_block.y - 1) / thread_block.y);
auto src_width_stride = width * channels;
auto dst_channel_stride = width * height;
hipLaunchKernelGGL(( transpose<T>), dim3(block_num), dim3(thread_block), 0, stream, src, height, width, channels,
src_width_stride, dst, dst_channel_stride);
}
template void Transpose<uint8_t>(const uint8_t* src, int height, int width, int channels,
uint8_t* dst, hipStream_t stream);
template void Transpose<float>(const float* src, int height, int width, int channels, float* dst,
hipStream_t stream);
} // namespace cuda
} // namespace operation
} // namespace mmdeploy
| fdabe74934d23a9977047d8c1dbd06c0a0817bda.cu | // Copyright (c) OpenMMLab. All rights reserved.
#include <cstdint>
namespace mmdeploy {
namespace operation {
namespace cuda {
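// transpose converts an interleaved HxWxC image (channels adjacent per pixel) into planar
// CxHxW output: each thread handles one (x, y) pixel and loops over the channels, writing
// each channel value into its own height*width plane of dst.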
template <typename T>
__global__ void transpose(const T* src, int height, int width, int channels, int src_width_stride,
T* dst, int dst_channel_stride) {
auto x = blockIdx.x * blockDim.x + threadIdx.x;
auto y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height) return;
for (auto c = 0; c < channels; ++c) {
dst[c * dst_channel_stride + y * width + x] = src[y * src_width_stride + x * channels + c];
}
}
template <typename T>
void Transpose(const T* src, int height, int width, int channels, T* dst, cudaStream_t stream) {
const dim3 thread_block(32, 8);
const dim3 block_num((width + thread_block.x - 1) / thread_block.x,
(height + thread_block.y - 1) / thread_block.y);
auto src_width_stride = width * channels;
auto dst_channel_stride = width * height;
transpose<T><<<block_num, thread_block, 0, stream>>>(src, height, width, channels,
src_width_stride, dst, dst_channel_stride);
}
template void Transpose<uint8_t>(const uint8_t* src, int height, int width, int channels,
uint8_t* dst, cudaStream_t stream);
template void Transpose<float>(const float* src, int height, int width, int channels, float* dst,
cudaStream_t stream);
} // namespace cuda
} // namespace operation
} // namespace mmdeploy
|
5b6419963e5edb7a7d8d05168fe02b73408bc075.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "NvInfer.h"
#include "bertCommon.h"
#include "qkvToContextPlugin.h"
#include "common.h"
#include "serialize.hpp"
#include <cassert>
#include <cstring>
#include <iostream>
#include <vector>
using namespace nvinfer1;
namespace bert
{
template <typename T, unsigned TPB>
__global__ void scaledSoftmaxKernelSmall(const int ld, const float rsqrtHeadSize, const T* input, T* output)
{
scaledSoftmaxSmall<T, TPB>(ld, ld, rsqrtHeadSize, input, output);
}
template <typename T, unsigned TPB>
__global__ void scaledSoftmaxKernel(const int ld, const float rsqrtHeadSize, const T* input, T* output)
{
scaledSoftmax<T, TPB>(ld, ld, rsqrtHeadSize, input, output);
}
template <typename T>
int computeScaledSoftmax(
hipStream_t stream, const int ld, const int B, const int N, const float rsqrtHeadSize, const T* input, T* output)
{
const dim3 grid(ld * N, B, 1);
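// One block per row of the S x S attention matrix, for every head and batch element
// (grid is ld*N rows by B). The "Small" kernel variants are chosen when a single block of
// TPB threads covers the whole row (ld <= 32, ld <= 128, or ld == 384); otherwise the
// generic 256-thread kernel is used.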
if (ld <= 32)
{
const int blockSize = 32;
hipLaunchKernelGGL(( scaledSoftmaxKernelSmall<T, blockSize>), dim3(grid), dim3(blockSize), 0, stream, ld, rsqrtHeadSize, input, output);
}
else if (ld <= 128)
{
const int blockSize = 128;
hipLaunchKernelGGL(( scaledSoftmaxKernelSmall<T, blockSize>), dim3(grid), dim3(blockSize), 0, stream, ld, rsqrtHeadSize, input, output);
}
else if (ld == 384)
{
const int blockSize = 384;
hipLaunchKernelGGL(( scaledSoftmaxKernelSmall<T, blockSize>), dim3(grid), dim3(blockSize), 0, stream, ld, rsqrtHeadSize, input, output);
}
else
{
const int blockSize = 256;
hipLaunchKernelGGL(( scaledSoftmaxKernel<T, blockSize>), dim3(grid), dim3(blockSize), 0, stream, ld, rsqrtHeadSize, input, output);
}
CHECK(hipPeekAtLastError());
return 0;
}
template <typename T, unsigned TPB>
__global__ void maskedScaledSoftmaxKernelSmall(
const int ld, const float rsqrtHeadSize, const int* maskIdx, const T* input, T* output)
{
__shared__ int lastValid;
if (threadIdx.x == 0)
{
lastValid = min(ld, maskIdx[blockIdx.y]);
}
__syncthreads();
scaledSoftmaxSmall<T, TPB>(ld, lastValid, rsqrtHeadSize, input, output);
}
template <typename T, unsigned TPB>
__global__ void maskedScaledSoftmaxKernel(
const int ld, const float rsqrtHeadSize, const int* maskIdx, const T* input, T* output)
{
__shared__ int lastValid;
if (threadIdx.x == 0)
{
lastValid = min(ld, maskIdx[blockIdx.y]);
}
__syncthreads();
scaledSoftmax<T, TPB>(ld, lastValid, rsqrtHeadSize, input, output);
}
template <typename T>
int computeMaskedScaledSoftmax(hipStream_t stream, const int ld, const int B, const int N, const float rsqrtHeadSize,
const int* maskIdx, const T* input, T* output)
{
// Mask idx is of length B and assumes the valid region is contiguous starting
// from the beginning of the sequence
const dim3 grid(ld * N, B, 1);
if (ld <= 32)
{
const int blockSize = 32;
hipLaunchKernelGGL(( maskedScaledSoftmaxKernelSmall<T, blockSize>)
, dim3(grid), dim3(blockSize), 0, stream, ld, rsqrtHeadSize, maskIdx, input, output);
}
else if (ld <= 128)
{
const int blockSize = 128;
hipLaunchKernelGGL(( maskedScaledSoftmaxKernelSmall<T, blockSize>)
, dim3(grid), dim3(blockSize), 0, stream, ld, rsqrtHeadSize, maskIdx, input, output);
}
else if (ld == 384)
{
const int blockSize = 384;
hipLaunchKernelGGL(( maskedScaledSoftmaxKernelSmall<T, blockSize>)
, dim3(grid), dim3(blockSize), 0, stream, ld, rsqrtHeadSize, maskIdx, input, output);
}
else
{
const int blockSize = 256;
hipLaunchKernelGGL(( maskedScaledSoftmaxKernel<T, blockSize>)
, dim3(grid), dim3(blockSize), 0, stream, ld, rsqrtHeadSize, maskIdx, input, output);
}
CHECK(hipPeekAtLastError());
return 0;
}
template <typename T>
inline int qkvToCtx(hipblasHandle_t& cublas, const int B, const int S, const int numHeads, const int headSize,
const float rsqrtHeadSize, const T* input, T* output, T* qkptr, T* pptr, hipStream_t stream,
const int* maskIdx = nullptr)
{
const int omatSize = S * S;
const int numMats = B * numHeads;
const T* qptr = input;
const T* kptr = qptr + headSize;
const T* vptr = kptr + headSize;
hipblasSetStream(cublas, stream);
CublasConfigHelper helper(cublas);
// Q, K, V: BxNxSxH (inputs)
// Q * K': BxNxSxS (-> scratch1)
// P: BxNxSxS (-> scratch2)
// P * V: BxNxSxH (output)
const int ldQKV = 3 * B * numHeads * headSize;
const int strideQKV = 3 * headSize;
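// Q, K and V are interleaved along the hidden dimension, so each per-head matrix is read
// with leading dimension 3*B*numHeads*headSize and a batch stride of 3*headSize. The
// strided batched GEMM below computes the S x S score matrix Q * K' for all B*numHeads
// heads in one call, writing the results into qkptr (scratch1).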
CHECK(cublasGemmStridedBatched<T>(cublas, HIPBLAS_OP_T, HIPBLAS_OP_N, S, S, headSize, 1.f, kptr, ldQKV, strideQKV,
qptr, ldQKV, strideQKV, 0.f, qkptr, S, omatSize, numMats));
// apply softmax
if (maskIdx)
{ // if we have a mask
computeMaskedScaledSoftmax<T>(stream, S, B, numHeads, rsqrtHeadSize, maskIdx, qkptr, pptr);
}
else
{ // if we don't have a mask
computeScaledSoftmax<T>(stream, S, B, numHeads, rsqrtHeadSize, qkptr, pptr);
}
// compute P*V (as V*P)
const int ldOut = B * numHeads * headSize;
const int strideOut = headSize;
CHECK(cublasGemmStridedBatched<T>(cublas, HIPBLAS_OP_N, HIPBLAS_OP_N, headSize, S, S, 1.f, vptr, ldQKV, strideQKV,
pptr, S, omatSize, 0.f, output, ldOut, strideOut, numMats));
return 0;
}
namespace
{
static const char* QKV_TO_CONTEXT_PLUGIN_VERSION{"1"};
static const char* QKV_TO_CONTEXT_PLUGIN_NAME{"CustomQKVToContextPluginDynamic"};
} // namespace
// Static class fields initialization
PluginFieldCollection QKVToContextPluginDynamicCreator::mFC{};
std::vector<PluginField> QKVToContextPluginDynamicCreator::mPluginAttributes;
REGISTER_TENSORRT_PLUGIN(QKVToContextPluginDynamicCreator);
constexpr size_t kAlignment = 256;
constexpr uint32_t IIDX = 0; // index of the input tensor
constexpr uint32_t MIDX = 1; // index of the mask
QKVToContextPluginDynamic::QKVToContextPluginDynamic(
const std::string name, const DataType type, const int hiddenSize, const int numHeads, bool hasImask)
: mLayerName(name)
, mHiddenSize(hiddenSize)
, mNumHeads(numHeads)
, mHasImask(hasImask)
, mType(type)
{
assert(hiddenSize % numHeads == 0);
mHeadSize = hiddenSize / numHeads;
mRsqrtHeadSize = 1.f / sqrt(float(mHeadSize));
}
QKVToContextPluginDynamic::QKVToContextPluginDynamic(const std::string name, const void* data, size_t length)
: mLayerName(name)
{
gLogVerbose << "QKV Deser Start" << std::endl;
deserialize_value(&data, &length, &mType);
deserialize_value(&data, &length, &mNumHeads);
deserialize_value(&data, &length, &mHeadSize);
deserialize_value(&data, &length, &mRsqrtHeadSize);
deserialize_value(&data, &length, &mHasImask);
deserialize_value(&data, &length, &mHiddenSize);
gLogVerbose << "QKV Deser done" << std::endl;
}
// IPluginV2DynamicExt Methods
nvinfer1::IPluginV2DynamicExt* QKVToContextPluginDynamic::clone() const
{
gLogVerbose << "QKV Clone" << std::endl;
auto ret = new QKVToContextPluginDynamic(mLayerName, mType, mHiddenSize, mNumHeads, mHasImask);
ret->initialize();
gLogVerbose << "QKV Clone done" << std::endl;
return ret;
}
DimsExprs QKVToContextPluginDynamic::getOutputDimensions(
int outputIndex, const DimsExprs* inputs, int nbInputs, IExprBuilder& exprBuilder)
{
// Input is BxSx3*N*H, output should be BxSxN*H
assert(outputIndex == 0);
// Copy over everything
DimsExprs output(inputs[IIDX]);
// Divide last dim by three
auto three = exprBuilder.constant(3);
output.d[HDIM] = exprBuilder.operation(DimensionOperation::kFLOOR_DIV, *inputs[IIDX].d[HDIM], *three);
return output;
}
bool QKVToContextPluginDynamic::supportsFormatCombination(
int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs)
{
assert(pos >= 0);
assert(pos < 2 + mHasImask);
assert(nbInputs == 1 + mHasImask);
const auto* in = inOut;
const auto* out = inOut + nbInputs;
if (pos == 0)
{
// must not check descriptions > pos
return (in->type == mType) && // precision
(in->format == TensorFormat::kLINEAR) && // format
(in->dims.nbDims == 5) && // num dims
((in->dims.d[HDIM] % 3) == 0) && // see getOutputDimensions
((in->dims.d[3]) == 1) && // for fc
((in->dims.d[4]) == 1) // for fc
;
}
else
{ // pos==1
if ((mHasImask && pos == 1))
{
const auto* inMask = &inOut[1];
return (inMask->type == DataType::kINT32) && // precision
(inMask->format == TensorFormat::kLINEAR) && // format
(inMask->dims.nbDims == 1) && // num dims
((inMask->dims.d[0]) == in->dims.d[BDIM]) // check B
;
}
if (!mHasImask || (pos == 2))
{
return (in->type == out->type) && // precision
(out->format == TensorFormat::kLINEAR) && // format
(out->dims.nbDims == 5) && // num dims
((in->dims.d[HDIM] / 3) == (out->dims.d[HDIM])) && // div 3
((out->dims.d[3]) == 1) && // for fc
((out->dims.d[4]) == 1) && // for fc
((out->dims.d[BDIM]) == in->dims.d[BDIM]) && // check B
((out->dims.d[SDIM]) == in->dims.d[SDIM]) // check S
;
}
}
return false;
}
void QKVToContextPluginDynamic::configurePlugin(
const DynamicPluginTensorDesc* in, int nbInputs, const DynamicPluginTensorDesc* out, int nbOutputs)
{
assert(nbInputs == 1 + mHasImask);
assert(nbOutputs == 1);
const PluginTensorDesc& inDesc = in[IIDX].desc;
TRT_UNUSED inDesc;
const PluginTensorDesc& outDesc = out->desc;
TRT_UNUSED outDesc;
assert(mType == inDesc.type);
assert(mType == outDesc.type);
assert(inDesc.dims.d[BDIM] == outDesc.dims.d[BDIM]);
assert(inDesc.dims.d[SDIM] == outDesc.dims.d[SDIM]);
assert(inDesc.dims.d[HDIM] == 3 * outDesc.dims.d[HDIM]);
if (mHasImask)
{
const PluginTensorDesc& maskDesc = in[MIDX].desc;
TRT_UNUSED maskDesc;
assert(maskDesc.type == DataType::kINT32);
assert(maskDesc.dims.d[0] == inDesc.dims.d[BDIM]);
}
}
size_t QKVToContextPluginDynamic::scratchSize(const int B, const int S) const
{
const size_t wordSize = samplesCommon::getElementSize(mType);
const size_t len = B * mNumHeads * S * S;
const size_t bytes = len * wordSize;
return bytes;
}
size_t QKVToContextPluginDynamic::getWorkspaceSize(
const PluginTensorDesc* inputs, int nbInputs, const PluginTensorDesc* outputs, int nbOutputs) const
{
const int B = inputs->dims.d[BDIM];
const int S = inputs->dims.d[SDIM];
const size_t bytesAligned = alignTo<size_t>(scratchSize(B, S), kAlignment);
const size_t ws = 2UL * bytesAligned;
return ws;
}
// IPluginV2Ext Methods
DataType QKVToContextPluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
assert(index == 0);
assert(inputTypes[0] == DataType::kFLOAT || inputTypes[0] == DataType::kHALF);
return inputTypes[0];
}
// IPluginV2 Methods
const char* QKVToContextPluginDynamic::getPluginType() const
{
return QKV_TO_CONTEXT_PLUGIN_NAME;
}
const char* QKVToContextPluginDynamic::getPluginVersion() const
{
return QKV_TO_CONTEXT_PLUGIN_VERSION;
}
int QKVToContextPluginDynamic::getNbOutputs() const
{
return 1;
}
int QKVToContextPluginDynamic::initialize()
{
hipblasCreate(&cublas);
return 0;
}
void QKVToContextPluginDynamic::terminate()
{
CHECK(hipblasDestroy(cublas));
}
size_t QKVToContextPluginDynamic::getSerializationSize() const
{
return sizeof(mNumHeads) + sizeof(mHeadSize) + sizeof(DataType) + sizeof(mRsqrtHeadSize) + sizeof(mHasImask)
+ sizeof(mHiddenSize);
}
void QKVToContextPluginDynamic::serialize(void* buffer) const
{
serialize_value(&buffer, mType);
serialize_value(&buffer, mNumHeads);
serialize_value(&buffer, mHeadSize);
serialize_value(&buffer, mRsqrtHeadSize);
serialize_value(&buffer, mHasImask);
serialize_value(&buffer, mHiddenSize);
}
void QKVToContextPluginDynamic::destroy()
{
delete this;
}
void QKVToContextPluginDynamic::setPluginNamespace(const char* libNamespace)
{
mNamespace = libNamespace;
}
const char* QKVToContextPluginDynamic::getPluginNamespace() const
{
return mNamespace.c_str();
}
int QKVToContextPluginDynamic::enqueue(const PluginTensorDesc* inputDesc, const PluginTensorDesc* outputDesc,
const void* const* inputs, void* const* outputs, void* workspace, hipStream_t stream)
{
const int batchSize = inputDesc->dims.d[BDIM];
const int S = inputDesc->dims.d[SDIM];
const size_t bytesAligned = alignTo<size_t>(scratchSize(batchSize, S), kAlignment);
char* scratch1 = static_cast<char*>(workspace);
char* scratch2 = scratch1 + bytesAligned;
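// The workspace is split into two kAlignment-aligned halves: scratch1 receives the raw
// Q * K' scores (qkptr) and scratch2 the softmax probabilities (pptr) consumed by the
// final P * V GEMM inside qkvToCtx.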
const int* maskIdx = mHasImask ? static_cast<const int*>(inputs[1]) : nullptr;
int status = -1;
if (mType == DataType::kFLOAT)
{
const float* input = static_cast<const float*>(inputs[0]);
float* output = static_cast<float*>(outputs[0]);
float* scr1 = reinterpret_cast<float*>(scratch1);
float* scr2 = reinterpret_cast<float*>(scratch2);
status = qkvToCtx(
cublas, batchSize, S, mNumHeads, mHeadSize, mRsqrtHeadSize, input, output, scr1, scr2, stream, maskIdx);
}
else if (mType == DataType::kHALF)
{
const half* input = static_cast<const half*>(inputs[0]);
half* output = static_cast<half*>(outputs[0]);
half* scr1 = reinterpret_cast<half*>(scratch1);
half* scr2 = reinterpret_cast<half*>(scratch2);
status = qkvToCtx(
cublas, batchSize, S, mNumHeads, mHeadSize, mRsqrtHeadSize, input, output, scr1, scr2, stream, maskIdx);
}
else
{
assert(false);
}
return status;
}
QKVToContextPluginDynamicCreator::QKVToContextPluginDynamicCreator()
{
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* QKVToContextPluginDynamicCreator::getPluginName() const
{
return QKV_TO_CONTEXT_PLUGIN_NAME;
}
const char* QKVToContextPluginDynamicCreator::getPluginVersion() const
{
return QKV_TO_CONTEXT_PLUGIN_VERSION;
}
const PluginFieldCollection* QKVToContextPluginDynamicCreator::getFieldNames()
{
return &mFC;
}
IPluginV2* QKVToContextPluginDynamicCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
gLogVerbose << "Creating QKV2ContextPlugin...\n";
int hiddenSize = 0;
int numHeads = 0;
bool hasMask = false;
int typeId = -1;
for (int i = 0; i < fc->nbFields; i++)
{
std::string field_name(fc->fields[i].name);
if (field_name.compare("type_id") == 0)
{
typeId = *static_cast<const int*>(fc->fields[i].data);
gLogVerbose << "Building typeId: " << typeId << std::endl;
}
if (field_name.compare("hidden_size") == 0)
{
hiddenSize = *static_cast<const int*>(fc->fields[i].data);
gLogVerbose << "Building hiddenSize: " << hiddenSize << std::endl;
}
if (field_name.compare("num_heads") == 0)
{
numHeads = *static_cast<const int*>(fc->fields[i].data);
gLogVerbose << "Building numHeads: " << numHeads << std::endl;
}
if (field_name.compare("has_mask") == 0)
{
hasMask = *static_cast<const bool*>(fc->fields[i].data);
gLogVerbose << "Building hasMask: " << hasMask << std::endl;
}
}
if (typeId < 0 || typeId > 3)
{
gLogError << "QKV: Invalid TypeId " << typeId << std::endl;
}
if (hiddenSize <= 0)
{
gLogError << "QKV: Invalid hiddenSize " << hiddenSize << std::endl;
}
if (numHeads <= 0)
{
gLogError << "QKV: Invalid numHeads " << numHeads << std::endl;
}
gLogVerbose << "Building the Plugin...\n";
DataType type = static_cast<DataType>(typeId);
QKVToContextPluginDynamic* p = new QKVToContextPluginDynamic(name, type, hiddenSize, numHeads, hasMask);
return p;
}
IPluginV2* QKVToContextPluginDynamicCreator::deserializePlugin(
const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call QKVToContextPluginDynamic::destroy()
return new QKVToContextPluginDynamic(name, serialData, serialLength);
}
void QKVToContextPluginDynamicCreator::setPluginNamespace(const char* libNamespace)
{
mNamespace = libNamespace;
}
const char* QKVToContextPluginDynamicCreator::getPluginNamespace() const
{
return mNamespace.c_str();
}
}
| 5b6419963e5edb7a7d8d05168fe02b73408bc075.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "NvInfer.h"
#include "bertCommon.h"
#include "qkvToContextPlugin.h"
#include "common.h"
#include "serialize.hpp"
#include <cassert>
#include <cstring>
#include <iostream>
#include <vector>
using namespace nvinfer1;
namespace bert
{
template <typename T, unsigned TPB>
__global__ void scaledSoftmaxKernelSmall(const int ld, const float rsqrtHeadSize, const T* input, T* output)
{
scaledSoftmaxSmall<T, TPB>(ld, ld, rsqrtHeadSize, input, output);
}
template <typename T, unsigned TPB>
__global__ void scaledSoftmaxKernel(const int ld, const float rsqrtHeadSize, const T* input, T* output)
{
scaledSoftmax<T, TPB>(ld, ld, rsqrtHeadSize, input, output);
}
template <typename T>
int computeScaledSoftmax(
cudaStream_t stream, const int ld, const int B, const int N, const float rsqrtHeadSize, const T* input, T* output)
{
const dim3 grid(ld * N, B, 1);
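// Pick a kernel based on the row length ld: the "Small" variants are used when a single block of TPB threads can cover the whole row, otherwise the generic 256-thread kernel loops over it.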
if (ld <= 32)
{
const int blockSize = 32;
scaledSoftmaxKernelSmall<T, blockSize><<<grid, blockSize, 0, stream>>>(ld, rsqrtHeadSize, input, output);
}
else if (ld <= 128)
{
const int blockSize = 128;
scaledSoftmaxKernelSmall<T, blockSize><<<grid, blockSize, 0, stream>>>(ld, rsqrtHeadSize, input, output);
}
else if (ld == 384)
{
const int blockSize = 384;
scaledSoftmaxKernelSmall<T, blockSize><<<grid, blockSize, 0, stream>>>(ld, rsqrtHeadSize, input, output);
}
else
{
const int blockSize = 256;
scaledSoftmaxKernel<T, blockSize><<<grid, blockSize, 0, stream>>>(ld, rsqrtHeadSize, input, output);
}
CHECK(cudaPeekAtLastError());
return 0;
}
template <typename T, unsigned TPB>
__global__ void maskedScaledSoftmaxKernelSmall(
const int ld, const float rsqrtHeadSize, const int* maskIdx, const T* input, T* output)
{
__shared__ int lastValid;
if (threadIdx.x == 0)
{
lastValid = min(ld, maskIdx[blockIdx.y]);
}
__syncthreads();
scaledSoftmaxSmall<T, TPB>(ld, lastValid, rsqrtHeadSize, input, output);
}
template <typename T, unsigned TPB>
__global__ void maskedScaledSoftmaxKernel(
const int ld, const float rsqrtHeadSize, const int* maskIdx, const T* input, T* output)
{
__shared__ int lastValid;
if (threadIdx.x == 0)
{
lastValid = min(ld, maskIdx[blockIdx.y]);
}
__syncthreads();
scaledSoftmax<T, TPB>(ld, lastValid, rsqrtHeadSize, input, output);
}
template <typename T>
int computeMaskedScaledSoftmax(cudaStream_t stream, const int ld, const int B, const int N, const float rsqrtHeadSize,
const int* maskIdx, const T* input, T* output)
{
// Mask idx is of length B and assumes the valid region is contiguous starting
// from the beginning of the sequence
const dim3 grid(ld * N, B, 1);
if (ld <= 32)
{
const int blockSize = 32;
maskedScaledSoftmaxKernelSmall<T, blockSize>
<<<grid, blockSize, 0, stream>>>(ld, rsqrtHeadSize, maskIdx, input, output);
}
else if (ld <= 128)
{
const int blockSize = 128;
maskedScaledSoftmaxKernelSmall<T, blockSize>
<<<grid, blockSize, 0, stream>>>(ld, rsqrtHeadSize, maskIdx, input, output);
}
else if (ld == 384)
{
const int blockSize = 384;
maskedScaledSoftmaxKernelSmall<T, blockSize>
<<<grid, blockSize, 0, stream>>>(ld, rsqrtHeadSize, maskIdx, input, output);
}
else
{
const int blockSize = 256;
maskedScaledSoftmaxKernel<T, blockSize>
<<<grid, blockSize, 0, stream>>>(ld, rsqrtHeadSize, maskIdx, input, output);
}
CHECK(cudaPeekAtLastError());
return 0;
}
template <typename T>
inline int qkvToCtx(cublasHandle_t& cublas, const int B, const int S, const int numHeads, const int headSize,
const float rsqrtHeadSize, const T* input, T* output, T* qkptr, T* pptr, cudaStream_t stream,
const int* maskIdx = nullptr)
{
const int omatSize = S * S;
const int numMats = B * numHeads;
const T* qptr = input;
const T* kptr = qptr + headSize;
const T* vptr = kptr + headSize;
cublasSetStream(cublas, stream);
CublasConfigHelper helper(cublas);
// Q, K, V: BxNxSxH (inputs)
// Q * K': BxNxSxS (-> scratch1)
// P: BxNxSxS (-> scratch2)
// P * V: BxNxSxH (output)
const int ldQKV = 3 * B * numHeads * headSize;
const int strideQKV = 3 * headSize;
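// Batched strided GEMM over all B*N heads: compute each head's S x S attention score matrix Q*K^T into the qkptr scratch buffer.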
CHECK(cublasGemmStridedBatched<T>(cublas, CUBLAS_OP_T, CUBLAS_OP_N, S, S, headSize, 1.f, kptr, ldQKV, strideQKV,
qptr, ldQKV, strideQKV, 0.f, qkptr, S, omatSize, numMats));
// apply softmax
if (maskIdx)
{ // if we have a mask
computeMaskedScaledSoftmax<T>(stream, S, B, numHeads, rsqrtHeadSize, maskIdx, qkptr, pptr);
}
else
{ // if we don't have a mask
computeScaledSoftmax<T>(stream, S, B, numHeads, rsqrtHeadSize, qkptr, pptr);
}
// compute P*V (as V*P)
const int ldOut = B * numHeads * headSize;
const int strideOut = headSize;
CHECK(cublasGemmStridedBatched<T>(cublas, CUBLAS_OP_N, CUBLAS_OP_N, headSize, S, S, 1.f, vptr, ldQKV, strideQKV,
pptr, S, omatSize, 0.f, output, ldOut, strideOut, numMats));
return 0;
}
namespace
{
static const char* QKV_TO_CONTEXT_PLUGIN_VERSION{"1"};
static const char* QKV_TO_CONTEXT_PLUGIN_NAME{"CustomQKVToContextPluginDynamic"};
} // namespace
// Static class fields initialization
PluginFieldCollection QKVToContextPluginDynamicCreator::mFC{};
std::vector<PluginField> QKVToContextPluginDynamicCreator::mPluginAttributes;
REGISTER_TENSORRT_PLUGIN(QKVToContextPluginDynamicCreator);
constexpr size_t kAlignment = 256;
constexpr uint32_t IIDX = 0; // index of the input tensor
constexpr uint32_t MIDX = 1; // index of the mask
QKVToContextPluginDynamic::QKVToContextPluginDynamic(
const std::string name, const DataType type, const int hiddenSize, const int numHeads, bool hasImask)
: mLayerName(name)
, mHiddenSize(hiddenSize)
, mNumHeads(numHeads)
, mHasImask(hasImask)
, mType(type)
{
assert(hiddenSize % numHeads == 0);
mHeadSize = hiddenSize / numHeads;
mRsqrtHeadSize = 1.f / sqrt(float(mHeadSize));
}
QKVToContextPluginDynamic::QKVToContextPluginDynamic(const std::string name, const void* data, size_t length)
: mLayerName(name)
{
gLogVerbose << "QKV Deser Start" << std::endl;
deserialize_value(&data, &length, &mType);
deserialize_value(&data, &length, &mNumHeads);
deserialize_value(&data, &length, &mHeadSize);
deserialize_value(&data, &length, &mRsqrtHeadSize);
deserialize_value(&data, &length, &mHasImask);
deserialize_value(&data, &length, &mHiddenSize);
gLogVerbose << "QKV Deser done" << std::endl;
}
// IPluginV2DynamicExt Methods
nvinfer1::IPluginV2DynamicExt* QKVToContextPluginDynamic::clone() const
{
gLogVerbose << "QKV Clone" << std::endl;
auto ret = new QKVToContextPluginDynamic(mLayerName, mType, mHiddenSize, mNumHeads, mHasImask);
ret->initialize();
gLogVerbose << "QKV Clone done" << std::endl;
return ret;
}
DimsExprs QKVToContextPluginDynamic::getOutputDimensions(
int outputIndex, const DimsExprs* inputs, int nbInputs, IExprBuilder& exprBuilder)
{
// Input is BxSx3*N*H, output should be BxSxN*H
assert(outputIndex == 0);
// Copy over everything
DimsExprs output(inputs[IIDX]);
// Divide last dim by three
auto three = exprBuilder.constant(3);
output.d[HDIM] = exprBuilder.operation(DimensionOperation::kFLOOR_DIV, *inputs[IIDX].d[HDIM], *three);
return output;
}
bool QKVToContextPluginDynamic::supportsFormatCombination(
int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs)
{
assert(pos >= 0);
assert(pos < 2 + mHasImask);
assert(nbInputs == 1 + mHasImask);
const auto* in = inOut;
const auto* out = inOut + nbInputs;
if (pos == 0)
{
// must not inspect descriptors at indices > pos
return (in->type == mType) && // precision
(in->format == TensorFormat::kLINEAR) && // format
(in->dims.nbDims == 5) && // num dims
((in->dims.d[HDIM] % 3) == 0) && // see getOutputDimensions
((in->dims.d[3]) == 1) && // for fc
((in->dims.d[4]) == 1) // for fc
;
}
else
{ // pos==1
if ((mHasImask && pos == 1))
{
const auto* inMask = &inOut[1];
return (inMask->type == DataType::kINT32) && // precision
(inMask->format == TensorFormat::kLINEAR) && // format
(inMask->dims.nbDims == 1) && // num dims
((inMask->dims.d[0]) == in->dims.d[BDIM]) // check B
;
}
if (!mHasImask || (pos == 2))
{
return (in->type == out->type) && // precision
(out->format == TensorFormat::kLINEAR) && // format
(out->dims.nbDims == 5) && // num dims
((in->dims.d[HDIM] / 3) == (out->dims.d[HDIM])) && // div 3
((out->dims.d[3]) == 1) && // for fc
((out->dims.d[4]) == 1) && // for fc
((out->dims.d[BDIM]) == in->dims.d[BDIM]) && // check B
((out->dims.d[SDIM]) == in->dims.d[SDIM]) // check S
;
}
}
return false;
}
void QKVToContextPluginDynamic::configurePlugin(
const DynamicPluginTensorDesc* in, int nbInputs, const DynamicPluginTensorDesc* out, int nbOutputs)
{
assert(nbInputs == 1 + mHasImask);
assert(nbOutputs == 1);
const PluginTensorDesc& inDesc = in[IIDX].desc;
TRT_UNUSED inDesc;
const PluginTensorDesc& outDesc = out->desc;
TRT_UNUSED outDesc;
assert(mType == inDesc.type);
assert(mType == outDesc.type);
assert(inDesc.dims.d[BDIM] == outDesc.dims.d[BDIM]);
assert(inDesc.dims.d[SDIM] == outDesc.dims.d[SDIM]);
assert(inDesc.dims.d[HDIM] == 3 * outDesc.dims.d[HDIM]);
if (mHasImask)
{
const PluginTensorDesc& maskDesc = in[MIDX].desc;
TRT_UNUSED maskDesc;
assert(maskDesc.type == DataType::kINT32);
assert(maskDesc.dims.d[0] == inDesc.dims.d[BDIM]);
}
}
size_t QKVToContextPluginDynamic::scratchSize(const int B, const int S) const
{
const size_t wordSize = samplesCommon::getElementSize(mType);
const size_t len = B * mNumHeads * S * S;
const size_t bytes = len * wordSize;
return bytes;
}
size_t QKVToContextPluginDynamic::getWorkspaceSize(
const PluginTensorDesc* inputs, int nbInputs, const PluginTensorDesc* outputs, int nbOutputs) const
{
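// Two aligned scratch buffers of B*N*S*S elements each: one for the raw Q*K^T scores and one for the softmax probabilities.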
const int B = inputs->dims.d[BDIM];
const int S = inputs->dims.d[SDIM];
const size_t bytesAligned = alignTo<size_t>(scratchSize(B, S), kAlignment);
const size_t ws = 2UL * bytesAligned;
return ws;
}
// IPluginV2Ext Methods
DataType QKVToContextPluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
assert(index == 0);
assert(inputTypes[0] == DataType::kFLOAT || inputTypes[0] == DataType::kHALF);
return inputTypes[0];
}
// IPluginV2 Methods
const char* QKVToContextPluginDynamic::getPluginType() const
{
return QKV_TO_CONTEXT_PLUGIN_NAME;
}
const char* QKVToContextPluginDynamic::getPluginVersion() const
{
return QKV_TO_CONTEXT_PLUGIN_VERSION;
}
int QKVToContextPluginDynamic::getNbOutputs() const
{
return 1;
}
int QKVToContextPluginDynamic::initialize()
{
cublasCreate(&cublas);
return 0;
}
void QKVToContextPluginDynamic::terminate()
{
CHECK(cublasDestroy(cublas));
}
size_t QKVToContextPluginDynamic::getSerializationSize() const
{
return sizeof(mNumHeads) + sizeof(mHeadSize) + sizeof(DataType) + sizeof(mRsqrtHeadSize) + sizeof(mHasImask)
+ sizeof(mHiddenSize);
}
void QKVToContextPluginDynamic::serialize(void* buffer) const
{
serialize_value(&buffer, mType);
serialize_value(&buffer, mNumHeads);
serialize_value(&buffer, mHeadSize);
serialize_value(&buffer, mRsqrtHeadSize);
serialize_value(&buffer, mHasImask);
serialize_value(&buffer, mHiddenSize);
}
void QKVToContextPluginDynamic::destroy()
{
delete this;
}
void QKVToContextPluginDynamic::setPluginNamespace(const char* libNamespace)
{
mNamespace = libNamespace;
}
const char* QKVToContextPluginDynamic::getPluginNamespace() const
{
return mNamespace.c_str();
}
int QKVToContextPluginDynamic::enqueue(const PluginTensorDesc* inputDesc, const PluginTensorDesc* outputDesc,
const void* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream)
{
const int batchSize = inputDesc->dims.d[BDIM];
const int S = inputDesc->dims.d[SDIM];
const size_t bytesAligned = alignTo<size_t>(scratchSize(batchSize, S), kAlignment);
char* scratch1 = static_cast<char*>(workspace);
char* scratch2 = scratch1 + bytesAligned;
const int* maskIdx = mHasImask ? static_cast<const int*>(inputs[1]) : nullptr;
int status = -1;
if (mType == DataType::kFLOAT)
{
const float* input = static_cast<const float*>(inputs[0]);
float* output = static_cast<float*>(outputs[0]);
float* scr1 = reinterpret_cast<float*>(scratch1);
float* scr2 = reinterpret_cast<float*>(scratch2);
status = qkvToCtx(
cublas, batchSize, S, mNumHeads, mHeadSize, mRsqrtHeadSize, input, output, scr1, scr2, stream, maskIdx);
}
else if (mType == DataType::kHALF)
{
const half* input = static_cast<const half*>(inputs[0]);
half* output = static_cast<half*>(outputs[0]);
half* scr1 = reinterpret_cast<half*>(scratch1);
half* scr2 = reinterpret_cast<half*>(scratch2);
status = qkvToCtx(
cublas, batchSize, S, mNumHeads, mHeadSize, mRsqrtHeadSize, input, output, scr1, scr2, stream, maskIdx);
}
else
{
assert(false);
}
return status;
}
QKVToContextPluginDynamicCreator::QKVToContextPluginDynamicCreator()
{
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* QKVToContextPluginDynamicCreator::getPluginName() const
{
return QKV_TO_CONTEXT_PLUGIN_NAME;
}
const char* QKVToContextPluginDynamicCreator::getPluginVersion() const
{
return QKV_TO_CONTEXT_PLUGIN_VERSION;
}
const PluginFieldCollection* QKVToContextPluginDynamicCreator::getFieldNames()
{
return &mFC;
}
IPluginV2* QKVToContextPluginDynamicCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
gLogVerbose << "Creating QKV2ContextPlugin...\n";
int hiddenSize = 0;
int numHeads = 0;
bool hasMask = false;
int typeId = -1;
for (int i = 0; i < fc->nbFields; i++)
{
std::string field_name(fc->fields[i].name);
if (field_name.compare("type_id") == 0)
{
typeId = *static_cast<const int*>(fc->fields[i].data);
gLogVerbose << "Building typeId: " << typeId << std::endl;
}
if (field_name.compare("hidden_size") == 0)
{
hiddenSize = *static_cast<const int*>(fc->fields[i].data);
gLogVerbose << "Building hiddenSize: " << hiddenSize << std::endl;
}
if (field_name.compare("num_heads") == 0)
{
numHeads = *static_cast<const int*>(fc->fields[i].data);
gLogVerbose << "Building numHeads: " << numHeads << std::endl;
}
if (field_name.compare("has_mask") == 0)
{
hasMask = *static_cast<const bool*>(fc->fields[i].data);
gLogVerbose << "Building hasMask: " << hasMask << std::endl;
}
}
if (typeId < 0 || typeId > 3)
{
gLogError << "QKV: Invalid TypeId " << typeId << std::endl;
}
if (hiddenSize <= 0)
{
gLogError << "QKV: Invalid hiddenSize " << hiddenSize << std::endl;
}
if (numHeads <= 0)
{
gLogError << "QKV: Invalid numHeads " << numHeads << std::endl;
}
gLogVerbose << "Building the Plugin...\n";
DataType type = static_cast<DataType>(typeId);
QKVToContextPluginDynamic* p = new QKVToContextPluginDynamic(name, type, hiddenSize, numHeads, hasMask);
return p;
}
IPluginV2* QKVToContextPluginDynamicCreator::deserializePlugin(
const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call QKVToContextPluginDynamic::destroy()
return new QKVToContextPluginDynamic(name, serialData, serialLength);
}
void QKVToContextPluginDynamicCreator::setPluginNamespace(const char* libNamespace)
{
mNamespace = libNamespace;
}
const char* QKVToContextPluginDynamicCreator::getPluginNamespace() const
{
return mNamespace.c_str();
}
}
|
fa03334ad6d627f6e05889aac3d747cf8b0ff293.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zlaqps2_gpu.cu normal z -> c, Fri Jan 30 19:00:09 2015
*/
#include "common_magma.h"
#include "commonblas_c.h"
#define PRECISION_c
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
/* --------------------------------------------------------------------------- */
/**
Purpose
-------
CLAQPS computes a step of QR factorization with column pivoting
of a complex M-by-N matrix A by using Blas-3. It tries to factorize
NB columns from A starting from the row OFFSET+1, and updates all
of the matrix with Blas-3 xGEMM.
In some cases, due to catastrophic cancellations, it cannot
factorize NB columns. Hence, the actual number of factorized
columns is returned in KB.
Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
offset INTEGER
The number of rows of A that have been factorized in
previous steps.
@param[in]
NB INTEGER
The number of columns to factorize.
@param[out]
kb INTEGER
The number of columns actually factorized.
@param[in,out]
dA COMPLEX array, dimension (LDDA,N)
On entry, the M-by-N matrix A.
On exit, block A(OFFSET+1:M,1:KB) is the triangular
factor obtained and block A(1:OFFSET,1:N) has been
accordingly pivoted, but not factorized.
The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has
been updated.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[in,out]
jpvt INTEGER array, dimension (N)
JPVT(I) = K <==> Column K of the full matrix A has been
permuted into position I in AP.
@param[out]
dtau COMPLEX array, dimension (KB)
The scalar factors of the elementary reflectors.
@param[in,out]
dvn1 REAL array, dimension (N)
The vector with the partial column norms.
@param[in,out]
dvn2 REAL array, dimension (N)
The vector with the exact column norms.
@param[in,out]
dauxv COMPLEX array, dimension (NB)
Auxiliary vector.
@param[in,out]
dF COMPLEX array, dimension (LDDF,NB)
Matrix F**H = L * Y**H * A.
@param[in]
lddf INTEGER
The leading dimension of the array F. LDDF >= max(1,N).
@ingroup magma_cgeqp3_aux
********************************************************************/
extern "C" magma_int_t
magma_claqps2_gpu(
magma_int_t m, magma_int_t n, magma_int_t offset,
magma_int_t nb, magma_int_t *kb,
magmaFloatComplex_ptr dA, magma_int_t ldda,
magma_int_t *jpvt,
magmaFloatComplex_ptr dtau,
magmaFloat_ptr dvn1, magmaFloat_ptr dvn2,
magmaFloatComplex_ptr dauxv,
magmaFloatComplex_ptr dF, magma_int_t lddf)
{
#define dA(i_, j_) (dA + (i_) + (j_)*(ldda))
#define dF(i_, j_) (dF + (i_) + (j_)*(lddf))
magmaFloatComplex c_zero = MAGMA_C_MAKE( 0.,0.);
magmaFloatComplex c_one = MAGMA_C_MAKE( 1.,0.);
magmaFloatComplex c_neg_one = MAGMA_C_MAKE(-1.,0.);
magma_int_t ione = 1;
magma_int_t i__1, i__2;
magma_int_t k, rk;
magmaFloatComplex tauk;
magma_int_t pvt, itemp;
float tol3z;
magmaFloatComplex_ptr dAkk = dauxv;
dauxv += nb;
float lsticc, *lsticcs;
magma_smalloc( &lsticcs, 1+256*(n+255)/256 );
tol3z = magma_ssqrt( lapackf77_slamch("Epsilon"));
lsticc = 0;
k = 0;
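// Factor up to nb columns; stop the panel early if lsticc flags that a downdated partial column norm became unreliable and needs recomputation.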
while( k < nb && lsticc == 0 ) {
rk = offset + k;
/* Determine ith pivot column and swap if necessary */
pvt = k - 1 + magma_isamax( n-k, &dvn1[k], ione );
if (pvt != k) {
magmablas_cswap( k+1, dF(pvt,0), lddf, dF(k,0), lddf);
itemp = jpvt[pvt];
jpvt[pvt] = jpvt[k];
jpvt[k] = itemp;
#if (defined(PRECISION_d) || defined(PRECISION_z))
//magma_dswap( 1, &dvn1[pvt], 1, &dvn1[k], 1 );
//magma_dswap( 1, &dvn2[pvt], 1, &dvn2[k], 1 );
magma_dswap( 2, &dvn1[pvt], n+offset, &dvn1[k], n+offset);
#else
//magma_sswap( 1, &dvn1[pvt], 1, &dvn1[k], 1 );
//magma_sswap( 1, &dvn2[pvt], 1, &dvn2[k], 1 );
magma_sswap(2, &dvn1[pvt], n+offset, &dvn1[k], n+offset);
#endif
magmablas_cswap( m, dA(0,pvt), ione, dA(0, k), ione );
}
/* Apply previous Householder reflectors to column K:
A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)'.
Optimization: multiply with beta=0; wait for vector and subtract */
if (k > 0) {
magmablas_cgemv_conjv( m-rk, k,
c_neg_one, dA(rk, 0), ldda,
dF(k, 0), lddf,
c_one, dA(rk, k), ione );
}
/* Generate elementary reflector H(k). */
magma_clarfg_gpu(m-rk, dA(rk, k), dA(rk + 1, k), &dtau[k], &dvn1[k], &dAkk[k]);
magma_csetvector( 1, &c_one, 1, dA(rk, k), 1 );
/* Compute Kth column of F:
Compute F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) on the GPU */
if (k < n-1 || k > 0 ) magma_cgetvector( 1, &dtau[k], 1, &tauk, 1 );
if (k < n-1) {
magma_cgemv( MagmaConjTrans, m-rk, n-k-1,
tauk, dA( rk, k+1 ), ldda,
dA( rk, k ), 1,
c_zero, dF( k+1, k ), 1 );
}
/* Incremental updating of F:
F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K).
F(1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K)
:= tau(K)(A(RK:M,K+1:N)' - F(1:N,1:K-1)*A(RK:M,1:K-1)') A(RK:M,K)
so, F is (updated A)*V */
if (k > 0) {
/*z__1 = MAGMA_C_NEGATE( tauk );
magma_cgemv( MagmaConjTrans, m-rk, k,
z__1, dA(rk, 0), ldda,
dA(rk, k), ione,
c_zero, dauxv, ione );*/
hipLaunchKernelGGL(( magma_cgemv_kernel3), dim3(k), dim3(BLOCK_SIZE), 0, magma_stream , m-rk, dA(rk, 0), ldda,
dA(rk, k), dauxv, dtau+k);
/* I think we only need strictly lower-triangular part */
magma_cgemv( MagmaNoTrans, n-k-1, k,
c_one, dF(k+1,0), lddf,
dauxv, ione,
c_one, dF(k+1,k), ione );
}
/* Update the current row of A:
A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. */
if (k < n-1) {
i__1 = n - k - 1;
i__2 = k + 1;
/* left-looking update of rows, *
* since F=A**H v with original A, so no right-looking */
magma_cgemm( MagmaNoTrans, MagmaConjTrans, ione, i__1, i__2,
c_neg_one, dA(rk, 0 ), ldda,
dF(k+1,0 ), lddf,
c_one, dA(rk, k+1), ldda );
}
/* Update partial column norms. */
if (rk < min(m, n+offset)-1){
magmablas_scnrm2_row_check_adjust(n-k-1, tol3z, &dvn1[k+1],
&dvn2[k+1], dA(rk,k+1), ldda, lsticcs);
#if defined(PRECISION_d) || defined(PRECISION_z)
magma_sgetvector( 1, &lsticcs[0], 1, &lsticc, 1 );
#else
magma_sgetvector( 1, &lsticcs[0], 1, &lsticc, 1 );
#endif
}
//*dA(rk, k) = Akk;
//magma_csetvector( 1, &Akk, 1, dA(rk, k), 1 );
//magmablas_clacpy(MagmaUpperLower, 1, 1, dAkk, 1, dA(rk, k), 1);
++k;
}
// restore the diagonals
magma_ccopymatrix( 1, k, dAkk, 1, dA(offset, 0), ldda+1 );
// leave k as the last column done
--k;
*kb = k + 1;
rk = offset + *kb - 1;
/* Apply the block reflector to the rest of the matrix:
A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) -
A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)' */
if (*kb < min(n, m - offset)) {
i__1 = m - rk - 1;
i__2 = n - *kb;
magma_cgemm( MagmaNoTrans, MagmaConjTrans, i__1, i__2, *kb,
c_neg_one, dA(rk+1, 0 ), ldda,
dF(*kb, 0 ), lddf,
c_one, dA(rk+1, *kb), ldda );
}
/* Recomputation of difficult columns. */
if( lsticc > 0 ) {
// printf( " -- recompute dnorms --\n" );
magmablas_scnrm2_check(m-rk-1, n-*kb, dA(rk+1,*kb), ldda,
&dvn1[*kb], lsticcs);
#if defined(PRECISION_d) || defined(PRECISION_z)
magma_scopymatrix( n-*kb, 1, &dvn1[*kb], n, &dvn2[*kb], n);
#else
magma_scopymatrix( n-*kb, 1, &dvn1[*kb], n, &dvn2[*kb], n);
#endif
}
magma_free(lsticcs);
return MAGMA_SUCCESS;
} /* magma_claqps */
| fa03334ad6d627f6e05889aac3d747cf8b0ff293.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zlaqps2_gpu.cu normal z -> c, Fri Jan 30 19:00:09 2015
*/
#include "common_magma.h"
#include "commonblas_c.h"
#define PRECISION_c
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
/* --------------------------------------------------------------------------- */
/**
Purpose
-------
CLAQPS computes a step of QR factorization with column pivoting
of a complex M-by-N matrix A by using Blas-3. It tries to factorize
NB columns from A starting from the row OFFSET+1, and updates all
of the matrix with Blas-3 xGEMM.
In some cases, due to catastrophic cancellations, it cannot
factorize NB columns. Hence, the actual number of factorized
columns is returned in KB.
Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
offset INTEGER
The number of rows of A that have been factorized in
previous steps.
@param[in]
NB INTEGER
The number of columns to factorize.
@param[out]
kb INTEGER
The number of columns actually factorized.
@param[in,out]
dA COMPLEX array, dimension (LDDA,N)
On entry, the M-by-N matrix A.
On exit, block A(OFFSET+1:M,1:KB) is the triangular
factor obtained and block A(1:OFFSET,1:N) has been
accordingly pivoted, but not factorized.
The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has
been updated.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[in,out]
jpvt INTEGER array, dimension (N)
JPVT(I) = K <==> Column K of the full matrix A has been
permuted into position I in AP.
@param[out]
dtau COMPLEX array, dimension (KB)
The scalar factors of the elementary reflectors.
@param[in,out]
dvn1 REAL array, dimension (N)
The vector with the partial column norms.
@param[in,out]
dvn2 REAL array, dimension (N)
The vector with the exact column norms.
@param[in,out]
dauxv COMPLEX array, dimension (NB)
Auxiliary vector.
@param[in,out]
dF COMPLEX array, dimension (LDDF,NB)
Matrix F**H = L * Y**H * A.
@param[in]
lddf INTEGER
The leading dimension of the array F. LDDF >= max(1,N).
@ingroup magma_cgeqp3_aux
********************************************************************/
extern "C" magma_int_t
magma_claqps2_gpu(
magma_int_t m, magma_int_t n, magma_int_t offset,
magma_int_t nb, magma_int_t *kb,
magmaFloatComplex_ptr dA, magma_int_t ldda,
magma_int_t *jpvt,
magmaFloatComplex_ptr dtau,
magmaFloat_ptr dvn1, magmaFloat_ptr dvn2,
magmaFloatComplex_ptr dauxv,
magmaFloatComplex_ptr dF, magma_int_t lddf)
{
#define dA(i_, j_) (dA + (i_) + (j_)*(ldda))
#define dF(i_, j_) (dF + (i_) + (j_)*(lddf))
magmaFloatComplex c_zero = MAGMA_C_MAKE( 0.,0.);
magmaFloatComplex c_one = MAGMA_C_MAKE( 1.,0.);
magmaFloatComplex c_neg_one = MAGMA_C_MAKE(-1.,0.);
magma_int_t ione = 1;
magma_int_t i__1, i__2;
magma_int_t k, rk;
magmaFloatComplex tauk;
magma_int_t pvt, itemp;
float tol3z;
magmaFloatComplex_ptr dAkk = dauxv;
dauxv += nb;
float lsticc, *lsticcs;
magma_smalloc( &lsticcs, 1+256*(n+255)/256 );
tol3z = magma_ssqrt( lapackf77_slamch("Epsilon"));
lsticc = 0;
k = 0;
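// Factor up to nb columns; stop the panel early if lsticc flags that a downdated partial column norm became unreliable and needs recomputation.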
while( k < nb && lsticc == 0 ) {
rk = offset + k;
/* Determine ith pivot column and swap if necessary */
pvt = k - 1 + magma_isamax( n-k, &dvn1[k], ione );
if (pvt != k) {
magmablas_cswap( k+1, dF(pvt,0), lddf, dF(k,0), lddf);
itemp = jpvt[pvt];
jpvt[pvt] = jpvt[k];
jpvt[k] = itemp;
#if (defined(PRECISION_d) || defined(PRECISION_z))
//magma_dswap( 1, &dvn1[pvt], 1, &dvn1[k], 1 );
//magma_dswap( 1, &dvn2[pvt], 1, &dvn2[k], 1 );
magma_dswap( 2, &dvn1[pvt], n+offset, &dvn1[k], n+offset);
#else
//magma_sswap( 1, &dvn1[pvt], 1, &dvn1[k], 1 );
//magma_sswap( 1, &dvn2[pvt], 1, &dvn2[k], 1 );
magma_sswap(2, &dvn1[pvt], n+offset, &dvn1[k], n+offset);
#endif
magmablas_cswap( m, dA(0,pvt), ione, dA(0, k), ione );
}
/* Apply previous Householder reflectors to column K:
A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)'.
Optimization: multiply with beta=0; wait for vector and subtract */
if (k > 0) {
magmablas_cgemv_conjv( m-rk, k,
c_neg_one, dA(rk, 0), ldda,
dF(k, 0), lddf,
c_one, dA(rk, k), ione );
}
/* Generate elementary reflector H(k). */
magma_clarfg_gpu(m-rk, dA(rk, k), dA(rk + 1, k), &dtau[k], &dvn1[k], &dAkk[k]);
magma_csetvector( 1, &c_one, 1, dA(rk, k), 1 );
/* Compute Kth column of F:
Compute F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) on the GPU */
if (k < n-1 || k > 0 ) magma_cgetvector( 1, &dtau[k], 1, &tauk, 1 );
if (k < n-1) {
magma_cgemv( MagmaConjTrans, m-rk, n-k-1,
tauk, dA( rk, k+1 ), ldda,
dA( rk, k ), 1,
c_zero, dF( k+1, k ), 1 );
}
/* Incremental updating of F:
F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K).
F(1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K)
:= tau(K)(A(RK:M,K+1:N)' - F(1:N,1:K-1)*A(RK:M,1:K-1)') A(RK:M,K)
so, F is (updated A)*V */
if (k > 0) {
/*z__1 = MAGMA_C_NEGATE( tauk );
magma_cgemv( MagmaConjTrans, m-rk, k,
z__1, dA(rk, 0), ldda,
dA(rk, k), ione,
c_zero, dauxv, ione );*/
magma_cgemv_kernel3<<< k, BLOCK_SIZE, 0, magma_stream >>>(m-rk, dA(rk, 0), ldda,
dA(rk, k), dauxv, dtau+k);
/* I think we only need strictly lower-triangular part */
magma_cgemv( MagmaNoTrans, n-k-1, k,
c_one, dF(k+1,0), lddf,
dauxv, ione,
c_one, dF(k+1,k), ione );
}
/* Update the current row of A:
A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. */
if (k < n-1) {
i__1 = n - k - 1;
i__2 = k + 1;
/* left-looking update of rows, *
* since F=A**H v with original A, so no right-looking */
magma_cgemm( MagmaNoTrans, MagmaConjTrans, ione, i__1, i__2,
c_neg_one, dA(rk, 0 ), ldda,
dF(k+1,0 ), lddf,
c_one, dA(rk, k+1), ldda );
}
/* Update partial column norms. */
if (rk < min(m, n+offset)-1){
magmablas_scnrm2_row_check_adjust(n-k-1, tol3z, &dvn1[k+1],
&dvn2[k+1], dA(rk,k+1), ldda, lsticcs);
#if defined(PRECISION_d) || defined(PRECISION_z)
magma_sgetvector( 1, &lsticcs[0], 1, &lsticc, 1 );
#else
magma_sgetvector( 1, &lsticcs[0], 1, &lsticc, 1 );
#endif
}
//*dA(rk, k) = Akk;
//magma_csetvector( 1, &Akk, 1, dA(rk, k), 1 );
//magmablas_clacpy(MagmaUpperLower, 1, 1, dAkk, 1, dA(rk, k), 1);
++k;
}
// restore the diagonals
magma_ccopymatrix( 1, k, dAkk, 1, dA(offset, 0), ldda+1 );
// leave k as the last column done
--k;
*kb = k + 1;
rk = offset + *kb - 1;
/* Apply the block reflector to the rest of the matrix:
A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) -
A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)' */
if (*kb < min(n, m - offset)) {
i__1 = m - rk - 1;
i__2 = n - *kb;
magma_cgemm( MagmaNoTrans, MagmaConjTrans, i__1, i__2, *kb,
c_neg_one, dA(rk+1, 0 ), ldda,
dF(*kb, 0 ), lddf,
c_one, dA(rk+1, *kb), ldda );
}
/* Recomputation of difficult columns. */
if( lsticc > 0 ) {
// printf( " -- recompute dnorms --\n" );
magmablas_scnrm2_check(m-rk-1, n-*kb, dA(rk+1,*kb), ldda,
&dvn1[*kb], lsticcs);
#if defined(PRECISION_d) || defined(PRECISION_z)
magma_scopymatrix( n-*kb, 1, &dvn1[*kb], n, &dvn2[*kb], n);
#else
magma_scopymatrix( n-*kb, 1, &dvn1[*kb], n, &dvn2[*kb], n);
#endif
}
magma_free(lsticcs);
return MAGMA_SUCCESS;
} /* magma_claqps */
|
244dac7ee5bce05cfddf30469fdc492a65acc4c5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_CUDNN
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
__global__ void sync_conv_groups() { }
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// get minimum size of the workspace needed for the desired algorithm
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + weight_offset_ * g,
conv_descs_[i],
fwd_algo_[i], WORKSPACE[g], workspace_fwd_sizes_[i],
cudnn::dataType<Dtype>::zero,
top_descs_[i], top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
CUDNN_CHECK(cudnnAddTensor(handle_[g],
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, );
}
}
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
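// Bias, filter and data gradients use separate cuDNN handles (offsets 0, 1 and 2 times group_), so the three gradient computations can proceed concurrently until the sync below.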
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = (*bottom)[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter(handle_[1*this->group_ + g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],bwd_filter_algo_[i], WORKSPACE[1*this->group_ + g],
workspace_bwd_filter_sizes_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = (*bottom)[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData(handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],bwd_data_algo_[i], WORKSPACE[2*this->group_ + g],
workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, );
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
| 244dac7ee5bce05cfddf30469fdc492a65acc4c5.cu | #ifdef USE_CUDNN
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
__global__ void sync_conv_groups() { }
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// get minimum size of the workspace needed for the desired algorithm
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + weight_offset_ * g,
conv_descs_[i],
fwd_algo_[i], WORKSPACE[g], workspace_fwd_sizes_[i],
cudnn::dataType<Dtype>::zero,
top_descs_[i], top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
CUDNN_CHECK(cudnnAddTensor(handle_[g],
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_conv_groups<<<1, 1>>>();
}
}
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
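// Bias, filter and data gradients use separate cuDNN handles (offsets 0, 1 and 2 times group_), so the three gradient computations can proceed concurrently until the sync below.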
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = (*bottom)[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter(handle_[1*this->group_ + g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],bwd_filter_algo_[i], WORKSPACE[1*this->group_ + g],
workspace_bwd_filter_sizes_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = (*bottom)[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData(handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],bwd_data_algo_[i], WORKSPACE[2*this->group_ + g],
workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_conv_groups<<<1, 1>>>();
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
|
0da5c518d4e9df031de0e0e3afb9ab28f61a40f9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <stdlib.h>
#include <math.h>
struct point
{
float x;
float y;
};
struct dist
{
float da;
float db;
float dc;
};
float eucli(float fx, float fy)
{
return sqrt(fx * fx + fy * fy);
}
__global__ void trilaterate(struct point a, struct point b, struct point c, struct dist *d_set, struct point *d_trail, int NUM)
{
float a1Sq = a.x * a.x, a2Sq = b.x * b.x, a3Sq = c.x * c.x, b1Sq = a.y * a.y, b2Sq = b.y * b.y, b3Sq = c.y * c.y;
float r1Sq, r2Sq, r3Sq, denom1, numer1, denom2, numer2;
float a1 = a.x, a2 = b.x, a3 = c.x, b1 = a.y, b2 = b.y, b3 = c.y;
int i;
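// Linearized trilateration: subtracting the squared-distance equations of the three anchors pairwise yields a linear system solved for y (numer1/denom1) and then x (numer2/denom2).
// Note: every thread runs the full loop over all NUM points; work is not partitioned by thread index.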
for(i=0; i < NUM; i++)
{
r1Sq = d_set[i].da * d_set[i].da;
r2Sq = d_set[i].db * d_set[i].db;
r3Sq = d_set[i].dc * d_set[i].dc;
numer1 = (a2 - a1) * (a3Sq + b3Sq - r3Sq) + (a1 - a3) * (a2Sq + b2Sq - r2Sq) + (a3 - a2) * (a1Sq + b1Sq - r1Sq);
denom1 = 2 * (b3 * (a2 - a1) + b2 * (a1 - a3) + b1 * (a3 - a2));
d_trail[i].y = numer1/denom1;
numer2 = r2Sq - r1Sq + a1Sq - a2Sq + b1Sq - b2Sq - 2 * (b1 - b2) * d_trail[i].y;
denom2 = 2 * (a1 - a2);
d_trail[i].x = numer2/denom2;
}
}
int main(int argc, char *argv[])
{
hipEvent_t start, stop;
float etime;
int i, j=0;
float fx, fy, gx, gy, z = 5.0;
int NUM;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
if (argc != 2)
{
printf("Check you arguments!\n");
exit(1);
}
struct point a, b, c;
a.x = 1.67; a.y = 2.58;
b.x = 3.74; b.y = 2.08;
c.x = 5.12; c.y = 3.95;
struct point init;
init.x = 3.12;
init.y = 4.27;
NUM = atoi(argv[1]);
struct point trail[NUM], avg_trail[(NUM/4)], ret_avg_trail[(NUM/4)];
struct point *d_trail, *h_trail;
trail[0] = init;
srand(time(NULL));
for(i=1; i<NUM; i++)
{
gx = ((float)rand()/(float)(RAND_MAX)) * z;
gx = floorf(gx * 100) / 100;
gy = ((float)rand()/(float)(RAND_MAX)) * z;
gy = floorf(gy * 100) / 100;
trail[i].x = (floorf(trail[i-1].x * 100 + 0.5) / 100) + gx;
trail[i].y = (floorf(trail[i-1].y * 100 + 0.5) / 100) + gy;
}
for(i=0; i<(NUM/4); i++)
{
avg_trail[i].x = (trail[j].x + trail[j+1].x + trail[j+2].x + trail[j+3].x) / 4;
avg_trail[i].y = (trail[j].y + trail[j+1].y + trail[j+2].y + trail[j+3].y) / 4;
j += 4;
}
printf("\nAvg. Random Trail at Host\n");
for(i=0; i<(NUM/4); i++)
{
printf("(%f, %f)\n", avg_trail[i].x, avg_trail[i].y);
}
struct dist *set;
size_t size = NUM * sizeof(struct dist);
set = (struct dist *)malloc(size);
size_t sz = NUM * sizeof(struct point);
h_trail = (struct point *)malloc(sz);
for(i=0; i<NUM; i++)
{
fx = trail[i].x - a.x;
fy = trail[i].y - a.y;
set[i].da = eucli(fx, fy);
fx = trail[i].x - b.x;
fy = trail[i].y - b.y;
set[i].db = eucli(fx, fy);
fx = trail[i].x - c.x;
fy = trail[i].y - c.y;
set[i].dc = eucli(fx, fy);
}
struct dist *d_set;
hipMalloc((void **) &d_set, size);
hipMalloc((void **) &d_trail, sz);
hipMemcpy(d_set, set, sizeof(struct dist)*NUM, hipMemcpyHostToDevice);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, 0);
int nBlocks = devProp.multiProcessorCount;
int blockSize = devProp.warpSize;
printf("\nU: %d\n", nBlocks);
printf("\nV: %d\n", blockSize);
hipLaunchKernelGGL(( trilaterate) , dim3(nBlocks), dim3(blockSize) , 0, 0, a, b, c, d_set, d_trail, NUM);
hipMemcpy(h_trail, d_trail, sizeof(struct point)*NUM, hipMemcpyDeviceToHost);
j=0;
for(i=0; i<(NUM/4); i++)
{
ret_avg_trail[i].x = (h_trail[j].x + h_trail[j+1].x + h_trail[j+2].x + h_trail[j+3].x) / 4;
ret_avg_trail[i].y = (h_trail[j].y + h_trail[j+1].y + h_trail[j+2].y + h_trail[j+3].y) / 4;
j += 4;
}
printf("\nAvg. Generated Trail at Device\n");
for(i=0; i<(NUM/4); i++)
{
printf("(%f, %f)\n", ret_avg_trail[i].x, ret_avg_trail[i].y);
}
printf("\n");
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&etime, start, stop);
printf("Time elapsed: %f ms\n", etime);
hipEventDestroy(start);
hipEventDestroy(stop);
free(set);
hipFree(d_set);
hipFree(d_trail);
free(h_trail); // h_trail was allocated with malloc, not hipMalloc
return 0;
}
| 0da5c518d4e9df031de0e0e3afb9ab28f61a40f9.cu | #include <stdio.h>
#include <cuda.h>
#include <time.h>
#include <stdlib.h>
#include <math.h>
struct point
{
float x;
float y;
};
struct dist
{
float da;
float db;
float dc;
};
float eucli(float fx, float fy)
{
return sqrt(fx * fx + fy * fy);
}
__global__ void trilaterate(struct point a, struct point b, struct point c, struct dist *d_set, struct point *d_trail, int NUM)
{
float a1Sq = a.x * a.x, a2Sq = b.x * b.x, a3Sq = c.x * c.x, b1Sq = a.y * a.y, b2Sq = b.y * b.y, b3Sq = c.y * c.y;
float r1Sq, r2Sq, r3Sq, denom1, numer1, denom2, numer2;
float a1 = a.x, a2 = b.x, a3 = c.x, b1 = a.y, b2 = b.y, b3 = c.y;
int i;
for(i=0; i < NUM; i++)
{
r1Sq = d_set[i].da * d_set[i].da;
r2Sq = d_set[i].db * d_set[i].db;
r3Sq = d_set[i].dc * d_set[i].dc;
numer1 = (a2 - a1) * (a3Sq + b3Sq - r3Sq) + (a1 - a3) * (a2Sq + b2Sq - r2Sq) + (a3 - a2) * (a1Sq + b1Sq - r1Sq);
denom1 = 2 * (b3 * (a2 - a1) + b2 * (a1 - a3) + b1 * (a3 - a2));
d_trail[i].y = numer1/denom1;
numer2 = r2Sq - r1Sq + a1Sq - a2Sq + b1Sq - b2Sq - 2 * (b1 - b2) * d_trail[i].y;
denom2 = 2 * (a1 - a2);
d_trail[i].x = numer2/denom2;
}
}
int main(int argc, char *argv[])
{
cudaEvent_t start, stop;
float etime;
int i, j=0;
float fx, fy, gx, gy, z = 5.0;
int NUM;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
if (argc != 2)
{
printf("Check you arguments!\n");
exit(1);
}
struct point a, b, c;
a.x = 1.67; a.y = 2.58;
b.x = 3.74; b.y = 2.08;
c.x = 5.12; c.y = 3.95;
struct point init;
init.x = 3.12;
init.y = 4.27;
NUM = atoi(argv[1]);
struct point trail[NUM], avg_trail[(NUM/4)], ret_avg_trail[(NUM/4)];
struct point *d_trail, *h_trail;
trail[0] = init;
srand(time(NULL));
for(i=1; i<NUM; i++)
{
gx = ((float)rand()/(float)(RAND_MAX)) * z;
gx = floorf(gx * 100) / 100;
gy = ((float)rand()/(float)(RAND_MAX)) * z;
gy = floorf(gy * 100) / 100;
trail[i].x = (floorf(trail[i-1].x * 100 + 0.5) / 100) + gx;
trail[i].y = (floorf(trail[i-1].y * 100 + 0.5) / 100) + gy;
}
for(i=0; i<(NUM/4); i++)
{
avg_trail[i].x = (trail[j].x + trail[j+1].x + trail[j+2].x + trail[j+3].x) / 4;
avg_trail[i].y = (trail[j].y + trail[j+1].y + trail[j+2].y + trail[j+3].y) / 4;
j += 4;
}
printf("\nAvg. Random Trail at Host\n");
for(i=0; i<(NUM/4); i++)
{
printf("(%f, %f)\n", avg_trail[i].x, avg_trail[i].y);
}
struct dist *set;
size_t size = NUM * sizeof(struct dist);
set = (struct dist *)malloc(size);
size_t sz = NUM * sizeof(struct point);
h_trail = (struct point *)malloc(sz);
for(i=0; i<NUM; i++)
{
fx = trail[i].x - a.x;
fy = trail[i].y - a.y;
set[i].da = eucli(fx, fy);
fx = trail[i].x - b.x;
fy = trail[i].y - b.y;
set[i].db = eucli(fx, fy);
fx = trail[i].x - c.x;
fy = trail[i].y - c.y;
set[i].dc = eucli(fx, fy);
}
struct dist *d_set;
cudaMalloc((void **) &d_set, size);
cudaMalloc((void **) &d_trail, sz);
cudaMemcpy(d_set, set, sizeof(struct dist)*NUM, cudaMemcpyHostToDevice);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, 0);
int nBlocks = devProp.multiProcessorCount;
int blockSize = devProp.warpSize;
printf("\nU: %d\n", nBlocks);
printf("\nV: %d\n", blockSize);
trilaterate <<< nBlocks, blockSize >>> (a, b, c, d_set, d_trail, NUM);
cudaMemcpy(h_trail, d_trail, sizeof(struct point)*NUM, cudaMemcpyDeviceToHost);
j=0;
for(i=0; i<(NUM/4); i++)
{
ret_avg_trail[i].x = (h_trail[j].x + h_trail[j+1].x + h_trail[j+2].x + h_trail[j+3].x) / 4;
ret_avg_trail[i].y = (h_trail[j].y + h_trail[j+1].y + h_trail[j+2].y + h_trail[j+3].y) / 4;
j += 4;
}
printf("\nAvg. Generated Trail at Device\n");
for(i=0; i<(NUM/4); i++)
{
printf("(%f, %f)\n", ret_avg_trail[i].x, ret_avg_trail[i].y);
}
printf("\n");
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&etime, start, stop);
printf("Time elapsed: %f ms\n", etime);
cudaEventDestroy(start);
cudaEventDestroy(stop);
free(set);
cudaFree(d_set);
cudaFree(d_trail);
free(h_trail); // h_trail was allocated with malloc, not cudaMalloc
return 0;
}
|
a5d4a94bd1977eca73e553d0eeb24ffc9343f427.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2019-2020 NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <claraparabricks/genomeworks/cudaaligner/cudaaligner.hpp>
#include <claraparabricks/genomeworks/utils/cudautils.hpp>
#include <claraparabricks/genomeworks/utils/signed_integer_utils.hpp>
#include <claraparabricks/genomeworks/utils/mathutils.hpp>
#include "../src/myers_gpu.cu"
#include "../src/needleman_wunsch_cpu.hpp"
#include "cudaaligner_test_cases.hpp"
#include <algorithm>
#include <gtest/gtest.h>
namespace claraparabricks
{
namespace genomeworks
{
namespace cudaaligner
{
namespace test
{
__global__ void
myers_compute_scores_edit_dist_banded_test_kernel(
batched_device_matrices<myers::WordType>::device_interface* pvi,
batched_device_matrices<myers::WordType>::device_interface* mvi,
batched_device_matrices<int32_t>::device_interface* scorei,
batched_device_matrices<myers::WordType>::device_interface* query_patternsi,
char const* target,
char const* query,
int32_t const target_size,
int32_t const query_size,
int32_t const band_width,
int32_t const p)
{
using myers::word_size;
using myers::WordType;
constexpr int32_t warp_size = 32;
const int32_t alignment_idx = 0;
const int32_t n_words = ceiling_divide(query_size, word_size);
device_matrix_view<WordType> query_pattern = query_patternsi->get_matrix_view(alignment_idx, n_words, 4);
for (int32_t idx = threadIdx.x; idx < n_words; idx += warp_size)
{
// TODO query load is inefficient
query_pattern(idx, 0) = myers::myers_generate_query_pattern('A', query, query_size, idx * word_size);
query_pattern(idx, 1) = myers::myers_generate_query_pattern('C', query, query_size, idx * word_size);
query_pattern(idx, 2) = myers::myers_generate_query_pattern('T', query, query_size, idx * word_size);
query_pattern(idx, 3) = myers::myers_generate_query_pattern('G', query, query_size, idx * word_size);
}
__syncwarp();
const int32_t n_words_band = ceiling_divide(band_width, word_size);
device_matrix_view<WordType> pv = pvi->get_matrix_view(alignment_idx, n_words_band, target_size + 1);
device_matrix_view<WordType> mv = mvi->get_matrix_view(alignment_idx, n_words_band, target_size + 1);
device_matrix_view<int32_t> score = scorei->get_matrix_view(alignment_idx, n_words_band, target_size + 1);
if (band_width - (n_words_band - 1) * word_size < 2)
{
// invalid band_width: we need at least two bits in the last word
// set everything to zero and return.
for (int32_t t = 0; t < target_size + 1; ++t)
{
for (int32_t idx = threadIdx.x; idx < n_words_band; idx += warp_size)
{
pv(idx, t) = 0;
mv(idx, t) = 0;
score(idx, t) = 0;
}
__syncwarp();
}
return;
}
int32_t diagonal_begin = -1;
int32_t diagonal_end = -1;
myers::myers_compute_scores_edit_dist_banded(diagonal_begin, diagonal_end, pv, mv, score, query_pattern, target, query, target_size, query_size, band_width, n_words_band, p, alignment_idx);
}
} // namespace test
namespace
{
int32_t popc(const myers::WordType x)
{
static_assert(sizeof(myers::WordType) == 4, "This function assumes sizeof(myers::WordType) == 4");
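// Count set bits with a 4-bit lookup table applied to each nibble of the 32-bit word.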
constexpr int32_t nbits[16] = {0, 1, 1, 2,
1, 2, 2, 3,
1, 2, 2, 3,
2, 3, 3, 4};
int32_t cnt = nbits[x & 0xf];
cnt += nbits[(x >> 4) & 0xf];
cnt += nbits[(x >> 8) & 0xf];
cnt += nbits[(x >> 12) & 0xf];
cnt += nbits[(x >> 16) & 0xf];
cnt += nbits[(x >> 20) & 0xf];
cnt += nbits[(x >> 24) & 0xf];
cnt += nbits[(x >> 28) & 0xf];
return cnt;
}
int32_t get_myers_score(const int32_t i, const int32_t j, matrix<myers::WordType> const& pv, matrix<myers::WordType> const& mv, matrix<int32_t> const& score, const myers::WordType last_entry_mask)
{
assert(i > 0); // row 0 is implicit, NW matrix is shifted by i -> i-1
const int32_t word_idx = (i - 1) / myers::word_size;
const int32_t bit_idx = (i - 1) % myers::word_size;
int32_t s = score(word_idx, j);
myers::WordType mask = (~myers::WordType(1)) << bit_idx;
if (word_idx == score.num_rows() - 1)
mask &= last_entry_mask;
s -= popc(mask & pv(word_idx, j));
s += popc(mask & mv(word_idx, j));
return s;
}
} // namespace
class TestMyersEditDistance : public ::testing::TestWithParam<TestCaseData>
{
};
TEST_P(TestMyersEditDistance, TestCases)
{
TestCaseData t = GetParam();
int32_t d = myers_compute_edit_distance(t.target, t.query);
matrix<int32_t> r = needleman_wunsch_build_score_matrix_naive(t.target, t.query);
int32_t reference = r(r.num_rows() - 1, r.num_cols() - 1);
ASSERT_EQ(d, reference);
}
class TestMyersScoreMatrix : public ::testing::TestWithParam<TestCaseData>
{
};
TEST_P(TestMyersScoreMatrix, TestCases)
{
TestCaseData t = GetParam();
matrix<int32_t> m = myers_get_full_score_matrix(t.target, t.query);
matrix<int32_t> r = needleman_wunsch_build_score_matrix_naive(t.target, t.query);
ASSERT_EQ(m.num_rows(), r.num_rows());
ASSERT_EQ(m.num_cols(), r.num_cols());
for (int32_t j = 0; j < m.num_cols(); ++j)
{
for (int32_t i = 0; i < m.num_rows(); ++i)
{
EXPECT_EQ(m(i, j), r(i, j)) << "index: (" << i << "," << j << ")";
}
}
}
class TestMyersBandedMatrixDeltas : public ::testing::TestWithParam<TestCaseData>
{
};
TEST_P(TestMyersBandedMatrixDeltas, TestCases)
{
// Test if adjacent matrix entries
// do not differ by more than delta = +/-1.
using cudautils::device_copy_n_async;
using cudautils::set_device_value;
using myers::word_size;
using myers::WordType;
TestCaseData t = GetParam();
// Skip tests for which myers_banded_gpu is not defined
if (get_size(t.query) == 0 || get_size(t.target) == 0)
return;
CudaStream stream = make_cuda_stream();
DefaultDeviceAllocator allocator = create_default_device_allocator();
const int32_t query_size = get_size<int32_t>(t.query);
const int32_t target_size = get_size<int32_t>(t.target);
device_buffer<char> query_d(query_size, allocator, stream.get());
device_buffer<char> target_d(target_size, allocator, stream.get());
device_copy_n_async(t.query.c_str(), query_size, query_d.data(), stream.get());
device_copy_n_async(t.target.c_str(), target_size, target_d.data(), stream.get());
GW_CU_CHECK_ERR(hipStreamSynchronize(stream.get()));
const int32_t max_distance_estimate = ::max(target_size, query_size) / 4;
int32_t p = min3(target_size, query_size, (max_distance_estimate - abs(target_size - query_size)) / 2);
int32_t band_width = min(1 + 2 * p + abs(target_size - query_size), query_size);
if (band_width % word_size == 1 && band_width != query_size) // we need at least two bits in the last word
{
p += 1;
band_width = min(1 + 2 * p + abs(target_size - query_size), query_size);
}
const int32_t n_words = ceiling_divide(query_size, word_size);
const int32_t n_words_band = ceiling_divide(band_width, word_size);
batched_device_matrices<myers::WordType> pvs(1, n_words_band * (target_size + 1), allocator, stream.get());
batched_device_matrices<myers::WordType> mvs(1, n_words_band * (target_size + 1), allocator, stream.get());
batched_device_matrices<int32_t> scores(1, n_words_band * (target_size + 1), allocator, stream.get());
batched_device_matrices<myers::WordType> query_patterns(1, n_words * 4, allocator, stream.get());
hipLaunchKernelGGL(( test::myers_compute_scores_edit_dist_banded_test_kernel), dim3(1), dim3(32), 0, stream.get(),
pvs.get_device_interface(), mvs.get_device_interface(),
scores.get_device_interface(), query_patterns.get_device_interface(),
target_d.data(), query_d.data(), target_size, query_size, band_width, p);
const int32_t n_rows = n_words_band;
const int32_t n_cols = target_size + 1;
const matrix<int32_t> score = scores.get_matrix(0, n_rows, n_cols, stream.get());
const matrix<myers::WordType> pv = pvs.get_matrix(0, n_rows, n_cols, stream.get());
const matrix<myers::WordType> mv = mvs.get_matrix(0, n_rows, n_cols, stream.get());
const WordType last_entry_mask = band_width % word_size != 0 ? (WordType(1) << (band_width % word_size)) - 1 : ~WordType(0);
// Check consistency along rows
int32_t last_first_col_score = 0;
for (int32_t i = 1; i < band_width + 1; ++i)
{
int32_t last_score = last_first_col_score;
for (int32_t j = 0; j < target_size + 1; ++j)
{
const int32_t this_score = get_myers_score(i, j, pv, mv, score, last_entry_mask);
EXPECT_LE(std::abs(last_score - this_score), 1) << " error at (" << i << "," << j << ")";
last_score = this_score;
if (j == 0)
{
last_first_col_score = this_score;
}
}
}
// Check consistency along cols
int32_t last_first_row_score = 1;
for (int32_t j = 0; j < target_size + 1; ++j)
{
int32_t last_score = last_first_row_score;
for (int32_t i = 1; i < band_width + 1; ++i)
{
const int32_t this_score = get_myers_score(i, j, pv, mv, score, last_entry_mask);
EXPECT_LE(std::abs(last_score - this_score), 1) << " error at (" << i << "," << j << ")";
last_score = this_score;
if (i == 1)
{
last_first_row_score = this_score;
}
}
}
}
INSTANTIATE_TEST_SUITE_P(TestMyersAlgorithm, TestMyersEditDistance, ::testing::ValuesIn(create_cudaaligner_test_cases()));
INSTANTIATE_TEST_SUITE_P(TestMyersAlgorithm, TestMyersScoreMatrix, ::testing::ValuesIn(create_cudaaligner_test_cases()));
INSTANTIATE_TEST_SUITE_P(TestMyersAlgorithm, TestMyersBandedMatrixDeltas, ::testing::ValuesIn(create_cudaaligner_test_cases()));
} // namespace cudaaligner
} // namespace genomeworks
} // namespace claraparabricks
| a5d4a94bd1977eca73e553d0eeb24ffc9343f427.cu | /*
* Copyright 2019-2020 NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <claraparabricks/genomeworks/cudaaligner/cudaaligner.hpp>
#include <claraparabricks/genomeworks/utils/cudautils.hpp>
#include <claraparabricks/genomeworks/utils/signed_integer_utils.hpp>
#include <claraparabricks/genomeworks/utils/mathutils.hpp>
#include "../src/myers_gpu.cu"
#include "../src/needleman_wunsch_cpu.hpp"
#include "cudaaligner_test_cases.hpp"
#include <algorithm>
#include <gtest/gtest.h>
namespace claraparabricks
{
namespace genomeworks
{
namespace cudaaligner
{
namespace test
{
__global__ void
myers_compute_scores_edit_dist_banded_test_kernel(
batched_device_matrices<myers::WordType>::device_interface* pvi,
batched_device_matrices<myers::WordType>::device_interface* mvi,
batched_device_matrices<int32_t>::device_interface* scorei,
batched_device_matrices<myers::WordType>::device_interface* query_patternsi,
char const* target,
char const* query,
int32_t const target_size,
int32_t const query_size,
int32_t const band_width,
int32_t const p)
{
using myers::word_size;
using myers::WordType;
constexpr int32_t warp_size = 32;
const int32_t alignment_idx = 0;
const int32_t n_words = ceiling_divide(query_size, word_size);
device_matrix_view<WordType> query_pattern = query_patternsi->get_matrix_view(alignment_idx, n_words, 4);
for (int32_t idx = threadIdx.x; idx < n_words; idx += warp_size)
{
// TODO query load is inefficient
query_pattern(idx, 0) = myers::myers_generate_query_pattern('A', query, query_size, idx * word_size);
query_pattern(idx, 1) = myers::myers_generate_query_pattern('C', query, query_size, idx * word_size);
query_pattern(idx, 2) = myers::myers_generate_query_pattern('T', query, query_size, idx * word_size);
query_pattern(idx, 3) = myers::myers_generate_query_pattern('G', query, query_size, idx * word_size);
}
__syncwarp();
const int32_t n_words_band = ceiling_divide(band_width, word_size);
device_matrix_view<WordType> pv = pvi->get_matrix_view(alignment_idx, n_words_band, target_size + 1);
device_matrix_view<WordType> mv = mvi->get_matrix_view(alignment_idx, n_words_band, target_size + 1);
device_matrix_view<int32_t> score = scorei->get_matrix_view(alignment_idx, n_words_band, target_size + 1);
if (band_width - (n_words_band - 1) * word_size < 2)
{
// invalid band_width: we need at least two bits in the last word
// set everything to zero and return.
for (int32_t t = 0; t < target_size + 1; ++t)
{
for (int32_t idx = threadIdx.x; idx < n_words_band; idx += warp_size)
{
pv(idx, t) = 0;
mv(idx, t) = 0;
score(idx, t) = 0;
}
__syncwarp();
}
return;
}
int32_t diagonal_begin = -1;
int32_t diagonal_end = -1;
myers::myers_compute_scores_edit_dist_banded(diagonal_begin, diagonal_end, pv, mv, score, query_pattern, target, query, target_size, query_size, band_width, n_words_band, p, alignment_idx);
}
} // namespace test
namespace
{
int32_t popc(const myers::WordType x)
{
static_assert(sizeof(myers::WordType) == 4, "This function assumes sizeof(myers::WordType) == 4");
constexpr int32_t nbits[16] = {0, 1, 1, 2,
1, 2, 2, 3,
1, 2, 2, 3,
2, 3, 3, 4};
int32_t cnt = nbits[x & 0xf];
cnt += nbits[(x >> 4) & 0xf];
cnt += nbits[(x >> 8) & 0xf];
cnt += nbits[(x >> 12) & 0xf];
cnt += nbits[(x >> 16) & 0xf];
cnt += nbits[(x >> 20) & 0xf];
cnt += nbits[(x >> 24) & 0xf];
cnt += nbits[(x >> 28) & 0xf];
return cnt;
}
int32_t get_myers_score(const int32_t i, const int32_t j, matrix<myers::WordType> const& pv, matrix<myers::WordType> const& mv, matrix<int32_t> const& score, const myers::WordType last_entry_mask)
{
assert(i > 0); // row 0 is implicit, NW matrix is shifted by i -> i-1
const int32_t word_idx = (i - 1) / myers::word_size;
const int32_t bit_idx = (i - 1) % myers::word_size;
int32_t s = score(word_idx, j);
myers::WordType mask = (~myers::WordType(1)) << bit_idx;
if (word_idx == score.num_rows() - 1)
mask &= last_entry_mask;
s -= popc(mask & pv(word_idx, j));
s += popc(mask & mv(word_idx, j));
return s;
}
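// Illustration of the reconstruction above (numbers chosen arbitrarily): score(w, j)
// holds the running score at the last row covered by word w, and get_myers_score
// undoes the +1 deltas (PV bits) and -1 deltas (MV bits) strictly above the
// requested bit. E.g. for i = 2 and some fixed j:
//   score(0, j) = 7, pv(0, j) = 0b0110, mv(0, j) = 0b1000
//   mask = (~WordType(1)) << 1 = 0b...1100
//   s = 7 - popc(0b0100) + popc(0b1000) = 7
// i.e. one pending +1 and one pending -1 above row 2 cancel out.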
} // namespace
class TestMyersEditDistance : public ::testing::TestWithParam<TestCaseData>
{
};
TEST_P(TestMyersEditDistance, TestCases)
{
TestCaseData t = GetParam();
int32_t d = myers_compute_edit_distance(t.target, t.query);
matrix<int32_t> r = needleman_wunsch_build_score_matrix_naive(t.target, t.query);
int32_t reference = r(r.num_rows() - 1, r.num_cols() - 1);
ASSERT_EQ(d, reference);
}
class TestMyersScoreMatrix : public ::testing::TestWithParam<TestCaseData>
{
};
TEST_P(TestMyersScoreMatrix, TestCases)
{
TestCaseData t = GetParam();
matrix<int32_t> m = myers_get_full_score_matrix(t.target, t.query);
matrix<int32_t> r = needleman_wunsch_build_score_matrix_naive(t.target, t.query);
ASSERT_EQ(m.num_rows(), r.num_rows());
ASSERT_EQ(m.num_cols(), r.num_cols());
for (int32_t j = 0; j < m.num_cols(); ++j)
{
for (int32_t i = 0; i < m.num_rows(); ++i)
{
EXPECT_EQ(m(i, j), r(i, j)) << "index: (" << i << "," << j << ")";
}
}
}
class TestMyersBandedMatrixDeltas : public ::testing::TestWithParam<TestCaseData>
{
};
TEST_P(TestMyersBandedMatrixDeltas, TestCases)
{
// Test if adjacent matrix entries
// do not differ by more than delta = +/-1.
using cudautils::device_copy_n_async;
using cudautils::set_device_value;
using myers::word_size;
using myers::WordType;
TestCaseData t = GetParam();
// Skip tests for which myers_banded_gpu is not defined
if (get_size(t.query) == 0 || get_size(t.target) == 0)
return;
CudaStream stream = make_cuda_stream();
DefaultDeviceAllocator allocator = create_default_device_allocator();
const int32_t query_size = get_size<int32_t>(t.query);
const int32_t target_size = get_size<int32_t>(t.target);
device_buffer<char> query_d(query_size, allocator, stream.get());
device_buffer<char> target_d(target_size, allocator, stream.get());
device_copy_n_async(t.query.c_str(), query_size, query_d.data(), stream.get());
device_copy_n_async(t.target.c_str(), target_size, target_d.data(), stream.get());
GW_CU_CHECK_ERR(cudaStreamSynchronize(stream.get()));
const int32_t max_distance_estimate = std::max(target_size, query_size) / 4;
int32_t p = min3(target_size, query_size, (max_distance_estimate - abs(target_size - query_size)) / 2);
int32_t band_width = min(1 + 2 * p + abs(target_size - query_size), query_size);
if (band_width % word_size == 1 && band_width != query_size) // we need at least two bits in the last word
{
p += 1;
band_width = min(1 + 2 * p + abs(target_size - query_size), query_size);
}
const int32_t n_words = ceiling_divide(query_size, word_size);
const int32_t n_words_band = ceiling_divide(band_width, word_size);
batched_device_matrices<myers::WordType> pvs(1, n_words_band * (target_size + 1), allocator, stream.get());
batched_device_matrices<myers::WordType> mvs(1, n_words_band * (target_size + 1), allocator, stream.get());
batched_device_matrices<int32_t> scores(1, n_words_band * (target_size + 1), allocator, stream.get());
batched_device_matrices<myers::WordType> query_patterns(1, n_words * 4, allocator, stream.get());
test::myers_compute_scores_edit_dist_banded_test_kernel<<<1, 32, 0, stream.get()>>>(
pvs.get_device_interface(), mvs.get_device_interface(),
scores.get_device_interface(), query_patterns.get_device_interface(),
target_d.data(), query_d.data(), target_size, query_size, band_width, p);
const int32_t n_rows = n_words_band;
const int32_t n_cols = target_size + 1;
const matrix<int32_t> score = scores.get_matrix(0, n_rows, n_cols, stream.get());
const matrix<myers::WordType> pv = pvs.get_matrix(0, n_rows, n_cols, stream.get());
const matrix<myers::WordType> mv = mvs.get_matrix(0, n_rows, n_cols, stream.get());
const WordType last_entry_mask = band_width % word_size != 0 ? (WordType(1) << (band_width % word_size)) - 1 : ~WordType(0);
// Check consistency along rows
int32_t last_first_col_score = 0;
for (int32_t i = 1; i < band_width + 1; ++i)
{
int32_t last_score = last_first_col_score;
for (int32_t j = 0; j < target_size + 1; ++j)
{
const int32_t this_score = get_myers_score(i, j, pv, mv, score, last_entry_mask);
EXPECT_LE(std::abs(last_score - this_score), 1) << " error at (" << i << "," << j << ")";
last_score = this_score;
if (j == 0)
{
last_first_col_score = this_score;
}
}
}
// Check consistency along cols
int32_t last_first_row_score = 1;
for (int32_t j = 0; j < target_size + 1; ++j)
{
int32_t last_score = last_first_row_score;
for (int32_t i = 1; i < band_width + 1; ++i)
{
const int32_t this_score = get_myers_score(i, j, pv, mv, score, last_entry_mask);
EXPECT_LE(std::abs(last_score - this_score), 1) << " error at (" << i << "," << j << ")";
last_score = this_score;
if (i == 1)
{
last_first_row_score = this_score;
}
}
}
}
INSTANTIATE_TEST_SUITE_P(TestMyersAlgorithm, TestMyersEditDistance, ::testing::ValuesIn(create_cudaaligner_test_cases()));
INSTANTIATE_TEST_SUITE_P(TestMyersAlgorithm, TestMyersScoreMatrix, ::testing::ValuesIn(create_cudaaligner_test_cases()));
INSTANTIATE_TEST_SUITE_P(TestMyersAlgorithm, TestMyersBandedMatrixDeltas, ::testing::ValuesIn(create_cudaaligner_test_cases()));
} // namespace cudaaligner
} // namespace genomeworks
} // namespace claraparabricks
|
4f0d35f9b8a5f691aec646204e245d14f4021d1c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//-------------------------------------------------------------------------//
// //
// This benchmark is a serial C version of the NPB SP code. This C //
// version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the serial Fortran versions in //
// "NPB3.3-SER" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this C version to [email protected] //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: [email protected] //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include <assert.h>
#include <omp.h>
#include "header.h"
#include "initialize_kernels.cu"
//---------------------------------------------------------------------
// this function performs the solution of the approximate factorization
// step in the y-direction for all five matrix components
// simultaneously. The Thomas algorithm is employed to solve the
// systems for the y-lines. Boundary conditions are non-periodic
//---------------------------------------------------------------------
__global__ void y_solve_kernel(
dim3 gridOffset,
int *grid_points/*[3]*/,
int nx2, int ny2, int nz2,
double (*vs )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*rho_i )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*speed )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*rhs )/*[KMAX]*/[5][JMAXP+1][IMAXP+1],
double dtty1, double dtty2, double comz1, double comz4, double comz5, double comz6, double c2dtty1
) {
int i = blockDim.x * blockIdx.x + threadIdx.x + gridOffset.x;
int k = blockDim.z * blockIdx.z + threadIdx.z + gridOffset.z;
double cv [PROBLEM_SIZE];
double rhoq[PROBLEM_SIZE];
double lhs [IMAXP+1][5];
double lhsp[IMAXP+1][5];
double lhsm[IMAXP+1][5];
int j, j1, j2, m;
double ru1, fac1, fac2;
if (k >= 1 && k <= grid_points[2]-2) {
lhsinitj_kernel(ny2+1, nx2, lhs, lhsp, lhsm);
//---------------------------------------------------------------------
// Computes the left hand side for the three y-factors
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// first fill the lhs for the u-eigenvalue
//---------------------------------------------------------------------
if (i >= 1 && i <= grid_points[0]-2) {
for (j = 0; j <= grid_points[1]-1; j++) {
ru1 = c3c4*rho_i[k][j][i];
cv[j] = vs[k][j][i];
rhoq[j] = max(max(dy3+con43*ru1, dy5+c1c5*ru1), max(dymax+ru1, dy1));
}
for (j = 1; j <= grid_points[1]-2; j++) {
lhs[j][0] = 0.0;
lhs[j][1] = -dtty2 * cv[j-1] - dtty1 * rhoq[j-1];
lhs[j][2] = 1.0 + c2dtty1 * rhoq[j];
lhs[j][3] = dtty2 * cv[j+1] - dtty1 * rhoq[j+1];
lhs[j][4] = 0.0;
}
}
//---------------------------------------------------------------------
// add fourth order dissipation
//---------------------------------------------------------------------
if (i >= 1 && i <= grid_points[0]-2) {
j = 1;
lhs[j][2] = lhs[j][2] + comz5;
lhs[j][3] = lhs[j][3] - comz4;
lhs[j][4] = lhs[j][4] + comz1;
lhs[j+1][1] = lhs[j+1][1] - comz4;
lhs[j+1][2] = lhs[j+1][2] + comz6;
lhs[j+1][3] = lhs[j+1][3] - comz4;
lhs[j+1][4] = lhs[j+1][4] + comz1;
}
for (j = 3; j <= grid_points[1]-4; j++) {
if (i >= 1 && i <= grid_points[0]-2) {
lhs[j][0] = lhs[j][0] + comz1;
lhs[j][1] = lhs[j][1] - comz4;
lhs[j][2] = lhs[j][2] + comz6;
lhs[j][3] = lhs[j][3] - comz4;
lhs[j][4] = lhs[j][4] + comz1;
}
}
if (i >= 1 && i <= grid_points[0]-2) {
j = grid_points[1]-3;
lhs[j][0] = lhs[j][0] + comz1;
lhs[j][1] = lhs[j][1] - comz4;
lhs[j][2] = lhs[j][2] + comz6;
lhs[j][3] = lhs[j][3] - comz4;
lhs[j+1][0] = lhs[j+1][0] + comz1;
lhs[j+1][1] = lhs[j+1][1] - comz4;
lhs[j+1][2] = lhs[j+1][2] + comz5;
}
//---------------------------------------------------------------------
// subsequently, do the other two factors
//---------------------------------------------------------------------
for (j = 1; j <= grid_points[1]-2; j++) {
if (i >= 1 && i <= grid_points[0]-2) {
lhsp[j][0] = lhs[j][0];
lhsp[j][1] = lhs[j][1] - dtty2 * speed[k][j-1][i];
lhsp[j][2] = lhs[j][2];
lhsp[j][3] = lhs[j][3] + dtty2 * speed[k][j+1][i];
lhsp[j][4] = lhs[j][4];
lhsm[j][0] = lhs[j][0];
lhsm[j][1] = lhs[j][1] + dtty2 * speed[k][j-1][i];
lhsm[j][2] = lhs[j][2];
lhsm[j][3] = lhs[j][3] - dtty2 * speed[k][j+1][i];
lhsm[j][4] = lhs[j][4];
}
}
//---------------------------------------------------------------------
// FORWARD ELIMINATION
//---------------------------------------------------------------------
for (j = 0; j <= grid_points[1]-3; j++) {
j1 = j + 1;
j2 = j + 2;
if (i >= 1 && i <= grid_points[0]-2) {
fac1 = 1.0/lhs[j][2];
lhs[j][3] = fac1*lhs[j][3];
lhs[j][4] = fac1*lhs[j][4];
for (m = 0; m < 3; m++) {
rhs[k][m][j][i] = fac1*rhs[k][m][j][i];
}
lhs[j1][2] = lhs[j1][2] - lhs[j1][1]*lhs[j][3];
lhs[j1][3] = lhs[j1][3] - lhs[j1][1]*lhs[j][4];
for (m = 0; m < 3; m++) {
rhs[k][m][j1][i] = rhs[k][m][j1][i] - lhs[j1][1]*rhs[k][m][j][i];
}
lhs[j2][1] = lhs[j2][1] - lhs[j2][0]*lhs[j][3];
lhs[j2][2] = lhs[j2][2] - lhs[j2][0]*lhs[j][4];
for (m = 0; m < 3; m++) {
rhs[k][m][j2][i] = rhs[k][m][j2][i] - lhs[j2][0]*rhs[k][m][j][i];
}
}
}
//---------------------------------------------------------------------
// The last two rows in this grid block are a bit different,
// since they do not have two more rows available for the
// elimination of off-diagonal entries
//---------------------------------------------------------------------
j = grid_points[1]-2;
j1 = grid_points[1]-1;
if (i >= 1 && i <= grid_points[0]-2) {
fac1 = 1.0/lhs[j][2];
lhs[j][3] = fac1*lhs[j][3];
lhs[j][4] = fac1*lhs[j][4];
for (m = 0; m < 3; m++) {
rhs[k][m][j][i] = fac1*rhs[k][m][j][i];
}
lhs[j1][2] = lhs[j1][2] - lhs[j1][1]*lhs[j][3];
lhs[j1][3] = lhs[j1][3] - lhs[j1][1]*lhs[j][4];
for (m = 0; m < 3; m++) {
rhs[k][m][j1][i] = rhs[k][m][j1][i] - lhs[j1][1]*rhs[k][m][j][i];
}
//---------------------------------------------------------------------
// scale the last row immediately
//---------------------------------------------------------------------
fac2 = 1.0/lhs[j1][2];
for (m = 0; m < 3; m++) {
rhs[k][m][j1][i] = fac2*rhs[k][m][j1][i];
}
}
//---------------------------------------------------------------------
// do the u+c and the u-c factors
//---------------------------------------------------------------------
for (j = 0; j <= grid_points[1]-3; j++) {
j1 = j + 1;
j2 = j + 2;
if (i >= 1 && i <= grid_points[0]-2) {
m = 3;
fac1 = 1.0/lhsp[j][2];
lhsp[j][3] = fac1*lhsp[j][3];
lhsp[j][4] = fac1*lhsp[j][4];
rhs[k][m][j][i] = fac1*rhs[k][m][j][i];
lhsp[j1][2] = lhsp[j1][2] - lhsp[j1][1]*lhsp[j][3];
lhsp[j1][3] = lhsp[j1][3] - lhsp[j1][1]*lhsp[j][4];
rhs[k][m][j1][i] = rhs[k][m][j1][i] - lhsp[j1][1]*rhs[k][m][j][i];
lhsp[j2][1] = lhsp[j2][1] - lhsp[j2][0]*lhsp[j][3];
lhsp[j2][2] = lhsp[j2][2] - lhsp[j2][0]*lhsp[j][4];
rhs[k][m][j2][i] = rhs[k][m][j2][i] - lhsp[j2][0]*rhs[k][m][j][i];
m = 4;
fac1 = 1.0/lhsm[j][2];
lhsm[j][3] = fac1*lhsm[j][3];
lhsm[j][4] = fac1*lhsm[j][4];
rhs[k][m][j][i] = fac1*rhs[k][m][j][i];
lhsm[j1][2] = lhsm[j1][2] - lhsm[j1][1]*lhsm[j][3];
lhsm[j1][3] = lhsm[j1][3] - lhsm[j1][1]*lhsm[j][4];
rhs[k][m][j1][i] = rhs[k][m][j1][i] - lhsm[j1][1]*rhs[k][m][j][i];
lhsm[j2][1] = lhsm[j2][1] - lhsm[j2][0]*lhsm[j][3];
lhsm[j2][2] = lhsm[j2][2] - lhsm[j2][0]*lhsm[j][4];
rhs[k][m][j2][i] = rhs[k][m][j2][i] - lhsm[j2][0]*rhs[k][m][j][i];
}
}
//---------------------------------------------------------------------
// And again the last two rows separately
//---------------------------------------------------------------------
j = grid_points[1]-2;
j1 = grid_points[1]-1;
if (i >= 1 && i <= grid_points[0]-2) {
m = 3;
fac1 = 1.0/lhsp[j][2];
lhsp[j][3] = fac1*lhsp[j][3];
lhsp[j][4] = fac1*lhsp[j][4];
rhs[k][m][j][i] = fac1*rhs[k][m][j][i];
lhsp[j1][2] = lhsp[j1][2] - lhsp[j1][1]*lhsp[j][3];
lhsp[j1][3] = lhsp[j1][3] - lhsp[j1][1]*lhsp[j][4];
rhs[k][m][j1][i] = rhs[k][m][j1][i] - lhsp[j1][1]*rhs[k][m][j][i];
m = 4;
fac1 = 1.0/lhsm[j][2];
lhsm[j][3] = fac1*lhsm[j][3];
lhsm[j][4] = fac1*lhsm[j][4];
rhs[k][m][j][i] = fac1*rhs[k][m][j][i];
lhsm[j1][2] = lhsm[j1][2] - lhsm[j1][1]*lhsm[j][3];
lhsm[j1][3] = lhsm[j1][3] - lhsm[j1][1]*lhsm[j][4];
rhs[k][m][j1][i] = rhs[k][m][j1][i] - lhsm[j1][1]*rhs[k][m][j][i];
//---------------------------------------------------------------------
// Scale the last row immediately
//---------------------------------------------------------------------
rhs[k][3][j1][i] = rhs[k][3][j1][i]/lhsp[j1][2];
rhs[k][4][j1][i] = rhs[k][4][j1][i]/lhsm[j1][2];
}
//---------------------------------------------------------------------
// BACKSUBSTITUTION
//---------------------------------------------------------------------
j = grid_points[1]-2;
j1 = grid_points[1]-1;
if (i >= 1 && i <= grid_points[0]-2) {
for (m = 0; m < 3; m++) {
rhs[k][m][j][i] = rhs[k][m][j][i] - lhs[j][3]*rhs[k][m][j1][i];
}
rhs[k][3][j][i] = rhs[k][3][j][i] - lhsp[j][3]*rhs[k][3][j1][i];
rhs[k][4][j][i] = rhs[k][4][j][i] - lhsm[j][3]*rhs[k][4][j1][i];
}
//---------------------------------------------------------------------
// The first three factors
//---------------------------------------------------------------------
for (j = grid_points[1]-3; j >= 0; j--) {
j1 = j + 1;
j2 = j + 2;
if (i >= 1 && i <= grid_points[0]-2) {
for (m = 0; m < 3; m++) {
rhs[k][m][j][i] = rhs[k][m][j][i] -
lhs[j][3]*rhs[k][m][j1][i] -
lhs[j][4]*rhs[k][m][j2][i];
}
//-------------------------------------------------------------------
// And the remaining two
//-------------------------------------------------------------------
rhs[k][3][j][i] = rhs[k][3][j][i] -
lhsp[j][3]*rhs[k][3][j1][i] -
lhsp[j][4]*rhs[k][3][j2][i];
rhs[k][4][j][i] = rhs[k][4][j][i] -
lhsm[j][3]*rhs[k][4][j1][i] -
lhsm[j][4]*rhs[k][4][j2][i];
}
}
}
}
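//---------------------------------------------------------------------
// For reference: the forward elimination / back substitution above is the
// scalar Thomas algorithm applied to each y-line. A minimal sketch for a
// single tridiagonal system a[i]*x[i-1] + b[i]*x[i] + c[i]*x[i+1] = d[i]
// (names are illustrative; the kernel keeps its factors in the five-wide
// lhs/lhsp/lhsm rows and its right-hand sides in rhs):
//
//   for (i = 1; i < n; i++) {            // forward elimination
//     double w = a[i] / b[i-1];
//     b[i] = b[i] - w * c[i-1];
//     d[i] = d[i] - w * d[i-1];
//   }
//   x[n-1] = d[n-1] / b[n-1];            // back substitution
//   for (i = n-2; i >= 0; i--)
//     x[i] = (d[i] - c[i] * x[i+1]) / b[i];
//---------------------------------------------------------------------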
void y_solve() {
if (timeron) timer_start(t_ysolve);
hipLaunchKernelGGL(( y_solve_kernel) , dim3(gridDimXZ), dim3(blockDimXZ) , 0, 0,
gridOffset,
dev_grid_points[device],
nx2, ny2, nz2,
dev_vs[device], dev_rho_i[device], dev_speed[device], dev_rhs[device],
dtty1, dtty2, comz1, comz4, comz5, comz6, c2dtty1
);
if (timeron) timer_stop(t_ysolve);
pinvr();
}
| 4f0d35f9b8a5f691aec646204e245d14f4021d1c.cu | //-------------------------------------------------------------------------//
// //
// This benchmark is a serial C version of the NPB SP code. This C //
// version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the serial Fortran versions in //
// "NPB3.3-SER" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this C version to [email protected] //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: [email protected] //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include <assert.h>
#include <omp.h>
#include "header.h"
#include "initialize_kernels.cu"
//---------------------------------------------------------------------
// this function performs the solution of the approximate factorization
// step in the y-direction for all five matrix components
// simultaneously. The Thomas algorithm is employed to solve the
// systems for the y-lines. Boundary conditions are non-periodic
//---------------------------------------------------------------------
__global__ void y_solve_kernel(
dim3 gridOffset,
int *grid_points/*[3]*/,
int nx2, int ny2, int nz2,
double (*vs )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*rho_i )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*speed )/*[KMAX]*/[JMAXP+1][IMAXP+1],
double (*rhs )/*[KMAX]*/[5][JMAXP+1][IMAXP+1],
double dtty1, double dtty2, double comz1, double comz4, double comz5, double comz6, double c2dtty1
) {
int i = blockDim.x * blockIdx.x + threadIdx.x + gridOffset.x;
int k = blockDim.z * blockIdx.z + threadIdx.z + gridOffset.z;
double cv [PROBLEM_SIZE];
double rhoq[PROBLEM_SIZE];
double lhs [IMAXP+1][5];
double lhsp[IMAXP+1][5];
double lhsm[IMAXP+1][5];
int j, j1, j2, m;
double ru1, fac1, fac2;
if (k >= 1 && k <= grid_points[2]-2) {
lhsinitj_kernel(ny2+1, nx2, lhs, lhsp, lhsm);
//---------------------------------------------------------------------
// Computes the left hand side for the three y-factors
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// first fill the lhs for the u-eigenvalue
//---------------------------------------------------------------------
if (i >= 1 && i <= grid_points[0]-2) {
for (j = 0; j <= grid_points[1]-1; j++) {
ru1 = c3c4*rho_i[k][j][i];
cv[j] = vs[k][j][i];
rhoq[j] = max(max(dy3+con43*ru1, dy5+c1c5*ru1), max(dymax+ru1, dy1));
}
for (j = 1; j <= grid_points[1]-2; j++) {
lhs[j][0] = 0.0;
lhs[j][1] = -dtty2 * cv[j-1] - dtty1 * rhoq[j-1];
lhs[j][2] = 1.0 + c2dtty1 * rhoq[j];
lhs[j][3] = dtty2 * cv[j+1] - dtty1 * rhoq[j+1];
lhs[j][4] = 0.0;
}
}
//---------------------------------------------------------------------
// add fourth order dissipation
//---------------------------------------------------------------------
if (i >= 1 && i <= grid_points[0]-2) {
j = 1;
lhs[j][2] = lhs[j][2] + comz5;
lhs[j][3] = lhs[j][3] - comz4;
lhs[j][4] = lhs[j][4] + comz1;
lhs[j+1][1] = lhs[j+1][1] - comz4;
lhs[j+1][2] = lhs[j+1][2] + comz6;
lhs[j+1][3] = lhs[j+1][3] - comz4;
lhs[j+1][4] = lhs[j+1][4] + comz1;
}
for (j = 3; j <= grid_points[1]-4; j++) {
if (i >= 1 && i <= grid_points[0]-2) {
lhs[j][0] = lhs[j][0] + comz1;
lhs[j][1] = lhs[j][1] - comz4;
lhs[j][2] = lhs[j][2] + comz6;
lhs[j][3] = lhs[j][3] - comz4;
lhs[j][4] = lhs[j][4] + comz1;
}
}
if (i >= 1 && i <= grid_points[0]-2) {
j = grid_points[1]-3;
lhs[j][0] = lhs[j][0] + comz1;
lhs[j][1] = lhs[j][1] - comz4;
lhs[j][2] = lhs[j][2] + comz6;
lhs[j][3] = lhs[j][3] - comz4;
lhs[j+1][0] = lhs[j+1][0] + comz1;
lhs[j+1][1] = lhs[j+1][1] - comz4;
lhs[j+1][2] = lhs[j+1][2] + comz5;
}
//---------------------------------------------------------------------
// subsequently, do the other two factors
//---------------------------------------------------------------------
for (j = 1; j <= grid_points[1]-2; j++) {
if (i >= 1 && i <= grid_points[0]-2) {
lhsp[j][0] = lhs[j][0];
lhsp[j][1] = lhs[j][1] - dtty2 * speed[k][j-1][i];
lhsp[j][2] = lhs[j][2];
lhsp[j][3] = lhs[j][3] + dtty2 * speed[k][j+1][i];
lhsp[j][4] = lhs[j][4];
lhsm[j][0] = lhs[j][0];
lhsm[j][1] = lhs[j][1] + dtty2 * speed[k][j-1][i];
lhsm[j][2] = lhs[j][2];
lhsm[j][3] = lhs[j][3] - dtty2 * speed[k][j+1][i];
lhsm[j][4] = lhs[j][4];
}
}
//---------------------------------------------------------------------
// FORWARD ELIMINATION
//---------------------------------------------------------------------
for (j = 0; j <= grid_points[1]-3; j++) {
j1 = j + 1;
j2 = j + 2;
if (i >= 1 && i <= grid_points[0]-2) {
fac1 = 1.0/lhs[j][2];
lhs[j][3] = fac1*lhs[j][3];
lhs[j][4] = fac1*lhs[j][4];
for (m = 0; m < 3; m++) {
rhs[k][m][j][i] = fac1*rhs[k][m][j][i];
}
lhs[j1][2] = lhs[j1][2] - lhs[j1][1]*lhs[j][3];
lhs[j1][3] = lhs[j1][3] - lhs[j1][1]*lhs[j][4];
for (m = 0; m < 3; m++) {
rhs[k][m][j1][i] = rhs[k][m][j1][i] - lhs[j1][1]*rhs[k][m][j][i];
}
lhs[j2][1] = lhs[j2][1] - lhs[j2][0]*lhs[j][3];
lhs[j2][2] = lhs[j2][2] - lhs[j2][0]*lhs[j][4];
for (m = 0; m < 3; m++) {
rhs[k][m][j2][i] = rhs[k][m][j2][i] - lhs[j2][0]*rhs[k][m][j][i];
}
}
}
//---------------------------------------------------------------------
// The last two rows in this grid block are a bit different,
// since they do not have two more rows available for the
// elimination of off-diagonal entries
//---------------------------------------------------------------------
j = grid_points[1]-2;
j1 = grid_points[1]-1;
if (i >= 1 && i <= grid_points[0]-2) {
fac1 = 1.0/lhs[j][2];
lhs[j][3] = fac1*lhs[j][3];
lhs[j][4] = fac1*lhs[j][4];
for (m = 0; m < 3; m++) {
rhs[k][m][j][i] = fac1*rhs[k][m][j][i];
}
lhs[j1][2] = lhs[j1][2] - lhs[j1][1]*lhs[j][3];
lhs[j1][3] = lhs[j1][3] - lhs[j1][1]*lhs[j][4];
for (m = 0; m < 3; m++) {
rhs[k][m][j1][i] = rhs[k][m][j1][i] - lhs[j1][1]*rhs[k][m][j][i];
}
//---------------------------------------------------------------------
// scale the last row immediately
//---------------------------------------------------------------------
fac2 = 1.0/lhs[j1][2];
for (m = 0; m < 3; m++) {
rhs[k][m][j1][i] = fac2*rhs[k][m][j1][i];
}
}
//---------------------------------------------------------------------
// for (the u+c and the u-c factors
//---------------------------------------------------------------------
for (j = 0; j <= grid_points[1]-3; j++) {
j1 = j + 1;
j2 = j + 2;
if (i >= 1 && i <= grid_points[0]-2) {
m = 3;
fac1 = 1.0/lhsp[j][2];
lhsp[j][3] = fac1*lhsp[j][3];
lhsp[j][4] = fac1*lhsp[j][4];
rhs[k][m][j][i] = fac1*rhs[k][m][j][i];
lhsp[j1][2] = lhsp[j1][2] - lhsp[j1][1]*lhsp[j][3];
lhsp[j1][3] = lhsp[j1][3] - lhsp[j1][1]*lhsp[j][4];
rhs[k][m][j1][i] = rhs[k][m][j1][i] - lhsp[j1][1]*rhs[k][m][j][i];
lhsp[j2][1] = lhsp[j2][1] - lhsp[j2][0]*lhsp[j][3];
lhsp[j2][2] = lhsp[j2][2] - lhsp[j2][0]*lhsp[j][4];
rhs[k][m][j2][i] = rhs[k][m][j2][i] - lhsp[j2][0]*rhs[k][m][j][i];
m = 4;
fac1 = 1.0/lhsm[j][2];
lhsm[j][3] = fac1*lhsm[j][3];
lhsm[j][4] = fac1*lhsm[j][4];
rhs[k][m][j][i] = fac1*rhs[k][m][j][i];
lhsm[j1][2] = lhsm[j1][2] - lhsm[j1][1]*lhsm[j][3];
lhsm[j1][3] = lhsm[j1][3] - lhsm[j1][1]*lhsm[j][4];
rhs[k][m][j1][i] = rhs[k][m][j1][i] - lhsm[j1][1]*rhs[k][m][j][i];
lhsm[j2][1] = lhsm[j2][1] - lhsm[j2][0]*lhsm[j][3];
lhsm[j2][2] = lhsm[j2][2] - lhsm[j2][0]*lhsm[j][4];
rhs[k][m][j2][i] = rhs[k][m][j2][i] - lhsm[j2][0]*rhs[k][m][j][i];
}
}
//---------------------------------------------------------------------
// And again the last two rows separately
//---------------------------------------------------------------------
j = grid_points[1]-2;
j1 = grid_points[1]-1;
if (i >= 1 && i <= grid_points[0]-2) {
m = 3;
fac1 = 1.0/lhsp[j][2];
lhsp[j][3] = fac1*lhsp[j][3];
lhsp[j][4] = fac1*lhsp[j][4];
rhs[k][m][j][i] = fac1*rhs[k][m][j][i];
lhsp[j1][2] = lhsp[j1][2] - lhsp[j1][1]*lhsp[j][3];
lhsp[j1][3] = lhsp[j1][3] - lhsp[j1][1]*lhsp[j][4];
rhs[k][m][j1][i] = rhs[k][m][j1][i] - lhsp[j1][1]*rhs[k][m][j][i];
m = 4;
fac1 = 1.0/lhsm[j][2];
lhsm[j][3] = fac1*lhsm[j][3];
lhsm[j][4] = fac1*lhsm[j][4];
rhs[k][m][j][i] = fac1*rhs[k][m][j][i];
lhsm[j1][2] = lhsm[j1][2] - lhsm[j1][1]*lhsm[j][3];
lhsm[j1][3] = lhsm[j1][3] - lhsm[j1][1]*lhsm[j][4];
rhs[k][m][j1][i] = rhs[k][m][j1][i] - lhsm[j1][1]*rhs[k][m][j][i];
//---------------------------------------------------------------------
// Scale the last row immediately
//---------------------------------------------------------------------
rhs[k][3][j1][i] = rhs[k][3][j1][i]/lhsp[j1][2];
rhs[k][4][j1][i] = rhs[k][4][j1][i]/lhsm[j1][2];
}
//---------------------------------------------------------------------
// BACKSUBSTITUTION
//---------------------------------------------------------------------
j = grid_points[1]-2;
j1 = grid_points[1]-1;
if (i >= 1 && i <= grid_points[0]-2) {
for (m = 0; m < 3; m++) {
rhs[k][m][j][i] = rhs[k][m][j][i] - lhs[j][3]*rhs[k][m][j1][i];
}
rhs[k][3][j][i] = rhs[k][3][j][i] - lhsp[j][3]*rhs[k][3][j1][i];
rhs[k][4][j][i] = rhs[k][4][j][i] - lhsm[j][3]*rhs[k][4][j1][i];
}
//---------------------------------------------------------------------
// The first three factors
//---------------------------------------------------------------------
for (j = grid_points[1]-3; j >= 0; j--) {
j1 = j + 1;
j2 = j + 2;
if (i >= 1 && i <= grid_points[0]-2) {
for (m = 0; m < 3; m++) {
rhs[k][m][j][i] = rhs[k][m][j][i] -
lhs[j][3]*rhs[k][m][j1][i] -
lhs[j][4]*rhs[k][m][j2][i];
}
//-------------------------------------------------------------------
// And the remaining two
//-------------------------------------------------------------------
rhs[k][3][j][i] = rhs[k][3][j][i] -
lhsp[j][3]*rhs[k][3][j1][i] -
lhsp[j][4]*rhs[k][3][j2][i];
rhs[k][4][j][i] = rhs[k][4][j][i] -
lhsm[j][3]*rhs[k][4][j1][i] -
lhsm[j][4]*rhs[k][4][j2][i];
}
}
}
}
void y_solve() {
if (timeron) timer_start(t_ysolve);
y_solve_kernel <<< gridDimXZ, blockDimXZ >>> (
gridOffset,
dev_grid_points[device],
nx2, ny2, nz2,
dev_vs[device], dev_rho_i[device], dev_speed[device], dev_rhs[device],
dtty1, dtty2, comz1, comz4, comz5, comz6, c2dtty1
);
if (timeron) timer_stop(t_ysolve);
pinvr();
}
|
5a8465eba305c5bb467476fa023b97345c2857cc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "tuple.h"
#define DISTANCE 1
extern "C" {
__device__
bool eval(TUPLE rt,TUPLE lt){
//double dis = DISTANCE * DISTANCE;
/*
double temp = 0;
double temp2 = 0;
for(uint i = 0; i<VAL_NUM ; i++){
temp2 = rt.val[i]-lt.val[i];
temp += temp2 * temp2;
}
return temp < DISTANCE * DISTANCE;
*/
return rt.val[0]==lt.val[0];
}
__global__
void count(
TUPLE *lt,
TUPLE *rt,
int *count,
int ltn,
int rtn
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int k = blockIdx.y * gridDim.x * blockDim.x;
/*
transport tuple data to shared memory from global memory
*/
if(i<ltn){
/*
__shared__ TUPLE Tright[BLOCK_SIZE_Y];
for(uint j=0;threadIdx.x+j*BLOCK_SIZE_X<BLOCK_SIZE_Y&&(threadIdx.x+j*BLOCK_SIZE_X+BLOCK_SIZE_Y*blockIdx.y)<rtn;j++){
Tright[threadIdx.x + j*BLOCK_SIZE_X] = rt[threadIdx.x + j*BLOCK_SIZE_X + BLOCK_SIZE_Y * blockIdx.y];
}
__syncthreads();
*/
/*
count loop
*/
//TUPLE Tleft = lt[i];
int rtn_g = rtn;
//uint mcount = 0;
for(uint j = 0; j<BLOCK_SIZE_Y &&((j+BLOCK_SIZE_Y*blockIdx.y)<rtn_g);j++){
if(eval(rt[j+BLOCK_SIZE_Y*blockIdx.y],lt[i])) {
count[i+k]++;
}
}
}
}
__global__ void join(
TUPLE *lt,
TUPLE *rt,
JOIN_TUPLE *p,
int *count,
int ltn,
int rtn
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<ltn){
/*
__shared__ TUPLE Tright[BLOCK_SIZE_Y];
for(uint j=0;threadIdx.x+j*BLOCK_SIZE_X<BLOCK_SIZE_Y&&(threadIdx.x+j*BLOCK_SIZE_X+BLOCK_SIZE_Y*blockIdx.y)<rtn;j++){
Tright[threadIdx.x + j*BLOCK_SIZE_X] = rt[threadIdx.x + j*BLOCK_SIZE_X + BLOCK_SIZE_Y * blockIdx.y];
}
__syncthreads();
TUPLE Tleft = lt[i];
*/
//the first write location
/*
int writeloc = 0;
if(i != 0){
writeloc = count[i + blockIdx.y*blockDim.x*gridDim.x];
}
*/
int rtn_g = rtn;
for(uint j = 0; j<BLOCK_SIZE_Y &&((j+BLOCK_SIZE_Y*blockIdx.y)<rtn_g);j++){
if(eval(rt[j+BLOCK_SIZE_Y*blockIdx.y],lt[i])) {
p[count[i + blockIdx.y*blockDim.x*gridDim.x]].rid = rt[j+BLOCK_SIZE_Y*blockIdx.y].id;
p[count[i + blockIdx.y*blockDim.x*gridDim.x]].lid = lt[i].id;
for(uint valnum=0; valnum<VAL_NUM ; valnum++){
p[count[i + blockIdx.y*blockDim.x*gridDim.x]].rval[valnum] = rt[j+BLOCK_SIZE_Y*blockIdx.y].val[valnum];
p[count[i + blockIdx.y*blockDim.x*gridDim.x]].lval[valnum] = lt[i].val[valnum];
}
count[i + blockIdx.y*blockDim.x*gridDim.x]++;
//writeloc++;
}
}
}
}
}
| 5a8465eba305c5bb467476fa023b97345c2857cc.cu | #include <stdio.h>
#include <stdint.h>
#include <cuda.h>
#include <sys/time.h>
#include "tuple.h"
#define DISTANCE 1
extern "C" {
__device__
bool eval(TUPLE rt,TUPLE lt){
//double dis = DISTANCE * DISTANCE;
/*
double temp = 0;
double temp2 = 0;
for(uint i = 0; i<VAL_NUM ; i++){
temp2 = rt.val[i]-lt.val[i];
temp += temp2 * temp2;
}
return temp < DISTANCE * DISTANCE;
*/
return rt.val[0]==lt.val[0];
}
__global__
void count(
TUPLE *lt,
TUPLE *rt,
int *count,
int ltn,
int rtn
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int k = blockIdx.y * gridDim.x * blockDim.x;
/*
transport tuple data to shared memory from global memory
*/
if(i<ltn){
/*
__shared__ TUPLE Tright[BLOCK_SIZE_Y];
for(uint j=0;threadIdx.x+j*BLOCK_SIZE_X<BLOCK_SIZE_Y&&(threadIdx.x+j*BLOCK_SIZE_X+BLOCK_SIZE_Y*blockIdx.y)<rtn;j++){
Tright[threadIdx.x + j*BLOCK_SIZE_X] = rt[threadIdx.x + j*BLOCK_SIZE_X + BLOCK_SIZE_Y * blockIdx.y];
}
__syncthreads();
*/
/*
count loop
*/
//TUPLE Tleft = lt[i];
int rtn_g = rtn;
//uint mcount = 0;
for(uint j = 0; j<BLOCK_SIZE_Y &&((j+BLOCK_SIZE_Y*blockIdx.y)<rtn_g);j++){
if(eval(rt[j+BLOCK_SIZE_Y*blockIdx.y],lt[i])) {
count[i+k]++;
}
}
}
}
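/*
 The two kernels in this file are meant to be used as a count / scan / write
 sequence. A rough host-side sketch (names are illustrative; the actual driver
 code lives elsewhere):
   count<<<grid, block>>>(lt, rt, cnt, ltn, rtn);              // pass 1: per-thread match counts
   thrust::exclusive_scan(thrust::device, cnt, cnt + n, cnt);  // counts -> write offsets
   join<<<grid, block>>>(lt, rt, out, cnt, ltn, rtn);          // pass 2: write JOIN_TUPLEs
 so that each thread of `join` starts writing at the offset produced by the scan.
*/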
__global__ void join(
TUPLE *lt,
TUPLE *rt,
JOIN_TUPLE *p,
int *count,
int ltn,
int rtn
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<ltn){
/*
__shared__ TUPLE Tright[BLOCK_SIZE_Y];
for(uint j=0;threadIdx.x+j*BLOCK_SIZE_X<BLOCK_SIZE_Y&&(threadIdx.x+j*BLOCK_SIZE_X+BLOCK_SIZE_Y*blockIdx.y)<rtn;j++){
Tright[threadIdx.x + j*BLOCK_SIZE_X] = rt[threadIdx.x + j*BLOCK_SIZE_X + BLOCK_SIZE_Y * blockIdx.y];
}
__syncthreads();
TUPLE Tleft = lt[i];
*/
//the first write location
/*
int writeloc = 0;
if(i != 0){
writeloc = count[i + blockIdx.y*blockDim.x*gridDim.x];
}
*/
int rtn_g = rtn;
for(uint j = 0; j<BLOCK_SIZE_Y &&((j+BLOCK_SIZE_Y*blockIdx.y)<rtn_g);j++){
if(eval(rt[j+BLOCK_SIZE_Y*blockIdx.y],lt[i])) {
p[count[i + blockIdx.y*blockDim.x*gridDim.x]].rid = rt[j+BLOCK_SIZE_Y*blockIdx.y].id;
p[count[i + blockIdx.y*blockDim.x*gridDim.x]].lid = lt[i].id;
for(uint valnum=0; valnum<VAL_NUM ; valnum++){
p[count[i + blockIdx.y*blockDim.x*gridDim.x]].rval[valnum] = rt[j+BLOCK_SIZE_Y*blockIdx.y].val[valnum];
p[count[i + blockIdx.y*blockDim.x*gridDim.x]].lval[valnum] = lt[i].val[valnum];
}
count[i + blockIdx.y*blockDim.x*gridDim.x]++;
//writeloc++;
}
}
}
}
}
|
a97bf655d6caea2cd74c6bb7aad30c7ed8f69bb6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2012, Thomas Schutzmeier
// FreeBSD License
// See https://github.com/unvirtual/cukd/blob/master/LICENSE
#include "kdtree_node_array.h"
#include <thrust/iterator/constant_iterator.h>
#include <thrust/sequence.h>
#include <thrust/count.h>
#include <thrust/scan.h>
#include <thrust/copy.h>
namespace cukd {
// Kernel declarations
namespace device {
__global__
void
tree_update_children_small_kernel(device::KDTreeNodeArray tree,
int n_nodes);
__global__
void
update_tree_children_from_small_kernel(int n_active_nodes, int n_small,
device::KDTreeNodeArray tree, int* tags,
int* small_offset, int* active_indices);
__global__
void
leaf_elements_kernel(device::SmallNodeArray active, device::SplitCandidateArray sca,
int old_small_nodes, int* marks, int* elem_offsets, int* result);
} // namespace device
void
KDTreeNodeArray::print() {
left_nodes.print("kdtree_node_array::left_nodes");
right_nodes.print("KDTreeNodeArray::right_nodes");
split_axis.print("KDTreeNodeArray::split_axis");
split_position.print("KDTreeNodeArray::split_position");
depth.print("KDTreeNodeArray::depth");
leaf_idx.print("KDTreeNodeArray::leaf_index");
node_size.print("KDTreeNodeArray::leaf_size");
node_element_first_idx.print("KDTreeNodeArray::leaf_first_elem");
element_idx.print("KDTreeNodeArray::element_idx");
}
std::pair<int, int>
KDTreeNodeArray::update_leaves(SmallNodeArray & small_nca,
cukd::SplitCandidateArray & sca,
DevVector<int> & new_elements,
DevVector<int> & marks,
DevVector<int> & mark_offsets) {
int n_nodes_old = n_nodes();
int n_elements_old = n_elements();
int n_leaves_old = n_leaves();
int small_nodes = small_nca.n_nodes();
DevVector<int> leaf_elements, elem_offsets, leaf_element_offsets;
DevVector<int> leaf_element_sizes;
int new_leaf_nodes = mark_offsets.get_at(mark_offsets.size() - 1);
leaf_element_sizes.resize(new_leaf_nodes);
leaf_element_offsets.resize(new_leaf_nodes);
elem_offsets.resize(small_nodes);
thrust::copy_if(new_elements.begin(), new_elements.end(),
marks.begin(), leaf_element_sizes.begin(), GreaterThanZero());
int new_leaf_elements = thrust::reduce(new_elements.begin(), new_elements.end());
thrust::exclusive_scan(leaf_element_sizes.begin(), leaf_element_sizes.end(),
leaf_element_offsets.begin());
thrust::exclusive_scan(new_elements.begin(), new_elements.end(), elem_offsets.begin());
leaf_elements.resize(new_leaf_elements);
get_leaf_elements(small_nca, sca, small_nodes, marks, elem_offsets, leaf_elements);
resize_nodes(n_nodes_old + small_nodes);
resize_elements(n_elements_old + new_leaf_elements);
resize_leaves(n_leaves_old + new_leaf_nodes);
thrust::copy(leaf_element_sizes.begin(), leaf_element_sizes.end(),
node_size.begin() + n_leaves_old);
int next_off = 0;
if(n_leaves_old != 0) {
next_off = node_element_first_idx.get_at(n_leaves_old - 1)
+ node_size.get_at(n_leaves_old - 1);
thrust::transform(leaf_element_offsets.begin(), leaf_element_offsets.end(),
thrust::constant_iterator<int>(next_off),
node_element_first_idx.begin() + n_leaves_old,
thrust::plus<int>());
} else {
thrust::copy(leaf_element_offsets.begin(), leaf_element_offsets.end(),
node_element_first_idx.begin() + n_leaves_old);
}
thrust::copy(leaf_elements.begin(), leaf_elements.end(),
element_idx.begin() + next_off);
return std::make_pair(n_leaves_old, new_leaf_nodes);
}
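// Note on the scans above: an exclusive scan turns per-node element counts into
// start offsets, e.g. new_elements = [3, 0, 2, 4] -> elem_offsets = [0, 3, 3, 5],
// so the elements of node k are written starting at offset elem_offsets[k].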
void
KDTreeNodeArray::update_children_small() {
dim3 grid(IntegerDivide(256)(n_nodes()), 1, 1);
dim3 blocks(256,1,1);
hipLaunchKernelGGL(( device::tree_update_children_small_kernel), dim3(grid), dim3(blocks), 0, 0, dev_array(), n_nodes());
CUT_CHECK_ERROR("tree_update_children_small_kernel failed");
}
void
KDTreeNodeArray::update_tree_children_from_small(int n_nodes_active, int n_nodes_small,
DevVector<int> & small_tags,
DevVector<int> & child_diff,
DevVector<int> & active_indices) {
dim3 grid(IntegerDivide(256)(n_nodes_active),1,1);
dim3 blocks(256,1,1);
hipLaunchKernelGGL(( device::update_tree_children_from_small_kernel), dim3(grid),dim3(blocks), 0, 0,
n_nodes_active, n_nodes_small, dev_array(),
small_tags.pointer(), child_diff.pointer(),
active_indices.pointer());
CUT_CHECK_ERROR("update_tree_children_from_small_kernel failed");
}
void
KDTreeNodeArray::get_leaf_elements(cukd::SmallNodeArray & active,
cukd::SplitCandidateArray & sca,
int old_small_nodes, DevVector<int> & marks,
DevVector<int> & elem_offsets, DevVector<int> & result) {
dim3 grid(IntegerDivide(256)(old_small_nodes),1,1);
dim3 blocks(256,1,1);
hipLaunchKernelGGL(( device::leaf_elements_kernel), dim3(grid), dim3(blocks), 0, 0, active.dev_array(), sca.dev_array(),
old_small_nodes, marks.pointer(),
elem_offsets.pointer(), result.pointer());
}
} // namespace cukd
| a97bf655d6caea2cd74c6bb7aad30c7ed8f69bb6.cu | // Copyright (c) 2012, Thomas Schutzmeier
// FreeBSD License
// See https://github.com/unvirtual/cukd/blob/master/LICENSE
#include "kdtree_node_array.h"
#include <thrust/iterator/constant_iterator.h>
#include <thrust/sequence.h>
#include <thrust/count.h>
#include <thrust/scan.h>
#include <thrust/copy.h>
namespace cukd {
// Kernel declarations
namespace device {
__global__
void
tree_update_children_small_kernel(device::KDTreeNodeArray tree,
int n_nodes);
__global__
void
update_tree_children_from_small_kernel(int n_active_nodes, int n_small,
device::KDTreeNodeArray tree, int* tags,
int* small_offset, int* active_indices);
__global__
void
leaf_elements_kernel(device::SmallNodeArray active, device::SplitCandidateArray sca,
int old_small_nodes, int* marks, int* elem_offsets, int* result);
} // namespace device
void
KDTreeNodeArray::print() {
left_nodes.print("kdtree_node_array::left_nodes");
right_nodes.print("KDTreeNodeArray::right_nodes");
split_axis.print("KDTreeNodeArray::split_axis");
split_position.print("KDTreeNodeArray::split_position");
depth.print("KDTreeNodeArray::depth");
leaf_idx.print("KDTreeNodeArray::leaf_index");
node_size.print("KDTreeNodeArray::leaf_size");
node_element_first_idx.print("KDTreeNodeArray::leaf_first_elem");
element_idx.print("KDTreeNodeArray::element_idx");
}
std::pair<int, int>
KDTreeNodeArray::update_leaves(SmallNodeArray & small_nca,
cukd::SplitCandidateArray & sca,
DevVector<int> & new_elements,
DevVector<int> & marks,
DevVector<int> & mark_offsets) {
int n_nodes_old = n_nodes();
int n_elements_old = n_elements();
int n_leaves_old = n_leaves();
int small_nodes = small_nca.n_nodes();
DevVector<int> leaf_elements, elem_offsets, leaf_element_offsets;
DevVector<int> leaf_element_sizes;
int new_leaf_nodes = mark_offsets.get_at(mark_offsets.size() - 1);
leaf_element_sizes.resize(new_leaf_nodes);
leaf_element_offsets.resize(new_leaf_nodes);
elem_offsets.resize(small_nodes);
thrust::copy_if(new_elements.begin(), new_elements.end(),
marks.begin(), leaf_element_sizes.begin(), GreaterThanZero());
int new_leaf_elements = thrust::reduce(new_elements.begin(), new_elements.end());
thrust::exclusive_scan(leaf_element_sizes.begin(), leaf_element_sizes.end(),
leaf_element_offsets.begin());
thrust::exclusive_scan(new_elements.begin(), new_elements.end(), elem_offsets.begin());
leaf_elements.resize(new_leaf_elements);
get_leaf_elements(small_nca, sca, small_nodes, marks, elem_offsets, leaf_elements);
resize_nodes(n_nodes_old + small_nodes);
resize_elements(n_elements_old + new_leaf_elements);
resize_leaves(n_leaves_old + new_leaf_nodes);
thrust::copy(leaf_element_sizes.begin(), leaf_element_sizes.end(),
node_size.begin() + n_leaves_old);
int next_off = 0;
if(n_leaves_old != 0) {
next_off = node_element_first_idx.get_at(n_leaves_old - 1)
+ node_size.get_at(n_leaves_old - 1);
thrust::transform(leaf_element_offsets.begin(), leaf_element_offsets.end(),
thrust::constant_iterator<int>(next_off),
node_element_first_idx.begin() + n_leaves_old,
thrust::plus<int>());
} else {
thrust::copy(leaf_element_offsets.begin(), leaf_element_offsets.end(),
node_element_first_idx.begin() + n_leaves_old);
}
thrust::copy(leaf_elements.begin(), leaf_elements.end(),
element_idx.begin() + next_off);
return std::make_pair(n_leaves_old, new_leaf_nodes);
}
void
KDTreeNodeArray::update_children_small() {
dim3 grid(IntegerDivide(256)(n_nodes()), 1, 1);
dim3 blocks(256,1,1);
device::tree_update_children_small_kernel<<<grid, blocks>>>(dev_array(), n_nodes());
CUT_CHECK_ERROR("tree_update_children_small_kernel failed");
}
void
KDTreeNodeArray::update_tree_children_from_small(int n_nodes_active, int n_nodes_small,
DevVector<int> & small_tags,
DevVector<int> & child_diff,
DevVector<int> & active_indices) {
dim3 grid(IntegerDivide(256)(n_nodes_active),1,1);
dim3 blocks(256,1,1);
device::update_tree_children_from_small_kernel<<<grid,blocks>>>(
n_nodes_active, n_nodes_small, dev_array(),
small_tags.pointer(), child_diff.pointer(),
active_indices.pointer());
CUT_CHECK_ERROR("update_tree_children_from_small_kernel failed");
}
void
KDTreeNodeArray::get_leaf_elements(cukd::SmallNodeArray & active,
cukd::SplitCandidateArray & sca,
int old_small_nodes, DevVector<int> & marks,
DevVector<int> & elem_offsets, DevVector<int> & result) {
dim3 grid(IntegerDivide(256)(old_small_nodes),1,1);
dim3 blocks(256,1,1);
device::leaf_elements_kernel<<<grid, blocks>>>(active.dev_array(), sca.dev_array(),
old_small_nodes, marks.pointer(),
elem_offsets.pointer(), result.pointer());
}
} // namespace cukd
|
8e84f7282a465571e2510b24115b7d8496f611c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
int num_devices = 0;
/******************************************************************************
*
* Auxiliary routines
*
******************************************************************************/
#define error_check(error) do \
{ \
cuda_assert((error), __FILE__, __LINE__); \
} while (0);
void cuda_assert(hipError_t code, const char *file, int line)
{
if (code != hipSuccess) {
fprintf(stderr,
"[ERROR] Error code: %d Message: %s %s %d\n",
code, hipGetErrorString(code), file, line);
exit(code);
}
}
void mem_ustring(char *s, long bytes)
{
double usize = 0;
if (bytes > 1E+9)
{
usize = bytes / 1E+9;
sprintf(s, "%.2lf GB", usize);
}
else if (bytes > 1E+6)
{
usize = bytes / 1E+6;
sprintf(s, "%.2lf MB", usize);
}
else if (bytes > 1E+3)
{
usize = bytes / 1E+3;
sprintf(s, "%.2lf KB", usize);
}
else
{
usize = bytes;
sprintf(s, "%lf Bytes", usize);
}
}
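/* Example (illustrative values): mem_ustring(buf, 1536000000L) produces "1.54 GB"
   and mem_ustring(buf, 49152) produces "49.15 KB". */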
void print_device_info(void)
{
for (int n = 0; n < num_devices; n++)
{
char ustring[64];
hipDeviceProp_t device_prop;
hipGetDeviceProperties(&device_prop, n);
    size_t free_mem;
hipMemGetInfo(&free_mem, NULL);
int device_threads = device_prop.multiProcessorCount * device_prop.maxThreadsPerMultiProcessor;
printf("Device %d: %s\n", n, device_prop.name);
printf(" Compute capability: %d.%d\n", device_prop.major, device_prop.minor);
printf(" Total number of threads: %d\n", device_threads);
mem_ustring(ustring, device_prop.totalGlobalMem);
printf(" Global memory size: %s\n", ustring);
mem_ustring(ustring, device_prop.sharedMemPerBlock);
printf(" Shared memory size: %s\n", ustring);
mem_ustring(ustring, device_prop.totalConstMem);
printf(" Constant memory size: %s\n", ustring);
mem_ustring(ustring, free_mem);
printf(" Total free memory: %s\n", ustring);
printf(" Warp size: %d\n", device_prop.warpSize);
}
printf("\n");
}
/******************************************************************************
*
* CUDA kernels
*
******************************************************************************/
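// char_add copies one character per thread: thread t writes b[t] into a[t].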
__global__
void char_add(char *a, char *b)
{
a[threadIdx.x] = b[threadIdx.x];
}
/******************************************************************************
*
* Device tests
*
******************************************************************************/
void run_device_test(void)
{
for (int n = 0; n < num_devices; n++)
{
hipSetDevice(n);
int string_size;
char string[256] = "Hello world!";
string_size = strlen(string) + 1;
/* Allocate device */
char *d_a, *d_b;
hipMalloc((void **)&d_a, string_size * sizeof(char));
hipMalloc((void **)&d_b, string_size * sizeof(char));
/* Allocate host */
char *a = (char *)calloc(string_size, sizeof(char));
char *b = (char *)calloc(string_size, sizeof(char));
strcpy(b, string);
dim3 dim_block(string_size, 1);
dim3 dim_grid(1);
hipMemcpy((void *)d_b, (const void *)b, string_size * sizeof(char), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( char_add), dim3(dim_grid), dim3(dim_block), 0, 0, d_a, d_b);
error_check(hipPeekAtLastError());
error_check(hipDeviceSynchronize());
hipMemcpy((void *)a, (void *)d_a, string_size, hipMemcpyDeviceToHost);
hipDeviceProp_t device_prop;
hipGetDeviceProperties(&device_prop, n);
if (strcmp(a, b) == 0)
printf("[PASSED TEST] Device %d: %s\n", n, device_prop.name);
else
printf("[FAILED TEST] Device %d: %s\n", n, device_prop.name);
hipFree(d_a);
hipFree(d_b);
free(a);
free(b);
}
printf("\n");
}
int main(void)
{
error_check(hipGetDeviceCount(&num_devices));
if (num_devices > 0)
{
print_device_info();
run_device_test();
}
else
{
printf("[ERROR] No CUDA devices found!\n");
}
return EXIT_SUCCESS;
}
| 8e84f7282a465571e2510b24115b7d8496f611c4.cu | #include <stdio.h>
int num_devices = 0;
/******************************************************************************
*
* Auxiliary routines
*
******************************************************************************/
#define error_check(error) do \
{ \
cuda_assert((error), __FILE__, __LINE__); \
} while (0);
void cuda_assert(cudaError_t code, const char *file, int line)
{
if (code != cudaSuccess) {
fprintf(stderr,
"[ERROR] Error code: %d Message: %s %s %d\n",
code, cudaGetErrorString(code), file, line);
exit(code);
}
}
void mem_ustring(char *s, long bytes)
{
double usize = 0;
if (bytes > 1E+9)
{
usize = bytes / 1E+9;
sprintf(s, "%.2lf GB", usize);
}
else if (bytes > 1E+6)
{
usize = bytes / 1E+6;
sprintf(s, "%.2lf MB", usize);
}
else if (bytes > 1E+3)
{
usize = bytes / 1E+3;
sprintf(s, "%.2lf KB", usize);
}
else
{
usize = bytes;
sprintf(s, "%lf Bytes", usize);
}
}
void print_device_info(void)
{
for (int n = 0; n < num_devices; n++)
{
char ustring[64];
cudaDeviceProp device_prop;
cudaGetDeviceProperties(&device_prop, n);
    size_t free_mem;
cudaMemGetInfo(&free_mem, NULL);
int device_threads = device_prop.multiProcessorCount * device_prop.maxThreadsPerMultiProcessor;
printf("Device %d: %s\n", n, device_prop.name);
printf(" Compute capability: %d.%d\n", device_prop.major, device_prop.minor);
printf(" Total number of threads: %d\n", device_threads);
mem_ustring(ustring, device_prop.totalGlobalMem);
printf(" Global memory size: %s\n", ustring);
mem_ustring(ustring, device_prop.sharedMemPerBlock);
printf(" Shared memory size: %s\n", ustring);
mem_ustring(ustring, device_prop.totalConstMem);
printf(" Constant memory size: %s\n", ustring);
mem_ustring(ustring, free_mem);
printf(" Total free memory: %s\n", ustring);
printf(" Warp size: %d\n", device_prop.warpSize);
}
printf("\n");
}
/******************************************************************************
*
* CUDA kernels
*
******************************************************************************/
__global__
void char_add(char *a, char *b)
{
a[threadIdx.x] = b[threadIdx.x];
}
/******************************************************************************
*
* Device tests
*
******************************************************************************/
void run_device_test(void)
{
for (int n = 0; n < num_devices; n++)
{
cudaSetDevice(n);
int string_size;
char string[256] = "Hello world!";
string_size = strlen(string) + 1;
/* Allocate device */
char *d_a, *d_b;
cudaMalloc((void **)&d_a, string_size * sizeof(char));
cudaMalloc((void **)&d_b, string_size * sizeof(char));
/* Allocate host */
char *a = (char *)calloc(string_size, sizeof(char));
char *b = (char *)calloc(string_size, sizeof(char));
strcpy(b, string);
dim3 dim_block(string_size, 1);
dim3 dim_grid(1);
cudaMemcpy((void *)d_b, (const void *)b, string_size * sizeof(char), cudaMemcpyHostToDevice);
char_add<<<dim_grid, dim_block>>>(d_a, d_b);
error_check(cudaPeekAtLastError());
error_check(cudaDeviceSynchronize());
cudaMemcpy((void *)a, (void *)d_a, string_size, cudaMemcpyDeviceToHost);
cudaDeviceProp device_prop;
cudaGetDeviceProperties(&device_prop, n);
if (strcmp(a, b) == 0)
printf("[PASSED TEST] Device %d: %s\n", n, device_prop.name);
else
printf("[FAILED TEST] Device %d: %s\n", n, device_prop.name);
cudaFree(d_a);
cudaFree(d_b);
free(a);
free(b);
}
printf("\n");
}
int main(void)
{
error_check(cudaGetDeviceCount(&num_devices));
if (num_devices > 0)
{
print_device_info();
run_device_test();
}
else
{
printf("[ERROR] No CUDA devices found!\n");
}
return EXIT_SUCCESS;
}
|
de57379a9743433ef449240900074779cf839855.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include "calc.cpp"
#include "utils.h"
#include <stdio.h>
#include <algorithm>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
// map the two 2D indices to a single linear, 1D index
int grid_width = gridDim.x * blockDim.x;
int index = index_y * grid_width + index_x;
    // 5x5 rank filter over the pixel neighbourhood; std::vector and std::sort cannot
    // be used in device code, so a fixed-size array and an insertion sort are used
    // instead. Note the result is overwritten by the greyscale formula below.
    float xi[25]; //temporary neighbourhood list
    int n = 0;
    for(int k=index_x-5/2;k<=index_x+5/2;k++) { //apply the window specified by x and y
        for(int m=index_y-5/2;m<=index_y+5/2;m++) {
            if((k<0)||(m<0)||(k>=numRows)||(m>=numCols)) xi[n++] = 0; //on edges of the image use 0 values
            else xi[n++] = rgbaImage[k * numCols + m].x; //use one colour channel of the uchar4 pixel
        }
    }
    for(int a=1;a<n;a++) { //insertion sort of the 'xi' neighbourhood values
        float v = xi[a]; int b = a-1;
        for(; b>=0 && xi[b]>v; b--) xi[b+1] = xi[b];
        xi[b+1] = v;
    }
    greyImage[index]=(unsigned char)xi[3]; //replace pixel with element specified by 'rank' (3)
// write out the final result
greyImage[index] = .299f * rgbaImage[index].x + .587f * rgbaImage[index].y + .114f * rgbaImage[index].z;
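  // the weights .299f/.587f/.114f are the Rec. 601 luma coefficients for R, G and B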
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
const int thread = 16;
const dim3 blockSize( thread, thread, 1);
const dim3 gridSize( ceil(numRows/(float)thread), ceil(numCols/(float)thread), 1);
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
} | de57379a9743433ef449240900074779cf839855.cu | #include <math.h>
#include "calc.cpp"
#include "utils.h"
#include <stdio.h>
#include <algorithm>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
// map the two 2D indices to a single linear, 1D index
int grid_width = gridDim.x * blockDim.x;
int index = index_y * grid_width + index_x;
    // 5x5 rank filter over the pixel neighbourhood; std::vector and std::sort cannot
    // be used in device code, so a fixed-size array and an insertion sort are used
    // instead. Note the result is overwritten by the greyscale formula below.
    float xi[25]; //temporary neighbourhood list
    int n = 0;
    for(int k=index_x-5/2;k<=index_x+5/2;k++) { //apply the window specified by x and y
        for(int m=index_y-5/2;m<=index_y+5/2;m++) {
            if((k<0)||(m<0)||(k>=numRows)||(m>=numCols)) xi[n++] = 0; //on edges of the image use 0 values
            else xi[n++] = rgbaImage[k * numCols + m].x; //use one colour channel of the uchar4 pixel
        }
    }
    for(int a=1;a<n;a++) { //insertion sort of the 'xi' neighbourhood values
        float v = xi[a]; int b = a-1;
        for(; b>=0 && xi[b]>v; b--) xi[b+1] = xi[b];
        xi[b+1] = v;
    }
    greyImage[index]=(unsigned char)xi[3]; //replace pixel with element specified by 'rank' (3)
// write out the final result
greyImage[index] = .299f * rgbaImage[index].x + .587f * rgbaImage[index].y + .114f * rgbaImage[index].z;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
const int thread = 16;
const dim3 blockSize( thread, thread, 1);
const dim3 gridSize( ceil(numRows/(float)thread), ceil(numCols/(float)thread), 1);
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
} |
95c76a36ea574338fa637de5a06ffe96aec55072.hip | // !!! This is a file automatically generated by hipify!!!
//#define DEBUG
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#ifdef DEBUG
hipError_t status;
void checkCuda(hipError_t& status) {
status = hipGetLastError();
if (status == hipSuccess) {
fprintf(stderr, "Success!\n");
} else {
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(status));
exit(-1);
}
}
#endif
__global__ void CUDACross(bool *candidates, int size){
for (int idx = blockIdx.x*blockDim.x + threadIdx.x; idx < size/2 + 1; idx += blockDim.x * gridDim.x) {
int multiplier = idx + 2;
int check = multiplier * multiplier; // bang when `multiplier` reaches ceil(sqrt(2^31)) = 46341
//if (candidates[multiplier-2]) { // which is when `N` gets to (46341-2-1)*2 + 2 = 92678
while (check < size + 2){
candidates[check - 2] = false;
check += multiplier;
}
//}
}
}
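/*
 As noted in the comments above, `check = multiplier * multiplier` overflows a
 32-bit int once `multiplier` reaches 46341. A minimal overflow-safe variant of
 the crossing-out loop (sketch only) widens the running index:
   long long check = (long long)multiplier * multiplier;
   while (check < (long long)size + 2) {
     candidates[check - 2] = false;
     check += multiplier;
   }
*/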
void init(bool *candidates, int size){
for (int i = 0; i<size; i++)
candidates[i] = true;
}
int main(int argc, char* argv[]) {
/*if (argc != 2 || atoi(argv[1]) < 2 || atoi(argv[1]) > 1000000) {
fprintf(stderr, "bad input\nusage: $ ./seqgenprimes N\nwhere N is in [2, 1000000]");
exit(-1);
}*/
int N = atoi(argv[1]);
int size = N - 1;
bool* candidates = new bool[size];
init(candidates, size);
int deviceNum = 0;
hipSetDevice(deviceNum);
struct hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, deviceNum);
int dimBlock = prop.maxThreadsPerBlock / 4;
int dimGrid = prop.multiProcessorCount * 32;
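  // launch heuristic: a quarter of the maximum block size and 32 blocks per SM;
  // the grid-stride loop in CUDACross covers any candidates beyond the grid size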
#ifdef DEBUG
fprintf(stderr, "maxThreadsPerBlock is %d\n", prop.maxThreadsPerBlock);
fprintf(stderr, "maxThreadsPerMultiProcessor is %d\n", prop.maxThreadsPerMultiProcessor);
fprintf(stderr, "totalGlobalMem is %d\n", prop.totalGlobalMem);
#endif
//Initialize arrays
bool *gpudata;
//Allocate memory
hipMalloc((void**)&gpudata, sizeof(bool)*size);
#ifdef DEBUG
fprintf(stderr, "checking hipMalloc()...\n");
checkCuda(status);
#endif
//Copy to GPU
hipMemcpy(gpudata, candidates, sizeof(bool)*size, hipMemcpyHostToDevice);
#ifdef DEBUG
fprintf(stderr, "checking hipMemcpy() host to device...\n");
checkCuda(status);
#endif
//Kernel call on the GPU
// CUDACross<<<bNum, tNum>>>(gpudata, size, bNum, tNum);
hipLaunchKernelGGL(( CUDACross), dim3(dimGrid), dim3(dimBlock), 0, 0, gpudata, size);
// hipLaunchKernelGGL(( CUDACross), dim3(dimGrid), dim3(dimBlock), 0, 0, gpudata, size, N);
#ifdef DEBUG
fprintf(stderr, "checking kernel...\n");
checkCuda(status);
#endif
//Copy from GPU back onto host
hipMemcpy(candidates, gpudata, sizeof(bool)*size, hipMemcpyDeviceToHost);
#ifdef DEBUG
fprintf(stderr, "checking hipMemcpy() device to host...\n");
checkCuda(status);
#endif
//Free the memory on the GPU
hipFree(gpudata);
char filename[20];
sprintf(filename, "%d.txt", N);
FILE *fp = fopen(filename, "w");
fprintf(fp, "%d ", 2);
#ifdef DEBUG
fprintf(stderr, "%d ", 2);
#endif
for (int i = 1; i < size; ++i) {
if (candidates[i]) fprintf(fp, "%d ", i+2);
#ifdef DEBUG
if (candidates[i]) fprintf(stderr, "%d ", i+2);
#endif
}
return 0;
}
| 95c76a36ea574338fa637de5a06ffe96aec55072.cu | //#define DEBUG
#include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#ifdef DEBUG
cudaError_t status;
void checkCuda(cudaError_t& status) {
status = cudaGetLastError();
if (status == cudaSuccess) {
fprintf(stderr, "Success!\n");
} else {
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(status));
exit(-1);
}
}
#endif
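// Crossing-off step of a Sieve of Eratosthenes, written as a grid-stride loop.
// candidates[i] stands for the integer i+2; each thread takes one multiplier
// (idx+2) and clears every multiple of it starting at multiplier*multiplier.
// The multiplier is not itself required to be prime (see the commented-out
// check), which is redundant work but still yields a correct sieve.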
__global__ void CUDACross(bool *candidates, int size){
for (int idx = blockIdx.x*blockDim.x + threadIdx.x; idx < size/2 + 1; idx += blockDim.x * gridDim.x) {
int multiplier = idx + 2;
int check = multiplier * multiplier; // bang when `multiplier` reaches ceil(sqrt(2^31)) = 46341
//if (candidates[multiplier-2]) { // which is when `N` gets to (46341-2-1)*2 + 2 = 92678
while (check < size + 2){
candidates[check - 2] = false;
check += multiplier;
}
//}
}
}
void init(bool *candidates, int size){
for (int i = 0; i<size; i++)
candidates[i] = true;
}
int main(int argc, char* argv[]) {
/*if (argc != 2 || atoi(argv[1]) < 2 || atoi(argv[1]) > 1000000) {
fprintf(stderr, "bad input\nusage: $ ./seqgenprimes N\nwhere N is in [2, 1000000]");
exit(-1);
}*/
int N = atoi(argv[1]);
int size = N - 1;
bool* candidates = new bool[size];
init(candidates, size);
int deviceNum = 0;
cudaSetDevice(deviceNum);
struct cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, deviceNum);
int dimBlock = prop.maxThreadsPerBlock / 4;
int dimGrid = prop.multiProcessorCount * 32;
#ifdef DEBUG
fprintf(stderr, "maxThreadsPerBlock is %d\n", prop.maxThreadsPerBlock);
fprintf(stderr, "maxThreadsPerMultiProcessor is %d\n", prop.maxThreadsPerMultiProcessor);
fprintf(stderr, "totalGlobalMem is %d\n", prop.totalGlobalMem);
#endif
//Initialize arrays
bool *gpudata;
//Allocate memory
cudaMalloc((void**)&gpudata, sizeof(bool)*size);
#ifdef DEBUG
fprintf(stderr, "checking cudaMalloc()...\n");
checkCuda(status);
#endif
//Copy to GPU
cudaMemcpy(gpudata, candidates, sizeof(bool)*size, cudaMemcpyHostToDevice);
#ifdef DEBUG
fprintf(stderr, "checking cudaMemcpy() host to device...\n");
checkCuda(status);
#endif
//Kernel call on the GPU
// CUDACross<<<bNum, tNum>>>(gpudata, size, bNum, tNum);
CUDACross<<<dimGrid, dimBlock>>>(gpudata, size);
// CUDACross<<<dimGrid, dimBlock>>>(gpudata, size, N);
#ifdef DEBUG
fprintf(stderr, "checking kernel...\n");
checkCuda(status);
#endif
//Copy from GPU back onto host
cudaMemcpy(candidates, gpudata, sizeof(bool)*size, cudaMemcpyDeviceToHost);
#ifdef DEBUG
fprintf(stderr, "checking cudaMemcpy() device to host...\n");
checkCuda(status);
#endif
//Free the memory on the GPU
cudaFree(gpudata);
char filename[20];
sprintf(filename, "%d.txt", N);
FILE *fp = fopen(filename, "w");
fprintf(fp, "%d ", 2);
#ifdef DEBUG
fprintf(stderr, "%d ", 2);
#endif
for (int i = 1; i < size; ++i) {
if (candidates[i]) fprintf(fp, "%d ", i+2);
#ifdef DEBUG
if (candidates[i]) fprintf(stderr, "%d ", i+2);
#endif
}
fclose(fp);
delete[] candidates;
return 0;
}
|
6721a2ac11fb9ce5978abd692040536844d4c472.hip | // !!! This is a file automatically generated by hipify!!!
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <vector>
#include <hip/hip_runtime.h>
#include "utils.h"
__global__
void wyllie ( long *list , const int size )
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < size )
{
long node, next;
while ( ((node = list[index]) >> 32) != NIL &&
((next = list[node >> 32]) >> 32) != NIL )
{
long temp = (node & MASK) ;
temp += (next & MASK) ;
temp += (next >> 32) << 32;
__syncthreads();
list [ index ] = temp ;
}
}
}
int main(int argc, char* argv[]) {
if (argc != 4) {
printf("Usage: ./%s <list size> <0 or 1> <repeat>", argv[0]);
printf("0 and 1 indicate an ordered list and a random list, respectively\n");
exit(-1);
}
int elems = atoi(argv[1]);
int setRandomList = atoi(argv[2]);
int repeat = atoi(argv[3]);
int i;
std::vector<int> next (elems);
std::vector<int> rank (elems);
std::vector<long> list (elems);
std::vector<long> d_res (elems);
std::vector<long> h_res (elems);
// generate an array in which each element contains the index of the next element
if (setRandomList)
random_list(next);
else
ordered_list(next);
// initialize the rank list
for (i = 0; i < elems; i++) {
rank[i] = next[i] == NIL ? 0 : 1;
}
// pack next and rank as a 64-bit number
for (i = 0; i < elems; i++) list[i] = ((long)next[i] << 32) | rank[i];
// run list ranking on a device
long *d_list;
hipMalloc((void**)&d_list, sizeof(long) * elems);
dim3 grid ((elems + 255)/256);
dim3 block (256);
double time = 0.0;
for (i = 0; i <= repeat; i++) {
hipMemcpy(d_list, list.data(), sizeof(long) * elems, hipMemcpyHostToDevice);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
hipLaunchKernelGGL(( wyllie), dim3(grid), dim3(block), 0, 0, d_list, elems);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
if (i > 0) time += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
}
printf("Average kernel execution time: %f (ms)\n", (time * 1e-6f) / repeat);
hipMemcpy(d_res.data(), d_list, sizeof(long) * elems, hipMemcpyDeviceToHost);
hipFree(d_list);
for (i = 0; i < elems; i++) d_res[i] &= MASK;
// verify
// compute distance from the *end* of the list (note the first element is the head node)
h_res[0] = elems-1;
i = 0;
for (int r = 1; r < elems; r++) {
h_res[next[i]] = elems-1-r;
i = next[i];
}
#ifdef DEBUG
printf("Ranks:\n");
for (i = 0; i < elems; i++) {
printf("%d: %ld %ld\n", i, h_res[i], d_res[i]);
}
#endif
printf("%s\n", (h_res == d_res) ? "PASS" : "FAIL");
return 0;
}
| 6721a2ac11fb9ce5978abd692040536844d4c472.cu | #include <chrono>
#include <cstdio>
#include <cstdlib>
#include <vector>
#include <cuda.h>
#include "utils.h"
__global__
void wyllie ( long *list , const int size )
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < size )
{
long node, next;
while ( ((node = list[index]) >> 32) != NIL &&
((next = list[node >> 32]) >> 32) != NIL )
{
long temp = (node & MASK) ;
temp += (next & MASK) ;
temp += (next >> 32) << 32;
__syncthreads();
list [ index ] = temp ;
}
}
}
int main(int argc, char* argv[]) {
if (argc != 4) {
printf("Usage: ./%s <list size> <0 or 1> <repeat>", argv[0]);
printf("0 and 1 indicate an ordered list and a random list, respectively\n");
exit(-1);
}
int elems = atoi(argv[1]);
int setRandomList = atoi(argv[2]);
int repeat = atoi(argv[3]);
int i;
std::vector<int> next (elems);
std::vector<int> rank (elems);
std::vector<long> list (elems);
std::vector<long> d_res (elems);
std::vector<long> h_res (elems);
// generate an array in which each element contains the index of the next element
if (setRandomList)
random_list(next);
else
ordered_list(next);
// initialize the rank list
for (i = 0; i < elems; i++) {
rank[i] = next[i] == NIL ? 0 : 1;
}
// pack next and rank as a 64-bit number
for (i = 0; i < elems; i++) list[i] = ((long)next[i] << 32) | rank[i];
// run list ranking on a device
long *d_list;
cudaMalloc((void**)&d_list, sizeof(long) * elems);
dim3 grid ((elems + 255)/256);
dim3 block (256);
double time = 0.0;
for (i = 0; i <= repeat; i++) {
cudaMemcpy(d_list, list.data(), sizeof(long) * elems, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
wyllie<<<grid, block>>>(d_list, elems);
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
if (i > 0) time += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
}
printf("Average kernel execution time: %f (ms)\n", (time * 1e-6f) / repeat);
cudaMemcpy(d_res.data(), d_list, sizeof(long) * elems, cudaMemcpyDeviceToHost);
cudaFree(d_list);
for (i = 0; i < elems; i++) d_res[i] &= MASK;
// verify
// compute distance from the *end* of the list (note the first element is the head node)
h_res[0] = elems-1;
i = 0;
for (int r = 1; r < elems; r++) {
h_res[next[i]] = elems-1-r;
i = next[i];
}
#ifdef DEBUG
printf("Ranks:\n");
for (i = 0; i < elems; i++) {
printf("%d: %ld %ld\n", i, h_res[i], d_res[i]);
}
#endif
printf("%s\n", (h_res == d_res) ? "PASS" : "FAIL");
return 0;
}
|
6ba9abdd9eaddc354fca038e55ea37bc4538760d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <helper_cuda.h>
typedef enum TARGET {HOST, DEVICE} TARGET;
typedef struct {
int width;
int height;
float *elements;
} Matrix;
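// SGEMM kernel: C = alpha * A * B + beta * C with one thread per output
// element. Each thread accumulates the dot product of a row of A and a
// column of B, then blends in the previous value of C. The matrices are
// assumed square (width == height), matching the setup in main().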
__global__ void sgemm(Matrix A, Matrix B, Matrix C,
const float alpha, const float beta,
const int width, const int height) {
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx = idx_y * width + idx_x;
if (idx_x >= width || idx_y >= height)
return;
float value = 0.f;
for (int e = 0; e < width; e++)
value += alpha * A.elements[idx_y * width + e] * B.elements[e * width + idx_x]; // accumulate the dot product
C.elements[idx] = value + beta * C.elements[idx];
}
void InitMatrix(Matrix &mat, const int width, const int height, TARGET target = HOST);
int main(int argv, char* argc[]) {
Matrix A, B, C;
Matrix dA, dB, dC;
const float alpha = 2.f;
const float beta = .5f;
const int width = 2048;
const int height = 2048;
float elapsed_gpu;
double elapsed_cpu;
// Create CUDA events to estimate elapsed time
hipEvent_t start, stop;
struct timespec begin, finish;
hipEventCreate(&start);
hipEventCreate(&stop);
// Initialize host matrix
InitMatrix(A, width, height);
InitMatrix(B, width, height);
InitMatrix(C, width, height);
// CUDA Memory Initialize
// TODO: Write device memory pointer obtaining code from host pointer
///////////////
// CUDA Operation
hipEventRecord(start, 0);
clock_gettime(CLOCK_MONOTONIC, &begin);
// Copy host data to the device (CUDA global memory)
//hipMemcpy(dA.elements, A.elements, width * height * sizeof(float), hipMemcpyHostToDevice);
//hipMemcpy(dB.elements, B.elements, width * height * sizeof(float), hipMemcpyHostToDevice);
//hipMemcpy(dC.elements, C.elements, width * height * sizeof(float), hipMemcpyHostToDevice);
// Launch GPU Kernel
dim3 blockDim(16, 16);
dim3 gridDim((width + blockDim.x - 1) / blockDim.x, (height + blockDim.y - 1) / blockDim.y);
hipLaunchKernelGGL(( sgemm), dim3(gridDim), dim3(blockDim), 0, 0, dA, dB, dC, alpha, beta, width, height);
// Copy the computation result from the device to the host memory
//hipMemcpy(C.elements, dC.elements, width * height * sizeof(float), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &finish);
// Estimate CUDA operation time
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_gpu, start, stop);
printf("SGEMM CUDA Elapsed time: %f ms\n", elapsed_gpu);
elapsed_cpu = (finish.tv_sec - begin.tv_sec);
elapsed_cpu += (finish.tv_nsec - begin.tv_nsec) / 1000000000.0;
printf("Host time: %f ms\n", elapsed_cpu * 1000);
// finalize CUDA event
hipEventDestroy(start);
hipEventDestroy(stop);
// Finalize
//hipFree(dA.elements);
//hipFree(dB.elements);
//hipFree(dC.elements);
hipHostFree(A.elements);
hipHostFree(B.elements);
hipHostFree(C.elements);
return 0;
}
void InitMatrix(Matrix &mat, const int width, const int height, TARGET target) {
mat.width = width;
mat.height = height;
if (target == DEVICE) {
hipMalloc((void**)&mat.elements, width * height * sizeof(float));
}
else {
// TODO: write pinned memory allocation code (mapped mode)
///////////////////
for (int row = 0; row < height; row++) {
for (int col = 0; col < width; col++) {
mat.elements[row * width + col] = row * width + col * 0.001;
}
}
}
} | 6ba9abdd9eaddc354fca038e55ea37bc4538760d.cu |
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <helper_cuda.h>
typedef enum TARGET {HOST, DEVICE} TARGET;
typedef struct {
int width;
int height;
float *elements;
} Matrix;
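// SGEMM kernel: C = alpha * A * B + beta * C with one thread per output
// element. Each thread accumulates the dot product of a row of A and a
// column of B, then blends in the previous value of C. The matrices are
// assumed square (width == height), matching the setup in main().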
__global__ void sgemm(Matrix A, Matrix B, Matrix C,
const float alpha, const float beta,
const int width, const int height) {
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx = idx_y * width + idx_x;
if (idx_x >= width || idx_y >= height)
return;
float value = 0.f;
for (int e = 0; e < width; e++)
value += alpha * A.elements[idx_y * width + e] * B.elements[e * width + idx_x]; // accumulate the dot product
C.elements[idx] = value + beta * C.elements[idx];
}
void InitMatrix(Matrix &mat, const int width, const int height, TARGET target = HOST);
int main(int argv, char* argc[]) {
Matrix A, B, C;
Matrix dA, dB, dC;
const float alpha = 2.f;
const float beta = .5f;
const int width = 2048;
const int height = 2048;
float elapsed_gpu;
double elapsed_cpu;
// Create CUDA events to estimate elapsed time
cudaEvent_t start, stop;
struct timespec begin, finish;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Initialize host matrix
InitMatrix(A, width, height);
InitMatrix(B, width, height);
InitMatrix(C, width, height);
// CUDA Memory Initialize
// TODO: Write device memory pointer obtaining code from host pointer
///////////////
// CUDA Operation
cudaEventRecord(start, 0);
clock_gettime(CLOCK_MONOTONIC, &begin);
// Copy host data to the device (CUDA global memory)
//cudaMemcpy(dA.elements, A.elements, width * height * sizeof(float), cudaMemcpyHostToDevice);
//cudaMemcpy(dB.elements, B.elements, width * height * sizeof(float), cudaMemcpyHostToDevice);
//cudaMemcpy(dC.elements, C.elements, width * height * sizeof(float), cudaMemcpyHostToDevice);
// Launch GPU Kernel
dim3 blockDim(16, 16);
dim3 gridDim((width + blockDim.x - 1) / blockDim.x, (height + blockDim.y - 1) / blockDim.y);
sgemm<<<gridDim, blockDim>>>(dA, dB, dC, alpha, beta, width, height);
// Copy the computation result from the device to the host memory
//cudaMemcpy(C.elements, dC.elements, width * height * sizeof(float), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &finish);
// Estimate CUDA operation time
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_gpu, start, stop);
printf("SGEMM CUDA Elapsed time: %f ms\n", elapsed_gpu);
elapsed_cpu = (finish.tv_sec - begin.tv_sec);
elapsed_cpu += (finish.tv_nsec - begin.tv_nsec) / 1000000000.0;
printf("Host time: %f ms\n", elapsed_cpu * 1000);
// finalize CUDA event
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Finalize
//cudaFree(dA.elements);
//cudaFree(dB.elements);
//cudaFree(dC.elements);
cudaFreeHost(A.elements);
cudaFreeHost(B.elements);
cudaFreeHost(C.elements);
return 0;
}
void InitMatrix(Matrix &mat, const int width, const int height, TARGET target) {
mat.width = width;
mat.height = height;
if (target == DEVICE) {
cudaMalloc((void**)&mat.elements, width * height * sizeof(float));
}
else {
// TODO: write pinned memory allocation code (mapped mode)
///////////////////
for (int row = 0; row < height; row++) {
for (int col = 0; col < width; col++) {
mat.elements[row * width + col] = row * width + col * 0.001;
}
}
}
} |
67ad1b6cc951ab421e667f267f89e49bb9ecc8af.hip | // !!! This is a file automatically generated by hipify!!!
/*
opts: ./edge /afs/andrew.cmu.edu/usr12/sbali/private/proj/images/building.jpg 32 <s|n>
*/
/*
do 5/10 images of increasing sizes for all
analyze on both shared, not shared
add timing without memory copying
on different block sizes
try using streams?
*/
/*
SOBEL (NOT SHARED)
SMALL
0.000424
0.000220
0.000171
0.000154
0.000162
0.000153
0.000154
MEDIUM
0.001142
0.000533
0.000367
0.000315
0.000314
0.000329
0.000329
LARGE
0.004808
0.002161
0.001374
0.001186
0.001193
0.001236
0.001200
SOBEL SHARED
SMALL
1- 0.000448
2- 0.000229
4- 0.000170
8- 0.000155
16- 0.000158
24- 0.000153
32- 0.000165
MEDIUM
1- 0.001256
2- 0.000550
4- 0.000358
8- 0.000317
16- 0.000323
24- 0.000333
32- 0.000328
LARGE
1- 0.005209
2- 0.001975
4- 0.001312
8- 0.002101
16- 0.001190
24- 0.001268
32- 0.001192
SOBEL SHARED DIFF
SMALL
1- 0.000578
2- 0.000276
4- 0.000211
8- 0.000207
16- 0.000200
24- 0.000229
32- 0.000202
MEDIUM
1- 0.001604
2-0.000636
4- 0.000395
8- 0.000367
16- 0.000353
24- 0.000361
32- 0.000349
LARGE
1- 0.006609
2- 0.002341
4- 0.001429
8- 0.001219
16-0.001221
24- 0.001327
32- 0.001236
*/
#include <unistd.h>
#include <algorithm>
#include <cstdlib>
#include <limits>
#include <vector>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#define STB_IMAGE_IMPLEMENTATION
#include "../../utils/stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "../../utils/stb_image_write.h"
#include "../../utils/cycletimer.h"
#define CHANNEL_NUM 1
#define THREADS_PER_BLK 32
__device__ __constant__ float HSOBEL_H[3] = {-1.0, 0, 1.0};
__device__ __constant__ float HSOBEL_V[3] = {1.0, 2.0, 1.0};
__device__ __constant__ float HSOBEL[9] = {-1.0, -2.0, -1.0, 0, 0, 0, 1.0, 2.0, 1.0};
__global__ void single_kernel(uint8_t* old_img, uint8_t* new_img, float kernel[9], int k_width, int k_height, int img_width, int img_height){
int i, j, jj, ii;
i = blockIdx.x * blockDim.x + threadIdx.x;
j = blockIdx.y * blockDim.y + threadIdx.y;
if(i>=img_width || j>= img_height) return;
float tmp=0.f;
int jj_last = min(j+k_height, img_height);
int ii_last = min(i+k_width, img_width);
for(jj = j; jj< jj_last; jj++){
for(ii = i; ii< ii_last; ii++){
tmp += HSOBEL[(jj-j) * k_width + (ii-i)] * (old_img[jj * img_width + ii]);
}
}
new_img[j*img_width + i] = (uint8_t)sqrt(tmp*tmp);
}
//shared requires constant
//assumes k_width, k_height = 3
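// Tiled variant: each block stages a (THREADS_PER_BLK+2) x (THREADS_PER_BLK+2)
// patch of the input in shared memory, i.e. the block's own pixels plus the
// two-pixel apron to the right and below that the 3x3 mask reads. Threads
// with threadIdx.x < 2 and/or threadIdx.y < 2 also fetch the extra border
// columns, rows, and the corner before all threads convolve from shared memory.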
__global__ void shared_kernel(uint8_t* old_img, uint8_t* new_img, int img_width, int img_height){
int i, j, jj, ii, support_id, img_id, n_img_id;
i = blockIdx.x * blockDim.x + threadIdx.x;
j = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ float support[(THREADS_PER_BLK+2)*(THREADS_PER_BLK+2)];
if(i<img_width && j< img_height) {
support_id = (THREADS_PER_BLK+2)*threadIdx.y + threadIdx.x;
img_id = img_width * j + i;
support[support_id] = old_img[img_id];
if(threadIdx.x < 2){
n_img_id = j*img_width + (THREADS_PER_BLK + i);
if(n_img_id<img_width * img_height)
support[(THREADS_PER_BLK+2)*threadIdx.y + (THREADS_PER_BLK + threadIdx.x)] = old_img[n_img_id];
}
if(threadIdx.y < 2){
n_img_id = (THREADS_PER_BLK + j)*img_width + i;
if(n_img_id<img_width * img_height)
support[(THREADS_PER_BLK+2)*(THREADS_PER_BLK + threadIdx.y) + (threadIdx.x)] = old_img[n_img_id];
}
if(threadIdx.x < 2 && threadIdx.y<2){
n_img_id = (THREADS_PER_BLK + j)*img_width + (THREADS_PER_BLK + i);
if(n_img_id<img_width * img_height)
support[(THREADS_PER_BLK+2)*(THREADS_PER_BLK + threadIdx.y) + (THREADS_PER_BLK + threadIdx.x)] = old_img[n_img_id];
}
}
__syncthreads();
if(i<img_width && j< img_height) {
float tmp=0.f;
int jj_last = min(j+3, img_height) - j;
int ii_last = min(i+3, img_width) - i;
for(jj = 0; jj< jj_last; jj++){
for(ii = 0; ii< ii_last; ii++){
tmp += HSOBEL[3*jj + ii] * support[(threadIdx.y+jj)*(THREADS_PER_BLK+2) + (threadIdx.x+ii)];
}
}
new_img[img_id] = (uint8_t)sqrt(tmp*tmp);
}
}
//shared requires constant
//assumes k_width, k_height = 3
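// Separable variant: the 3x3 Sobel mask is applied as a horizontal smoothing
// pass with {1,2,1} followed by a vertical derivative pass with {-1,0,1}.
// The row pass writes into tmp_buf in shared memory and the column pass reads
// it back, trading one 9-tap loop per pixel for two 3-tap loops.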
__global__ void shared_sep_kernel(uint8_t* old_img, uint8_t* new_img, int img_width, int img_height){
int i, j, jj, ii, support_id, img_id, n_img_id;
i = blockIdx.x * blockDim.x + threadIdx.x;
j = blockIdx.y * blockDim.y + threadIdx.y;
//
__shared__ float support[(THREADS_PER_BLK+2)*(THREADS_PER_BLK+2)];
__shared__ float tmp_buf[(THREADS_PER_BLK+2)*(THREADS_PER_BLK+2)];
if(i==0 && j==0){
printf("ENTERED");
}
if(i<img_width && j< img_height) {
support_id = (THREADS_PER_BLK+2)*threadIdx.y + threadIdx.x;
img_id = img_width * j + i;
support[support_id] = old_img[img_id];
if(threadIdx.x < 2){
n_img_id = j*img_width + (THREADS_PER_BLK + i);
if(n_img_id<img_width * img_height)
support[(THREADS_PER_BLK+2)*threadIdx.y + (THREADS_PER_BLK + threadIdx.x)] = old_img[n_img_id];
}
if(threadIdx.y < 2){
n_img_id = (THREADS_PER_BLK + j)*img_width + i;
if(n_img_id<img_width * img_height)
support[(THREADS_PER_BLK+2)*(THREADS_PER_BLK + threadIdx.y) + (threadIdx.x)] = old_img[n_img_id];
}
if(threadIdx.x < 2 && threadIdx.y<2){
n_img_id = (THREADS_PER_BLK + j)*img_width + (THREADS_PER_BLK + i);
if(n_img_id<img_width * img_height)
support[(THREADS_PER_BLK+2)*(THREADS_PER_BLK + threadIdx.y) + (THREADS_PER_BLK + threadIdx.x)] = old_img[n_img_id];
}
}
__syncthreads();
if(i<img_width && j< img_height) {
float tmp=0.f;
int ii_last = min(i+3, img_width) - i;
for(ii = 0; ii< ii_last; ii++){
tmp += HSOBEL_V[ii] * support[(threadIdx.y)*(THREADS_PER_BLK+2) + (threadIdx.x+ii)];
}
tmp_buf[(threadIdx.y)*(THREADS_PER_BLK+2) + threadIdx.x] = tmp;
}
if(threadIdx.y < 2){
float tmp=0.f;
int ii_last = min(i+3, img_width) - i;
for(ii = 0; ii< ii_last; ii++){
tmp += HSOBEL_V[ii] * support[(threadIdx.y+THREADS_PER_BLK)*(THREADS_PER_BLK+2) + (threadIdx.x+ii)];
}
tmp_buf[(threadIdx.y+THREADS_PER_BLK )*(THREADS_PER_BLK+2) + (threadIdx.x)] = tmp;
}
if(threadIdx.x < 2){
float tmp=0.f;
int ii_last = min(i+3, img_width) - i;
for(ii = 0; ii< ii_last; ii++){
tmp += HSOBEL_V[ii] * support[(threadIdx.y)*(THREADS_PER_BLK+2) + (THREADS_PER_BLK+threadIdx.x+ii)];
}
tmp_buf[(threadIdx.y )*(THREADS_PER_BLK+2) + (THREADS_PER_BLK+threadIdx.x)] = tmp;
}
if(threadIdx.x < 2 && threadIdx.y < 2){
float tmp=0.f;
int ii_last = min(i+3, img_width) - i;
for(ii = 0; ii< ii_last; ii++){
tmp += HSOBEL_V[ii] * support[(THREADS_PER_BLK+threadIdx.y)*(THREADS_PER_BLK+2) + (THREADS_PER_BLK+threadIdx.x+ii)];
}
tmp_buf[(THREADS_PER_BLK+threadIdx.y )*(THREADS_PER_BLK+2) + (THREADS_PER_BLK+threadIdx.x)] = tmp; // keep the intermediate as float, like the other branches
}
__syncthreads();
if(i<img_width && j< img_height) {
float tmp=0.f;
int jj_last = min(j+3, img_height) - j;
for(jj = 0; jj< jj_last; jj++){
tmp += HSOBEL_H[jj] * tmp_buf[(threadIdx.y+jj)*(THREADS_PER_BLK+2) + (threadIdx.x)];
}
//tmp = tmp_buf[(threadIdx.y)*(THREADS_PER_BLK+2) + (threadIdx.x)];
new_img[j*img_width + i] = (uint8_t)sqrt(tmp*tmp);
}
return;
}
void edge_detect_single(uint8_t* &old_img, int width, int height, int block_side, uint8_t* new_img, uint8_t* new_img_device, uint8_t* old_img_device) {
hipMemcpy(old_img_device, old_img, sizeof(uint8_t) * height * width*CHANNEL_NUM, hipMemcpyHostToDevice);
dim3 threadsPerBlock(block_side, block_side);
dim3 gridDim((width+threadsPerBlock.x-1)/threadsPerBlock.x, (height+threadsPerBlock.y-1)/threadsPerBlock.y);
hipLaunchKernelGGL(( single_kernel), dim3(gridDim), dim3(threadsPerBlock), 0, 0, old_img_device, new_img_device, HSOBEL, 3, 3, width, height);
hipMemcpy(new_img, new_img_device, sizeof(uint8_t) * height * width * CHANNEL_NUM, hipMemcpyDeviceToHost);
stbi_write_png("edge_single.png", width, height, CHANNEL_NUM, new_img, width*CHANNEL_NUM);
}
void edge_detect_shared(uint8_t* &old_img, int width, int height, int block_side, uint8_t* new_img, uint8_t* new_img_device, uint8_t* old_img_device) {
hipMemcpy(old_img_device, old_img, sizeof(uint8_t) * height * width*CHANNEL_NUM, hipMemcpyHostToDevice);
dim3 threadsPerBlock(THREADS_PER_BLK, THREADS_PER_BLK);
dim3 gridDim((width+threadsPerBlock.x-1)/threadsPerBlock.x, (height+threadsPerBlock.y-1)/threadsPerBlock.y);
hipLaunchKernelGGL(( shared_kernel), dim3(gridDim), dim3(threadsPerBlock), 0, 0, old_img_device, new_img_device, width, height);
hipMemcpy(new_img, new_img_device, sizeof(uint8_t) * height * width * CHANNEL_NUM, hipMemcpyDeviceToHost);
//stbi_write_png("edge_shared.png", width, height, CHANNEL_NUM, new_img, width*CHANNEL_NUM);
}
void edge_detect_shared_sep(uint8_t* &old_img, int width, int height, int block_side, uint8_t* new_img, uint8_t* new_img_device, uint8_t* old_img_device) {
hipMemcpy(old_img_device, old_img, sizeof(uint8_t) * height * width*CHANNEL_NUM, hipMemcpyHostToDevice);
dim3 threadsPerBlock(THREADS_PER_BLK, THREADS_PER_BLK);
dim3 gridDim((width+threadsPerBlock.x-1)/threadsPerBlock.x, (height+threadsPerBlock.y-1)/threadsPerBlock.y);
hipLaunchKernelGGL(( shared_sep_kernel), dim3(gridDim), dim3(threadsPerBlock), 0, 0, old_img_device, new_img_device, width, height);
hipMemcpy(new_img, new_img_device, sizeof(uint8_t) * height * width * CHANNEL_NUM, hipMemcpyDeviceToHost);
//stbi_write_png("edge_shared_sep.png", width, height, CHANNEL_NUM, new_img, width*CHANNEL_NUM);
}
// eg: ./edge /afs/andrew.cmu.edu/usr12/sbali/private/proj/images/building.jpg 32 <type>
// NOTE: the shared variants ignore the block-size argument; it is just a placeholder here
int main(int argc, char **argv){
const char *img_file = argv[1];
int block_side = atoi(argv[2]);
int width, height, bpp;
uint8_t* old_img_device;
uint8_t* new_img_device;
uint8_t* old_img = stbi_load(img_file, &width, &height, &bpp, CHANNEL_NUM);
uint8_t* new_img = (uint8_t*)malloc(sizeof(uint8_t) * height * width * CHANNEL_NUM);
hipMalloc(&new_img_device, sizeof(uint8_t) * height * width*CHANNEL_NUM );
hipMalloc(&old_img_device, sizeof(uint8_t) * height * width*CHANNEL_NUM );
//hipMalloc(&kernel_device, sizeof(float) * 9);
//hipMemcpy(kernel_device, HSOBEL, sizeof(float) * 9, hipMemcpyHostToDevice);
char type = argv[3][0];
double start_time_exc = currentSeconds();
for(int i=0; i<1; i++){
if(type=='n')
edge_detect_single(old_img, width, height, block_side, new_img, new_img_device, old_img_device);
else if(type=='s')
edge_detect_shared(old_img, width, height, block_side, new_img, new_img_device, old_img_device);
else if(type=='t')
edge_detect_shared_sep(old_img, width, height, block_side, new_img, new_img_device, old_img_device);
}
double end_time = currentSeconds();
double duration_exc = end_time - start_time_exc;
fprintf(stdout, "Time Without Startup: %f\n", duration_exc);
hipFree(new_img_device);
hipFree(old_img_device);
free(new_img);
stbi_image_free(old_img);
return 0;
}
| 67ad1b6cc951ab421e667f267f89e49bb9ecc8af.cu | /*
opts: ./edge /afs/andrew.cmu.edu/usr12/sbali/private/proj/images/building.jpg 32 <s|n>
*/
/*
do 5/10 images of increasing sizes for all
analyze on both shared, not shared
add timing without memory copying
on different block sizes
try using streams?
*/
/*
SOBEL (NOT SHARED)
SMALL
0.000424
0.000220
0.000171
0.000154
0.000162
0.000153
0.000154
MEDIUM
0.001142
0.000533
0.000367
0.000315
0.000314
0.000329
0.000329
LARGE
0.004808
0.002161
0.001374
0.001186
0.001193
0.001236
0.001200
SOBEL SHARED
SMALL
1- 0.000448
2- 0.000229
4- 0.000170
8- 0.000155
16- 0.000158
24- 0.000153
32- 0.000165
MEDIUM
1- 0.001256
2- 0.000550
4- 0.000358
8- 0.000317
16- 0.000323
24- 0.000333
32- 0.000328
LARGE
1- 0.005209
2- 0.001975
4- 0.001312
8- 0.002101
16- 0.001190
24- 0.001268
32- 0.001192
SOBEL SHARED DIFF
SMALL
1- 0.000578
2- 0.000276
4- 0.000211
8- 0.000207
16- 0.000200
24- 0.000229
32- 0.000202
MEDIUM
1- 0.001604
2-0.000636
4- 0.000395
8- 0.000367
16- 0.000353
24- 0.000361
32- 0.000349
LARGE
1- 0.006609
2- 0.002341
4- 0.001429
8- 0.001219
16-0.001221
24- 0.001327
32- 0.001236
*/
#include <unistd.h>
#include <algorithm>
#include <cstdlib>
#include <limits>
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#define STB_IMAGE_IMPLEMENTATION
#include "../../utils/stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "../../utils/stb_image_write.h"
#include "../../utils/cycletimer.h"
#define CHANNEL_NUM 1
#define THREADS_PER_BLK 32
__device__ __constant__ float HSOBEL_H[3] = {-1.0, 0, 1.0};
__device__ __constant__ float HSOBEL_V[3] = {1.0, 2.0, 1.0};
__device__ __constant__ float HSOBEL[9] = {-1.0, -2.0, -1.0, 0, 0, 0, 1.0, 2.0, 1.0};
__global__ void single_kernel(uint8_t* old_img, uint8_t* new_img, float kernel[9], int k_width, int k_height, int img_width, int img_height){
int i, j, jj, ii;
i = blockIdx.x * blockDim.x + threadIdx.x;
j = blockIdx.y * blockDim.y + threadIdx.y;
if(i>=img_width || j>= img_height) return;
float tmp=0.f;
int jj_last = min(j+k_height, img_height);
int ii_last = min(i+k_width, img_width);
for(jj = j; jj< jj_last; jj++){
for(ii = i; ii< ii_last; ii++){
tmp += HSOBEL[(jj-j) * k_width + (ii-i)] * (old_img[jj * img_width + ii]);
}
}
new_img[j*img_width + i] = (uint8_t)sqrt(tmp*tmp);
}
//shared requires constant
//assumes k_width, k_height = 3
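// Tiled variant: each block stages a (THREADS_PER_BLK+2) x (THREADS_PER_BLK+2)
// patch of the input in shared memory, i.e. the block's own pixels plus the
// two-pixel apron to the right and below that the 3x3 mask reads. Threads
// with threadIdx.x < 2 and/or threadIdx.y < 2 also fetch the extra border
// columns, rows, and the corner before all threads convolve from shared memory.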
__global__ void shared_kernel(uint8_t* old_img, uint8_t* new_img, int img_width, int img_height){
int i, j, jj, ii, support_id, img_id, n_img_id;
i = blockIdx.x * blockDim.x + threadIdx.x;
j = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ float support[(THREADS_PER_BLK+2)*(THREADS_PER_BLK+2)];
if(i<img_width && j< img_height) {
support_id = (THREADS_PER_BLK+2)*threadIdx.y + threadIdx.x;
img_id = img_width * j + i;
support[support_id] = old_img[img_id];
if(threadIdx.x < 2){
n_img_id = j*img_width + (THREADS_PER_BLK + i);
if(n_img_id<img_width * img_height)
support[(THREADS_PER_BLK+2)*threadIdx.y + (THREADS_PER_BLK + threadIdx.x)] = old_img[n_img_id];
}
if(threadIdx.y < 2){
n_img_id = (THREADS_PER_BLK + j)*img_width + i;
if(n_img_id<img_width * img_height)
support[(THREADS_PER_BLK+2)*(THREADS_PER_BLK + threadIdx.y) + (threadIdx.x)] = old_img[n_img_id];
}
if(threadIdx.x < 2 && threadIdx.y<2){
n_img_id = (THREADS_PER_BLK + j)*img_width + (THREADS_PER_BLK + i);
if(n_img_id<img_width * img_height)
support[(THREADS_PER_BLK+2)*(THREADS_PER_BLK + threadIdx.y) + (THREADS_PER_BLK + threadIdx.x)] = old_img[n_img_id];
}
}
__syncthreads();
if(i<img_width && j< img_height) {
float tmp=0.f;
int jj_last = min(j+3, img_height) - j;
int ii_last = min(i+3, img_width) - i;
for(jj = 0; jj< jj_last; jj++){
for(ii = 0; ii< ii_last; ii++){
tmp += HSOBEL[3*jj + ii] * support[(threadIdx.y+jj)*(THREADS_PER_BLK+2) + (threadIdx.x+ii)];
}
}
new_img[img_id] = (uint8_t)sqrt(tmp*tmp);
}
}
//shared requires constant
//assumes k_width, k_height = 3
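// Separable variant: the 3x3 Sobel mask is applied as a horizontal smoothing
// pass with {1,2,1} followed by a vertical derivative pass with {-1,0,1}.
// The row pass writes into tmp_buf in shared memory and the column pass reads
// it back, trading one 9-tap loop per pixel for two 3-tap loops.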
__global__ void shared_sep_kernel(uint8_t* old_img, uint8_t* new_img, int img_width, int img_height){
int i, j, jj, ii, support_id, img_id, n_img_id;
i = blockIdx.x * blockDim.x + threadIdx.x;
j = blockIdx.y * blockDim.y + threadIdx.y;
//
__shared__ float support[(THREADS_PER_BLK+2)*(THREADS_PER_BLK+2)];
__shared__ float tmp_buf[(THREADS_PER_BLK+2)*(THREADS_PER_BLK+2)];
if(i==0 && j==0){
printf("ENTERED");
}
if(i<img_width && j< img_height) {
support_id = (THREADS_PER_BLK+2)*threadIdx.y + threadIdx.x;
img_id = img_width * j + i;
support[support_id] = old_img[img_id];
if(threadIdx.x < 2){
n_img_id = j*img_width + (THREADS_PER_BLK + i);
if(n_img_id<img_width * img_height)
support[(THREADS_PER_BLK+2)*threadIdx.y + (THREADS_PER_BLK + threadIdx.x)] = old_img[n_img_id];
}
if(threadIdx.y < 2){
n_img_id = (THREADS_PER_BLK + j)*img_width + i;
if(n_img_id<img_width * img_height)
support[(THREADS_PER_BLK+2)*(THREADS_PER_BLK + threadIdx.y) + (threadIdx.x)] = old_img[n_img_id];
}
if(threadIdx.x < 2 && threadIdx.y<2){
n_img_id = (THREADS_PER_BLK + j)*img_width + (THREADS_PER_BLK + i);
if(n_img_id<img_width * img_height)
support[(THREADS_PER_BLK+2)*(THREADS_PER_BLK + threadIdx.y) + (THREADS_PER_BLK + threadIdx.x)] = old_img[n_img_id];
}
}
__syncthreads();
if(i<img_width && j< img_height) {
float tmp=0.f;
int ii_last = min(i+3, img_width) - i;
for(ii = 0; ii< ii_last; ii++){
tmp += HSOBEL_V[ii] * support[(threadIdx.y)*(THREADS_PER_BLK+2) + (threadIdx.x+ii)];
}
tmp_buf[(threadIdx.y)*(THREADS_PER_BLK+2) + threadIdx.x] = tmp;
}
if(threadIdx.y < 2){
float tmp=0.f;
int ii_last = min(i+3, img_width) - i;
for(ii = 0; ii< ii_last; ii++){
tmp += HSOBEL_V[ii] * support[(threadIdx.y+THREADS_PER_BLK)*(THREADS_PER_BLK+2) + (threadIdx.x+ii)];
}
tmp_buf[(threadIdx.y+THREADS_PER_BLK )*(THREADS_PER_BLK+2) + (threadIdx.x)] = tmp;
}
if(threadIdx.x < 2){
float tmp=0.f;
int ii_last = min(i+3, img_width) - i;
for(ii = 0; ii< ii_last; ii++){
tmp += HSOBEL_V[ii] * support[(threadIdx.y)*(THREADS_PER_BLK+2) + (THREADS_PER_BLK+threadIdx.x+ii)];
}
tmp_buf[(threadIdx.y )*(THREADS_PER_BLK+2) + (THREADS_PER_BLK+threadIdx.x)] = tmp;
}
if(threadIdx.x < 2 && threadIdx.y < 2){
float tmp=0.f;
int ii_last = min(i+3, img_width) - i;
for(ii = 0; ii< ii_last; ii++){
tmp += HSOBEL_V[ii] * support[(THREADS_PER_BLK+threadIdx.y)*(THREADS_PER_BLK+2) + (THREADS_PER_BLK+threadIdx.x+ii)];
}
tmp_buf[(THREADS_PER_BLK+threadIdx.y )*(THREADS_PER_BLK+2) + (THREADS_PER_BLK+threadIdx.x)] = tmp; // keep the intermediate as float, like the other branches
}
__syncthreads();
if(i<img_width && j< img_height) {
float tmp=0.f;
int jj_last = min(j+3, img_height) - j;
for(jj = 0; jj< jj_last; jj++){
tmp += HSOBEL_H[jj] * tmp_buf[(threadIdx.y+jj)*(THREADS_PER_BLK+2) + (threadIdx.x)];
}
//tmp = tmp_buf[(threadIdx.y)*(THREADS_PER_BLK+2) + (threadIdx.x)];
new_img[j*img_width + i] = (uint8_t)sqrt(tmp*tmp);
}
return;
}
void edge_detect_single(uint8_t* &old_img, int width, int height, int block_side, uint8_t* new_img, uint8_t* new_img_device, uint8_t* old_img_device) {
cudaMemcpy(old_img_device, old_img, sizeof(uint8_t) * height * width*CHANNEL_NUM, cudaMemcpyHostToDevice);
dim3 threadsPerBlock(block_side, block_side);
dim3 gridDim((width+threadsPerBlock.x-1)/threadsPerBlock.x, (height+threadsPerBlock.y-1)/threadsPerBlock.y);
single_kernel<<<gridDim, threadsPerBlock>>>(old_img_device, new_img_device, HSOBEL, 3, 3, width, height);
cudaMemcpy(new_img, new_img_device, sizeof(uint8_t) * height * width * CHANNEL_NUM, cudaMemcpyDeviceToHost);
stbi_write_png("edge_single.png", width, height, CHANNEL_NUM, new_img, width*CHANNEL_NUM);
}
void edge_detect_shared(uint8_t* &old_img, int width, int height, int block_side, uint8_t* new_img, uint8_t* new_img_device, uint8_t* old_img_device) {
cudaMemcpy(old_img_device, old_img, sizeof(uint8_t) * height * width*CHANNEL_NUM, cudaMemcpyHostToDevice);
dim3 threadsPerBlock(THREADS_PER_BLK, THREADS_PER_BLK);
dim3 gridDim((width+threadsPerBlock.x-1)/threadsPerBlock.x, (height+threadsPerBlock.y-1)/threadsPerBlock.y);
shared_kernel<<<gridDim, threadsPerBlock>>>(old_img_device, new_img_device, width, height);
cudaMemcpy(new_img, new_img_device, sizeof(uint8_t) * height * width * CHANNEL_NUM, cudaMemcpyDeviceToHost);
//stbi_write_png("edge_shared.png", width, height, CHANNEL_NUM, new_img, width*CHANNEL_NUM);
}
void edge_detect_shared_sep(uint8_t* &old_img, int width, int height, int block_side, uint8_t* new_img, uint8_t* new_img_device, uint8_t* old_img_device) {
cudaMemcpy(old_img_device, old_img, sizeof(uint8_t) * height * width*CHANNEL_NUM, cudaMemcpyHostToDevice);
dim3 threadsPerBlock(THREADS_PER_BLK, THREADS_PER_BLK);
dim3 gridDim((width+threadsPerBlock.x-1)/threadsPerBlock.x, (height+threadsPerBlock.y-1)/threadsPerBlock.y);
shared_sep_kernel<<<gridDim, threadsPerBlock>>>(old_img_device, new_img_device, width, height);
cudaMemcpy(new_img, new_img_device, sizeof(uint8_t) * height * width * CHANNEL_NUM, cudaMemcpyDeviceToHost);
//stbi_write_png("edge_shared_sep.png", width, height, CHANNEL_NUM, new_img, width*CHANNEL_NUM);
}
// eg: ./edge /afs/andrew.cmu.edu/usr12/sbali/private/proj/images/building.jpg 32 <type>
// NOTE: the shared variants ignore the block-size argument; it is just a placeholder here
int main(int argc, char **argv){
const char *img_file = argv[1];
int block_side = atoi(argv[2]);
int width, height, bpp;
uint8_t* old_img_device;
uint8_t* new_img_device;
uint8_t* old_img = stbi_load(img_file, &width, &height, &bpp, CHANNEL_NUM);
uint8_t* new_img = (uint8_t*)malloc(sizeof(uint8_t) * height * width * CHANNEL_NUM);
cudaMalloc(&new_img_device, sizeof(uint8_t) * height * width*CHANNEL_NUM );
cudaMalloc(&old_img_device, sizeof(uint8_t) * height * width*CHANNEL_NUM );
//cudaMalloc(&kernel_device, sizeof(float) * 9);
//cudaMemcpy(kernel_device, HSOBEL, sizeof(float) * 9, cudaMemcpyHostToDevice);
char type = argv[3][0];
double start_time_exc = currentSeconds();
for(int i=0; i<1; i++){
if(type=='n')
edge_detect_single(old_img, width, height, block_side, new_img, new_img_device, old_img_device);
else if(type=='s')
edge_detect_shared(old_img, width, height, block_side, new_img, new_img_device, old_img_device);
else if(type=='t')
edge_detect_shared_sep(old_img, width, height, block_side, new_img, new_img_device, old_img_device);
}
double end_time = currentSeconds();
double duration_exc = end_time - start_time_exc;
fprintf(stdout, "Time Without Startup: %f\n", duration_exc);
cudaFree(new_img_device);
cudaFree(old_img_device);
free(new_img);
stbi_image_free(old_img);
return 0;
}
|
6b0d5e86ea69ad3e133943526139fbd61ed894f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Winter Semester 2015/2016, March 15 - April 15
// ###
// ###
#include "helper.h"
#include <iostream>
#include <stdio.h>
using namespace std;
// uncomment to use the camera
// #define CAMERA
// clamp an index to the min and max values specified
int clamp(int idx, int min, int max);
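// Builds a kernel_size x kernel_size Gaussian: each weight is
// exp(-d^2 / (2*sigma^2)), where d is the distance from the pixel center
// (i+0.5, j+0.5) to the kernel center, and the weights are then normalized
// so that they sum to 1.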
float* gaussian_kernel(int kernel_size, float sigma) {
float *kernel = new float[kernel_size * kernel_size];
float mid = (float)kernel_size/2.f; // coordinate value of the center of the kernel
float dist_sq;
float norm_sum = 0; // normalization factor
for (int i = 0; i < kernel_size; i++) {
for (int j = 0; j < kernel_size; j++) {
dist_sq = powf((float)i + 0.5 - mid, 2) + powf((float)j + 0.5 - mid, 2);
kernel[i + kernel_size * j] = expf( - dist_sq / (2*powf(sigma, 2)) );
norm_sum += kernel[i + kernel_size * j];
}
}
for (int i = 0; i < kernel_size; i++) {
for (int j = 0; j < kernel_size; j++) {
kernel[i + kernel_size * j] /= norm_sum;
// cout << kernel[i + kernel_size *j] << endl;
}
}
return kernel;
}
void convolution(float *imgIn, float *imgOut, float *kernel, int w, int h, int nc, int ks) {
int img_x, img_y;
// for every channel
for (int c = 0; c < nc; c++) {
// for every pixel in the image
for (int i = 0; i < w; i++){
for (int j = 0; j < h; j++) {
// for every pixel in the kernel
for (int k = 0; k < ks; k++) {
for (int l = 0; l < ks; l++) {
img_x = clamp(i + k - (ks/2 + 1), 0, w-1);
img_y = clamp(j + l - (ks/2 + 1), 0, h-1);
imgOut[i + w*j + w*h*c] += imgIn[img_x + w*img_y + w*h*c] * kernel[k + ks*l];
}
}
}
}
}
}
// imgIn CUDA texture has to be declared before any used
texture <float,2,hipReadModeElementType> texRef_imgIn; // at file scope
// constant memory must have fixed size at compile time
// #define KERNEL_SIZE = 1681 // (2*20 + 1)*(2*20 + 1)
__constant__ float constKernel[1681];
__global__ void gpu_convolution(float *imgIn, float *imgOut, float *kernel, int w, int h, int nc, int ks, bool constant_kernel) {
// indexes for the kernel
int img_x, img_y;
// calculate center pixel corresponding to thread
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
// for every channel
for (int c = 0; c < nc; ++c) {
// for every pixel in the kernel
for (int k = 0; k < ks; ++k) {
for (int l = 0; l < ks; ++l) {
img_x = min(w-1, max(0, x + k - (ks/2 + 1)));
img_y = min(h-1, max(0, y + l - (ks/2 + 1)));
if (x < w && y < h) {
if (constant_kernel) {
imgOut[x + w*y + w*h*c] += imgIn[img_x + w*img_y + w*h*c] * constKernel[k + ks*l];
} else {
imgOut[x + w*y + w*h*c] += imgIn[img_x + w*img_y + w*h*c] * kernel[k + ks*l];
}
}
}
}
}
}
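// Shared-memory convolution: each block cooperatively stages a
// (blockDim.x + ks - 1) x (blockDim.y + ks - 1) tile (block plus kernel apron)
// in dynamically allocated shared memory. The tile holds more pixels than the
// block has threads, so every thread performs num_loads strided copies before
// the block synchronizes and convolves one channel at a time.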
__global__ void gpu_convolution_sharedmem(float *imgIn, float *imgOut, float *kernel, int w, int h, int nc, int ks, bool constant_kernel) { // with imgIn and kernel in global
// image indexes under a mask
int img_x, img_y;
// calculate main pixel corresponding to thread
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
// calculate thread indexes in block coordinates
int xblock = threadIdx.x;
int yblock = threadIdx.y;
int lin_threadIdx = xblock + blockDim.x * yblock; // index in a linearized array of indexes
// allocate shared array
extern __shared__ float sh_img[];
//load array values
// dimensions of the shared-memory tile sh_img (block size plus the kernel apron)
int shblock_w = blockDim.x + ks - 1;
int shblock_h = blockDim.y + ks - 1;
int shblock_size = shblock_w * shblock_h;
int shblock_topleft_x = blockDim.x * blockIdx.x - ks/2; // sometimes negative
int shblock_topleft_y = blockDim.y * blockIdx.y - ks/2;
// number of threads in the block
int num_threads = blockDim.x * blockDim.y;
int num_loads = (shblock_size + num_threads - 1) / num_threads;
// shared block coordinates
int x_sh, y_sh;
int idx_sh;
//for every channel
for (int c = 0; c < nc; c++) {
// each thread loads some data
for (int l = 0; l < num_loads; l++) {
idx_sh = lin_threadIdx + l*num_threads;
if (idx_sh < shblock_size) {
// if (c == 0 && blockIdx.x == 0 && blockIdx.y == 0) printf("%d %d %d \n", num_threads, shblock_size, idx_sh);
// if (c == 0 && blockIdx.x == 0 && blockIdx.y == 0) printf("xblock: %d yblock: %d blockDim.y: %d thread: %d \n", xblock, yblock, blockDim.y, lin_threadIdx);
img_x = min(w-1, max(0, shblock_topleft_x + idx_sh % shblock_w));
img_y = min(h-1, max(0, shblock_topleft_y + idx_sh / shblock_w));
sh_img[idx_sh] = imgIn[img_x + img_y*w + c*w*h]; // imgIn in global
}
}
// wait for all to finish copying
__syncthreads();
// for every pixel in the kernel
for (int k = 0; k < ks; ++k) {
for (int l = 0; l < ks; ++l) {
x_sh = xblock + k;
y_sh = yblock + l;
if (x < w && y < h) {
if (constant_kernel) {
imgOut[x + w*y + w*h*c] += sh_img[x_sh + y_sh*shblock_w] * constKernel[k + ks*l];
} else {
imgOut[x + w*y + w*h*c] += sh_img[x_sh + y_sh*shblock_w] * kernel[k + ks*l];
}
}
}
}
// wait for the channel ops to finish
__syncthreads();
}
}
__global__ void gpu_convolution_texturemem(float *imgOut, float *kernel, int w, int h, int nc, int ks, bool constant_kernel) {
// indexes for the kernel
int img_x, img_y;
// calculate center pixel corresponding to thread
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
// for every channel
for (int c = 0; c < nc; ++c) {
// for every pixel in the kernel
for (int k = 0; k < ks; ++k) {
for (int l = 0; l < ks; ++l) {
img_x = min(w-1, max(0, x + k - (ks/2 + 1)));
img_y = min(h-1, max(0, y + l - (ks/2 + 1)));
if (x < w && y < h) {
if (constant_kernel)
imgOut[x + w*y + w*h*c] += tex2D(texRef_imgIn, img_x + 0.5f, img_y + c*h + 0.5f) * constKernel[k + ks*l]; // imgIn in texture. 0.5 to get the center of the pixel
else
imgOut[x + w*y + w*h*c] += tex2D(texRef_imgIn, img_x + 0.5f, img_y + c*h + 0.5f) * kernel[k + ks*l]; // imgIn in texture. 0.5 to get the center of the pixel
}
}
}
}
}
int main(int argc, char **argv)
{
// Before the GPU can process your kernels, a so called "CUDA context" must be initialized
// This happens on the very first call to a CUDA function, and takes some time (around half a second)
// We will do it right here, so that the run time measurements are accurate
hipDeviceSynchronize(); CUDA_CHECK;
// Reading command line parameters:
// getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var"
// If "-param" is not specified, the value of "var" remains unchanged
//
// return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise
#ifdef CAMERA
#else
// input image
string image = "";
bool ret = getParam("i", image, argc, argv);
if (!ret) cerr << "ERROR: no image specified" << endl;
if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; }
#endif
// number of computation repetitions to get a better run time measurement
int repeats = 1;
getParam("repeats", repeats, argc, argv);
cout << "repeats: " << repeats << endl;
// load the input image as grayscale if "-gray" is specifed
bool gray = false;
getParam("gray", gray, argc, argv);
cout << "gray: " << gray << endl;
// ### Define your own parameters here as needed
// load the value for sigma if "-sigma" is specified
float sigma = 15;
getParam("sigma", sigma, argc, argv);
cout << "sigma: " << sigma << " with kernel size of 2*ceil(3*sigma) + 1" << endl;
int kernel_size = 2*ceil(3*sigma) + 1; // directly defined by sigma
// load the kernel into constant memory if "-constant_kernel" is specified
bool constant_kernel = false;
getParam("constant_kernel", constant_kernel, argc, argv);
cout << "constant_kernel: " << constant_kernel << endl;
if (constant_kernel) cout << "warning! constant_kernel only has enough memory for 3*sigma <= 20" << endl;
// load the input image into texture memory if "-texture_imgin" is specified
bool texture_imgin = false;
getParam("texture_imgin", texture_imgin, argc, argv);
cout << "texture_imgin: " << texture_imgin << endl;
// stage the input image tile in shared memory if "-shared_imgin" is specified
bool shared_imgin = false;
getParam("shared_imgin", shared_imgin, argc, argv);
cout << "shared_imgin: " << shared_imgin << endl;
cout << "--------------" << endl; // save our eyes
// Init camera / Load input image
#ifdef CAMERA
// Init camera
cv::VideoCapture camera(0);
if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; }
int camW = 640;
int camH = 480;
camera.set(CV_CAP_PROP_FRAME_WIDTH,camW);
camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH);
// read in first frame to get the dimensions
cv::Mat mIn;
camera >> mIn;
#else
// Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1));
// check
if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; }
#endif
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
cout << "image: " << w << " x " << h << endl;
// Set the output image format
// ###
// ###
// ### TODO: Change the output image format as needed
// ###
// ###
cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers
//cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers
//cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer
// ### Define your own output images here as needed
// Set the OpenCV kernel display image
cv::Mat mKer(kernel_size, kernel_size, CV_32FC1);
// GPU Output image
cv::Mat mOutGPU(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers
// Allocate arrays
// input/output image width: w
// input/output image height: h
// input image number of channels: nc
// output image number of channels: mOut.channels(), as defined above (nc, 3, or 1)
// allocate raw input image array
float *imgIn = new float[(size_t)w*h*nc];
// allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying)
float *imgOut = new float[(size_t)w*h*mOut.channels()];
// allocate raw output array for the GPU
float *imgOutGPU = new float[(size_t)w*h*mOut.channels()];
// For camera mode: Make a loop to read in camera frames
#ifdef CAMERA
// Read a camera image frame every 30 milliseconds:
// cv::waitKey(30) waits 30 milliseconds for a keyboard input,
// returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed
while (cv::waitKey(30) < 0)
{
// Get camera image
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
#endif
// Init raw input image array
// opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...)
// But for CUDA it's better to work with layered images: rrr... ggg... bbb...
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations
convert_mat_to_layered (imgIn, mIn);
// define kernel through CPU function
float *kernel = gaussian_kernel(kernel_size, sigma);
#ifndef CAMERA
// CPU time
Timer timer; timer.start();
// ###
// ###
convolution(imgIn, imgOut, kernel, w, h, nc, kernel_size);
// cout << "-----------" << endl;
// for (int i = 0; i < kernel_size; i++) {
// for (int j = 0; j < kernel_size; j++) {
// cout << kernel[i + kernel_size *j] << endl;
// }
// }
// ###
// ###
timer.end(); float t = timer.get(); // elapsed time in seconds
cout << "CPU time: " << t*1000 << " ms" << endl;
#endif
// GPU time
Timer timerg; timerg.start();
// ###
// ###
// initialize device memory
float *d_kernel = NULL;
float *d_imgIn = NULL;
float *d_imgOut = NULL;
hipMalloc( &d_kernel, kernel_size*kernel_size*sizeof(float) ); CUDA_CHECK;
hipMalloc( &d_imgIn, w*h*nc*sizeof(float) ); CUDA_CHECK;
hipMalloc( &d_imgOut, w*h*nc*sizeof(float) ); CUDA_CHECK;
// fill device imgOut with 0s to be able to add values directly to it
hipMemset( d_imgOut, 0, w*h*nc*sizeof(float) ); CUDA_CHECK;
// copy imgIn to device global memory
hipMemcpy( d_imgIn, imgIn, w*h*nc*sizeof(float), hipMemcpyHostToDevice ); CUDA_CHECK;
// bind imgIn in global memory to texture memory
if (texture_imgin) {
texRef_imgIn.addressMode[0] = hipAddressModeClamp;
texRef_imgIn.addressMode[1] = hipAddressModeClamp;
texRef_imgIn.filterMode = hipFilterModeLinear; // linear interpolation
texRef_imgIn.normalized = false; // Set whether coordinates are normalized to [0,1)
hipChannelFormatDesc desc = hipCreateChannelDesc<float>(); // number of bits for each texture channel
hipBindTexture2D(NULL, &texRef_imgIn, d_imgIn, &desc, w, h*nc, w*sizeof(d_imgIn[0]));
}
// put kernel in some device memory
if (constant_kernel) {
hipMemcpyToSymbol (constKernel, kernel, kernel_size*kernel_size*sizeof(float)); CUDA_CHECK; // kernel in constant
} else {
cout << "here!" << endl;
hipMemcpy( d_kernel, kernel, kernel_size*kernel_size*sizeof(float), hipMemcpyHostToDevice ); CUDA_CHECK; // kernel in global
}
// launch kernel
dim3 block = dim3(32,8,1);
cout << "Blocksize: " << block.x << "x" << block.y << endl;
dim3 grid = dim3( (w + block.x -1)/block.x, (h + block.y -1)/block.y, 1);
size_t smBytes = (block.x + kernel_size - 1) * (block.y + kernel_size - 1) * sizeof(float); // only per channel. Take advantage through loop
cout << "Shared memory bytes: " << smBytes << endl;
// WARNING
if (texture_imgin && shared_imgin)
cout << "!!! Enabling both texture and shared options results in running with texture" << endl;
if (texture_imgin) {
hipLaunchKernelGGL(( gpu_convolution_texturemem) , dim3(grid),dim3(block), 0, 0, d_imgOut, d_kernel, w, h, nc, kernel_size, constant_kernel); CUDA_CHECK; // with imgIn and kernel in global
} else if (shared_imgin) { // shared memory
hipLaunchKernelGGL(( gpu_convolution_sharedmem) , dim3(grid),dim3(block),smBytes, 0, d_imgIn, d_imgOut, d_kernel, w, h, nc, kernel_size, constant_kernel); CUDA_CHECK; // with imgIn and kernel in global
} else {
hipLaunchKernelGGL(( gpu_convolution) , dim3(grid),dim3(block),smBytes, 0, d_imgIn, d_imgOut, d_kernel, w, h, nc, kernel_size, constant_kernel); CUDA_CHECK; // with imgIn and kernel in global
}
// copy to host
hipMemcpy( imgOutGPU, d_imgOut, w*h*nc*sizeof(float), hipMemcpyDeviceToHost ); CUDA_CHECK;
// ###
// ###
timerg.end(); float tg = timerg.get(); // elapsed time in seconds
#ifndef CAMERA
cout << "GPU time: " << tg*1000 << " ms" << endl;
#endif
// free memory
hipFree( d_kernel );
hipFree( d_imgIn );
hipFree( d_imgOut );
// show input image
showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100)
#ifndef CAMERA
// show output image: first convert to interleaved opencv format from the layered raw array
convert_layered_to_mat(mOut, imgOut);
showImage("CPU Output", mOut, 100+5+kernel_size+5+w+40, 100);
#endif
// show GPU output image: first convert to interleaved opencv format from the layered raw array
convert_layered_to_mat(mOutGPU, imgOutGPU);
showImage("GPU Output", mOutGPU, 100+5+kernel_size+5+w+40+w+40, 100);
// ### Display your own output images here as needed
// show kernel image
convert_layered_to_mat(mKer, kernel);
// double min, max;
// cv::minMaxLoc(mKer, &min, &max);
showImage("Kernel", mKer/kernel[kernel_size*kernel_size/2], 100+w+5, 100); // mKer is upscaled with its largest value for visualization
#ifdef CAMERA
// end of camera loop
}
#else
// wait for key inputs
cv::waitKey(0);
#endif
// save input and result
cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255]
cv::imwrite("image_result.png",mOut*255.f);
// free allocated arrays
delete[] imgIn;
delete[] imgOut;
delete[] imgOutGPU;
delete[] kernel;
// close all opencv windows
cvDestroyAllWindows();
return 0;
}
// clamp an index to the min and max values specified
int clamp(int idx, int minval, int maxval) {
// int clamped_idx = idx;
// if (idx < min) clamped_idx = min;
// else if (idx > max) clamped_idx = max;
// return clamped_idx;
return min(maxval, max(idx, minval));
}
| 6b0d5e86ea69ad3e133943526139fbd61ed894f4.cu | // ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Winter Semester 2015/2016, March 15 - April 15
// ###
// ###
#include "helper.h"
#include <iostream>
#include <stdio.h>
using namespace std;
// uncomment to use the camera
// #define CAMERA
// clamp an index to the min and max values specified
int clamp(int idx, int min, int max);
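// Builds a kernel_size x kernel_size Gaussian: each weight is
// exp(-d^2 / (2*sigma^2)), where d is the distance from the pixel center
// (i+0.5, j+0.5) to the kernel center, and the weights are then normalized
// so that they sum to 1.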
float* gaussian_kernel(int kernel_size, float sigma) {
float *kernel = new float[kernel_size * kernel_size];
float mid = (float)kernel_size/2.f; // coordinate value of the center of the kernel
float dist_sq;
float norm_sum = 0; // normalization factor
for (int i = 0; i < kernel_size; i++) {
for (int j = 0; j < kernel_size; j++) {
dist_sq = powf((float)i + 0.5 - mid, 2) + powf((float)j + 0.5 - mid, 2);
kernel[i + kernel_size * j] = expf( - dist_sq / (2*powf(sigma, 2)) );
norm_sum += kernel[i + kernel_size * j];
}
}
for (int i = 0; i < kernel_size; i++) {
for (int j = 0; j < kernel_size; j++) {
kernel[i + kernel_size * j] /= norm_sum;
// cout << kernel[i + kernel_size *j] << endl;
}
}
return kernel;
}
void convolution(float *imgIn, float *imgOut, float *kernel, int w, int h, int nc, int ks) {
int img_x, img_y;
// for every channel
for (int c = 0; c < nc; c++) {
// for every pixel in the image
for (int i = 0; i < w; i++){
for (int j = 0; j < h; j++) {
// for every pixel in the kernel
for (int k = 0; k < ks; k++) {
for (int l = 0; l < ks; l++) {
img_x = clamp(i + k - (ks/2 + 1), 0, w-1);
img_y = clamp(j + l - (ks/2 + 1), 0, h-1);
imgOut[i + w*j + w*h*c] += imgIn[img_x + w*img_y + w*h*c] * kernel[k + ks*l];
}
}
}
}
}
}
// imgIn CUDA texture has to be declared before any used
texture <float,2,cudaReadModeElementType> texRef_imgIn; // at file scope
// constant memory must have fixed size at compile time
// #define KERNEL_SIZE = 1681 // (2*20 + 1)*(2*20 + 1)
__constant__ float constKernel[1681];
__global__ void gpu_convolution(float *imgIn, float *imgOut, float *kernel, int w, int h, int nc, int ks, bool constant_kernel) {
// indexes for the kernel
int img_x, img_y;
// calculate center pixel corresponding to thread
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
// for every channel
for (int c = 0; c < nc; ++c) {
// for every pixel in the kernel
for (int k = 0; k < ks; ++k) {
for (int l = 0; l < ks; ++l) {
img_x = min(w-1, max(0, x + k - (ks/2 + 1)));
img_y = min(h-1, max(0, y + l - (ks/2 + 1)));
if (x < w && y < h) {
if (constant_kernel) {
imgOut[x + w*y + w*h*c] += imgIn[img_x + w*img_y + w*h*c] * constKernel[k + ks*l];
} else {
imgOut[x + w*y + w*h*c] += imgIn[img_x + w*img_y + w*h*c] * kernel[k + ks*l];
}
}
}
}
}
}
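// Shared-memory convolution: each block cooperatively stages a
// (blockDim.x + ks - 1) x (blockDim.y + ks - 1) tile (block plus kernel apron)
// in dynamically allocated shared memory. The tile holds more pixels than the
// block has threads, so every thread performs num_loads strided copies before
// the block synchronizes and convolves one channel at a time.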
__global__ void gpu_convolution_sharedmem(float *imgIn, float *imgOut, float *kernel, int w, int h, int nc, int ks, bool constant_kernel) { // with imgIn and kernel in global
// image indexes under a mask
int img_x, img_y;
// calculate main pixel corresponding to thread
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
// calculate thread indexes in block coordinates
int xblock = threadIdx.x;
int yblock = threadIdx.y;
int lin_threadIdx = xblock + blockDim.x * yblock; // index in a linearized array of indexes
// allocate shared array
extern __shared__ float sh_img[];
//load array values
// dimensions of the shared-memory tile sh_img (block size plus the kernel apron)
int shblock_w = blockDim.x + ks - 1;
int shblock_h = blockDim.y + ks - 1;
int shblock_size = shblock_w * shblock_h;
int shblock_topleft_x = blockDim.x * blockIdx.x - ks/2; // sometimes negative
int shblock_topleft_y = blockDim.y * blockIdx.y - ks/2;
// number of threads in the block
int num_threads = blockDim.x * blockDim.y;
int num_loads = (shblock_size + num_threads - 1) / num_threads;
// shared block coordinates
int x_sh, y_sh;
int idx_sh;
//for every channel
for (int c = 0; c < nc; c++) {
// each thread loads some data
for (int l = 0; l < num_loads; l++) {
idx_sh = lin_threadIdx + l*num_threads;
if (idx_sh < shblock_size) {
// if (c == 0 && blockIdx.x == 0 && blockIdx.y == 0) printf("%d %d %d \n", num_threads, shblock_size, idx_sh);
// if (c == 0 && blockIdx.x == 0 && blockIdx.y == 0) printf("xblock: %d yblock: %d blockDim.y: %d thread: %d \n", xblock, yblock, blockDim.y, lin_threadIdx);
img_x = min(w-1, max(0, shblock_topleft_x + idx_sh % shblock_w));
img_y = min(h-1, max(0, shblock_topleft_y + idx_sh / shblock_w));
sh_img[idx_sh] = imgIn[img_x + img_y*w + c*w*h]; // imgIn in global
}
}
// wait for all to finish copying
__syncthreads();
// for every pixel in the kernel
for (int k = 0; k < ks; ++k) {
for (int l = 0; l < ks; ++l) {
x_sh = xblock + k;
y_sh = yblock + l;
if (x < w && y < h) {
if (constant_kernel) {
imgOut[x + w*y + w*h*c] += sh_img[x_sh + y_sh*shblock_w] * constKernel[k + ks*l];
} else {
imgOut[x + w*y + w*h*c] += sh_img[x_sh + y_sh*shblock_w] * kernel[k + ks*l];
}
}
}
}
// wait for the channel ops to finish
__syncthreads();
}
}
__global__ void gpu_convolution_texturemem(float *imgOut, float *kernel, int w, int h, int nc, int ks, bool constant_kernel) {
// indexes for the kernel
int img_x, img_y;
// calculate center pixel corresponding to thread
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
// for every channel
for (int c = 0; c < nc; ++c) {
// for every pixel in the kernel
for (int k = 0; k < ks; ++k) {
for (int l = 0; l < ks; ++l) {
img_x = min(w-1, max(0, x + k - (ks/2 + 1)));
img_y = min(h-1, max(0, y + l - (ks/2 + 1)));
if (x < w && y < h) {
if (constant_kernel)
imgOut[x + w*y + w*h*c] += tex2D(texRef_imgIn, img_x + 0.5f, img_y + c*h + 0.5f) * constKernel[k + ks*l]; // imgIn in texture. 0.5 to get the center of the pixel
else
imgOut[x + w*y + w*h*c] += tex2D(texRef_imgIn, img_x + 0.5f, img_y + c*h + 0.5f) * kernel[k + ks*l]; // imgIn in texture. 0.5 to get the center of the pixel
}
}
}
}
}
int main(int argc, char **argv)
{
// Before the GPU can process your kernels, a so called "CUDA context" must be initialized
// This happens on the very first call to a CUDA function, and takes some time (around half a second)
// We will do it right here, so that the run time measurements are accurate
cudaDeviceSynchronize(); CUDA_CHECK;
// Reading command line parameters:
// getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var"
// If "-param" is not specified, the value of "var" remains unchanged
//
// return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise
#ifdef CAMERA
#else
// input image
string image = "";
bool ret = getParam("i", image, argc, argv);
if (!ret) cerr << "ERROR: no image specified" << endl;
if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; }
#endif
// number of computation repetitions to get a better run time measurement
int repeats = 1;
getParam("repeats", repeats, argc, argv);
cout << "repeats: " << repeats << endl;
// load the input image as grayscale if "-gray" is specifed
bool gray = false;
getParam("gray", gray, argc, argv);
cout << "gray: " << gray << endl;
// ### Define your own parameters here as needed
// load the value for sigma if "-sigma" is specified
float sigma = 15;
getParam("sigma", sigma, argc, argv);
cout << "sigma: " << sigma << " with kernel size of 2*ceil(3*sigma) + 1" << endl;
int kernel_size = 2*ceil(3*sigma) + 1; // directly defined by sigma
// load the kernel into constant memory if "-constant_kernel" is specified
bool constant_kernel = false;
getParam("constant_kernel", constant_kernel, argc, argv);
cout << "constant_kernel: " << constant_kernel << endl;
if (constant_kernel) cout << "warning! constant_kernel only has enough memory for 3*sigma <= 20" << endl;
// load the input image into texture memory if "-texture_imgin" is specified
bool texture_imgin = false;
getParam("texture_imgin", texture_imgin, argc, argv);
cout << "texture_imgin: " << texture_imgin << endl;
// load the input image into texture memory if "-texture_imgin" is specified
bool shared_imgin = false;
getParam("shared_imgin", shared_imgin, argc, argv);
cout << "shared_imgin: " << shared_imgin << endl;
cout << "--------------" << endl; // save our eyes
// Init camera / Load input image
#ifdef CAMERA
// Init camera
cv::VideoCapture camera(0);
if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; }
int camW = 640;
int camH = 480;
camera.set(CV_CAP_PROP_FRAME_WIDTH,camW);
camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH);
// read in first frame to get the dimensions
cv::Mat mIn;
camera >> mIn;
#else
// Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1));
// check
if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; }
#endif
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
cout << "image: " << w << " x " << h << endl;
// Set the output image format
// ###
// ###
// ### TODO: Change the output image format as needed
// ###
// ###
cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers
//cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers
//cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer
// ### Define your own output images here as needed
// Set the OpenCV kernel display image
cv::Mat mKer(kernel_size, kernel_size, CV_32FC1);
// GPU Output image
cv::Mat mOutGPU(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers
// Allocate arrays
// input/output image width: w
// input/output image height: h
// input image number of channels: nc
// output image number of channels: mOut.channels(), as defined above (nc, 3, or 1)
// allocate raw input image array
float *imgIn = new float[(size_t)w*h*nc];
// allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying)
float *imgOut = new float[(size_t)w*h*mOut.channels()];
// allocate raw output array for the GPU
float *imgOutGPU = new float[(size_t)w*h*mOut.channels()];
// For camera mode: Make a loop to read in camera frames
#ifdef CAMERA
// Read a camera image frame every 30 milliseconds:
// cv::waitKey(30) waits 30 milliseconds for a keyboard input,
// returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed
while (cv::waitKey(30) < 0)
{
// Get camera image
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
#endif
// Init raw input image array
// opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...)
// But for CUDA it's better to work with layered images: rrr... ggg... bbb...
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations
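// layered indexing used throughout: channel c of pixel (x,y) lives at imgIn[x + w*y + w*h*c]; OpenCV's interleaved layout stores the same value at (x + w*y)*nc + c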
convert_mat_to_layered (imgIn, mIn);
// define kernel through CPU function
float *kernel = gaussian_kernel(kernel_size, sigma);
#ifndef CAMERA
// CPU time
Timer timer; timer.start();
// ###
// ###
convolution(imgIn, imgOut, kernel, w, h, nc, kernel_size);
// cout << "-----------" << endl;
// for (int i = 0; i < kernel_size; i++) {
// for (int j = 0; j < kernel_size; j++) {
// cout << kernel[i + kernel_size *j] << endl;
// }
// }
// ###
// ###
timer.end(); float t = timer.get(); // elapsed time in seconds
cout << "CPU time: " << t*1000 << " ms" << endl;
#endif
// GPU time
Timer timerg; timerg.start();
// ###
// ###
// initialize device memory
float *d_kernel = NULL;
float *d_imgIn = NULL;
float *d_imgOut = NULL;
cudaMalloc( &d_kernel, kernel_size*kernel_size*sizeof(float) ); CUDA_CHECK;
cudaMalloc( &d_imgIn, w*h*nc*sizeof(float) ); CUDA_CHECK;
cudaMalloc( &d_imgOut, w*h*nc*sizeof(float) ); CUDA_CHECK;
// fill device imgOut with 0s to be able to add values directly to it
cudaMemset( d_imgOut, 0, w*h*nc*sizeof(float) ); CUDA_CHECK;
// copy imgIn to device global memory
cudaMemcpy( d_imgIn, imgIn, w*h*nc*sizeof(float), cudaMemcpyHostToDevice ); CUDA_CHECK;
// bind imgIn in global memory to texture memory
if (texture_imgin) {
texRef_imgIn.addressMode[0] = cudaAddressModeClamp;
texRef_imgIn.addressMode[1] = cudaAddressModeClamp;
texRef_imgIn.filterMode = cudaFilterModeLinear; // linear interpolation
texRef_imgIn.normalized = false; // Set whether coordinates are normalized to [0,1)
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>(); // number of bits for each texture channel
cudaBindTexture2D(NULL, &texRef_imgIn, d_imgIn, &desc, w, h*nc, w*sizeof(d_imgIn[0]));
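// the nc channels of the layered image are stacked vertically: the texture is w texels wide, h*nc tall, with a row pitch of w*sizeof(float) bytes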
}
// put kernel in some device memory
if (constant_kernel) {
cudaMemcpyToSymbol (constKernel, kernel, kernel_size*kernel_size*sizeof(float)); CUDA_CHECK; // kernel in constant
} else {
cout << "copying the convolution kernel to device global memory" << endl;
cudaMemcpy( d_kernel, kernel, kernel_size*kernel_size*sizeof(float), cudaMemcpyHostToDevice ); CUDA_CHECK; // kernel in global
}
// launch kernel
dim3 block = dim3(32,8,1);
cout << "Blocksize: " << block.x << "x" << block.y << endl;
dim3 grid = dim3( (w + block.x -1)/block.x, (h + block.y -1)/block.y, 1);
size_t smBytes = (block.x + kernel_size - 1) * (block.y + kernel_size - 1) * sizeof(float); // only per channel. Take advantage through loop
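// each output pixel reads a kernel_size x kernel_size neighborhood, so the per-channel tile is the block extended by kernel_size-1 in each dimension (a halo of kernel_size/2 on every side)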
cout << "Shared memory bytes: " << smBytes << endl;
// WARNING
if (texture_imgin && shared_imgin)
cout << "!!! Enabling both texture and shared options results in running with texture" << endl;
if (texture_imgin) {
gpu_convolution_texturemem <<<grid,block>>> (d_imgOut, d_kernel, w, h, nc, kernel_size, constant_kernel); CUDA_CHECK; // with imgIn and kernel in global
} else if (shared_imgin) { // shared memory
gpu_convolution_sharedmem <<<grid,block,smBytes>>> (d_imgIn, d_imgOut, d_kernel, w, h, nc, kernel_size, constant_kernel); CUDA_CHECK; // with imgIn and kernel in global
} else {
gpu_convolution <<<grid,block,smBytes>>> (d_imgIn, d_imgOut, d_kernel, w, h, nc, kernel_size, constant_kernel); CUDA_CHECK; // with imgIn and kernel in global
}
// copy to host
cudaMemcpy( imgOutGPU, d_imgOut, w*h*nc*sizeof(float), cudaMemcpyDeviceToHost ); CUDA_CHECK;
// ###
// ###
timerg.end(); float tg = timerg.get(); // elapsed time in seconds
#ifndef CAMERA
cout << "GPU time: " << tg*1000 << " ms" << endl;
#endif
// free memory
cudaFree( d_kernel );
cudaFree( d_imgIn );
cudaFree( d_imgOut );
// show input image
showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100)
#ifndef CAMERA
// show output image: first convert to interleaved opencv format from the layered raw array
convert_layered_to_mat(mOut, imgOut);
showImage("CPU Output", mOut, 100+5+kernel_size+5+w+40, 100);
#endif
// show GPU output image: first convert to interleaved opencv format from the layered raw array
convert_layered_to_mat(mOutGPU, imgOutGPU);
showImage("GPU Output", mOutGPU, 100+5+kernel_size+5+w+40+w+40, 100);
// ### Display your own output images here as needed
// show kernel image
convert_layered_to_mat(mKer, kernel);
// double min, max;
// cv::minMaxLoc(mKer, &min, &max);
showImage("Kernel", mKer/kernel[kernel_size*kernel_size/2], 100+w+5, 100); // mKer is divided by its largest (center) value so the kernel is visible
#ifdef CAMERA
// end of camera loop
}
#else
// wait for key inputs
cv::waitKey(0);
#endif
// save input and result
cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255]
cv::imwrite("image_result.png",mOut*255.f);
// free allocated arrays
delete[] imgIn;
delete[] imgOut;
// close all opencv windows
cvDestroyAllWindows();
return 0;
}
// clamp an index to the min and max values specified
int clamp(int idx, int minval, int maxval) {
// int clamped_idx = idx;
// if (idx < min) clamped_idx = min;
// else if (idx > max) clamped_idx = max;
// return clamped_idx;
return min(maxval, max(idx, minval));
}
|
ad38a934084fa8a6f368b783889e750243da1b6a.hip | // !!! This is a file automatically generated by hipify!!!
#define CUB_STDERR // print CUDA runtime errors to console
#include <stdio.h>
#include <hipcub/hipcub.hpp>
#include "cub/util_debug.cuh"
using namespace cub;
CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory
int main() {
const size_t num_items = 10;
// Set up host arrays
int h_in[num_items] = { 2, 3, -1, 0, 3, 6, 7, 2, -2, 0 };
int sum = 0;
for (unsigned int i = 0; i < num_items; i++)
sum += h_in[i];
// Set up device arrays
int* d_in = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)& d_in, sizeof(int) * num_items));
// Initialize device input
CubDebugExit(hipMemcpy(d_in, h_in, sizeof(int) * num_items, hipMemcpyHostToDevice));
// Setup device output array
int* d_sum = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)& d_sum, sizeof(int) * 1));
// Request and allocate temporary storage
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
CubDebugExit(DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_sum, num_items));
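// with d_temp_storage == NULL this first call is only a size query: it writes the required byte count into temp_storage_bytes and performs no reduction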
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
// Do the actual reduce operation
CubDebugExit(DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_sum, num_items));
int gpu_sum;
CubDebugExit(hipMemcpy(&gpu_sum, d_sum, sizeof(int) * 1, hipMemcpyDeviceToHost));
// Check for correctness
printf("\t%s\n", (gpu_sum == sum ? "Test passed." : "Test failed."));
printf("\tSum is: %d\n", gpu_sum);
// Cleanup
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_sum) CubDebugExit(g_allocator.DeviceFree(d_sum));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
return 0;
}
| ad38a934084fa8a6f368b783889e750243da1b6a.cu | #define CUB_STDERR // print CUDA runtime errors to console
#include <stdio.h>
#include <cub/util_allocator.cuh>
#include <cub/device/device_reduce.cuh>
#include "cub/util_debug.cuh"
using namespace cub;
CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory
int main() {
const size_t num_items = 10;
// Set up host arrays
int h_in[num_items] = { 2, 3, -1, 0, 3, 6, 7, 2, -2, 0 };
int sum = 0;
for (unsigned int i = 0; i < num_items; i++)
sum += h_in[i];
// Set up device arrays
int* d_in = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)& d_in, sizeof(int) * num_items));
// Initialize device input
CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(int) * num_items, cudaMemcpyHostToDevice));
// Setup device output array
int* d_sum = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)& d_sum, sizeof(int) * 1));
// Request and allocate temporary storage
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
CubDebugExit(DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_sum, num_items));
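// with d_temp_storage == NULL this first call is only a size query: it writes the required byte count into temp_storage_bytes and performs no reduction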
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
// Do the actual reduce operation
CubDebugExit(DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_sum, num_items));
int gpu_sum;
CubDebugExit(cudaMemcpy(&gpu_sum, d_sum, sizeof(int) * 1, cudaMemcpyDeviceToHost));
// Check for correctness
printf("\t%s\n", (gpu_sum == sum ? "Test passed." : "Test failed."));
printf("\tSum is: %d\n", gpu_sum);
// Cleanup
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_sum) CubDebugExit(g_allocator.DeviceFree(d_sum));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
return 0;
}
|
ssymmetrize.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/zsymmetrize.cu normal z -> s, Tue Feb 9 16:05:33 2016
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
/*
Matrix is m x m, and is divided into block rows, each NB x m.
Each block has NB threads.
Each thread copies one row, iterating across all columns below diagonal.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
__global__ void
ssymmetrize_lower( int m, float *dA, int ldda )
{
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
float *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
float *dAend = dA + i*ldda;
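// column-major layout with leading dimension ldda: dA now points at A(i,0) and steps along row i (stride ldda), while dAT points at A(0,i) and steps down column i; the walk stops at the diagonal A(i,i)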
while( dA < dAend ) {
*dAT = MAGMA_S_CONJ(*dA); // upper := lower
dA += ldda;
dAT += 1;
}
}
}
// only difference with _lower version is direction dA=dAT instead of dAT=dA.
__global__ void
ssymmetrize_upper( int m, float *dA, int ldda )
{
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
float *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
float *dAend = dA + i*ldda;
while( dA < dAend ) {
*dA = MAGMA_S_CONJ(*dAT); // lower := upper
dA += ldda;
dAT += 1;
}
}
}
/**
Purpose
-------
SSYMMETRIZE copies lower triangle to upper triangle, or vice-versa,
to make dA a general representation of a symmetric matrix.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA that is valid on input.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in,out]
dA REAL array, dimension (LDDA,N)
The m by m matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_ssymmetrize_q(
magma_uplo_t uplo, magma_int_t m,
magmaFloat_ptr dA, magma_int_t ldda,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( ldda < max(1,m) )
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 )
return;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
if ( uplo == MagmaUpper ) {
hipLaunchKernelGGL(( ssymmetrize_upper), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda );
}
else {
hipLaunchKernelGGL(( ssymmetrize_lower), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda );
}
}
| ssymmetrize.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/zsymmetrize.cu normal z -> s, Tue Feb 9 16:05:33 2016
@author Mark Gates
*/
#include "magma_internal.h"
#define NB 64
/*
Matrix is m x m, and is divided into block rows, each NB x m.
Each block has NB threads.
Each thread copies one row, iterating across all columns below diagonal.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
__global__ void
ssymmetrize_lower( int m, float *dA, int ldda )
{
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
float *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
float *dAend = dA + i*ldda;
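// column-major layout with leading dimension ldda: dA now points at A(i,0) and steps along row i (stride ldda), while dAT points at A(0,i) and steps down column i; the walk stops at the diagonal A(i,i)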
while( dA < dAend ) {
*dAT = MAGMA_S_CONJ(*dA); // upper := lower
dA += ldda;
dAT += 1;
}
}
}
// only difference with _lower version is direction dA=dAT instead of dAT=dA.
__global__ void
ssymmetrize_upper( int m, float *dA, int ldda )
{
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
float *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
float *dAend = dA + i*ldda;
while( dA < dAend ) {
*dA = MAGMA_S_CONJ(*dAT); // lower := upper
dA += ldda;
dAT += 1;
}
}
}
/**
Purpose
-------
SSYMMETRIZE copies lower triangle to upper triangle, or vice-versa,
to make dA a general representation of a symmetric matrix.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA that is valid on input.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in,out]
dA REAL array, dimension (LDDA,N)
The m by m matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_saux2
********************************************************************/
extern "C" void
magmablas_ssymmetrize_q(
magma_uplo_t uplo, magma_int_t m,
magmaFloat_ptr dA, magma_int_t ldda,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( ldda < max(1,m) )
info = -4;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 )
return;
dim3 threads( NB );
dim3 grid( magma_ceildiv( m, NB ) );
if ( uplo == MagmaUpper ) {
ssymmetrize_upper<<< grid, threads, 0, queue->cuda_stream() >>>( m, dA, ldda );
}
else {
ssymmetrize_lower<<< grid, threads, 0, queue->cuda_stream() >>>( m, dA, ldda );
}
}
|
1ae2f6d13fc456f1e0c47606fb0080a847fc2bcb.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <pthread.h>
#include <unistd.h>
#include <iostream>
#include <list>
#include <vector>
#include <stdio.h>
#include <time.h>
//#include "../usecases/MapReduce.hpp"
//#include "../communication/Message.hpp"
//#include "../function/Function.hpp"
//#include "../partitioning/Partition.hpp"
//#include "../serialization/Serialization.hpp"
using namespace std;
__global__ void sum(char *g_idata, long long *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = 4*(blockIdx.x * blockDim.x + threadIdx.x);
// convert global data pointer to the local pointer of this block
char *idata = g_idata + 4*(blockIdx.x * blockDim.x);
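// g_idata is a raw byte buffer holding 4-byte ints, hence the *4 offsets and the memcpy loads/stores below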
// boundary check
if(idx >= n) return;
long long v1,v2,v3;
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (tid < stride)
{
memcpy(&v1, idata+tid*4, sizeof(int));
memcpy(&v2, idata+(tid+stride)*4, sizeof(int));
v3 = v1+v2;
//printf("%d,%d,%d \n", v1,v2,v3);
memcpy(idata+tid*4,&v3,sizeof(int));
//idata[tid] += idata[tid + stride];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0)
memcpy(g_odata+blockIdx.x,idata,sizeof(int));
}
long long cpu(char *h, const size_t n){
long long sum=0;
for(int i=0;i<n;i+=4){
int value=0;
memcpy(&value, h+i, sizeof(int));
sum+= value;
}
return sum;
}
int main(){
clock_t start = clock();
int nEle = 416000000;
size_t nBytes = nEle*sizeof(int);
int i=0;
char * h,*d;
long long *g_result;
h=(char *)malloc(nBytes);
hipMalloc((void **)&d, nBytes);
g_result=(long long *)malloc(nBytes);
int m =0;
for(i=0;i<nBytes ;i+=4){
m=rand() % (100-10)+ 10;
memcpy(h+i, &m, sizeof(int));
}
cout<<endl;
//for(i=0;i<nBytes ;i+=4){
// int value=0;
// memcpy(&value, h+i, sizeof(int));
//
//}
cout<<endl;
cout<<endl;
clock_t v2arr = clock();
double translatency = (v2arr - start )/ (double) CLOCKS_PER_SEC*1000 ; //batch latency calculation
cout<<"array conversion time: "<<translatency<<" ms"<<endl;
cout<<cpu(h,nBytes )<<endl;
clock_t cpu = clock();
double cpulatency = (cpu - v2arr )/ (double) CLOCKS_PER_SEC*1000 ; //batch latency calculation
cout<<"CPU time: "<<cpulatency<<" ms"<<endl;
v2arr = clock();
translatency = (v2arr - cpu )/ (double) CLOCKS_PER_SEC*1000 ; //batch latency calculation
cout<<"array conversion time: "<<translatency<<" ms"<<endl;
hipMemcpy(d, h, nBytes, hipMemcpyHostToDevice);
clock_t copy = clock();
double copylatency = (copy - v2arr )/ (double) CLOCKS_PER_SEC*1000 ; //batch latency calculation
cout<<"H2D copy time: "<<copylatency<<" ms"<<endl;
int blocksize = 256;
dim3 block (blocksize, 1);
dim3 grid ((nEle + block.x - 1) / block.x, 1);
long long *d_odata ;
hipMalloc((void **) &d_odata, grid.x * sizeof(long long));
hipMemset(d_odata, 0, grid.x * sizeof(long long)); // the kernel writes only the low 4 bytes of each partial sum, so zero the rest first
hipLaunchKernelGGL(( sum), dim3(grid), dim3(block), 0, 0, d,d_odata, nBytes );
hipDeviceSynchronize();
clock_t process = clock();
double processlatency = (process - copy )/ (double) CLOCKS_PER_SEC*1000 ; //batch latency calculation
cout<<"kernel time: "<<processlatency<<" ms"<<endl;
hipMemcpy(g_result, d_odata, grid.x * sizeof(long long), hipMemcpyDeviceToHost);
long long gpu_sum = 0;
for (int i = 0; i < grid.x; i++){
gpu_sum+=g_result[i];
}
cout<<gpu_sum<<endl;
clock_t DTH = clock();
double DTHlatency = (DTH - process )/ (double) CLOCKS_PER_SEC*1000 ; //batch latency calculation
cout<<"DTH copy time: "<<DTHlatency<<" ms"<<endl;
double batchlatency = (clock() - start )/ (double) CLOCKS_PER_SEC*1000 ; //batch latency calculation
cout<<"GPU total time: "<<batchlatency<<" ms"<<endl;
return 0;
}
| 1ae2f6d13fc456f1e0c47606fb0080a847fc2bcb.cu | #include <cuda_runtime.h>
#include <pthread.h>
#include <unistd.h>
#include <iostream>
#include <list>
#include <vector>
#include <stdio.h>
#include <time.h>
//#include "../usecases/MapReduce.hpp"
//#include "../communication/Message.hpp"
//#include "../function/Function.hpp"
//#include "../partitioning/Partition.hpp"
//#include "../serialization/Serialization.hpp"
using namespace std;
__global__ void sum(char *g_idata, long long *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = 4*(blockIdx.x * blockDim.x + threadIdx.x);
// convert global data pointer to the local pointer of this block
char *idata = g_idata + 4*(blockIdx.x * blockDim.x);
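// g_idata is a raw byte buffer holding 4-byte ints, hence the *4 offsets and the memcpy loads/stores below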
// boundary check
if(idx >= n) return;
long long v1,v2,v3;
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (tid < stride)
{
memcpy(&v1, idata+tid*4, sizeof(int));
memcpy(&v2, idata+(tid+stride)*4, sizeof(int));
v3 = v1+v2;
//printf("%d,%d,%d \n", v1,v2,v3);
memcpy(idata+tid*4,&v3,sizeof(int));
//idata[tid] += idata[tid + stride];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0)
memcpy(g_odata+blockIdx.x,idata,sizeof(int));
}
long long cpu(char *h, const size_t n){
long long sum=0;
for(int i=0;i<n;i+=4){
int value=0;
memcpy(&value, h+i, sizeof(int));
sum+= value;
}
return sum;
}
int main(){
clock_t start = clock();
int nEle = 416000000;
size_t nBytes = nEle*sizeof(int);
int i=0;
char * h,*d;
long long *g_result;
h=(char *)malloc(nBytes);
cudaMalloc((void **)&d, nBytes);
g_result=(long long *)malloc(nBytes);
int m =0;
for(i=0;i<nBytes ;i+=4){
m=rand() % (100-10)+ 10;
memcpy(h+i, &m, sizeof(int));
}
cout<<endl;
//for(i=0;i<nBytes ;i+=4){
// int value=0;
// memcpy(&value, h+i, sizeof(int));
//
//}
cout<<endl;
cout<<endl;
clock_t v2arr = clock();
double translatency = (v2arr - start )/ (double) CLOCKS_PER_SEC*1000 ; //batch latency calculation
cout<<"array conversion time: "<<translatency<<" ms"<<endl;
cout<<cpu(h,nBytes )<<endl;
clock_t cpu = clock();
double cpulatency = (cpu - v2arr )/ (double) CLOCKS_PER_SEC*1000 ; //batch latency calculation
cout<<"CPU time: "<<cpulatency<<" ms"<<endl;
v2arr = clock();
translatency = (v2arr - cpu )/ (double) CLOCKS_PER_SEC*1000 ; //batch latency calculation
cout<<"array conversion time: "<<translatency<<" ms"<<endl;
cudaMemcpy(d, h, nBytes, cudaMemcpyHostToDevice);
clock_t copy = clock();
double copylatency = (copy - v2arr )/ (double) CLOCKS_PER_SEC*1000 ; //batch latency calculation
cout<<"H2D copy time: "<<copylatency<<" ms"<<endl;
int blocksize = 256;
dim3 block (blocksize, 1);
dim3 grid ((nEle + block.x - 1) / block.x, 1);
long long *d_odata ;
cudaMalloc((void **) &d_odata, grid.x * sizeof(long long));
cudaMemset(d_odata, 0, grid.x * sizeof(long long)); // the kernel writes only the low 4 bytes of each partial sum, so zero the rest first
sum<<<grid, block>>>(d,d_odata, nBytes );
cudaDeviceSynchronize();
clock_t process = clock();
double processlatency = (process - copy )/ (double) CLOCKS_PER_SEC*1000 ; //batch latency calculation
cout<<"kernel time: "<<processlatency<<" ms"<<endl;
cudaMemcpy(g_result, d_odata, grid.x * sizeof(long long), cudaMemcpyDeviceToHost);
long long gpu_sum = 0;
for (int i = 0; i < grid.x; i++){
gpu_sum+=g_result[i];
}
cout<<gpu_sum<<endl;
clock_t DTH = clock();
double DTHlatency = (DTH - process )/ (double) CLOCKS_PER_SEC*1000 ; //batch latency calculation
cout<<"DTH copy time: "<<DTHlatency<<" ms"<<endl;
double batchlatency = (clock() - start )/ (double) CLOCKS_PER_SEC*1000 ; //batch latency calculation
cout<<"GPU total time: "<<batchlatency<<" ms"<<endl;
return 0;
}
|
305879ce4ad02ce6ce28e3c9a0b79ba784b06777.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
}
__global__ void reduce_sum_partial(const float* input, float* output, unsigned int len) {
// from http://www.techdarting.com/2014/06/parallel-reduction-in-cuda.html
// Load a segment of the input vector into shared memory
__shared__ float partialSum[2*256];
int globalThreadId = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int t = threadIdx.x;
unsigned int start = 2*blockIdx.x*blockDim.x;
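// each block reduces a segment of 2*blockDim.x elements: thread t loads input[start + t] and input[start + blockDim.x + t], padding with 0.0 past the end of the array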
if ((start + t) < len)
{
partialSum[t] = input[start + t];
}
else
{
partialSum[t] = 0.0;
}
if ((start + blockDim.x + t) < len)
{
partialSum[blockDim.x + t] = input[start + blockDim.x + t];
}
else
{
partialSum[blockDim.x + t] = 0.0;
}
// Traverse reduction tree
for (unsigned int stride = blockDim.x; stride > 0; stride /= 2)
{
__syncthreads();
if (t < stride)
partialSum[t] += partialSum[t + stride];
}
__syncthreads();
// Write the computed sum of the block to the output vector at correct index
if (t == 0 && (globalThreadId*2) < len)
{
output[blockIdx.x] = partialSum[t];
}
} | 305879ce4ad02ce6ce28e3c9a0b79ba784b06777.cu | #include "includes.h"
extern "C" {
}
__global__ void reduce_sum_partial(const float* input, float* output, unsigned int len) {
// from http://www.techdarting.com/2014/06/parallel-reduction-in-cuda.html
// Load a segment of the input vector into shared memory
__shared__ float partialSum[2*256];
int globalThreadId = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int t = threadIdx.x;
unsigned int start = 2*blockIdx.x*blockDim.x;
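// each block reduces a segment of 2*blockDim.x elements: thread t loads input[start + t] and input[start + blockDim.x + t], padding with 0.0 past the end of the array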
if ((start + t) < len)
{
partialSum[t] = input[start + t];
}
else
{
partialSum[t] = 0.0;
}
if ((start + blockDim.x + t) < len)
{
partialSum[blockDim.x + t] = input[start + blockDim.x + t];
}
else
{
partialSum[blockDim.x + t] = 0.0;
}
// Traverse reduction tree
for (unsigned int stride = blockDim.x; stride > 0; stride /= 2)
{
__syncthreads();
if (t < stride)
partialSum[t] += partialSum[t + stride];
}
__syncthreads();
// Write the computed sum of the block to the output vector at correct index
if (t == 0 && (globalThreadId*2) < len)
{
output[blockIdx.x] = partialSum[t];
}
} |
5757f853dad51d0b177be88e6a76a566d265f21a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "stencilReadOnly3.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
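// benchmark sweep: every (matrix size, block shape) pair below is timed over 1000 kernel launches after a warm-up; there is no device synchronization inside the timed loop, so the figure mostly reflects launch/submission time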
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *src = NULL;
hipMalloc(&src, XSIZE*YSIZE);
float *dst = NULL;
hipMalloc(&dst, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int raio = 1;
float *stencilWeight = NULL;
hipMalloc(&stencilWeight, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((stencilReadOnly3), dim3(gridBlock), dim3(threadBlock), 0, 0, src, dst, size, raio, stencilWeight);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((stencilReadOnly3), dim3(gridBlock), dim3(threadBlock), 0, 0, src, dst, size, raio, stencilWeight);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((stencilReadOnly3), dim3(gridBlock), dim3(threadBlock), 0, 0, src, dst, size, raio, stencilWeight);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 5757f853dad51d0b177be88e6a76a566d265f21a.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "stencilReadOnly3.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
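// benchmark sweep: every (matrix size, block shape) pair below is timed over 1000 kernel launches after a warm-up; there is no device synchronization inside the timed loop, so the figure mostly reflects launch/submission time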
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *src = NULL;
cudaMalloc(&src, XSIZE*YSIZE);
float *dst = NULL;
cudaMalloc(&dst, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int raio = 1;
float *stencilWeight = NULL;
cudaMalloc(&stencilWeight, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
stencilReadOnly3<<<gridBlock,threadBlock>>>(src,dst,size,raio,stencilWeight);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
stencilReadOnly3<<<gridBlock,threadBlock>>>(src,dst,size,raio,stencilWeight);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
stencilReadOnly3<<<gridBlock,threadBlock>>>(src,dst,size,raio,stencilWeight);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
6b9906ab466b322e526a75ff3f6a51ff371beb94.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Based on the work of Andrew Krepps
#include <iostream>
#include <random>
#include <chrono>
#include <stdio.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
__constant__ static const int VAL_A = 1;
__constant__ static const int VAL_B = 3;
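// __constant__ variables live in the GPU's cached, read-only constant memory; these two are statically initialized and read by executeConstantMathOperations below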
enum gpu_tests_enum
{
GLOBAL,
SHARED,
CONSTANT,
REGISTER,
STREAM,
THRUST,
NUM_GPU_TESTS
};
gpu_tests_enum& operator++(gpu_tests_enum& e)
{
return e = (e == NUM_GPU_TESTS) ? GLOBAL : static_cast<gpu_tests_enum>(static_cast<int>(e)+1);
}
std::string gpu_tests_strings[NUM_GPU_TESTS] = {
"Global",
"Shared",
"Constant",
"Register",
"Stream",
"Thrust"};
// Global GPU add c[i] = a[i] + b[i]
__global__ void addGlob(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] + b[thread_idx];
}
// Global GPU subtract c[i] = a[i] - b[i]
__global__ void subtractGlob(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] - b[thread_idx];
}
// Global GPU multiply c[i] = a[i] * b[i]
__global__ void multGlob(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] * b[thread_idx];
}
// Global GPU div c[i] = a[i] / b[i]
__global__ void divGlob(int *a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] / b[thread_idx];
}
// Global GPU mod c[i] = a[i] % b[i]
__global__ void modGlob(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] % b[thread_idx];
}
// Device GPU add c[i] = a[i] + b[i]
__device__ void add(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] + b[thread_idx];
}
// Device GPU subtract c[i] = a[i] - b[i]
__device__ void subtract(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] - b[thread_idx];
}
// Device GPU multiply c[i] = a[i] * b[i]
__device__ void mult(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] * b[thread_idx];
}
// Device GPU div c[i] = a[i] / b[i]
__device__ void div(int *a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] / b[thread_idx];
}
// Device GPU mod c[i] = a[i] % b[i]
__device__ void mod(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] % b[thread_idx];
}
// Device GPU add in register c[i] = a[i] + b[i]
__device__ void addReg(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int tempA = a[thread_idx];
int tempB = b[thread_idx];
int tempResult = tempA + tempB;
c[thread_idx] = tempResult;
}
// Device GPU subtract in register c[i] = a[i] - b[i]
__device__ void subtractReg(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int tempA = a[thread_idx];
int tempB = b[thread_idx];
int tempResult = tempA - tempB;
c[thread_idx] = tempResult;
}
// Device GPU multiply in register c[i] = a[i] * b[i]
__device__ void multReg(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int tempA = a[thread_idx];
int tempB = b[thread_idx];
int tempResult = tempA * tempB;
c[thread_idx] = tempResult;
}
// Device GPU div in register c[i] = a[i] / b[i]
__device__ void divReg(int *a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int tempA = a[thread_idx];
int tempB = b[thread_idx];
int tempResult = tempA / tempB;
c[thread_idx] = tempResult;
}
// Device GPU mod in register c[i] = a[i] % b[i]
__device__ void modReg(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int tempA = a[thread_idx];
int tempB = b[thread_idx];
int tempResult = tempA % tempB;
c[thread_idx] = tempResult;
}
// Executes all 5 shared math operations
__global__ void executeSharedMathOperations(int * a, int * b, int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
extern __shared__ int sharedMem[];
// Use offsets in the shared mem to create arrays.
int * sharedA = &sharedMem[0];
int * sharedB = &sharedMem[size];
int * sharedRet = &sharedMem[2*size];
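// the dynamic shared allocation (3 * size ints, passed as the third kernel launch parameter) is partitioned into the two inputs plus one scratch result buffer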
sharedA[tid] = a[tid];
sharedB[tid] = b[tid];
// Add sharedA to sharedB and store in addDest
add(sharedA, sharedB, sharedRet);
addDest[tid] = sharedRet[tid];
// Subtract sharedB from sharedA and store in subDest
subtract(sharedA, sharedB, sharedRet);
subDest[tid] = sharedRet[tid];
// Multiply sharedA by sharedB and store in multDest
mult(sharedA, sharedB, sharedRet);
multDest[tid] = sharedRet[tid];
// Divide sharedA by sharedB and store in divDest
div(sharedA, sharedB, sharedRet);
divDest[tid] = sharedRet[tid];
// Mod sharedA by sharedB and store in modDest
mod(sharedA, sharedB, sharedRet);
modDest[tid] = sharedRet[tid];
}
// Executes all 5 global math operations
__global__ void executeGlobalMathOperations(int * a, int * b, int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
// Add a to b and store in addDest
add(a, b, addDest);
// Subtract b from a and store in subDest
subtract(a, b, subDest);
// Multiply a by b and store in multDest
mult(a, b, multDest);
// Divide a by b and store in divDest
div(a, b, divDest);
// Mod a by b and store in modDest
mod(a, b, modDest);
}
// Executes all 5 register math operations
__global__ void executeRegisterMathOperations(int * a, int * b, int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
// Add a to b and store in addDest
addReg(a, b, addDest);
// Subtract b from a and store in subDest
subtractReg(a, b, subDest);
// Multiply a by b and store in multDest
multReg(a, b, multDest);
// Divide a by b and store in divDest
divReg(a, b, divDest);
// Mod a by b and store in modDest
modReg(a, b, modDest);
}
// Executes all 5 constant math operations
__global__ void executeConstantMathOperations(int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
// Add VAL_A to VAL_B and store in addDest
addDest[tid] = VAL_A + VAL_B;
// Subtract b from a and store in subDest
subDest[tid] = VAL_A - VAL_B;
// Multiply a by b and store in multDest
multDest[tid] = VAL_A * VAL_B;
// Divide a by b and store in divDest
divDest[tid] = VAL_A / VAL_B; // B is chosen to not be 0.
// Mod a by b and store in modDest
modDest[tid] = VAL_A % VAL_B; // B is chosen to not be 0.
}
// Host (Cpu) add c[i] = a[i] + b[i]
void hostAdd(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] + b[i];
}
}
// Host (Cpu) sub c[i] = a[i] - b[i]
void hostSub(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] - b[i];
}
}
// Host (Cpu) multiply c[i] = a[i] * b[i]
void hostMult(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] * b[i];
}
}
// Host (Cpu) divide c[i] = a[i] / b[i]
void hostDiv(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
if (b[i] != 0)
{
c[i] = a[i] / b[i];
}
else
{
c[i] = 0;
}
}
}
// Host (Cpu) mod c[i] = a[i] % b[i]
void hostMod(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
// Protect against divide by 0.
// on the GPU, integer division/modulo by zero does not trap and the result is unspecified, so guard explicitly and use 0 to keep CPU and GPU results comparable.
if (b[i] == 0)
{
c[i] = 0;
}
else
{
c[i] = a[i] % b[i];
}
}
}
// Executes each of the host (cpu) tests by creating local memory and executing all 5 math operations on the data.
// The data is filled with random numbers that uses the same seed as the GPU tests.
void executeHostTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int a[totalThreads], b[totalThreads], c[totalThreads];
// Create a random generate that will generate random numbers from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
// Add all of the numbers c[i] = a[i] + b[i];
hostAdd(a,b,c, totalThreads);
// Subtract all of the numbers c[i] = a[i] - b[i];
hostSub(a,b,c, totalThreads);
// Multiply all of the numbers c[i] = a[i] * b[i];
hostMult(a,b,c, totalThreads);
// Divides all of the numbers c[i] = a[i] / b[i]; if b[i] == 0, c[i] = 0
hostDiv(a,b,c, totalThreads);
// Mod all of the numbers c[i] = a[i] % b[i];
hostMod(a,b,c, totalThreads);
}
// Executes a streams test, which is similar to the GPU tests below except here we make use
// of CUDA streams and allocate/deallocate memory in an asynchronous fashion.
// The data is filled with random numbers that uses the same seed as the CPU tests.
void executeStreamTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int a[totalThreads], b[totalThreads], add_dest[totalThreads], sub_dest[totalThreads], mult_dest[totalThreads], div_dest[totalThreads], mod_dest[totalThreads];
int *gpu_a, *gpu_b, *gpu_add_dest, *gpu_sub_dest, *gpu_mult_dest, *gpu_div_dest, *gpu_mod_dest;
hipStream_t stream;
hipStreamCreate(&stream);
hipMalloc((void**)&gpu_a, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_b, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_add_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_sub_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_mult_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_div_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_mod_dest, totalThreads * sizeof(int));
// Create a random generate that will generate random numbers from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
// Here we will now copy memory asynchronously and call each of the global version of the math
// methods using a stream. This will allow the stream to do its own calculation of how these
// methods should be executed.
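// everything below is issued to a single stream, so the copies and kernels execute in issue order on the device while the host thread keeps going; hipStreamSynchronize further down blocks until the final device-to-host copy has completed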
hipMemcpyAsync(gpu_a, a, totalThreads * sizeof(int), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(gpu_b, b, totalThreads * sizeof(int), hipMemcpyHostToDevice, stream);
// Asynchronously add and then copy memory to host.
hipLaunchKernelGGL(( addGlob), dim3(numBlocks), dim3(blockSize), 0, stream, gpu_a, gpu_b, gpu_add_dest);
hipMemcpyAsync(add_dest, gpu_add_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost, stream);
// Asynchronously subtract and then copy memory to host.
hipLaunchKernelGGL(( subtractGlob), dim3(numBlocks), dim3(blockSize), 0, stream, gpu_a, gpu_b, gpu_sub_dest);
hipMemcpyAsync(sub_dest, gpu_sub_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost, stream);
// Asynchronously multiply and then copy memory to host.
hipLaunchKernelGGL(( multGlob), dim3(numBlocks), dim3(blockSize), 0, stream, gpu_a, gpu_b, gpu_mult_dest);
hipMemcpyAsync(mult_dest, gpu_mult_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost, stream);
// Asynchronously divide and then copy memory to host.
hipLaunchKernelGGL(( divGlob), dim3(numBlocks), dim3(blockSize), 0, stream, gpu_a, gpu_b, gpu_div_dest);
hipMemcpyAsync(div_dest, gpu_div_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost, stream);
// Asynchronously modulous and then copy memory to host.
hipLaunchKernelGGL(( modGlob), dim3(numBlocks), dim3(blockSize), 0, stream, gpu_a, gpu_b, gpu_mod_dest);
hipMemcpyAsync(mod_dest, gpu_mod_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost, stream);
hipStreamSynchronize(stream);
hipFree(gpu_a);
hipFree(gpu_b);
hipFree(gpu_add_dest);
hipFree(gpu_sub_dest);
hipFree(gpu_mult_dest);
hipFree(gpu_div_dest);
hipFree(gpu_mod_dest);
}
// Executes a thrust test, which is similar to the GPU tests below except here we make use
// of CUDA thrust
// The data is filled with random numbers that uses the same seed as the CPU tests.
void executeThrustTest(const int totalThreads, const int blockSize, const int numBlocks)
{
// Create host vectors
thrust::host_vector<int> a(totalThreads);
thrust::host_vector<int> b(totalThreads);
thrust::host_vector<int> add_dest(totalThreads);
thrust::host_vector<int> sub_dest(totalThreads);
thrust::host_vector<int> mult_dest(totalThreads);
thrust::host_vector<int> div_dest(totalThreads);
thrust::host_vector<int> mod_dest(totalThreads);
// Create device vectors
thrust::device_vector<int> gpu_a(totalThreads);
thrust::device_vector<int> gpu_b(totalThreads);
thrust::device_vector<int> gpu_add_dest(totalThreads);
thrust::device_vector<int> gpu_sub_dest(totalThreads);
thrust::device_vector<int> gpu_mult_dest(totalThreads);
thrust::device_vector<int> gpu_div_dest(totalThreads);
thrust::device_vector<int> gpu_mod_dest(totalThreads);
// Create a random generate that will generate random numbers from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
// Here we will now copy the vectors from the host to device
gpu_a = a;
gpu_b = b;
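// note: indexing a thrust::device_vector from host code performs a separate device transfer per element access, so the element-wise loops below are functionally correct but slow; a thrust::transform with a binary functor would do each operation in a single kernel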
// add and then copy memory to host.
for (int i = 0; i < totalThreads; ++i)
{
gpu_add_dest[i] = gpu_a[i] + gpu_b[i];
}
add_dest = gpu_add_dest;
// subtract and then copy memory to host.
for (int i = 0; i < totalThreads; ++i)
{
gpu_sub_dest[i] = gpu_a[i] - gpu_b[i];
}
sub_dest = gpu_sub_dest;
// multiply and then copy memory to host.
for (int i = 0; i < totalThreads; ++i)
{
gpu_mult_dest[i] = gpu_a[i] * gpu_b[i];
}
mult_dest = gpu_mult_dest;
// divide and then copy memory to host.
for (int i = 0; i < totalThreads; ++i)
{
if (gpu_b[i] != 0)
{
gpu_div_dest[i] = gpu_a[i] / gpu_b[i];
}
else
{
gpu_div_dest[i] = 0;
}
}
div_dest = gpu_div_dest;
// modulous and then copy memory to host.
for (int i = 0; i < totalThreads; ++i)
{
if (gpu_b[i] != 0)
{
gpu_mod_dest[i] = gpu_a[i] % gpu_b[i];
}
else
{
gpu_mod_dest[i] = 0;
}
}
mod_dest = gpu_mod_dest;
}
// Executes each of the gpu tests by creating local memory, copying it global memory, and then performing
// all 5 math operations on the data.
// The data is filled with random numbers that uses the same seed as the CPU tests.
void executeGPUTest(const int totalThreads, const int blockSize, const int numBlocks, const gpu_tests_enum testType)
{
// The stream test works differently enough that it requires a different method since its calls will all be async.
if (testType == STREAM)
{
executeStreamTest(totalThreads, blockSize, numBlocks);
return;
}
// The thurst test works differently since it create thrust vectors
if (testType == THRUST)
{
executeThrustTest(totalThreads, blockSize, numBlocks);
return;
}
int a[totalThreads], b[totalThreads], add_dest[totalThreads], sub_dest[totalThreads], mult_dest[totalThreads], div_dest[totalThreads], mod_dest[totalThreads];
int *gpu_a, *gpu_b, *gpu_add_dest, *gpu_sub_dest, *gpu_mult_dest, *gpu_div_dest, *gpu_mod_dest;
hipMalloc((void**)&gpu_a, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_b, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_add_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_sub_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_mult_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_div_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_mod_dest, totalThreads * sizeof(int));
// Create a random generate that will generate random numbers from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
hipMemcpy(gpu_a, a, totalThreads * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(gpu_b, b, totalThreads * sizeof(int), hipMemcpyHostToDevice);
switch (testType)
{
case GLOBAL:
// Executes global memory operations.
hipLaunchKernelGGL(( executeGlobalMathOperations), dim3(numBlocks), dim3(blockSize), 0, 0, gpu_a,gpu_b,gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, numBlocks * blockSize);
break;
case SHARED:
// The third parameter is the size of the shared memory
// We multiply by 3 because we need to copy A and B and then also have room for the return in shared memory.
hipLaunchKernelGGL(( executeSharedMathOperations), dim3(numBlocks), dim3(blockSize), 3 * totalThreads * sizeof(int), 0, gpu_a,gpu_b,gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, totalThreads);
break;
case CONSTANT:
// constant doesn't actually take in gpu_a and gpu_b since it uses constant memory. However the random generation is left in so timing can be compared.
hipLaunchKernelGGL(( executeConstantMathOperations), dim3(numBlocks), dim3(blockSize), 0, 0, gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, totalThreads);
break;
case REGISTER:
// Executes global memory operations by saving the value into local registers first.
hipLaunchKernelGGL(( executeRegisterMathOperations), dim3(numBlocks), dim3(blockSize), 0, 0, gpu_a,gpu_b,gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, totalThreads);
break;
default:
std::cout << "Unknown test type " << testType << "!" << std::endl;
break;
}
hipMemcpy(add_dest, gpu_add_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(sub_dest, gpu_sub_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(mult_dest, gpu_mult_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(div_dest, gpu_div_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(mod_dest, gpu_mod_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipFree(gpu_a);
hipFree(gpu_b);
hipFree(gpu_add_dest);
hipFree(gpu_sub_dest);
hipFree(gpu_mult_dest);
hipFree(gpu_div_dest);
hipFree(gpu_mod_dest);
}
void printArray(const int * const arr, const int xSize, const int ySize)
{
for (size_t i = 0; i < xSize; ++i)
{
for(size_t j = 0; j < ySize; ++j)
{
std::cout << arr[i * ySize + j] << " ";
}
std::cout << '\n';
}
std::cout << std::flush;
}
int main(int argc, char** argv)
{
// read command line arguments
int totalThreads = 256;
int blockSize = 256;
if (argc >= 2) {
totalThreads = atoi(argv[1]);
}
if (argc >= 3) {
blockSize = atoi(argv[2]);
}
int numBlocks = totalThreads/blockSize;
// validate command line arguments
if (totalThreads % blockSize != 0) {
++numBlocks;
totalThreads = numBlocks*blockSize;
printf("Warning: Total thread count is not evenly divisible by the block size\n");
printf("The total number of threads will be rounded up to %d\n", totalThreads);
}
auto startTime = std::chrono::system_clock::now();
executeHostTest(totalThreads, blockSize, numBlocks);
auto endTime = std::chrono::system_clock::now();
std::chrono::duration<double> totalTime = endTime-startTime;
std::cout << "Host execution took: " << totalTime.count() << " seconds." << std::endl;
for (auto testType = GLOBAL; testType < NUM_GPU_TESTS; ++testType)
{
startTime = std::chrono::system_clock::now();
executeGPUTest(totalThreads, blockSize, numBlocks, testType);
endTime = std::chrono::system_clock::now();
totalTime = endTime-startTime;
std::cout << gpu_tests_strings[testType] + " Memory execution took: " << totalTime.count() << " seconds." << std::endl;
}
return 0;
}
| 6b9906ab466b322e526a75ff3f6a51ff371beb94.cu | //Based on the work of Andrew Krepps
#include <iostream>
#include <random>
#include <chrono>
#include <stdio.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
__constant__ static const int VAL_A = 1;
__constant__ static const int VAL_B = 3;
enum gpu_tests_enum
{
GLOBAL,
SHARED,
CONSTANT,
REGISTER,
STREAM,
THRUST,
NUM_GPU_TESTS
};
gpu_tests_enum& operator++(gpu_tests_enum& e)
{
return e = (e == NUM_GPU_TESTS) ? GLOBAL : static_cast<gpu_tests_enum>(static_cast<int>(e)+1);
}
std::string gpu_tests_strings[NUM_GPU_TESTS] = {
"Global",
"Shared",
"Constant",
"Register",
"Stream",
"Thrust"};
// Global GPU add c[i] = a[i] + b[i]
__global__ void addGlob(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] + b[thread_idx];
}
// Global GPU subtract c[i] = a[i] - b[i]
__global__ void subtractGlob(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] - b[thread_idx];
}
// Global GPU multiply c[i] = a[i] * b[i]
__global__ void multGlob(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] * b[thread_idx];
}
// Global GPU div c[i] = a[i] / b[i]
__global__ void divGlob(int *a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] / b[thread_idx];
}
// Global GPU mod c[i] = a[i] % b[i]
__global__ void modGlob(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] % b[thread_idx];
}
// Device GPU add c[i] = a[i] + b[i]
__device__ void add(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] + b[thread_idx];
}
// Device GPU subtract c[i] = a[i] - b[i]
__device__ void subtract(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] - b[thread_idx];
}
// Device GPU multiply c[i] = a[i] * b[i]
__device__ void mult(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] * b[thread_idx];
}
// Device GPU div c[i] = a[i] / b[i]
__device__ void div(int *a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] / b[thread_idx];
}
// Device GPU mod c[i] = a[i] % b[i]
__device__ void mod(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] % b[thread_idx];
}
// Device GPU add in register c[i] = a[i] + b[i]
__device__ void addReg(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int tempA = a[thread_idx];
int tempB = b[thread_idx];
int tempResult = tempA + tempB;
c[thread_idx] = tempResult;
}
// Device GPU subtract in register c[i] = a[i] - b[i]
__device__ void subtractReg(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int tempA = a[thread_idx];
int tempB = b[thread_idx];
int tempResult = tempA - tempB;
c[thread_idx] = tempResult;
}
// Device GPU multiply in register c[i] = a[i] * b[i]
__device__ void multReg(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int tempA = a[thread_idx];
int tempB = b[thread_idx];
int tempResult = tempA * tempB;
c[thread_idx] = tempResult;
}
// Device GPU div in register c[i] = a[i] / b[i]
__device__ void divReg(int *a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int tempA = a[thread_idx];
int tempB = b[thread_idx];
int tempResult = tempA / tempB;
c[thread_idx] = tempResult;
}
// Device GPU mod in register c[i] = a[i] % b[i]
__device__ void modReg(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int tempA = a[thread_idx];
int tempB = b[thread_idx];
int tempResult = tempA % tempB;
c[thread_idx] = tempResult;
}
// Executes all 5 shared math operations
__global__ void executeSharedMathOperations(int * a, int * b, int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
extern __shared__ int sharedMem[];
// Use offsets in the shared mem to create arrays.
int * sharedA = &sharedMem[0];
int * sharedB = &sharedMem[size];
int * sharedRet = &sharedMem[2*size];
sharedA[tid] = a[tid];
sharedB[tid] = b[tid];
// Add sharedA to sharedB and store in addDest
add(sharedA, sharedB, sharedRet);
addDest[tid] = sharedRet[tid];
// Subtract sharedB from sharedA and store in subDest
subtract(sharedA, sharedB, sharedRet);
subDest[tid] = sharedRet[tid];
// Multiply sharedA by sharedB and store in multDest
mult(sharedA, sharedB, sharedRet);
multDest[tid] = sharedRet[tid];
// Divide sharedA by sharedB and store in divDest
div(sharedA, sharedB, sharedRet);
divDest[tid] = sharedRet[tid];
// Mod sharedA by sharedB and store in modDest
mod(sharedA, sharedB, sharedRet);
modDest[tid] = sharedRet[tid];
}
// Executes all 5 global math operations
__global__ void executeGlobalMathOperations(int * a, int * b, int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
// Add a to b and store in addDest
add(a, b, addDest);
// Subtract b from a and store in subDest
subtract(a, b, subDest);
// Multiply a by b and store in multDest
mult(a, b, multDest);
// Divide a by b and store in divDest
div(a, b, divDest);
// Mod a by b and store in modDest
mod(a, b, modDest);
}
// Executes all 5 register math operations
__global__ void executeRegisterMathOperations(int * a, int * b, int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
// Add a to b and store in addDest
addReg(a, b, addDest);
// Subtract b from a and store in subDest
subtractReg(a, b, subDest);
// Multiply a by b and store in multDest
multReg(a, b, multDest);
// Divide a by b and store in divDest
divReg(a, b, divDest);
// Mod a by b and store in modDest
modReg(a, b, modDest);
}
// Executes all 5 constant math operations
__global__ void executeConstantMathOperations(int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
// Add VAL_A to VAL_B and store in addDest
addDest[tid] = VAL_A + VAL_B;
// Subtract b from a and store in subDest
subDest[tid] = VAL_A - VAL_B;
// Multiply a by b and store in multDest
multDest[tid] = VAL_A * VAL_B;
// Divide a by b and store in divDest
divDest[tid] = VAL_A / VAL_B; // B is chosen to not be 0.
// Mod a by b and store in modDest
modDest[tid] = VAL_A % VAL_B; // B is chosen to not be 0.
}
// Host (Cpu) add c[i] = a[i] + b[i]
void hostAdd(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] + b[i];
}
}
// Host (Cpu) sub c[i] = a[i] - b[i]
void hostSub(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] - b[i];
}
}
// Host (Cpu) multiply c[i] = a[i] * b[i]
void hostMult(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] * b[i];
}
}
// Host (Cpu) divide c[i] = a[i] / b[i]
void hostDiv(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
if (b[i] != 0)
{
c[i] = a[i] / b[i];
}
else
{
c[i] = 0;
}
}
}
// Host (Cpu) mod c[i] = a[i] % b[i]
void hostMod(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
// Protect against divide by 0.
// cuda code catches this error and sets result to 0 by default.
if (b[i] == 0)
{
c[i] = 0;
}
else
{
c[i] = a[i] % b[i];
}
}
}
// Executes each of the host (cpu) tests by creating local memory and executing all 5 math operations on the data.
// The data is filled with random numbers generated with the same seed as the GPU tests.
void executeHostTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int a[totalThreads], b[totalThreads], c[totalThreads];
// Create a random generator that will generate random numbers from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
// Add all of the numbers c[i] = a[i] + b[i];
hostAdd(a,b,c, totalThreads);
// Subtract all of the numbers c[i] = a[i] - b[i];
hostSub(a,b,c, totalThreads);
// Multiply all of the numbers c[i] = a[i] * b[i];
hostMult(a,b,c, totalThreads);
// Divides all of the numbers c[i] = a[i] / b[i]; if b[i] == 0, c[i] = 0
hostDiv(a,b,c, totalThreads);
// Mod all of the numbers c[i] = a[i] % b[i];
hostMod(a,b,c, totalThreads);
}
// Executes a streams test, which is similar to the GPU tests below except here we make use
// of a CUDA stream and issue the memory copies and kernel launches asynchronously.
// The data is filled with random numbers generated with the same seed as the CPU tests.
void executeStreamTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int a[totalThreads], b[totalThreads], add_dest[totalThreads], sub_dest[totalThreads], mult_dest[totalThreads], div_dest[totalThreads], mod_dest[totalThreads];
int *gpu_a, *gpu_b, *gpu_add_dest, *gpu_sub_dest, *gpu_mult_dest, *gpu_div_dest, *gpu_mod_dest;
cudaStream_t stream;
cudaStreamCreate(&stream);
cudaMalloc((void**)&gpu_a, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_b, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_add_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_sub_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_mult_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_div_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_mod_dest, totalThreads * sizeof(int));
// Create a random generator that will generate random numbers from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
// Here we will now copy memory asynchronously and call each of the global version of the math
// methods using a stream. This will allow the stream to do its own calculation of how these
// methods should be executed.
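// Note: the host arrays above are ordinary pageable (stack) memory, so cudaMemcpyAsync may not
// truly overlap with kernel execution; pinned memory (cudaMallocHost/cudaHostAlloc) would be
// needed for fully asynchronous transfers.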
cudaMemcpyAsync(gpu_a, a, totalThreads * sizeof(int), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(gpu_b, b, totalThreads * sizeof(int), cudaMemcpyHostToDevice, stream);
// Asynchronously add and then copy memory to host.
addGlob<<<numBlocks, blockSize, 0, stream>>>(gpu_a, gpu_b, gpu_add_dest);
cudaMemcpyAsync(add_dest, gpu_add_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost, stream);
// Asynchronously subtract and then copy memory to host.
subtractGlob<<<numBlocks, blockSize, 0, stream>>>(gpu_a, gpu_b, gpu_sub_dest);
cudaMemcpyAsync(sub_dest, gpu_sub_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost, stream);
// Asynchronously multiply and then copy memory to host.
multGlob<<<numBlocks, blockSize, 0, stream>>>(gpu_a, gpu_b, gpu_mult_dest);
cudaMemcpyAsync(mult_dest, gpu_mult_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost, stream);
// Asynchronously divide and then copy memory to host.
divGlob<<<numBlocks, blockSize, 0, stream>>>(gpu_a, gpu_b, gpu_div_dest);
cudaMemcpyAsync(div_dest, gpu_div_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost, stream);
// Asynchronously compute the modulus and then copy memory to host.
modGlob<<<numBlocks, blockSize, 0, stream>>>(gpu_a, gpu_b, gpu_mod_dest);
cudaMemcpyAsync(mod_dest, gpu_mod_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
cudaFree(gpu_a);
cudaFree(gpu_b);
cudaFree(gpu_add_dest);
cudaFree(gpu_sub_dest);
cudaFree(gpu_mult_dest);
cudaFree(gpu_div_dest);
cudaFree(gpu_mod_dest);
}
// Executes a thrust test, which is similar to the GPU tests below except here we make use
// of the CUDA Thrust library.
// The data is filled with random numbers generated with the same seed as the CPU tests.
void executeThrustTest(const int totalThreads, const int blockSize, const int numBlocks)
{
// Create host vectors
thrust::host_vector<int> a(totalThreads);
thrust::host_vector<int> b(totalThreads);
thrust::host_vector<int> add_dest(totalThreads);
thrust::host_vector<int> sub_dest(totalThreads);
thrust::host_vector<int> mult_dest(totalThreads);
thrust::host_vector<int> div_dest(totalThreads);
thrust::host_vector<int> mod_dest(totalThreads);
// Create device vectors
thrust::device_vector<int> gpu_a(totalThreads);
thrust::device_vector<int> gpu_b(totalThreads);
thrust::device_vector<int> gpu_add_dest(totalThreads);
thrust::device_vector<int> gpu_sub_dest(totalThreads);
thrust::device_vector<int> gpu_mult_dest(totalThreads);
thrust::device_vector<int> gpu_div_dest(totalThreads);
thrust::device_vector<int> gpu_mod_dest(totalThreads);
// Create a random generator that will generate random numbers from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
// Here we will now copy the vectors from the host to device
gpu_a = a;
gpu_b = b;
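// Note: the element-wise loops below index thrust::device_vector from host code, which performs
// one device transfer per element access. A device-side alternative would be, e.g.,
// thrust::transform(gpu_a.begin(), gpu_a.end(), gpu_b.begin(), gpu_add_dest.begin(), thrust::plus<int>());
// (thrust::plus is declared in <thrust/functional.h>, which is not included here).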
// add and then copy memory to host.
for (int i = 0; i < totalThreads; ++i)
{
gpu_add_dest[i] = gpu_a[i] + gpu_b[i];
}
add_dest = gpu_add_dest;
// subtract and then copy memory to host.
for (int i = 0; i < totalThreads; ++i)
{
gpu_sub_dest[i] = gpu_a[i] - gpu_b[i];
}
sub_dest = gpu_sub_dest;
// multiply and then copy memory to host.
for (int i = 0; i < totalThreads; ++i)
{
gpu_mult_dest[i] = gpu_a[i] * gpu_b[i];
}
mult_dest = gpu_mult_dest;
// divide and then copy memory to host.
for (int i = 0; i < totalThreads; ++i)
{
if (gpu_b[i] != 0)
{
gpu_div_dest[i] = gpu_a[i] / gpu_b[i];
}
else
{
gpu_div_dest[i] = 0;
}
}
div_dest = gpu_div_dest;
// compute the modulus and then copy memory to host.
for (int i = 0; i < totalThreads; ++i)
{
if (gpu_b[i] != 0)
{
gpu_mod_dest[i] = gpu_a[i] % gpu_b[i];
}
else
{
gpu_mod_dest[i] = 0;
}
}
mod_dest = gpu_mod_dest;
}
// Executes each of the GPU tests by creating local memory, copying it to global memory, and then performing
// all 5 math operations on the data.
// The data is filled with random numbers generated with the same seed as the CPU tests.
void executeGPUTest(const int totalThreads, const int blockSize, const int numBlocks, const gpu_tests_enum testType)
{
// The stream test works differently enough that it requires a different method since its calls will all be async.
if (testType == STREAM)
{
executeStreamTest(totalThreads, blockSize, numBlocks);
return;
}
// The thrust test works differently since it creates thrust vectors
if (testType == THRUST)
{
executeThrustTest(totalThreads, blockSize, numBlocks);
return;
}
int a[totalThreads], b[totalThreads], add_dest[totalThreads], sub_dest[totalThreads], mult_dest[totalThreads], div_dest[totalThreads], mod_dest[totalThreads];
int *gpu_a, *gpu_b, *gpu_add_dest, *gpu_sub_dest, *gpu_mult_dest, *gpu_div_dest, *gpu_mod_dest;
cudaMalloc((void**)&gpu_a, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_b, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_add_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_sub_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_mult_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_div_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_mod_dest, totalThreads * sizeof(int));
// Create a random generator that will generate random numbers from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
cudaMemcpy(gpu_a, a, totalThreads * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_b, b, totalThreads * sizeof(int), cudaMemcpyHostToDevice);
switch (testType)
{
case GLOBAL:
// Executes global memory operations.
executeGlobalMathOperations<<<numBlocks, blockSize>>>(gpu_a,gpu_b,gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, numBlocks * blockSize);
break;
case SHARED:
// The third parameter is the size of the shared memory
// We multiply by 3 because we need to copy A and B and then also have room for the return in shared memory.
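// Note: this requests 3 * totalThreads * sizeof(int) bytes of dynamic shared memory per block,
// which exceeds the typical 48 KB per-block limit once totalThreads grows past 4096 (3 * 4096 * 4 bytes = 48 KB).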
executeSharedMathOperations<<<numBlocks, blockSize, 3 * totalThreads * sizeof(int)>>>(gpu_a,gpu_b,gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, totalThreads);
break;
case CONSTANT:
// constant doesn't actually take in gpu_a and gpu_b since it uses constant memory. However the random generation is left in so timing can be compared.
executeConstantMathOperations<<<numBlocks, blockSize>>>(gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, totalThreads);
break;
case REGISTER:
// Executes global memory operations by saving the value into local registers first.
executeRegisterMathOperations<<<numBlocks, blockSize>>>(gpu_a,gpu_b,gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, totalThreads);
break;
default:
std::cout << "Unknown test type " << testType << "!" << std::endl;
break;
}
cudaMemcpy(add_dest, gpu_add_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(sub_dest, gpu_sub_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(mult_dest, gpu_mult_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(div_dest, gpu_div_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(mod_dest, gpu_mod_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(gpu_a);
cudaFree(gpu_b);
cudaFree(gpu_add_dest);
cudaFree(gpu_sub_dest);
cudaFree(gpu_mult_dest);
cudaFree(gpu_div_dest);
cudaFree(gpu_mod_dest);
}
void printArray(const int * const arr, const int xSize, const int ySize)
{
for (size_t i = 0; i < xSize; ++i)
{
for(size_t j = 0; j < ySize; ++j)
{
std::cout << arr[i * ySize + j] << " ";
}
std::cout << '\n';
}
std::cout << std::flush;
}
int main(int argc, char** argv)
{
// read command line arguments
int totalThreads = 256;
int blockSize = 256;
if (argc >= 2) {
totalThreads = atoi(argv[1]);
}
if (argc >= 3) {
blockSize = atoi(argv[2]);
}
int numBlocks = totalThreads/blockSize;
// validate command line arguments
if (totalThreads % blockSize != 0) {
++numBlocks;
totalThreads = numBlocks*blockSize;
printf("Warning: Total thread count is not evenly divisible by the block size\n");
printf("The total number of threads will be rounded up to %d\n", totalThreads);
}
auto startTime = std::chrono::system_clock::now();
executeHostTest(totalThreads, blockSize, numBlocks);
auto endTime = std::chrono::system_clock::now();
std::chrono::duration<double> totalTime = endTime-startTime;
std::cout << "Host execution took: " << totalTime.count() << " seconds." << std::endl;
for (auto testType = GLOBAL; testType < NUM_GPU_TESTS; ++testType)
{
startTime = std::chrono::system_clock::now();
executeGPUTest(totalThreads, blockSize, numBlocks, testType);
endTime = std::chrono::system_clock::now();
totalTime = endTime-startTime;
std::cout << gpu_tests_strings[testType] + " Memory execution took: " << totalTime.count() << " seconds." << std::endl;
}
return 0;
}
|
790f40b45e4bfd71eefc14b8b6efaeaddce43e29.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nms.h"
#include "utils.h"
#include <algorithm>
#include <iostream>
#include <stdexcept>
#include <cstdint>
#include <vector>
#include <cmath>
#include <hip/hip_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/execution_policy.h>
#include <thrust/gather.h>
#include <thrust/system/hip/detail/hipcub/hipcub.hpp>
#include <thrust/system/hip/detail/cub/iterator/counting_input_iterator.cuh>
namespace d2dev {
__global__ void nms_kernel(
const int num_per_thread, const float threshold, const int num_detections,
const int *indices, float *scores, const float *classes, const float4 *boxes) {
// Go through detections by descending score
for (int m = 0; m < num_detections; m++)
{
for (int n = 0; n < num_per_thread; n++)
{
int i = threadIdx.x * num_per_thread + n;
if (i < num_detections && m < i && scores[m] > 0.0f)
{
int idx = indices[i];
int max_idx = indices[m];
int icls = classes[idx];
int mcls = classes[max_idx];
if (mcls == icls)
{
float4 ibox = boxes[idx];
float4 mbox = boxes[max_idx];
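// Intersection rectangle between box i and the higher-scoring box m, then IoU for the overlap test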
float x1 = max(ibox.x, mbox.x);
float y1 = max(ibox.y, mbox.y);
float x2 = min(ibox.z, mbox.z);
float y2 = min(ibox.w, mbox.w);
float w = max(0.0f, x2 - x1 + 1);
float h = max(0.0f, y2 - y1 + 1);
float iarea = (ibox.z - ibox.x + 1) * (ibox.w - ibox.y + 1);
float marea = (mbox.z - mbox.x + 1) * (mbox.w - mbox.y + 1);
float inter = w * h;
float overlap = inter / (iarea + marea - inter);
if (overlap > threshold)
{
scores[i] = 0.0f;
}
}
}
}
// Sync discarded detections
__syncthreads();
}
}
int batch_nms(int batch_size,
const void *const *inputs, void **outputs,
size_t count, int detections_per_im, float nms_thresh,
void *workspace, size_t workspace_size, hipStream_t stream) {
if (!workspace || !workspace_size)
{
// Return required scratch space size cub style
workspace_size = get_size_aligned<bool>(count); // flags
workspace_size += get_size_aligned<int>(count); // indices
workspace_size += get_size_aligned<int>(count); // indices_sorted
workspace_size += get_size_aligned<float>(count); // scores
workspace_size += get_size_aligned<float>(count); // scores_sorted
size_t temp_size_flag = 0;
thrust::cuda_cub::hipcub::DeviceSelect::Flagged((void *) nullptr, temp_size_flag,
thrust::cuda_cub::hipcub::CountingInputIterator<int>(count),
(bool *) nullptr, (int *) nullptr, (int *) nullptr, count);
size_t temp_size_sort = 0;
thrust::cuda_cub::hipcub::DeviceRadixSort::SortPairsDescending((void *) nullptr, temp_size_sort,
(float *) nullptr, (float *) nullptr,
(int *) nullptr, (int *) nullptr, count);
workspace_size += ::max(temp_size_flag, temp_size_sort);
return workspace_size;
}
auto on_stream = thrust::hip::par.on(stream);
auto flags = get_next_ptr<bool>(count, workspace, workspace_size);
auto indices = get_next_ptr<int>(count, workspace, workspace_size);
auto indices_sorted = get_next_ptr<int>(count, workspace, workspace_size);
auto scores = get_next_ptr<float>(count, workspace, workspace_size);
auto scores_sorted = get_next_ptr<float>(count, workspace, workspace_size);
for (int batch = 0; batch < batch_size; batch++)
{
auto in_scores = static_cast<const float *>(inputs[0]) + batch * count;
auto in_boxes = static_cast<const float4 *>(inputs[1]) + batch * count;
auto in_classes = static_cast<const float *>(inputs[2]) + batch * count;
auto out_scores = static_cast<float *>(outputs[0]) + batch * detections_per_im;
auto out_boxes = static_cast<float4 *>(outputs[1]) + batch * detections_per_im;
auto out_classes = static_cast<float *>(outputs[2]) + batch * detections_per_im;
// Discard null scores
thrust::transform(on_stream, in_scores, in_scores + count,
flags, thrust::placeholders::_1 > 0.0f);
int *num_selected = reinterpret_cast<int *>(indices_sorted);
thrust::cuda_cub::hipcub::DeviceSelect::Flagged(workspace, workspace_size,
thrust::cuda_cub::hipcub::CountingInputIterator<int>(0),
flags, indices, num_selected, count, stream);
hipStreamSynchronize(stream);
int num_detections = *thrust::device_pointer_cast(num_selected);
// Sort scores and corresponding indices
thrust::gather(on_stream, indices, indices + num_detections, in_scores, scores);
thrust::cuda_cub::hipcub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size,
scores, scores_sorted, indices,
indices_sorted, num_detections, 0,
sizeof(*scores) * 8, stream);
// Launch actual NMS kernel - 1 block with each thread handling n detections
const int max_threads = 1024;
int num_per_thread = ceil((float) num_detections / max_threads);
hipLaunchKernelGGL(( nms_kernel), dim3(1), dim3(max_threads), 0, stream, num_per_thread, nms_thresh, num_detections,
indices_sorted, scores_sorted, in_classes, in_boxes);
// Re-sort with updated scores
thrust::cuda_cub::hipcub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size,
scores_sorted, scores, indices_sorted,
indices, num_detections, 0,
sizeof(*scores) * 8, stream);
// Gather filtered scores, boxes, classes
num_detections = min(detections_per_im, num_detections);
hipMemcpyAsync(out_scores, scores, num_detections * sizeof *scores, hipMemcpyDeviceToDevice, stream);
if (num_detections < detections_per_im)
{
thrust::fill_n(on_stream, out_scores + num_detections, detections_per_im - num_detections, 0);
}
thrust::gather(on_stream, indices, indices + num_detections, in_boxes, out_boxes);
thrust::gather(on_stream, indices, indices + num_detections, in_classes, out_classes);
}
return 0;
}
int plate_batch_nms(int batch_size,
const void *const *inputs, void **outputs,
size_t count, int detections_per_im, float nms_thresh,
void *workspace, size_t workspace_size, hipStream_t stream) {
if (!workspace || !workspace_size)
{
// Return required scratch space size cub style
workspace_size = get_size_aligned<bool>(count); // flags
workspace_size += get_size_aligned<int>(count); // indices
workspace_size += get_size_aligned<int>(count); // indices_sorted
workspace_size += get_size_aligned<float>(count); // scores
workspace_size += get_size_aligned<float>(count); // scores_sorted
size_t temp_size_flag = 0;
thrust::cuda_cub::hipcub::DeviceSelect::Flagged((void *) nullptr, temp_size_flag,
thrust::cuda_cub::hipcub::CountingInputIterator<int>(count),
(bool *) nullptr, (int *) nullptr, (int *) nullptr, count);
size_t temp_size_sort = 0;
thrust::cuda_cub::hipcub::DeviceRadixSort::SortPairsDescending((void *) nullptr, temp_size_sort,
(float *) nullptr, (float *) nullptr,
(int *) nullptr, (int *) nullptr, count);
workspace_size += ::max(temp_size_flag, temp_size_sort);
return workspace_size;
}
auto on_stream = thrust::hip::par.on(stream);
auto flags = get_next_ptr<bool>(count, workspace, workspace_size);
auto indices = get_next_ptr<int>(count, workspace, workspace_size);
auto indices_sorted = get_next_ptr<int>(count, workspace, workspace_size);
auto scores = get_next_ptr<float>(count, workspace, workspace_size);
auto scores_sorted = get_next_ptr<float>(count, workspace, workspace_size);
for (int batch = 0; batch < batch_size; batch++)
{
auto in_scores = static_cast<const float *>(inputs[0]) + batch * count;
auto in_boxes = static_cast<const float4 *>(inputs[1]) + batch * count;
auto in_classes = static_cast<const float *>(inputs[2]) + batch * count;
auto in_keypoints = static_cast<const float8 *>(inputs[3]) + batch * count;
auto out_scores = static_cast<float *>(outputs[0]) + batch * detections_per_im;
auto out_boxes = static_cast<float4 *>(outputs[1]) + batch * detections_per_im;
auto out_classes = static_cast<float *>(outputs[2]) + batch * detections_per_im;
auto out_keypoints = static_cast<float8 *>(outputs[3]) + batch * detections_per_im;
// Discard null scores
thrust::transform(on_stream, in_scores, in_scores + count, flags, thrust::placeholders::_1 > 0.0f);
int *num_selected = reinterpret_cast<int *>(indices_sorted);
thrust::cuda_cub::hipcub::DeviceSelect::Flagged(workspace, workspace_size,
thrust::cuda_cub::hipcub::CountingInputIterator<int>(0),
flags, indices, num_selected, count, stream);
hipStreamSynchronize(stream);
int num_detections = *thrust::device_pointer_cast(num_selected);
// Sort scores and corresponding indices
thrust::gather(on_stream, indices, indices + num_detections, in_scores, scores);
thrust::cuda_cub::hipcub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size,
scores, scores_sorted, indices,
indices_sorted, num_detections, 0,
sizeof(*scores) * 8, stream);
// Launch actual NMS kernel - 1 block with each thread handling n detections
const int max_threads = 1024;
int num_per_thread = ceil((float) num_detections / max_threads);
hipLaunchKernelGGL(( nms_kernel), dim3(1), dim3(max_threads), 0, stream, num_per_thread, nms_thresh, num_detections,
indices_sorted, scores_sorted, in_classes, in_boxes);
// Re-sort with updated scores
thrust::cuda_cub::hipcub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size,
scores_sorted, scores, indices_sorted,
indices, num_detections, 0,
sizeof(*scores) * 8, stream);
// Gather filtered scores, boxes, classes
num_detections = min(detections_per_im, num_detections);
hipMemcpyAsync(out_scores, scores, num_detections * sizeof *scores, hipMemcpyDeviceToDevice, stream);
if (num_detections < detections_per_im)
{
thrust::fill_n(on_stream, out_scores + num_detections, detections_per_im - num_detections, 0);
}
thrust::gather(on_stream, indices, indices + num_detections, in_boxes, out_boxes);
thrust::gather(on_stream, indices, indices + num_detections, in_classes, out_classes);
thrust::gather(on_stream, indices, indices + num_detections, in_keypoints, out_keypoints);
}
return 0;
}
}
| 790f40b45e4bfd71eefc14b8b6efaeaddce43e29.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "nms.h"
#include "utils.h"
#include <algorithm>
#include <iostream>
#include <stdexcept>
#include <cstdint>
#include <vector>
#include <cmath>
#include <cuda.h>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/execution_policy.h>
#include <thrust/gather.h>
#include <thrust/system/cuda/detail/cub/device/device_radix_sort.cuh>
#include <thrust/system/cuda/detail/cub/iterator/counting_input_iterator.cuh>
namespace d2dev {
__global__ void nms_kernel(
const int num_per_thread, const float threshold, const int num_detections,
const int *indices, float *scores, const float *classes, const float4 *boxes) {
// Go through detections by descending score
for (int m = 0; m < num_detections; m++)
{
for (int n = 0; n < num_per_thread; n++)
{
int i = threadIdx.x * num_per_thread + n;
if (i < num_detections && m < i && scores[m] > 0.0f)
{
int idx = indices[i];
int max_idx = indices[m];
int icls = classes[idx];
int mcls = classes[max_idx];
if (mcls == icls)
{
float4 ibox = boxes[idx];
float4 mbox = boxes[max_idx];
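// Intersection rectangle between box i and the higher-scoring box m, then IoU for the overlap test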
float x1 = max(ibox.x, mbox.x);
float y1 = max(ibox.y, mbox.y);
float x2 = min(ibox.z, mbox.z);
float y2 = min(ibox.w, mbox.w);
float w = max(0.0f, x2 - x1 + 1);
float h = max(0.0f, y2 - y1 + 1);
float iarea = (ibox.z - ibox.x + 1) * (ibox.w - ibox.y + 1);
float marea = (mbox.z - mbox.x + 1) * (mbox.w - mbox.y + 1);
float inter = w * h;
float overlap = inter / (iarea + marea - inter);
if (overlap > threshold)
{
scores[i] = 0.0f;
}
}
}
}
// Sync discarded detections
__syncthreads();
}
}
int batch_nms(int batch_size,
const void *const *inputs, void **outputs,
size_t count, int detections_per_im, float nms_thresh,
void *workspace, size_t workspace_size, cudaStream_t stream) {
if (!workspace || !workspace_size)
{
// Return required scratch space size cub style
workspace_size = get_size_aligned<bool>(count); // flags
workspace_size += get_size_aligned<int>(count); // indices
workspace_size += get_size_aligned<int>(count); // indices_sorted
workspace_size += get_size_aligned<float>(count); // scores
workspace_size += get_size_aligned<float>(count); // scores_sorted
size_t temp_size_flag = 0;
thrust::cuda_cub::cub::DeviceSelect::Flagged((void *) nullptr, temp_size_flag,
thrust::cuda_cub::cub::CountingInputIterator<int>(count),
(bool *) nullptr, (int *) nullptr, (int *) nullptr, count);
size_t temp_size_sort = 0;
thrust::cuda_cub::cub::DeviceRadixSort::SortPairsDescending((void *) nullptr, temp_size_sort,
(float *) nullptr, (float *) nullptr,
(int *) nullptr, (int *) nullptr, count);
workspace_size += std::max(temp_size_flag, temp_size_sort);
return workspace_size;
}
auto on_stream = thrust::cuda::par.on(stream);
auto flags = get_next_ptr<bool>(count, workspace, workspace_size);
auto indices = get_next_ptr<int>(count, workspace, workspace_size);
auto indices_sorted = get_next_ptr<int>(count, workspace, workspace_size);
auto scores = get_next_ptr<float>(count, workspace, workspace_size);
auto scores_sorted = get_next_ptr<float>(count, workspace, workspace_size);
for (int batch = 0; batch < batch_size; batch++)
{
auto in_scores = static_cast<const float *>(inputs[0]) + batch * count;
auto in_boxes = static_cast<const float4 *>(inputs[1]) + batch * count;
auto in_classes = static_cast<const float *>(inputs[2]) + batch * count;
auto out_scores = static_cast<float *>(outputs[0]) + batch * detections_per_im;
auto out_boxes = static_cast<float4 *>(outputs[1]) + batch * detections_per_im;
auto out_classes = static_cast<float *>(outputs[2]) + batch * detections_per_im;
// Discard null scores
thrust::transform(on_stream, in_scores, in_scores + count,
flags, thrust::placeholders::_1 > 0.0f);
int *num_selected = reinterpret_cast<int *>(indices_sorted);
thrust::cuda_cub::cub::DeviceSelect::Flagged(workspace, workspace_size,
thrust::cuda_cub::cub::CountingInputIterator<int>(0),
flags, indices, num_selected, count, stream);
cudaStreamSynchronize(stream);
int num_detections = *thrust::device_pointer_cast(num_selected);
// Sort scores and corresponding indices
thrust::gather(on_stream, indices, indices + num_detections, in_scores, scores);
thrust::cuda_cub::cub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size,
scores, scores_sorted, indices,
indices_sorted, num_detections, 0,
sizeof(*scores) * 8, stream);
// Launch actual NMS kernel - 1 block with each thread handling n detections
const int max_threads = 1024;
int num_per_thread = ceil((float) num_detections / max_threads);
nms_kernel<<<1, max_threads, 0, stream>>>(num_per_thread, nms_thresh, num_detections,
indices_sorted, scores_sorted, in_classes, in_boxes);
// Re-sort with updated scores
thrust::cuda_cub::cub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size,
scores_sorted, scores, indices_sorted,
indices, num_detections, 0,
sizeof(*scores) * 8, stream);
// Gather filtered scores, boxes, classes
num_detections = min(detections_per_im, num_detections);
cudaMemcpyAsync(out_scores, scores, num_detections * sizeof *scores, cudaMemcpyDeviceToDevice, stream);
if (num_detections < detections_per_im)
{
thrust::fill_n(on_stream, out_scores + num_detections, detections_per_im - num_detections, 0);
}
thrust::gather(on_stream, indices, indices + num_detections, in_boxes, out_boxes);
thrust::gather(on_stream, indices, indices + num_detections, in_classes, out_classes);
}
return 0;
}
int plate_batch_nms(int batch_size,
const void *const *inputs, void **outputs,
size_t count, int detections_per_im, float nms_thresh,
void *workspace, size_t workspace_size, cudaStream_t stream) {
if (!workspace || !workspace_size)
{
// Return required scratch space size cub style
workspace_size = get_size_aligned<bool>(count); // flags
workspace_size += get_size_aligned<int>(count); // indices
workspace_size += get_size_aligned<int>(count); // indices_sorted
workspace_size += get_size_aligned<float>(count); // scores
workspace_size += get_size_aligned<float>(count); // scores_sorted
size_t temp_size_flag = 0;
thrust::cuda_cub::cub::DeviceSelect::Flagged((void *) nullptr, temp_size_flag,
thrust::cuda_cub::cub::CountingInputIterator<int>(count),
(bool *) nullptr, (int *) nullptr, (int *) nullptr, count);
size_t temp_size_sort = 0;
thrust::cuda_cub::cub::DeviceRadixSort::SortPairsDescending((void *) nullptr, temp_size_sort,
(float *) nullptr, (float *) nullptr,
(int *) nullptr, (int *) nullptr, count);
workspace_size += std::max(temp_size_flag, temp_size_sort);
return workspace_size;
}
auto on_stream = thrust::cuda::par.on(stream);
auto flags = get_next_ptr<bool>(count, workspace, workspace_size);
auto indices = get_next_ptr<int>(count, workspace, workspace_size);
auto indices_sorted = get_next_ptr<int>(count, workspace, workspace_size);
auto scores = get_next_ptr<float>(count, workspace, workspace_size);
auto scores_sorted = get_next_ptr<float>(count, workspace, workspace_size);
for (int batch = 0; batch < batch_size; batch++)
{
auto in_scores = static_cast<const float *>(inputs[0]) + batch * count;
auto in_boxes = static_cast<const float4 *>(inputs[1]) + batch * count;
auto in_classes = static_cast<const float *>(inputs[2]) + batch * count;
auto in_keypoints = static_cast<const float8 *>(inputs[3]) + batch * count;
auto out_scores = static_cast<float *>(outputs[0]) + batch * detections_per_im;
auto out_boxes = static_cast<float4 *>(outputs[1]) + batch * detections_per_im;
auto out_classes = static_cast<float *>(outputs[2]) + batch * detections_per_im;
auto out_keypoints = static_cast<float8 *>(outputs[3]) + batch * detections_per_im;
// Discard null scores
thrust::transform(on_stream, in_scores, in_scores + count, flags, thrust::placeholders::_1 > 0.0f);
int *num_selected = reinterpret_cast<int *>(indices_sorted);
thrust::cuda_cub::cub::DeviceSelect::Flagged(workspace, workspace_size,
thrust::cuda_cub::cub::CountingInputIterator<int>(0),
flags, indices, num_selected, count, stream);
cudaStreamSynchronize(stream);
int num_detections = *thrust::device_pointer_cast(num_selected);
// Sort scores and corresponding indices
thrust::gather(on_stream, indices, indices + num_detections, in_scores, scores);
thrust::cuda_cub::cub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size,
scores, scores_sorted, indices,
indices_sorted, num_detections, 0,
sizeof(*scores) * 8, stream);
// Launch actual NMS kernel - 1 block with each thread handling n detections
const int max_threads = 1024;
int num_per_thread = ceil((float) num_detections / max_threads);
nms_kernel<<<1, max_threads, 0, stream>>>(num_per_thread, nms_thresh, num_detections,
indices_sorted, scores_sorted, in_classes, in_boxes);
// Re-sort with updated scores
thrust::cuda_cub::cub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size,
scores_sorted, scores, indices_sorted,
indices, num_detections, 0,
sizeof(*scores) * 8, stream);
// Gather filtered scores, boxes, classes
num_detections = min(detections_per_im, num_detections);
cudaMemcpyAsync(out_scores, scores, num_detections * sizeof *scores, cudaMemcpyDeviceToDevice, stream);
if (num_detections < detections_per_im)
{
thrust::fill_n(on_stream, out_scores + num_detections, detections_per_im - num_detections, 0);
}
thrust::gather(on_stream, indices, indices + num_detections, in_boxes, out_boxes);
thrust::gather(on_stream, indices, indices + num_detections, in_classes, out_classes);
thrust::gather(on_stream, indices, indices + num_detections, in_keypoints, out_keypoints);
}
return 0;
}
}
|
4b941785571d04ba911f48b032e559f3069c3894.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"../include/graph.cuh"
//host function for parallel bellman ford routine that invokes sssp_kernel iteratively
void sssp(graph *cpu_g, graph *gpu_g)
{
int i = cpu_g->v;
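// Bellman-Ford style relaxation: launching the kernel |V| times (one more than the classic
// |V|-1 passes, assuming each launch relaxes every edge once) lets distances propagate along
// the longest possible shortest path.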
while(i--)
{
hipLaunchKernelGGL(( sssp_kernel), dim3(nblocks),dim3(threads_per_block), 0, 0, gpu_g);
}
} | 4b941785571d04ba911f48b032e559f3069c3894.cu | #include"../include/graph.cuh"
//host function for parallel bellman ford routine that invokes sssp_kernel iteratively
void sssp(graph *cpu_g, graph *gpu_g)
{
int i = cpu_g->v;
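// Bellman-Ford style relaxation: launching the kernel |V| times (one more than the classic
// |V|-1 passes, assuming each launch relaxes every edge once) lets distances propagate along
// the longest possible shortest path.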
while(i--)
{
sssp_kernel<<<nblocks,threads_per_block>>>(gpu_g);
}
} |
f008679af417a43f863aeea8e4f6319d81fb26d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/detection/yolo_box_op.h"
#include "paddle/fluid/operators/math/math_function.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T>
__global__ void KeYoloBoxFw(const T* input, const int* imgsize, T* boxes,
T* scores, const float conf_thresh,
const int* anchors, const int n, const int h,
const int w, const int an_num, const int class_num,
const int box_num, int input_size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
T box[4];
for (; tid < n * box_num; tid += stride) {
int grid_num = h * w;
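// Decompose the linear id into batch index i, anchor index j, grid row k and grid column l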
int i = tid / box_num;
int j = (tid % box_num) / grid_num;
int k = (tid % grid_num) / w;
int l = tid % w;
int an_stride = (5 + class_num) * grid_num;
int img_height = imgsize[2 * i];
int img_width = imgsize[2 * i + 1];
int obj_idx =
GetEntryIndex(i, j, k * w + l, an_num, an_stride, grid_num, 4);
T conf = sigmoid<T>(input[obj_idx]);
if (conf < conf_thresh) {
continue;
}
int box_idx =
GetEntryIndex(i, j, k * w + l, an_num, an_stride, grid_num, 0);
GetYoloBox<T>(box, input, anchors, l, k, j, h, input_size, box_idx,
grid_num, img_height, img_width);
box_idx = (i * box_num + j * grid_num + k * w + l) * 4;
CalcDetectionBox<T>(boxes, box, box_idx, img_height, img_width);
int label_idx =
GetEntryIndex(i, j, k * w + l, an_num, an_stride, grid_num, 5);
int score_idx = (i * box_num + j * grid_num + k * w + l) * class_num;
CalcLabelScore<T>(scores, input, label_idx, score_idx, class_num, conf,
grid_num);
}
}
template <typename T>
class YoloBoxOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* input = ctx.Input<Tensor>("X");
auto* img_size = ctx.Input<Tensor>("ImgSize");
auto* boxes = ctx.Output<Tensor>("Boxes");
auto* scores = ctx.Output<Tensor>("Scores");
auto anchors = ctx.Attr<std::vector<int>>("anchors");
int class_num = ctx.Attr<int>("class_num");
float conf_thresh = ctx.Attr<float>("conf_thresh");
int downsample_ratio = ctx.Attr<int>("downsample_ratio");
const int n = input->dims()[0];
const int h = input->dims()[2];
const int w = input->dims()[3];
const int box_num = boxes->dims()[1];
const int an_num = anchors.size() / 2;
int input_size = downsample_ratio * h;
auto& dev_ctx = ctx.cuda_device_context();
auto& allocator =
platform::DeviceTemporaryAllocator::Instance().Get(dev_ctx);
int bytes = sizeof(int) * anchors.size();
auto anchors_ptr = allocator.Allocate(sizeof(int) * anchors.size());
int* anchors_data = reinterpret_cast<int*>(anchors_ptr->ptr());
const auto gplace = boost::get<platform::CUDAPlace>(ctx.GetPlace());
const auto cplace = platform::CPUPlace();
memory::Copy(gplace, anchors_data, cplace, anchors.data(), bytes,
dev_ctx.stream());
const T* input_data = input->data<T>();
const int* imgsize_data = img_size->data<int>();
T* boxes_data = boxes->mutable_data<T>({n, box_num, 4}, ctx.GetPlace());
T* scores_data =
scores->mutable_data<T>({n, box_num, class_num}, ctx.GetPlace());
math::SetConstant<platform::CUDADeviceContext, T> set_zero;
set_zero(dev_ctx, boxes, static_cast<T>(0));
set_zero(dev_ctx, scores, static_cast<T>(0));
int grid_dim = (n * box_num + 512 - 1) / 512;
grid_dim = grid_dim > 8 ? 8 : grid_dim;
hipLaunchKernelGGL(( KeYoloBoxFw<T>), dim3(grid_dim), dim3(512), 0, ctx.cuda_device_context().stream(),
input_data, imgsize_data, boxes_data, scores_data, conf_thresh,
anchors_data, n, h, w, an_num, class_num, box_num, input_size);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(yolo_box, ops::YoloBoxOpCUDAKernel<float>,
ops::YoloBoxOpCUDAKernel<double>);
| f008679af417a43f863aeea8e4f6319d81fb26d1.cu | /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/detection/yolo_box_op.h"
#include "paddle/fluid/operators/math/math_function.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T>
__global__ void KeYoloBoxFw(const T* input, const int* imgsize, T* boxes,
T* scores, const float conf_thresh,
const int* anchors, const int n, const int h,
const int w, const int an_num, const int class_num,
const int box_num, int input_size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
T box[4];
for (; tid < n * box_num; tid += stride) {
int grid_num = h * w;
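// Decompose the linear id into batch index i, anchor index j, grid row k and grid column l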
int i = tid / box_num;
int j = (tid % box_num) / grid_num;
int k = (tid % grid_num) / w;
int l = tid % w;
int an_stride = (5 + class_num) * grid_num;
int img_height = imgsize[2 * i];
int img_width = imgsize[2 * i + 1];
int obj_idx =
GetEntryIndex(i, j, k * w + l, an_num, an_stride, grid_num, 4);
T conf = sigmoid<T>(input[obj_idx]);
if (conf < conf_thresh) {
continue;
}
int box_idx =
GetEntryIndex(i, j, k * w + l, an_num, an_stride, grid_num, 0);
GetYoloBox<T>(box, input, anchors, l, k, j, h, input_size, box_idx,
grid_num, img_height, img_width);
box_idx = (i * box_num + j * grid_num + k * w + l) * 4;
CalcDetectionBox<T>(boxes, box, box_idx, img_height, img_width);
int label_idx =
GetEntryIndex(i, j, k * w + l, an_num, an_stride, grid_num, 5);
int score_idx = (i * box_num + j * grid_num + k * w + l) * class_num;
CalcLabelScore<T>(scores, input, label_idx, score_idx, class_num, conf,
grid_num);
}
}
template <typename T>
class YoloBoxOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* input = ctx.Input<Tensor>("X");
auto* img_size = ctx.Input<Tensor>("ImgSize");
auto* boxes = ctx.Output<Tensor>("Boxes");
auto* scores = ctx.Output<Tensor>("Scores");
auto anchors = ctx.Attr<std::vector<int>>("anchors");
int class_num = ctx.Attr<int>("class_num");
float conf_thresh = ctx.Attr<float>("conf_thresh");
int downsample_ratio = ctx.Attr<int>("downsample_ratio");
const int n = input->dims()[0];
const int h = input->dims()[2];
const int w = input->dims()[3];
const int box_num = boxes->dims()[1];
const int an_num = anchors.size() / 2;
int input_size = downsample_ratio * h;
auto& dev_ctx = ctx.cuda_device_context();
auto& allocator =
platform::DeviceTemporaryAllocator::Instance().Get(dev_ctx);
int bytes = sizeof(int) * anchors.size();
auto anchors_ptr = allocator.Allocate(sizeof(int) * anchors.size());
int* anchors_data = reinterpret_cast<int*>(anchors_ptr->ptr());
const auto gplace = boost::get<platform::CUDAPlace>(ctx.GetPlace());
const auto cplace = platform::CPUPlace();
memory::Copy(gplace, anchors_data, cplace, anchors.data(), bytes,
dev_ctx.stream());
const T* input_data = input->data<T>();
const int* imgsize_data = img_size->data<int>();
T* boxes_data = boxes->mutable_data<T>({n, box_num, 4}, ctx.GetPlace());
T* scores_data =
scores->mutable_data<T>({n, box_num, class_num}, ctx.GetPlace());
math::SetConstant<platform::CUDADeviceContext, T> set_zero;
set_zero(dev_ctx, boxes, static_cast<T>(0));
set_zero(dev_ctx, scores, static_cast<T>(0));
int grid_dim = (n * box_num + 512 - 1) / 512;
grid_dim = grid_dim > 8 ? 8 : grid_dim;
KeYoloBoxFw<T><<<grid_dim, 512, 0, ctx.cuda_device_context().stream()>>>(
input_data, imgsize_data, boxes_data, scores_data, conf_thresh,
anchors_data, n, h, w, an_num, class_num, box_num, input_size);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(yolo_box, ops::YoloBoxOpCUDAKernel<float>,
ops::YoloBoxOpCUDAKernel<double>);
|
9dad9f2a6f22185f877f613ae9931a060b347fea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "dot_product.h"
int main(int argc, char **argv)
{
int n = 3;
int nbytes = n*sizeof(double);
double *d_a = 0;
hipMalloc(&d_a, nbytes);
double *data = (double *)malloc(nbytes);
for (int i=0; i < n; ++i)
{
data[i] = (double)(i+1);
}
hipMemcpy((void *)d_a, (void *)data, nbytes, hipMemcpyHostToDevice);
printf("Calling kernel\n");
hipLaunchKernelGGL(( dot_prod), dim3(2),dim3(2), 0, 0, d_a, d_a, nbytes);
hipDeviceSynchronize();
printf("done\n");
return 0;
}
| 9dad9f2a6f22185f877f613ae9931a060b347fea.cu |
#include <stdio.h>
#include <stdlib.h>
#include "dot_product.h"
int main(int argc, char **argv)
{
int n = 3;
int nbytes = n*sizeof(double);
double *d_a = 0;
cudaMalloc(&d_a, nbytes);
double *data = (double *)malloc(nbytes);
for (int i=0; i < n; ++i)
{
data[i] = (double)(i+1);
}
cudaMemcpy((void *)d_a, (void *)data, nbytes, cudaMemcpyHostToDevice);
printf("Calling kernel\n");
dot_prod<<<2,2>>>(d_a, d_a, nbytes);
cudaDeviceSynchronize();
printf("done\n");
return 0;
}
|
f6319ac6a374a2407f56a6a3a5c5fdef06c56dd7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* imFlip.cu
*
* Created on: 29/mar/2020
* Author: jack
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "imgStuff.h"
//Function performing vertical flip.
//Every thread switches one pixel. Matrices m and res use linearized indexes
__global__ void VFlip(unsigned char *m,unsigned char *res,int VPixel,int HPixel){
//Linearized index into the matrix of pixels (3 bytes each), derived from the linear thread index
int t=(blockDim.x*blockIdx.x+threadIdx.x);
//Computing the number of padding bytes per row according to bmp format
int pad=((HPixel*3+3)&~3)-HPixel*3;
int rowIdx=t/HPixel;
int colIdx=t%HPixel;
//First index of symmetric row in linearized mat
int symmetricRow=((VPixel-1) * HPixel*3) - (rowIdx * HPixel*3);
if(t*3<VPixel*HPixel*3){
res[ symmetricRow + colIdx*3 + (VPixel-rowIdx-1)*pad ]=m[ t*3 + rowIdx*pad ];
res[ symmetricRow + colIdx*3 + (VPixel-rowIdx-1)*pad + 1 ]=m[ t*3 + rowIdx*pad + 1 ];
res[ symmetricRow + colIdx*3 + (VPixel-rowIdx-1)*pad + 2 ]=m[ t*3 + rowIdx*pad + 2 ];
}
}
//Function performing horizontal flip.
__global__ void HFlip(unsigned char *m,unsigned char *res,int VPixel,int HPixel){
int t=(blockDim.x*blockIdx.x+threadIdx.x);
//Computing the number of padding bytes per row according to bmp format
int pad=((HPixel*3+3)&~3)-HPixel*3;
int rowIdx=t/HPixel;
int colIdx=t%HPixel;
int symmetricCol=(HPixel*3)-colIdx*3;
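//symmetricCol is the byte offset just past the mirrored pixel in this row; the -3/-2/-1 below address its three colour bytes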
if(t*3<VPixel*HPixel*3){
res[ rowIdx*HPixel*3 + rowIdx*pad + symmetricCol - 3 ]=m[ t*3 + rowIdx*pad ];
res[ rowIdx*HPixel*3 + rowIdx*pad + symmetricCol - 2 ]=m[ t*3 + rowIdx*pad + 1 ];
res[ rowIdx*HPixel*3 + rowIdx*pad + symmetricCol - 1 ]=m[ t*3 + rowIdx*pad + 2 ];
}
}
int main(int argc,char **argv){
int threadPerBlock=32,dimGrid;
unsigned char *GPUImgRes,*GPUImg;
//Creates test bmp img
//randFourSquares("test1.bmp",480,640);
//Img properties obj
ImgProp *ip=(ImgProp *)malloc(sizeof(ImgProp));
unsigned char *mat=ReadBMP("dog.bmp",ip);
/*printf("Ya\n");
int i,j
for(i=0;i<ip->VPixel;i++){
for(j=0;j<ip->HPixel*3;j+=3){
printf("* %d %d %d *",mat[i*ip->HBytes+j],mat[i*ip->HBytes+j+1],mat[i*ip->HBytes+j+2]);
}
for(j=ip->HPixel*3;j<ip->HBytes;j++){
printf(" %d ",mat[i* ip->HBytes+j]);
}
printf("\n");
}*/
//Number of blocks. Every thread switches one pixel
dimGrid=(ip->HPixel*ip->VPixel + threadPerBlock - 1)/threadPerBlock;
//hipMalloc takes the address of the pointer (hence the void** cast) so it can set it to the device allocation
hipMalloc((void **)&GPUImgRes,sizeof(unsigned char)*ip->HBytes*ip->VPixel);
hipMalloc((void **)&GPUImg,sizeof(unsigned char)*ip->HBytes*ip->VPixel);
hipMemcpy(GPUImg,mat,ip->HBytes*ip->VPixel,hipMemcpyHostToDevice);
hipMemcpy(GPUImgRes,mat,ip->HBytes*ip->VPixel,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( VFlip), dim3(dimGrid),dim3(threadPerBlock), 0, 0, GPUImg,GPUImgRes,ip->VPixel,ip->HPixel);
hipMemcpy(mat,GPUImgRes,ip->HBytes*ip->VPixel,hipMemcpyDeviceToHost);
hipFree(GPUImgRes);
hipFree(GPUImg);
/*printf("Yo\n");
for(i=0;i<ip->VPixel;i++){
for(j=0;j<ip->HPixel*3;j+=3){
printf("* %d %d %d *",mat[i*ip->HBytes+j],mat[i*ip->HBytes+j+1],mat[i*ip->HBytes+j+2]);
}
for(j=ip->HPixel*3;j<ip->HBytes;j++){
printf(" %d ",mat[i* ip->HBytes+j]);
}
printf("\n");
}*/
WriteBMP(mat,"dogVFlip.bmp",ip);
free(mat);
free(ip->HeaderInfo);
free(ip);
}
| f6319ac6a374a2407f56a6a3a5c5fdef06c56dd7.cu | /*
* imFlip.cu
*
* Created on: 29/mar/2020
* Author: jack
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "imgStuff.h"
//Function performing vertical flip.
//Every thread switches one pixel. Matrices m and res use linearized indexes
__global__ void VFlip(unsigned char *m,unsigned char *res,int VPixel,int HPixel){
//Linearized index into the matrix of pixels (3 bytes each), derived from the linear thread index
int t=(blockDim.x*blockIdx.x+threadIdx.x);
//Computing the number of padding bytes per row according to bmp format
int pad=((HPixel*3+3)&~3)-HPixel*3;
int rowIdx=t/HPixel;
int colIdx=t%HPixel;
//First index of symmetric row in linearized mat
int symmetricRow=((VPixel-1) * HPixel*3) - (rowIdx * HPixel*3);
if(t*3<VPixel*HPixel*3){
res[ symmetricRow + colIdx*3 + (VPixel-rowIdx-1)*pad ]=m[ t*3 + rowIdx*pad ];
res[ symmetricRow + colIdx*3 + (VPixel-rowIdx-1)*pad + 1 ]=m[ t*3 + rowIdx*pad + 1 ];
res[ symmetricRow + colIdx*3 + (VPixel-rowIdx-1)*pad + 2 ]=m[ t*3 + rowIdx*pad + 2 ];
}
}
//Function performing horizontal flip.
__global__ void HFlip(unsigned char *m,unsigned char *res,int VPixel,int HPixel){
int t=(blockDim.x*blockIdx.x+threadIdx.x);
//Computing the number of padding bytes per row according to bmp format
int pad=((HPixel*3+3)&~3)-HPixel*3;
int rowIdx=t/HPixel;
int colIdx=t%HPixel;
int symmetricCol=(HPixel*3)-colIdx*3;
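//symmetricCol is the byte offset just past the mirrored pixel in this row; the -3/-2/-1 below address its three colour bytes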
if(t*3<VPixel*HPixel*3){
res[ rowIdx*HPixel*3 + rowIdx*pad + symmetricCol - 3 ]=m[ t*3 + rowIdx*pad ];
res[ rowIdx*HPixel*3 + rowIdx*pad + symmetricCol - 2 ]=m[ t*3 + rowIdx*pad + 1 ];
res[ rowIdx*HPixel*3 + rowIdx*pad + symmetricCol - 1 ]=m[ t*3 + rowIdx*pad + 2 ];
}
}
int main(int argc,char **argv){
int threadPerBlock=32,dimGrid;
unsigned char *GPUImgRes,*GPUImg;
//Creates test bmp img
//randFourSquares("test1.bmp",480,640);
//Img properties obj
ImgProp *ip=(ImgProp *)malloc(sizeof(ImgProp));
unsigned char *mat=ReadBMP("dog.bmp",ip);
/*printf("Ya\n");
int i,j
for(i=0;i<ip->VPixel;i++){
for(j=0;j<ip->HPixel*3;j+=3){
printf("* %d %d %d *",mat[i*ip->HBytes+j],mat[i*ip->HBytes+j+1],mat[i*ip->HBytes+j+2]);
}
for(j=ip->HPixel*3;j<ip->HBytes;j++){
printf(" %d ",mat[i* ip->HBytes+j]);
}
printf("\n");
}*/
//Number of blocks. Every thread switches one pixel
dimGrid=(ip->HPixel*ip->VPixel + threadPerBlock - 1)/threadPerBlock;
//cudaMalloc takes the address of the pointer (hence the void** cast) so it can set it to the device allocation
cudaMalloc((void **)&GPUImgRes,sizeof(unsigned char)*ip->HBytes*ip->VPixel);
cudaMalloc((void **)&GPUImg,sizeof(unsigned char)*ip->HBytes*ip->VPixel);
cudaMemcpy(GPUImg,mat,ip->HBytes*ip->VPixel,cudaMemcpyHostToDevice);
cudaMemcpy(GPUImgRes,mat,ip->HBytes*ip->VPixel,cudaMemcpyHostToDevice);
VFlip<<<dimGrid,threadPerBlock>>>(GPUImg,GPUImgRes,ip->VPixel,ip->HPixel);
cudaMemcpy(mat,GPUImgRes,ip->HBytes*ip->VPixel,cudaMemcpyDeviceToHost);
cudaFree(GPUImgRes);
cudaFree(GPUImg);
/*printf("Yo\n");
for(i=0;i<ip->VPixel;i++){
for(j=0;j<ip->HPixel*3;j+=3){
printf("* %d %d %d *",mat[i*ip->HBytes+j],mat[i*ip->HBytes+j+1],mat[i*ip->HBytes+j+2]);
}
for(j=ip->HPixel*3;j<ip->HBytes;j++){
printf(" %d ",mat[i* ip->HBytes+j]);
}
printf("\n");
}*/
WriteBMP(mat,"dogVFlip.bmp",ip);
free(mat);
free(ip->HeaderInfo);
free(ip);
}
|
e19a5b759637830914e0b8bd8f0f0a8928885fcf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "util.h"
#include "calc_error.h"
__device__ unsigned int log2(unsigned int n) {
return (n > 1) ? 1 + log2(n >> 1) : 0;
}
////////////////////////////////////////////////////////////////////////////////
// Check error functions
////////////////////////////////////////////////////////////////////////////////
__device__ unsigned long long errors = 0;
__device__ unsigned int maxUintErrorNonZeros = 0;
__device__ unsigned int maxUintErrorZeros = 0;
unsigned long long get_dmr_error() {
unsigned long long ret = 0;
hipMemcpyFromSymbol(&ret, errors, sizeof(unsigned long long), 0, hipMemcpyDeviceToHost);
return ret;
}
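// Relative-error check: flags an error when the ratio FP32/FP64 falls outside the
// [MIN_PERCENTAGE, MAX_PERCENTAGE] band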
__forceinline__ __device__ void relative_error(double val, float val_rp) {
float relative = __fdividef(val_rp, float(val));
if (relative < MIN_PERCENTAGE || relative > MAX_PERCENTAGE) {
atomicAdd(&errors, 1);
}
}
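// UINT error check: reinterprets both values as 32-bit patterns and flags an error when the
// absolute difference of the bit patterns exceeds UINT_THRESHOLD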
__forceinline__ __device__ void uint_error(double rhs, float lhs) {
float rhs_as_float = float(rhs);
uint32_t lhs_data = *((uint32_t*) &lhs);
uint32_t rhs_data = *((uint32_t*) &rhs_as_float);
uint32_t diff = SUB_ABS(lhs_data, rhs_data);
if (diff > UINT_THRESHOLD) {
atomicAdd(&errors, 1);
}
}
__forceinline__ __device__ void hybrid_error(double val, float val_rp) {
float lhs = abs(val_rp);
float rhs = __double2float_rz(abs(val));
if (rhs == 0 || lhs == 0) {
// ABSOLUTE ERROR
float abs_err = SUB_ABS(rhs, lhs);
if (abs_err > ABS_ERR_THRESHOLD) {
atomicAdd(&errors, 1);
}
} else if (rhs < ABS_ERR_UPPER_BOUND_VAL && lhs < ABS_ERR_UPPER_BOUND_VAL) {
// ABSOLUTE ERROR
float abs_err = SUB_ABS(rhs, lhs);
if (abs_err > ABS_ERR_THRESHOLD) {
atomicAdd(&errors, 1);
}
} else if (rhs >= ABS_ERR_UPPER_BOUND_VAL || lhs >= ABS_ERR_UPPER_BOUND_VAL) {
// RELATIVE ERROR
float rel_err = SUB_ABS(1, lhs / rhs);
if (rel_err > REL_ERR_THRESHOLD) {
atomicAdd(&errors, 1);
}
}
}
__global__ void check_errors_kernel(double *array, float *array_rp, int N) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N)
#if ERROR_METRIC == HYBRID
hybrid_error(array[tid], array_rp[tid]);
#elif ERROR_METRIC == UINT_ERROR
uint_error(array[tid], array_rp[tid]);
#else
relative_error(array[tid], array_rp[tid]);
#endif
}
void check_errors_gpu(double *array, float *array_rp, int N) {
int gridDim = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
hipLaunchKernelGGL(( check_errors_kernel), dim3(gridDim), dim3(BLOCK_SIZE), 0, 0, array, array_rp, N);
CHECK_CUDA_ERROR(hipPeekAtLastError());
}
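// > Host-side usage sketch (illustrative; the buffer names are placeholders, and
// > only check_errors_gpu() and get_dmr_error() come from this file):
// check_errors_gpu(d_out_fp64, d_out_fp32, N); // compare the FP64 and FP32 results
// CHECK_CUDA_ERROR(hipDeviceSynchronize()); // wait for the check kernel to finish
// unsigned long long faults = get_dmr_error(); // nonzero means the two precisions diverged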
////////////////////////////////////////////////////////////////////////////////
// Find max error functions
////////////////////////////////////////////////////////////////////////////////
__device__ unsigned long long uintErrDistribution[32] = {0};
__device__ uint32_t maxUintError = 0;
__device__ float relErrArray[1 << 24];
__device__ float absErrArray[1 << 24];
__device__ uint32_t zerosFP64 = 0;
__device__ uint32_t zerosFP32 = 0;
__device__ uint32_t negatives = 0;
__global__ void calc_errors_kernel(double *array, float *array_rp, int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= N) return;
double rhs = array[i];
float lhs = array_rp[i];
float rhs_as_float = __double2float_rz(rhs);
// > UINT error
uint32_t lhs_data = *((uint32_t*) &lhs);
uint32_t rhs_data = *((uint32_t*) &rhs_as_float);
uint32_t diff = SUB_ABS(lhs_data, rhs_data);
int bit = __float2int_rd(log2(diff));
atomicAdd(&(uintErrDistribution[bit]), 1);
atomicMax(&maxUintError, diff);
// > Relative error
relErrArray[i] = (rhs_as_float != 0) ? abs(1 - lhs / rhs_as_float) : IGNORE_VAL_FLAG;
// > Absolute error
absErrArray[i] = abs(lhs - rhs_as_float);
// > Stats
if (rhs_as_float < 0 || lhs < 0) atomicAdd(&negatives, 1);
if (rhs_as_float == 0) atomicAdd(&zerosFP64, 1);
if (lhs == 0) atomicAdd(&zerosFP32, 1);
}
void calc_errors_gpu(double *array, float *array_rp, int N) {
int gridDim = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
hipLaunchKernelGGL(( calc_errors_kernel), dim3(gridDim), dim3(BLOCK_SIZE), 0, 0, array, array_rp, N);
CHECK_CUDA_ERROR(hipPeekAtLastError());
}
// > Getters
void get_diffs_distribution(unsigned long long *dist) {
    hipMemcpyFromSymbol(dist, uintErrDistribution, sizeof(unsigned long long) * 32, 0, hipMemcpyDeviceToHost); // uintErrDistribution has 32 buckets
}
uint32_t get_max_uint_err() {
uint32_t ret = 0;
hipMemcpyFromSymbol(&ret, maxUintError, sizeof(uint32_t), 0, hipMemcpyDeviceToHost);
return ret;
}
void get_rel_error_array(float *relErrArr, int N) {
hipMemcpyFromSymbol(relErrArr, relErrArray, sizeof(float) * N, 0, hipMemcpyDeviceToHost);
}
void get_abs_error_array(float *absErrArr, int N) {
hipMemcpyFromSymbol(absErrArr, absErrArray, sizeof(float) * N, 0, hipMemcpyDeviceToHost);
}
uint32_t get_zeros_fp64() {
uint32_t ret = 0;
hipMemcpyFromSymbol(&ret, zerosFP64, sizeof(uint32_t), 0, hipMemcpyDeviceToHost);
return ret;
}
uint32_t get_zeros_fp32() {
uint32_t ret = 0;
hipMemcpyFromSymbol(&ret, zerosFP32, sizeof(uint32_t), 0, hipMemcpyDeviceToHost);
return ret;
}
uint32_t get_negatives() {
uint32_t ret = 0;
hipMemcpyFromSymbol(&ret, negatives, sizeof(uint32_t), 0, hipMemcpyDeviceToHost);
return ret;
}
// > Hybrid error (relative + abs error)
// __global__ void calc_error_hybrid_kernel(double *array, float *array_rp, int N) {
// int i = blockDim.x * blockIdx.x + threadIdx.x;
// if (i < N) {
// float lhs = abs(array_rp[i]);
// float rhs = __double2float_rz(abs(array[i]));
// if (rhs == 0 || lhs == 0) {
// // ABSOLUTE ERROR
// absErrArray[i] = abs(rhs - lhs);
// relErrArray[i] = IGNORE_VAL_FLAG;
// } else if (rhs < ABS_ERR_UPPER_BOUND_VAL && lhs < ABS_ERR_UPPER_BOUND_VAL) {
// // ABSOLUTE ERROR
// absErrArray[i] = abs(rhs - lhs);
// relErrArray[i] = IGNORE_VAL_FLAG;
// } else if (rhs >= ABS_ERR_UPPER_BOUND_VAL || lhs >= ABS_ERR_UPPER_BOUND_VAL) {
// // RELATIVE ERROR
// absErrArray[i] = IGNORE_VAL_FLAG;
// relErrArray[i] = abs(1 - lhs / rhs);
// }
// }
// }
// void calc_error_hybrid_gpu(double *array, float *array_rp, int N) {
// int gridDim = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
// calc_error_hybrid_kernel<<<gridDim, BLOCK_SIZE>>>(array, array_rp, N);
// // > Relative error
// hipMemcpyFromSymbol(tmpArr, relErrArray, sizeof(float) * N, 0, hipMemcpyDeviceToHost);
// int max_rel_err_index = find_max_i(tmpArr, N);
// float max_rel_err = tmpArr[max_rel_err_index];
// double max_rel_err_double_val; hipMemcpy(&max_rel_err_double_val, array + max_rel_err_index, sizeof(double), hipMemcpyDeviceToHost);
// float max_rel_err_float_val; hipMemcpy(&max_rel_err_float_val, array_rp + max_rel_err_index, sizeof(float), hipMemcpyDeviceToHost);
// float rhs_as_float = (float)(max_rel_err_double_val);
// uint32_t lhs_data = *((uint32_t*) &max_rel_err_float_val);
// uint32_t rhs_data = *((uint32_t*) &rhs_as_float);
// uint32_t uintErr = SUB_ABS(lhs_data, rhs_data);
// if (max_rel_err > maxRelErr) {
// maxRelErr = max_rel_err;
// maxRelErrDoubleVal = max_rel_err_double_val;
// maxRelErrFloatVal = max_rel_err_float_val;
// maxRelErrUINTErr = uintErr;
// }
// // > Absolute error
// hipMemcpyFromSymbol(tmpArr, absErrArray, sizeof(float) * N, 0, hipMemcpyDeviceToHost);
// int max_abs_err_index = find_max_i(tmpArr, N);
// float max_abs_err = tmpArr[max_abs_err_index];
// double max_abs_err_double_val; hipMemcpy(&max_abs_err_double_val, array + max_abs_err_index, sizeof(double), hipMemcpyDeviceToHost);
// float max_abs_err_float_val; hipMemcpy(&max_abs_err_float_val, array_rp + max_abs_err_index, sizeof(float), hipMemcpyDeviceToHost);
// rhs_as_float = (float)(max_abs_err_double_val);
// lhs_data = *((uint32_t*) &max_abs_err_float_val);
// rhs_data = *((uint32_t*) &rhs_as_float);
// uintErr = SUB_ABS(lhs_data, rhs_data);
// if (max_abs_err > maxAbsErr) {
// maxAbsErr = max_abs_err;
// maxAbsErrDoubleVal = max_abs_err_double_val;
// maxAbsErrFloatVal = max_abs_err_float_val;
// maxAbsErrUINTErr = uintErr;
// }
// } | e19a5b759637830914e0b8bd8f0f0a8928885fcf.cu | #include "util.h"
#include "calc_error.h"
__device__ unsigned int log2(unsigned int n) {
return (n > 1) ? 1 + log2(n >> 1) : 0;
}
////////////////////////////////////////////////////////////////////////////////
// Check error functions
////////////////////////////////////////////////////////////////////////////////
__device__ unsigned long long errors = 0;
__device__ unsigned int maxUintErrorNonZeros = 0;
__device__ unsigned int maxUintErrorZeros = 0;
unsigned long long get_dmr_error() {
unsigned long long ret = 0;
cudaMemcpyFromSymbol(&ret, errors, sizeof(unsigned long long), 0, cudaMemcpyDeviceToHost);
return ret;
}
__forceinline__ __device__ void relative_error(double val, float val_rp) {
float relative = __fdividef(val_rp, float(val));
if (relative < MIN_PERCENTAGE || relative > MAX_PERCENTAGE) {
atomicAdd(&errors, 1);
}
}
__forceinline__ __device__ void uint_error(double rhs, float lhs) {
float rhs_as_float = float(rhs);
uint32_t lhs_data = *((uint32_t*) &lhs);
uint32_t rhs_data = *((uint32_t*) &rhs_as_float);
uint32_t diff = SUB_ABS(lhs_data, rhs_data);
if (diff > UINT_THRESHOLD) {
atomicAdd(&errors, 1);
}
}
__forceinline__ __device__ void hybrid_error(double val, float val_rp) {
float lhs = abs(val_rp);
float rhs = __double2float_rz(abs(val));
if (rhs == 0 || lhs == 0) {
// ABSOLUTE ERROR
float abs_err = SUB_ABS(rhs, lhs);
if (abs_err > ABS_ERR_THRESHOLD) {
atomicAdd(&errors, 1);
}
} else if (rhs < ABS_ERR_UPPER_BOUND_VAL && lhs < ABS_ERR_UPPER_BOUND_VAL) {
// ABSOLUTE ERROR
float abs_err = SUB_ABS(rhs, lhs);
if (abs_err > ABS_ERR_THRESHOLD) {
atomicAdd(&errors, 1);
}
} else if (rhs >= ABS_ERR_UPPER_BOUND_VAL || lhs >= ABS_ERR_UPPER_BOUND_VAL) {
// RELATIVE ERROR
float rel_err = SUB_ABS(1, lhs / rhs);
if (rel_err > REL_ERR_THRESHOLD) {
atomicAdd(&errors, 1);
}
}
}
__global__ void check_errors_kernel(double *array, float *array_rp, int N) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N)
#if ERROR_METRIC == HYBRID
hybrid_error(array[tid], array_rp[tid]);
#elif ERROR_METRIC == UINT_ERROR
uint_error(array[tid], array_rp[tid]);
#else
relative_error(array[tid], array_rp[tid]);
#endif
}
void check_errors_gpu(double *array, float *array_rp, int N) {
int gridDim = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
check_errors_kernel<<<gridDim, BLOCK_SIZE>>>(array, array_rp, N);
CHECK_CUDA_ERROR(cudaPeekAtLastError());
}
////////////////////////////////////////////////////////////////////////////////
// Find max error functions
////////////////////////////////////////////////////////////////////////////////
__device__ unsigned long long uintErrDistribution[32] = {0};
__device__ uint32_t maxUintError = 0;
__device__ float relErrArray[1 << 24];
__device__ float absErrArray[1 << 24];
__device__ uint32_t zerosFP64 = 0;
__device__ uint32_t zerosFP32 = 0;
__device__ uint32_t negatives = 0;
__global__ void calc_errors_kernel(double *array, float *array_rp, int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= N) return;
double rhs = array[i];
float lhs = array_rp[i];
float rhs_as_float = __double2float_rz(rhs);
// > UINT error
uint32_t lhs_data = *((uint32_t*) &lhs);
uint32_t rhs_data = *((uint32_t*) &rhs_as_float);
uint32_t diff = SUB_ABS(lhs_data, rhs_data);
int bit = __float2int_rd(log2(diff));
atomicAdd(&(uintErrDistribution[bit]), 1);
atomicMax(&maxUintError, diff);
// > Relative error
relErrArray[i] = (rhs_as_float != 0) ? abs(1 - lhs / rhs_as_float) : IGNORE_VAL_FLAG;
// > Absolute error
absErrArray[i] = abs(lhs - rhs_as_float);
// > Stats
if (rhs_as_float < 0 || lhs < 0) atomicAdd(&negatives, 1);
if (rhs_as_float == 0) atomicAdd(&zerosFP64, 1);
if (lhs == 0) atomicAdd(&zerosFP32, 1);
}
void calc_errors_gpu(double *array, float *array_rp, int N) {
int gridDim = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
calc_errors_kernel<<<gridDim, BLOCK_SIZE>>>(array, array_rp, N);
CHECK_CUDA_ERROR(cudaPeekAtLastError());
}
// > Getters
void get_diffs_distribution(unsigned long long *dist) {
    cudaMemcpyFromSymbol(dist, uintErrDistribution, sizeof(unsigned long long) * 32, 0, cudaMemcpyDeviceToHost); // uintErrDistribution has 32 buckets
}
uint32_t get_max_uint_err() {
uint32_t ret = 0;
cudaMemcpyFromSymbol(&ret, maxUintError, sizeof(uint32_t), 0, cudaMemcpyDeviceToHost);
return ret;
}
void get_rel_error_array(float *relErrArr, int N) {
cudaMemcpyFromSymbol(relErrArr, relErrArray, sizeof(float) * N, 0, cudaMemcpyDeviceToHost);
}
void get_abs_error_array(float *absErrArr, int N) {
cudaMemcpyFromSymbol(absErrArr, absErrArray, sizeof(float) * N, 0, cudaMemcpyDeviceToHost);
}
uint32_t get_zeros_fp64() {
uint32_t ret = 0;
cudaMemcpyFromSymbol(&ret, zerosFP64, sizeof(uint32_t), 0, cudaMemcpyDeviceToHost);
return ret;
}
uint32_t get_zeros_fp32() {
uint32_t ret = 0;
cudaMemcpyFromSymbol(&ret, zerosFP32, sizeof(uint32_t), 0, cudaMemcpyDeviceToHost);
return ret;
}
uint32_t get_negatives() {
uint32_t ret = 0;
cudaMemcpyFromSymbol(&ret, negatives, sizeof(uint32_t), 0, cudaMemcpyDeviceToHost);
return ret;
}
// > Hybrid error (relative + abs error)
// __global__ void calc_error_hybrid_kernel(double *array, float *array_rp, int N) {
// int i = blockDim.x * blockIdx.x + threadIdx.x;
// if (i < N) {
// float lhs = abs(array_rp[i]);
// float rhs = __double2float_rz(abs(array[i]));
// if (rhs == 0 || lhs == 0) {
// // ABSOLUTE ERROR
// absErrArray[i] = abs(rhs - lhs);
// relErrArray[i] = IGNORE_VAL_FLAG;
// } else if (rhs < ABS_ERR_UPPER_BOUND_VAL && lhs < ABS_ERR_UPPER_BOUND_VAL) {
// // ABSOLUTE ERROR
// absErrArray[i] = abs(rhs - lhs);
// relErrArray[i] = IGNORE_VAL_FLAG;
// } else if (rhs >= ABS_ERR_UPPER_BOUND_VAL || lhs >= ABS_ERR_UPPER_BOUND_VAL) {
// // RELATIVE ERROR
// absErrArray[i] = IGNORE_VAL_FLAG;
// relErrArray[i] = abs(1 - lhs / rhs);
// }
// }
// }
// void calc_error_hybrid_gpu(double *array, float *array_rp, int N) {
// int gridDim = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
// calc_error_hybrid_kernel<<<gridDim, BLOCK_SIZE>>>(array, array_rp, N);
// // > Relative error
// cudaMemcpyFromSymbol(tmpArr, relErrArray, sizeof(float) * N, 0, cudaMemcpyDeviceToHost);
// int max_rel_err_index = find_max_i(tmpArr, N);
// float max_rel_err = tmpArr[max_rel_err_index];
// double max_rel_err_double_val; cudaMemcpy(&max_rel_err_double_val, array + max_rel_err_index, sizeof(double), cudaMemcpyDeviceToHost);
// float max_rel_err_float_val; cudaMemcpy(&max_rel_err_float_val, array_rp + max_rel_err_index, sizeof(float), cudaMemcpyDeviceToHost);
// float rhs_as_float = (float)(max_rel_err_double_val);
// uint32_t lhs_data = *((uint32_t*) &max_rel_err_float_val);
// uint32_t rhs_data = *((uint32_t*) &rhs_as_float);
// uint32_t uintErr = SUB_ABS(lhs_data, rhs_data);
// if (max_rel_err > maxRelErr) {
// maxRelErr = max_rel_err;
// maxRelErrDoubleVal = max_rel_err_double_val;
// maxRelErrFloatVal = max_rel_err_float_val;
// maxRelErrUINTErr = uintErr;
// }
// // > Absolute error
// cudaMemcpyFromSymbol(tmpArr, absErrArray, sizeof(float) * N, 0, cudaMemcpyDeviceToHost);
// int max_abs_err_index = find_max_i(tmpArr, N);
// float max_abs_err = tmpArr[max_abs_err_index];
// double max_abs_err_double_val; cudaMemcpy(&max_abs_err_double_val, array + max_abs_err_index, sizeof(double), cudaMemcpyDeviceToHost);
// float max_abs_err_float_val; cudaMemcpy(&max_abs_err_float_val, array_rp + max_abs_err_index, sizeof(float), cudaMemcpyDeviceToHost);
// rhs_as_float = (float)(max_abs_err_double_val);
// lhs_data = *((uint32_t*) &max_abs_err_float_val);
// rhs_data = *((uint32_t*) &rhs_as_float);
// uintErr = SUB_ABS(lhs_data, rhs_data);
// if (max_abs_err > maxAbsErr) {
// maxAbsErr = max_abs_err;
// maxAbsErrDoubleVal = max_abs_err_double_val;
// maxAbsErrFloatVal = max_abs_err_float_val;
// maxAbsErrUINTErr = uintErr;
// }
// } |
dc705e71001f00e4d647f0da2a06b61b6556a11c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef WITH_CUDA
#include "core/context_cuda.h"
#include "utils/op_kernel.h"
namespace dragon {
namespace kernel {
/*! Concat <T = ?, Device = CUDA> */
template <typename T>
__global__ void _Concat(
const int nthreads,
const int inner_dim,
const int x_cols,
const int y_concat_dim,
const int concat_offset,
const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(x_idx, nthreads) {
const int outer_idx = x_idx / x_cols;
const int concat_idx = x_idx % x_cols;
const int y_idx = (outer_idx * y_concat_dim + concat_offset)
* inner_dim + concat_idx;
y[y_idx] = x[x_idx];
}
}
/*! Kernel Launchers */
#define DEFINE_CONCAT_KERNEL_LAUNCHER(name, T) \
template <> void name<T, CUDAContext>( \
const int outer_dim, \
const int inner_dim, \
const int x_concat_dim, \
const int y_concat_dim, \
const int concat_offset, \
const T* x, \
T* y, \
CUDAContext* ctx) { \
auto x_cols = x_concat_dim * inner_dim; \
auto nthreads = outer_dim * x_concat_dim * inner_dim; \
_##name<T> \
<< < CUDA_BLOCKS(nthreads), CUDA_THREADS, \
0, ctx->cuda_stream() >> > \
(nthreads, inner_dim, x_cols, \
y_concat_dim, concat_offset, x, y); \
}
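/*! Example (illustrative): concatenating tensors of shape (outer_dim=2,
    x_concat_dim=3, inner_dim=4) and (2, 5, 4) along the middle axis gives
    y_concat_dim = 8; the first input is copied with concat_offset = 0 and the
    second with concat_offset = 3, so each element lands at
    (outer_idx * 8 + concat_offset) * 4 + concat_idx in the output. */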
DEFINE_CONCAT_KERNEL_LAUNCHER(Concat, bool);
DEFINE_CONCAT_KERNEL_LAUNCHER(Concat, int8_t);
DEFINE_CONCAT_KERNEL_LAUNCHER(Concat, uint8_t);
DEFINE_CONCAT_KERNEL_LAUNCHER(Concat, int);
DEFINE_CONCAT_KERNEL_LAUNCHER(Concat, int64_t);
DEFINE_CONCAT_KERNEL_LAUNCHER(Concat, float16);
DEFINE_CONCAT_KERNEL_LAUNCHER(Concat, float);
DEFINE_CONCAT_KERNEL_LAUNCHER(Concat, double);
#undef DEFINE_CONCAT_KERNEL_LAUNCHER
} // namespace kernel
} // namespace dragon
#endif // WITH_CUDA | dc705e71001f00e4d647f0da2a06b61b6556a11c.cu | #ifdef WITH_CUDA
#include "core/context_cuda.h"
#include "utils/op_kernel.h"
namespace dragon {
namespace kernel {
/*! Concat <T = ?, Device = CUDA> */
template <typename T>
__global__ void _Concat(
const int nthreads,
const int inner_dim,
const int x_cols,
const int y_concat_dim,
const int concat_offset,
const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(x_idx, nthreads) {
const int outer_idx = x_idx / x_cols;
const int concat_idx = x_idx % x_cols;
const int y_idx = (outer_idx * y_concat_dim + concat_offset)
* inner_dim + concat_idx;
y[y_idx] = x[x_idx];
}
}
/*! Kernel Launchers */
#define DEFINE_CONCAT_KERNEL_LAUNCHER(name, T) \
template <> void name<T, CUDAContext>( \
const int outer_dim, \
const int inner_dim, \
const int x_concat_dim, \
const int y_concat_dim, \
const int concat_offset, \
const T* x, \
T* y, \
CUDAContext* ctx) { \
auto x_cols = x_concat_dim * inner_dim; \
auto nthreads = outer_dim * x_concat_dim * inner_dim; \
_##name<T> \
<< < CUDA_BLOCKS(nthreads), CUDA_THREADS, \
0, ctx->cuda_stream() >> > \
(nthreads, inner_dim, x_cols, \
y_concat_dim, concat_offset, x, y); \
}
DEFINE_CONCAT_KERNEL_LAUNCHER(Concat, bool);
DEFINE_CONCAT_KERNEL_LAUNCHER(Concat, int8_t);
DEFINE_CONCAT_KERNEL_LAUNCHER(Concat, uint8_t);
DEFINE_CONCAT_KERNEL_LAUNCHER(Concat, int);
DEFINE_CONCAT_KERNEL_LAUNCHER(Concat, int64_t);
DEFINE_CONCAT_KERNEL_LAUNCHER(Concat, float16);
DEFINE_CONCAT_KERNEL_LAUNCHER(Concat, float);
DEFINE_CONCAT_KERNEL_LAUNCHER(Concat, double);
#undef DEFINE_CONCAT_KERNEL_LAUNCHER
} // namespace kernel
} // namespace dragon
#endif // WITH_CUDA |
3e9f94d1e3daab7857d0a620c98da6af3c2e7d78.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../../include/layers/pooling_layer.h"
//sc
#include <iostream>
//
#include <device_launch_parameters.h>
#include "../../include/base.h"
#include "../../include/gtest.h"
#include "../../include/util/common.h"
template<typename dtype>
__global__ void MaxPool(const int count, const dtype* input, const int channels,
const int height, const int width, const int pool_h, const int pool_w,
const int pool_stride_h, const int pool_stride_w, int* pool_pos, dtype* output) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < count) {
const int output_h = (height - pool_h) / pool_stride_h + 1;
const int output_w = (width - pool_w) / pool_stride_w + 1;
const int s = output_h * output_w;
const int c = s * channels;
const int i = index / c;
const int channels_i = (index - c*i) / s;
const int h_i = (index - c*i - s*channels_i) / output_w;
const int w_i = index - c*i - s*channels_i - h_i * output_w;
const int h = h_i * pool_stride_h * width;
const int w = w_i * pool_stride_w;
output[index] = input[i * height * width * channels + channels_i *height * width + h + w];
pool_pos[index] = 0;
for (int pool_row = 0; pool_row < pool_h; ++pool_row) {
for (int pool_column = 0; pool_column < pool_w; ++pool_column) {
int d = i * height * width * channels + channels_i *height * width + h + w + pool_row*width + pool_column;
if (output[index] < input[d]) {
pool_pos[index] = pool_row*pool_w + pool_column;
output[index] = input[d];
}
}
}
}
}
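// Indexing in MaxPool above: `index` walks the pooled output in NCHW order and is
// decomposed into (image i, channel channels_i, row h_i, column w_i). For example,
// with channels = 2 and a 3x3 pooled output, index 11 maps to i = 0, channels_i = 1,
// h_i = 0, w_i = 2.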
template <typename dtype>
__global__ void MaxPoolBackward(const int count, const dtype* input, const int input_channels, const int input_h,
const int input_w, const int pool_h, const int pool_w, const int pool_stride_h, const int pool_stride_w,
const int* pool_pos, const int output_h, const int output_w, dtype* output) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < count) {
const int s = input_h * input_w;
const int c = s * input_channels;
const int i = index / c;
const int channels_i = (index - c*i) / s;
const int h_i = (index - c*i - s*channels_i) / input_w;
const int w_i = index - c*i - s*channels_i - h_i * input_w;
const int pos = pool_pos[index];
output[i*input_channels*output_h*output_w + channels_i*output_h*output_w +
			(h_i*pool_stride_h + pos/pool_w)*output_w + (w_i*pool_stride_w + pos%pool_w)] = input[index]; // pool_pos encodes row*pool_w + column
}
}
namespace BigBang {
//Overlapping pooling windows are not considered for now
template<typename dtype>
void PoolingLayer<dtype>::Forward_GPU(const Tensor<dtype>* bottom, Tensor<dtype>* top) {
const dtype* bottom_data = bottom->gpu_data();
dtype* top_data = top->mutable_gpu_data();
switch (pooling_method_) {
case PoolingLayerParameter::MAX: {
int* pos_data = max_pool_pos_->mutable_gpu_data();
const int size = top->size();
hipLaunchKernelGGL(( MaxPool), dim3(BigBangGetBlocks(size)),dim3(THREAD_MAX_NUMS) , 0, 0, size, bottom_data, bottom_channels_,
bottom_row_, bottom_column_, pool_h_, pool_w_, stride_h_, stride_w_, pos_data,
top_data);
}
break;
default:
std::cout << "only support max pool now" << std::endl;
THROW_EXCEPTION;
break;
}
//sc
/*const dtype* top_data_cpu = top->cpu_data();
for (int i = 0; i < top->size(); ++i) {
std::cout << top_data_cpu[i] << std::endl;
}*/
//
}
template<typename dtype>
void PoolingLayer<dtype>::Backward_GPU(const Tensor<dtype>* top, Tensor<dtype>* bottom) {
const dtype* top_data = top->gpu_data();
const dtype* top_diff_data = top->gpu_diff_data();
dtype* bottom_diff_data = bottom->mutable_gpu_diff_data();
bigbanggpumemset(bottom_diff_data, 0, sizeof(dtype)*bottom->size());
switch (pooling_method_) {
case PoolingLayerParameter::MAX: {
const int size = top->size();
hipLaunchKernelGGL(( MaxPoolBackward), dim3(BigBangGetBlocks(size)), dim3(THREAD_MAX_NUMS) , 0, 0, size, top_diff_data, top_channels_, top_row_, top_column_,
pool_h_, pool_w_, stride_h_, stride_w_, max_pool_pos_->gpu_data(), bottom_row_,
bottom_column_, bottom_diff_data);
}
break;
default:
std::cout << "only support max pool now" << std::endl;
THROW_EXCEPTION;
break;
}
//sc
/*for (int i = 0; i < bottom->size(); ++i) {
std::cout << bottom->cpu_diff_data()[i] << std::endl;
}*/
//
}
INSTANTIATE_CLASS_GPU_FUNCTION(PoolingLayer);
} | 3e9f94d1e3daab7857d0a620c98da6af3c2e7d78.cu | #include "../../include/layers/pooling_layer.h"
//sc
#include <iostream>
//
#include <device_launch_parameters.h>
#include "../../include/base.h"
#include "../../include/gtest.h"
#include "../../include/util/common.h"
template<typename dtype>
__global__ void MaxPool(const int count, const dtype* input, const int channels,
const int height, const int width, const int pool_h, const int pool_w,
const int pool_stride_h, const int pool_stride_w, int* pool_pos, dtype* output) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < count) {
const int output_h = (height - pool_h) / pool_stride_h + 1;
const int output_w = (width - pool_w) / pool_stride_w + 1;
const int s = output_h * output_w;
const int c = s * channels;
const int i = index / c;
const int channels_i = (index - c*i) / s;
const int h_i = (index - c*i - s*channels_i) / output_w;
const int w_i = index - c*i - s*channels_i - h_i * output_w;
const int h = h_i * pool_stride_h * width;
const int w = w_i * pool_stride_w;
output[index] = input[i * height * width * channels + channels_i *height * width + h + w];
pool_pos[index] = 0;
for (int pool_row = 0; pool_row < pool_h; ++pool_row) {
for (int pool_column = 0; pool_column < pool_w; ++pool_column) {
int d = i * height * width * channels + channels_i *height * width + h + w + pool_row*width + pool_column;
if (output[index] < input[d]) {
pool_pos[index] = pool_row*pool_w + pool_column;
output[index] = input[d];
}
}
}
}
}
template <typename dtype>
__global__ void MaxPoolBackward(const int count, const dtype* input, const int input_channels, const int input_h,
const int input_w, const int pool_h, const int pool_w, const int pool_stride_h, const int pool_stride_w,
const int* pool_pos, const int output_h, const int output_w, dtype* output) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < count) {
const int s = input_h * input_w;
const int c = s * input_channels;
const int i = index / c;
const int channels_i = (index - c*i) / s;
const int h_i = (index - c*i - s*channels_i) / input_w;
const int w_i = index - c*i - s*channels_i - h_i * input_w;
const int pos = pool_pos[index];
output[i*input_channels*output_h*output_w + channels_i*output_h*output_w +
			(h_i*pool_stride_h + pos/pool_w)*output_w + (w_i*pool_stride_w + pos%pool_w)] = input[index]; // pool_pos encodes row*pool_w + column
}
}
namespace BigBang {
//Overlapping pooling windows are not considered for now
template<typename dtype>
void PoolingLayer<dtype>::Forward_GPU(const Tensor<dtype>* bottom, Tensor<dtype>* top) {
const dtype* bottom_data = bottom->gpu_data();
dtype* top_data = top->mutable_gpu_data();
switch (pooling_method_) {
case PoolingLayerParameter::MAX: {
int* pos_data = max_pool_pos_->mutable_gpu_data();
const int size = top->size();
MaxPool<<<BigBangGetBlocks(size),THREAD_MAX_NUMS >>>(size, bottom_data, bottom_channels_,
bottom_row_, bottom_column_, pool_h_, pool_w_, stride_h_, stride_w_, pos_data,
top_data);
}
break;
default:
std::cout << "only support max pool now" << std::endl;
THROW_EXCEPTION;
break;
}
//sc
/*const dtype* top_data_cpu = top->cpu_data();
for (int i = 0; i < top->size(); ++i) {
std::cout << top_data_cpu[i] << std::endl;
}*/
//
}
template<typename dtype>
void PoolingLayer<dtype>::Backward_GPU(const Tensor<dtype>* top, Tensor<dtype>* bottom) {
const dtype* top_data = top->gpu_data();
const dtype* top_diff_data = top->gpu_diff_data();
dtype* bottom_diff_data = bottom->mutable_gpu_diff_data();
bigbanggpumemset(bottom_diff_data, 0, sizeof(dtype)*bottom->size());
switch (pooling_method_) {
case PoolingLayerParameter::MAX: {
const int size = top->size();
MaxPoolBackward<<<BigBangGetBlocks(size), THREAD_MAX_NUMS >>>(size, top_diff_data, top_channels_, top_row_, top_column_,
pool_h_, pool_w_, stride_h_, stride_w_, max_pool_pos_->gpu_data(), bottom_row_,
bottom_column_, bottom_diff_data);
}
break;
default:
std::cout << "only support max pool now" << std::endl;
THROW_EXCEPTION;
break;
}
//sc
/*for (int i = 0; i < bottom->size(); ++i) {
std::cout << bottom->cpu_diff_data()[i] << std::endl;
}*/
//
}
INSTANTIATE_CLASS_GPU_FUNCTION(PoolingLayer);
} |
2a1d5354633649ab982af280eb058c8a1f22f0a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from dgemm_tesla_ab_0.cu normal d -> s, Wed Sep 17 15:08:23 2014
*/
#include "common_magma.h"
#include "commonblas_s.h"
/*
* Computes C = alpha*A*B + beta*C when alpha == 0 and beta == 0.
* That is, C = 0.
*/
__global__ void
sgemm_kernel_ab_0(
float* __restrict__ C,
const float* __restrict__ A,
const float* __restrict__ B,
int m, int n, int k,
int lda, int ldb, int ldc,
float alpha, float beta )
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x * 64;
int iby = blockIdx.y * 16;
const int idt = ty * 16 + tx;
C += ibx + idt + __mul24(iby, ldc);
ibx = ibx + idt - m;
if ( (iby+16) >= n ) {
lda = n - iby;
}
else {
lda = 16;
}
if ( ibx >= 0 )
lda = 0;
else
lda = lda;
switch(lda) {
case 16:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
C[12*ldc] = 0;
C[13*ldc] = 0;
C[14*ldc] = 0;
C[15*ldc] = 0;
break;
case 15:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
C[12*ldc] = 0;
C[13*ldc] = 0;
C[14*ldc] = 0;
break;
case 14:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
C[12*ldc] = 0;
C[13*ldc] = 0;
break;
case 13:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
C[12*ldc] = 0;
break;
case 12:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
break;
case 11:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
break;
case 10:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
break;
case 9:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
break;
case 8:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
break;
case 7:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
break;
case 6:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
break;
case 5:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
break;
case 4:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
break;
case 3:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
break;
case 2:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
break;
case 1:
C[ 0 ] = 0;
break;
case 0:
break;
}
}
extern "C" void
magmablas_sgemm_ab_0(
float *C, const float *A, const float *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
float alpha, float beta )
{
dim3 threads( 16, 4 );
dim3 grid( (m - 1)/64 + 1, (n - 1)/16 + 1 );
hipLaunchKernelGGL(( sgemm_kernel_ab_0), dim3(grid), dim3(threads), 0, magma_stream ,
C, A, B, m, n, k, lda, ldb, ldc, alpha, beta );
}
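/*
    Worked example (illustrative): for m = 130, n = 20 the launcher builds
    grid = ( (130-1)/64 + 1, (20-1)/16 + 1 ) = (3, 2) with 16x4 = 64 threads
    per block. Block (2,1) covers rows 128..191 and columns 16..19: its first
    two threads each zero lda = n - iby = 4 entries of C, and threads whose
    global row index reaches m take the lda = 0 branch and write nothing.
*/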
| 2a1d5354633649ab982af280eb058c8a1f22f0a0.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from dgemm_tesla_ab_0.cu normal d -> s, Wed Sep 17 15:08:23 2014
*/
#include "common_magma.h"
#include "commonblas_s.h"
/*
* Computes C = alpha*A*B + beta*C when alpha == 0 and beta == 0.
* That is, C = 0.
*/
__global__ void
sgemm_kernel_ab_0(
float* __restrict__ C,
const float* __restrict__ A,
const float* __restrict__ B,
int m, int n, int k,
int lda, int ldb, int ldc,
float alpha, float beta )
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
int ibx = blockIdx.x * 64;
int iby = blockIdx.y * 16;
const int idt = ty * 16 + tx;
C += ibx + idt + __mul24(iby, ldc);
ibx = ibx + idt - m;
if ( (iby+16) >= n ) {
lda = n - iby;
}
else {
lda = 16;
}
if ( ibx >= 0 )
lda = 0;
else
lda = lda;
switch(lda) {
case 16:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
C[12*ldc] = 0;
C[13*ldc] = 0;
C[14*ldc] = 0;
C[15*ldc] = 0;
break;
case 15:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
C[12*ldc] = 0;
C[13*ldc] = 0;
C[14*ldc] = 0;
break;
case 14:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
C[12*ldc] = 0;
C[13*ldc] = 0;
break;
case 13:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
C[12*ldc] = 0;
break;
case 12:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
C[11*ldc] = 0;
break;
case 11:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
C[10*ldc] = 0;
break;
case 10:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
C[ 9*ldc] = 0;
break;
case 9:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
C[ 8*ldc] = 0;
break;
case 8:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
C[ 7*ldc] = 0;
break;
case 7:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
C[ 6*ldc] = 0;
break;
case 6:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
C[ 5*ldc] = 0;
break;
case 5:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
C[ 4*ldc] = 0;
break;
case 4:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
C[ 3*ldc] = 0;
break;
case 3:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
C[ 2*ldc] = 0;
break;
case 2:
C[ 0 ] = 0;
C[ 1*ldc] = 0;
break;
case 1:
C[ 0 ] = 0;
break;
case 0:
break;
}
}
extern "C" void
magmablas_sgemm_ab_0(
float *C, const float *A, const float *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
float alpha, float beta )
{
dim3 threads( 16, 4 );
dim3 grid( (m - 1)/64 + 1, (n - 1)/16 + 1 );
sgemm_kernel_ab_0<<< grid, threads, 0, magma_stream >>>
( C, A, B, m, n, k, lda, ldb, ldc, alpha, beta );
}
|
54d60ca03b66b5abe99a2cf9bc6b15e6ab88b068.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHReduceApplyUtils.cuh"
#include <assert.h>
#include <stdlib.h>
// Maximum size per grid dimension that we assume (compute capability >= 2.0)
#define MAX_GRID_SIZE 65535LL
void THCCheckTensorDims(THCState* state, THCudaTensor* tensor, int arg) {
int64_t dims = THCudaTensor_nDimension(state, tensor);
THArgCheck(dims <= MAX_CUTORCH_DIMS, arg, CUTORCH_DIM_WARNING);
}
bool THC_getGridFromTiles(ptrdiff_t gridTiles, dim3& grid) {
if (gridTiles > MAX_GRID_SIZE * MAX_GRID_SIZE * MAX_GRID_SIZE) {
return false;
}
int64_t gridX = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles;
int64_t gridY = 1;
int64_t gridZ = 1;
if (gridTiles > MAX_GRID_SIZE) {
gridTiles = THCCeilDiv(gridTiles, (ptrdiff_t) MAX_GRID_SIZE);
gridY = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles;
if (gridTiles > MAX_GRID_SIZE) {
gridTiles = THCCeilDiv(gridTiles, (ptrdiff_t) MAX_GRID_SIZE);
gridZ = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles;
}
}
grid = dim3(gridX, gridY, gridZ);
return true;
}
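// Example: gridTiles = 70000 exceeds MAX_GRID_SIZE, so gridX = 65535 and the
// remainder spills into the next dimension: gridTiles becomes
// THCCeilDiv(70000, 65535) = 2, giving grid = (65535, 2, 1), which covers
// 65535 * 2 = 131070 >= 70000 tiles.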
| 54d60ca03b66b5abe99a2cf9bc6b15e6ab88b068.cu | #include "THCReduceApplyUtils.cuh"
#include <assert.h>
#include <stdlib.h>
// Maximum size per grid dimension that we assume (compute capability >= 2.0)
#define MAX_GRID_SIZE 65535LL
void THCCheckTensorDims(THCState* state, THCudaTensor* tensor, int arg) {
int64_t dims = THCudaTensor_nDimension(state, tensor);
THArgCheck(dims <= MAX_CUTORCH_DIMS, arg, CUTORCH_DIM_WARNING);
}
bool THC_getGridFromTiles(ptrdiff_t gridTiles, dim3& grid) {
if (gridTiles > MAX_GRID_SIZE * MAX_GRID_SIZE * MAX_GRID_SIZE) {
return false;
}
int64_t gridX = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles;
int64_t gridY = 1;
int64_t gridZ = 1;
if (gridTiles > MAX_GRID_SIZE) {
gridTiles = THCCeilDiv(gridTiles, (ptrdiff_t) MAX_GRID_SIZE);
gridY = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles;
if (gridTiles > MAX_GRID_SIZE) {
gridTiles = THCCeilDiv(gridTiles, (ptrdiff_t) MAX_GRID_SIZE);
gridZ = gridTiles > MAX_GRID_SIZE ? MAX_GRID_SIZE : gridTiles;
}
}
grid = dim3(gridX, gridY, gridZ);
return true;
}
|