hip_filename (string, length 5–84) | hip_content (string, length 79–9.69M) | cuda_filename (string, length 4–83) | cuda_content (string, length 19–9.69M) |
---|---|---|---|
3ee6315936107f25cbdcbe4d187add8a19153655.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "randomWalk.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *results = NULL;
hipMalloc(&results, XSIZE*YSIZE);
int *crossTimes = NULL;
hipMalloc(&crossTimes, XSIZE*YSIZE);
int T = 1;
int N = XSIZE*YSIZE;
double drift = 1;
int numSims = 1;
double lowerThreshold = 1;
double upperThreshold = 1;
int deviceID = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
randomWalk), dim3(gridBlock),dim3(threadBlock), 0, 0, results,crossTimes,T,N,drift,numSims,lowerThreshold,upperThreshold,deviceID);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
randomWalk), dim3(gridBlock),dim3(threadBlock), 0, 0, results,crossTimes,T,N,drift,numSims,lowerThreshold,upperThreshold,deviceID);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
randomWalk), dim3(gridBlock),dim3(threadBlock), 0, 0, results,crossTimes,T,N,drift,numSims,lowerThreshold,upperThreshold,deviceID);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 3ee6315936107f25cbdcbe4d187add8a19153655.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "randomWalk.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *results = NULL;
cudaMalloc(&results, XSIZE*YSIZE);
int *crossTimes = NULL;
cudaMalloc(&crossTimes, XSIZE*YSIZE);
int T = 1;
int N = XSIZE*YSIZE;
double drift = 1;
int numSims = 1;
double lowerThreshold = 1;
double upperThreshold = 1;
int deviceID = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
randomWalk<<<gridBlock,threadBlock>>>(results,crossTimes,T,N,drift,numSims,lowerThreshold,upperThreshold,deviceID);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
randomWalk<<<gridBlock,threadBlock>>>(results,crossTimes,T,N,drift,numSims,lowerThreshold,upperThreshold,deviceID);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
randomWalk<<<gridBlock,threadBlock>>>(results,crossTimes,T,N,drift,numSims,lowerThreshold,upperThreshold,deviceID);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
10a29762baa24b1fbddd771fe00ef78511bf51ff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include<conio.h>
#include<algorithm>
#include<stdlib.h>
#include "mylib.h"
void sortb(int* a, int size);
__global__ void bitonicSort_vp(int* a, int n, int g, int t, int p, int sz) {
int index = blockIdx.x * blockDim.x + threadIdx.x + p*sz,
map = (index / (1 << (t - 1)))*(1 << t) + (index % (1 << (t - 1))),
pos = (map / (1 << g)) % 2,
m1 = (pos == 0) ? map : (map + (1 << (t - 1))),
m2 = (pos == 0) ? (map + (1 << (t - 1))) : map;
// m ap <> map + 1<<(b-1)
//printf("%d %d index - %d %d , %d %d\n", g, b, index, map, map + (1 << (b - 1)), pos);
atomicMin(&a[m1], atomicMax(&a[m2], a[m1]));
__syncthreads();
}
double sortb_vp(int* a, int size, int logn2,int sz) {
int* array;
int mem = sizeof(int) * size;
hipMalloc((void **)&array, sizeof(int)*size);
hipMemcpy(array, a, sizeof(int) * size, hipMemcpyHostToDevice);
//printf("\nthreads p block = %d\n", size);
int threadsPerBlock = 1024;
int blocksPerGrid = ((size / 2) + threadsPerBlock - 1) / threadsPerBlock;
clock_t t, t1, td;
t = clock();
for (int g = 1; g <= logn2; g++) {
for (int t = g; t > 0; t--) {
//printf("g-> %d t-> %d\n", g, t);
int psz = 32;
for (int i = 0; i<(ceil(size / (2 * psz))); i++)
bitonicSort_vp << <((psz) + threadsPerBlock - 1) / threadsPerBlock, 1024 >> >(array, size, g, t, i, psz);
//printf("\n\n");
//
}
}
td = clock();
hipDeviceSynchronize();
t1 = clock();
double time_taken = ((double)(avg2(t1, td) - t)) / CLOCKS_PER_SEC;
//printf("\n\nfunction exec time: "); printf(" %.3lfs\n\n", time_taken,t,t1);
//bitonicSort <<<1, (size / 2) >>>(array, size / 2);
hipMemcpy(a, array, size * sizeof(int), hipMemcpyDeviceToHost);
//size /= 2;
//}
hipFree(array);
return time_taken;
}
int * intdup(int const * src, size_t len)
{
int * p = (int *)malloc(len * sizeof(int));
memcpy(p, src, len * sizeof(int));
return p;
}
void sortn(int* arr, int n) {
int * dup = intdup(arr, n);
clock_t t;
t = clock();
std::sort(dup, dup + n);
t = clock() - t;
double time_taken = ((double)t) / CLOCKS_PER_SEC;
printf("exec time of Normal sort: "); printf(" %.3lfs \nDuplicate array using sequential is sorted %s\n", time_taken, std::is_sorted(dup, dup + n) == 1 ? "YES" : "NO");
}
void wop(int * array, int sz) {
FILE *f1;
f1 = fopen("op.txt", "w");
fprintf(f1, "%d ", sz);
for (int i = 0; i<sz; i++) {
fprintf(f1, "%d ", array[i]);
}
fclose(f1);
}
double avgfornS(int *arr, int n) {
double x = 0, itr = 1;
for (int i = 0; i < itr; i++) {
clock_t t;
t = clock();
//sortb(arr, n, (int)(log(n) / log(2)));
std::sort(arr, arr + n);
t = clock() - t;
double time_taken = ((double)t) / CLOCKS_PER_SEC;
x += time_taken;
}
x = x / itr;
return x;
}
int getAnalysis_vp(int *arr, int size, int w,int vp) {
FILE *f1; double a, b;
int * dup = intdup(arr, size);
int po = (int)(log(vp) / log(2));
f1 = fopen("analysis.txt", "a");
fprintf(f1, "\nAnalysis Report: VARYING PROCESSORS # = %d \n\n", 1<<po);
fprintf(f1, " N time over time over is sorted? speedup:\n");
fprintf(f1, " N serial code Bitonic CUDA code\n");
printf(" N time over time over is sorted? speedup:\n");
printf(" N serial code Bitonic CUDA code\n");
for (int i = po+1; i <= 13; i++) {
memcpy(arr, dup, (1 << i) * sizeof(int));
a = avgfornS(arr, (1 << i));
memcpy(arr, dup, (1 << i) * sizeof(int));
b = sortb_vp(arr, (1 << i), i,vp);
fprintf(f1, "%7d %.5lf %.5lf", (1 << i), a, b);
printf("%7d %.5lf %.5lf", (1 << i), a, b);
fprintf(f1, " %s %f\n", std::is_sorted(arr, arr + (1 << i)) == 1 ? "YES" : "NO", a / b);
printf(" %s %f\n", std::is_sorted(arr, arr + (1 << i)) == 1 ? "YES" : "NO", a / b);
}
fclose(f1);
return 0;
}
int main(int argc, char **argv) {
int* arr;
int n, s;
FILE *f = fopen("z.txt", "r");
if (f == NULL) {
fprintf(stderr, "File not found.\n");
return 1;
}
fscanf(f, "%d", &n);
printf("size n = %d log n = %d\n", n, (int)(log(n) / log(2)));
arr = (int*)malloc(n * sizeof(int));
for (int i = 0; i < n; i++) {
fscanf(f, "%d", (arr + i));
//printf(" %d ", arr[i]);
}
fclose(f);
//for (int i = 0; i<; i++)
getAnalysis_vp(arr, n, 1, 16);
getAnalysis_vp(arr, n, 1,32);
getAnalysis_vp(arr, n, 1, 64);
getAnalysis_vp(arr, n, 1, 128);
getAnalysis_vp(arr, n, 1, 256);
getAnalysis_vp(arr, n, 1, 512);
///printf("input is sorted: %s\n\n\n", std::is_sorted(arr, arr + n) == 1 ? "YES" : "NO");
//----sortn(arr, n);
clock_t t;
//----double time_taken =sortb(arr, n, (int)(log(n) / log(2)));
//std::sort(arr, arr + n);
//---printf("\n");
//----wop(arr,n);
/*for (int i = 0; i < n; i++) {
printf(" %d ", arr[i]);
}*/
//----printf("\narray using Parallel Sort is sorted: %s\n", std::is_sorted(arr, arr + n)==1?"YES":"NOOOOOOOOOOOOOOOOOOOOOOOOO");
//-----printf("exec time: "); printf(" %lfs \n\n",time_taken);
puts("...");
getch();
} | 10a29762baa24b1fbddd771fe00ef78511bf51ff.cu | #include <stdio.h>
#include<conio.h>
#include<algorithm>
#include<stdlib.h>
#include "mylib.h"
void sortb(int* a, int size);
__global__ void bitonicSort_vp(int* a, int n, int g, int t, int p, int sz) {
int index = blockIdx.x * blockDim.x + threadIdx.x + p*sz,
map = (index / (1 << (t - 1)))*(1 << t) + (index % (1 << (t - 1))),
pos = (map / (1 << g)) % 2,
m1 = (pos == 0) ? map : (map + (1 << (t - 1))),
m2 = (pos == 0) ? (map + (1 << (t - 1))) : map;
// m ap <> map + 1<<(b-1)
//printf("%d %d index - %d %d , %d %d\n", g, b, index, map, map + (1 << (b - 1)), pos);
atomicMin(&a[m1], atomicMax(&a[m2], a[m1]));
__syncthreads();
}
double sortb_vp(int* a, int size, int logn2,int sz) {
int* array;
int mem = sizeof(int) * size;
cudaMalloc((void **)&array, sizeof(int)*size);
cudaMemcpy(array, a, sizeof(int) * size, cudaMemcpyHostToDevice);
//printf("\nthreads p block = %d\n", size);
int threadsPerBlock = 1024;
int blocksPerGrid = ((size / 2) + threadsPerBlock - 1) / threadsPerBlock;
clock_t t, t1, td;
t = clock();
for (int g = 1; g <= logn2; g++) {
for (int t = g; t > 0; t--) {
//printf("g-> %d t-> %d\n", g, t);
int psz = 32;
for (int i = 0; i<(ceil(size / (2 * psz))); i++)
bitonicSort_vp << <((psz) + threadsPerBlock - 1) / threadsPerBlock, 1024 >> >(array, size, g, t, i, psz);
//printf("\n\n");
//
}
}
td = clock();
cudaDeviceSynchronize();
t1 = clock();
double time_taken = ((double)(avg2(t1, td) - t)) / CLOCKS_PER_SEC;
//printf("\n\nfunction exec time: "); printf(" %.3lfs\n\n", time_taken,t,t1);
//bitonicSort <<<1, (size / 2) >>>(array, size / 2);
cudaMemcpy(a, array, size * sizeof(int), cudaMemcpyDeviceToHost);
//size /= 2;
//}
cudaFree(array);
return time_taken;
}
int * intdup(int const * src, size_t len)
{
int * p = (int *)malloc(len * sizeof(int));
memcpy(p, src, len * sizeof(int));
return p;
}
void sortn(int* arr, int n) {
int * dup = intdup(arr, n);
clock_t t;
t = clock();
std::sort(dup, dup + n);
t = clock() - t;
double time_taken = ((double)t) / CLOCKS_PER_SEC;
printf("exec time of Normal sort: "); printf(" %.3lfs \nDuplicate array using sequential is sorted %s\n", time_taken, std::is_sorted(dup, dup + n) == 1 ? "YES" : "NO");
}
void wop(int * array, int sz) {
FILE *f1;
f1 = fopen("op.txt", "w");
fprintf(f1, "%d ", sz);
for (int i = 0; i<sz; i++) {
fprintf(f1, "%d ", array[i]);
}
fclose(f1);
}
double avgfornS(int *arr, int n) {
double x = 0, itr = 1;
for (int i = 0; i < itr; i++) {
clock_t t;
t = clock();
//sortb(arr, n, (int)(log(n) / log(2)));
std::sort(arr, arr + n);
t = clock() - t;
double time_taken = ((double)t) / CLOCKS_PER_SEC;
x += time_taken;
}
x = x / itr;
return x;
}
int getAnalysis_vp(int *arr, int size, int w,int vp) {
FILE *f1; double a, b;
int * dup = intdup(arr, size);
int po = (int)(log(vp) / log(2));
f1 = fopen("analysis.txt", "a");
fprintf(f1, "\nAnalysis Report: VARYING PROCESSORS # = %d \n\n", 1<<po);
fprintf(f1, " N time over time over is sorted? speedup:\n");
fprintf(f1, " N serial code Bitonic CUDA code\n");
printf(" N time over time over is sorted? speedup:\n");
printf(" N serial code Bitonic CUDA code\n");
for (int i = po+1; i <= 13; i++) {
memcpy(arr, dup, (1 << i) * sizeof(int));
a = avgfornS(arr, (1 << i));
memcpy(arr, dup, (1 << i) * sizeof(int));
b = sortb_vp(arr, (1 << i), i,vp);
fprintf(f1, "%7d %.5lf %.5lf", (1 << i), a, b);
printf("%7d %.5lf %.5lf", (1 << i), a, b);
fprintf(f1, " %s %f\n", std::is_sorted(arr, arr + (1 << i)) == 1 ? "YES" : "NO", a / b);
printf(" %s %f\n", std::is_sorted(arr, arr + (1 << i)) == 1 ? "YES" : "NO", a / b);
}
fclose(f1);
return 0;
}
int main(int argc, char **argv) {
int* arr;
int n, s;
FILE *f = fopen("z.txt", "r");
if (f == NULL) {
fprintf(stderr, "File not found.\n");
return 1;
}
fscanf(f, "%d", &n);
printf("size n = %d log n = %d\n", n, (int)(log(n) / log(2)));
arr = (int*)malloc(n * sizeof(int));
for (int i = 0; i < n; i++) {
fscanf(f, "%d", (arr + i));
//printf(" %d ", arr[i]);
}
fclose(f);
//for (int i = 0; i<; i++)
getAnalysis_vp(arr, n, 1, 16);
getAnalysis_vp(arr, n, 1,32);
getAnalysis_vp(arr, n, 1, 64);
getAnalysis_vp(arr, n, 1, 128);
getAnalysis_vp(arr, n, 1, 256);
getAnalysis_vp(arr, n, 1, 512);
///printf("input is sorted: %s\n\n\n", std::is_sorted(arr, arr + n) == 1 ? "YES" : "NO");
//----sortn(arr, n);
clock_t t;
//----double time_taken =sortb(arr, n, (int)(log(n) / log(2)));
//std::sort(arr, arr + n);
//---printf("\n");
//----wop(arr,n);
/*for (int i = 0; i < n; i++) {
printf(" %d ", arr[i]);
}*/
//----printf("\narray using Parallel Sort is sorted: %s\n", std::is_sorted(arr, arr + n)==1?"YES":"NOOOOOOOOOOOOOOOOOOOOOOOOO");
//-----printf("exec time: "); printf(" %lfs \n\n",time_taken);
puts("...");
getch();
} |
573591b6a305ccecc1e923738fc386a6c6472cd1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
///////////////////////////////////////////////////////////////////////////////
// This is nvidias histogram256 SDK example modded to do a 1024 point
// histogram
///////////////////////////////////////////////////////////////////////////////
//Total number of possible data values
#define BIN_COUNT 1024 // Changed from 256
#define HISTOGRAM_SIZE (BIN_COUNT * sizeof(unsigned int))
//Machine warp size
#ifndef __DEVICE_EMULATION__
//G80's warp size is 32 threads
#define WARP_LOG_SIZE 5
#else
//Emulation currently doesn't execute threads in coherent groups of 32 threads,
//which effectively means warp size of 1 thread for emulation modes
#define WARP_LOG_SIZE 0
#endif
//Warps in thread block
#define WARP_N 3
//Threads per block count
#ifdef HISTO_WG_SIZE_0
#define THREAD_N HISTO_WG_SIZE_0
#else
#define THREAD_N (WARP_N << WARP_LOG_SIZE)
#endif
//Per-block number of elements in histograms
#define BLOCK_MEMORY (WARP_N * BIN_COUNT)
#define IMUL(a, b) __mul24(a, b)
__device__ void addData1024(volatile unsigned int *s_WarpHist, unsigned int data, unsigned int threadTag){
unsigned int count;
do{
count = s_WarpHist[data] & 0x07FFFFFFU;
count = threadTag | (count + 1);
s_WarpHist[data] = count;
}while(s_WarpHist[data] != count);
}
__global__ void histogram1024Kernel(unsigned int *d_Result, float *d_Data, float minimum, float maximum, int dataN){
//Current global thread index
const int globalTid = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
//Total number of threads in the compute grid
const int numThreads = IMUL(blockDim.x, gridDim.x);
//WARP_LOG_SIZE higher bits of counter values are tagged
//by lower WARP_LOG_SIZE threadID bits
// Will correctly issue warning when compiling for debug (x<<32-0)
const unsigned int threadTag = threadIdx.x << (32 - WARP_LOG_SIZE);
//Shared memory cache for each warp in current thread block
//Declare as volatile to prevent incorrect compiler optimizations in addPixel()
volatile __shared__ unsigned int s_Hist[BLOCK_MEMORY];
//Current warp shared memory frame
const int warpBase = IMUL(threadIdx.x >> WARP_LOG_SIZE, BIN_COUNT);
//Clear shared memory buffer for current thread block before processing
for(int pos = threadIdx.x; pos < BLOCK_MEMORY; pos += blockDim.x)
s_Hist[pos] = 0;
__syncthreads();
//Cycle through the entire data set, update subhistograms for each warp
//Since threads in warps always execute the same instruction,
//we are safe with the addPixel trick
for(int pos = globalTid; pos < dataN; pos += numThreads){
unsigned int data4 = ((d_Data[pos] - minimum)/(maximum - minimum)) * BIN_COUNT;
addData1024(s_Hist + warpBase, data4 & 0x3FFU, threadTag);
}
__syncthreads();
//Merge per-warp histograms into per-block and write to global memory
for(int pos = threadIdx.x; pos < BIN_COUNT; pos += blockDim.x){
unsigned int sum = 0;
for(int base = 0; base < BLOCK_MEMORY; base += BIN_COUNT)
sum += s_Hist[base + pos] & 0x07FFFFFFU;
atomicAdd(d_Result + pos, sum);
}
}
//Thread block (== subhistogram) count
#define BLOCK_N 64
////////////////////////////////////////////////////////////////////////////////
// Put all kernels together
////////////////////////////////////////////////////////////////////////////////
//histogram1024kernel() results buffer
unsigned int *d_Result1024;
//Internal memory allocation
void initHistogram1024(void){
checkCudaErrors( hipMalloc((void **)&d_Result1024, HISTOGRAM_SIZE ));
}
//Internal memory deallocation
void closeHistogram1024(void){
checkCudaErrors( hipFree(d_Result1024) );
}
//histogram1024 CPU front-end
void histogram1024GPU(
unsigned int *h_Result,
float *d_Data,
float minimum,
float maximum,
int dataN)
{
checkCudaErrors( hipMemset(d_Result1024, 0, HISTOGRAM_SIZE) );
hipLaunchKernelGGL(( histogram1024Kernel), dim3(BLOCK_N), dim3(THREAD_N), 0, 0,
d_Result1024,
d_Data,
minimum,
maximum,
dataN
);
checkCudaErrors( hipMemcpy(h_Result, d_Result1024, HISTOGRAM_SIZE, hipMemcpyDeviceToHost) );
}
| 573591b6a305ccecc1e923738fc386a6c6472cd1.cu | /*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
///////////////////////////////////////////////////////////////////////////////
// This is nvidias histogram256 SDK example modded to do a 1024 point
// histogram
///////////////////////////////////////////////////////////////////////////////
//Total number of possible data values
#define BIN_COUNT 1024 // Changed from 256
#define HISTOGRAM_SIZE (BIN_COUNT * sizeof(unsigned int))
//Machine warp size
#ifndef __DEVICE_EMULATION__
//G80's warp size is 32 threads
#define WARP_LOG_SIZE 5
#else
//Emulation currently doesn't execute threads in coherent groups of 32 threads,
//which effectively means warp size of 1 thread for emulation modes
#define WARP_LOG_SIZE 0
#endif
//Warps in thread block
#define WARP_N 3
//Threads per block count
#ifdef HISTO_WG_SIZE_0
#define THREAD_N HISTO_WG_SIZE_0
#else
#define THREAD_N (WARP_N << WARP_LOG_SIZE)
#endif
//Per-block number of elements in histograms
#define BLOCK_MEMORY (WARP_N * BIN_COUNT)
#define IMUL(a, b) __mul24(a, b)
__device__ void addData1024(volatile unsigned int *s_WarpHist, unsigned int data, unsigned int threadTag){
unsigned int count;
do{
count = s_WarpHist[data] & 0x07FFFFFFU;
count = threadTag | (count + 1);
s_WarpHist[data] = count;
}while(s_WarpHist[data] != count);
}
__global__ void histogram1024Kernel(unsigned int *d_Result, float *d_Data, float minimum, float maximum, int dataN){
//Current global thread index
const int globalTid = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
//Total number of threads in the compute grid
const int numThreads = IMUL(blockDim.x, gridDim.x);
//WARP_LOG_SIZE higher bits of counter values are tagged
//by lower WARP_LOG_SIZE threadID bits
// Will correctly issue warning when compiling for debug (x<<32-0)
const unsigned int threadTag = threadIdx.x << (32 - WARP_LOG_SIZE);
//Shared memory cache for each warp in current thread block
//Declare as volatile to prevent incorrect compiler optimizations in addPixel()
volatile __shared__ unsigned int s_Hist[BLOCK_MEMORY];
//Current warp shared memory frame
const int warpBase = IMUL(threadIdx.x >> WARP_LOG_SIZE, BIN_COUNT);
//Clear shared memory buffer for current thread block before processing
for(int pos = threadIdx.x; pos < BLOCK_MEMORY; pos += blockDim.x)
s_Hist[pos] = 0;
__syncthreads();
//Cycle through the entire data set, update subhistograms for each warp
//Since threads in warps always execute the same instruction,
//we are safe with the addPixel trick
for(int pos = globalTid; pos < dataN; pos += numThreads){
unsigned int data4 = ((d_Data[pos] - minimum)/(maximum - minimum)) * BIN_COUNT;
addData1024(s_Hist + warpBase, data4 & 0x3FFU, threadTag);
}
__syncthreads();
//Merge per-warp histograms into per-block and write to global memory
for(int pos = threadIdx.x; pos < BIN_COUNT; pos += blockDim.x){
unsigned int sum = 0;
for(int base = 0; base < BLOCK_MEMORY; base += BIN_COUNT)
sum += s_Hist[base + pos] & 0x07FFFFFFU;
atomicAdd(d_Result + pos, sum);
}
}
//Thread block (== subhistogram) count
#define BLOCK_N 64
////////////////////////////////////////////////////////////////////////////////
// Put all kernels together
////////////////////////////////////////////////////////////////////////////////
//histogram1024kernel() results buffer
unsigned int *d_Result1024;
//Internal memory allocation
void initHistogram1024(void){
checkCudaErrors( cudaMalloc((void **)&d_Result1024, HISTOGRAM_SIZE ));
}
//Internal memory deallocation
void closeHistogram1024(void){
checkCudaErrors( cudaFree(d_Result1024) );
}
//histogram1024 CPU front-end
void histogram1024GPU(
unsigned int *h_Result,
float *d_Data,
float minimum,
float maximum,
int dataN)
{
checkCudaErrors( cudaMemset(d_Result1024, 0, HISTOGRAM_SIZE) );
histogram1024Kernel<<<BLOCK_N, THREAD_N>>>(
d_Result1024,
d_Data,
minimum,
maximum,
dataN
);
checkCudaErrors( cudaMemcpy(h_Result, d_Result1024, HISTOGRAM_SIZE, cudaMemcpyDeviceToHost) );
}
|
e526768ac7fffc21d0ed3f7e5911c8ec7a66c0e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void elmult(float * inA, float * inB, int length)
{
int idx = threadIdx.x + blockDim.x*blockIdx.x;
if (idx<length) inA[idx] *= inB[idx];
} | e526768ac7fffc21d0ed3f7e5911c8ec7a66c0e4.cu | #include "includes.h"
__global__ void elmult(float * inA, float * inB, int length)
{
int idx = threadIdx.x + blockDim.x*blockIdx.x;
if (idx<length) inA[idx] *= inB[idx];
} |
09ee1222ac147fc2a1452e1d8df302dfc5f1f101.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Discrete Sine Transform in Column wise (DST one)
* DST_I_Column
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DST_I_Column(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#define DEFAULT_DIM 32
#define DELTA(i, j) ((i==j)?1:0)
const double PI_d = 3.141592653589793238462643383279502884; //pi
__global__ void DSTI_Column_Kernel_GPUA(double const * const A, double const * const B, double * const C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y;
int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x;
for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) {
for (int n = 0; n < DEFAULT_DIM; ++n)
if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns))
CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col];
}
if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
}
__global__ void DSTI_Column_Kernel(double *A, double *B, double *C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y;
int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x;
for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) {
for (int n = 0; n < DEFAULT_DIM; ++n)
if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns))
CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col];
}
if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void CalculateTransform(double * A, double * B, double * C, int numARows,
int numAColumns, int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double * hostA = A; // The A matrix
double * hostB = B; // The B matrix
double * hostC = C; // The output C matrix
//double * hostComputedC;
double * deviceA=0;
double * deviceB=0;
double * deviceC=0;
//hostA = (double *)malloc(sizeof(float)*numARows*numAColumns);
//hostB = (v *)malloc(sizeof(float)*numBRows*numBColumns);
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
// Allocate GPU buffers for three vectors (two input, one output) .
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
hipMalloc((void **)&deviceA, sizeof(double )*numARows*numAColumns);
hipMalloc((void **)&deviceB, sizeof(double )*numBRows*numBColumns);
hipMalloc((void **)&deviceC, sizeof(double )*numCRows*numCColumns);
hipMemcpy(deviceA, hostA, sizeof(double )*numARows*numAColumns, hipMemcpyHostToDevice);
hipMemcpy(deviceB, hostB, sizeof(double )*numBRows*numBColumns, hipMemcpyHostToDevice);
dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DSTI_Column_Kernel << <dimGrid, dimBlock >> >(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
hipDeviceSynchronize();//To synchronize the device
// Copy the results in GPU memory back to the CPU
hipMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, hipMemcpyDeviceToHost);
C = hostC;
hipFree(deviceA);
hipFree(deviceB);
hipFree(deviceC);
}
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
hipError_t errCode =hipGetDeviceCount(&nDevices);
//int nDevices;
//hipGetDeviceCount(&nDevices);
if (errCode != hipSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
/// input standard GPUarray
if (mxIsGPUArray(prhs[0])) {
//mexErrMsgIdAndTxt(errId, errMsg);
/* Declare all variables.*/
mxGPUArray const *A;
mxGPUArray const *DCOS;
mxGPUArray *B;
double const *d_A, *d_DCOS;
double *d_B;
// mxArray * hostcos;
//test
// double * hostcos, *pointer;
double *pointer;
//int N;
int numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns;
char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
char const * const errMsg = "Invalid input to MEX file.";
/* Initialize the MathWorks GPU API. */
mxInitGPU();
if ((nrhs!=1)) {
mexErrMsgIdAndTxt(errId, errMsg);
}
A = mxGPUCreateFromMxArray(prhs[0]);
const mwSize *dims;
dims=mxGPUGetDimensions(A);
numARows = (int)dims[0]; /* gets number of rows of A */
numAColumns = (int)dims[1]; /* gets number of columns of A */
numDCOSRows=numDCOSColumns = numARows;
numCRows = numARows;
numCColumns = numAColumns;
if (numARows==1)
{
printf("Attention, this is a row vector, please try Discrete Sine Transform in row wise \n");
return;
}
// numDCOSRows=numDCOSColumns=numAColumns;
// numCRows = numARows;
// numCColumns = numDCOSColumns;
mxArray *COS= mxCreateNumericMatrix(numDCOSRows, numDCOSColumns, mxDOUBLE_CLASS, mxREAL);
pointer = mxGetPr(COS);
for (int i = 0; i < numDCOSRows; i++){
for (int j = 0; j < numDCOSColumns; j++){
//hostB[i * numBColumns + j] = i + j* numAColumns;
//hostB[i * numBColumns + j] = 1;
//cosvalx[i * numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / numBColumns);
//hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
//hostB[i + j* numBColumns] = sin((((j + 1)*3.14*(i + 1)) / (numBColumns + 1)))*sqrt(2.0 / (numBColumns + 1));
pointer[i* numDCOSColumns + j] = sin((((j + 1)*PI_d*(i + 1)) / (numDCOSColumns + 1)))*sqrt(2.0 / (numDCOSColumns + 1));
//hostB[i + j* numBColumns] = 1;
//hostL[i* numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / numBColumns);
//hostB[i + j* numBColumns] = 1;
}
}
// for (int i = 0; i < numDCOSRows; i++){
// for (int j = 0; j < numDCOSColumns; j++){
// //hostB[i * numBColumns + j] = i + j* numACommc
//
// //hostB[i * numBColumns + j] = 1;
// //cosvalx[i * numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / numBColumns);
// //hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
//
// if (numDCOSRows != 1){
// pointer[i* numDCOSColumns + j] = cos((j*PI_d*i / (numDCOSRows - 1)))*sqrt(1.0 / (1 + DELTA(i + 1, 1) + DELTA(i + 1, numDCOSRows)))*sqrt(1.0 / (1 + DELTA(1, j + 1) + DELTA(numDCOSColumns, j + 1)))*sqrt(2.0 / numDCOSColumns);
// //hostB[i + j* numBColumns] = 1;
//
// //hostL[i* numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / numBColumns);
// //hostB[i + j* numBColumns] = 1;
//
// }
//
// else{
// pointer[i* numDCOSColumns + j] =1;
// }
//
// }
// }
DCOS=mxGPUCreateFromMxArray(COS);
// DCOS=mxGPUCreateFromMxArray(hostcos);
if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) {
mexErrMsgIdAndTxt(errId, errMsg);
}
d_A = (double const *)(mxGPUGetDataReadOnly(A));
d_DCOS=(double const *)(mxGPUGetDataReadOnly(DCOS));
B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A),
mxGPUGetDimensions(A),
mxGPUGetClassID(A),
mxGPUGetComplexity(A),
MX_GPU_DO_NOT_INITIALIZE);
d_B = (double *)(mxGPUGetData(B));
dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
//(hostL, hostA, hostC, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns);
//DCTII_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_DCOS, d_B, numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns);
DSTI_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_DCOS, d_A, d_B, numDCOSRows, numDCOSColumns, numARows, numAColumns, numCRows, numCColumns);
// hipError_t err1 = hipPeekAtLastError();//To capture last error in function call
//hipDeviceSynchronize();//To synchronize the device
plhs[0] = mxGPUCreateMxArrayOnGPU(B);
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(DCOS);
mxGPUDestroyGPUArray(B);
}
/// input standard array
else if (!(mxIsGPUArray(prhs[0]))){
int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A
int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set this)
numBRows = numBColumns = numARows;
numCRows = numARows;
numCColumns = numAColumns;
if (numARows==1)
{
printf("Attention, this is a row vector, please try Discrete Sine Transform in row wise \n");
return;
}
// numBRows = numBColumns = numAColumns;
// numCRows = numARows;
//
// numCColumns = numBColumns;
//char const * const errId = "parallel:gpu:DCTTWO:InvalidInput";
//char const * const errMsg = "Invalid input to MEX file.";
double * hostA ; // The A matrix
double * hostB ; // The B matrix
/* Initialize the MathWorks GPU API. */
//mxInitGPU();
/* Throw an error if the input is not a GPU array. */
//if ((nrhs != 1) || !(mxIsGPUArray(prhs[0]))) {
//mexErrMsgIdAndTxt(errId, errMsg);
//}
//hostA = (double *)malloc(sizeof(double)*numARows*numAColumns);
//hostAx = (double *)malloc(sizeof(double)*numARows*numAColumns);
//hostAy = (double *)malloc(sizeof(double)*numARows*numAColumns);
hostB = (double *)malloc(sizeof(double)*numBRows*numBColumns);
//const mxArray *G =prhs[0];
// if ((nrhs != 1) || (mxIsGPUArray(G))) {
//mexErrMsgIdAndTxt(errId, errMsg);
// G = gather(G);
// }
hostA = (double *)mxGetData(prhs[0]);
// hostA = (double *)mxGetData(G);
//Discrete Sine Transform in Columns wise
for (int i = 0; i < numBRows; i++){
for (int j = 0; j < numBColumns; j++){
//hostB[i * numBColumns + j] = i + j* numAColumns;
//hostB[i * numBColumns + j] = 1;
//cosvalx[i * numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / numBColumns);
//hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
//hostB[i + j* numBColumns] = sin((((j + 1)*3.14*(i + 1)) / (numBColumns + 1)))*sqrt(2.0 / (numBColumns + 1));
hostB[i* numBColumns + j] = sin((((j + 1)*PI_d*(i + 1)) / (numBColumns + 1)))*sqrt(2.0 / (numBColumns + 1));
//hostB[i + j* numBColumns] = 1;
//hostL[i* numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / numBColumns);
//hostB[i + j* numBColumns] = 1;
}
}
//
// for (int i = 0; i < numBRows; i++){
// for (int j = 0; j < numBColumns; j++){
// //hostB[i * numBColumns + j] = i + j* numAColumns;
// //hostB[i * numBColumns + j] = 1;
// //cosvalx[i * numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / numBColumns);
// //hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
//
// if (numBRows != 1){
// hostB[i* numBColumns + j] = cos((j*PI_d*i / (numBRows - 1)))*sqrt(1.0 / (1 + DELTA(i + 1, 1) + DELTA(i + 1, numBRows)))*sqrt(1.0 / (1 + DELTA(1, j + 1) + DELTA(numBColumns, j + 1)))*sqrt(2.0 / numBColumns);
// //hostB[i + j* numBColumns] = 1;
//
// //hostL[i* numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / numBColumns);
// //hostB[i + j* numBColumns] = 1;
//
// }
//
// else{
// hostB[i* numBColumns + j] =1;
// }
//
// }
// }
//plhs[0] = mxCreateNumericMatrix(numARows, numBColumns, mxDOUBLE_CLASS, mxREAL);
//hostC = (double*)mxGetData(plhs[0]);
plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL);
double *pointer = mxGetPr(plhs[0]);
// (hostL, hostA, hostC, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns);
//CalculateTransform(hostA, hostB, hostC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
// CalculateTransform(hostA, hostB, pointer, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
CalculateTransform( hostB, hostA, pointer, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns);
//memcpy(pointer, hostC, numCRows*numCColumns*sizeof(double));
free(hostB);
}
}
| 09ee1222ac147fc2a1452e1d8df302dfc5f1f101.cu | /*
* Discrete Sine Transform in Column wise (DST one)
* DST_I_Column
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DST_I_Column(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#define DEFAULT_DIM 32
#define DELTA(i, j) ((i==j)?1:0)
const double PI_d = 3.141592653589793238462643383279502884; //pi
__global__ void DSTI_Column_Kernel_GPUA(double const * const A, double const * const B, double * const C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y;
int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x;
for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) {
for (int n = 0; n < DEFAULT_DIM; ++n)
if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns))
CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col];
}
if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
}
__global__ void DSTI_Column_Kernel(double *A, double *B, double *C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y;
int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x;
for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) {
for (int n = 0; n < DEFAULT_DIM; ++n)
if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns))
CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col];
}
if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void CalculateTransform(double * A, double * B, double * C, int numARows,
int numAColumns, int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double * hostA = A; // The A matrix
double * hostB = B; // The B matrix
double * hostC = C; // The output C matrix
//double * hostComputedC;
double * deviceA=0;
double * deviceB=0;
double * deviceC=0;
//hostA = (double *)malloc(sizeof(float)*numARows*numAColumns);
//hostB = (v *)malloc(sizeof(float)*numBRows*numBColumns);
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
// Allocate GPU buffers for three vectors (two input, one output) .
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
cudaMalloc((void **)&deviceA, sizeof(double )*numARows*numAColumns);
cudaMalloc((void **)&deviceB, sizeof(double )*numBRows*numBColumns);
cudaMalloc((void **)&deviceC, sizeof(double )*numCRows*numCColumns);
cudaMemcpy(deviceA, hostA, sizeof(double )*numARows*numAColumns, cudaMemcpyHostToDevice);
cudaMemcpy(deviceB, hostB, sizeof(double )*numBRows*numBColumns, cudaMemcpyHostToDevice);
dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DSTI_Column_Kernel << <dimGrid, dimBlock >> >(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
cudaDeviceSynchronize();//To synchronize the device
// Copy the results in GPU memory back to the CPU
cudaMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, cudaMemcpyDeviceToHost);
C = hostC;
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
}
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
cudaError_t errCode =cudaGetDeviceCount(&nDevices);
//int nDevices;
//cudaGetDeviceCount(&nDevices);
if (errCode != cudaSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
/// input standard GPUarray
if (mxIsGPUArray(prhs[0])) {
//mexErrMsgIdAndTxt(errId, errMsg);
/* Declare all variables.*/
mxGPUArray const *A;
mxGPUArray const *DCOS;
mxGPUArray *B;
double const *d_A, *d_DCOS;
double *d_B;
// mxArray * hostcos;
//test
// double * hostcos, *pointer;
double *pointer;
//int N;
int numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns;
char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
char const * const errMsg = "Invalid input to MEX file.";
/* Initialize the MathWorks GPU API. */
mxInitGPU();
if ((nrhs!=1)) {
mexErrMsgIdAndTxt(errId, errMsg);
}
A = mxGPUCreateFromMxArray(prhs[0]);
const mwSize *dims;
dims=mxGPUGetDimensions(A);
numARows = (int)dims[0]; /* gets number of rows of A */
numAColumns = (int)dims[1]; /* gets number of columns of A */
numDCOSRows=numDCOSColumns = numARows;
numCRows = numARows;
numCColumns = numAColumns;
if (numARows==1)
{
printf("Attention, this is a row vector, please try Discrete Sine Transform in row wise \n");
return;
}
// numDCOSRows=numDCOSColumns=numAColumns;
// numCRows = numARows;
// numCColumns = numDCOSColumns;
mxArray *COS= mxCreateNumericMatrix(numDCOSRows, numDCOSColumns, mxDOUBLE_CLASS, mxREAL);
pointer = mxGetPr(COS);
for (int i = 0; i < numDCOSRows; i++){
for (int j = 0; j < numDCOSColumns; j++){
//hostB[i * numBColumns + j] = i + j* numAColumns;
//hostB[i * numBColumns + j] = 1;
//cosvalx[i * numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / numBColumns);
//hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
//hostB[i + j* numBColumns] = sin((((j + 1)*3.14*(i + 1)) / (numBColumns + 1)))*sqrt(2.0 / (numBColumns + 1));
pointer[i* numDCOSColumns + j] = sin((((j + 1)*PI_d*(i + 1)) / (numDCOSColumns + 1)))*sqrt(2.0 / (numDCOSColumns + 1));
//hostB[i + j* numBColumns] = 1;
//hostL[i* numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / numBColumns);
//hostB[i + j* numBColumns] = 1;
}
}
// for (int i = 0; i < numDCOSRows; i++){
// for (int j = 0; j < numDCOSColumns; j++){
// //hostB[i * numBColumns + j] = i + j* numACommc
//
// //hostB[i * numBColumns + j] = 1;
// //cosvalx[i * numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / numBColumns);
// //hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
//
// if (numDCOSRows != 1){
// pointer[i* numDCOSColumns + j] = cos((j*PI_d*i / (numDCOSRows - 1)))*sqrt(1.0 / (1 + DELTA(i + 1, 1) + DELTA(i + 1, numDCOSRows)))*sqrt(1.0 / (1 + DELTA(1, j + 1) + DELTA(numDCOSColumns, j + 1)))*sqrt(2.0 / numDCOSColumns);
// //hostB[i + j* numBColumns] = 1;
//
// //hostL[i* numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / numBColumns);
// //hostB[i + j* numBColumns] = 1;
//
// }
//
// else{
// pointer[i* numDCOSColumns + j] =1;
// }
//
// }
// }
DCOS=mxGPUCreateFromMxArray(COS);
// DCOS=mxGPUCreateFromMxArray(hostcos);
if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) {
mexErrMsgIdAndTxt(errId, errMsg);
}
d_A = (double const *)(mxGPUGetDataReadOnly(A));
d_DCOS=(double const *)(mxGPUGetDataReadOnly(DCOS));
B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A),
mxGPUGetDimensions(A),
mxGPUGetClassID(A),
mxGPUGetComplexity(A),
MX_GPU_DO_NOT_INITIALIZE);
d_B = (double *)(mxGPUGetData(B));
dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
//(hostL, hostA, hostC, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns);
//DCTII_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_DCOS, d_B, numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns);
DSTI_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_DCOS, d_A, d_B, numDCOSRows, numDCOSColumns, numARows, numAColumns, numCRows, numCColumns);
// cudaError_t err1 = cudaPeekAtLastError();//To capture last error in function call
//cudaDeviceSynchronize();//To synchronize the device
plhs[0] = mxGPUCreateMxArrayOnGPU(B);
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(DCOS);
mxGPUDestroyGPUArray(B);
}
/// input standard array
else if (!(mxIsGPUArray(prhs[0]))){
int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A
int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set this)
numBRows = numBColumns = numARows;
numCRows = numARows;
numCColumns = numAColumns;
if (numARows==1)
{
printf("Attention, this is a row vector, please try Discrete Sine Transform in row wise \n");
return;
}
// numBRows = numBColumns = numAColumns;
// numCRows = numARows;
//
// numCColumns = numBColumns;
//char const * const errId = "parallel:gpu:DCTTWO:InvalidInput";
//char const * const errMsg = "Invalid input to MEX file.";
double * hostA ; // The A matrix
double * hostB ; // The B matrix
/* Initialize the MathWorks GPU API. */
//mxInitGPU();
/* Throw an error if the input is not a GPU array. */
//if ((nrhs != 1) || !(mxIsGPUArray(prhs[0]))) {
//mexErrMsgIdAndTxt(errId, errMsg);
//}
//hostA = (double *)malloc(sizeof(double)*numARows*numAColumns);
//hostAx = (double *)malloc(sizeof(double)*numARows*numAColumns);
//hostAy = (double *)malloc(sizeof(double)*numARows*numAColumns);
hostB = (double *)malloc(sizeof(double)*numBRows*numBColumns);
//const mxArray *G =prhs[0];
// if ((nrhs != 1) || (mxIsGPUArray(G))) {
//mexErrMsgIdAndTxt(errId, errMsg);
// G = gather(G);
// }
hostA = (double *)mxGetData(prhs[0]);
// hostA = (double *)mxGetData(G);
//Discrete Sine Transform in Columns wise
for (int i = 0; i < numBRows; i++){
for (int j = 0; j < numBColumns; j++){
//hostB[i * numBColumns + j] = i + j* numAColumns;
//hostB[i * numBColumns + j] = 1;
//cosvalx[i * numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / numBColumns);
//hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
//hostB[i + j* numBColumns] = sin((((j + 1)*3.14*(i + 1)) / (numBColumns + 1)))*sqrt(2.0 / (numBColumns + 1));
hostB[i* numBColumns + j] = sin((((j + 1)*PI_d*(i + 1)) / (numBColumns + 1)))*sqrt(2.0 / (numBColumns + 1));
//hostB[i + j* numBColumns] = 1;
//hostL[i* numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / numBColumns);
//hostB[i + j* numBColumns] = 1;
}
}
//
// for (int i = 0; i < numBRows; i++){
// for (int j = 0; j < numBColumns; j++){
// //hostB[i * numBColumns + j] = i + j* numAColumns;
// //hostB[i * numBColumns + j] = 1;
// //cosvalx[i * numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / numBColumns);
// //hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
//
// if (numBRows != 1){
// hostB[i* numBColumns + j] = cos((j*PI_d*i / (numBRows - 1)))*sqrt(1.0 / (1 + DELTA(i + 1, 1) + DELTA(i + 1, numBRows)))*sqrt(1.0 / (1 + DELTA(1, j + 1) + DELTA(numBColumns, j + 1)))*sqrt(2.0 / numBColumns);
// //hostB[i + j* numBColumns] = 1;
//
// //hostL[i* numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / numBColumns);
// //hostB[i + j* numBColumns] = 1;
//
// }
//
// else{
// hostB[i* numBColumns + j] =1;
// }
//
// }
// }
//plhs[0] = mxCreateNumericMatrix(numARows, numBColumns, mxDOUBLE_CLASS, mxREAL);
//hostC = (double*)mxGetData(plhs[0]);
plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL);
double *pointer = mxGetPr(plhs[0]);
// (hostL, hostA, hostC, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns);
//CalculateTransform(hostA, hostB, hostC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
// CalculateTransform(hostA, hostB, pointer, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
CalculateTransform( hostB, hostA, pointer, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns);
//memcpy(pointer, hostC, numCRows*numCColumns*sizeof(double));
free(hostB);
}
}
|
907eab135eebafb8cd131a003a4fcb7d6951542e.hip | // !!! This is a file automatically generated by hipify!!!
/* Vector-matrix multiplication: Y = A * X.
* Host code.
* Author: Naga Kandasamy
* Date: 2/21/2017
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "vec_mat_mult_kernel.cu"
#include<sys/time.h>
#define MIN_NUMBER 1
#define MAX_NUMBER 4
extern "C" void compute_gold(float*, const float*, const float*, unsigned int, unsigned int);
Matrix allocate_matrix_on_gpu(const Matrix);
Matrix allocate_matrix(int, int, int);
void copy_matrix_to_device(Matrix, const Matrix);
void copy_matrix_from_device(Matrix, const Matrix);
void vec_mat_mult_on_device_using_global_memory(const Matrix, const Matrix, Matrix);
void vec_mat_mult_on_device_using_shared_memory(const Matrix, const Matrix, Matrix);
void print_matrix(const Matrix);
float get_random_number(int, int);
int checkResults(float *, float *, int, float);
struct timeval t1, t2;
main(int argc, char** argv) {
// Matrices for the program
Matrix A; // N x N matrix
Matrix X; // N x 1 vector
Matrix Y_cpu, Y_gpu_1, Y_gpu_2; // N x 1 vector
// Initialize the random number generator with a seed value
srand(time(NULL));
// Check command line arguments
if(argc > 1){
printf("Error. This program accepts no arguments. \n");
exit(0);
}
// Allocate and initialize the matrices
A = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 1); // Create a random N x N matrix
X = allocate_matrix(MATRIX_SIZE, 1, 1); // Create a random N x 1 vector
Y_cpu = allocate_matrix(MATRIX_SIZE, 1, 0); // Allocate memory for the output vectors
Y_gpu_1 = allocate_matrix(MATRIX_SIZE, 1, 0);
Y_gpu_2 = allocate_matrix(MATRIX_SIZE, 1, 0);
// compute the vector-matrix multiplication on the CPU for comparison
gettimeofday(&t1,0);
compute_gold(Y_cpu.elements, A.elements, X.elements, A.num_rows, A.num_columns);
gettimeofday(&t2, 0);
double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
printf("Execution time of CPU(serial) %f seconds. \n",time);
// Perform the vector-matrix multiplication on the GPU using global memory
// Return the results in Y_gpu_1
vec_mat_mult_on_device_using_global_memory(A, X, Y_gpu_1);
// check if the device result is equivalent to the expected solution
printf("Checking against reference result. \n");
int size_elements = NUM_ROWS;
int res = checkResults(Y_cpu.elements, Y_gpu_1.elements, size_elements, 0.0001);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
// Perform the vector-matrix multiplication on the GPU using shared memory
// Return the results in Y_gpu_2
vec_mat_mult_on_device_using_shared_memory(A, X, Y_gpu_2);
// check if the device result is equivalent to the expected solution
printf("Checking against reference result. \n");
res = checkResults(Y_cpu.elements, Y_gpu_2.elements, size_elements, 0.0001);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
// Free host matrices
free(A.elements); A.elements = NULL;
free(X.elements); X.elements = NULL;
free(Y_cpu.elements); Y_cpu.elements = NULL;
free(Y_gpu_1.elements); Y_gpu_1.elements = NULL;
free(Y_gpu_2.elements); Y_gpu_2.elements = NULL;
return 0;
}
// Complete the functionality of vector-matrix multiplication using the GPU
// Kernel should use global memory
void
vec_mat_mult_on_device_using_global_memory(const Matrix A, const Matrix X, Matrix Y)
{
Matrix gpu_a = allocate_matrix_on_gpu( A );
Matrix gpu_x = allocate_matrix_on_gpu( X );
Matrix gpu_y = allocate_matrix_on_gpu( Y );
copy_matrix_to_device( gpu_a, A );
copy_matrix_to_device( gpu_x, X );
dim3 dimBlock( 512, 1 );
dim3 dimGrid( MATRIX_SIZE / dimBlock.x, 1 );
gettimeofday(&t1,0);
hipLaunchKernelGGL(( vec_mat_kernel_naive) , dim3(dimGrid), dim3(dimBlock) , 0, 0, gpu_a.elements, gpu_x.elements, gpu_y.elements);
hipDeviceSynchronize();
gettimeofday(&t2, 0);
double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
printf("Kernel execution time on GPU (global memory): %3f seconds. \n",time);
hipError_t err = hipGetLastError();
if ( hipSuccess != err ) {
fprintf(stderr, " GPU kernel failed: %s.\n", hipGetErrorString(err));
}
copy_matrix_from_device( Y, gpu_y );
hipFree( gpu_a.elements );
hipFree( gpu_x.elements );
hipFree( gpu_y.elements );
}
// Complete the functionality of vector-matrix multiplication using the GPU
// Kernel should use shared memory
void
vec_mat_mult_on_device_using_shared_memory(const Matrix A, const Matrix X, Matrix Y)
{
Matrix gpu_a = allocate_matrix_on_gpu( A );
Matrix gpu_x = allocate_matrix_on_gpu( X );
Matrix gpu_y = allocate_matrix_on_gpu( Y );
copy_matrix_to_device( gpu_a, A );
copy_matrix_to_device( gpu_x, X );
dim3 dimBlock( 16, 16 );
dim3 dimGrid( MATRIX_SIZE / dimBlock.x, 1);
gettimeofday(&t1,0);
hipLaunchKernelGGL(( vec_mat_kernel_optimized) , dim3(dimGrid), dim3(dimBlock) , 0, 0, gpu_a.elements, gpu_x.elements, gpu_y.elements);
hipDeviceSynchronize();
gettimeofday(&t2, 0);
double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
printf("Kernel execution time on GPU (shared memory): %f seconds. \n",time);
hipError_t err = hipGetLastError();
if ( hipSuccess != err ) {
fprintf(stderr, " GPU kernel failed: %s.\n", hipGetErrorString(err));
}
copy_matrix_from_device( Y, gpu_y );
hipFree( gpu_a.elements );
hipFree( gpu_x.elements );
hipFree( gpu_y.elements );
}
// Allocate a device matrix of same size as M.
Matrix
allocate_matrix_on_gpu(const Matrix M)
{
Matrix Mdevice = M;
int size = M.num_rows * M.num_columns * sizeof(float);
hipMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
Matrix
allocate_matrix(int num_rows, int num_columns, int init)
{
Matrix M;
M.num_columns = M.pitch = num_columns;
M.num_rows = num_rows;
int size = M.num_rows * M.num_columns;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < size; i++){
if(init == 0) M.elements[i] = 0;
else
M.elements[i] = get_random_number(MIN_NUMBER, MAX_NUMBER);
}
return M;
}
// Copy a host matrix to a device matrix.
void
copy_matrix_to_device(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.num_rows * Mhost.num_columns * sizeof(float);
Mdevice.num_rows = Mhost.num_rows;
Mdevice.num_columns = Mhost.num_columns;
Mdevice.pitch = Mhost.pitch;
hipMemcpy(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void
copy_matrix_from_device(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.num_rows * Mdevice.num_columns * sizeof(float);
hipMemcpy(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost);
}
// Prints the matrix out to screen
void
print_matrix(const Matrix M)
{
for(unsigned int i = 0; i < M.num_rows; i++){
for(unsigned int j = 0; j < M.num_columns; j++)
printf("%f ", M.elements[i*M.num_columns + j]);
printf("\n");
}
printf("\n");
}
// Returns a random floating-point number between the specified min and max values
float
get_random_number(int min, int max){
return (float)floor((double)(min + (max - min + 1)*((float)rand()/(float)RAND_MAX)));
}
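/* Element-wise comparison of the GPU result against the CPU reference using the
   relative error |reference - gpu_result| / |reference|; returns 1 if every
   element is within threshold (0 otherwise) and prints the largest relative
   error encountered. */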
int
checkResults(float *reference, float *gpu_result, int num_elements, float threshold)
{
int checkMark = 1;
float epsilon = 0.0;
for(int i = 0; i < num_elements; i++)
if(fabsf((reference[i] - gpu_result[i])/reference[i]) > threshold){
checkMark = 0;
break;
}
for(int i = 0; i < num_elements; i++)
if(fabsf((reference[i] - gpu_result[i])/reference[i]) > epsilon){
epsilon = fabsf((reference[i] - gpu_result[i])/reference[i]);
}
printf("Max epsilon = %f. \n", epsilon);
return checkMark;
}
| 907eab135eebafb8cd131a003a4fcb7d6951542e.cu | /* Vector-matrix multiplication: Y = A * X.
* Host code.
* Author: Naga Kandasamy
* Date: 2/21/2017
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "vec_mat_mult_kernel.cu"
#include<sys/time.h>
#define MIN_NUMBER 1
#define MAX_NUMBER 4
extern "C" void compute_gold(float*, const float*, const float*, unsigned int, unsigned int);
Matrix allocate_matrix_on_gpu(const Matrix);
Matrix allocate_matrix(int, int, int);
void copy_matrix_to_device(Matrix, const Matrix);
void copy_matrix_from_device(Matrix, const Matrix);
void vec_mat_mult_on_device_using_global_memory(const Matrix, const Matrix, Matrix);
void vec_mat_mult_on_device_using_shared_memory(const Matrix, const Matrix, Matrix);
void print_matrix(const Matrix);
float get_random_number(int, int);
int checkResults(float *, float *, int, float);
struct timeval t1, t2;
int main(int argc, char** argv) {
// Matrices for the program
Matrix A; // N x N matrix
Matrix X; // N x 1 vector
Matrix Y_cpu, Y_gpu_1, Y_gpu_2; // N x 1 vector
// Initialize the random number generator with a seed value
srand(time(NULL));
// Check command line arguments
if(argc > 1){
printf("Error. This program accepts no arguments. \n");
exit(0);
}
// Allocate and initialize the matrices
A = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 1); // Create a random N x N matrix
X = allocate_matrix(MATRIX_SIZE, 1, 1); // Create a random N x 1 vector
Y_cpu = allocate_matrix(MATRIX_SIZE, 1, 0); // Allocate memory for the output vectors
Y_gpu_1 = allocate_matrix(MATRIX_SIZE, 1, 0);
Y_gpu_2 = allocate_matrix(MATRIX_SIZE, 1, 0);
// compute the vector-matrix multiplication on the CPU for comparison
gettimeofday(&t1,0);
compute_gold(Y_cpu.elements, A.elements, X.elements, A.num_rows, A.num_columns);
gettimeofday(&t2, 0);
double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
printf("Execution time of CPU(serial) %f seconds. \n",time);
// Perform the vector-matrix multiplication on the GPU using global memory
// Return the results in Y_gpu_1
vec_mat_mult_on_device_using_global_memory(A, X, Y_gpu_1);
// check if the device result is equivalent to the expected solution
printf("Checking against reference result. \n");
int size_elements = NUM_ROWS;
int res = checkResults(Y_cpu.elements, Y_gpu_1.elements, size_elements, 0.0001);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
// Perform the vector-matrix multiplication on the GPU using shared memory
// Return the results in Y_gpu_2
vec_mat_mult_on_device_using_shared_memory(A, X, Y_gpu_2);
// check if the device result is equivalent to the expected solution
printf("Checking against reference result. \n");
res = checkResults(Y_cpu.elements, Y_gpu_2.elements, size_elements, 0.0001);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
// Free host matrices
free(A.elements); A.elements = NULL;
free(X.elements); X.elements = NULL;
free(Y_cpu.elements); Y_cpu.elements = NULL;
free(Y_gpu_1.elements); Y_gpu_1.elements = NULL;
free(Y_gpu_2.elements); Y_gpu_2.elements = NULL;
return 0;
}
// Complete the functionality of vector-matrix multiplication using the GPU
// Kernel should use global memory
void
vec_mat_mult_on_device_using_global_memory(const Matrix A, const Matrix X, Matrix Y)
{
Matrix gpu_a = allocate_matrix_on_gpu( A );
Matrix gpu_x = allocate_matrix_on_gpu( X );
Matrix gpu_y = allocate_matrix_on_gpu( Y );
copy_matrix_to_device( gpu_a, A );
copy_matrix_to_device( gpu_x, X );
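/* The naive kernel is launched with 1-D blocks of 512 threads over a grid of
   MATRIX_SIZE/512 blocks -- presumably one thread per output element, assuming
   MATRIX_SIZE divides evenly by 512. */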
dim3 dimBlock( 512, 1 );
dim3 dimGrid( MATRIX_SIZE / dimBlock.x, 1 );
gettimeofday(&t1,0);
vec_mat_kernel_naive <<< dimGrid, dimBlock >>> (gpu_a.elements, gpu_x.elements, gpu_y.elements);
cudaDeviceSynchronize();
gettimeofday(&t2, 0);
double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
printf("Kernel execution time on GPU (global memory): %3f seconds. \n",time);
cudaError_t err = cudaGetLastError();
if ( cudaSuccess != err ) {
fprintf(stderr, " GPU kernel failed: %s.\n", cudaGetErrorString(err));
}
copy_matrix_from_device( Y, gpu_y );
cudaFree( gpu_a.elements );
cudaFree( gpu_x.elements );
cudaFree( gpu_y.elements );
}
// Complete the functionality of vector-matrix multiplication using the GPU
// Kernel should use shared memory
void
vec_mat_mult_on_device_using_shared_memory(const Matrix A, const Matrix X, Matrix Y)
{
Matrix gpu_a = allocate_matrix_on_gpu( A );
Matrix gpu_x = allocate_matrix_on_gpu( X );
Matrix gpu_y = allocate_matrix_on_gpu( Y );
copy_matrix_to_device( gpu_a, A );
copy_matrix_to_device( gpu_x, X );
dim3 dimBlock( 16, 16 );
dim3 dimGrid( MATRIX_SIZE / dimBlock.x, 1);
gettimeofday(&t1,0);
vec_mat_kernel_optimized <<< dimGrid, dimBlock >>> (gpu_a.elements, gpu_x.elements, gpu_y.elements);
cudaDeviceSynchronize();
gettimeofday(&t2, 0);
double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
printf("Kernel execution time on GPU (shared memory): %f seconds. \n",time);
cudaError_t err = cudaGetLastError();
if ( cudaSuccess != err ) {
fprintf(stderr, " GPU kernel failed: %s.\n", cudaGetErrorString(err));
}
copy_matrix_from_device( Y, gpu_y );
cudaFree( gpu_a.elements );
cudaFree( gpu_x.elements );
cudaFree( gpu_y.elements );
}
// Allocate a device matrix of same size as M.
Matrix
allocate_matrix_on_gpu(const Matrix M)
{
Matrix Mdevice = M;
int size = M.num_rows * M.num_columns * sizeof(float);
cudaMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
Matrix
allocate_matrix(int num_rows, int num_columns, int init)
{
Matrix M;
M.num_columns = M.pitch = num_columns;
M.num_rows = num_rows;
int size = M.num_rows * M.num_columns;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < size; i++){
if(init == 0) M.elements[i] = 0;
else
M.elements[i] = get_random_number(MIN_NUMBER, MAX_NUMBER);
}
return M;
}
// Copy a host matrix to a device matrix.
void
copy_matrix_to_device(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.num_rows * Mhost.num_columns * sizeof(float);
Mdevice.num_rows = Mhost.num_rows;
Mdevice.num_columns = Mhost.num_columns;
Mdevice.pitch = Mhost.pitch;
cudaMemcpy(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void
copy_matrix_from_device(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.num_rows * Mdevice.num_columns * sizeof(float);
cudaMemcpy(Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost);
}
// Prints the matrix out to screen
void
print_matrix(const Matrix M)
{
for(unsigned int i = 0; i < M.num_rows; i++){
for(unsigned int j = 0; j < M.num_columns; j++)
printf("%f ", M.elements[i*M.num_columns + j]);
printf("\n");
}
printf("\n");
}
// Returns a random floating-point number between the specified min and max values
float
get_random_number(int min, int max){
return (float)floor((double)(min + (max - min + 1)*((float)rand()/(float)RAND_MAX)));
}
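/* checkResults: passes (returns 1) only if the relative error of every element,
   |reference - gpu_result| / |reference|, stays within threshold; it also
   reports the maximum relative error over all elements. */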
int
checkResults(float *reference, float *gpu_result, int num_elements, float threshold)
{
int checkMark = 1;
float epsilon = 0.0;
for(int i = 0; i < num_elements; i++)
if(fabsf((reference[i] - gpu_result[i])/reference[i]) > threshold){
checkMark = 0;
break;
}
for(int i = 0; i < num_elements; i++)
if(fabsf((reference[i] - gpu_result[i])/reference[i]) > epsilon){
epsilon = fabsf((reference[i] - gpu_result[i])/reference[i]);
}
printf("Max epsilon = %f. \n", epsilon);
return checkMark;
}
|
f3a92a45adc3459a3744f0f5606359749f9df991.hip | // !!! This is a file automatically generated by hipify!!!
#include <mlib/cuda/devmemmanager.h>
namespace cvl{
DevMemManager::DevMemManager(){
allocs.reserve(1024);
}
DevMemManager::~DevMemManager(){
for(size_t i=0;i<allocs.size();++i){
hipFree(allocs[i]);allocs[i]=nullptr;
}
}
void DevMemManager::synchronize(){
pool.synchronize();
}
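// Round-robin stream selection: the mutex guards the shared counter so an index
// can be requested safely from multiple host threads.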
int DevMemManager::nextStream(){
std::unique_lock<std::mutex> ul(mtx);
return next++ % pool.streams.size();
}
}// end namespace cvl
| f3a92a45adc3459a3744f0f5606359749f9df991.cu | #include <mlib/cuda/devmemmanager.h>
namespace cvl{
DevMemManager::DevMemManager(){
allocs.reserve(1024);
}
DevMemManager::~DevMemManager(){
for(size_t i=0;i<allocs.size();++i){
cudaFree(allocs[i]);allocs[i]=nullptr;
}
}
void DevMemManager::synchronize(){
pool.synchronize();
}
int DevMemManager::nextStream(){
std::unique_lock<std::mutex> ul(mtx);
return next++ % pool.streams.size();
}
}// end namespace cvl
|
ef1d0145ff18feaf06d616f4b856ab3704eaf03f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define DAWN_GENERATED 1
#undef DAWN_BACKEND_T
#define DAWN_BACKEND_T CUDA
#ifndef BOOST_RESULT_OF_USE_TR1
#define BOOST_RESULT_OF_USE_TR1 1
#endif
#ifndef BOOST_NO_CXX11_DECLTYPE
#define BOOST_NO_CXX11_DECLTYPE 1
#endif
#ifndef GRIDTOOLS_DAWN_HALO_EXTENT
#define GRIDTOOLS_DAWN_HALO_EXTENT 3
#endif
#ifndef BOOST_PP_VARIADICS
#define BOOST_PP_VARIADICS 1
#endif
#ifndef BOOST_FUSION_DONT_USE_PREPROCESSED_FILES
#define BOOST_FUSION_DONT_USE_PREPROCESSED_FILES 1
#endif
#ifndef BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS
#define BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS 1
#endif
#ifndef GT_VECTOR_LIMIT_SIZE
#define GT_VECTOR_LIMIT_SIZE 30
#endif
#ifndef BOOST_FUSION_INVOKE_MAX_ARITY
#define BOOST_FUSION_INVOKE_MAX_ARITY GT_VECTOR_LIMIT_SIZE
#endif
#ifndef FUSION_MAX_VECTOR_SIZE
#define FUSION_MAX_VECTOR_SIZE GT_VECTOR_LIMIT_SIZE
#endif
#ifndef FUSION_MAX_MAP_SIZE
#define FUSION_MAX_MAP_SIZE GT_VECTOR_LIMIT_SIZE
#endif
#ifndef BOOST_MPL_LIMIT_VECTOR_SIZE
#define BOOST_MPL_LIMIT_VECTOR_SIZE GT_VECTOR_LIMIT_SIZE
#endif
#include <driver-includes/gridtools_includes.hpp>
using namespace gridtools::dawn;
namespace dawn_generated {
namespace cuda {
__global__ void __launch_bounds__(128)
generated_stencil47_ms46_kernel(const int isize, const int jsize, const int ksize,
const int stride_111_1, const int stride_111_2,
::dawn::float_type* const in, ::dawn::float_type* const out) {
// Start kernel
const unsigned int nx = isize;
const unsigned int ny = jsize;
const int block_size_i = (blockIdx.x + 1) * 32 < nx ? 32 : nx - blockIdx.x * 32;
const int block_size_j = (blockIdx.y + 1) * 4 < ny ? 4 : ny - blockIdx.y * 4;
// computing the global position in the physical domain
// In a typical cuda block we have the following regions
// aa bbbbbbbb cc
// aa bbbbbbbb cc
// hh dddddddd ii
// hh dddddddd ii
// hh dddddddd ii
// hh dddddddd ii
// ee ffffffff gg
// ee ffffffff gg
// Regions b,d,f have warp (or multiple of warp size)
// Size of regions a, c, h, i, e, g are determined by max_extent_t
// Regions b,d,f are easily executed by dedicated warps (one warp for each line)
// Regions (a,h,e) and (c,i,g) are executed by two specialized warp
int iblock = 0 - 1;
int jblock = 0 - 1;
if(threadIdx.y < +4) {
iblock = threadIdx.x;
jblock = (int)threadIdx.y + 0;
}
// initialized iterators
int idx111 = (blockIdx.x * 32 + iblock) * 1 + (blockIdx.y * 4 + jblock) * stride_111_1;
// jump iterators to match the intersection of beginning of next interval and the parallel
// execution block
idx111 += max(0, blockIdx.z * 4) * stride_111_2;
int kleg_lower_bound = max(0, blockIdx.z * 4);
int kleg_upper_bound = min(ksize - 1 + 0, (blockIdx.z + 1) * 4 - 1);
;
for(int k = kleg_lower_bound + 0; k <= kleg_upper_bound + 0; ++k) {
if(iblock >= 0 && iblock <= block_size_i - 1 + 0 && jblock >= 0 &&
jblock <= block_size_j - 1 + 0) {
::dawn::float_type dx;
{
out[idx111] =
(((int)-4 * (__ldg(&(in[idx111])) + (__ldg(&(in[idx111 + 1 * 1])) +
(__ldg(&(in[idx111 + 1 * -1])) +
(__ldg(&(in[idx111 + stride_111_1 * -1])) +
__ldg(&(in[idx111 + stride_111_1 * 1]))))))) /
(dx * dx));
}
}
// Slide kcaches
// increment iterators
idx111 += stride_111_2;
}
}
class generated {
public:
struct sbase : public timer_cuda {
sbase(std::string name) : timer_cuda(name) {}
double get_time() { return total_time(); }
};
struct stencil_47 : public sbase {
// Members
// Temporary storage typedefs
using tmp_halo_t = gridtools::halo<0, 0, 0, 0, 0>;
using tmp_meta_data_t = storage_traits_t::storage_info_t<0, 5, tmp_halo_t>;
using tmp_storage_t = storage_traits_t::data_store_t<::dawn::float_type, tmp_meta_data_t>;
const gridtools::dawn::domain m_dom;
public:
stencil_47(const gridtools::dawn::domain& dom_, int rank, int xcols, int ycols)
: sbase("stencil_47"), m_dom(dom_) {}
static constexpr ::dawn::driver::cartesian_extent in_extent = {-1, 1, -1, 1, 0, 0};
static constexpr ::dawn::driver::cartesian_extent out_extent = {0, 0, 0, 0, 0, 0};
void run(storage_ijk_t in_ds, storage_ijk_t out_ds) {
// starting timers
start();
{
;
gridtools::data_view<storage_ijk_t> in = gridtools::make_device_view(in_ds);
gridtools::data_view<storage_ijk_t> out = gridtools::make_device_view(out_ds);
const unsigned int nx = m_dom.isize() - m_dom.iminus() - m_dom.iplus();
const unsigned int ny = m_dom.jsize() - m_dom.jminus() - m_dom.jplus();
const unsigned int nz = m_dom.ksize() - m_dom.kminus() - m_dom.kplus();
dim3 threads(32, 4 + 0, 1);
const unsigned int nbx = (nx + 32 - 1) / 32;
const unsigned int nby = (ny + 4 - 1) / 4;
const unsigned int nbz = (m_dom.ksize() + 4 - 1) / 4;
dim3 blocks(nbx, nby, nbz);
hipLaunchKernelGGL(( generated_stencil47_ms46_kernel), dim3(blocks), dim3(threads), 0, 0,
nx, ny, nz, in_ds.strides()[1], in_ds.strides()[2],
(in.data() + in_ds.get_storage_info_ptr()->index(m_dom.iminus(), m_dom.jminus(), 0)),
(out.data() + out_ds.get_storage_info_ptr()->index(m_dom.iminus(), m_dom.jminus(), 0)));
};
// stopping timers
pause();
}
};
static constexpr const char* s_name = "generated";
stencil_47 m_stencil_47;
public:
generated(const generated&) = delete;
// Members
// Stencil-Data
generated(const gridtools::dawn::domain& dom, int rank = 1, int xcols = 1, int ycols = 1)
: m_stencil_47(dom, rank, xcols, ycols) {}
template <typename S>
void sync_storages(S field) {
field.sync();
}
template <typename S0, typename... S>
void sync_storages(S0 f0, S... fields) {
f0.sync();
sync_storages(fields...);
}
void run(storage_ijk_t in, storage_ijk_t out) {
sync_storages(in, out);
m_stencil_47.run(in, out);
;
sync_storages(in, out);
}
std::string get_name() const { return std::string(s_name); }
void reset_meters() { m_stencil_47.reset(); }
double get_total_time() {
double res = 0;
res += m_stencil_47.get_time();
return res;
}
};
} // namespace cuda
} // namespace dawn_generated
| ef1d0145ff18feaf06d616f4b856ab3704eaf03f.cu | #define DAWN_GENERATED 1
#undef DAWN_BACKEND_T
#define DAWN_BACKEND_T CUDA
#ifndef BOOST_RESULT_OF_USE_TR1
#define BOOST_RESULT_OF_USE_TR1 1
#endif
#ifndef BOOST_NO_CXX11_DECLTYPE
#define BOOST_NO_CXX11_DECLTYPE 1
#endif
#ifndef GRIDTOOLS_DAWN_HALO_EXTENT
#define GRIDTOOLS_DAWN_HALO_EXTENT 3
#endif
#ifndef BOOST_PP_VARIADICS
#define BOOST_PP_VARIADICS 1
#endif
#ifndef BOOST_FUSION_DONT_USE_PREPROCESSED_FILES
#define BOOST_FUSION_DONT_USE_PREPROCESSED_FILES 1
#endif
#ifndef BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS
#define BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS 1
#endif
#ifndef GT_VECTOR_LIMIT_SIZE
#define GT_VECTOR_LIMIT_SIZE 30
#endif
#ifndef BOOST_FUSION_INVOKE_MAX_ARITY
#define BOOST_FUSION_INVOKE_MAX_ARITY GT_VECTOR_LIMIT_SIZE
#endif
#ifndef FUSION_MAX_VECTOR_SIZE
#define FUSION_MAX_VECTOR_SIZE GT_VECTOR_LIMIT_SIZE
#endif
#ifndef FUSION_MAX_MAP_SIZE
#define FUSION_MAX_MAP_SIZE GT_VECTOR_LIMIT_SIZE
#endif
#ifndef BOOST_MPL_LIMIT_VECTOR_SIZE
#define BOOST_MPL_LIMIT_VECTOR_SIZE GT_VECTOR_LIMIT_SIZE
#endif
#include <driver-includes/gridtools_includes.hpp>
using namespace gridtools::dawn;
namespace dawn_generated {
namespace cuda {
__global__ void __launch_bounds__(128)
generated_stencil47_ms46_kernel(const int isize, const int jsize, const int ksize,
const int stride_111_1, const int stride_111_2,
::dawn::float_type* const in, ::dawn::float_type* const out) {
// Start kernel
const unsigned int nx = isize;
const unsigned int ny = jsize;
const int block_size_i = (blockIdx.x + 1) * 32 < nx ? 32 : nx - blockIdx.x * 32;
const int block_size_j = (blockIdx.y + 1) * 4 < ny ? 4 : ny - blockIdx.y * 4;
// computing the global position in the physical domain
// In a typical cuda block we have the following regions
// aa bbbbbbbb cc
// aa bbbbbbbb cc
// hh dddddddd ii
// hh dddddddd ii
// hh dddddddd ii
// hh dddddddd ii
// ee ffffffff gg
// ee ffffffff gg
// Regions b,d,f have warp (or multiple of warp size)
// Size of regions a, c, h, i, e, g are determined by max_extent_t
// Regions b,d,f are easily executed by dedicated warps (one warp for each line)
// Regions (a,h,e) and (c,i,g) are executed by two specialized warp
int iblock = 0 - 1;
int jblock = 0 - 1;
if(threadIdx.y < +4) {
iblock = threadIdx.x;
jblock = (int)threadIdx.y + 0;
}
// initialized iterators
int idx111 = (blockIdx.x * 32 + iblock) * 1 + (blockIdx.y * 4 + jblock) * stride_111_1;
// jump iterators to match the intersection of beginning of next interval and the parallel
// execution block
idx111 += max(0, blockIdx.z * 4) * stride_111_2;
int kleg_lower_bound = max(0, blockIdx.z * 4);
int kleg_upper_bound = min(ksize - 1 + 0, (blockIdx.z + 1) * 4 - 1);
;
for(int k = kleg_lower_bound + 0; k <= kleg_upper_bound + 0; ++k) {
if(iblock >= 0 && iblock <= block_size_i - 1 + 0 && jblock >= 0 &&
jblock <= block_size_j - 1 + 0) {
::dawn::float_type dx;
{
out[idx111] =
(((int)-4 * (__ldg(&(in[idx111])) + (__ldg(&(in[idx111 + 1 * 1])) +
(__ldg(&(in[idx111 + 1 * -1])) +
(__ldg(&(in[idx111 + stride_111_1 * -1])) +
__ldg(&(in[idx111 + stride_111_1 * 1]))))))) /
(dx * dx));
}
}
// Slide kcaches
// increment iterators
idx111 += stride_111_2;
}
}
class generated {
public:
struct sbase : public timer_cuda {
sbase(std::string name) : timer_cuda(name) {}
double get_time() { return total_time(); }
};
struct stencil_47 : public sbase {
// Members
// Temporary storage typedefs
using tmp_halo_t = gridtools::halo<0, 0, 0, 0, 0>;
using tmp_meta_data_t = storage_traits_t::storage_info_t<0, 5, tmp_halo_t>;
using tmp_storage_t = storage_traits_t::data_store_t<::dawn::float_type, tmp_meta_data_t>;
const gridtools::dawn::domain m_dom;
public:
stencil_47(const gridtools::dawn::domain& dom_, int rank, int xcols, int ycols)
: sbase("stencil_47"), m_dom(dom_) {}
static constexpr ::dawn::driver::cartesian_extent in_extent = {-1, 1, -1, 1, 0, 0};
static constexpr ::dawn::driver::cartesian_extent out_extent = {0, 0, 0, 0, 0, 0};
void run(storage_ijk_t in_ds, storage_ijk_t out_ds) {
// starting timers
start();
{
;
gridtools::data_view<storage_ijk_t> in = gridtools::make_device_view(in_ds);
gridtools::data_view<storage_ijk_t> out = gridtools::make_device_view(out_ds);
const unsigned int nx = m_dom.isize() - m_dom.iminus() - m_dom.iplus();
const unsigned int ny = m_dom.jsize() - m_dom.jminus() - m_dom.jplus();
const unsigned int nz = m_dom.ksize() - m_dom.kminus() - m_dom.kplus();
dim3 threads(32, 4 + 0, 1);
const unsigned int nbx = (nx + 32 - 1) / 32;
const unsigned int nby = (ny + 4 - 1) / 4;
const unsigned int nbz = (m_dom.ksize() + 4 - 1) / 4;
dim3 blocks(nbx, nby, nbz);
generated_stencil47_ms46_kernel<<<blocks, threads>>>(
nx, ny, nz, in_ds.strides()[1], in_ds.strides()[2],
(in.data() + in_ds.get_storage_info_ptr()->index(m_dom.iminus(), m_dom.jminus(), 0)),
(out.data() + out_ds.get_storage_info_ptr()->index(m_dom.iminus(), m_dom.jminus(), 0)));
};
// stopping timers
pause();
}
};
static constexpr const char* s_name = "generated";
stencil_47 m_stencil_47;
public:
generated(const generated&) = delete;
// Members
// Stencil-Data
generated(const gridtools::dawn::domain& dom, int rank = 1, int xcols = 1, int ycols = 1)
: m_stencil_47(dom, rank, xcols, ycols) {}
template <typename S>
void sync_storages(S field) {
field.sync();
}
template <typename S0, typename... S>
void sync_storages(S0 f0, S... fields) {
f0.sync();
sync_storages(fields...);
}
void run(storage_ijk_t in, storage_ijk_t out) {
sync_storages(in, out);
m_stencil_47.run(in, out);
;
sync_storages(in, out);
}
std::string get_name() const { return std::string(s_name); }
void reset_meters() { m_stencil_47.reset(); }
double get_total_time() {
double res = 0;
res += m_stencil_47.get_time();
return res;
}
};
} // namespace cuda
} // namespace dawn_generated
|
138c300ccb7a08eb863b175fd8e59890910823e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 700)
#define __shfl_sync(mask, var, lane, width) \
__shfl((var), (lane), (width))
#define __shfl_down_sync(mask, var, offset, width) \
__shfl_down((var), (offset), (width))
#define __shfl_up_sync(mask, var, offset, width) \
__shfl_up((var), (offset), (width))
#endif
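// TVM-generated dense (matmul with transposed B) kernel: each block reduces one
// 512-element row of A against one 512-element row of B in 16 strided steps of
// 32 lanes, combines the per-lane partial sums with warp shuffles, and thread 0
// writes the result to T_dense[blockIdx.y * 2048 + blockIdx.x].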
extern "C" __global__ void default_function_kernel0(void* __restrict__ A, void* __restrict__ B, void* __restrict__ T_dense) {
float T_dense_rf[1];
float red_buf0[1];
T_dense_rf[(0)] = 0.000000e+00f;
for (int k_outer = 0; k_outer < 16; ++k_outer) {
T_dense_rf[(0)] = (T_dense_rf[(0)] + (((float*)A)[((((((int)blockIdx.y) * 512) + (k_outer * 32)) + ((int)threadIdx.x)))] * ((float*)B)[((((((int)blockIdx.x) * 512) + (k_outer * 32)) + ((int)threadIdx.x)))]));
}
unsigned int mask[1];
float t0[1];
red_buf0[(0)] = T_dense_rf[(0)];
mask[(0)] = __activemask();
t0[(0)] = __shfl_down_sync(mask[(0)], red_buf0[(0)], 16, 32);
red_buf0[(0)] = (red_buf0[(0)] + t0[(0)]);
t0[(0)] = __shfl_down_sync(mask[(0)], red_buf0[(0)], 8, 32);
red_buf0[(0)] = (red_buf0[(0)] + t0[(0)]);
t0[(0)] = __shfl_down_sync(mask[(0)], red_buf0[(0)], 4, 32);
red_buf0[(0)] = (red_buf0[(0)] + t0[(0)]);
t0[(0)] = __shfl_down_sync(mask[(0)], red_buf0[(0)], 2, 32);
red_buf0[(0)] = (red_buf0[(0)] + t0[(0)]);
t0[(0)] = __shfl_down_sync(mask[(0)], red_buf0[(0)], 1, 32);
red_buf0[(0)] = (red_buf0[(0)] + t0[(0)]);
red_buf0[(0)] = __shfl_sync(mask[(0)], red_buf0[(0)], 0, 32);
if (((int)threadIdx.x) == 0) {
((float*)T_dense)[(((((int)blockIdx.y) * 2048) + ((int)blockIdx.x)))] = red_buf0[(0)];
}
}
| 138c300ccb7a08eb863b175fd8e59890910823e2.cu |
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 700)
#define __shfl_sync(mask, var, lane, width) \
__shfl((var), (lane), (width))
#define __shfl_down_sync(mask, var, offset, width) \
__shfl_down((var), (offset), (width))
#define __shfl_up_sync(mask, var, offset, width) \
__shfl_up((var), (offset), (width))
#endif
extern "C" __global__ void default_function_kernel0(void* __restrict__ A, void* __restrict__ B, void* __restrict__ T_dense) {
float T_dense_rf[1];
float red_buf0[1];
T_dense_rf[(0)] = 0.000000e+00f;
for (int k_outer = 0; k_outer < 16; ++k_outer) {
T_dense_rf[(0)] = (T_dense_rf[(0)] + (((float*)A)[((((((int)blockIdx.y) * 512) + (k_outer * 32)) + ((int)threadIdx.x)))] * ((float*)B)[((((((int)blockIdx.x) * 512) + (k_outer * 32)) + ((int)threadIdx.x)))]));
}
unsigned int mask[1];
float t0[1];
red_buf0[(0)] = T_dense_rf[(0)];
mask[(0)] = __activemask();
t0[(0)] = __shfl_down_sync(mask[(0)], red_buf0[(0)], 16, 32);
red_buf0[(0)] = (red_buf0[(0)] + t0[(0)]);
t0[(0)] = __shfl_down_sync(mask[(0)], red_buf0[(0)], 8, 32);
red_buf0[(0)] = (red_buf0[(0)] + t0[(0)]);
t0[(0)] = __shfl_down_sync(mask[(0)], red_buf0[(0)], 4, 32);
red_buf0[(0)] = (red_buf0[(0)] + t0[(0)]);
t0[(0)] = __shfl_down_sync(mask[(0)], red_buf0[(0)], 2, 32);
red_buf0[(0)] = (red_buf0[(0)] + t0[(0)]);
t0[(0)] = __shfl_down_sync(mask[(0)], red_buf0[(0)], 1, 32);
red_buf0[(0)] = (red_buf0[(0)] + t0[(0)]);
red_buf0[(0)] = __shfl_sync(mask[(0)], red_buf0[(0)], 0, 32);
if (((int)threadIdx.x) == 0) {
((float*)T_dense)[(((((int)blockIdx.y) * 2048) + ((int)blockIdx.x)))] = red_buf0[(0)];
}
}
|
ebc0083f08793e5977466af919e2dbba9adf5f9d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/selu.hpp>
#include <nbla/cuda/math.hpp>
#include <nbla/variable.hpp>
namespace nbla {
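// SELU activation: y = scale * x for x > 0 and scale * alpha * (exp(x) - 1)
// otherwise. The kernels receive coef = alpha * scale, precomputed on the host
// in forward_impl / backward_impl.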
template <typename T>
__global__ void kernel_selu_forward(const int num, const T scale_, const T coef,
T *y, const T *x) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
y[idx] = x[idx] > (T)0 ? scale_ * x[idx] : coef * (::exp(x[idx]) - (T)1);
}
}
template <typename T, bool accum = true>
__global__ void kernel_selu_backward(const int num, const T scale_,
const T coef, T *dx, const T *x,
const T *dy) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
dx[idx] =
(accum ? dx[idx] : (T)0) +
(x[idx] > (T)0 ? dy[idx] * scale_ : dy[idx] * coef * ::exp(x[idx]));
}
}
template <typename T>
void SELUCuda<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(this->device_);
SELU<T>::setup_impl(inputs, outputs);
}
template <typename T>
void SELUCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(this->device_);
const T *x = inputs[0]->get_data_pointer<T>(this->ctx_);
T *y = outputs[0]->cast_data_and_get_pointer<T>(this->ctx_);
size_t size = inputs[0]->size();
const T coef = this->alpha_ * this->scale_;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_selu_forward, size, this->scale_, coef,
y, x);
}
template <typename T>
void SELUCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
if (!propagate_down[0]) {
return;
}
cuda_set_device(this->device_);
const T *x = inputs[0]->get_data_pointer<T>(this->ctx_);
T *dx = inputs[0]->cast_grad_and_get_pointer<T>(this->ctx_);
const T *dy = outputs[0]->get_grad_pointer<T>(this->ctx_);
size_t size = inputs[0]->size();
const T coef = this->alpha_ * this->scale_;
if (accum[0]) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_selu_backward<T, true>), size,
this->scale_, coef, dx, x, dy);
} else {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_selu_backward<T, false>), size,
this->scale_, coef, dx, x, dy);
}
}
// template instantiation
template class SELUCuda<float>;
}
| ebc0083f08793e5977466af919e2dbba9adf5f9d.cu | // Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/selu.hpp>
#include <nbla/cuda/math.hpp>
#include <nbla/variable.hpp>
namespace nbla {
template <typename T>
__global__ void kernel_selu_forward(const int num, const T scale_, const T coef,
T *y, const T *x) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
y[idx] = x[idx] > (T)0 ? scale_ * x[idx] : coef * (std::exp(x[idx]) - (T)1);
}
}
template <typename T, bool accum = true>
__global__ void kernel_selu_backward(const int num, const T scale_,
const T coef, T *dx, const T *x,
const T *dy) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
dx[idx] =
(accum ? dx[idx] : (T)0) +
(x[idx] > (T)0 ? dy[idx] * scale_ : dy[idx] * coef * std::exp(x[idx]));
}
}
template <typename T>
void SELUCuda<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(this->device_);
SELU<T>::setup_impl(inputs, outputs);
}
template <typename T>
void SELUCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(this->device_);
const T *x = inputs[0]->get_data_pointer<T>(this->ctx_);
T *y = outputs[0]->cast_data_and_get_pointer<T>(this->ctx_);
size_t size = inputs[0]->size();
const T coef = this->alpha_ * this->scale_;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_selu_forward, size, this->scale_, coef,
y, x);
}
template <typename T>
void SELUCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
if (!propagate_down[0]) {
return;
}
cuda_set_device(this->device_);
const T *x = inputs[0]->get_data_pointer<T>(this->ctx_);
T *dx = inputs[0]->cast_grad_and_get_pointer<T>(this->ctx_);
const T *dy = outputs[0]->get_grad_pointer<T>(this->ctx_);
size_t size = inputs[0]->size();
const T coef = this->alpha_ * this->scale_;
if (accum[0]) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_selu_backward<T, true>), size,
this->scale_, coef, dx, x, dy);
} else {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_selu_backward<T, false>), size,
this->scale_, coef, dx, x, dy);
}
}
// template instantiation
template class SELUCuda<float>;
}
|
a765e6ac8ed74c0f6b589d6699f6afa08449a210.hip | // !!! This is a file automatically generated by hipify!!!
/*
csr_graph.cu
Implements CSR Graph. Part of the GGC source code.
Copyright (C) 2014--2016, The University of Texas at Austin
See LICENSE.TXT for copyright license.
Author: Sreepathi Pai <[email protected]>
*/
/* -*- mode: c++ -*- */
#include "gg.h"
#include "csr_graph.h"
unsigned CSRGraph::init() {
row_start = edge_dst = NULL;
edge_data = NULL;
node_data = NULL;
nnodes = nedges = 0;
device_graph = false;
return 0;
}
unsigned CSRGraph::allocOnHost(bool no_edge_data) {
assert(nnodes > 0);
assert(!device_graph);
if(row_start != NULL) // already allocated
return true;
size_t mem_usage = ((nnodes + 1) + nedges) * sizeof(index_type)
+ (nnodes) * sizeof(node_data_type);
if (!no_edge_data) mem_usage += (nedges) * sizeof(edge_data_type);
printf("Host memory for graph: %3u MB\n", mem_usage / 1048756);
row_start = (index_type *) calloc(nnodes+1, sizeof(index_type));
edge_dst = (index_type *) calloc(nedges, sizeof(index_type));
if (!no_edge_data) edge_data = (edge_data_type *) calloc(nedges, sizeof(edge_data_type));
node_data = (node_data_type *) calloc(nnodes, sizeof(node_data_type));
return ((no_edge_data || edge_data) && row_start && edge_dst && node_data);
}
unsigned CSRGraph::allocOnDevice(bool no_edge_data) {
if(edge_dst != NULL) // already allocated
return true;
assert(edge_dst == NULL); // make sure not already allocated
check_cuda(hipMalloc((void **) &edge_dst, nedges * sizeof(index_type)));
check_cuda(hipMalloc((void **) &row_start, (nnodes+1) * sizeof(index_type)));
if (!no_edge_data) check_cuda(hipMalloc((void **) &edge_data, nedges * sizeof(edge_data_type)));
check_cuda(hipMalloc((void **) &node_data, nnodes * sizeof(node_data_type)));
device_graph = true;
return (edge_dst && (no_edge_data || edge_data) && row_start && node_data);
}
void CSRGraphTex::copy_to_gpu(struct CSRGraphTex ©graph) {
copygraph.nnodes = nnodes;
copygraph.nedges = nedges;
assert(copygraph.allocOnDevice(edge_data == NULL));
check_cuda(hipMemcpy(copygraph.edge_dst, edge_dst, nedges * sizeof(index_type), hipMemcpyHostToDevice));
if (edge_data != NULL) check_cuda(hipMemcpy(copygraph.edge_data, edge_data, nedges * sizeof(edge_data_type), hipMemcpyHostToDevice));
check_cuda(hipMemcpy(copygraph.node_data, node_data, nnodes * sizeof(node_data_type), hipMemcpyHostToDevice));
check_cuda(hipMemcpy(copygraph.row_start, row_start, (nnodes+1) * sizeof(index_type), hipMemcpyHostToDevice));
}
unsigned CSRGraphTex::allocOnDevice(bool no_edge_data) {
if(CSRGraph::allocOnDevice(no_edge_data))
{
assert(sizeof(index_type) <= 4); // 32-bit only!
assert(sizeof(node_data_type) <= 4); // 32-bit only!
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeLinear;
resDesc.res.linear.desc.f = hipChannelFormatKindUnsigned;
resDesc.res.linear.desc.x = 32; // bits per channel
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.readMode = hipReadModeElementType;
resDesc.res.linear.devPtr = edge_dst;
resDesc.res.linear.sizeInBytes = nedges*sizeof(index_type);
check_cuda(hipCreateTextureObject(&edge_dst_tx, &resDesc, &texDesc, NULL));
resDesc.res.linear.devPtr = row_start;
resDesc.res.linear.sizeInBytes = (nnodes + 1) * sizeof(index_type);
check_cuda(hipCreateTextureObject(&row_start_tx, &resDesc, &texDesc, NULL));
resDesc.res.linear.devPtr = node_data;
resDesc.res.linear.sizeInBytes = (nnodes) * sizeof(node_data_type);
check_cuda(hipCreateTextureObject(&node_data_tx, &resDesc, &texDesc, NULL));
return 1;
}
return 0;
}
unsigned CSRGraph::deallocOnHost() {
if(!device_graph) {
free(row_start);
free(edge_dst);
if (edge_data != NULL) free(edge_data);
free(node_data);
}
return 0;
}
unsigned CSRGraph::deallocOnDevice() {
if(device_graph) {
hipFree(edge_dst);
if (edge_data != NULL) hipFree(edge_data);
hipFree(row_start);
hipFree(node_data);
}
return 0;
}
CSRGraph::CSRGraph() {
init();
}
void CSRGraph::progressPrint(unsigned maxii, unsigned ii) {
const unsigned nsteps = 10;
unsigned ineachstep = (maxii / nsteps);
if(ineachstep == 0) ineachstep = 1;
/*if (ii == maxii) {
printf("\t100%%\n");
} else*/ if (ii % ineachstep == 0) {
int progress = ((size_t) ii * 100) / maxii + 1;
printf("\t%3d%%\r", progress);
fflush(stdout);
}
}
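/* Reads a Galois binary .gr graph via mmap. File layout: four little-endian
   64-bit header words (version == 1, sizeEdgeTy, numNodes, numEdges), then
   numNodes 64-bit row offsets, numEdges 32-bit edge destinations (padded to an
   8-byte boundary), and finally the edge data when sizeEdgeTy is non-zero. */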
unsigned CSRGraph::readFromGR(char file[], bool read_edge_data) {
std::ifstream cfile;
cfile.open(file);
// copied from GaloisCpp/trunk/src/FileGraph.h
int masterFD = open(file, O_RDONLY);
if (masterFD == -1) {
printf("FileGraph::structureFromFile: unable to open %s.\n", file);
return 1;
}
struct stat buf;
int f = fstat(masterFD, &buf);
if (f == -1) {
printf("FileGraph::structureFromFile: unable to stat %s.\n", file);
abort();
}
size_t masterLength = buf.st_size;
int _MAP_BASE = MAP_PRIVATE;
//#ifdef MAP_POPULATE
// _MAP_BASE |= MAP_POPULATE;
//#endif
void* m = mmap(0, masterLength, PROT_READ, _MAP_BASE, masterFD, 0);
if (m == MAP_FAILED) {
m = 0;
printf("FileGraph::structureFromFile: mmap failed.\n");
abort();
}
ggc::Timer t("graphreader");
t.start();
//parse file
uint64_t* fptr = (uint64_t*)m;
__attribute__((unused)) uint64_t version = le64toh(*fptr++);
assert(version == 1);
uint64_t sizeEdgeTy = le64toh(*fptr++);
uint64_t numNodes = le64toh(*fptr++);
uint64_t numEdges = le64toh(*fptr++);
uint64_t *outIdx = fptr;
fptr += numNodes;
uint32_t *fptr32 = (uint32_t*)fptr;
uint32_t *outs = fptr32;
fptr32 += numEdges;
if (numEdges % 2) fptr32 += 1;
edge_data_type *edgeData = (edge_data_type *)fptr32;
// cuda.
nnodes = numNodes;
nedges = numEdges;
printf("nnodes=%d, nedges=%d, sizeEdge=%d.\n", nnodes, nedges, sizeEdgeTy);
allocOnHost(!read_edge_data);
row_start[0] = 0;
for (unsigned ii = 0; ii < nnodes; ++ii) {
row_start[ii+1] = le64toh(outIdx[ii]);
// //noutgoing[ii] = le64toh(outIdx[ii]) - le64toh(outIdx[ii - 1]);
index_type degree = row_start[ii+1] - row_start[ii];
for (unsigned jj = 0; jj < degree; ++jj) {
unsigned edgeindex = row_start[ii] + jj;
unsigned dst = le32toh(outs[edgeindex]);
if (dst >= nnodes) printf("\tinvalid edge from %d to %d at index %d(%d).\n", ii, dst, jj, edgeindex);
edge_dst[edgeindex] = dst;
if(sizeEdgeTy && read_edge_data)
edge_data[edgeindex] = edgeData[edgeindex];
}
progressPrint(nnodes, ii);
}
cfile.close(); // probably galois doesn't close its file due to mmap.
t.stop();
// TODO: fix MB/s
printf("read %lld bytes in %d ms (%0.2f MB/s)\n\r\n", masterLength, t.duration_ms(), (masterLength / 1000.0) / (t.duration_ms()));
return 0;
}
unsigned CSRGraph::read(char file[], bool read_edge_data) {
return readFromGR(file, read_edge_data);
}
void CSRGraph::dealloc() {
if(device_graph)
deallocOnDevice();
else
deallocOnHost();
}
void CSRGraph::copy_to_gpu(struct CSRGraph ©graph) {
copygraph.nnodes = nnodes;
copygraph.nedges = nedges;
assert(copygraph.allocOnDevice(edge_data == NULL));
check_cuda(hipMemcpy(copygraph.edge_dst, edge_dst, nedges * sizeof(index_type), hipMemcpyHostToDevice));
if (edge_data != NULL) check_cuda(hipMemcpy(copygraph.edge_data, edge_data, nedges * sizeof(edge_data_type), hipMemcpyHostToDevice));
check_cuda(hipMemcpy(copygraph.node_data, node_data, nnodes * sizeof(node_data_type), hipMemcpyHostToDevice));
check_cuda(hipMemcpy(copygraph.row_start, row_start, (nnodes+1) * sizeof(index_type), hipMemcpyHostToDevice));
}
void CSRGraph::copy_to_cpu(struct CSRGraph ©graph) {
assert(device_graph);
// cpu graph is not allocated
assert(copygraph.nnodes = nnodes);
assert(copygraph.nedges = nedges);
check_cuda(hipMemcpy(copygraph.edge_dst, edge_dst, nedges * sizeof(index_type), hipMemcpyDeviceToHost));
if (edge_data != NULL) check_cuda(hipMemcpy(copygraph.edge_data, edge_data, nedges * sizeof(edge_data_type), hipMemcpyDeviceToHost));
check_cuda(hipMemcpy(copygraph.node_data, node_data, nnodes * sizeof(node_data_type), hipMemcpyDeviceToHost));
check_cuda(hipMemcpy(copygraph.row_start, row_start, (nnodes+1) * sizeof(index_type), hipMemcpyDeviceToHost));
}
struct EdgeIterator {
CSRGraph *g;
index_type node;
index_type s;
__device__
EdgeIterator(CSRGraph& g, index_type node) {
this->g = &g;
this->node = node;
}
__device__
index_type size() const {
return g->row_start[node + 1] - g->row_start[node];
}
__device__
index_type start() {
s = g->row_start[node];
return s;
}
__device__
index_type end() const {
return g->row_start[node + 1];
}
__device__
void next() {
s++;
}
__device__
index_type dst() const {
return g->edge_dst[s];
}
__device__
edge_data_type data() const {
return g->edge_data[s];
}
};
| a765e6ac8ed74c0f6b589d6699f6afa08449a210.cu | /*
csr_graph.cu
Implements CSR Graph. Part of the GGC source code.
Copyright (C) 2014--2016, The University of Texas at Austin
See LICENSE.TXT for copyright license.
Author: Sreepathi Pai <[email protected]>
*/
/* -*- mode: c++ -*- */
#include "gg.h"
#include "csr_graph.h"
unsigned CSRGraph::init() {
row_start = edge_dst = NULL;
edge_data = NULL;
node_data = NULL;
nnodes = nedges = 0;
device_graph = false;
return 0;
}
unsigned CSRGraph::allocOnHost(bool no_edge_data) {
assert(nnodes > 0);
assert(!device_graph);
if(row_start != NULL) // already allocated
return true;
size_t mem_usage = ((nnodes + 1) + nedges) * sizeof(index_type)
+ (nnodes) * sizeof(node_data_type);
if (!no_edge_data) mem_usage += (nedges) * sizeof(edge_data_type);
printf("Host memory for graph: %3u MB\n", mem_usage / 1048756);
row_start = (index_type *) calloc(nnodes+1, sizeof(index_type));
edge_dst = (index_type *) calloc(nedges, sizeof(index_type));
if (!no_edge_data) edge_data = (edge_data_type *) calloc(nedges, sizeof(edge_data_type));
node_data = (node_data_type *) calloc(nnodes, sizeof(node_data_type));
return ((no_edge_data || edge_data) && row_start && edge_dst && node_data);
}
unsigned CSRGraph::allocOnDevice(bool no_edge_data) {
if(edge_dst != NULL) // already allocated
return true;
assert(edge_dst == NULL); // make sure not already allocated
check_cuda(cudaMalloc((void **) &edge_dst, nedges * sizeof(index_type)));
check_cuda(cudaMalloc((void **) &row_start, (nnodes+1) * sizeof(index_type)));
if (!no_edge_data) check_cuda(cudaMalloc((void **) &edge_data, nedges * sizeof(edge_data_type)));
check_cuda(cudaMalloc((void **) &node_data, nnodes * sizeof(node_data_type)));
device_graph = true;
return (edge_dst && (no_edge_data || edge_data) && row_start && node_data);
}
void CSRGraphTex::copy_to_gpu(struct CSRGraphTex ©graph) {
copygraph.nnodes = nnodes;
copygraph.nedges = nedges;
assert(copygraph.allocOnDevice(edge_data == NULL));
check_cuda(cudaMemcpy(copygraph.edge_dst, edge_dst, nedges * sizeof(index_type), cudaMemcpyHostToDevice));
if (edge_data != NULL) check_cuda(cudaMemcpy(copygraph.edge_data, edge_data, nedges * sizeof(edge_data_type), cudaMemcpyHostToDevice));
check_cuda(cudaMemcpy(copygraph.node_data, node_data, nnodes * sizeof(node_data_type), cudaMemcpyHostToDevice));
check_cuda(cudaMemcpy(copygraph.row_start, row_start, (nnodes+1) * sizeof(index_type), cudaMemcpyHostToDevice));
}
unsigned CSRGraphTex::allocOnDevice(bool no_edge_data) {
if(CSRGraph::allocOnDevice(no_edge_data))
{
assert(sizeof(index_type) <= 4); // 32-bit only!
assert(sizeof(node_data_type) <= 4); // 32-bit only!
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.desc.f = cudaChannelFormatKindUnsigned;
resDesc.res.linear.desc.x = 32; // bits per channel
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.readMode = cudaReadModeElementType;
resDesc.res.linear.devPtr = edge_dst;
resDesc.res.linear.sizeInBytes = nedges*sizeof(index_type);
check_cuda(cudaCreateTextureObject(&edge_dst_tx, &resDesc, &texDesc, NULL));
resDesc.res.linear.devPtr = row_start;
resDesc.res.linear.sizeInBytes = (nnodes + 1) * sizeof(index_type);
check_cuda(cudaCreateTextureObject(&row_start_tx, &resDesc, &texDesc, NULL));
resDesc.res.linear.devPtr = node_data;
resDesc.res.linear.sizeInBytes = (nnodes) * sizeof(node_data_type);
check_cuda(cudaCreateTextureObject(&node_data_tx, &resDesc, &texDesc, NULL));
return 1;
}
return 0;
}
unsigned CSRGraph::deallocOnHost() {
if(!device_graph) {
free(row_start);
free(edge_dst);
if (edge_data != NULL) free(edge_data);
free(node_data);
}
return 0;
}
unsigned CSRGraph::deallocOnDevice() {
if(device_graph) {
cudaFree(edge_dst);
if (edge_data != NULL) cudaFree(edge_data);
cudaFree(row_start);
cudaFree(node_data);
}
return 0;
}
CSRGraph::CSRGraph() {
init();
}
void CSRGraph::progressPrint(unsigned maxii, unsigned ii) {
const unsigned nsteps = 10;
unsigned ineachstep = (maxii / nsteps);
if(ineachstep == 0) ineachstep = 1;
/*if (ii == maxii) {
printf("\t100%%\n");
} else*/ if (ii % ineachstep == 0) {
int progress = ((size_t) ii * 100) / maxii + 1;
printf("\t%3d%%\r", progress);
fflush(stdout);
}
}
unsigned CSRGraph::readFromGR(char file[], bool read_edge_data) {
std::ifstream cfile;
cfile.open(file);
// copied from GaloisCpp/trunk/src/FileGraph.h
int masterFD = open(file, O_RDONLY);
if (masterFD == -1) {
printf("FileGraph::structureFromFile: unable to open %s.\n", file);
return 1;
}
struct stat buf;
int f = fstat(masterFD, &buf);
if (f == -1) {
printf("FileGraph::structureFromFile: unable to stat %s.\n", file);
abort();
}
size_t masterLength = buf.st_size;
int _MAP_BASE = MAP_PRIVATE;
//#ifdef MAP_POPULATE
// _MAP_BASE |= MAP_POPULATE;
//#endif
void* m = mmap(0, masterLength, PROT_READ, _MAP_BASE, masterFD, 0);
if (m == MAP_FAILED) {
m = 0;
printf("FileGraph::structureFromFile: mmap failed.\n");
abort();
}
ggc::Timer t("graphreader");
t.start();
//parse file
uint64_t* fptr = (uint64_t*)m;
__attribute__((unused)) uint64_t version = le64toh(*fptr++);
assert(version == 1);
uint64_t sizeEdgeTy = le64toh(*fptr++);
uint64_t numNodes = le64toh(*fptr++);
uint64_t numEdges = le64toh(*fptr++);
uint64_t *outIdx = fptr;
fptr += numNodes;
uint32_t *fptr32 = (uint32_t*)fptr;
uint32_t *outs = fptr32;
fptr32 += numEdges;
if (numEdges % 2) fptr32 += 1;
edge_data_type *edgeData = (edge_data_type *)fptr32;
// cuda.
nnodes = numNodes;
nedges = numEdges;
printf("nnodes=%d, nedges=%d, sizeEdge=%d.\n", nnodes, nedges, sizeEdgeTy);
allocOnHost(!read_edge_data);
row_start[0] = 0;
for (unsigned ii = 0; ii < nnodes; ++ii) {
row_start[ii+1] = le64toh(outIdx[ii]);
// //noutgoing[ii] = le64toh(outIdx[ii]) - le64toh(outIdx[ii - 1]);
index_type degree = row_start[ii+1] - row_start[ii];
for (unsigned jj = 0; jj < degree; ++jj) {
unsigned edgeindex = row_start[ii] + jj;
unsigned dst = le32toh(outs[edgeindex]);
if (dst >= nnodes) printf("\tinvalid edge from %d to %d at index %d(%d).\n", ii, dst, jj, edgeindex);
edge_dst[edgeindex] = dst;
if(sizeEdgeTy && read_edge_data)
edge_data[edgeindex] = edgeData[edgeindex];
}
progressPrint(nnodes, ii);
}
cfile.close(); // probably galois doesn't close its file due to mmap.
t.stop();
// TODO: fix MB/s
printf("read %lld bytes in %d ms (%0.2f MB/s)\n\r\n", masterLength, t.duration_ms(), (masterLength / 1000.0) / (t.duration_ms()));
return 0;
}
unsigned CSRGraph::read(char file[], bool read_edge_data) {
return readFromGR(file, read_edge_data);
}
void CSRGraph::dealloc() {
if(device_graph)
deallocOnDevice();
else
deallocOnHost();
}
void CSRGraph::copy_to_gpu(struct CSRGraph ©graph) {
copygraph.nnodes = nnodes;
copygraph.nedges = nedges;
assert(copygraph.allocOnDevice(edge_data == NULL));
check_cuda(cudaMemcpy(copygraph.edge_dst, edge_dst, nedges * sizeof(index_type), cudaMemcpyHostToDevice));
if (edge_data != NULL) check_cuda(cudaMemcpy(copygraph.edge_data, edge_data, nedges * sizeof(edge_data_type), cudaMemcpyHostToDevice));
check_cuda(cudaMemcpy(copygraph.node_data, node_data, nnodes * sizeof(node_data_type), cudaMemcpyHostToDevice));
check_cuda(cudaMemcpy(copygraph.row_start, row_start, (nnodes+1) * sizeof(index_type), cudaMemcpyHostToDevice));
}
void CSRGraph::copy_to_cpu(struct CSRGraph ©graph) {
assert(device_graph);
// cpu graph is not allocated
assert(copygraph.nnodes = nnodes);
assert(copygraph.nedges = nedges);
check_cuda(cudaMemcpy(copygraph.edge_dst, edge_dst, nedges * sizeof(index_type), cudaMemcpyDeviceToHost));
if (edge_data != NULL) check_cuda(cudaMemcpy(copygraph.edge_data, edge_data, nedges * sizeof(edge_data_type), cudaMemcpyDeviceToHost));
check_cuda(cudaMemcpy(copygraph.node_data, node_data, nnodes * sizeof(node_data_type), cudaMemcpyDeviceToHost));
check_cuda(cudaMemcpy(copygraph.row_start, row_start, (nnodes+1) * sizeof(index_type), cudaMemcpyDeviceToHost));
}
struct EdgeIterator {
CSRGraph *g;
index_type node;
index_type s;
__device__
EdgeIterator(CSRGraph& g, index_type node) {
this->g = &g;
this->node = node;
}
__device__
index_type size() const {
return g->row_start[node + 1] - g->row_start[node];
}
__device__
index_type start() {
s = g->row_start[node];
return s;
}
__device__
index_type end() const {
return g->row_start[node + 1];
}
__device__
void next() {
s++;
}
__device__
index_type dst() const {
return g->edge_dst[s];
}
__device__
edge_data_type data() const {
return g->edge_data[s];
}
};
|
da9dd3d954a7121aaef7b4868c0be7f5df118428.hip | // !!! This is a file automatically generated by hipify!!!
// This is modification of Alex's convolution kernel extending 2d to 3d.
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define LO8(x) ((x) & 0x000000FF)
#define MI16(x) (((x) & 0x0000FFFF) >> 8)
#define HI24(x) (((x) & 0x00FFFFFF) >> 16)
#define MUL24(x,y) ((x) * (y))
#define DIVUP(x, y) (((x) + (y) - 1) / (y))
#define MIN(a, b) ((a) < (b) ? (a) : (b))
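// kSampleMultinomial: one thread per row draws a single sample from an n x k
// categorical distribution by scanning the row's cumulative sum and writing a
// one-hot indicator for the interval containing random[id].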
__global__ void kSampleMultinomial(int* output, float* distribution, float* random, int k, int n){
const unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
if(id < n){
distribution += k * id;
random += id;
output += k * id;
float preSum = 0, nowSum = 0;
for(int i = 0; i < k; i++){
nowSum += distribution[i];
output[i] = random[0] >= preSum && random[0] < nowSum;
preSum = nowSum;
}
}
}
__global__ void kExp(float* output, float* input, unsigned int numElements){
const unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for(int i = id; i < numElements; i += numThreads)
output[i] = __expf(input[i]);
}
__global__ void kDivide(float* output, float* leftInput, float* rightInput, unsigned int numElements){
const unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for(int i = id; i < numElements; i += numThreads)
output[i] = __fdividef(leftInput[i], rightInput[i]);
}
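/* Forward convolution (3-D extension of cuda-convnet's filterActs): each block
   computes one output module for a group of 16 filters and 32 images, staging
   4 filter pixels and 4 image pixels at a time through shared memory; every
   thread accumulates 4 filter outputs for its image column. */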
__global__ void kConvolve_forward(float* targets, float* images, float* filters,
const int numImages, const int numFilters,
const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesZ, const int numModulesY, const int numModulesX, const int imgStride) {
__shared__ float shFilters[4*1][4 * 4]; // pre-load 4 pixels from 4*4 filters
__shared__ float shImages[4*1][32 * 1]; // pre-load 4 pixels from 32*2 images
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize * filterSize;
const int blocksPerModule = numFilters / (4*4);
const int moduleIdx = blockIdx.x / blocksPerModule;
const int blockFilterIdx = blockIdx.x % blocksPerModule;
const int tidx = threadIdx.x * 32 + threadIdx.y;
const int imgLoadModPosZ = (moduleIdx / (numModulesX * numModulesY)) * moduleStride;
const int imgLoadModPosY = ((moduleIdx / numModulesX) % numModulesY )* moduleStride;
const int imgLoadModPosX = (moduleIdx % numModulesX) * moduleStride;
const int shFilterLoadY = tidx / (4 * 4);
const int shFilterLoadX = tidx % (4 * 4);
const int myImgIdx = blockIdx.y * 32 * 1 + threadIdx.y;
images += myImgIdx;
filters += 4 * 4 * blockFilterIdx
+ shFilterLoadY * numFilters + shFilterLoadX;
targets += moduleIdx * numImages
+ (blockFilterIdx * 4 * 4 + threadIdx.x) * numImages * numModulesZ * numModulesY * numModulesX
+ myImgIdx;
float prod[4][1];
#pragma unroll
for(int f = 0; f < 4; f++) {
#pragma unroll
for(int g = 0; g < 1; g++) {
prod[f][g] = 0;
}
}
for (int p = 0; p < filterPixels; p += 4) {
/*
* Load 4 pixels from 4*4 filters
*/
if (shFilterLoadY < 4) {
#pragma unroll
for (int p2 = 0; p2 < 4; p2 += 32/4) {
if (p + p2 + shFilterLoadY < filterPixels) {
#pragma unroll
for (int c = 0; c < 1; c++) {
shFilters[shFilterLoadY + p2 + c * 4][shFilterLoadX] = filters[(c * filterPixels + p + p2) * numFilters];
}
} else {
#pragma unroll
for (int c = 0; c < 1; c++) {
shFilters[shFilterLoadY + p2 + c * 4][shFilterLoadX] = 0;
}
}
}
}
/*
* Load 4 pixels from 32*1 images
*/
const int pixIdx = p + threadIdx.x;
if (pixIdx < filterPixels) {
const int x = paddingStart + imgLoadModPosX + pixIdx % filterSize;
const int y = paddingStart + imgLoadModPosY + (pixIdx / filterSize) % filterSize;
const int z = paddingStart + imgLoadModPosZ + pixIdx / (filterSize * filterSize);
if (z >= 0 && z < imgSizeZ && y >= 0 && y < imgSizeY && x >= 0 && x < imgSizeX) {
#pragma unroll
for (int i = 0; i < 1; i++) {
if (!true || myImgIdx + i * 32 < numImages) {
#pragma unroll
for (int c = 0; c < 1; c++) {
shImages[threadIdx.x + c * 4][threadIdx.y + i * 32] = images[imgStride * (c * imgPixels + z * imgSizeX * imgSizeY + y * imgSizeX + x) + i * 32];
}
} else {
#pragma unroll
for (int c = 0; c < 1; c++) {
shImages[threadIdx.x + c * 4][threadIdx.y + i * 32] = 0;
}
}
}
} else { // Padding
#pragma unroll
for (int i = 0; i < 1; i++) {
#pragma unroll
for (int c = 0; c < 1; c++) {
shImages[threadIdx.x + c * 4][threadIdx.y + i * 32] = 0;
}
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < 4*1; i++) {
#pragma unroll
for(int f = 0; f < 4; f++) {
#pragma unroll
for(int g = 0; g < 1; g++) {
prod[f][g] += shImages[i][g * 32 + threadIdx.y] * shFilters[i][threadIdx.x + f * 4];
}
}
}
__syncthreads();
}
#pragma unroll
for (int g = 0; g < 1; g++) {
if (!true || myImgIdx + g * 32 < numImages) {
#pragma unroll
for (int f = 0; f < 4; f++) {
targets[g * 32 + f * 4 * numImages * numModulesZ * numModulesY * numModulesX] = prod[f][g];
}
}
}
}
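/* Weight gradients: for each group of `partialSum` output modules, a block
   accumulates the products of up to 8*5 staged filter pixels and 16 filters'
   hidden activations over all images (32 cases per shared-memory load), then
   writes the partial sums scaled by scaleOutputs. */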
__global__ void kConvolve_weight(float* targets, float* images, float* hidActs,
const int numImages, const int numFilters,
const int numModulesZ, const int numModulesY, const int numModulesX,
const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int partialSum, const float scaleOutputs) {
__shared__ float shImages[5 * 8 * 1][32]; // preload 32 cases of 8 * 5 pixels
__shared__ float shHidActs[16][32 + 1]; // preload 32 cases of 16 hidActs
const int tidx = 16 * threadIdx.y + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
const int filterPixels = filterSize * filterSize * filterSize;
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int filterBlocksPerModule = numFilters / 16;
const int outputModuleIdx = blockIdx.x / filterBlocksPerModule;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = 16 * (blockIdx.x % filterBlocksPerModule);
// const int moduleStride = (imgSize - filterSize + 1) / numModulesX;
const int numModules = numModulesZ * numModulesY * numModulesX;
const int blockPixelOffset = blockIdx.y * 8 * 5;
images += loadX;
hidActs += moduleIdx * numImages
+ blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += (outputModuleIdx * numFilters) * filterPixels * 1
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
float* shImgLoad = &shImages[loadY][loadX];
float* shHidActLoad = &shHidActs[loadY][loadX];
float prod[1][5];
#pragma unroll
for (int c = 0; c < 1; c++) {
#pragma unroll
for (int p = 0; p < 5; p++) {
prod[c][p] = 0;
}
}
__shared__ int pxDivs[8*5];
if (tidx < 8 * 5) {
pxDivs[tidx] = (((blockPixelOffset + tidx) / (filterSize * filterSize)) << 16) + (((blockPixelOffset + tidx) / filterSize) % filterSize << 8) + ((blockPixelOffset + tidx) % filterSize);
}
__syncthreads();
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
const int imgLoadModPosZ = paddingStart + (m / (numModulesX * numModulesY)) * moduleStride;
const int imgLoadModPosY = paddingStart + ((m / numModulesX) % numModulesY) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
for (int caseIdx = 0; caseIdx < numImages; caseIdx += 32) {
if (loadY < 8 * 5) {
/*
* As long as 8 * 16 is divisible by 32 this will loop the right
* number of times.
*
* This will load some imgGrads from filter pixels that don't exit (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
// #pragma unroll
for (int y = 0; y < 8 * 5; y += (16 * 8) / 32) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((8 * 5) % (16 * 8 / 32) == 0 || y + loadY < 8 * 5) {
const int pxIdx = loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!true || caseIdx + loadX < numImages)) {
const int pxZ = imgLoadModPosZ + HI24(pxDivs[pxIdx]);
const int pxY = imgLoadModPosY + MI16(pxDivs[pxIdx]); // pixel x,y coords in image
const int pxX = imgLoadModPosX + LO8(pxDivs[pxIdx]);
if (pxZ >= 0 && pxZ < imgSizeZ && pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) {
const int pixIdx = (pxZ * imgSizeX * imgSizeY + pxY * imgSizeX + pxX) * imgStride;
#pragma unroll
for (int c = 0; c < 1; c++) {
shImgLoad[(y + c * 5 * 8) * 32] = images[caseIdx + c * imgPixels * imgStride + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < 1; c++) {
shImgLoad[(y + c * 5 * 8) * 32] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < 1; c++) {
shImgLoad[(y + c * 5 * 8) * 32] = 0;
}
}
}
}
}
if (loadY < 16 && (!true || caseIdx + loadX < numImages)) {
#pragma unroll
for (int y = 0; y < 16; y += (16 * 8) / 32) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if (16 % (16 * 8 / 32) == 0 || y + loadY < 16) {
shHidActLoad[y * (32 + 1)] = hidActs[caseIdx + y * numImages * numModules];
}
}
}
__syncthreads();
#pragma unroll
for (int p = 0; p < 5; p++) {
#pragma unroll
for (int i = 0; i < 32; i++) {
#pragma unroll
for (int c = 0; c < 1; c++) {
prod[c][p] += shImages[threadIdx.y + p * 8 + c * 5 * 8][i] * shHidActs[threadIdx.x][i];
}
}
}
__syncthreads();
}
hidActs += numImages;
}
#pragma unroll
for (int p = 0; p < 5; p++) {
if (blockPixelOffset + p * 8 + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < 1; c++) {
targets[p * 8 * numFilters + c * filterPixels * numFilters] = scaleOutputs * prod[c][p];
}
}
}
}
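/*
 * Backward (image-gradient) pass of the single-color 3-D convolution: given
 * the output gradients (hidActs) and the filters, accumulates the gradient
 * with respect to the input images into `targets`. Each block covers a
 * 4x4 (Y,X) patch of one Z slice for 32 images, processing the filters 16 at
 * a time. Inferred from the indexing below (an assumption, since no launcher
 * appears in this file): a 16x16 thread block, blockIdx.x over 32-image
 * chunks, blockIdx.y over image regions.
 */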
__global__ void kConvolve_backward(float* targets, const float* hidActs, const float* filters,
const int numModulesZ, const int numModulesY, const int numModulesX,
const int numImages, const int numFilters, const int filterSize,
const int imgSizeZ, const int imgSizeY, const int imgSizeX,
const int paddingStart, const int moduleStride) {
__shared__ float shFilters[1*16][16 + 1]; // load 16 filter one time. See below.
__shared__ float shHidActs[16][16*2]; // each block deal with 16 * imgPerThread images.
const int blockCaseIdx = blockIdx.x * 16 * 2;
const int numRegionsX = DIVUP(imgSizeX, 4);
const int numRegionsY = DIVUP(imgSizeY, 4);
const int blockRegionIdx = blockIdx.y;
const int blockRegionIdxX = blockRegionIdx % numRegionsX;
const int blockRegionIdxY = (blockRegionIdx / numRegionsX) % numRegionsY;
const int blockRegionIdxZ = blockRegionIdx / (numRegionsX * numRegionsY);
const int blockRegionLeft = blockRegionIdxX * 4;
const int blockRegionTop = blockRegionIdxY * 4;
const int blockRegionFront = blockRegionIdxZ;
const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
const int pxZ = blockRegionFront;
const int pxY = blockRegionTop + pxYInRegion;
const int pxX = blockRegionLeft + pxXInRegion;
const int pxIdx = pxZ * imgSizeX * imgSizeY + pxY * imgSizeX + pxX;
const bool isPxInImg = pxZ < imgSizeZ && pxY < imgSizeY && pxX < imgSizeX;
const int numModules = numModulesZ * numModulesY * numModulesX;
const int filterPixels = filterSize * filterSize * filterSize;
const int imgPixels = imgSizeZ * imgSizeX * imgSizeY;
const int tidx = threadIdx.y * 16 + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32; // load 32 cases one time.
hidActs += blockCaseIdx + loadY * numImages * numModules + loadX;
filters += threadIdx.x;
targets += pxIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[1][2];
#pragma unroll
for (int c = 0; c < 1; c++) {
#pragma unroll
for (int i = 0; i < 2; i++) {
prod[c][i] = 0;
}
}
const int startZ = blockRegionFront - paddingStart < filterSize ? 0
: 1 + (blockRegionFront - paddingStart -filterSize) / moduleStride;
const int endZ = MIN(numModulesZ, 1 + (blockRegionFront + 3 - paddingStart) / moduleStride);
const int startY = blockRegionTop - paddingStart < filterSize ? 0
: 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
const int startX = blockRegionLeft - paddingStart < filterSize ? 0
: 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
float* shHidActLoad = &shHidActs[loadY][loadX];
for (int mz = startZ; mz < endZ; mz++){
const int moduleFront = paddingStart + mz * moduleStride;
const int pxInModuleZ = pxZ - moduleFront;
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInModuleY = pxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = mz * numModulesX * numModulesY + my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInModuleX = pxX - moduleLeft;
const bool isPxInModule = pxInModuleZ >= 0 && pxInModuleZ < filterSize && pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
const int pxIdxInModule = pxInModuleZ * filterSize * filterSize + pxInModuleY * filterSize + pxInModuleX;
for (int f = 0; f < numFilters; f += 16) { // multiply with 16 filters at a time
// Now the threads split up into half-warps, and each half-warp decides if it's interested.
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < 2 * 16; i += 32) { // IMAGES
if (!true || blockCaseIdx + i + loadX < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of 2*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * 2 + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of 2*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * 2 + i] = 0;
}
}
}
if (isPxInImg && isPxInModule) {
// This half-warp is interested, so it's going to load the weights from this module to its pixel.
// Not fully coalesced read :(
// But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
const float* fLoad = true ? &filters[pxIdxInModule * numFilters + f]
: &filters[(moduleIdx * 1 * filterPixels + pxIdxInModule) * numFilters + f];
#pragma unroll
for (int c = 0; c < 1; c++) {
shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
if (isPxInImg && isPxInModule) {
#pragma unroll
for (int c = 0; c < 1; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < 2; i++) {
prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
}
}
}
}
__syncthreads();
}
}
}
}
// Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
if (isPxInImg) {
#pragma unroll
for (int i = 0; i < 2; i++) {
if (!true || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < 1; c++) {
targets[c * imgPixels * numImages + i * 16] = prod[c][i];
}
}
}
}
}
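/*
 * Forward 3-D convolution for multi-channel images (numImgColors channels,
 * optionally split into numGroups groups). Each block computes one module for
 * a tile of 32 filters (4*8) and 32 images, streaming the filter colors two
 * at a time through shared memory. Inferred launch geometry (an assumption):
 * threadIdx.x in [0,4), threadIdx.y in [0,32), blockIdx.x = module index
 * times (numFilters/32) plus filter block, blockIdx.y = 32-image block.
 */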
__global__ void kConvolve_forward_c(float* targets, float* images, float* filters,
const int numImages, const int numFilters,
const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesZ, const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors,
const int numGroups) {
__shared__ float shFilters[4*2][4 * 8]; // pre-load 4 pixels from 4*8 filters
__shared__ float shImages[4*2][32 * 1]; // pre-load 4 pixels from 32*1 images
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize * filterSize;
const int numFilterColors = numImgColors / numGroups;
const int blocksPerModule = numFilters / (4*8);
const int moduleIdx = blockIdx.x / blocksPerModule;
const int blockFilterIdx = 8 * 4 * (blockIdx.x % blocksPerModule);
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numModules = numModulesX * numModulesY * numModulesZ;
const int blockColorIdx = numFilterColors * blockGroupIdx;
const int tidx = threadIdx.x * 32 + threadIdx.y;
const int imgLoadModPosZ = paddingStart + (moduleIdx / (numModulesX * numModulesY)) * moduleStride;
const int imgLoadModPosY = paddingStart + ((moduleIdx / numModulesX) % numModulesY) * moduleStride;
const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride;
const int shFilterLoadY = tidx / (4 * 8);
const int shFilterLoadX = tidx % (4 * 8);
const int myImgIdx = blockIdx.y * 32 * 1 + threadIdx.y;
images += blockColorIdx * imgPixels * imgStride + myImgIdx;
filters +=blockFilterIdx
+ shFilterLoadY * numFilters + shFilterLoadX;
targets += moduleIdx * numImages
+ (blockFilterIdx + threadIdx.x) * numImages * numModules
+ myImgIdx;
float prod[8][1];
#pragma unroll
for(int f = 0; f < 8; f++) {
#pragma unroll
for(int g = 0; g < 1; g++) {
prod[f][g] = 0;
}
}
// __shared__ int imgPos[]
for (int oc = 0; oc < numFilterColors; oc += 2) { // oc stands for outer color (loop)
for (int p = 0; p < filterPixels; p += 4) {
/*
* Load 4 pixels from 4*8 filters
*/
if (shFilterLoadY < 4) {
#pragma unroll
for (int p2 = 0; p2 < 4; p2 += 32/8) {
if (p + p2 + shFilterLoadY < filterPixels) {
#pragma unroll
for (int c = 0; c < 2; c++) {
shFilters[shFilterLoadY + p2 + c * 4][shFilterLoadX] = filters[((oc+c) * filterPixels + p + p2) * numFilters];
}
} else {
#pragma unroll
for (int c = 0; c < 2; c++) {
shFilters[shFilterLoadY + p2 + c * 4][shFilterLoadX] = 0;
}
}
}
}
/*
* Load 4 pixels from 32*1 images
*/
const int pixIdx = p + threadIdx.x;
if (pixIdx < filterPixels) {
const int x = imgLoadModPosX + pixIdx % filterSize;
const int y = imgLoadModPosY + (pixIdx / filterSize) % filterSize;
const int z = imgLoadModPosZ + pixIdx / (filterSize * filterSize);
if (z >= 0 && z < imgSizeZ && y >= 0 && y < imgSizeY && x >= 0 && x < imgSizeX) {
float* m = &images[imgStride * (oc * imgPixels + z * imgSizeX * imgSizeY + y * imgSizeX + x)];
#pragma unroll
for (int i = 0; i < 1; i++) {
if (!true || myImgIdx + i * 32 < numImages) {
#pragma unroll
for (int c = 0; c < 2; c++) {
shImages[threadIdx.x + c * 4][threadIdx.y + i * 32] = m[c * imgStride * imgPixels + i * 32];
}
} else {
#pragma unroll
for (int c = 0; c < 2; c++) {
shImages[threadIdx.x + c * 4][threadIdx.y + i * 32] = 0;
}
}
}
} else { // Padding
#pragma unroll
for (int i = 0; i < 1; i++) {
#pragma unroll
for (int c = 0; c < 2; c++) {
shImages[threadIdx.x + c * 4][threadIdx.y + i * 32] = 0;
}
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < 4*2; i++) {
#pragma unroll
for(int f = 0; f < 8; f++) {
#pragma unroll
for(int g = 0; g < 1; g++) {
prod[f][g] += shImages[i][g * 32 + threadIdx.y] * shFilters[i][threadIdx.x + f * 4];
}
}
}
__syncthreads();
}
}
#pragma unroll
for (int g = 0; g < 1; g++) {
if (!true || myImgIdx + g * 32 < numImages) {
#pragma unroll
for (int f = 0; f < 8; f++) {
targets[g * 32 + f * 4 * numImages * numModules] = prod[f][g];
}
}
}
}
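/*
 * Weight-gradient pass for the multi-channel 3-D convolution: correlates the
 * input images with the output gradients (hidActs) and writes partial filter
 * gradients, scaled by scaleOutputs, into `targets`. Each block owns one
 * group of `partialSum` modules (via blockIdx.x), 8 filter pixels, 8 colors
 * and 32 filters, and streams the images 32 cases at a time.
 */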
__global__ void kConvolve_weight_c(float* targets, float* images, float* hidActs,
const int numImages, const int numFilters,
const int numModulesZ, const int numModulesY, const int numModulesX,
const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int partialSum,
const float scaleOutputs) {
__shared__ float shImages[8 * 8][32]; // preload 32 cases of 4 * pixelsPerThread pixels
__shared__ float shHidActs[2 * 16][32 + 1]; // preload 32 cases of 32 hidacts
const int tidx = 16 * threadIdx.y + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
const int filterPixels = filterSize * filterSize * filterSize;
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (16 * 2);
const int outputModuleIdx = blockIdx.x / numFilterBlocks;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = 2 * 16 * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesZ * numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = (blockIdx.y / (numFilterColors/8)) * 8;
const int filterColorIdx = (blockIdx.y % (numFilterColors/8)) * 8;
const int imgColorIdx = filterColorIdx + blockGroupIdx * numFilterColors;
images += imgColorIdx * imgPixels * imgStride + loadX;
hidActs += moduleIdx * numImages
+ blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += outputModuleIdx * numFilters * filterPixels * numFilterColors
+ filterColorIdx * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float prod[8][2];
#pragma unroll
for (int c = 0; c < 8; c++) {
#pragma unroll
for (int f = 0; f < 2; f++) {
prod[c][f] = 0;
}
}
// This avoids doing a division in an inner loop
__shared__ int pxDivs[8];
if (tidx < 8) {
pxDivs[tidx] = (((blockPixelOffset + tidx) / (filterSize * filterSize)) << 16) + (((blockPixelOffset + tidx) / filterSize) % filterSize << 8) + ((blockPixelOffset + tidx) % filterSize);
}
__syncthreads();
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
const int imgLoadModPosZ = paddingStart + (m / (numModulesY * numModulesX)) * moduleStride;
const int imgLoadModPosY = paddingStart + ((m / numModulesX) % numModulesY) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
for (int caseIdx = 0; caseIdx < numImages; caseIdx += 32) {
if (loadY < 8) {
/*
* As long as 4 * 32 is divisible by 32 this will loop the right
* number of times.
*
* This will load some images from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
// #pragma unroll
for (int y = 0; y < 8; y += (16 * 8) / 32) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if (8 % (16 * 8 / 32) == 0 || y + loadY < 8) {
const int pxIdx = loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!true || caseIdx + loadX < numImages)) {
const int pxZ = imgLoadModPosZ + HI24(pxDivs[pxIdx]);
const int pxY = imgLoadModPosY + MI16(pxDivs[pxIdx]); // pixel z,y,x coords in image
const int pxX = imgLoadModPosX + LO8(pxDivs[pxIdx]);
if (pxZ >= 0 && pxZ < imgSizeZ && pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) {
const int pixIdx = (pxZ * imgSizeX * imgSizeY + pxY * imgSizeX + pxX) * imgStride; // pixel idx in image
#pragma unroll
for (int c = 0; c < 8; c++) {
shImgLoad[(y + c * 8) * 32] = images[caseIdx + c * imgPixels * imgStride + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < 8; c++) {
shImgLoad[(y + c * 8) * 32] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < 8; c++) {
shImgLoad[(y + c * 8) * 32] = 0;
}
}
}
}
}
if (loadY < 16 * 2 && (!true || caseIdx + loadX < numImages)) {
#pragma unroll
for (int y = 0; y < 16 * 2; y += (16 * 8) / 32) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((16 * 2) % (16 * 8 / 32) == 0 || y + loadY < 16 * 2) {
shHidActLoad[y * (32 + 1)] = hidActs[caseIdx + y * numImages * numModules];
}
}
}
__syncthreads();
#pragma unroll
for (int c = 0; c < 8; c++) {
#pragma unroll
for (int i = 0; i < 32; i++) {
#pragma unroll
for (int f = 0; f < 2; f++) {
prod[c][f] += shImages[threadIdx.y + c * 8][i] * shHidActs[threadIdx.x + f * 16][i];
}
}
}
__syncthreads();
}
hidActs += numImages;
}
if (blockPixelOffset + threadIdx.y < filterPixels) {
#pragma unroll
for (int f = 0; f < 2; f++) {
#pragma unroll
for (int c = 0; c < 8; c++) {
targets[c * filterPixels * numFilters + f * 16] = scaleOutputs * prod[c][f];
}
}
}
}
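/*
 * Backward (image-gradient) pass for the multi-channel 3-D convolution. Each
 * block produces one image pixel (blockIdx.y) for 16 colors (4 colors per
 * thread in Y times a 4-wide register tile) and 32 images, iterating over all
 * modules whose receptive field contains that pixel and over the filters of
 * the group 16 at a time.
 */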
__global__ void kConvolve_backward_c(float* targets, const float* hidActs, const float* filters,
const int numModulesZ, const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
const int numImgColors, const int numGroups) {
__shared__ float shFilters[4*4][16 + 1]; // TODO: perhaps reconsider this 16
__shared__ float shHidActs[16][32*1];
const int numImgBlocks = DIVUP(numImages,32*1);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 32*1;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * 4*4; // color idx globally
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int blockPixelIdx = blockIdx.y;
const int blockPixelIdxX = blockPixelIdx % imgSizeX;
const int blockPixelIdxY = (blockPixelIdx / imgSizeX) % imgSizeY;
const int blockPixelIdxZ = blockPixelIdx / (imgSizeX * imgSizeY);
const int filterPixels = filterSize * filterSize * filterSize;
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int tidx = threadIdx.y * 32 + threadIdx.x;
const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
const int filtersLoadY = tidx / 16, filtersLoadX = tidx % 16;
const int numModules = numModulesZ * numModulesY * numModulesX;
hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[4][1];
#pragma unroll
for (int c = 0; c < 4; c++) {
#pragma unroll
for (int i = 0; i < 1; i++) {
prod[c][i] = 0;
}
}
const int startZ = blockPixelIdxZ - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxZ - paddingStart - filterSize) / moduleStride;
const int endZ = MIN(numModulesZ, 1 + (blockPixelIdxZ - paddingStart) / moduleStride);
const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];
for (int mz = startZ; mz < endZ ; mz++){
const int moduleFront = paddingStart + mz * moduleStride;
const int pxInFilterZ = blockPixelIdxZ - moduleFront;
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = mz * numModulesX * numModulesY + my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
const int pxIdxInFilter = pxInFilterZ * filterSize * filterSize + pxInFilterY * filterSize + pxInFilterX;
for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < 1 * 32; i += 32) {
if (!true || blockCaseIdx + hidActLoadX + i < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 32*4/32) { // load 16 rows of 32*1 cols, 4 * 32 elements at a time.
shHidActLoad[j * 32 * 1 + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 32*4/32) { // zero 16 rows of 32*1 cols, 4 * 32 elements at a time.
shHidActLoad[j * 32 * 1 + i] = 0;
}
}
}
const float* fLoad = true ? &filters[pxIdxInFilter * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
#pragma unroll
for (int i = 0; i < 4*4; i+= 32*4/16) {
if ((4*4) % (32*4/16) == 0 || i + filtersLoadY < 4*4) {
shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
#pragma unroll
for (int c = 0; c < 4; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < 1; i++) {
prod[c][i] += shFilters[c * 4 + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * 32];
}
}
}
__syncthreads();
}
}
}
}
#pragma unroll
for (int i = 0; i < 1; i++) {
if (!true || blockCaseIdx + threadIdx.x + i * 32 < numImages) {
#pragma unroll
for (int c = 0; c < 4; c++) {
targets[c * 4 * imgPixels * numImages + i * 32] = prod[c][i];
}
}
}
}
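/*
 * Modified ("_my") variant of the backward pass: like kConvolve_backward it
 * walks 4x4 image regions per block, but each block handles exactly one image
 * color taken straight from blockIdx.x rather than a 16-color tile;
 * presumably (an inference from the indexing, not stated in the source) this
 * covers channel counts that the tiled kernels cannot.
 */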
__global__ void kConvolve_backward_my(float* targets, const float* hidActs, const float *filters,
const int numModulesZ, const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeZ, const int imgSizeY, const int imgSizeX,const int paddingStart,const int moduleStride,
const int numImgColors, const int numGroups){
__shared__ float shFilters[4*4][16+1];
__shared__ float shHidActs[16][32*1];
const int numImgBlocks = DIVUP(numImages, 32*1);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 32* 1;
const int numRegionsX = DIVUP(imgSizeX, 4);
const int numRegionsY = DIVUP(imgSizeY, 4);
const int imgColorIdx = (blockIdx.x / numImgBlocks);
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors; //0
//const int filterColorIdx = imgColorIdx % numFilterColors; //color idx within group; imgColorIdx
const int numFiltersPerGroup = numFilters / numGroups; //numFilters
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; //0
const int blockPixelIdx = blockIdx.y;
const int blockRegionIdxX = blockPixelIdx % numRegionsX;
const int blockRegionIdxY = (blockPixelIdx / numRegionsX) % numRegionsY;
const int blockRegionIdxZ = blockPixelIdx / (numRegionsX * numRegionsY);
const int blockRegionLeft = blockRegionIdxX * 4;
const int blockRegionTop = blockRegionIdxY * 4;
const int blockRegionFront = blockRegionIdxZ;
const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
const int blockPixelIdxZ = blockRegionFront;
const int blockPixelIdxY = blockRegionTop + pxYInRegion;
const int blockPixelIdxX = blockRegionLeft + pxXInRegion;
const int pxIdx = blockPixelIdxZ * imgSizeX * imgSizeY + blockPixelIdxY * imgSizeX + blockPixelIdxX;
const bool isPxInImg = blockPixelIdxZ < imgSizeZ && blockPixelIdxY < imgSizeY && blockPixelIdxX < imgSizeX;
const int numModules = numModulesZ * numModulesY * numModulesX;
const int filterPixels = filterSize * filterSize * filterSize;
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int tidx = threadIdx.y * 32 + threadIdx.x;
const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
const int /*filtersLoadY = tidx / 16,*/ filtersLoadX = tidx % 16;
hidActs += blockCaseIdx + (blockFilterIdx /*0*/ + hidActLoadY) * numImages * numModules + hidActLoadX;
filters += blockFilterIdx + (imgColorIdx) * filterPixels * numFilters + filtersLoadX;
targets += (imgColorIdx) * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[1][1];
#pragma unroll
for(int c=0; c<1 ; c++){
#pragma unroll
for(int i=0; i<1; i++){
prod[c][i] = 0;
}
}
const int startZ = blockRegionFront - paddingStart < filterSize ? 0
: 1 + (blockRegionFront - paddingStart -filterSize) / moduleStride;
const int endZ = MIN(numModulesZ, 1 + (blockRegionFront + 3 - paddingStart) / moduleStride);
const int startY = blockRegionTop - paddingStart < filterSize ? 0
: 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
const int startX = blockRegionLeft - paddingStart < filterSize ? 0
: 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);
float * shFilterLoad = &shFilters[threadIdx.y][filtersLoadX];
float * shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];
for(int mz=startZ; mz<endZ; mz++){
const int moduleFront = paddingStart + mz * moduleStride;
const int pxInModuleZ = blockPixelIdxZ - moduleFront;
for(int my = startY; my < endY; my++){
const int moduleTop = paddingStart + my * moduleStride;
const int pxInModuleY = blockPixelIdxY - moduleTop;
for(int mx = startX; mx < endX; mx++){
const int moduleIdx = mz * numModulesX * numModulesY + my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInModuleX = blockPixelIdxX - moduleLeft;
const bool isPxInModule = pxInModuleZ >= 0 && pxInModuleZ < filterSize && pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
const int pxIdxInModule = pxInModuleZ * filterSize * filterSize + pxInModuleY * filterSize + pxInModuleX;
for(int f=0; f<numFiltersPerGroup; f += 16){
const float * hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for(int i=0; i<1*32; i+=32){
if(!true || blockCaseIdx + hidActLoadX + i < numImages){
#pragma unroll
for(int j = 0; j < 16; j += 32*16/32){
shHidActLoad[j * 32 * 1 + i] = hLoad[j * numModules * numImages + i];
}
}else{
#pragma unroll
for(int j = 0; j < 16; j += 32*16/32){
shHidActLoad[j * 32 * 1 + i] = 0;
}
}
}
if(isPxInImg && isPxInModule){
const float * fLoad = true ? &filters[pxIdxInModule * numFilters +f]
:&filters[moduleIdx * numFilterColors * filterPixels * numFilters + f ];
#pragma unroll
for(int i = 0; i < 4*4; i+=16){
shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters];
}
}
__syncthreads();
if(isPxInImg && isPxInModule){
#pragma unroll
for(int c = 0; c < 1; c++){
#pragma unroll
for(int w = 0; w < 16; w++){
#pragma unroll
for(int i = 0; i < 1; i++){
prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 32];
}
}
}
}
__syncthreads();
}
}
}
}
if(isPxInImg){
#pragma unroll
for(int i = 0; i < 1; i++){
if(!true || blockCaseIdx + threadIdx.x + i*32 < numImages){
#pragma unroll
for(int c = 0; c < 1; c++){
targets[c * imgPixels *numImages + i * 32] = prod[c][i];
}
}
}
}
}
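/*
 * "Reverse" variant of the multi-channel forward pass: one filter per block,
 * with blockIdx.y jointly encoding the (module group, filter) pair,
 * threadIdx.y offsetting the module position along Z inside the block and
 * threadIdx.x indexing the images (blockIdx.x covers 32-image chunks). The
 * shared filter tile shrinks to a single column accordingly.
 */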
__global__ void kConvolve_forward_reverse(float* targets, float* images, float* filters,
const int numImages, const int numFilters,
const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesZ, const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors,
const int numGroups){
__shared__ float shFilters[4*2][1];
__shared__ float shImages[4*8][32 * 1];
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize * filterSize;
const int numFilterColors = numImgColors / numGroups;
const int blocksPerModule = numFilters ;
const int moduleIdx = blockIdx.y / blocksPerModule;
const int blockFilterIdx = (blockIdx.y % blocksPerModule);
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numModules = numModulesX * numModulesY * numModulesZ;
const int blockColorIdx = numFilterColors * blockGroupIdx;
const int tidx = threadIdx.y * 32 + threadIdx.x;
const int my_tmpZ = (moduleIdx % DIVUP(numModulesZ, 4));
const int my_tmpXY = (moduleIdx / DIVUP(numModulesZ, 4));
const int imgLoadModPosZ = (my_tmpZ + threadIdx.y) * moduleStride;//paddingStart + (moduleIdx / (numModulesX * numModulesY)) * moduleStride;
const int imgLoadModPosY = ((my_tmpXY / numModulesX) % numModulesY) * moduleStride; //paddingStart + ((moduleIdx / numModulesX) % numModulesY) * moduleStride;
const int imgLoadModPosX = (my_tmpXY % numModulesX) * moduleStride;//paddingStart + (moduleIdx % numModulesX) * moduleStride;
const int pxInModule = (imgLoadModPosZ / moduleStride) * numModulesX * numModulesY + (imgLoadModPosY / moduleStride) * numModulesX + (imgLoadModPosX / moduleStride);
const int shFilterLoadY = tidx / (4 * 8);
//const int shFilterLoadX = tidx % (4 * 8);
const int myImgIdx = blockIdx.x * 32 * 1 + threadIdx.x;
images += blockColorIdx * imgPixels * imgStride + myImgIdx;
filters +=blockFilterIdx
+ shFilterLoadY * numFilters;
targets += pxInModule * numImages
+ (blockFilterIdx) * numImages * numModules
+ myImgIdx;
float prod[1][1];
#pragma unroll
for(int f = 0; f < 1; f++) {
#pragma unroll
for(int g = 0; g < 1; g++) {
prod[f][g] = 0;
}
}
// __shared__ int imgPos[]
for (int oc = 0; oc < numFilterColors; oc += 2) { // oc stands for outer color (loop)
for (int p = 0; p < filterPixels; p += 4) {
/*
* Load 4 pixels from 4*8 filters
*/
if (shFilterLoadY < 4 && threadIdx.x < 1) {
#pragma unroll
for (int p2 = 0; p2 < 4; p2 += 32/8) {
if (p + p2 + shFilterLoadY < filterPixels) {
#pragma unroll
for (int c = 0; c < 2; c++) {
shFilters[shFilterLoadY + p2 + c * 4][0] = filters[((oc+c) * filterPixels + p + p2) * numFilters];
}
} else {
#pragma unroll
for (int c = 0; c < 2; c++) {
shFilters[shFilterLoadY + p2 + c * 4][0] = 0;
}
}
}
}
/*
* Load 4 pixels from 32*1 images
*/
for(int my_tmp = 0; my_tmp < 4; my_tmp++){
const int pixIdx = p + my_tmp;
if (pixIdx < filterPixels) {
const int x = imgLoadModPosX + pixIdx % filterSize;
const int y = imgLoadModPosY + (pixIdx / filterSize) % filterSize;
const int z = imgLoadModPosZ + pixIdx / (filterSize * filterSize);
if (z >= 0 && z < imgSizeZ && y >= 0 && y < imgSizeY && x >= 0 && x < imgSizeX) {
float* m = &images[imgStride * (oc * imgPixels + z * imgSizeX * imgSizeY + y * imgSizeX + x)];
#pragma unroll
for (int i = 0; i < 1; i++) {
if (!true || myImgIdx + i * 32 < numImages) {
#pragma unroll
for (int c = 0; c < 2; c++) {
shImages[threadIdx.y * 8 + my_tmp + c * 4][threadIdx.x + i * 32] = m[c * imgStride * imgPixels + i * 32];
}
} else {
#pragma unroll
for (int c = 0; c < 2; c++) {
shImages[threadIdx.y * 8 + my_tmp + c * 4][threadIdx.x + i * 32] = 0;
}
}
}
} else { // Padding
#pragma unroll
for (int i = 0; i < 1; i++) {
#pragma unroll
for (int c = 0; c < 2; c++) {
shImages[threadIdx.y * 8 + my_tmp + c * 4][threadIdx.x + i * 32] = 0;
}
}
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < 4*2; i++) {
#pragma unroll
for(int f = 0; f < 1; f++) {
#pragma unroll
for(int g = 0; g < 1; g++) {
prod[f][g] += shImages[i + threadIdx.y * 8][g * 32 + threadIdx.x] * shFilters[i][f];
}
}
}
__syncthreads();
}
}
#pragma unroll
for (int g = 0; g < 1; g++) {
if (!true || myImgIdx + g * 32 < numImages) {
#pragma unroll
for (int f = 0; f < 1; f++) {
targets[g * 32 + f * 4 * numImages * numModules] = prod[f][g];
}
}
}
}
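/*
 * "Reverse" variant of the weight-gradient pass: one filter per block and one
 * image case per inner iteration (caseIdx advances by 1 rather than 32), with
 * the shared hidAct tile reduced to a single element. It appears tuned for
 * configurations with very few filters; that reading is an inference from the
 * indexing, not something stated in the source.
 */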
__global__ void kConvolve_weight_reverse(float* targets, float* images, float* hidActs,
const int numImages, const int numFilters,
const int numModulesZ, const int numModulesY, const int numModulesX,
const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int partialSum,
const float scaleOutputs) {
__shared__ float shImages[8 * 8][1]; // preload 32 cases of 4 * pixelsPerThread pixels
__shared__ float shHidActs[1][1]; // preload 32 cases of 32 hidacts
const int tidx = 16 * threadIdx.y + threadIdx.x;
const int loadY = tidx / 32 /*, loadX = tidx % 32*/;
const int filterPixels = filterSize * filterSize * filterSize;
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters;
const int outputModuleIdx = blockIdx.x / numFilterBlocks;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = (blockIdx.x % numFilterBlocks);
const int numModules = numModulesZ * numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = (blockIdx.y / (numFilterColors/8)) * 8;
const int filterColorIdx = (blockIdx.y % (numFilterColors/8)) * 8;
const int imgColorIdx = filterColorIdx + blockGroupIdx * numFilterColors;
images += imgColorIdx * imgPixels * imgStride;
hidActs += moduleIdx * numImages;
targets +=filterColorIdx * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ threadIdx.y * numFilters;
float* shHidActLoad = &shHidActs[0][0];
float* shImgLoad = &shImages[loadY][0];
float prod[8][1];
#pragma unroll
for (int c = 0; c < 8; c++) {
#pragma unroll
for (int f = 0; f < 1; f++) {
prod[c][f] = 0;
}
}
// This avoids doing a division in an inner loop
__shared__ int pxDivs[8];
if(threadIdx.x < 1 && threadIdx.y < 1 )
for (int my_x = 0; my_x < 8; my_x++) {
pxDivs[my_x] = (((blockPixelOffset + my_x) / (filterSize * filterSize)) << 16) + (((blockPixelOffset + my_x) / filterSize) % filterSize << 8) + ((blockPixelOffset + my_x) % filterSize);
}
__syncthreads();
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
const int imgLoadModPosZ = paddingStart + (m / (numModulesY * numModulesX)) * moduleStride;
const int imgLoadModPosY = paddingStart + ((m / numModulesX) % numModulesY) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
for (int caseIdx = 0; caseIdx < numImages; caseIdx ++) {
if (loadY < 8) {
/*
* As long as 4 * 32 is divisible by 32 this will loop the right
* number of times.
*
* This will load some images from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
// #pragma unroll
for (int y = 0; y < 8; y += (16 * 8) / 32) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if (8 % (16 * 8 / 32) == 0 || y + loadY < 8) {
const int pxIdx = loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!true || caseIdx< numImages)) {
const int pxZ = imgLoadModPosZ + HI24(pxDivs[pxIdx]);
const int pxY = imgLoadModPosY + MI16(pxDivs[pxIdx]); // pixel z,y,x coords in image
const int pxX = imgLoadModPosX + LO8(pxDivs[pxIdx]);
if (pxZ >= 0 && pxZ < imgSizeZ && pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) {
const int pixIdx = (pxZ * imgSizeX * imgSizeY + pxY * imgSizeX + pxX) * imgStride; // pixel idx in image
#pragma unroll
for (int c = 0; c < 8; c++) {
shImgLoad[(y + c * 8)] = images[caseIdx + c * imgPixels * imgStride + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < 8; c++) {
shImgLoad[(y + c * 8) ] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < 8; c++) {
shImgLoad[(y + c * 8) ] = 0;
}
}
}
}
}
if (loadY < 16 * 2 && (!true || caseIdx < numImages)) {
#pragma unroll
for (int y = 0; y < 16 * 2; y += (16 * 64) / 32) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((16 * 2) % (16 * 8 / 32) == 0 || y < 1) {
shHidActLoad[y] = hidActs[caseIdx + y * numImages * numModules];
}
}
}
__syncthreads();
#pragma unroll
for (int c = 0; c < 8; c++) {
#pragma unroll
for (int i = 0; i < 1; i++) {
#pragma unroll
for (int f = 0; f < 1; f++) {
prod[c][f] += shImages[threadIdx.y + c * 8][i] * shHidActs[threadIdx.x][i];
}
}
}
__syncthreads();
}
hidActs += numImages;
}
if (blockPixelOffset + threadIdx.y < filterPixels) {
#pragma unroll
for (int f = 0; f < 1; f++) {
#pragma unroll
for (int c = 0; c < 8; c++) {
targets[c * filterPixels * numFilters + f * 16] = scaleOutputs * prod[c][f];
}
}
}
}
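/*
 * "Reverse" variant of the multi-channel backward (image-gradient) pass: the
 * same one-pixel-per-block layout as kConvolve_backward_c (16 colors and 32
 * images per block), but only a single hidAct row is kept in shared memory,
 * so each inner step consumes one filter's activations.
 */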
__global__ void kConvolve_backward_reverse(float* targets, const float* hidActs, const float* filters,
const int numModulesZ, const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
const int numImgColors, const int numGroups) {
__shared__ float shFilters[4*4][1]; // TODO: perhaps reconsider this 16
__shared__ float shHidActs[1][32*1];
const int numImgBlocks = DIVUP(numImages,32*1);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 32 * 1;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * 4 * 4; //0; color idx globally
const int numFilterColors = numImgColors / numGroups;
//const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
//const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int blockPixelIdx = blockIdx.y;
const int blockPixelIdxX = blockPixelIdx % imgSizeX;
const int blockPixelIdxY = (blockPixelIdx / imgSizeX) % imgSizeY;
const int blockPixelIdxZ = blockPixelIdx / (imgSizeX * imgSizeY);
const int filterPixels = filterSize * filterSize * filterSize;
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int tidx = threadIdx.y * 32 + threadIdx.x;
const int /*hidActLoadY = tidx / 32,*/ hidActLoadX = tidx % 32;
const int filtersLoadY = tidx / 16 /*, filtersLoadX = tidx % 16*/;
const int numModules = numModulesZ * numModulesY * numModulesX;
hidActs += blockCaseIdx + hidActLoadX;
filters += (filterColorIdx + filtersLoadY) * filterPixels * numFilters;
targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[4][1];
#pragma unroll
for (int c = 0; c < 4; c++) {
#pragma unroll
for (int i = 0; i < 1; i++) {
prod[c][i] = 0;
}
}
const int startZ = blockPixelIdxZ - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxZ - paddingStart - filterSize) / moduleStride;
const int endZ = MIN(numModulesZ, 1 + (blockPixelIdxZ - paddingStart) / moduleStride);
const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[filtersLoadY][0];
float* shHidActLoad = &shHidActs[0][hidActLoadX];
for (int mz = startZ; mz < endZ ; mz++){
const int moduleFront = paddingStart + mz * moduleStride;
const int pxInFilterZ = blockPixelIdxZ - moduleFront;
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = mz * numModulesX * numModulesY + my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
const int pxIdxInFilter = pxInFilterZ * filterSize * filterSize + pxInFilterY * filterSize + pxInFilterX;
for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < 1 * 32; i += 32) {
if (!true || blockCaseIdx + hidActLoadX + i < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 32*16/32) { // step is 16, so this runs once: load the single 32-wide hidAct row kept in shared memory.
shHidActLoad[j * 32 * 1 + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 32*16/32) { // step is 16, so this runs once: zero the single 32-wide hidAct row.
shHidActLoad[j * 32 * 1 + i] = 0;
}
}
}
const float* fLoad = true ? &filters[pxIdxInFilter * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
#pragma unroll
for (int i = 0; i < 4*4; i+= 32*4/16) {
if ((4*4) % (32*4/16) == 0 || i + filtersLoadY < 4*4) {
shFilterLoad[i] = fLoad[i * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
#pragma unroll
for (int c = 0; c < 4; c++) {
#pragma unroll
for (int w = 0; w < 1; w++) {
#pragma unroll
for (int i = 0; i < 1; i++) {
prod[c][i] += shFilters[c * 4 + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * 32];
}
}
}
__syncthreads();
}
}
}
}
#pragma unroll
for (int i = 0; i < 1; i++) {
if (!true || blockCaseIdx + threadIdx.x + i * 32 < numImages) {
#pragma unroll
for (int c = 0; c < 4; c++) {
targets[c * 4 * imgPixels * numImages + i * 32] = prod[c][i];
}
}
}
}
| da9dd3d954a7121aaef7b4868c0be7f5df118428.cu | // This is modification of Alex's convolution kernel extending 2d to 3d.
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define LO8(x) ((x) & 0x000000FF)
#define MI16(x) (((x) & 0x0000FFFF) >> 8)
#define HI24(x) (((x) & 0x00FFFFFF) >> 16)
#define MUL24(x,y) ((x) * (y))
#define DIVUP(x, y) (((x) + (y) - 1) / (y))
#define MIN(a, b) ((a) < (b) ? (a) : (b))
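/*
 * The LO8 / MI16 / HI24 macros above unpack a filter-pixel coordinate that
 * the weight-gradient kernels pack into a single int as (z << 16) | (y << 8) | x
 * (see the pxDivs arrays below); packing once trades per-iteration divisions
 * for bit shifts.
 */
/*
 * kSampleMultinomial draws one sample from each of n single-trial categorical
 * distributions: each thread owns one row of k probabilities (assumed to sum
 * to 1), compares the running cumulative sum against one pre-generated
 * uniform random number, and writes a one-hot row of length k to `output`
 * (all zeros if the probabilities sum to less than the draw). There is no
 * grid-stride loop, so the 1-D launch must supply at least n threads.
 */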
__global__ void kSampleMultinomial(int* output, float* distribution, float* random, int k, int n){
const unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
if(id < n){
distribution += k * id;
random += id;
output += k * id;
float preSum = 0, nowSum = 0;
for(int i = 0; i < k; i++){
nowSum += distribution[i];
output[i] = random[0] >= preSum && random[0] < nowSum;
preSum = nowSum;
}
}
}
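/*
 * Hypothetical host-side launch: a minimal sketch of invoking
 * kSampleMultinomial, assuming a 128-thread block (the wrapper name and block
 * size are assumptions). `distribution` is n rows of k probabilities,
 * `random` holds n uniform(0,1) draws generated elsewhere (e.g. with cuRAND),
 * and `output` receives n one-hot rows of length k. The grid covers all n
 * rows because the kernel has no grid-stride loop.
 */
static void sampleMultinomialSketch(int* output, float* distribution, float* random, int k, int n) {
const int threads = 128; // assumed block size
kSampleMultinomial<<<DIVUP(n, threads), threads>>>(output, distribution, random, k, n);
}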
__global__ void kExp(float* output, float* input, unsigned int numElements){
const unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for(int i = id; i < numElements; i += numThreads)
output[i] = __expf(input[i]);
}
__global__ void kDivide(float* output, float* leftInput, float* rightInput, unsigned int numElements){
const unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for(int i = id; i < numElements; i += numThreads)
output[i] = __fdividef(leftInput[i], rightInput[i]);
}
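/*
 * Hypothetical host-side usage: a minimal sketch of how the two elementwise
 * kernels above might be chained, assuming a 256-thread block and a capped
 * 1-D grid (both are assumptions; the kernels' grid-stride loops accept any
 * 1-D launch). Computes out = exp(numerator) / denominator.
 */
static void expDivideSketch(float* out, float* numerator, float* denominator, unsigned int numElements) {
const unsigned int threads = 256; // assumed block size
unsigned int blocks = DIVUP(numElements, threads);
if (blocks > 4096) blocks = 4096; // grid-stride loops cover the remainder
kExp<<<blocks, threads>>>(out, numerator, numElements);
kDivide<<<blocks, threads>>>(out, out, denominator, numElements);
}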
__global__ void kConvolve_forward(float* targets, float* images, float* filters,
const int numImages, const int numFilters,
const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesZ, const int numModulesY, const int numModulesX, const int imgStride) {
__shared__ float shFilters[4*1][4 * 4]; // pre-load 4 pixels from 4*4 filters
__shared__ float shImages[4*1][32 * 1]; // pre-load 4 pixels from 32*2 images
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize * filterSize;
const int blocksPerModule = numFilters / (4*4);
const int moduleIdx = blockIdx.x / blocksPerModule;
const int blockFilterIdx = blockIdx.x % blocksPerModule;
const int tidx = threadIdx.x * 32 + threadIdx.y;
const int imgLoadModPosZ = (moduleIdx / (numModulesX * numModulesY)) * moduleStride;
const int imgLoadModPosY = ((moduleIdx / numModulesX) % numModulesY )* moduleStride;
const int imgLoadModPosX = (moduleIdx % numModulesX) * moduleStride;
const int shFilterLoadY = tidx / (4 * 4);
const int shFilterLoadX = tidx % (4 * 4);
const int myImgIdx = blockIdx.y * 32 * 1 + threadIdx.y;
images += myImgIdx;
filters += 4 * 4 * blockFilterIdx
+ shFilterLoadY * numFilters + shFilterLoadX;
targets += moduleIdx * numImages
+ (blockFilterIdx * 4 * 4 + threadIdx.x) * numImages * numModulesZ * numModulesY * numModulesX
+ myImgIdx;
float prod[4][1];
#pragma unroll
for(int f = 0; f < 4; f++) {
#pragma unroll
for(int g = 0; g < 1; g++) {
prod[f][g] = 0;
}
}
for (int p = 0; p < filterPixels; p += 4) {
/*
* Load 4 pixels from 4*4 filters
*/
if (shFilterLoadY < 4) {
#pragma unroll
for (int p2 = 0; p2 < 4; p2 += 32/4) {
if (p + p2 + shFilterLoadY < filterPixels) {
#pragma unroll
for (int c = 0; c < 1; c++) {
shFilters[shFilterLoadY + p2 + c * 4][shFilterLoadX] = filters[(c * filterPixels + p + p2) * numFilters];
}
} else {
#pragma unroll
for (int c = 0; c < 1; c++) {
shFilters[shFilterLoadY + p2 + c * 4][shFilterLoadX] = 0;
}
}
}
}
/*
* Load 4 pixels from 32*1 images
*/
const int pixIdx = p + threadIdx.x;
if (pixIdx < filterPixels) {
const int x = paddingStart + imgLoadModPosX + pixIdx % filterSize;
const int y = paddingStart + imgLoadModPosY + (pixIdx / filterSize) % filterSize;
const int z = paddingStart + imgLoadModPosZ + pixIdx / (filterSize * filterSize);
if (z >= 0 && z < imgSizeZ && y >= 0 && y < imgSizeY && x >= 0 && x < imgSizeX) {
#pragma unroll
for (int i = 0; i < 1; i++) {
if (!true || myImgIdx + i * 32 < numImages) {
#pragma unroll
for (int c = 0; c < 1; c++) {
shImages[threadIdx.x + c * 4][threadIdx.y + i * 32] = images[imgStride * (c * imgPixels + z * imgSizeX * imgSizeY + y * imgSizeX + x) + i * 32];
}
} else {
#pragma unroll
for (int c = 0; c < 1; c++) {
shImages[threadIdx.x + c * 4][threadIdx.y + i * 32] = 0;
}
}
}
} else { // Padding
#pragma unroll
for (int i = 0; i < 1; i++) {
#pragma unroll
for (int c = 0; c < 1; c++) {
shImages[threadIdx.x + c * 4][threadIdx.y + i * 32] = 0;
}
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < 4*1; i++) {
#pragma unroll
for(int f = 0; f < 4; f++) {
#pragma unroll
for(int g = 0; g < 1; g++) {
prod[f][g] += shImages[i][g * 32 + threadIdx.y] * shFilters[i][threadIdx.x + f * 4];
}
}
}
__syncthreads();
}
#pragma unroll
for (int g = 0; g < 1; g++) {
if (!true || myImgIdx + g * 32 < numImages) {
#pragma unroll
for (int f = 0; f < 4; f++) {
targets[g * 32 + f * 4 * numImages * numModulesZ * numModulesY * numModulesX] = prod[f][g];
}
}
}
}
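/*
 * Hypothetical host-side launch: a minimal sketch of the geometry
 * kConvolve_forward appears to expect, inferred from its indexing
 * (threadIdx.x in [0,4) picks a filter slot, threadIdx.y in [0,32) picks an
 * image, blockIdx.x enumerates (module, 16-filter block) pairs, blockIdx.y
 * enumerates 32-image blocks). The wrapper name and exact dim3 values are
 * assumptions; numFilters is assumed to be a multiple of 16.
 */
static void convForwardSketch(float* targets, float* images, float* filters,
int numImages, int numFilters,
int imgSizeZ, int imgSizeY, int imgSizeX, int filterSize, int paddingStart,
int moduleStride, int numModulesZ, int numModulesY, int numModulesX, int imgStride) {
const int numModules = numModulesZ * numModulesY * numModulesX;
dim3 threads(4, 32); // 4 filter slots x 32 images per block
dim3 blocks(numModules * (numFilters / 16), DIVUP(numImages, 32));
kConvolve_forward<<<blocks, threads>>>(targets, images, filters, numImages, numFilters,
imgSizeZ, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride,
numModulesZ, numModulesY, numModulesX, imgStride);
}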
__global__ void kConvolve_weight(float* targets, float* images, float* hidActs,
const int numImages, const int numFilters,
const int numModulesZ, const int numModulesY, const int numModulesX,
const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int partialSum, const float scaleOutputs) {
__shared__ float shImages[5 * 8 * 1][32]; // preload 32 cases of 8 * 5 pixels
__shared__ float shHidActs[16][32 + 1]; // preload 32 cases of 16 hidActs
const int tidx = 16 * threadIdx.y + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
const int filterPixels = filterSize * filterSize * filterSize;
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int filterBlocksPerModule = numFilters / 16;
const int outputModuleIdx = blockIdx.x / filterBlocksPerModule;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = 16 * (blockIdx.x % filterBlocksPerModule);
// const int moduleStride = (imgSize - filterSize + 1) / numModulesX;
const int numModules = numModulesZ * numModulesY * numModulesX;
const int blockPixelOffset = blockIdx.y * 8 * 5;
images += loadX;
hidActs += moduleIdx * numImages
+ blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += (outputModuleIdx * numFilters) * filterPixels * 1
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
float* shImgLoad = &shImages[loadY][loadX];
float* shHidActLoad = &shHidActs[loadY][loadX];
float prod[1][5];
#pragma unroll
for (int c = 0; c < 1; c++) {
#pragma unroll
for (int p = 0; p < 5; p++) {
prod[c][p] = 0;
}
}
__shared__ int pxDivs[8*5];
if (tidx < 8 * 5) {
pxDivs[tidx] = (((blockPixelOffset + tidx) / (filterSize * filterSize)) << 16) + (((blockPixelOffset + tidx) / filterSize) % filterSize << 8) + ((blockPixelOffset + tidx) % filterSize);
}
__syncthreads();
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
const int imgLoadModPosZ = paddingStart + (m / (numModulesX * numModulesY)) * moduleStride;
const int imgLoadModPosY = paddingStart + ((m / numModulesX) % numModulesY) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
for (int caseIdx = 0; caseIdx < numImages; caseIdx += 32) {
if (loadY < 8 * 5) {
/*
* As long as 8 * 16 is divisible by 32 this will loop the right
* number of times.
*
* This will load some images from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
// #pragma unroll
for (int y = 0; y < 8 * 5; y += (16 * 8) / 32) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((8 * 5) % (16 * 8 / 32) == 0 || y + loadY < 8 * 5) {
const int pxIdx = loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!true || caseIdx + loadX < numImages)) {
const int pxZ = imgLoadModPosZ + HI24(pxDivs[pxIdx]);
const int pxY = imgLoadModPosY + MI16(pxDivs[pxIdx]); // pixel z,y,x coords in image
const int pxX = imgLoadModPosX + LO8(pxDivs[pxIdx]);
if (pxZ >= 0 && pxZ < imgSizeZ && pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) {
const int pixIdx = (pxZ * imgSizeX * imgSizeY + pxY * imgSizeX + pxX) * imgStride;
#pragma unroll
for (int c = 0; c < 1; c++) {
shImgLoad[(y + c * 5 * 8) * 32] = images[caseIdx + c * imgPixels * imgStride + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < 1; c++) {
shImgLoad[(y + c * 5 * 8) * 32] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < 1; c++) {
shImgLoad[(y + c * 5 * 8) * 32] = 0;
}
}
}
}
}
if (loadY < 16 && (!true || caseIdx + loadX < numImages)) {
#pragma unroll
for (int y = 0; y < 16; y += (16 * 8) / 32) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if (16 % (16 * 8 / 32) == 0 || y + loadY < 16) {
shHidActLoad[y * (32 + 1)] = hidActs[caseIdx + y * numImages * numModules];
}
}
}
__syncthreads();
#pragma unroll
for (int p = 0; p < 5; p++) {
#pragma unroll
for (int i = 0; i < 32; i++) {
#pragma unroll
for (int c = 0; c < 1; c++) {
prod[c][p] += shImages[threadIdx.y + p * 8 + c * 5 * 8][i] * shHidActs[threadIdx.x][i];
}
}
}
__syncthreads();
}
hidActs += numImages;
}
#pragma unroll
for (int p = 0; p < 5; p++) {
if (blockPixelOffset + p * 8 + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < 1; c++) {
targets[p * 8 * numFilters + c * filterPixels * numFilters] = scaleOutputs * prod[c][p];
}
}
}
}
__global__ void kConvolve_backward(float* targets, const float* hidActs, const float* filters,
const int numModulesZ, const int numModulesY, const int numModulesX,
const int numImages, const int numFilters, const int filterSize,
const int imgSizeZ, const int imgSizeY, const int imgSizeX,
const int paddingStart, const int moduleStride) {
__shared__ float shFilters[1*16][16 + 1]; // load 16 filter one time. See below.
__shared__ float shHidActs[16][16*2]; // each block deal with 16 * imgPerThread images.
const int blockCaseIdx = blockIdx.x * 16 * 2;
const int numRegionsX = DIVUP(imgSizeX, 4);
const int numRegionsY = DIVUP(imgSizeY, 4);
const int blockRegionIdx = blockIdx.y;
const int blockRegionIdxX = blockRegionIdx % numRegionsX;
const int blockRegionIdxY = (blockRegionIdx / numRegionsX) % numRegionsY;
const int blockRegionIdxZ = blockRegionIdx / (numRegionsX * numRegionsY);
const int blockRegionLeft = blockRegionIdxX * 4;
const int blockRegionTop = blockRegionIdxY * 4;
const int blockRegionFront = blockRegionIdxZ;
const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
const int pxZ = blockRegionFront;
const int pxY = blockRegionTop + pxYInRegion;
const int pxX = blockRegionLeft + pxXInRegion;
const int pxIdx = pxZ * imgSizeX * imgSizeY + pxY * imgSizeX + pxX;
const bool isPxInImg = pxZ < imgSizeZ && pxY < imgSizeY && pxX < imgSizeX;
const int numModules = numModulesZ * numModulesY * numModulesX;
const int filterPixels = filterSize * filterSize * filterSize;
const int imgPixels = imgSizeZ * imgSizeX * imgSizeY;
const int tidx = threadIdx.y * 16 + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32; // load 32 cases one time.
hidActs += blockCaseIdx + loadY * numImages * numModules + loadX;
filters += threadIdx.x;
targets += pxIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[1][2];
#pragma unroll
for (int c = 0; c < 1; c++) {
#pragma unroll
for (int i = 0; i < 2; i++) {
prod[c][i] = 0;
}
}
const int startZ = blockRegionFront - paddingStart < filterSize ? 0
: 1 + (blockRegionFront - paddingStart -filterSize) / moduleStride;
const int endZ = MIN(numModulesZ, 1 + (blockRegionFront + 3 - paddingStart) / moduleStride);
const int startY = blockRegionTop - paddingStart < filterSize ? 0
: 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
const int startX = blockRegionLeft - paddingStart < filterSize ? 0
: 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
float* shHidActLoad = &shHidActs[loadY][loadX];
for (int mz = startZ; mz < endZ; mz++){
const int moduleFront = paddingStart + mz * moduleStride;
const int pxInModuleZ = pxZ - moduleFront;
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInModuleY = pxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = mz * numModulesX * numModulesY + my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInModuleX = pxX - moduleLeft;
const bool isPxInModule = pxInModuleZ >= 0 && pxInModuleZ < filterSize && pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
const int pxIdxInModule = pxInModuleZ * filterSize * filterSize + pxInModuleY * filterSize + pxInModuleX;
for (int f = 0; f < numFilters; f += 16) { // multiply with 16 filters at a time
// Now the threads split up into half-warps, and each half-warp decides if it's interested.
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < 2 * 16; i += 32) { // IMAGES
if (!true || blockCaseIdx + i + loadX < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of 2*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * 2 + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of 2*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * 2 + i] = 0;
}
}
}
if (isPxInImg && isPxInModule) {
// This half-warp is interested, so it's going to load the weights from this module to its pixel.
// Not fully coalesced read :(
// But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
const float* fLoad = true ? &filters[pxIdxInModule * numFilters + f]
: &filters[(moduleIdx * 1 * filterPixels + pxIdxInModule) * numFilters + f];
#pragma unroll
for (int c = 0; c < 1; c++) {
shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
if (isPxInImg && isPxInModule) {
#pragma unroll
for (int c = 0; c < 1; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < 2; i++) {
prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
}
}
}
}
__syncthreads();
}
}
}
}
// Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
if (isPxInImg) {
#pragma unroll
for (int i = 0; i < 2; i++) {
if (!true || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < 1; c++) {
targets[c * imgPixels * numImages + i * 16] = prod[c][i];
}
}
}
}
}
__global__ void kConvolve_forward_c(float* targets, float* images, float* filters,
const int numImages, const int numFilters,
const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesZ, const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors,
const int numGroups) {
__shared__ float shFilters[4*2][4 * 8]; // pre-load 4 pixels from 4*8 filters
__shared__ float shImages[4*2][32 * 1]; // pre-load 4 pixels from 32*1 images
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize * filterSize;
const int numFilterColors = numImgColors / numGroups;
const int blocksPerModule = numFilters / (4*8);
const int moduleIdx = blockIdx.x / blocksPerModule;
const int blockFilterIdx = 8 * 4 * (blockIdx.x % blocksPerModule);
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numModules = numModulesX * numModulesY * numModulesZ;
const int blockColorIdx = numFilterColors * blockGroupIdx;
const int tidx = threadIdx.x * 32 + threadIdx.y;
const int imgLoadModPosZ = paddingStart + (moduleIdx / (numModulesX * numModulesY)) * moduleStride;
const int imgLoadModPosY = paddingStart + ((moduleIdx / numModulesX) % numModulesY) * moduleStride;
const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride;
const int shFilterLoadY = tidx / (4 * 8);
const int shFilterLoadX = tidx % (4 * 8);
const int myImgIdx = blockIdx.y * 32 * 1 + threadIdx.y;
images += blockColorIdx * imgPixels * imgStride + myImgIdx;
filters +=blockFilterIdx
+ shFilterLoadY * numFilters + shFilterLoadX;
targets += moduleIdx * numImages
+ (blockFilterIdx + threadIdx.x) * numImages * numModules
+ myImgIdx;
float prod[8][1];
#pragma unroll
for(int f = 0; f < 8; f++) {
#pragma unroll
for(int g = 0; g < 1; g++) {
prod[f][g] = 0;
}
}
// __shared__ int imgPos[]
for (int oc = 0; oc < numFilterColors; oc += 2) { // oc stands for outer color (loop)
for (int p = 0; p < filterPixels; p += 4) {
/*
* Load 4 pixels from 4*8 filters
*/
if (shFilterLoadY < 4) {
#pragma unroll
for (int p2 = 0; p2 < 4; p2 += 32/8) {
if (p + p2 + shFilterLoadY < filterPixels) {
#pragma unroll
for (int c = 0; c < 2; c++) {
shFilters[shFilterLoadY + p2 + c * 4][shFilterLoadX] = filters[((oc+c) * filterPixels + p + p2) * numFilters];
}
} else {
#pragma unroll
for (int c = 0; c < 2; c++) {
shFilters[shFilterLoadY + p2 + c * 4][shFilterLoadX] = 0;
}
}
}
}
/*
* Load 4 pixels from 32*1 images
*/
const int pixIdx = p + threadIdx.x;
if (pixIdx < filterPixels) {
const int x = imgLoadModPosX + pixIdx % filterSize;
const int y = imgLoadModPosY + (pixIdx / filterSize) % filterSize;
const int z = imgLoadModPosZ + pixIdx / (filterSize * filterSize);
if (z >= 0 && z < imgSizeZ && y >= 0 && y < imgSizeY && x >= 0 && x < imgSizeX) {
float* m = &images[imgStride * (oc * imgPixels + z * imgSizeX * imgSizeY + y * imgSizeX + x)];
#pragma unroll
for (int i = 0; i < 1; i++) {
if (!true || myImgIdx + i * 32 < numImages) {
#pragma unroll
for (int c = 0; c < 2; c++) {
shImages[threadIdx.x + c * 4][threadIdx.y + i * 32] = m[c * imgStride * imgPixels + i * 32];
}
} else {
#pragma unroll
for (int c = 0; c < 2; c++) {
shImages[threadIdx.x + c * 4][threadIdx.y + i * 32] = 0;
}
}
}
} else { // Padding
#pragma unroll
for (int i = 0; i < 1; i++) {
#pragma unroll
for (int c = 0; c < 2; c++) {
shImages[threadIdx.x + c * 4][threadIdx.y + i * 32] = 0;
}
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < 4*2; i++) {
#pragma unroll
for(int f = 0; f < 8; f++) {
#pragma unroll
for(int g = 0; g < 1; g++) {
prod[f][g] += shImages[i][g * 32 + threadIdx.y] * shFilters[i][threadIdx.x + f * 4];
}
}
}
__syncthreads();
}
}
#pragma unroll
for (int g = 0; g < 1; g++) {
if (!true || myImgIdx + g * 32 < numImages) {
#pragma unroll
for (int f = 0; f < 8; f++) {
targets[g * 32 + f * 4 * numImages * numModules] = prod[f][g];
}
}
}
}
__global__ void kConvolve_weight_c(float* targets, float* images, float* hidActs,
const int numImages, const int numFilters,
const int numModulesZ, const int numModulesY, const int numModulesX,
const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int partialSum,
const float scaleOutputs) {
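/*
 * Weight-gradient kernel: each block accumulates gradients for an 8-pixel by
 * 8-color slice of a 2*16 = 32 filter tile, for one partial-sum group of
 * modules. threadIdx.y selects the filter pixel and threadIdx.x the filter, so
 * each thread owns prod[8][2] (8 colors by 2 filter halves); image cases are
 * streamed through shared memory 32 at a time.
 */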
__shared__ float shImages[8 * 8][32]; // preload 32 cases of 4 * pixelsPerThread pixels
__shared__ float shHidActs[2 * 16][32 + 1]; // preload 32 cases of 32 hidacts
const int tidx = 16 * threadIdx.y + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
const int filterPixels = filterSize * filterSize * filterSize;
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (16 * 2);
const int outputModuleIdx = blockIdx.x / numFilterBlocks;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = 2 * 16 * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesZ * numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = (blockIdx.y / (numFilterColors/8)) * 8;
const int filterColorIdx = (blockIdx.y % (numFilterColors/8)) * 8;
const int imgColorIdx = filterColorIdx + blockGroupIdx * numFilterColors;
images += imgColorIdx * imgPixels * imgStride + loadX;
hidActs += moduleIdx * numImages
+ blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += outputModuleIdx * numFilters * filterPixels * numFilterColors
+ filterColorIdx * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float prod[8][2];
#pragma unroll
for (int c = 0; c < 8; c++) {
#pragma unroll
for (int f = 0; f < 2; f++) {
prod[c][f] = 0;
}
}
// This avoids doing a division in an inner loop
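// pxDivs packs each filter pixel's (z, y, x) offsets as (z << 16) | (y << 8) | x;
// the HI24/MI16/LO8 macros below undo that packing. Their definitions live in a
// header that is not part of this file; a plausible sketch (assumption) is:
// #define HI24(x) ((x) >> 16)
// #define MI16(x) (((x) >> 8) & 0xff)
// #define LO8(x) ((x) & 0xff)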
__shared__ int pxDivs[8];
if (tidx < 8) {
pxDivs[tidx] = (((blockPixelOffset + tidx) / (filterSize * filterSize)) << 16) + (((blockPixelOffset + tidx) / filterSize) % filterSize << 8) + ((blockPixelOffset + tidx) % filterSize);
}
__syncthreads();
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
const int imgLoadModPosZ = paddingStart + (m / (numModulesY * numModulesX)) * moduleStride;
const int imgLoadModPosY = paddingStart + ((m / numModulesX) % numModulesY) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
for (int caseIdx = 0; caseIdx < numImages; caseIdx += 32) {
if (loadY < 8) {
/*
* As long as 4 * 32 is divisible by 32 this will loop the right
* number of times.
*
* This will load some images from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
// #pragma unroll
for (int y = 0; y < 8; y += (16 * 8) / 32) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if (8 % (16 * 8 / 32) == 0 || y + loadY < 8) {
const int pxIdx = loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!true || caseIdx + loadX < numImages)) {
const int pxZ = imgLoadModPosZ + HI24(pxDivs[pxIdx]);
const int pxY = imgLoadModPosY + MI16(pxDivs[pxIdx]);//pxIdx / filterSize; // pixel x,y coords in image
const int pxX = imgLoadModPosX + LO8(pxDivs[pxIdx]);
if (pxZ >= 0 && pxZ < imgSizeZ && pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) {
const int pixIdx = (pxZ * imgSizeX * imgSizeY + pxY * imgSizeX + pxX) * imgStride; // pixel idx in image
#pragma unroll
for (int c = 0; c < 8; c++) {
shImgLoad[(y + c * 8) * 32] = images[caseIdx + c * imgPixels * imgStride + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < 8; c++) {
shImgLoad[(y + c * 8) * 32] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < 8; c++) {
shImgLoad[(y + c * 8) * 32] = 0;
}
}
}
}
}
if (loadY < 16 * 2 && (!true || caseIdx + loadX < numImages)) {
#pragma unroll
for (int y = 0; y < 16 * 2; y += (16 * 8) / 32) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((16 * 2) % (16 * 8 / 32) == 0 || y + loadY < 16 * 2) {
shHidActLoad[y * (32 + 1)] = hidActs[caseIdx + y * numImages * numModules];
}
}
}
__syncthreads();
#pragma unroll
for (int c = 0; c < 8; c++) {
#pragma unroll
for (int i = 0; i < 32; i++) {
#pragma unroll
for (int f = 0; f < 2; f++) {
prod[c][f] += shImages[threadIdx.y + c * 8][i] * shHidActs[threadIdx.x + f * 16][i];
}
}
}
__syncthreads();
}
hidActs += numImages;
}
if (blockPixelOffset + threadIdx.y < filterPixels) {
#pragma unroll
for (int f = 0; f < 2; f++) {
#pragma unroll
for (int c = 0; c < 8; c++) {
targets[c * filterPixels * numFilters + f * 16] = scaleOutputs * prod[c][f];
}
}
}
}
__global__ void kConvolve_backward_c(float* targets, const float* hidActs, const float* filters,
const int numModulesZ, const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
const int numImgColors, const int numGroups) {
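/*
 * Image-gradient kernel: each block computes one image pixel (blockIdx.y) for a
 * tile of 4*4 = 16 input colors and 32*1 images (both decoded from blockIdx.x).
 * threadIdx.x indexes the image and threadIdx.y the color, so each thread
 * accumulates prod[4][1]; the loops below visit every module whose receptive
 * field covers the pixel, 16 filters at a time.
 */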
__shared__ float shFilters[4*4][16 + 1]; // TODO: perhaps reconsider this 16
__shared__ float shHidActs[16][32*1];
const int numImgBlocks = DIVUP(numImages,32*1);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 32*1;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * 4*4; // color idx globally
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int blockPixelIdx = blockIdx.y;
const int blockPixelIdxX = blockPixelIdx % imgSizeX;
const int blockPixelIdxY = (blockPixelIdx / imgSizeX) % imgSizeY;
const int blockPixelIdxZ = blockPixelIdx / (imgSizeX * imgSizeY);
const int filterPixels = filterSize * filterSize * filterSize;
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int tidx = threadIdx.y * 32 + threadIdx.x;
const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
const int filtersLoadY = tidx / 16, filtersLoadX = tidx % 16;
const int numModules = numModulesZ * numModulesY * numModulesX;
hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[4][1];
#pragma unroll
for (int c = 0; c < 4; c++) {
#pragma unroll
for (int i = 0; i < 1; i++) {
prod[c][i] = 0;
}
}
const int startZ = blockPixelIdxZ - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxZ - paddingStart - filterSize) / moduleStride;
const int endZ = MIN(numModulesZ, 1 + (blockPixelIdxZ - paddingStart) / moduleStride);
const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];
for (int mz = startZ; mz < endZ ; mz++){
const int moduleFront = paddingStart + mz * moduleStride;
const int pxInFilterZ = blockPixelIdxZ - moduleFront;
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = mz * numModulesX * numModulesY + my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
const int pxIdxInFilter = pxInFilterZ * filterSize * filterSize + pxInFilterY * filterSize + pxInFilterX;
for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < 1 * 32; i += 32) {
if (!true || blockCaseIdx + hidActLoadX + i < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 32*4/32) { // load 16 rows of 1*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 32 * 1 + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 32*4/32) { // load 16 rows of 1*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 32 * 1 + i] = 0;
}
}
}
const float* fLoad = true ? &filters[pxIdxInFilter * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
#pragma unroll
for (int i = 0; i < 4*4; i+= 32*4/16) {
if ((4*4) % (32*4/16) == 0 || i + filtersLoadY < 4*4) {
shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
#pragma unroll
for (int c = 0; c < 4; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < 1; i++) {
prod[c][i] += shFilters[c * 4 + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * 32];
}
}
}
__syncthreads();
}
}
}
}
#pragma unroll
for (int i = 0; i < 1; i++) {
if (!true || blockCaseIdx + threadIdx.x + i * 32 < numImages) {
#pragma unroll
for (int c = 0; c < 4; c++) {
targets[c * 4 * imgPixels * numImages + i * 32] = prod[c][i];
}
}
}
}
__global__ void kConvolve_backward_my(float* targets, const float* hidActs, const float *filters,
const int numModulesZ, const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeZ, const int imgSizeY, const int imgSizeX,const int paddingStart,const int moduleStride,
const int numImgColors, const int numGroups){
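/*
 * Variant of the image-gradient kernel: each block handles a single input
 * color, 32 images, and a 4x4 pixel region of one z-slice (blockIdx.y selects
 * the region). threadIdx.y picks the pixel inside the region and threadIdx.x
 * the image, so each thread accumulates a single prod[1][1] value.
 */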
__shared__ float shFilters[4*4][16+1];
__shared__ float shHidActs[16][32*1];
const int numImgBlocks = DIVUP(numImages, 32*1);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 32* 1;
const int numRegionsX = DIVUP(imgSizeX, 4);
const int numRegionsY = DIVUP(imgSizeY, 4);
const int imgColorIdx = (blockIdx.x / numImgBlocks);
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors; //0
//const int filterColorIdx = imgColorIdx % numFilterColors; //color idx within group; imgColorIdx
const int numFiltersPerGroup = numFilters / numGroups; //numFilters
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; //0
const int blockPixelIdx = blockIdx.y;
const int blockRegionIdxX = blockPixelIdx % numRegionsX;
const int blockRegionIdxY = (blockPixelIdx / numRegionsX) % numRegionsY;
const int blockRegionIdxZ = blockPixelIdx / (numRegionsX * numRegionsY);
const int blockRegionLeft = blockRegionIdxX * 4;
const int blockRegionTop = blockRegionIdxY * 4;
const int blockRegionFront = blockRegionIdxZ;
const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
const int blockPixelIdxZ = blockRegionFront;
const int blockPixelIdxY = blockRegionTop + pxYInRegion;
const int blockPixelIdxX = blockRegionLeft + pxXInRegion;
const int pxIdx = blockPixelIdxZ * imgSizeX * imgSizeY + blockPixelIdxY * imgSizeX + blockPixelIdxX;
const bool isPxInImg = blockPixelIdxZ < imgSizeZ && blockPixelIdxY < imgSizeY && blockPixelIdxX < imgSizeX;
const int numModules = numModulesZ * numModulesY * numModulesX;
const int filterPixels = filterSize * filterSize * filterSize;
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int tidx = threadIdx.y * 32 + threadIdx.x;
const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
const int /*filtersLoadY = tidx / 16,*/ filtersLoadX = tidx % 16;
hidActs += blockCaseIdx + (blockFilterIdx /*0*/ + hidActLoadY) * numImages * numModules + hidActLoadX;
filters += blockFilterIdx + (imgColorIdx) * filterPixels * numFilters + filtersLoadX;
targets += (imgColorIdx) * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[1][1];
#pragma unroll
for(int c=0; c<1 ; c++){
#pragma unroll
for(int i=0; i<1; i++){
prod[c][i] = 0;
}
}
const int startZ = blockRegionFront - paddingStart < filterSize ? 0
: 1 + (blockRegionFront - paddingStart -filterSize) / moduleStride;
const int endZ = MIN(numModulesZ, 1 + (blockRegionFront + 3 - paddingStart) / moduleStride);
const int startY = blockRegionTop - paddingStart < filterSize ? 0
: 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
const int startX = blockRegionLeft - paddingStart < filterSize ? 0
: 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);
float * shFilterLoad = &shFilters[threadIdx.y][filtersLoadX];
float * shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];
for(int mz=startZ; mz<endZ; mz++){
const int moduleFront = paddingStart + mz * moduleStride;
const int pxInModuleZ = blockPixelIdxZ - moduleFront;
for(int my = startY; my < endY; my++){
const int moduleTop = paddingStart + my * moduleStride;
const int pxInModuleY = blockPixelIdxY - moduleTop;
for(int mx = startX; mx < endX; mx++){
const int moduleIdx = mz * numModulesX * numModulesY + my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInModuleX = blockPixelIdxX - moduleLeft;
const bool isPxInModule = pxInModuleZ >= 0 && pxInModuleZ < filterSize && pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
const int pxIdxInModule = pxInModuleZ * filterSize * filterSize + pxInModuleY * filterSize + pxInModuleX;
for(int f=0; f<numFiltersPerGroup; f += 16){
const float * hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for(int i=0; i<1*32; i+=32){
if(!true || blockCaseIdx + hidActLoadX + i < numImages){
#pragma unroll
for(int j = 0; j < 16; j += 32*16/32){
shHidActLoad[j * 32 * 1 + i] = hLoad[j * numModules * numImages + i];
}
}else{
#pragma unroll
for(int j = 0; j < 16; j += 32*16/32){
shHidActLoad[j * 32 * 1 + i] = 0;
}
}
}
if(isPxInImg && isPxInModule){
const float * fLoad = true ? &filters[pxIdxInModule * numFilters +f]
:&filters[moduleIdx * numFilterColors * filterPixels * numFilters + f ];
#pragma unroll
for(int i = 0; i < 4*4; i+=16){
shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters];
}
}
__syncthreads();
if(isPxInImg && isPxInModule){
#pragma unroll
for(int c = 0; c < 1; c++){
#pragma unroll
for(int w = 0; w < 16; w++){
#pragma unroll
for(int i = 0; i < 1; i++){
prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 32];
}
}
}
}
__syncthreads();
}
}
}
}
if(isPxInImg){
#pragma unroll
for(int i = 0; i < 1; i++){
if(!true || blockCaseIdx + threadIdx.x + i*32 < numImages){
#pragma unroll
for(int c = 0; c < 1; c++){
targets[c * imgPixels *numImages + i * 32] = prod[c][i];
}
}
}
}
}
__global__ void kConvolve_forward_reverse(float* targets, float* images, float* filters,
const int numImages, const int numFilters,
const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesZ, const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors,
const int numGroups){
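/*
 * "Reverse" forward kernel: blockIdx.x tiles the images (32 per block) and
 * blockIdx.y encodes (module group, filter), one filter per block. threadIdx.y
 * steps through 4 consecutive z-module positions of that group and threadIdx.x
 * indexes the image, so each thread produces a single output value.
 */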
__shared__ float shFilters[4*2][1];
__shared__ float shImages[4*8][32 * 1];
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize * filterSize;
const int numFilterColors = numImgColors / numGroups;
const int blocksPerModule = numFilters ;
const int moduleIdx = blockIdx.y / blocksPerModule;
const int blockFilterIdx = (blockIdx.y % blocksPerModule);
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numModules = numModulesX * numModulesY * numModulesZ;
const int blockColorIdx = numFilterColors * blockGroupIdx;
const int tidx = threadIdx.y * 32 + threadIdx.x;
const int my_tmpZ = (moduleIdx % DIVUP(numModulesZ, 4));
const int my_tmpXY = (moduleIdx / DIVUP(numModulesZ, 4));
const int imgLoadModPosZ = (my_tmpZ + threadIdx.y) * moduleStride;//paddingStart + (moduleIdx / (numModulesX * numModulesY)) * moduleStride;
const int imgLoadModPosY = ((my_tmpXY / numModulesX) % numModulesY) * moduleStride; //paddingStart + ((moduleIdx / numModulesX) % numModulesY) * moduleStride;
const int imgLoadModPosX = (my_tmpXY % numModulesX) * moduleStride;//paddingStart + (moduleIdx % numModulesX) * moduleStride;
const int pxInModule = (imgLoadModPosZ / moduleStride) * numModulesX * numModulesY + (imgLoadModPosY / moduleStride) * numModulesX + (imgLoadModPosX / moduleStride);
const int shFilterLoadY = tidx / (4 * 8);
//const int shFilterLoadX = tidx % (4 * 8);
const int myImgIdx = blockIdx.x * 32 * 1 + threadIdx.x;
images += blockColorIdx * imgPixels * imgStride + myImgIdx;
filters +=blockFilterIdx
+ shFilterLoadY * numFilters;
targets += pxInModule * numImages
+ (blockFilterIdx) * numImages * numModules
+ myImgIdx;
float prod[1][1];
#pragma unroll
for(int f = 0; f < 1; f++) {
#pragma unroll
for(int g = 0; g < 1; g++) {
prod[f][g] = 0;
}
}
// __shared__ int imgPos[]
for (int oc = 0; oc < numFilterColors; oc += 2) { // oc stands for outer color (loop)
for (int p = 0; p < filterPixels; p += 4) {
/*
* Load 4 pixels from 4*8 filters
*/
if (shFilterLoadY < 4 && threadIdx.x < 1) {
#pragma unroll
for (int p2 = 0; p2 < 4; p2 += 32/8) {
if (p + p2 + shFilterLoadY < filterPixels) {
#pragma unroll
for (int c = 0; c < 2; c++) {
shFilters[shFilterLoadY + p2 + c * 4][0] = filters[((oc+c) * filterPixels + p + p2) * numFilters];
}
} else {
#pragma unroll
for (int c = 0; c < 2; c++) {
shFilters[shFilterLoadY + p2 + c * 4][0] = 0;
}
}
}
}
/*
* Load 4 pixels from 32*1 images
*/
for(int my_tmp = 0; my_tmp < 4; my_tmp++){
const int pixIdx = p + my_tmp;
if (pixIdx < filterPixels) {
const int x = imgLoadModPosX + pixIdx % filterSize;
const int y = imgLoadModPosY + (pixIdx / filterSize) % filterSize;
const int z = imgLoadModPosZ + pixIdx / (filterSize * filterSize);
if (z >= 0 && z < imgSizeZ && y >= 0 && y < imgSizeY && x >= 0 && x < imgSizeX) {
float* m = &images[imgStride * (oc * imgPixels + z * imgSizeX * imgSizeY + y * imgSizeX + x)];
#pragma unroll
for (int i = 0; i < 1; i++) {
if (!true || myImgIdx + i * 32 < numImages) {
#pragma unroll
for (int c = 0; c < 2; c++) {
shImages[threadIdx.y * 8 + my_tmp + c * 4][threadIdx.x + i * 32] = m[c * imgStride * imgPixels + i * 32];
}
} else {
#pragma unroll
for (int c = 0; c < 2; c++) {
shImages[threadIdx.y * 8 + my_tmp + c * 4][threadIdx.x + i * 32] = 0;
}
}
}
} else { // Padding
#pragma unroll
for (int i = 0; i < 1; i++) {
#pragma unroll
for (int c = 0; c < 2; c++) {
shImages[threadIdx.y * 8 + my_tmp + c * 4][threadIdx.x + i * 32] = 0;
}
}
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < 4*2; i++) {
#pragma unroll
for(int f = 0; f < 1; f++) {
#pragma unroll
for(int g = 0; g < 1; g++) {
prod[f][g] += shImages[i + threadIdx.y * 8][g * 32 + threadIdx.x] * shFilters[i][f];
}
}
}
__syncthreads();
}
}
#pragma unroll
for (int g = 0; g < 1; g++) {
if (!true || myImgIdx + g * 32 < numImages) {
#pragma unroll
for (int f = 0; f < 1; f++) {
targets[g * 32 + f * 4 * numImages * numModules] = prod[f][g];
}
}
}
}
__global__ void kConvolve_weight_reverse(float* targets, float* images, float* hidActs,
const int numImages, const int numFilters,
const int numModulesZ, const int numModulesY, const int numModulesX,
const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int partialSum,
const float scaleOutputs) {
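/*
 * Reduced ("reverse") weight-gradient kernel: blockIdx.x encodes (partial-sum
 * module group, filter) with one filter per block, blockIdx.y selects an
 * 8-pixel by 8-color slice, and image cases are streamed one at a time through
 * 1-element shared-memory tiles instead of 32-wide ones.
 */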
__shared__ float shImages[8 * 8][1]; // preload 32 cases of 4 * pixelsPerThread pixels
__shared__ float shHidActs[1][1]; // preload 32 cases of 32 hidacts
const int tidx = 16 * threadIdx.y + threadIdx.x;
const int loadY = tidx / 32 /*, loadX = tidx % 32*/;
const int filterPixels = filterSize * filterSize * filterSize;
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters;
const int outputModuleIdx = blockIdx.x / numFilterBlocks;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = (blockIdx.x % numFilterBlocks);
const int numModules = numModulesZ * numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = (blockIdx.y / (numFilterColors/8)) * 8;
const int filterColorIdx = (blockIdx.y % (numFilterColors/8)) * 8;
const int imgColorIdx = filterColorIdx + blockGroupIdx * numFilterColors;
images += imgColorIdx * imgPixels * imgStride;
hidActs += moduleIdx * numImages;
targets +=filterColorIdx * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ threadIdx.y * numFilters;
float* shHidActLoad = &shHidActs[0][0];
float* shImgLoad = &shImages[loadY][0];
float prod[8][1];
#pragma unroll
for (int c = 0; c < 8; c++) {
#pragma unroll
for (int f = 0; f < 1; f++) {
prod[c][f] = 0;
}
}
// This avoids doing a division in an inner loop
__shared__ int pxDivs[8];
if(threadIdx.x < 1 && threadIdx.y < 1 )
for (int my_x = 0; my_x < 8; my_x++) {
pxDivs[my_x] = (((blockPixelOffset + my_x) / (filterSize * filterSize)) << 16) + (((blockPixelOffset + my_x) / filterSize) % filterSize << 8) + ((blockPixelOffset + my_x) % filterSize);
}
__syncthreads();
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
const int imgLoadModPosZ = paddingStart + (m / (numModulesY * numModulesX)) * moduleStride;
const int imgLoadModPosY = paddingStart + ((m / numModulesX) % numModulesY) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
for (int caseIdx = 0; caseIdx < numImages; caseIdx ++) {
if (loadY < 8) {
/*
* As long as 4 * 32 is divisible by 32 this will loop the right
* number of times.
*
* This will load some images from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
// #pragma unroll
for (int y = 0; y < 8; y += (16 * 8) / 32) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if (8 % (16 * 8 / 32) == 0 || y + loadY < 8) {
const int pxIdx = loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!true || caseIdx< numImages)) {
const int pxZ = imgLoadModPosZ + HI24(pxDivs[pxIdx]);
const int pxY = imgLoadModPosY + MI16(pxDivs[pxIdx]);//pxIdx / filterSize; // pixel x,y coords in image
const int pxX = imgLoadModPosX + LO8(pxDivs[pxIdx]);
if (pxZ >= 0 && pxZ < imgSizeZ && pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) {
const int pixIdx = (pxZ * imgSizeX * imgSizeY + pxY * imgSizeX + pxX) * imgStride; // pixel idx in image
#pragma unroll
for (int c = 0; c < 8; c++) {
shImgLoad[(y + c * 8)] = images[caseIdx + c * imgPixels * imgStride + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < 8; c++) {
shImgLoad[(y + c * 8) ] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < 8; c++) {
shImgLoad[(y + c * 8) ] = 0;
}
}
}
}
}
if (loadY < 16 * 2 && (!true || caseIdx < numImages)) {
#pragma unroll
for (int y = 0; y < 16 * 2; y += (16 * 64) / 32) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((16 * 2) % (16 * 8 / 32) == 0 || y < 1) {
shHidActLoad[y] = hidActs[caseIdx + y * numImages * numModules];
}
}
}
__syncthreads();
#pragma unroll
for (int c = 0; c < 8; c++) {
#pragma unroll
for (int i = 0; i < 1; i++) {
#pragma unroll
for (int f = 0; f < 1; f++) {
prod[c][f] += shImages[threadIdx.y + c * 8][i] * shHidActs[threadIdx.x][i];
}
}
}
__syncthreads();
}
hidActs += numImages;
}
if (blockPixelOffset + threadIdx.y < filterPixels) {
#pragma unroll
for (int f = 0; f < 1; f++) {
#pragma unroll
for (int c = 0; c < 8; c++) {
targets[c * filterPixels * numFilters + f * 16] = scaleOutputs * prod[c][f];
}
}
}
}
__global__ void kConvolve_backward_reverse(float* targets, const float* hidActs, const float* filters,
const int numModulesZ, const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
const int numImgColors, const int numGroups) {
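/*
 * Reduced ("reverse") image-gradient kernel: same block mapping as
 * kConvolve_backward_c (one pixel per blockIdx.y, a 4*4-color by 32-image tile
 * per blockIdx.x), but the shared-memory tiles collapse to a single filter
 * column, so the inner product only touches one filter per 16-filter step.
 */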
__shared__ float shFilters[4*4][1]; // TODO: perhaps reconsider this 16
__shared__ float shHidActs[1][32*1];
const int numImgBlocks = DIVUP(numImages,32*1);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 32 * 1;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * 4 * 4; //0; color idx globally
const int numFilterColors = numImgColors / numGroups;
//const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
//const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int blockPixelIdx = blockIdx.y;
const int blockPixelIdxX = blockPixelIdx % imgSizeX;
const int blockPixelIdxY = (blockPixelIdx / imgSizeX) % imgSizeY;
const int blockPixelIdxZ = blockPixelIdx / (imgSizeX * imgSizeY);
const int filterPixels = filterSize * filterSize * filterSize;
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int tidx = threadIdx.y * 32 + threadIdx.x;
const int /*hidActLoadY = tidx / 32,*/ hidActLoadX = tidx % 32;
const int filtersLoadY = tidx / 16 /*, filtersLoadX = tidx % 16*/;
const int numModules = numModulesZ * numModulesY * numModulesX;
hidActs += blockCaseIdx + hidActLoadX;
filters += (filterColorIdx + filtersLoadY) * filterPixels * numFilters;
targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[4][1];
#pragma unroll
for (int c = 0; c < 4; c++) {
#pragma unroll
for (int i = 0; i < 1; i++) {
prod[c][i] = 0;
}
}
const int startZ = blockPixelIdxZ - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxZ - paddingStart - filterSize) / moduleStride;
const int endZ = MIN(numModulesZ, 1 + (blockPixelIdxZ - paddingStart) / moduleStride);
const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[filtersLoadY][0];
float* shHidActLoad = &shHidActs[0][hidActLoadX];
for (int mz = startZ; mz < endZ ; mz++){
const int moduleFront = paddingStart + mz * moduleStride;
const int pxInFilterZ = blockPixelIdxZ - moduleFront;
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = mz * numModulesX * numModulesY + my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
const int pxIdxInFilter = pxInFilterZ * filterSize * filterSize + pxInFilterY * filterSize + pxInFilterX;
for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < 1 * 32; i += 32) {
if (!true || blockCaseIdx + hidActLoadX + i < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 32*16/32) { // load 16 rows of 1*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 32 * 1 + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 32*16/32) { // load 16 rows of 1*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 32 * 1 + i] = 0;
}
}
}
const float* fLoad = true ? &filters[pxIdxInFilter * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
#pragma unroll
for (int i = 0; i < 4*4; i+= 32*4/16) {
if ((4*4) % (32*4/16) == 0 || i + filtersLoadY < 4*4) {
shFilterLoad[i] = fLoad[i * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
#pragma unroll
for (int c = 0; c < 4; c++) {
#pragma unroll
for (int w = 0; w < 1; w++) {
#pragma unroll
for (int i = 0; i < 1; i++) {
prod[c][i] += shFilters[c * 4 + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * 32];
}
}
}
__syncthreads();
}
}
}
}
#pragma unroll
for (int i = 0; i < 1; i++) {
if (!true || blockCaseIdx + threadIdx.x + i * 32 < numImages) {
#pragma unroll
for (int c = 0; c < 4; c++) {
targets[c * 4 * imgPixels * numImages + i * 32] = prod[c][i];
}
}
}
}
|
f6781e040b38916d047338d4b6d1f5e2e2bda4d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "stdafx.h"
#include "common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <cmath>
/*
* Display a variety of information on the first CUDA device in this system,
* including driver version, runtime version, compute capability, bytes of
* global memory, etc.
*/
int check_device_info()
{
// int deviceCount = 0;
// hipGetDeviceCount(&deviceCount);
//
// if (deviceCount == 0)
// {
// printf("There are no available device(s) that support CUDA\n");
// }
// else
// {
// printf("Detected %d CUDA Capable device(s)\n", deviceCount);
// }
//
// int dev = 0, driverVersion = 0, runtimeVersion = 0;
// CHECK(hipSetDevice(dev));
// hipDeviceProp_t deviceProp;
// CHECK(hipGetDeviceProperties(&deviceProp, dev));
// printf("Device %d: \"%s\"\n", dev, deviceProp.name);
//
// hipDriverGetVersion(&driverVersion);
// hipRuntimeGetVersion(&runtimeVersion);
// printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n",
// driverVersion / 1000, (driverVersion % 100) / 10,
// runtimeVersion / 1000, (runtimeVersion % 100) / 10);
// printf(" CUDA Capability Major/Minor version number: %d.%d\n",
// deviceProp.major, deviceProp.minor);
// printf(" Total amount of global memory: %.2f GBytes (%llu "
// "bytes)\n", (float)deviceProp.totalGlobalMem / pow(1024.0, 3),
// (unsigned long long)deviceProp.totalGlobalMem);
// printf(" GPU Clock rate: %.0f MHz (%0.2f "
// "GHz)\n", deviceProp.clockRate * 1e-3f,
// deviceProp.clockRate * 1e-6f);
// printf(" Memory Clock rate: %.0f Mhz\n",
// deviceProp.memoryClockRate * 1e-3f);
// printf(" Memory Bus Width: %d-bit\n",
// deviceProp.memoryBusWidth);
//
// if (deviceProp.l2CacheSize)
// {
// printf(" L2 Cache Size: %d bytes\n",
// deviceProp.l2CacheSize);
// }
//
// printf(" Max Texture Dimension Size (x,y,z) 1D=(%d), "
// "2D=(%d,%d), 3D=(%d,%d,%d)\n", deviceProp.maxTexture1D,
// deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1],
// deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1],
// deviceProp.maxTexture3D[2]);
// printf(" Max Layered Texture Size (dim) x layers 1D=(%d) x %d, "
// "2D=(%d,%d) x %d\n", deviceProp.maxTexture1DLayered[0],
// deviceProp.maxTexture1DLayered[1], deviceProp.maxTexture2DLayered[0],
// deviceProp.maxTexture2DLayered[1],
// deviceProp.maxTexture2DLayered[2]);
// printf(" Total amount of constant memory: %lu bytes\n",
// deviceProp.totalConstMem);
// printf(" Total amount of shared memory per block: %lu bytes\n",
// deviceProp.sharedMemPerBlock);
// printf(" Total number of registers available per block: %d\n",
// deviceProp.regsPerBlock);
// printf(" Warp size: %d\n",
// deviceProp.warpSize);
// printf(" Maximum number of threads per multiprocessor: %d\n",
// deviceProp.maxThreadsPerMultiProcessor);
// printf(" Maximum number of threads per block: %d\n",
// deviceProp.maxThreadsPerBlock);
// printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
// deviceProp.maxThreadsDim[0],
// deviceProp.maxThreadsDim[1],
// deviceProp.maxThreadsDim[2]);
// printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
// deviceProp.maxGridSize[0],
// deviceProp.maxGridSize[1],
// deviceProp.maxGridSize[2]);
// printf(" Maximum memory pitch: %lu bytes\n",
// deviceProp.memPitch);
return 0;
}
| f6781e040b38916d047338d4b6d1f5e2e2bda4d1.cu | #include "stdafx.h"
#include "common.h"
#include <cuda_runtime.h>
#include <stdio.h>
#include <cmath>
/*
* Display a variety of information on the first CUDA device in this system,
* including driver version, runtime version, compute capability, bytes of
* global memory, etc.
*/
int check_device_info()
{
// int deviceCount = 0;
// cudaGetDeviceCount(&deviceCount);
//
// if (deviceCount == 0)
// {
// printf("There are no available device(s) that support CUDA\n");
// }
// else
// {
// printf("Detected %d CUDA Capable device(s)\n", deviceCount);
// }
//
// int dev = 0, driverVersion = 0, runtimeVersion = 0;
// CHECK(cudaSetDevice(dev));
// cudaDeviceProp deviceProp;
// CHECK(cudaGetDeviceProperties(&deviceProp, dev));
// printf("Device %d: \"%s\"\n", dev, deviceProp.name);
//
// cudaDriverGetVersion(&driverVersion);
// cudaRuntimeGetVersion(&runtimeVersion);
// printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n",
// driverVersion / 1000, (driverVersion % 100) / 10,
// runtimeVersion / 1000, (runtimeVersion % 100) / 10);
// printf(" CUDA Capability Major/Minor version number: %d.%d\n",
// deviceProp.major, deviceProp.minor);
// printf(" Total amount of global memory: %.2f GBytes (%llu "
// "bytes)\n", (float)deviceProp.totalGlobalMem / pow(1024.0, 3),
// (unsigned long long)deviceProp.totalGlobalMem);
// printf(" GPU Clock rate: %.0f MHz (%0.2f "
// "GHz)\n", deviceProp.clockRate * 1e-3f,
// deviceProp.clockRate * 1e-6f);
// printf(" Memory Clock rate: %.0f Mhz\n",
// deviceProp.memoryClockRate * 1e-3f);
// printf(" Memory Bus Width: %d-bit\n",
// deviceProp.memoryBusWidth);
//
// if (deviceProp.l2CacheSize)
// {
// printf(" L2 Cache Size: %d bytes\n",
// deviceProp.l2CacheSize);
// }
//
// printf(" Max Texture Dimension Size (x,y,z) 1D=(%d), "
// "2D=(%d,%d), 3D=(%d,%d,%d)\n", deviceProp.maxTexture1D,
// deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1],
// deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1],
// deviceProp.maxTexture3D[2]);
// printf(" Max Layered Texture Size (dim) x layers 1D=(%d) x %d, "
// "2D=(%d,%d) x %d\n", deviceProp.maxTexture1DLayered[0],
// deviceProp.maxTexture1DLayered[1], deviceProp.maxTexture2DLayered[0],
// deviceProp.maxTexture2DLayered[1],
// deviceProp.maxTexture2DLayered[2]);
// printf(" Total amount of constant memory: %lu bytes\n",
// deviceProp.totalConstMem);
// printf(" Total amount of shared memory per block: %lu bytes\n",
// deviceProp.sharedMemPerBlock);
// printf(" Total number of registers available per block: %d\n",
// deviceProp.regsPerBlock);
// printf(" Warp size: %d\n",
// deviceProp.warpSize);
// printf(" Maximum number of threads per multiprocessor: %d\n",
// deviceProp.maxThreadsPerMultiProcessor);
// printf(" Maximum number of threads per block: %d\n",
// deviceProp.maxThreadsPerBlock);
// printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
// deviceProp.maxThreadsDim[0],
// deviceProp.maxThreadsDim[1],
// deviceProp.maxThreadsDim[2]);
// printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
// deviceProp.maxGridSize[0],
// deviceProp.maxGridSize[1],
// deviceProp.maxGridSize[2]);
// printf(" Maximum memory pitch: %lu bytes\n",
// deviceProp.memPitch);
return 0;
}
|
2d7aa07d9ec9f7880760aa29a2d18f08f39404b3.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <stdio.h>
#include <stdlib.h>
#include <hipsparse.h>
#include <hip/hip_runtime.h>
/*
* This is an example demonstrating usage of the cuSPARSE library to perform a
* sparse matrix-vector multiplication on randomly generated data.
*/
/*
* M = # of rows
* N = # of columns
*/
int M = 5;
int N = 5;
/*
* Generate a vector of length N with random single-precision floating-point
* values between 0 and 100.
*/
void generate_random_vector(int N, float **outX)
{
int i;
double rMax = (double)RAND_MAX;
float *X = (float *)malloc(sizeof(float) * N);
for (i = 0; i < N; i++)
{
int r = rand();
double dr = (double)r;
X[i] = (dr / rMax) * 100.0;
}
*outX = X;
}
void compute_coefficients(int N, int M, float* A, float* y)
{
for (int i = 0; i < N; i++)
{
float s=0.0f;
for (int j = 0; j < M; j++)
{
s=s+A[j * N + i];
}
y[i]=s;
}
}
/*
* Generate random dense matrix A in column-major order, while rounding some
* elements down to zero to ensure it is sparse.
*/
int generate_random_dense_matrix(int M, int N, float **outA)
{
int i, j;
double rMax = (double)RAND_MAX;
float *A = (float *)malloc(sizeof(float) * M * N);
//hipMemset(A,0,M*N*sizeof(float));
int totalNnz = 0;
for (j = 0; j < N; j++)
{
for (i = 0; i < M; i++)
{
int r = rand();
float *curr = A + (j * M + i);
if (r % 3 > 0)
{
*curr = 0.0f;
}
else
{
double dr = (double)r;
*curr = (dr / rMax) * 5.0;
}
//if(i==j) *curr=1.0f;
if (*curr != 0.0f)
{
totalNnz++;
}
}
}
*outA = A;
return totalNnz;
}
void print_partial_matrix(float *M, int nrows, int ncols, int max_row,
int max_col)
{
int row, col;
for (row = 0; row < max_row; row++)
{
for (col = 0; col < max_col; col++)
{
printf("%2.2f ", M[row * ncols + col]);
}
printf("...\n");
}
printf("...\n");
}
int main(int argc, char **argv)
{
int row;
float *A, *dA;
int *dNnzPerRow;
float *dCsrValA;
int *dCsrRowPtrA;
int *dCsrColIndA;
int totalNnz;
float alpha = 1.0f;
//float beta = 0.0f;
float *dX, *X;
float *dY, *Y;
int structural_zero;
int numerical_zero;
hipsparseHandle_t handle = 0;
hipsparseMatDescr_t descr = 0;
csrsv2Info_t info_A = 0;
const hipsparseSolvePolicy_t policy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL;
int lworkInBytes;
void * d_work = NULL;
// Generate input
srand(9384);
int trueNnz = generate_random_dense_matrix(M, N, &A);
print_partial_matrix(A,M,M,M,M);
// generate_random_vector(M, &Y);
X=(float*)malloc(N*sizeof(float));
Y=(float*)malloc(M*sizeof(float));
compute_coefficients(M,N,A,Y);
for(int i=0;i<N;i++)
{
printf("%f ",Y[i]);
Y[i]=1;
}
printf("\n");
//memset(X,0,N*sizeof(float));
// Create the cuSPARSE handle
CHECK_CUSPARSE(hipsparseCreate(&handle));
CHECK_CUSPARSE(hipsparseCreateCsrsv2Info(&info_A));
// Allocate device memory for vectors and the dense form of the matrix A
CHECK(hipMalloc((void **)&dX, sizeof(float) * N));
CHECK(hipMalloc((void **)&dY, sizeof(float) * M));
CHECK(hipMalloc((void **)&dA, sizeof(float) * M * N));
CHECK(hipMalloc((void **)&dNnzPerRow, sizeof(int) * M));
// Construct a descriptor of the matrix A
CHECK_CUSPARSE(hipsparseCreateMatDescr(&descr));
CHECK_CUSPARSE(hipsparseSetMatFillMode(descr, HIPSPARSE_FILL_MODE_LOWER));
CHECK_CUSPARSE(hipsparseSetMatDiagType(descr, HIPSPARSE_DIAG_TYPE_NON_UNIT));
CHECK_CUSPARSE(hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO));
CHECK_CUSPARSE(hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL));
// Transfer the input vectors and dense matrix A to the device
//CHECK(hipMemcpy(dX, X, sizeof(float) * N, hipMemcpyHostToDevice));
CHECK(hipMemcpy(dY, Y, sizeof(float) * M, hipMemcpyHostToDevice));
CHECK(hipMemcpy(dA, A, sizeof(float) * M * N, hipMemcpyHostToDevice));
/*
// Compute the number of non-zero elements in A
CHECK_CUSPARSE(hipsparseSnnz(handle, HIPSPARSE_DIRECTION_ROW, M, N, descr, dA,
M, dNnzPerRow, &totalNnz));
*/
totalNnz=trueNnz;
if (totalNnz != trueNnz)
{
fprintf(stderr, "Difference detected between cuSPARSE NNZ and true "
"value: expected %d but got %d\n", trueNnz, totalNnz);
return 1;
}
// Allocate device memory to store the sparse CSR representation of A
CHECK(hipMalloc((void **)&dCsrValA, sizeof(float) * totalNnz));
CHECK(hipMalloc((void **)&dCsrRowPtrA, sizeof(int) * (M + 1)));
CHECK(hipMalloc((void **)&dCsrColIndA, sizeof(int) * totalNnz));
// Convert A from dense format to CSR format, using the GPU
CHECK_CUSPARSE(hipsparseSdense2csr(handle, M, N, descr, dA, M, dNnzPerRow,
dCsrValA, dCsrRowPtrA, dCsrColIndA));
for(int i=0;i<N;i++)
X[i]=1;
CHECK(hipMemcpy(dX, X, sizeof(float) * N, hipMemcpyHostToDevice));
/*
CHECK_CUSPARSE(hipsparseScsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE,
M, N, totalNnz, &alpha, descr, dCsrValA,
dCsrRowPtrA, dCsrColIndA, dX, &beta, dY));
CHECK(hipMemcpy(Y, dY, sizeof(float) * M, hipMemcpyDeviceToHost));
for (row = 0; row < 5; row++)
{
printf("%f ", Y[row]);
}
printf("%\n mnozenje\n");
*/
compute_coefficients(M,N,A,Y);
CHECK(hipMemcpy(dY, Y, sizeof(float) * N, hipMemcpyHostToDevice));
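/*
 * Sparse triangular solve with csrsv2: query the work buffer size, run the
 * analysis phase, then solve A*x = y. Y was rebuilt above as the row sums of A
 * (i.e. A times a vector of ones), so a solve against the full matrix would
 * return all ones; csrsv2 only references the lower triangle selected by the
 * descriptor, which is presumably why the printed result differs (see the note
 * at the end of this file).
 */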
CHECK_CUSPARSE(hipsparseScsrsv2_bufferSize(
handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
M,
totalNnz,
descr,
dCsrValA,
dCsrRowPtrA,
dCsrColIndA,
info_A,
&lworkInBytes));
if (NULL != d_work) { hipFree(d_work); }
CHECK(hipMalloc((void**)&d_work, lworkInBytes));
CHECK(hipDeviceSynchronize());
CHECK_CUSPARSE(hipsparseScsrsv2_analysis(handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
M,
totalNnz,
descr,
dCsrValA,
dCsrRowPtrA,
dCsrColIndA,
info_A,
policy,
d_work));
CHECK(hipDeviceSynchronize());
hipsparseStatus_t status = hipsparseXcsrsv2_zeroPivot(handle, info_A, &structural_zero);
if (HIPSPARSE_STATUS_ZERO_PIVOT == status){
printf("L(%d,%d) is missing\n", structural_zero, structural_zero);
}
CHECK_CUSPARSE(hipsparseScsrsv2_solve(handle,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
M,
totalNnz,
&alpha,
descr,
dCsrValA,
dCsrRowPtrA,
dCsrColIndA,
info_A,
dY,
dX,
policy,
d_work));
CHECK(hipDeviceSynchronize());
status = hipsparseXcsrsv2_zeroPivot(handle, info_A, &numerical_zero);
if (HIPSPARSE_STATUS_ZERO_PIVOT == status){
printf("L(%d,%d) is zero\n", numerical_zero, numerical_zero);
}
// Copy the result vector back to the host
CHECK(hipMemcpy(Y, dY, sizeof(float) * M, hipMemcpyDeviceToHost));
for (row = 0; row < 5; row++)
{
printf("%2.2f ", Y[row]);
}
printf("...\n");
CHECK(hipMemcpy(X, dX, sizeof(float) * M, hipMemcpyDeviceToHost));
for (row = 0; row < 5; row++)
{
printf("%2.2f ", X[row]);
}
printf("...\n\n");
// Perform matrix-vector multiplication with the CSR-formatted matrix A
free(A);
free(X);
free(Y);
CHECK(hipFree(dX));
CHECK(hipFree(dY));
CHECK(hipFree(dA));
CHECK(hipFree(d_work));
CHECK(hipFree(dNnzPerRow));
CHECK(hipFree(dCsrValA));
CHECK(hipFree(dCsrRowPtrA));
CHECK(hipFree(dCsrColIndA));
hipsparseDestroyCsrsv2Info(info_A);
CHECK_CUSPARSE(hipsparseDestroyMatDescr(descr));
CHECK_CUSPARSE(hipsparseDestroy(handle));
return 0;
}
/*
This doesn't work and I don't even know why; maybe it's a bug,
or maybe I did something wrong, but according to the documentation
it should work this way.
*/ | 2d7aa07d9ec9f7880760aa29a2d18f08f39404b3.cu | #include "../common/common.h"
#include <stdio.h>
#include <stdlib.h>
#include <cusparse_v2.h>
#include <cuda.h>
/*
* This is an example demonstrating usage of the cuSPARSE library to perform a
* sparse matrix-vector multiplication on randomly generated data.
*/
/*
* M = # of rows
* N = # of columns
*/
int M = 5;
int N = 5;
/*
* Generate a vector of length N with random single-precision floating-point
* values between 0 and 100.
*/
void generate_random_vector(int N, float **outX)
{
int i;
double rMax = (double)RAND_MAX;
float *X = (float *)malloc(sizeof(float) * N);
for (i = 0; i < N; i++)
{
int r = rand();
double dr = (double)r;
X[i] = (dr / rMax) * 100.0;
}
*outX = X;
}
void compute_coefficients(int N, int M, float* A, float* y)
{
for (int i = 0; i < N; i++)
{
float s=0.0f;
for (int j = 0; j < M; j++)
{
s=s+A[j * N + i];
}
y[i]=s;
}
}
/*
* Generate random dense matrix A in column-major order, while rounding some
* elements down to zero to ensure it is sparse.
*/
int generate_random_dense_matrix(int M, int N, float **outA)
{
int i, j;
double rMax = (double)RAND_MAX;
float *A = (float *)malloc(sizeof(float) * M * N);
//cudaMemset(A,0,M*N*sizeof(float));
int totalNnz = 0;
for (j = 0; j < N; j++)
{
for (i = 0; i < M; i++)
{
int r = rand();
float *curr = A + (j * M + i);
if (r % 3 > 0)
{
*curr = 0.0f;
}
else
{
double dr = (double)r;
*curr = (dr / rMax) * 5.0;
}
//if(i==j) *curr=1.0f;
if (*curr != 0.0f)
{
totalNnz++;
}
}
}
*outA = A;
return totalNnz;
}
void print_partial_matrix(float *M, int nrows, int ncols, int max_row,
int max_col)
{
int row, col;
for (row = 0; row < max_row; row++)
{
for (col = 0; col < max_col; col++)
{
printf("%2.2f ", M[row * ncols + col]);
}
printf("...\n");
}
printf("...\n");
}
int main(int argc, char **argv)
{
int row;
float *A, *dA;
int *dNnzPerRow;
float *dCsrValA;
int *dCsrRowPtrA;
int *dCsrColIndA;
int totalNnz;
float alpha = 1.0f;
//float beta = 0.0f;
float *dX, *X;
float *dY, *Y;
int structural_zero;
int numerical_zero;
cusparseHandle_t handle = 0;
cusparseMatDescr_t descr = 0;
csrsv2Info_t info_A = 0;
const cusparseSolvePolicy_t policy = CUSPARSE_SOLVE_POLICY_USE_LEVEL;
int lworkInBytes;
void * d_work = NULL;
// Generate input
srand(9384);
int trueNnz = generate_random_dense_matrix(M, N, &A);
print_partial_matrix(A,M,M,M,M);
// generate_random_vector(M, &Y);
X=(float*)malloc(N*sizeof(float));
Y=(float*)malloc(M*sizeof(float));
compute_coefficients(M,N,A,Y);
for(int i=0;i<N;i++)
{
printf("%f ",Y[i]);
Y[i]=1;
}
printf("\n");
//memset(X,0,N*sizeof(float));
// Create the cuSPARSE handle
CHECK_CUSPARSE(cusparseCreate(&handle));
CHECK_CUSPARSE(cusparseCreateCsrsv2Info(&info_A));
// Allocate device memory for vectors and the dense form of the matrix A
CHECK(cudaMalloc((void **)&dX, sizeof(float) * N));
CHECK(cudaMalloc((void **)&dY, sizeof(float) * M));
CHECK(cudaMalloc((void **)&dA, sizeof(float) * M * N));
CHECK(cudaMalloc((void **)&dNnzPerRow, sizeof(int) * M));
// Construct a descriptor of the matrix A
CHECK_CUSPARSE(cusparseCreateMatDescr(&descr));
CHECK_CUSPARSE(cusparseSetMatFillMode(descr, CUSPARSE_FILL_MODE_LOWER));
CHECK_CUSPARSE(cusparseSetMatDiagType(descr, CUSPARSE_DIAG_TYPE_NON_UNIT));
CHECK_CUSPARSE(cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO));
CHECK_CUSPARSE(cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL));
// Transfer the input vectors and dense matrix A to the device
//CHECK(cudaMemcpy(dX, X, sizeof(float) * N, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dY, Y, sizeof(float) * M, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(dA, A, sizeof(float) * M * N, cudaMemcpyHostToDevice));
/*
// Compute the number of non-zero elements in A
CHECK_CUSPARSE(cusparseSnnz(handle, CUSPARSE_DIRECTION_ROW, M, N, descr, dA,
M, dNnzPerRow, &totalNnz));
*/
totalNnz=trueNnz;
if (totalNnz != trueNnz)
{
fprintf(stderr, "Difference detected between cuSPARSE NNZ and true "
"value: expected %d but got %d\n", trueNnz, totalNnz);
return 1;
}
// Allocate device memory to store the sparse CSR representation of A
CHECK(cudaMalloc((void **)&dCsrValA, sizeof(float) * totalNnz));
CHECK(cudaMalloc((void **)&dCsrRowPtrA, sizeof(int) * (M + 1)));
CHECK(cudaMalloc((void **)&dCsrColIndA, sizeof(int) * totalNnz));
// Convert A from dense format to CSR format, using the GPU
CHECK_CUSPARSE(cusparseSdense2csr(handle, M, N, descr, dA, M, dNnzPerRow,
dCsrValA, dCsrRowPtrA, dCsrColIndA));
for(int i=0;i<N;i++)
X[i]=1;
CHECK(cudaMemcpy(dX, X, sizeof(float) * N, cudaMemcpyHostToDevice));
/*
CHECK_CUSPARSE(cusparseScsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
M, N, totalNnz, &alpha, descr, dCsrValA,
dCsrRowPtrA, dCsrColIndA, dX, &beta, dY));
CHECK(cudaMemcpy(Y, dY, sizeof(float) * M, cudaMemcpyDeviceToHost));
for (row = 0; row < 5; row++)
{
printf("%f ", Y[row]);
}
printf("%\n mnozenje\n");
*/
compute_coefficients(M,N,A,Y);
CHECK(cudaMemcpy(dY, Y, sizeof(float) * N, cudaMemcpyHostToDevice));
CHECK_CUSPARSE(cusparseScsrsv2_bufferSize(
handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
M,
totalNnz,
descr,
dCsrValA,
dCsrRowPtrA,
dCsrColIndA,
info_A,
&lworkInBytes));
if (NULL != d_work) { cudaFree(d_work); }
CHECK(cudaMalloc((void**)&d_work, lworkInBytes));
CHECK(cudaDeviceSynchronize());
CHECK_CUSPARSE(cusparseScsrsv2_analysis(handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
M,
totalNnz,
descr,
dCsrValA,
dCsrRowPtrA,
dCsrColIndA,
info_A,
policy,
d_work));
CHECK(cudaDeviceSynchronize());
cusparseStatus_t status = cusparseXcsrsv2_zeroPivot(handle, info_A, &structural_zero);
if (CUSPARSE_STATUS_ZERO_PIVOT == status){
printf("L(%d,%d) is missing\n", structural_zero, structural_zero);
}
CHECK_CUSPARSE(cusparseScsrsv2_solve(handle,
CUSPARSE_OPERATION_NON_TRANSPOSE,
M,
totalNnz,
&alpha,
descr,
dCsrValA,
dCsrRowPtrA,
dCsrColIndA,
info_A,
dY,
dX,
policy,
d_work));
CHECK(cudaDeviceSynchronize());
status = cusparseXcsrsv2_zeroPivot(handle, info_A, &numerical_zero);
if (CUSPARSE_STATUS_ZERO_PIVOT == status){
printf("L(%d,%d) is zero\n", numerical_zero, numerical_zero);
}
// Copy the result vector back to the host
CHECK(cudaMemcpy(Y, dY, sizeof(float) * M, cudaMemcpyDeviceToHost));
for (row = 0; row < 5; row++)
{
printf("%2.2f ", Y[row]);
}
printf("...\n");
CHECK(cudaMemcpy(X, dX, sizeof(float) * M, cudaMemcpyDeviceToHost));
for (row = 0; row < 5; row++)
{
printf("%2.2f ", X[row]);
}
printf("...\n\n");
// Perform matrix-vector multiplication with the CSR-formatted matrix A
free(A);
free(X);
free(Y);
CHECK(cudaFree(dX));
CHECK(cudaFree(dY));
CHECK(cudaFree(dA));
CHECK(cudaFree(d_work));
CHECK(cudaFree(dNnzPerRow));
CHECK(cudaFree(dCsrValA));
CHECK(cudaFree(dCsrRowPtrA));
CHECK(cudaFree(dCsrColIndA));
cusparseDestroyCsrsv2Info(info_A);
CHECK_CUSPARSE(cusparseDestroyMatDescr(descr));
CHECK_CUSPARSE(cusparseDestroy(handle));
return 0;
}
/*
This doesn't work and I don't even know why; maybe it's a bug,
or maybe I did something wrong, but according to the documentation
it should work this way.
*/ |
1188821ec93129f5da8c10162a065d5c98ed15a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
{
#include "fwt_1d.h"
#include "../print_info/print_info.h"
#include "../misc/memory_management.cuh"
#include "../misc/cuda_errors.h"
}
#define BLOCK 256
#define BLOCKX 16
#define BLOCKY 16
#define SM_SIZE 256
/**
* @defgroup 97Coeff 97 Coefficients.
*
* 97 Coefficients.
*
* @{
*/
const float a1 = -1.586134342f;
const float a2 = -0.05298011854f;
const float a3 = 0.8829110762f;
const float a4 = 0.4435068522f;
/** @} */
/**
* @defgroup ScaleCoeff Scale coefficients.
*
* Scale coefficients.
*
* @{
*/
const float k = 1.230174104914f; // 1.230174104914
/** @} */
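/*
 * Taken together, a1..a4 and k implement the CDF 9/7 lifting steps applied in
 * process_97() below (s = even/low samples, d = odd/high samples):
 * d[i] += a1 * (s[i] + s[i+1]) (predict 1)
 * s[i] += a2 * (d[i-1] + d[i]) (update 1)
 * d[i] += a3 * (s[i] + s[i+1]) (predict 2)
 * s[i] += a4 * (d[i-1] + d[i]) (update 2)
 * save_data() then scales the low-pass result by 1/k and the high-pass by k.
 */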
__device__ static void read_data(float *sm, int tidx, type_data** data, int w, int h, int num_components, short offset)
{
const short p_offset_l = ((tidx < offset) ? (offset - tidx) /* left symmetric extension*/: -offset + tidx /* take normally pixels */);
int p_offset;
int pix = blockIdx.x + w * blockIdx.y;
while (tidx < num_components + 2 * offset)
{
short p_offset_r = (num_components - 2) - (tidx - (num_components + offset)) /* right symmetric extension*/;
p_offset = ((tidx >= num_components + offset) ? p_offset_r : p_offset_l);
sm[tidx] = data[p_offset][pix];
tidx += BLOCK;
}
}
__device__ static void save_data(int tidx, type_data **data, int w, float *pix_neighborhood, int num_components)
{
int pix = blockIdx.x + w * blockIdx.y;
int high_pass = (num_components + 1) >> 1;
while (2*tidx < num_components)
{
data[tidx][pix] = pix_neighborhood[4] / k;
if (tidx + high_pass < num_components)
{
data[tidx + high_pass][pix] = k * pix_neighborhood[5];
}
tidx += BLOCK;
}
}
/**
* @brief Does lifting process.
*
* @param a Coefficient.
* @param pix_neighborhood Array storing neighbor pixels.
*/
template<class T, unsigned int start, unsigned int end>
__device__
void process(const float a, T *pix_neighborhood)
{
#pragma unroll
for (int i = start; i <= end; i += 2)
{
pix_neighborhood[i] += a * (pix_neighborhood[i - 1] + pix_neighborhood[i + 1]);
}
}
__device__ void process_97(int tidx, float *pix_neighborhood, float *sm, int offset)
{
// Read necessary data
#pragma unroll
for (int i = 0; i < 9; i++)
{
/* Data start from offset */
pix_neighborhood[i] = sm[tidx + i];
}
// Predict 1
process<float, 1, 7> (a1, pix_neighborhood);
// Update 1
process<float, 2, 6> (a2, pix_neighborhood);
// Predict 2
process<float, 3, 5> (a3, pix_neighborhood);
// Update 2
process<float, 4, 4> (a4, pix_neighborhood);
}
__global__ void fwt_1d_kernel(type_data** data, int w, int h, int num_components, short offset)
{
__shared__ float sm[SM_SIZE]; // TODO provide space for offset (dynamic allocation)
int tidx = threadIdx.x;
read_data(sm, tidx, data, w, h, num_components, offset);
__syncthreads();
float pix_neighborhood[9];
tidx = threadIdx.x;
int tidx2 = threadIdx.x * 2;
while (tidx2 < num_components)
{
process_97(tidx2, pix_neighborhood, sm, offset);
tidx2 += BLOCK;
}
__syncthreads();
tidx = threadIdx.x;
save_data(tidx, data, w, pix_neighborhood, num_components);
/* if (blockIdx.x >= w || blockIdx.y >= h || tidx >= num_components)
{
return;
}*/
}
void fwt_1d(type_image *img, int lvl)
{
type_data** data_pd;
hipMalloc(&data_pd, sizeof(type_data*) * img->num_components);
type_data** data_p = (type_data**) calloc(img->num_components, sizeof(type_data*));
for (int g = 0; g < img->num_components; ++g)
{
data_p[g] = img->tile[0].tile_comp[g].img_data_d;
}
hipMemcpy(data_pd, data_p, sizeof(type_data*) * img->num_components, hipMemcpyHostToDevice);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", "Error before fwt_1d_kernel", hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
type_tile *tile = &img->tile[0];
type_tile_comp *tile_comp = &tile->tile_comp[0];
/* Number of all thread blocks */
dim3 grid_size = dim3(tile_comp-> width, tile_comp->height, 1);
// printf("w:%d h:%d num_comp:%d\n", tile_comp->width, tile_comp->height, img->num_components);
int n = img->num_components;
int i;
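/* Each level transforms the first n components along the component (spectral)
   axis and keeps only the low-pass half, rounded up, as input to the next level. */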
for(i = 0; i < lvl; ++i)
{
hipLaunchKernelGGL(( fwt_1d_kernel), dim3(grid_size), dim3(BLOCK), 0, 0, data_pd, tile_comp->width, tile_comp->height, n, 4);
n = (n + 1) >> 1;
}
hipDeviceSynchronize();
err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", "Error in fwt_1d_kernel", hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
free(data_p);
hipFree(data_pd);
}
| 1188821ec93129f5da8c10162a065d5c98ed15a0.cu | extern "C"
{
#include "fwt_1d.h"
#include "../print_info/print_info.h"
#include "../misc/memory_management.cuh"
#include "../misc/cuda_errors.h"
}
#define BLOCK 256
#define BLOCKX 16
#define BLOCKY 16
#define SM_SIZE 256
/**
* @defgroup 97Coeff 97 Coefficients.
*
* 97 Coefficients.
*
* @{
*/
const float a1 = -1.586134342f;
const float a2 = -0.05298011854f;
const float a3 = 0.8829110762f;
const float a4 = 0.4435068522f;
/** @} */
/**
* @defgroup ScaleCoeff Scale coefficients.
*
* Scale coefficients.
*
* @{
*/
const float k = 1.230174104914f; // 1.230174104914
/** @} */
__device__ static void read_data(float *sm, int tidx, type_data** data, int w, int h, int num_components, short offset)
{
const short p_offset_l = ((tidx < offset) ? (offset - tidx) /* left symmetric extension*/: -offset + tidx /* take normally pixels */);
int p_offset;
int pix = blockIdx.x + w * blockIdx.y;
while (tidx < num_components + 2 * offset)
{
short p_offset_r = (num_components - 2) - (tidx - (num_components + offset)) /* right symmetric extension*/;
p_offset = ((tidx >= num_components + offset) ? p_offset_r : p_offset_l);
sm[tidx] = data[p_offset][pix];
tidx += BLOCK;
}
}
__device__ static void save_data(int tidx, type_data **data, int w, float *pix_neighborhood, int num_components)
{
int pix = blockIdx.x + w * blockIdx.y;
int high_pass = (num_components + 1) >> 1;
while (2*tidx < num_components)
{
data[tidx][pix] = pix_neighborhood[4] / k;
if (tidx + high_pass < num_components)
{
data[tidx + high_pass][pix] = k * pix_neighborhood[5];
}
tidx += BLOCK;
}
}
/**
 * @brief Performs one lifting pass (predict or update) over the neighborhood.
*
* @param a Coefficient.
* @param pix_neighborhood Array storing neighbor pixels.
*/
template<class T, unsigned int start, unsigned int end>
__device__
void process(const float a, T *pix_neighborhood)
{
#pragma unroll
for (int i = start; i <= end; i += 2)
{
pix_neighborhood[i] += a * (pix_neighborhood[i - 1] + pix_neighborhood[i + 1]);
}
}
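/**
 * @brief Runs the full 9/7 lifting chain on a 9-pixel neighborhood.
 *
 * The two predict and two update passes with a1..a4 (plus the scaling by k done
 * in save_data) correspond to the CDF 9/7 lifting scheme used in JPEG 2000.
 */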
__device__ void process_97(int tidx, float *pix_neighborhood, float *sm, int offset)
{
// Read necessary data
#pragma unroll
for (int i = 0; i < 9; i++)
{
/* Data start from offset */
pix_neighborhood[i] = sm[tidx + i];
}
// Predict 1
process<float, 1, 7> (a1, pix_neighborhood);
// Update 1
process<float, 2, 6> (a2, pix_neighborhood);
// Predict 2
process<float, 3, 5> (a3, pix_neighborhood);
// Update 2
process<float, 4, 4> (a4, pix_neighborhood);
}
__global__ void fwt_1d_kernel(type_data** data, int w, int h, int num_components, short offset)
{
__shared__ float sm[SM_SIZE]; // TODO provide space for offset (dynamic allocation)
int tidx = threadIdx.x;
read_data(sm, tidx, data, w, h, num_components, offset);
__syncthreads();
float pix_neighborhood[9];
tidx = threadIdx.x;
int tidx2 = threadIdx.x * 2;
while (tidx2 < num_components)
{
process_97(tidx2, pix_neighborhood, sm, offset);
tidx2 += BLOCK;
}
__syncthreads();
tidx = threadIdx.x;
save_data(tidx, data, w, pix_neighborhood, num_components);
/* if (blockIdx.x >= w || blockIdx.y >= h || tidx >= num_components)
{
return;
}*/
}
void fwt_1d(type_image *img, int lvl)
{
type_data** data_pd;
cudaMalloc(&data_pd, sizeof(type_data*) * img->num_components);
type_data** data_p = (type_data**) calloc(img->num_components, sizeof(type_data*));
for (int g = 0; g < img->num_components; ++g)
{
data_p[g] = img->tile[0].tile_comp[g].img_data_d;
}
cudaMemcpy(data_pd, data_p, sizeof(type_data*) * img->num_components, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", "Error before fwt_1d_kernel", cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
type_tile *tile = &img->tile[0];
type_tile_comp *tile_comp = &tile->tile_comp[0];
/* Number of all thread blocks */
	dim3 grid_size = dim3(tile_comp->width, tile_comp->height, 1);
// printf("w:%d h:%d num_comp:%d\n", tile_comp->width, tile_comp->height, img->num_components);
int n = img->num_components;
int i;
for(i = 0; i < lvl; ++i)
{
fwt_1d_kernel<<<grid_size, BLOCK>>>(data_pd, tile_comp->width, tile_comp->height, n, 4);
n = (n + 1) >> 1;
}
cudaDeviceSynchronize();
err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", "Error in fwt_1d_kernel", cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
free(data_p);
cudaFree(data_pd);
}
|
c8a7a2c76e287b5f3c82c55ba262b9d52ee56f5f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ATen/ATen.h"
#include "ATen/TensorUtils.h"
#include "ATen/NativeFunctions.h"
#include "ATen/Error.h"
#include "ATen/AccumulateType.h"
#include "ATen/hip/HIPTensorMethods.cuh"
#include "ATen/hip/HIPTypeConversion.cuh"
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHNumerics.cuh>
#include <THH/THHTensorMathReduce.cuh>
#include <THH/THHTensorSort.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <THHUNN/THHHalfAutoNumerics.cuh>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
namespace at { namespace native {
namespace {
static const int WARP_SIZE = 32;
static const int BLOCKDIMY = 32;
template
<typename scalar_t,
typename accscalar_t>
__global__ void embedding_backward_feature_kernel
(int64_t* indices,
const scalar_t* __restrict__ grad,
scalar_t* __restrict__ grad_weight,
int n, // OK to pass as int, we don't expect 2 billion+ samples in one shot
int64_t stride,
int padding_idx)
{
extern __shared__ char buf[];
accscalar_t* smem = (accscalar_t*)buf;
accscalar_t* my_s = smem + WARP_SIZE*threadIdx.y;
int* indices_batch = (int*)(buf + sizeof(accscalar_t)*WARP_SIZE*blockDim.y);
const int s = (int)stride; // OK to make int, we don't expect 2 billion+ embedding row size
const int f = threadIdx.x + blockIdx.x*blockDim.x; // feature_dim
for(int batch_start = 0; batch_start < n; batch_start += blockDim.x*blockDim.y)
{
// Entire block cooperates to load a batch of 1024 indices to process
int tid = threadIdx.x + threadIdx.y*blockDim.x;
if(batch_start + tid < n)
indices_batch[tid] = (int)indices[batch_start + tid];
// Loop over the batch of <= 1024 loaded indices in chunks of blockDim.y = 32
for(int chunk_start = batch_start; chunk_start < n; chunk_start += blockDim.y)
{
// This does double duty: it makes sure indices_batch is ready, and it makes sure match-group
// leaders are done with their accumulates before other warps start loading again.
__syncthreads();
int n_this_chunk = (n - chunk_start) < blockDim.y ? (n - chunk_start) : blockDim.y;
int src_row = chunk_start + threadIdx.y;
int dst_row = indices_batch[src_row - batch_start]; // This warp's target row in grad_weight
// All warps load their smem segments with incoming grad data
if(src_row < n && f < s && dst_row != padding_idx)
my_s[threadIdx.x] = scalar_cast<accscalar_t>(grad[src_row*stride + f]);
__syncthreads();
// To ensure determinism, we can't just have each warp add its grad data to its dst_row.
// We need to check if any other warps pulled grad data targeting dst_row.
// If so, we elect the first warp in each matching group as the leader.
// Each leader warp serializes the accumulates targeting dst_row in shared memory,
// then finishes by adding the accumulated buffer to dst_row in grad_weight.
if(dst_row != padding_idx && src_row < n) // Per-warp exit condition, safe with ballot_sync
{
int match_found_this_thread =
(dst_row == indices_batch[chunk_start - batch_start + threadIdx.x]);
if(threadIdx.x >= n_this_chunk)
match_found_this_thread = 0;
unsigned int matchmask = WARP_BALLOT(match_found_this_thread);
int first_remaining_peer = __ffs(matchmask) - 1;
if(threadIdx.y == first_remaining_peer) // Nominate lowest-indexed warp as the leader
{
matchmask ^= (1 << first_remaining_peer);
while(matchmask)
{
first_remaining_peer = __ffs(matchmask) - 1;
my_s[threadIdx.x] += smem[threadIdx.x + WARP_SIZE*first_remaining_peer];
matchmask ^= (1 << first_remaining_peer);
}
if(f < s)
grad_weight[dst_row*stride + f] += scalar_cast<scalar_t>(my_s[threadIdx.x]);
}
}
}
}
}
template <typename scalar_t>
__global__ void embedding_backward_kernel(
int64_t* input, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
int64_t* count, int64_t numel, int64_t stride, int padding_idx) {
using accscalar_t = acc_type<scalar_t, true>;
int idx = blockIdx.x * 4 + threadIdx.y;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
  // Number of values processed by each thread (grain size)
const int SZ = 4;
if (idx < numel
&& (idx == 0 || input[idx] != input[idx - 1])
&& input[idx] != padding_idx) {
do {
const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
const int weight_row = ((int) input[idx]) * stride;
const int grad_row = ((int) indices[idx]) * stride;
const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = scalar_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
weight[ii] = scalar_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
weight[ii] += gradient[ii] * scale;
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = scalar_cast<scalar_t>(weight[ii]);
}
}
idx++;
} while (idx < numel && input[idx] == input[idx - 1]);
}
}
/* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */
template <typename scalar_t, typename accscalar_t>
__global__ void renorm_kernel(
scalar_t* weights, int64_t* indices, accscalar_t max_norm,
accscalar_t norm_type, int dim) {
// Some casting hacks since dynamic shared memory and templates don't work together:
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
int tid = threadIdx.x;
int base_index = indices[blockIdx.x] * dim;
accscalar_t v = 0;
for (int i = tid; i < dim; i += blockDim.x) {
auto x = scalar_cast<accscalar_t>(weights[base_index + i]);
if (norm_type == 1) {
v += std::abs(x);
} else if (norm_type == 2) {
v += x * x;
} else {
v += THCNumerics<accscalar_t>::pow(x, norm_type);
}
}
using Op = ReduceAdd<accscalar_t>;
v = reduceBlock<accscalar_t>(sdata, blockDim.x, v, Op(), 0);
if (tid == 0) {
sdata[0] = THCNumerics<accscalar_t>::pow(v, scalar_cast<accscalar_t>(1.0 / norm_type));
}
__syncthreads();
// now we renormalize the blocks that need it
if (sdata[0] > max_norm) {
auto factor = scalar_cast<scalar_t>(max_norm / (sdata[0] + 1e-7));
for (int i = tid; i < dim; i += blockDim.x) {
weights[base_index + i] *= factor;
}
}
}
} // anonymous namespace
Tensor embedding_backward_cuda(const Tensor & grad_, const Tensor & indices,
int64_t num_weights, int64_t padding_idx,
bool scale_grad_by_freq) {
auto grad_arg = TensorArg(grad_, "grad", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkScalarType("embedding_backward", indices_arg, kLong);
checkContiguous("embedding_backward", indices_arg);
checkSameGPU("embedding_backward", grad_arg, indices_arg);
auto num_indices = indices.numel();
auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)});
auto grad_weight = at::zeros(grad_.type(), {num_weights, grad_.size(-1)});
int64_t stride = grad_weight.stride(0);
hipStream_t stream = globalContext().getCurrentHIPStreamMasqueradingAsCUDA();
if (num_indices <= 768 && !scale_grad_by_freq) {
dim3 grid(THCCeilDiv(stride, (int64_t)WARP_SIZE));
dim3 block(WARP_SIZE, BLOCKDIMY);
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(grad.type(),
"embedding_backward",
[&]
{
using cuda_scalar_t = cuda::into_type<scalar_t>;
using accscalar_t = acc_type<cuda_scalar_t, true>;
hipLaunchKernelGGL(( embedding_backward_feature_kernel<cuda_scalar_t, accscalar_t>)
, dim3(grid),
dim3(block),
sizeof(accscalar_t)*WARP_SIZE*BLOCKDIMY + sizeof(int)*WARP_SIZE*BLOCKDIMY,
stream,
indices.data<int64_t>(),
grad.data<cuda_scalar_t>(),
grad_weight.data<cuda_scalar_t>(),
num_indices,
stride,
padding_idx);
});
THCudaCheck(hipGetLastError());
return grad_weight;
}
auto sorted_indices = indices.type().tensor(indices.sizes());
auto orig_indices = indices.type().tensor(indices.sizes());
using device_ptr = thrust::device_ptr<int64_t>;
// Sort the inputs into sorted with the corresponding indices; we
// don't need a stable or multidimensional sort, so just use Thrust
// directly
{
sorted_indices.copy_(indices);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
// Fill sortedOrigIndices with sequential indices
auto count_iter = thrust::counting_iterator<int64_t>(0);
auto orig_data = device_ptr(orig_indices.data<int64_t>());
thrust::copy(policy, count_iter, count_iter + num_indices, orig_data);
// Sort; a stable sort is not required
auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data,
ThrustLTOp<int64_t>());
}
Tensor count;
if (scale_grad_by_freq) {
count = indices.type().tensor(indices.sizes());
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
// Compute an increasing sequence per unique item in sortedIndices:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 1 2 3 1 2 1 1 2
auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
auto count_data = device_ptr(count.data<int64_t>());
thrust::inclusive_scan_by_key(
policy,
sorted_data,
sorted_data + num_indices,
thrust::make_constant_iterator(1),
count_data
);
// Take the maximum of each count per unique key in reverse:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 3 3 3 2 2 1 2 2
thrust::inclusive_scan_by_key(
policy,
thrust::make_reverse_iterator(sorted_data + num_indices),
thrust::make_reverse_iterator(sorted_data),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::equal_to<int64_t>(),
thrust::maximum<int64_t>()
);
}
dim3 grid(THCCeilDiv(num_indices, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128));
dim3 block(32, 4);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "embedding_backward", [&] {
using cuda_scalar_t = cuda::into_type<scalar_t>;
hipLaunchKernelGGL(( embedding_backward_kernel), dim3(grid), dim3(block), 0, stream,
sorted_indices.data<int64_t>(),
orig_indices.data<int64_t>(),
grad.data<cuda_scalar_t>(),
grad_weight.data<cuda_scalar_t>(),
count.defined() ? count.data<int64_t>() : nullptr,
num_indices,
stride,
padding_idx);
});
THCudaCheck(hipGetLastError());
return grad_weight;
}
Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices,
double max_norm, double norm_type) {
auto self_arg = TensorArg(self, "self", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkContiguous("embedding_renorm_", self_arg);
checkContiguous("embedding_renorm", indices_arg);
checkDim("embedding_renorm_", self_arg, 2);
checkSameGPU("embedding_renorm", self_arg, indices_arg);
hipStream_t stream = globalContext().getCurrentHIPStreamMasqueradingAsCUDA();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
using device_ptr = thrust::device_ptr<int64_t>;
auto num_indices = indices.numel();
auto indices_data = device_ptr(indices.data<int64_t>());
// FIXME: thrust::unique only removes consecutive elements that are equal.
// We have race conditions when indices contain duplicates which are not
// adjacent
auto unique_indices = indices.type().tensor(indices.numel());
auto unique_data = device_ptr(unique_indices.data<int64_t>());
auto end = thrust::unique_copy(policy, indices_data, indices_data + num_indices, unique_data);
auto num_unique_indices = static_cast<int>(end - unique_data);
dim3 grid(num_unique_indices);
dim3 block(128);
int dim = self.stride(0);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.type(), "embedding_backward", [&] {
using cuda_scalar_t = cuda::into_type<scalar_t>;
using accscalar_t = acc_type<cuda_scalar_t, true>;
hipLaunchKernelGGL(( renorm_kernel), dim3(grid), dim3(block), 128 * sizeof(accscalar_t), stream,
self.data<cuda_scalar_t>(),
unique_indices.data<int64_t>(),
scalar_cast<accscalar_t>(max_norm),
scalar_cast<accscalar_t>(norm_type),
dim);
});
THCudaCheck(hipGetLastError());
return self;
}
}} // namespace at::native
| c8a7a2c76e287b5f3c82c55ba262b9d52ee56f5f.cu | #include "ATen/ATen.h"
#include "ATen/TensorUtils.h"
#include "ATen/NativeFunctions.h"
#include "ATen/Error.h"
#include "ATen/AccumulateType.h"
#include "ATen/cuda/CUDATensorMethods.cuh"
#include "ATen/cuda/CUDATypeConversion.cuh"
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCNumerics.cuh>
#include <THC/THCTensorMathReduce.cuh>
#include <THC/THCTensorSort.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <THCUNN/THCHalfAutoNumerics.cuh>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
namespace at { namespace native {
namespace {
static const int WARP_SIZE = 32;
static const int BLOCKDIMY = 32;
template
<typename scalar_t,
typename accscalar_t>
__global__ void embedding_backward_feature_kernel
(int64_t* indices,
const scalar_t* __restrict__ grad,
scalar_t* __restrict__ grad_weight,
int n, // OK to pass as int, we don't expect 2 billion+ samples in one shot
int64_t stride,
int padding_idx)
{
extern __shared__ char buf[];
accscalar_t* smem = (accscalar_t*)buf;
accscalar_t* my_s = smem + WARP_SIZE*threadIdx.y;
int* indices_batch = (int*)(buf + sizeof(accscalar_t)*WARP_SIZE*blockDim.y);
const int s = (int)stride; // OK to make int, we don't expect 2 billion+ embedding row size
const int f = threadIdx.x + blockIdx.x*blockDim.x; // feature_dim
for(int batch_start = 0; batch_start < n; batch_start += blockDim.x*blockDim.y)
{
// Entire block cooperates to load a batch of 1024 indices to process
int tid = threadIdx.x + threadIdx.y*blockDim.x;
if(batch_start + tid < n)
indices_batch[tid] = (int)indices[batch_start + tid];
// Loop over the batch of <= 1024 loaded indices in chunks of blockDim.y = 32
for(int chunk_start = batch_start; chunk_start < n; chunk_start += blockDim.y)
{
// This does double duty: it makes sure indices_batch is ready, and it makes sure match-group
// leaders are done with their accumulates before other warps start loading again.
__syncthreads();
int n_this_chunk = (n - chunk_start) < blockDim.y ? (n - chunk_start) : blockDim.y;
int src_row = chunk_start + threadIdx.y;
int dst_row = indices_batch[src_row - batch_start]; // This warp's target row in grad_weight
// All warps load their smem segments with incoming grad data
if(src_row < n && f < s && dst_row != padding_idx)
my_s[threadIdx.x] = scalar_cast<accscalar_t>(grad[src_row*stride + f]);
__syncthreads();
// To ensure determinism, we can't just have each warp add its grad data to its dst_row.
// We need to check if any other warps pulled grad data targeting dst_row.
// If so, we elect the first warp in each matching group as the leader.
// Each leader warp serializes the accumulates targeting dst_row in shared memory,
// then finishes by adding the accumulated buffer to dst_row in grad_weight.
if(dst_row != padding_idx && src_row < n) // Per-warp exit condition, safe with ballot_sync
{
int match_found_this_thread =
(dst_row == indices_batch[chunk_start - batch_start + threadIdx.x]);
if(threadIdx.x >= n_this_chunk)
match_found_this_thread = 0;
unsigned int matchmask = WARP_BALLOT(match_found_this_thread);
int first_remaining_peer = __ffs(matchmask) - 1;
if(threadIdx.y == first_remaining_peer) // Nominate lowest-indexed warp as the leader
{
matchmask ^= (1 << first_remaining_peer);
while(matchmask)
{
first_remaining_peer = __ffs(matchmask) - 1;
my_s[threadIdx.x] += smem[threadIdx.x + WARP_SIZE*first_remaining_peer];
matchmask ^= (1 << first_remaining_peer);
}
if(f < s)
grad_weight[dst_row*stride + f] += scalar_cast<scalar_t>(my_s[threadIdx.x]);
}
}
}
}
}
template <typename scalar_t>
__global__ void embedding_backward_kernel(
int64_t* input, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
int64_t* count, int64_t numel, int64_t stride, int padding_idx) {
using accscalar_t = acc_type<scalar_t, true>;
int idx = blockIdx.x * 4 + threadIdx.y;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
  // Number of values processed by each thread (grain size)
const int SZ = 4;
if (idx < numel
&& (idx == 0 || input[idx] != input[idx - 1])
&& input[idx] != padding_idx) {
do {
const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
const int weight_row = ((int) input[idx]) * stride;
const int grad_row = ((int) indices[idx]) * stride;
const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = scalar_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
weight[ii] = scalar_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
weight[ii] += gradient[ii] * scale;
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = scalar_cast<scalar_t>(weight[ii]);
}
}
idx++;
} while (idx < numel && input[idx] == input[idx - 1]);
}
}
/* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */
template <typename scalar_t, typename accscalar_t>
__global__ void renorm_kernel(
scalar_t* weights, int64_t* indices, accscalar_t max_norm,
accscalar_t norm_type, int dim) {
// Some casting hacks since dynamic shared memory and templates don't work together:
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
int tid = threadIdx.x;
int base_index = indices[blockIdx.x] * dim;
accscalar_t v = 0;
for (int i = tid; i < dim; i += blockDim.x) {
auto x = scalar_cast<accscalar_t>(weights[base_index + i]);
if (norm_type == 1) {
v += std::abs(x);
} else if (norm_type == 2) {
v += x * x;
} else {
v += THCNumerics<accscalar_t>::pow(x, norm_type);
}
}
using Op = ReduceAdd<accscalar_t>;
v = reduceBlock<accscalar_t>(sdata, blockDim.x, v, Op(), 0);
if (tid == 0) {
sdata[0] = THCNumerics<accscalar_t>::pow(v, scalar_cast<accscalar_t>(1.0 / norm_type));
}
__syncthreads();
// now we renormalize the blocks that need it
if (sdata[0] > max_norm) {
auto factor = scalar_cast<scalar_t>(max_norm / (sdata[0] + 1e-7));
for (int i = tid; i < dim; i += blockDim.x) {
weights[base_index + i] *= factor;
}
}
}
} // anonymous namespace
Tensor embedding_backward_cuda(const Tensor & grad_, const Tensor & indices,
int64_t num_weights, int64_t padding_idx,
bool scale_grad_by_freq) {
auto grad_arg = TensorArg(grad_, "grad", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkScalarType("embedding_backward", indices_arg, kLong);
checkContiguous("embedding_backward", indices_arg);
checkSameGPU("embedding_backward", grad_arg, indices_arg);
auto num_indices = indices.numel();
auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)});
auto grad_weight = at::zeros(grad_.type(), {num_weights, grad_.size(-1)});
int64_t stride = grad_weight.stride(0);
cudaStream_t stream = globalContext().getCurrentCUDAStream();
if (num_indices <= 768 && !scale_grad_by_freq) {
dim3 grid(THCCeilDiv(stride, (int64_t)WARP_SIZE));
dim3 block(WARP_SIZE, BLOCKDIMY);
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(grad.type(),
"embedding_backward",
[&]
{
using cuda_scalar_t = cuda::into_type<scalar_t>;
using accscalar_t = acc_type<cuda_scalar_t, true>;
embedding_backward_feature_kernel<cuda_scalar_t, accscalar_t>
<<<grid,
block,
sizeof(accscalar_t)*WARP_SIZE*BLOCKDIMY + sizeof(int)*WARP_SIZE*BLOCKDIMY,
stream>>>
(indices.data<int64_t>(),
grad.data<cuda_scalar_t>(),
grad_weight.data<cuda_scalar_t>(),
num_indices,
stride,
padding_idx);
});
THCudaCheck(cudaGetLastError());
return grad_weight;
}
auto sorted_indices = indices.type().tensor(indices.sizes());
auto orig_indices = indices.type().tensor(indices.sizes());
using device_ptr = thrust::device_ptr<int64_t>;
// Sort the inputs into sorted with the corresponding indices; we
// don't need a stable or multidimensional sort, so just use Thrust
// directly
{
sorted_indices.copy_(indices);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
// Fill sortedOrigIndices with sequential indices
auto count_iter = thrust::counting_iterator<int64_t>(0);
auto orig_data = device_ptr(orig_indices.data<int64_t>());
thrust::copy(policy, count_iter, count_iter + num_indices, orig_data);
// Sort; a stable sort is not required
auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data,
ThrustLTOp<int64_t>());
}
Tensor count;
if (scale_grad_by_freq) {
count = indices.type().tensor(indices.sizes());
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
// Compute an increasing sequence per unique item in sortedIndices:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 1 2 3 1 2 1 1 2
auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
auto count_data = device_ptr(count.data<int64_t>());
thrust::inclusive_scan_by_key(
policy,
sorted_data,
sorted_data + num_indices,
thrust::make_constant_iterator(1),
count_data
);
// Take the maximum of each count per unique key in reverse:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 3 3 3 2 2 1 2 2
thrust::inclusive_scan_by_key(
policy,
thrust::make_reverse_iterator(sorted_data + num_indices),
thrust::make_reverse_iterator(sorted_data),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::equal_to<int64_t>(),
thrust::maximum<int64_t>()
);
}
dim3 grid(THCCeilDiv(num_indices, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128));
dim3 block(32, 4);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "embedding_backward", [&] {
using cuda_scalar_t = cuda::into_type<scalar_t>;
embedding_backward_kernel<<<grid, block, 0, stream>>>(
sorted_indices.data<int64_t>(),
orig_indices.data<int64_t>(),
grad.data<cuda_scalar_t>(),
grad_weight.data<cuda_scalar_t>(),
count.defined() ? count.data<int64_t>() : nullptr,
num_indices,
stride,
padding_idx);
});
THCudaCheck(cudaGetLastError());
return grad_weight;
}
Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices,
double max_norm, double norm_type) {
auto self_arg = TensorArg(self, "self", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkContiguous("embedding_renorm_", self_arg);
checkContiguous("embedding_renorm", indices_arg);
checkDim("embedding_renorm_", self_arg, 2);
checkSameGPU("embedding_renorm", self_arg, indices_arg);
cudaStream_t stream = globalContext().getCurrentCUDAStream();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
using device_ptr = thrust::device_ptr<int64_t>;
auto num_indices = indices.numel();
auto indices_data = device_ptr(indices.data<int64_t>());
// FIXME: thrust::unique only removes consecutive elements that are equal.
// We have race conditions when indices contain duplicates which are not
// adjacent
auto unique_indices = indices.type().tensor(indices.numel());
auto unique_data = device_ptr(unique_indices.data<int64_t>());
auto end = thrust::unique_copy(policy, indices_data, indices_data + num_indices, unique_data);
auto num_unique_indices = static_cast<int>(end - unique_data);
dim3 grid(num_unique_indices);
dim3 block(128);
int dim = self.stride(0);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.type(), "embedding_backward", [&] {
using cuda_scalar_t = cuda::into_type<scalar_t>;
using accscalar_t = acc_type<cuda_scalar_t, true>;
renorm_kernel<<<grid, block, 128 * sizeof(accscalar_t), stream>>>(
self.data<cuda_scalar_t>(),
unique_indices.data<int64_t>(),
scalar_cast<accscalar_t>(max_norm),
scalar_cast<accscalar_t>(norm_type),
dim);
});
THCudaCheck(cudaGetLastError());
return self;
}
}} // namespace at::native
|
b536b9f3767f8b0ab04de59eb62d0d6c77e124f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define warp_size 32
#define Hwarp_size 16
#define A 0
#define B 15
void checkCUDAError(const char* msg);
__host__ __device__ inline double f(double x)
{
return exp(x)*sin(x);
}
__host__ __device__ inline unsigned int getFirstSetBitPos(int n)
{
return log2((float)(n&-n))+1;
}
__global__ void romberg(double a, double b, int row_size, double *result) //row_size<=25, preferably 14
{
extern __shared__ double local_array[];
double diff = (b-a)/gridDim.x, step;
int max_eval = (1<<(row_size-1)),k;
b = a + (blockIdx.x+1)*diff;
a += blockIdx.x*diff;
step = (b-a)/max_eval;
double local_col[25];
for(int i = 0; i < row_size; i++)
local_col[i] = 0.0;
if(!threadIdx.x)
{
k = blockDim.x;
local_col[0] = f(a) + f(b);
}
else
k = threadIdx.x;
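	/* Bin samples by refinement level: interior sample k first appears at
	   trapezoid level row_size - getFirstSetBitPos(k), so the prefix sums taken
	   below recover every trapezoid estimate of this interval at once. */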
for(; k < max_eval; k += blockDim.x)
{
local_col[row_size - getFirstSetBitPos(k)] += 2.0*f(a + step*k);
}
for(int i = 0; i < row_size; i++)
{
local_array[row_size*threadIdx.x + i] = local_col[i];
}
__syncthreads();
if(threadIdx.x < row_size)
{
double sum = 0.0;
for(int i = threadIdx.x; i < blockDim.x*row_size; i+=row_size)
sum += local_array[i];
local_array[threadIdx.x] = sum;
}
if(!threadIdx.x)
{
double *romberg_table = local_col;
romberg_table[0] = local_array[0];
for(int k = 1; k < row_size; k++)
romberg_table[k] = romberg_table[k-1] + local_array[k];
for(int k = 0; k < row_size; k++)
romberg_table[k]*= (b-a)/(1<<(k+1));
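		/* Extrapolation sweep of Romberg's method: each column refines the previous
		   estimates in place; the last entry is this block's contribution, summed on
		   the host. */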
for(int col = 0 ; col < row_size-1 ; col++)
{
for(int row = row_size-1; row > col; row--)
{
romberg_table[row] = romberg_table[row] + (romberg_table[row] - romberg_table[row-1])/((1<<(2*col+1))-1);
}
}
result[blockIdx.x] = romberg_table[row_size-1];
}
}
int main( int argc, char** argv)
{
double *d_result, *h_result,sum=0.0;
int numBlocks = 128, numThreadsPerBlock = 64, row_size = 13, max_eval = (1<<(row_size-1));
hipDeviceSetCacheConfig(hipFuncCachePreferShared);
hipMalloc( (void **) &d_result, numBlocks*sizeof(double) );
h_result = new double[numBlocks];
timeval t;
double t1,t2,t3,t4;
gettimeofday(&t, NULL);
t1 = t.tv_sec*1000.0 + (t.tv_usec/1000.0);
hipLaunchKernelGGL(( romberg), dim3(numBlocks), dim3(numThreadsPerBlock), row_size*numThreadsPerBlock*sizeof(double) , 0, A,B,row_size,d_result);
hipDeviceSynchronize();
gettimeofday(&t, NULL);
t2 = t.tv_sec*1000.0 + (t.tv_usec/1000.0);
checkCUDAError("kernel invocation");
hipMemcpy( h_result, d_result, numBlocks*sizeof(double), hipMemcpyDeviceToHost );
checkCUDAError("memcpy");
//for(int k = 0; k<(max_eval+1)*numBlocks; k++ )
// printf("%lf\t",h_result[k]);
for(int k=0;k<numBlocks;k++)
sum+=h_result[k];
printf("TIME : %lf ms with ans = %lf\n\n\n",t2-t1,sum);
}
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
| b536b9f3767f8b0ab04de59eb62d0d6c77e124f9.cu | #include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define warp_size 32
#define Hwarp_size 16
#define A 0
#define B 15
void checkCUDAError(const char* msg);
__host__ __device__ inline double f(double x)
{
return exp(x)*sin(x);
}
__host__ __device__ inline unsigned int getFirstSetBitPos(int n)
{
return log2((float)(n&-n))+1;
}
__global__ void romberg(double a, double b, int row_size, double *result) //row_size<=25, preferably 14
{
extern __shared__ double local_array[];
double diff = (b-a)/gridDim.x, step;
int max_eval = (1<<(row_size-1)),k;
b = a + (blockIdx.x+1)*diff;
a += blockIdx.x*diff;
step = (b-a)/max_eval;
double local_col[25];
for(int i = 0; i < row_size; i++)
local_col[i] = 0.0;
if(!threadIdx.x)
{
k = blockDim.x;
local_col[0] = f(a) + f(b);
}
else
k = threadIdx.x;
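	/* Bin samples by refinement level: interior sample k first appears at
	   trapezoid level row_size - getFirstSetBitPos(k), so the prefix sums taken
	   below recover every trapezoid estimate of this interval at once. */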
for(; k < max_eval; k += blockDim.x)
{
local_col[row_size - getFirstSetBitPos(k)] += 2.0*f(a + step*k);
}
for(int i = 0; i < row_size; i++)
{
local_array[row_size*threadIdx.x + i] = local_col[i];
}
__syncthreads();
if(threadIdx.x < row_size)
{
double sum = 0.0;
for(int i = threadIdx.x; i < blockDim.x*row_size; i+=row_size)
sum += local_array[i];
local_array[threadIdx.x] = sum;
}
if(!threadIdx.x)
{
double *romberg_table = local_col;
romberg_table[0] = local_array[0];
for(int k = 1; k < row_size; k++)
romberg_table[k] = romberg_table[k-1] + local_array[k];
for(int k = 0; k < row_size; k++)
romberg_table[k]*= (b-a)/(1<<(k+1));
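		/* Extrapolation sweep of Romberg's method: each column refines the previous
		   estimates in place; the last entry is this block's contribution, summed on
		   the host. */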
for(int col = 0 ; col < row_size-1 ; col++)
{
for(int row = row_size-1; row > col; row--)
{
romberg_table[row] = romberg_table[row] + (romberg_table[row] - romberg_table[row-1])/((1<<(2*col+1))-1);
}
}
result[blockIdx.x] = romberg_table[row_size-1];
}
}
int main( int argc, char** argv)
{
double *d_result, *h_result,sum=0.0;
int numBlocks = 128, numThreadsPerBlock = 64, row_size = 13, max_eval = (1<<(row_size-1));
cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
cudaMalloc( (void **) &d_result, numBlocks*sizeof(double) );
h_result = new double[numBlocks];
timeval t;
double t1,t2,t3,t4;
gettimeofday(&t, NULL);
t1 = t.tv_sec*1000.0 + (t.tv_usec/1000.0);
romberg<<< numBlocks, numThreadsPerBlock, row_size*numThreadsPerBlock*sizeof(double) >>>(A,B,row_size,d_result);
cudaThreadSynchronize();
gettimeofday(&t, NULL);
t2 = t.tv_sec*1000.0 + (t.tv_usec/1000.0);
checkCUDAError("kernel invocation");
cudaMemcpy( h_result, d_result, numBlocks*sizeof(double), cudaMemcpyDeviceToHost );
checkCUDAError("memcpy");
//for(int k = 0; k<(max_eval+1)*numBlocks; k++ )
// printf("%lf\t",h_result[k]);
for(int k=0;k<numBlocks;k++)
sum+=h_result[k];
printf("TIME : %lf ms with ans = %lf\n\n\n",t2-t1,sum);
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
|
1d3c68ad785d17d6d54518ed416b8f75e92acb8c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
struct Vector
{
double x;
double y;
double z;
};
struct Object
{
unsigned char Type;
unsigned char R;
unsigned char G;
unsigned char B;
unsigned char atr1;
unsigned char atr2;
unsigned char atr3;
unsigned char atr4;
struct Vector position;
struct Vector size;
double r;
};
union Data {
struct Object obj;
unsigned char bytes[64];
};
struct DistanceDate {
double distanceValue;
unsigned char r, g, b;
};
__shared__ double lightIntensity, lightSize;
__shared__ Data* objects;
__shared__ int n;
extern "C"
__device__ Vector mul(Vector a, double value) {
Vector v = { a.x * value, a.y * value, a.z * value };
return v;
}
extern "C"
__device__ Vector sum(Vector a, Vector b) {
Vector v = { a.x + b.x, a.y + b.y, a.z + b.z };
return v;
}
extern "C"
__device__ Vector sub(Vector a, Vector b) {
Vector v = { a.x - b.x, a.y - b.y, a.z - b.z };
return v;
}
extern "C"
__device__ double dot(Vector a, Vector b) {
return a.x * b.x + a.y * b.y + a.z * b.z;
}
extern "C"
__device__ double distance(Vector a, Vector b) {
return sqrt((a.x - b.x) * (a.x - b.x) + (a.y - b.y) * (a.y - b.y) + (a.z - b.z) * (a.z - b.z));
}
extern "C"
__device__ Vector normalize(Vector a) {
Vector n = { 0, 0, 0 };
double l = distance(a, n);
Vector v = { a.x / l, a.y / l, a.z / l };
return v;
}
extern "C"
__device__ double smin(double a, double b, double k)
{
double m;
if ((0.5 + 0.5 * (b - a) / k) > 1)
m = 1;
else
m = (0.5 + 0.5 * (b - a) / k);
double h;
if (m > 0)
h = (0.5 + 0.5 * (b - a) / k);
else
h = 0;
return a * h + b * (1 - h) - k * h * (1.0 - h);
}
extern "C"
__device__ double cylinder(Vector p, Object obj)
{
Vector pa = sub(p, obj.position);
Vector ba = sub(obj.size, obj.position);
double baba = dot(ba, ba);
double paba = dot(pa, ba);
float x = distance(sub(mul(pa, baba), mul(ba, paba)), { 1,1,1 }) - obj.r * baba;
float y = abs(paba - baba * 0.5) - baba * 0.5;
float x2 = x * x;
float y2 = y * y * baba;
float d = (max(x, y) < 0.0) ? -min(x2, y2) : (((x > 0.0) ? x2 : 0.0) + ((y > 0.0) ? y2 : 0.0));
float t = 0;
if (d > 0)
{
t = 1;
}
if (d < 0)
{
t = -1;
}
return t * sqrt(abs(d)) / baba;
}
extern "C"
__device__ double cube(Object obj, Vector p) {
Vector d = sub({ abs(p.x - obj.position.x), abs(p.y - obj.position.y), abs(p.z - obj.position.z) }, { obj.size.x, obj.size.y, obj.size.z});
double insideDistance = min(max(d.x, max(d.y, d.z)), (double)0);
if (d.x < 0.0)
d.x = 0.0;
if (d.y < 0.0)
d.y = 0.0;
if (d.z < 0.0)
d.z = 0.0;
double outsideDistance = sqrt(d.x * d.x + d.y * d.y + d.z * d.z);
return insideDistance + outsideDistance;
}
__device__ double TriPrism(Vector p, Object obj)
{
Vector q = { abs(p.x), abs(p.y), abs(p.z) };
return max(q.z - obj.position.y, max(q.x * 0.866025 + p.y * 0.5, -p.y) - obj.position.x * 0.5);
}
extern "C"
__device__ double distanceByType(Object obj, Vector p) {
switch (obj.Type) {
case 1:
return distance(p, obj.position) - obj.r;
break;
case 2:
return distance(p, obj.position) - obj.r;
break;
case 3:
return -(distance(p, obj.position) - obj.r);
break;
case 4:
return cube(obj, p);
break;
case 5:
return cube(obj, p);
break;
case 6:
return -cube(obj, p);
break;
case 7:
return cylinder(p, obj);
break;
case 8:
return cylinder(p, obj);
break;
case 9:
return -cylinder(p, obj);
break;
case 10:
return TriPrism(p, obj);
break;
case 11:
return TriPrism(p, obj);
break;
case 12:
return -TriPrism(p, obj);
break;
default:
break;
}
}
extern "C"
__device__ DistanceDate getDist(Vector p) {
union Data* data;
data = objects;
DistanceDate value;
value.distanceValue = distanceByType(data->obj, p);
value.r = data->obj.R;
value.g = data->obj.G;
value.b = data->obj.B;
double dist = value.distanceValue;
for (int i = 1; i < n; i++)
{
data = (Data*)((uintptr_t)data + sizeof(Data));
dist = distanceByType(data->obj, p);
if (value.distanceValue > dist && (data->obj.Type % 3 == 1 || data->obj.Type == 0)) {
value.distanceValue = dist;
value.r = data->obj.R;
value.g = data->obj.G;
value.b = data->obj.B;
}
if (value.distanceValue <= dist && data->obj.Type % 3 != 1) {
value.distanceValue = dist;
value.r = data->obj.R;
value.g = data->obj.G;
value.b = data->obj.B;
}
}
return value;
}
extern "C"
__device__ Vector getNormal(Vector p)
{
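    // Surface normal from a one-sided finite-difference gradient of the distance field (step 0.001).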
double d = getDist(p).distanceValue;
double p1 = getDist(sub(p, { 0.001, 0, 0 })).distanceValue;
double p2 = getDist(sub(p, { 0, 0.001, 0 })).distanceValue;
double p3 = getDist(sub(p, { 0, 0, 0.001 })).distanceValue;
Vector tri = { p1, p2, p3 };
Vector di = { d, d, d };
Vector n = sub(di, tri);
return normalize(n);
}
extern "C"
__device__ double light(Vector p) {
Vector lightPos = { -5, 5, -10 };
Vector lightDir = normalize(sub(lightPos, p));
Vector normal = getNormal(p);
double lI = lightIntensity;
double lS = lightSize;
double dif = dot(normal, lightDir) * (1 - lS) * lI + lS * lI;
return dif;
}
extern "C"
__device__ DistanceDate rayMarching(Vector ro, Vector rd) {
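    // Sphere tracing: advance along the ray by the scene distance until a hit (< 0.001) or escape (> 100).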
Vector p = { ro.x, ro.y, ro.z };
for (int i = 0; i < 300; i++) {
DistanceDate d = getDist(p);
if (d.distanceValue > 100)
break;
p = sum(p, mul(rd, d.distanceValue));
if (d.distanceValue < 0.001) {
return { light(p), d.r, d.g, d.b };
}
}
return { 0, 0, 0, 0};
}
extern "C"
__global__ void draw(unsigned char* data, Data* input, int on, int widht, int height, double camX, double camY, double camZ, float vA, float hA, float lI, float lS, int pn, int pc) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = (height/pc) * pn + blockIdx.y * blockDim.y + threadIdx.y;
if (x >= widht || y >= height)
return;
objects = input;
n = on;
double qx = ((double)x / widht) * 2 - 1;
double qy = ((double)y / height) * 2 - 1;
lightIntensity = lI;
lightSize = lS;
qx = qx * ((double)widht / (double)height);
Vector rd = { 1, qx, qy };
rd = normalize(rd);
double s = sin(vA);
double c = cos(vA);
rd = { c * rd.x + (-s) * rd.z, rd.y, s * rd.x + c * rd.z };
s = sin(hA);
c = cos(hA);
rd = { c * rd.x + (-s) * rd.y, s * rd.x + c * rd.y, rd.z };
DistanceDate col = rayMarching({camX, camY, camZ}, rd);
if (col.distanceValue != 0) {
data[widht * 3 * y + 3 * x ] = (unsigned char)((double)col.r * col.distanceValue);
data[widht * 3 * y + 3 * x + 1] = (unsigned char)((double)col.g * col.distanceValue);
data[widht * 3 * y + 3 * x + 2] = (unsigned char)((double)col.b * col.distanceValue);
}
else
{
data[widht * 3 * y + 3 * x] = (unsigned char)((double)117 * lI);
data[widht * 3 * y + 3 * x + 1] = (unsigned char)((double)187 * lI);
data[widht * 3 * y + 3 * x + 2] = (unsigned char)((double)253 * lI);
}
} | 1d3c68ad785d17d6d54518ed416b8f75e92acb8c.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
struct Vector
{
double x;
double y;
double z;
};
struct Object
{
unsigned char Type;
unsigned char R;
unsigned char G;
unsigned char B;
unsigned char atr1;
unsigned char atr2;
unsigned char atr3;
unsigned char atr4;
struct Vector position;
struct Vector size;
double r;
};
union Data {
struct Object obj;
unsigned char bytes[64];
};
struct DistanceDate {
double distanceValue;
unsigned char r, g, b;
};
__shared__ double lightIntensity, lightSize;
__shared__ Data* objects;
__shared__ int n;
extern "C"
__device__ Vector mul(Vector a, double value) {
Vector v = { a.x * value, a.y * value, a.z * value };
return v;
}
extern "C"
__device__ Vector sum(Vector a, Vector b) {
Vector v = { a.x + b.x, a.y + b.y, a.z + b.z };
return v;
}
extern "C"
__device__ Vector sub(Vector a, Vector b) {
Vector v = { a.x - b.x, a.y - b.y, a.z - b.z };
return v;
}
extern "C"
__device__ double dot(Vector a, Vector b) {
return a.x * b.x + a.y * b.y + a.z * b.z;
}
extern "C"
__device__ double distance(Vector a, Vector b) {
return sqrt((a.x - b.x) * (a.x - b.x) + (a.y - b.y) * (a.y - b.y) + (a.z - b.z) * (a.z - b.z));
}
extern "C"
__device__ Vector normalize(Vector a) {
Vector n = { 0, 0, 0 };
double l = distance(a, n);
Vector v = { a.x / l, a.y / l, a.z / l };
return v;
}
extern "C"
__device__ double smin(double a, double b, double k)
{
double m;
if ((0.5 + 0.5 * (b - a) / k) > 1)
m = 1;
else
m = (0.5 + 0.5 * (b - a) / k);
double h;
if (m > 0)
h = (0.5 + 0.5 * (b - a) / k);
else
h = 0;
return a * h + b * (1 - h) - k * h * (1.0 - h);
}
extern "C"
__device__ double cylinder(Vector p, Object obj)
{
Vector pa = sub(p, obj.position);
Vector ba = sub(obj.size, obj.position);
double baba = dot(ba, ba);
double paba = dot(pa, ba);
float x = distance(sub(mul(pa, baba), mul(ba, paba)), { 1,1,1 }) - obj.r * baba;
float y = abs(paba - baba * 0.5) - baba * 0.5;
float x2 = x * x;
float y2 = y * y * baba;
float d = (max(x, y) < 0.0) ? -min(x2, y2) : (((x > 0.0) ? x2 : 0.0) + ((y > 0.0) ? y2 : 0.0));
float t = 0;
if (d > 0)
{
t = 1;
}
if (d < 0)
{
t = -1;
}
return t * sqrt(abs(d)) / baba;
}
extern "C"
__device__ double cube(Object obj, Vector p) {
Vector d = sub({ abs(p.x - obj.position.x), abs(p.y - obj.position.y), abs(p.z - obj.position.z) }, { obj.size.x, obj.size.y, obj.size.z});
double insideDistance = min(max(d.x, max(d.y, d.z)), (double)0);
if (d.x < 0.0)
d.x = 0.0;
if (d.y < 0.0)
d.y = 0.0;
if (d.z < 0.0)
d.z = 0.0;
double outsideDistance = sqrt(d.x * d.x + d.y * d.y + d.z * d.z);
return insideDistance + outsideDistance;
}
__device__ double TriPrism(Vector p, Object obj)
{
Vector q = { abs(p.x), abs(p.y), abs(p.z) };
return max(q.z - obj.position.y, max(q.x * 0.866025 + p.y * 0.5, -p.y) - obj.position.x * 0.5);
}
extern "C"
__device__ double distanceByType(Object obj, Vector p) {
switch (obj.Type) {
case 1:
return distance(p, obj.position) - obj.r;
break;
case 2:
return distance(p, obj.position) - obj.r;
break;
case 3:
return -(distance(p, obj.position) - obj.r);
break;
case 4:
return cube(obj, p);
break;
case 5:
return cube(obj, p);
break;
case 6:
return -cube(obj, p);
break;
case 7:
return cylinder(p, obj);
break;
case 8:
return cylinder(p, obj);
break;
case 9:
return -cylinder(p, obj);
break;
case 10:
return TriPrism(p, obj);
break;
case 11:
return TriPrism(p, obj);
break;
case 12:
return -TriPrism(p, obj);
break;
default:
break;
}
}
extern "C"
__device__ DistanceDate getDist(Vector p) {
union Data* data;
data = objects;
DistanceDate value;
value.distanceValue = distanceByType(data->obj, p);
value.r = data->obj.R;
value.g = data->obj.G;
value.b = data->obj.B;
double dist = value.distanceValue;
for (int i = 1; i < n; i++)
{
data = (Data*)((uintptr_t)data + sizeof(Data));
dist = distanceByType(data->obj, p);
if (value.distanceValue > dist && (data->obj.Type % 3 == 1 || data->obj.Type == 0)) {
value.distanceValue = dist;
value.r = data->obj.R;
value.g = data->obj.G;
value.b = data->obj.B;
}
if (value.distanceValue <= dist && data->obj.Type % 3 != 1) {
value.distanceValue = dist;
value.r = data->obj.R;
value.g = data->obj.G;
value.b = data->obj.B;
}
}
return value;
}
extern "C"
__device__ Vector getNormal(Vector p)
{
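    // Surface normal from a one-sided finite-difference gradient of the distance field (step 0.001).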
double d = getDist(p).distanceValue;
double p1 = getDist(sub(p, { 0.001, 0, 0 })).distanceValue;
double p2 = getDist(sub(p, { 0, 0.001, 0 })).distanceValue;
double p3 = getDist(sub(p, { 0, 0, 0.001 })).distanceValue;
Vector tri = { p1, p2, p3 };
Vector di = { d, d, d };
Vector n = sub(di, tri);
return normalize(n);
}
extern "C"
__device__ double light(Vector p) {
Vector lightPos = { -5, 5, -10 };
Vector lightDir = normalize(sub(lightPos, p));
Vector normal = getNormal(p);
double lI = lightIntensity;
double lS = lightSize;
double dif = dot(normal, lightDir) * (1 - lS) * lI + lS * lI;
return dif;
}
extern "C"
__device__ DistanceDate rayMarching(Vector ro, Vector rd) {
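    // Sphere tracing: advance along the ray by the scene distance until a hit (< 0.001) or escape (> 100).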
Vector p = { ro.x, ro.y, ro.z };
for (int i = 0; i < 300; i++) {
DistanceDate d = getDist(p);
if (d.distanceValue > 100)
break;
p = sum(p, mul(rd, d.distanceValue));
if (d.distanceValue < 0.001) {
return { light(p), d.r, d.g, d.b };
}
}
return { 0, 0, 0, 0};
}
extern "C"
__global__ void draw(unsigned char* data, Data* input, int on, int widht, int height, double camX, double camY, double camZ, float vA, float hA, float lI, float lS, int pn, int pc) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = (height/pc) * pn + blockIdx.y * blockDim.y + threadIdx.y;
if (x >= widht || y >= height)
return;
objects = input;
n = on;
double qx = ((double)x / widht) * 2 - 1;
double qy = ((double)y / height) * 2 - 1;
lightIntensity = lI;
lightSize = lS;
qx = qx * ((double)widht / (double)height);
Vector rd = { 1, qx, qy };
rd = normalize(rd);
double s = sin(vA);
double c = cos(vA);
rd = { c * rd.x + (-s) * rd.z, rd.y, s * rd.x + c * rd.z };
s = sin(hA);
c = cos(hA);
rd = { c * rd.x + (-s) * rd.y, s * rd.x + c * rd.y, rd.z };
DistanceDate col = rayMarching({camX, camY, camZ}, rd);
if (col.distanceValue != 0) {
data[widht * 3 * y + 3 * x ] = (unsigned char)((double)col.r * col.distanceValue);
data[widht * 3 * y + 3 * x + 1] = (unsigned char)((double)col.g * col.distanceValue);
data[widht * 3 * y + 3 * x + 2] = (unsigned char)((double)col.b * col.distanceValue);
}
else
{
data[widht * 3 * y + 3 * x] = (unsigned char)((double)117 * lI);
data[widht * 3 * y + 3 * x + 1] = (unsigned char)((double)187 * lI);
data[widht * 3 * y + 3 * x + 2] = (unsigned char)((double)253 * lI);
}
} |
87d6aa4214be56e95330844a72f102ba34903033.hip | // !!! This is a file automatically generated by hipify!!!
#include <needle.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
#define printf(f, ...) ((void)(f, __VA_ARGS__),0)
#endif
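/* Fills the upper-left triangle of the alignment score matrix one anti-diagonal
   at a time: cells on the same anti-diagonal are independent, so the block's
   threads compute them in parallel while s_dia1/s_dia2/s_dia3 rotate through the
   two most recent diagonals in shared memory. */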
__device__ void dia_upperleft(char *s_seq1, unsigned int seq1_len,
char *s_seq2, unsigned int seq2_len,
short *matrix, unsigned int dia_len,
short *s_dia1, short *s_dia2, short *s_dia3,
int penalty)
{
int tid = threadIdx.x;
int stripe = blockDim.x;
int index_x;
int index_y;
int iteration;
// process the left-up triangle
s_dia1[0] = matrix[0] = 0;
s_dia2[0] = matrix[1] = penalty * 1;
s_dia2[1] = matrix[1*(seq1_len+1)] = penalty * 1;
for (int i=2; i<=seq2_len; ++i){ // ith diagonal line
iteration = (i+1)/blockDim.x;
if ( (i+1)%blockDim.x != 0 ) iteration++;
if (i%3==2) {
for (int j=0; j<iteration; ++j) {
if ( tid+stripe*j<=i ) { // ith diagonal has i+1 elements
index_x = i-(tid+stripe*j); index_y = tid+stripe*j;
if ( index_y==0 || index_y==i ) s_dia3[ index_y ] = penalty * i;
else {
s_dia3[ index_y ] = \
maximum(s_dia2[ index_y ] + penalty, // up
s_dia2[ index_y-1 ] + penalty, // left
s_dia1[ index_y-1 ]+blosum62[ s_seq2[index_x] ][ s_seq1[index_y] ] );
}
// store to global memory
matrix[ index_x*(seq1_len+1)+index_y ] = s_dia3[ index_y ];
}
}
}
else if (i%3==0) {
for (int j=0; j<iteration; ++j) {
if ( tid+stripe*j<=i ) {
index_x = i-(tid+stripe*j); index_y = tid+stripe*j;
if ( index_y==0 || index_y==i ) s_dia1[ index_y ] = penalty * i;
else {
s_dia1[ tid+stripe*j ] = \
maximum(s_dia3[ index_y ] + penalty, // up
s_dia3[ index_y-1 ] + penalty, // left
s_dia2[ index_y-1 ]+blosum62[ s_seq2[index_x] ][ s_seq1[index_y] ] );
}
matrix[ index_x*(seq1_len+1)+index_y ] = s_dia1[ index_y ];
}
}
}
else { //i%3==1
for (int j=0; j<iteration; ++j) {
index_x = i-(tid+stripe*j); index_y = tid+stripe*j;
if ( tid+stripe*j<=i ) {
if ( (tid+stripe*j)==0 || (tid+stripe*j)==i ) s_dia2[ tid+stripe*j ] = penalty * i;
else {
s_dia2[ tid+stripe*j ] = \
maximum(s_dia1[ tid+stripe*j ] + penalty, // up
s_dia1[ tid+stripe*j-1 ] + penalty, // left
s_dia3[ tid+stripe*j-1 ]+blosum62[ s_seq2[index_x] ][ s_seq1[index_y] ] );
}
matrix[ index_x*(seq1_len+1)+index_y ] = s_dia2[ index_y ];
}
}
}
__syncthreads();
}
}
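/* Fills the lower-right triangle starting at column `start`: the boundary
   diagonal is reloaded from global memory, then the same three shared-memory
   diagonal buffers are rotated as in dia_upperleft. */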
__device__ void dia_lowerright( char *s_seq1, unsigned int len1,
char *s_seq2, unsigned int len2,
short *matrix, unsigned int dia_len,
short *s_dia1, short *s_dia2, short *s_dia3,
unsigned int start, int penalty)
{
int tid = threadIdx.x;
int stripe = blockDim.x;
int index_x, index_y;
int iteration = dia_len/blockDim.x;
if ( dia_len%blockDim.x!=0 ) iteration++;
// initial, load from shared memory
for (int i=0; i<iteration; ++i) {
if ( tid+stripe*i<dia_len ) {
index_x = len2 - (tid+stripe*i); index_y = start-1 + (tid+stripe*i);
s_dia1[ tid+stripe*i ] = matrix[ index_x*(len1+1)+index_y ];
}
}
s_dia1[ dia_len ] = matrix[ (len2-dia_len)*(len1+1)+start-1 + dia_len ];
__syncthreads();
for (int i=0; i<iteration; ++i) {
if ( tid+stripe*i<dia_len ) {
index_x = len2 - (tid+stripe*i); index_y = start + (tid+stripe*i);
s_dia2[ tid+stripe*i ] = \
maximum(s_dia1[ tid+stripe*i+1 ] + penalty, // up
s_dia1[ tid+stripe*i ] + penalty, // left
matrix[(index_x-1)*(len1+1)+index_y-1]+blosum62[s_seq2[index_x]][s_seq1[index_y]] );
matrix[ index_x*(len1+1)+index_y ] = s_dia2[ tid+stripe*i ];
}
}
__syncthreads();
for (int i=1; i<dia_len; ++i){ // ith diagonal line
iteration = (dia_len-i)/blockDim.x;
if ( (dia_len-i)%blockDim.x != 0 ) iteration++;
if (i%3==1) {
for (int j=0; j<iteration; ++j) {
index_x = len2 - (tid+stripe*j);
index_y = start + i + (tid+stripe*j);
if ( tid+stripe*j +i <dia_len ) {
s_dia3[ tid+stripe*j ] = \
maximum(s_dia2[ tid+stripe*j+1 ] + penalty, // up
s_dia2[ tid+stripe*j ] + penalty, // left
s_dia1[ tid+stripe*j+1 ]+blosum62[ s_seq2[index_x] ][ s_seq1[index_y] ] );
// store to global memory
matrix[ index_x*(len1+1)+index_y ] = s_dia3[ tid+stripe*j ];
}
}
}
else if (i%3==2) {
for (int j=0; j<iteration; ++j) {
index_x = len2 - (tid+stripe*j);
index_y = start + i + (tid+stripe*j);
if ( tid+stripe*j +i <dia_len ) {
s_dia1[ tid+stripe*j ] = \
maximum(s_dia3[ tid+stripe*j+1 ] + penalty, // up
s_dia3[ tid+stripe*j ] + penalty, // left
s_dia2[ tid+stripe*j+1 ]+blosum62[ s_seq2[index_x] ][ s_seq1[index_y] ] );
// store to global memory
matrix[ index_x*(len1+1)+index_y ] = s_dia1[ tid+stripe*j ];
}
}
}
else { // i%3==0
for (int j=0; j<iteration; ++j) {
index_x = len2 - (tid+stripe*j);
index_y = start + i + (tid+stripe*j);
if ( tid+stripe*j +i <dia_len ) {
s_dia2[ tid+stripe*j ] = \
maximum(s_dia1[ tid+stripe*j+1 ] + penalty, // up
s_dia1[ tid+stripe*j ] + penalty, // left
s_dia3[ tid+stripe*j+1 ]+blosum62[ s_seq2[index_x] ][ s_seq1[index_y] ] );
// store to global memory
matrix[ index_x*(len1+1)+index_y ] = s_dia2[ tid+stripe*j ];
}
}
}
__syncthreads();
}
}
/*******************************************************************************
 pos1 and pos2 are the arrays storing the start offset of each input sequence
********************************************************************************/
__global__ void needleman_cuda_diagonal(char *sequence_set1, char *sequence_set2,
unsigned int *pos1, unsigned int *pos2,
short *score_matrix, unsigned int *pos_matrix,
unsigned int max_pair_no, short penalty)
{
int pair_no, seq1_len, seq2_len;
int tid = threadIdx.x;
// 48 KB/4 = 12KB, seq1+sqe2, diagonal1, diagonal2, diagonal3
__shared__ char s_seq1[MAX_SEQ_LEN];
__shared__ char s_seq2[MAX_SEQ_LEN];
__shared__ short s_dia1[MAX_SEQ_LEN];
__shared__ short s_dia2[MAX_SEQ_LEN];
__shared__ short s_dia3[MAX_SEQ_LEN];
	pair_no = blockIdx.x; // for each block, calculate one pair
char *seq1 = sequence_set1 + pos1[pair_no];
char *seq2 = sequence_set2 + pos2[pair_no];
short *matrix = score_matrix+pos_matrix[pair_no];
seq1_len = pos1[pair_no+1] - pos1[pair_no];
seq2_len = pos2[pair_no+1] - pos2[pair_no];
// load the two sequences
unsigned int stride_length = blockDim.x;
for (int i=0; i<seq1_len/stride_length+1; ++i){
if ( tid+i*stride_length<seq1_len )
s_seq1[tid+i*stride_length+1] = seq1[tid+i*stride_length];
}
for (int i=0; i<seq2_len/stride_length+1; ++i){
if ( tid+i*stride_length<seq2_len )
s_seq2[tid+i*stride_length+1] = seq2[tid+i*stride_length];
}
__syncthreads();
/*dia_upperleft( s_seq1, seq1_len, s_seq2, seq2_len, matrix, seq2_len,
s_dia1, s_dia2, s_dia3, penalty);*/
/*dia_lowerright( s_seq1, seq1_len, s_seq2, seq2_len, matrix, seq2_len,
s_dia1, s_dia2, s_dia3, 1, penalty);*/
int stripe = blockDim.x;
int index_x;
int index_y;
int iteration;
// process the left-up triangle
s_dia1[0] = matrix[0] = 0;
s_dia2[0] = matrix[1] = penalty * 1;
s_dia2[1] = matrix[1*(seq1_len+1)] = penalty * 1;
for (int i=2; i<=seq2_len; ++i){ // ith diagonal line
iteration = (i+1)/blockDim.x;
if ( (i+1)%blockDim.x != 0 ) iteration++;
if (i%3==2) {
for (int j=0; j<iteration; ++j) {
if ( tid+stripe*j<=i ) { // ith diagonal has i+1 elements
index_x = i-(tid+stripe*j); index_y = tid+stripe*j;
if ( index_y==0 || index_y==i ) s_dia3[ index_y ] = penalty * i;
else {
s_dia3[ index_y ] = \
maximum(s_dia2[ index_y ] + penalty, // up
s_dia2[ index_y-1 ] + penalty, // left
s_dia1[ index_y-1 ]+blosum62[ s_seq2[index_x] ][ s_seq1[index_y] ] );
}
// store to global memory
matrix[ index_x*(seq1_len+1)+index_y ] = s_dia3[ index_y ];
}
}
}
else if (i%3==0) {
for (int j=0; j<iteration; ++j) {
if ( tid+stripe*j<=i ) {
index_x = i-(tid+stripe*j); index_y = tid+stripe*j;
if ( index_y==0 || index_y==i ) s_dia1[ index_y ] = penalty * i;
else {
s_dia1[ tid+stripe*j ] = \
maximum(s_dia3[ index_y ] + penalty, // up
s_dia3[ index_y-1 ] + penalty, // left
s_dia2[ index_y-1 ]+blosum62[ s_seq2[index_x] ][ s_seq1[index_y] ] );
}
matrix[ index_x*(seq1_len+1)+index_y ] = s_dia1[ index_y ];
}
}
}
else { //i%3==1
for (int j=0; j<iteration; ++j) {
index_x = i-(tid+stripe*j); index_y = tid+stripe*j;
if ( tid+stripe*j<=i ) {
if ( (tid+stripe*j)==0 || (tid+stripe*j)==i ) s_dia2[ tid+stripe*j ] = penalty * i;
else {
s_dia2[ tid+stripe*j ] = \
maximum(s_dia1[ tid+stripe*j ] + penalty, // up
s_dia1[ tid+stripe*j-1 ] + penalty, // left
s_dia3[ tid+stripe*j-1 ]+blosum62[ s_seq2[index_x] ][ s_seq1[index_y] ] );
}
matrix[ index_x*(seq1_len+1)+index_y ] = s_dia2[ index_y ];
}
}
}
__syncthreads();
}
//int tid = threadIdx.x;
stripe = blockDim.x;
//int index_x, index_y;
iteration = (seq1_len+1)/blockDim.x;
if ( (seq1_len+1)%blockDim.x!=0 ) iteration++;
// initial, load from shared memory
for (int i=0; i<iteration; ++i) {
if ( tid+stripe*i<seq1_len+1 ) {
index_x = seq2_len - (tid+stripe*i); index_y = (tid+stripe*i);
s_dia1[ tid+stripe*i ] = matrix[ index_x*(seq1_len+1)+index_y ];
}
}
__syncthreads();
	// calculate the 1st diagonal
for (int i=0; i<iteration; ++i) {
if ( tid+stripe*i<seq1_len ) {
index_x = seq2_len - (tid+stripe*i); index_y = 1 + (tid+stripe*i);
s_dia2[ tid+stripe*i ] = \
maximum(s_dia1[ tid+stripe*i+1 ] + penalty, // up
s_dia1[ tid+stripe*i ] + penalty, // left
matrix[(index_x-1)*(seq1_len+1)+index_y-1]+blosum62[s_seq2[index_x]][s_seq1[index_y]] );
matrix[ index_x*(seq1_len+1)+index_y ] = s_dia2[ tid+stripe*i ];
}
}
__syncthreads();
for (int i=2; i<=seq1_len; ++i){ // ith diagonal line, start from 2
iteration = (seq1_len-i+1)/blockDim.x;
if ( (seq1_len-i+1)%blockDim.x != 0 ) iteration++;
if (i%3==2) {
for (int j=0; j<iteration; ++j) {
index_x = seq2_len - (tid+stripe*j);
index_y = i + (tid+stripe*j);
if ( tid+stripe*j +i <seq1_len+1 ) {
s_dia3[ tid+stripe*j ] = \
maximum(s_dia2[ tid+stripe*j+1 ] + penalty, // up
s_dia2[ tid+stripe*j ] + penalty, // left
s_dia1[ tid+stripe*j+1 ]+blosum62[ s_seq2[index_x] ][ s_seq1[index_y] ] );
// store to global memory
matrix[ index_x*(seq1_len+1)+index_y ] = s_dia3[ tid+stripe*j ];
}
}
}
else if (i%3==0) {
for (int j=0; j<iteration; ++j) {
index_x = seq2_len - (tid+stripe*j);
index_y = i + (tid+stripe*j);
if ( tid+stripe*j +i <seq1_len+1 ) {
s_dia1[ tid+stripe*j ] = \
maximum(s_dia3[ tid+stripe*j+1 ] + penalty, // up
s_dia3[ tid+stripe*j ] + penalty, // left
s_dia2[ tid+stripe*j+1 ]+blosum62[ s_seq2[index_x] ][ s_seq1[index_y] ] );
// store to global memory
matrix[ index_x*(seq1_len+1)+index_y ] = s_dia1[ tid+stripe*j ];
}
}
}
else { // i%3==1
for (int j=0; j<iteration; ++j) {
index_x = seq2_len - (tid+stripe*j);
index_y = i + (tid+stripe*j);
if ( tid+stripe*j +i <seq1_len+1 ) {
s_dia2[ tid+stripe*j ] = \
maximum(s_dia1[ tid+stripe*j+1 ] + penalty, // up
s_dia1[ tid+stripe*j ] + penalty, // left
s_dia3[ tid+stripe*j+1 ]+blosum62[ s_seq2[index_x] ][ s_seq1[index_y] ] );
// store to global memory
matrix[ index_x*(seq1_len+1)+index_y ] = s_dia2[ tid+stripe*j ];
}
}
}
__syncthreads();
}
}
| 87d6aa4214be56e95330844a72f102ba34903033.cu | #include <needle.h>
#include <stdio.h>
#include <cuda.h>
#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
#define printf(f, ...) ((void)(f, __VA_ARGS__),0)
#endif
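/*******************************************************************************
Fills the upper-left triangle of the (seq1_len+1) x (seq2_len+1) score matrix.
Anti-diagonals are processed one at a time; the two previous diagonals are kept
in shared memory (s_dia1/2/3, rotated via i%3) and every finished diagonal is
written back to the global matrix. Border cells are initialised to penalty*i.
*******************************************************************************/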
__device__ void dia_upperleft(char *s_seq1, unsigned int seq1_len,
char *s_seq2, unsigned int seq2_len,
short *matrix, unsigned int dia_len,
short *s_dia1, short *s_dia2, short *s_dia3,
int penalty)
{
int tid = threadIdx.x;
int stripe = blockDim.x;
int index_x;
int index_y;
int iteration;
// process the left-up triangle
s_dia1[0] = matrix[0] = 0;
s_dia2[0] = matrix[1] = penalty * 1;
s_dia2[1] = matrix[1*(seq1_len+1)] = penalty * 1;
for (int i=2; i<=seq2_len; ++i){ // ith diagonal line
iteration = (i+1)/blockDim.x;
if ( (i+1)%blockDim.x != 0 ) iteration++;
if (i%3==2) {
for (int j=0; j<iteration; ++j) {
if ( tid+stripe*j<=i ) { // ith diagonal has i+1 elements
index_x = i-(tid+stripe*j); index_y = tid+stripe*j;
if ( index_y==0 || index_y==i ) s_dia3[ index_y ] = penalty * i;
else {
s_dia3[ index_y ] = \
maximum(s_dia2[ index_y ] + penalty, // up
s_dia2[ index_y-1 ] + penalty, // left
s_dia1[ index_y-1 ]+blosum62[ s_seq2[index_x] ][ s_seq1[index_y] ] );
}
// store to global memory
matrix[ index_x*(seq1_len+1)+index_y ] = s_dia3[ index_y ];
}
}
}
else if (i%3==0) {
for (int j=0; j<iteration; ++j) {
if ( tid+stripe*j<=i ) {
index_x = i-(tid+stripe*j); index_y = tid+stripe*j;
if ( index_y==0 || index_y==i ) s_dia1[ index_y ] = penalty * i;
else {
s_dia1[ tid+stripe*j ] = \
maximum(s_dia3[ index_y ] + penalty, // up
s_dia3[ index_y-1 ] + penalty, // left
s_dia2[ index_y-1 ]+blosum62[ s_seq2[index_x] ][ s_seq1[index_y] ] );
}
matrix[ index_x*(seq1_len+1)+index_y ] = s_dia1[ index_y ];
}
}
}
else { //i%3==1
for (int j=0; j<iteration; ++j) {
index_x = i-(tid+stripe*j); index_y = tid+stripe*j;
if ( tid+stripe*j<=i ) {
if ( (tid+stripe*j)==0 || (tid+stripe*j)==i ) s_dia2[ tid+stripe*j ] = penalty * i;
else {
s_dia2[ tid+stripe*j ] = \
maximum(s_dia1[ tid+stripe*j ] + penalty, // up
s_dia1[ tid+stripe*j-1 ] + penalty, // left
s_dia3[ tid+stripe*j-1 ]+blosum62[ s_seq2[index_x] ][ s_seq1[index_y] ] );
}
matrix[ index_x*(seq1_len+1)+index_y ] = s_dia2[ index_y ];
}
}
}
__syncthreads();
}
}
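/*******************************************************************************
Fills the lower-right triangle of the score matrix, starting at column `start`.
The boundary anti-diagonal is first reloaded from global memory into s_dia1;
the remaining (shrinking) anti-diagonals are then computed with the same three
rotating shared-memory buffers and written back to the global matrix.
*******************************************************************************/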
__device__ void dia_lowerright( char *s_seq1, unsigned int len1,
char *s_seq2, unsigned int len2,
short *matrix, unsigned int dia_len,
short *s_dia1, short *s_dia2, short *s_dia3,
unsigned int start, int penalty)
{
int tid = threadIdx.x;
int stripe = blockDim.x;
int index_x, index_y;
int iteration = dia_len/blockDim.x;
if ( dia_len%blockDim.x!=0 ) iteration++;
// initial, load from shared memory
for (int i=0; i<iteration; ++i) {
if ( tid+stripe*i<dia_len ) {
index_x = len2 - (tid+stripe*i); index_y = start-1 + (tid+stripe*i);
s_dia1[ tid+stripe*i ] = matrix[ index_x*(len1+1)+index_y ];
}
}
s_dia1[ dia_len ] = matrix[ (len2-dia_len)*(len1+1)+start-1 + dia_len ];
__syncthreads();
for (int i=0; i<iteration; ++i) {
if ( tid+stripe*i<dia_len ) {
index_x = len2 - (tid+stripe*i); index_y = start + (tid+stripe*i);
s_dia2[ tid+stripe*i ] = \
maximum(s_dia1[ tid+stripe*i+1 ] + penalty, // up
s_dia1[ tid+stripe*i ] + penalty, // left
matrix[(index_x-1)*(len1+1)+index_y-1]+blosum62[s_seq2[index_x]][s_seq1[index_y]] );
matrix[ index_x*(len1+1)+index_y ] = s_dia2[ tid+stripe*i ];
}
}
__syncthreads();
for (int i=1; i<dia_len; ++i){ // ith diagonal line
iteration = (dia_len-i)/blockDim.x;
if ( (dia_len-i)%blockDim.x != 0 ) iteration++;
if (i%3==1) {
for (int j=0; j<iteration; ++j) {
index_x = len2 - (tid+stripe*j);
index_y = start + i + (tid+stripe*j);
if ( tid+stripe*j +i <dia_len ) {
s_dia3[ tid+stripe*j ] = \
maximum(s_dia2[ tid+stripe*j+1 ] + penalty, // up
s_dia2[ tid+stripe*j ] + penalty, // left
s_dia1[ tid+stripe*j+1 ]+blosum62[ s_seq2[index_x] ][ s_seq1[index_y] ] );
// store to global memory
matrix[ index_x*(len1+1)+index_y ] = s_dia3[ tid+stripe*j ];
}
}
}
else if (i%3==2) {
for (int j=0; j<iteration; ++j) {
index_x = len2 - (tid+stripe*j);
index_y = start + i + (tid+stripe*j);
if ( tid+stripe*j +i <dia_len ) {
s_dia1[ tid+stripe*j ] = \
maximum(s_dia3[ tid+stripe*j+1 ] + penalty, // up
s_dia3[ tid+stripe*j ] + penalty, // left
s_dia2[ tid+stripe*j+1 ]+blosum62[ s_seq2[index_x] ][ s_seq1[index_y] ] );
// store to global memory
matrix[ index_x*(len1+1)+index_y ] = s_dia1[ tid+stripe*j ];
}
}
}
else { // i%3==0
for (int j=0; j<iteration; ++j) {
index_x = len2 - (tid+stripe*j);
index_y = start + i + (tid+stripe*j);
if ( tid+stripe*j +i <dia_len ) {
s_dia2[ tid+stripe*j ] = \
maximum(s_dia1[ tid+stripe*j+1 ] + penalty, // up
s_dia1[ tid+stripe*j ] + penalty, // left
s_dia3[ tid+stripe*j+1 ]+blosum62[ s_seq2[index_x] ][ s_seq1[index_y] ] );
// store to global memory
matrix[ index_x*(len1+1)+index_y ] = s_dia2[ tid+stripe*j ];
}
}
}
__syncthreads();
}
}
/*******************************************************************************
pos1 and pos2 are the arrays that store the start position of each sequence
********************************************************************************/
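// Host-side launch sketch (illustrative only; the buffer names are hypothetical and
// the penalty value is just an example). One block per sequence pair, threads stride
// along each anti-diagonal; pos1/pos2/pos_matrix hold prefix offsets with one extra
// trailing entry per array:
// needleman_cuda_diagonal<<<num_pairs, 128>>>(d_seqs1, d_seqs2, d_pos1, d_pos2,
// d_score, d_pos_matrix, num_pairs, -10);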
__global__ void needleman_cuda_diagonal(char *sequence_set1, char *sequence_set2,
unsigned int *pos1, unsigned int *pos2,
short *score_matrix, unsigned int *pos_matrix,
unsigned int max_pair_no, short penalty)
{
int pair_no, seq1_len, seq2_len;
int tid = threadIdx.x;
// 48 KB/4 = 12KB, seq1+sqe2, diagonal1, diagonal2, diagonal3
__shared__ char s_seq1[MAX_SEQ_LEN];
__shared__ char s_seq2[MAX_SEQ_LEN];
__shared__ short s_dia1[MAX_SEQ_LEN];
__shared__ short s_dia2[MAX_SEQ_LEN];
__shared__ short s_dia3[MAX_SEQ_LEN];
	pair_no = blockIdx.x; // for each block, calculate one pair
char *seq1 = sequence_set1 + pos1[pair_no];
char *seq2 = sequence_set2 + pos2[pair_no];
short *matrix = score_matrix+pos_matrix[pair_no];
seq1_len = pos1[pair_no+1] - pos1[pair_no];
seq2_len = pos2[pair_no+1] - pos2[pair_no];
// load the two sequences
unsigned int stride_length = blockDim.x;
for (int i=0; i<seq1_len/stride_length+1; ++i){
if ( tid+i*stride_length<seq1_len )
s_seq1[tid+i*stride_length+1] = seq1[tid+i*stride_length];
}
for (int i=0; i<seq2_len/stride_length+1; ++i){
if ( tid+i*stride_length<seq2_len )
s_seq2[tid+i*stride_length+1] = seq2[tid+i*stride_length];
}
__syncthreads();
/*dia_upperleft( s_seq1, seq1_len, s_seq2, seq2_len, matrix, seq2_len,
s_dia1, s_dia2, s_dia3, penalty);*/
/*dia_lowerright( s_seq1, seq1_len, s_seq2, seq2_len, matrix, seq2_len,
s_dia1, s_dia2, s_dia3, 1, penalty);*/
int stripe = blockDim.x;
int index_x;
int index_y;
int iteration;
// process the left-up triangle
s_dia1[0] = matrix[0] = 0;
s_dia2[0] = matrix[1] = penalty * 1;
s_dia2[1] = matrix[1*(seq1_len+1)] = penalty * 1;
for (int i=2; i<=seq2_len; ++i){ // ith diagonal line
iteration = (i+1)/blockDim.x;
if ( (i+1)%blockDim.x != 0 ) iteration++;
if (i%3==2) {
for (int j=0; j<iteration; ++j) {
if ( tid+stripe*j<=i ) { // ith diagonal has i+1 elements
index_x = i-(tid+stripe*j); index_y = tid+stripe*j;
if ( index_y==0 || index_y==i ) s_dia3[ index_y ] = penalty * i;
else {
s_dia3[ index_y ] = \
maximum(s_dia2[ index_y ] + penalty, // up
s_dia2[ index_y-1 ] + penalty, // left
s_dia1[ index_y-1 ]+blosum62[ s_seq2[index_x] ][ s_seq1[index_y] ] );
}
// store to global memory
matrix[ index_x*(seq1_len+1)+index_y ] = s_dia3[ index_y ];
}
}
}
else if (i%3==0) {
for (int j=0; j<iteration; ++j) {
if ( tid+stripe*j<=i ) {
index_x = i-(tid+stripe*j); index_y = tid+stripe*j;
if ( index_y==0 || index_y==i ) s_dia1[ index_y ] = penalty * i;
else {
s_dia1[ tid+stripe*j ] = \
maximum(s_dia3[ index_y ] + penalty, // up
s_dia3[ index_y-1 ] + penalty, // left
s_dia2[ index_y-1 ]+blosum62[ s_seq2[index_x] ][ s_seq1[index_y] ] );
}
matrix[ index_x*(seq1_len+1)+index_y ] = s_dia1[ index_y ];
}
}
}
else { //i%3==1
for (int j=0; j<iteration; ++j) {
index_x = i-(tid+stripe*j); index_y = tid+stripe*j;
if ( tid+stripe*j<=i ) {
if ( (tid+stripe*j)==0 || (tid+stripe*j)==i ) s_dia2[ tid+stripe*j ] = penalty * i;
else {
s_dia2[ tid+stripe*j ] = \
maximum(s_dia1[ tid+stripe*j ] + penalty, // up
s_dia1[ tid+stripe*j-1 ] + penalty, // left
s_dia3[ tid+stripe*j-1 ]+blosum62[ s_seq2[index_x] ][ s_seq1[index_y] ] );
}
matrix[ index_x*(seq1_len+1)+index_y ] = s_dia2[ index_y ];
}
}
}
__syncthreads();
}
//int tid = threadIdx.x;
stripe = blockDim.x;
//int index_x, index_y;
iteration = (seq1_len+1)/blockDim.x;
if ( (seq1_len+1)%blockDim.x!=0 ) iteration++;
// initial, load from shared memory
for (int i=0; i<iteration; ++i) {
if ( tid+stripe*i<seq1_len+1 ) {
index_x = seq2_len - (tid+stripe*i); index_y = (tid+stripe*i);
s_dia1[ tid+stripe*i ] = matrix[ index_x*(seq1_len+1)+index_y ];
}
}
__syncthreads();
	// calculate the 1st diagonal
for (int i=0; i<iteration; ++i) {
if ( tid+stripe*i<seq1_len ) {
index_x = seq2_len - (tid+stripe*i); index_y = 1 + (tid+stripe*i);
s_dia2[ tid+stripe*i ] = \
maximum(s_dia1[ tid+stripe*i+1 ] + penalty, // up
s_dia1[ tid+stripe*i ] + penalty, // left
matrix[(index_x-1)*(seq1_len+1)+index_y-1]+blosum62[s_seq2[index_x]][s_seq1[index_y]] );
matrix[ index_x*(seq1_len+1)+index_y ] = s_dia2[ tid+stripe*i ];
}
}
__syncthreads();
for (int i=2; i<=seq1_len; ++i){ // ith diagonal line, start from 2
iteration = (seq1_len-i+1)/blockDim.x;
if ( (seq1_len-i+1)%blockDim.x != 0 ) iteration++;
if (i%3==2) {
for (int j=0; j<iteration; ++j) {
index_x = seq2_len - (tid+stripe*j);
index_y = i + (tid+stripe*j);
if ( tid+stripe*j +i <seq1_len+1 ) {
s_dia3[ tid+stripe*j ] = \
maximum(s_dia2[ tid+stripe*j+1 ] + penalty, // up
s_dia2[ tid+stripe*j ] + penalty, // left
s_dia1[ tid+stripe*j+1 ]+blosum62[ s_seq2[index_x] ][ s_seq1[index_y] ] );
// store to global memory
matrix[ index_x*(seq1_len+1)+index_y ] = s_dia3[ tid+stripe*j ];
}
}
}
else if (i%3==0) {
for (int j=0; j<iteration; ++j) {
index_x = seq2_len - (tid+stripe*j);
index_y = i + (tid+stripe*j);
if ( tid+stripe*j +i <seq1_len+1 ) {
s_dia1[ tid+stripe*j ] = \
maximum(s_dia3[ tid+stripe*j+1 ] + penalty, // up
s_dia3[ tid+stripe*j ] + penalty, // left
s_dia2[ tid+stripe*j+1 ]+blosum62[ s_seq2[index_x] ][ s_seq1[index_y] ] );
// store to global memory
matrix[ index_x*(seq1_len+1)+index_y ] = s_dia1[ tid+stripe*j ];
}
}
}
else { // i%3==1
for (int j=0; j<iteration; ++j) {
index_x = seq2_len - (tid+stripe*j);
index_y = i + (tid+stripe*j);
if ( tid+stripe*j +i <seq1_len+1 ) {
s_dia2[ tid+stripe*j ] = \
maximum(s_dia1[ tid+stripe*j+1 ] + penalty, // up
s_dia1[ tid+stripe*j ] + penalty, // left
s_dia3[ tid+stripe*j+1 ]+blosum62[ s_seq2[index_x] ][ s_seq1[index_y] ] );
// store to global memory
matrix[ index_x*(seq1_len+1)+index_y ] = s_dia2[ tid+stripe*j ];
}
}
}
__syncthreads();
}
}
|
b255c7e615d592848dadab227fdcab46bd44621f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ScatterNdExecution.hpp"
namespace MNN {
namespace CUDA {
#define CUDA_KERNEL_LOOP(i, n) for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
template <typename T>
__global__ void SETZERO(const int n, T* outputPtr) {
CUDA_KERNEL_LOOP(index, n) {
outputPtr[index] = (T)0;
}
}
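// SCATTERND: one thread per index row. Each row's coordinates are combined with the
// precomputed output strides (dimsToCount) to get a flat offset, then accNumber
// consecutive update values are accumulated there with atomicAdd (several rows may
// target the same output slice).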
template<typename T>
__global__ void SCATTERND(const int n, const int indicesLastDim, const int accNumber, const int* indicesPtr,
const T* updatesPtr, T* outputPtr, const int32_t* dimsToCount) {
CUDA_KERNEL_LOOP(index, n) {
int pos = 0;
for (int j = 0; j < indicesLastDim; ++j) {
auto curIndex = (int)indicesPtr[index * indicesLastDim + j];
// MNN_ASSERT(curIndex >= 0 && curIndex < output->length(j));
pos += curIndex * dimsToCount[j];
}
for (int k = 0; k < accNumber; ++k) {
float updateValue = updatesPtr[index * accNumber + k];
atomicAdd(outputPtr + pos + k, updateValue);
}
}
}
ScatterNdExecution::ScatterNdExecution(Backend *backend) : Execution(backend) {
}
ScatterNdExecution::~ScatterNdExecution() {
if (nullptr != dimsTensor) {
backend()->onReleaseBuffer(dimsTensor.get(), Backend::DYNAMIC_SEPERATE);
}
}
ErrorCode ScatterNdExecution::onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
MNN_ASSERT(inputs.size() == 3);
MNN_ASSERT(outputs.size() == 1);
auto indices = inputs[0];
auto updates = inputs[1];
auto shape = inputs[2];
auto output = outputs[0];
const int indicesDimension = indices->dimensions();
mIndicesLastDim = indices->length(indicesDimension - 1);
mIndexes = indices->elementSize() / mIndicesLastDim;
mAccNumber = 1;
for (int i = indicesDimension - 1; i < updates->dimensions(); ++i) {
mAccNumber *= updates->length(i);
}
const int outputElementSize = output->elementSize();
mOutElementSize = outputElementSize;
int remainSize = outputElementSize;
std::vector<int> temp(mIndicesLastDim, 0);
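    // temp[i] becomes the row-major stride of output dimension i, i.e. how many
    // elements one step along dimension i spans in the flattened output.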
for (int i = 0; i < mIndicesLastDim; ++i) {
temp[i] = remainSize / output->length(i);
remainSize = temp[i];
}
//save dimToCount to Device
dimsTensor.reset(Tensor::createDevice<int>({mIndicesLastDim}));
backend()->onAcquireBuffer(dimsTensor.get(), Backend::DYNAMIC_SEPERATE);
mDimsToCount = (void *)dimsTensor.get()->buffer().device;
cuda_check(hipMemcpy(mDimsToCount, temp.data(), mIndicesLastDim*sizeof(int), hipMemcpyHostToDevice));
return NO_ERROR;
}
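// onExecute: two launches - SETZERO clears the whole output buffer, then SCATTERND
// scatter-adds every (index row, update slice) pair into it.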
ErrorCode ScatterNdExecution::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
int block_num0 = runtime->blocks_num(mOutElementSize);
int block_num1 = runtime->blocks_num(mIndexes);
int threads_num = runtime->threads_num();
auto input_addr0 = (void*)inputs[0]->deviceId();
auto input_addr1 = (void*)inputs[1]->deviceId();
auto output_addr = (void*)outputs[0]->deviceId();
//printf("mOutElementSize:%d- mIndexes:%d- mIndicesLastDim:%d- mAccNumber:%d\n", mOutElementSize,mIndexes,mIndicesLastDim, mAccNumber);
hipLaunchKernelGGL(( SETZERO), dim3(block_num0), dim3(threads_num), 0, 0, mOutElementSize, (float*)output_addr);
hipLaunchKernelGGL(( SCATTERND), dim3(block_num1), dim3(threads_num), 0, 0, mIndexes, mIndicesLastDim, mAccNumber,
(const int*)input_addr0, (const float*)input_addr1, (float*)output_addr, (const int32_t*)mDimsToCount);
return NO_ERROR;
}
class ScatterNdCreator : public CUDABackend::Creator {
public:
virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const override {
if(inputs.size() != 3) {
MNN_PRINT("CUDA ScatterNd inputs size:%d not support, back to CPU\n", inputs.size());
return nullptr;
}
return new ScatterNdExecution(backend);
}
};
static CUDACreatorRegister<ScatterNdCreator> __init(OpType_ScatterNd);
}
} | b255c7e615d592848dadab227fdcab46bd44621f.cu | #include "ScatterNdExecution.hpp"
namespace MNN {
namespace CUDA {
#define CUDA_KERNEL_LOOP(i, n) for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
template <typename T>
__global__ void SETZERO(const int n, T* outputPtr) {
CUDA_KERNEL_LOOP(index, n) {
outputPtr[index] = (T)0;
}
}
template<typename T>
__global__ void SCATTERND(const int n, const int indicesLastDim, const int accNumber, const int* indicesPtr,
const T* updatesPtr, T* outputPtr, const int32_t* dimsToCount) {
CUDA_KERNEL_LOOP(index, n) {
int pos = 0;
for (int j = 0; j < indicesLastDim; ++j) {
auto curIndex = (int)indicesPtr[index * indicesLastDim + j];
// MNN_ASSERT(curIndex >= 0 && curIndex < output->length(j));
pos += curIndex * dimsToCount[j];
}
for (int k = 0; k < accNumber; ++k) {
float updateValue = updatesPtr[index * accNumber + k];
atomicAdd(outputPtr + pos + k, updateValue);
}
}
}
ScatterNdExecution::ScatterNdExecution(Backend *backend) : Execution(backend) {
}
ScatterNdExecution::~ScatterNdExecution() {
if (nullptr != dimsTensor) {
backend()->onReleaseBuffer(dimsTensor.get(), Backend::DYNAMIC_SEPERATE);
}
}
ErrorCode ScatterNdExecution::onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
MNN_ASSERT(inputs.size() == 3);
MNN_ASSERT(outputs.size() == 1);
auto indices = inputs[0];
auto updates = inputs[1];
auto shape = inputs[2];
auto output = outputs[0];
const int indicesDimension = indices->dimensions();
mIndicesLastDim = indices->length(indicesDimension - 1);
mIndexes = indices->elementSize() / mIndicesLastDim;
mAccNumber = 1;
for (int i = indicesDimension - 1; i < updates->dimensions(); ++i) {
mAccNumber *= updates->length(i);
}
const int outputElementSize = output->elementSize();
mOutElementSize = outputElementSize;
int remainSize = outputElementSize;
std::vector<int> temp(mIndicesLastDim, 0);
for (int i = 0; i < mIndicesLastDim; ++i) {
temp[i] = remainSize / output->length(i);
remainSize = temp[i];
}
//save dimToCount to Device
dimsTensor.reset(Tensor::createDevice<int>({mIndicesLastDim}));
backend()->onAcquireBuffer(dimsTensor.get(), Backend::DYNAMIC_SEPERATE);
mDimsToCount = (void *)dimsTensor.get()->buffer().device;
cuda_check(cudaMemcpy(mDimsToCount, temp.data(), mIndicesLastDim*sizeof(int), cudaMemcpyHostToDevice));
return NO_ERROR;
}
ErrorCode ScatterNdExecution::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
int block_num0 = runtime->blocks_num(mOutElementSize);
int block_num1 = runtime->blocks_num(mIndexes);
int threads_num = runtime->threads_num();
auto input_addr0 = (void*)inputs[0]->deviceId();
auto input_addr1 = (void*)inputs[1]->deviceId();
auto output_addr = (void*)outputs[0]->deviceId();
//printf("mOutElementSize:%d- mIndexes:%d- mIndicesLastDim:%d- mAccNumber:%d\n", mOutElementSize,mIndexes,mIndicesLastDim, mAccNumber);
SETZERO<<<block_num0, threads_num>>>(mOutElementSize, (float*)output_addr);
SCATTERND<<<block_num1, threads_num>>>(mIndexes, mIndicesLastDim, mAccNumber,
(const int*)input_addr0, (const float*)input_addr1, (float*)output_addr, (const int32_t*)mDimsToCount);
return NO_ERROR;
}
class ScatterNdCreator : public CUDABackend::Creator {
public:
virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const override {
if(inputs.size() != 3) {
MNN_PRINT("CUDA ScatterNd inputs size:%d not support, back to CPU\n", inputs.size());
return nullptr;
}
return new ScatterNdExecution(backend);
}
};
static CUDACreatorRegister<ScatterNdCreator> __init(OpType_ScatterNd);
}
} |
d984b80ae137073ef7b682d2c49c3936c4360ff7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void binarize_input_kernel(float *input, int n, int size, float *binary)
{
int s = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (s >= size) return;
int i = 0;
float mean = 0;
for(i = 0; i < n; ++i){
mean += fabsf(input[i*size + s]);
}
mean = mean / n;
for(i = 0; i < n; ++i){
binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean;
}
} | d984b80ae137073ef7b682d2c49c3936c4360ff7.cu | #include "includes.h"
__global__ void binarize_input_kernel(float *input, int n, int size, float *binary)
{
int s = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (s >= size) return;
int i = 0;
float mean = 0;
for(i = 0; i < n; ++i){
mean += fabsf(input[i*size + s]);
}
mean = mean / n;
for(i = 0; i < n; ++i){
binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean;
}
} |
17ef75883f4983410290c0fff37aefaf856d4bbd.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
#include <math_constants.h>
#include "common.h"
#include "mlp.h"
#define RANDSEED 0x0bad1bad2bad104 //was 104 for the glory run
#define LAMBDA 0.05 //the learning rate //was 0.05 for the glory run
#define GOODENOUGH 0.00001
//These are definitions for index math in the 1d-2d world
#define UL(idx, w) (idx - w - 1)
#define UC(idx, w) (idx - w)
#define UR(idx, w) (idx - w + 1)
#define CL(idx, w) (idx - 1)
#define CC(idx, w) (idx)
#define CR(idx, w) (idx + 1)
#define DL(idx, w) (idx + w - 1)
#define DC(idx, w) (idx + w)
#define DR(idx, w) (idx + w + 1)
namespace CharacterRecognition {
using Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
//##################################
// SIZE DEFINES
//##################################
#define NUMFILTERS 6
#define KERNWIDTH 3
#define POOLWIDTH 3
#define F0SIZE 10201
#define F0WIDTH (101)
#define SINCONVRAWSIZE ((F0WIDTH - (KERNWIDTH - 1)) * (F0WIDTH - (KERNWIDTH - 1)))
#define CONVRAWSIZE (SINCONVRAWSIZE * NUMFILTERS)
#define SINCONVPOOLSIZE (SINCONVRAWSIZE / (POOLWIDTH * POOLWIDTH))
#define CONVPOOLSIZE (SINCONVPOOLSIZE * NUMFILTERS)
#define F1SIZE (CONVPOOLSIZE + 1)
//#define F1SIZE (6535)
#define F2SIZE 156 //was 156 for the glory run
#define F2SIZEA (F2SIZE + 1)
#define W1SIZE (F1SIZE * F2SIZE)
#ifndef RSIZE
#define RSIZE 52
#endif
#define W2SIZE (F2SIZEA * RSIZE)
void printSizes() {
printf("FEATURE VECTORS\n");
printf("\tF0Size:\t%d\n", F0SIZE);
printf("\tF1Size:\t%d\n", F1SIZE);
printf("\tF2Size:\t%d\n", F2SIZE);
printf("\tRSize:\t%d\n", RSIZE);
printf("WEIGHT MATRICES\n");
printf("\tW1Size:\t%d\t(F1Size * F2Size)\n", W1SIZE);
printf("\tW2Size:\t%d\t(F2Size * RSize)\n", W2SIZE);
printf("SINCONVRAWSIZE: %d\n", SINCONVRAWSIZE);
printf("CONVRAWSIZE: %d\n", CONVRAWSIZE);
printf("SINCONVPOOLSIZE: %d\n", SINCONVPOOLSIZE);
printf("CONVPOOLSIZE: %d\n", CONVPOOLSIZE);
}//printSizes
//##################################
// DEVICE POINTER MEMORY
//##################################
float* dF0;//features 0 (orig data)
float* dC0;//convolutional memory for first layer
float* dF1;//features 1
float* dW1;//weights 1
float* dW1D;//delta value for weights 1
float* dPj;//psi_j result matrix
float* dOj;//omega_j result matrix
float* dF2;//features 2
float* dF2A;//features 2 (activated)
float* dW2;//weights 2
float* dW2D;//delta value for weights 2
float* dPi;//psi_i result matrix
float* dR;//result
float* dRA;//result(activated)
float* dRE;//result error
float* dRT;//result (true)
//Convolution kernel initialization
filter3 kern1 = { 1.0 / 16, 1.0 / 8, 1.0 / 16,
1.0 / 8, 1.0 / 4, 1.0 / 8,
1.0 / 16, 1.0 / 8, 1.0 / 16 };//gaussian
filter3 kern2 = { -1, -1, -1, -1, 8, -1, -1, -1, -1 };//outline
filter3 kern3 = { 1, 2, 1, 0, 0, 0, -1, -2, -1 };//sobel top
filter3 kern4 = { -1, 0, 1, -2, 0, 2, -1, 0, 1 };//sobel right
filter3 kern5 = { -1, -2, -1, 0, 0, 0, 1, 2, 1 };//sobel bottom
filter3 kern6 = { 1, 0, -1, 2, 0, -2, 1, 0, -1 };//sobel left
filter3 allKernels[NUMFILTERS] = { kern1, kern2, kern3, kern4, kern5, kern6 };
//##################################
// FUNCTION DELCARATIONS
//##################################
/**
Gets the "index" for the thread
Currently, only supporting single-dimensional block indexes
Computes all relevant x, y, z transformations
*/
__device__ int getIndex();
//##################################
// DEVICE POINTER MALLOC AND FREE
//##################################
void kmallocBuffers() {
hipMalloc((void**)& dF0, F0SIZE * sizeof(float));
checkCUDAErrorFn("hipMalloc failed\n", NULL, __LINE__);
hipMalloc((void**)& dC0, CONVRAWSIZE * sizeof(float));
checkCUDAErrorFn("hipMalloc failed\n", NULL, __LINE__);
hipMalloc((void**)& dF1, (F1SIZE + 1) * sizeof(float));
checkCUDAErrorFn("hipMalloc failed\n", NULL, __LINE__);
hipMalloc((void**)& dW1, W1SIZE * sizeof(float));
checkCUDAErrorFn("hipMalloc failed\n", NULL, __LINE__);
hipMalloc((void**)& dW1D, W1SIZE * sizeof(float));
checkCUDAErrorFn("hipMalloc failed\n", NULL, __LINE__);
hipMalloc((void**)& dPj, F2SIZE * sizeof(float));
checkCUDAErrorFn("hipMalloc failed\n", NULL, __LINE__);
hipMalloc((void**)& dOj, F2SIZE * sizeof(float));
checkCUDAErrorFn("hipMalloc failed\n", NULL, __LINE__);
hipMalloc((void**)& dF2, F2SIZE * sizeof(float));
checkCUDAErrorFn("hipMalloc failed\n", NULL, __LINE__);
hipMalloc((void**)& dF2A, F2SIZEA * sizeof(float));
checkCUDAErrorFn("hipMalloc failed\n", NULL, __LINE__);
hipMalloc((void**)& dW2, W2SIZE *sizeof(float));
checkCUDAErrorFn("hipMalloc failed\n", NULL, __LINE__);
hipMalloc((void**)& dW2D, W2SIZE * sizeof(float));
checkCUDAErrorFn("hipMalloc failed\n", NULL, __LINE__);
hipMalloc((void**)& dPi, RSIZE * sizeof(float));
checkCUDAErrorFn("hipMalloc failed\n", NULL, __LINE__);
hipMalloc((void**)& dR, RSIZE * sizeof(float));
checkCUDAErrorFn("hipMalloc failed\n", NULL, __LINE__);
hipMalloc((void**)& dRE, RSIZE * sizeof(float));
checkCUDAErrorFn("hipMalloc failed\n", NULL, __LINE__);
hipMalloc((void**)& dRA, RSIZE * sizeof(float));
checkCUDAErrorFn("hipMalloc failed\n", NULL, __LINE__);
hipMalloc((void**)& dRT, RSIZE * sizeof(float));
checkCUDAErrorFn("hipMalloc failed\n", NULL, __LINE__);
}//kmallocBuffers
void kfreeBuffers() {
hipFree(dF0);
checkCUDAErrorFn("hipFree failed\n", NULL, __LINE__);
hipFree(dC0);
checkCUDAErrorFn("hipFree failed\n", NULL, __LINE__);
hipFree(dF1);
checkCUDAErrorFn("hipFree failed\n", NULL, __LINE__);
hipFree(dW1);
checkCUDAErrorFn("hipFree failed\n", NULL, __LINE__);
hipFree(dW1D);
checkCUDAErrorFn("hipFree failed\n", NULL, __LINE__);
hipFree(dPj);
checkCUDAErrorFn("hipFree failed\n", NULL, __LINE__);
hipFree(dOj);
checkCUDAErrorFn("hipFree failed\n", NULL, __LINE__);
hipFree(dF2);
checkCUDAErrorFn("hipFree failed\n", NULL, __LINE__);
hipFree(dF2A);
checkCUDAErrorFn("hipFree failed\n", NULL, __LINE__);
hipFree(dW2);
checkCUDAErrorFn("hipFree failed\n", NULL, __LINE__);
hipFree(dW2D);
checkCUDAErrorFn("hipFree failed\n", NULL, __LINE__);
hipFree(dPi);
checkCUDAErrorFn("hipFree failed\n", NULL, __LINE__);
hipFree(dR);
checkCUDAErrorFn("hipFree failed\n", NULL, __LINE__);
hipFree(dRE);
checkCUDAErrorFn("hipFree failed\n", NULL, __LINE__);
hipFree(dRA);
checkCUDAErrorFn("hipFree failed\n", NULL, __LINE__);
hipFree(dRT);
checkCUDAErrorFn("hipFree failed\n", NULL, __LINE__);
}//kfreeBuffers
//##################################
// DEVICE FUNCTIONS
//##################################
__device__ int getIndex() {
int threadIndex = threadIdx.x + (blockDim.x) * threadIdx.y + (blockDim.y * blockDim.x) * threadIdx.z;
int overallIndex = threadIndex + blockIdx.x * (blockDim.x * blockDim.y * blockDim.z);
return overallIndex;
}//getIndex
//##################################
// DEVICE GLOBAL FUNCTIONS
//##################################
/**
Performs our activation function on our results to put them in the range between 0 and 1
Does so in-place
*/
__global__ void kActivateResults(float* results, float* resultsA, int N) {
int index = getIndex();
if (index >= N) return;
resultsA[index] = 1.0 / (1.0 + expf(-1 * results[index]));
}//activateResults
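	//Note: despite its name, kActivateInverse below computes the logistic derivative
	//sigma'(x) = e^x / (1 + e^x)^2, not the inverse of the activation (the commented-out
	//logf line is the true inverse).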
__global__ void kActivateInverse(float* results, float* resultsIA, int N) {
int index = getIndex();
if (index >= N) return;
//resultsIA[index] = logf(results[index] / (1.0 - results[index]));
float ex = expf(results[index]);
resultsIA[index] = ex / ((ex + 1) * (ex + 1));
}//kActivateInverse
//##################################
// HOST HELPER FUNCTIONS
//##################################
void printWeights() {
float weights[W2SIZE] = {};
hipMemcpy(weights, dW2, W2SIZE * sizeof(float), hipMemcpyDeviceToHost);
for (int i = 0; i < F2SIZE; i++) {
int iOffset = i * RSIZE;
for (int j = 0; j < RSIZE; j++) {
printf("%.02f ", weights[iOffset + j]);
}//for
printf("\n");
}//for
}//printWeights
void activateResults(float* results, float* resultsActivated, int numResults) {
dim3 tpb = dim3(BLOCKSIZE);
dim3 bpg = dim3((numResults + BLOCKSIZE - 1) / BLOCKSIZE);
hipLaunchKernelGGL(( kActivateResults), dim3(bpg), dim3(tpb), 0, 0, results, resultsActivated, numResults);
checkCUDAErrorFn("kActivateResults failed\n", NULL, __LINE__);
}//activateResults
void calculateError(hipblasHandle_t* handle, float* resultsActivated, float* resultsTrue, float* resultsDiff, int numResults) {
hipMemcpy(resultsDiff, resultsTrue, numResults * sizeof(float), hipMemcpyDeviceToDevice);
checkCUDAErrorFn("Cudamemcpy failed\n", NULL, __LINE__);
float alpha = -1.0;
hipblasSaxpy(*handle, numResults, &alpha, resultsActivated, 1, resultsDiff, 1);
}//calculateError
__global__ void shiftByFactor(float* A, int N, float mulFactor, float offset) {
int index = getIndex();
		if (index >= N) return;
A[index] = mulFactor * A[index] + offset;
}//shiftByFactor
void gpuFillRand(float* A, int nr_rows_A, int nr_cols_A, float lo, float hi){
hiprandGenerator_t prng;
hiprandCreateGenerator(&prng, HIPRAND_RNG_PSEUDO_XORWOW);
hiprandSetPseudoRandomGeneratorSeed(prng, (unsigned long long) RANDSEED);
hiprandGenerateUniform(prng, A, nr_rows_A * nr_cols_A);
//shift the random numbers into the given range
float mulFactor = hi - lo;
float offset = lo;
int numElements = nr_rows_A * nr_cols_A;
dim3 tpb = dim3(BLOCKSIZE);
dim3 bpg = dim3((numElements + BLOCKSIZE - 1) / BLOCKSIZE);
hipLaunchKernelGGL(( shiftByFactor), dim3(bpg), dim3(tpb), 0, 0, A, numElements, mulFactor, offset);
checkCUDAErrorFn("shiftByFactor failed\n", NULL, __LINE__);
hipDeviceSynchronize();//safety
}//gpuFillRand
void matMul(hipblasHandle_t* handle, const float* A, const float* B, float* C, int m, int k, int n) {
//Since cublas expects column-major indexing, our A is effectively AT (kxm), and our B is effectively BT (nxk)
//As such, we're going to be doing BT * AT = CT (nxm)
//Then, we transpose C "in place" before we return
//And by that I mean we don't do that, because for some reason the multiplication works how I want
float alpha = 1.0;
float beta = 0.0;
//Future development: put the result into Cswap, transpose into C
hipblasSgemm(*handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, &alpha, B, n, A, k, &beta, C, n) ;
checkCUDAErrorFn("the internal matrix multiply failed\n", NULL, __LINE__);
//hipDeviceSynchronize();
		//no need to transpose?? not sure why, but this call already produces the layout we want
//transpose(C, Cswap, n, m);
}//matMul
//##################################
// ERROR CALCULATIONS (?)
//##################################
float_v calcErrorSingle(InputData record, float* resultArray, float* kResultArray) {
float_v retval = float_v();
float_v trueResult = record.resultArray;
for (int i = 0; i < trueResult.size(); i++) {
float error = trueResult[i] - resultArray[i];
retval.push_back(error);
}//for
//TODO: delete this
//if (kResultArray) {
// hipMemcpy(kResultArray, retval.data(), trueResult.size() * sizeof(float), hipMemcpyHostToDevice);
// checkCUDAErrorFn("hipMemcpy failed\n", NULL, __LINE__);
//}//if
return retval;
}//calcError
float_v calcSumSquareErrors(float_vv errorVals) {
float_v result = float_v(errorVals[0].size(), 0.0f);
for (int i = 0; i < errorVals.size(); i++) {
for (int j = 0; j < errorVals[0].size(); j++) {
result[j] += errorVals[i][j] * errorVals[i][j] / 2.0;
}//for j
}//for i
return result;
}//calcSumSquareErrors
float calcEnergy(float_v errors) {
float sum = 0;
for (int i = 0; i < errors.size(); i++) {
sum += (errors[i] * errors[i]);
}//for
return sum / errors.size();//averaging the energy function?
}//calcEnergy
//##################################
// WEIGHT CHANGES
//##################################
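	//kCalcWeightChange1/2: one thread per (input feature c, output neuron r) weight.
	//psi_r = a_r * (1 - a_r) * omega_r (logistic derivative times back-propagated error),
	//the weight delta accumulates psi_r * x_c, and psi_r is also written out so the next
	//layer down can form its own omega values. The learning rate is applied later, in
	//applyWeightChanges.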
__global__ void kCalcWeightChange1(float* thetaA, float* omega, float* data, int cmax, int rmax,
float* weightChange, float* psiOut) {
int index = getIndex();
if (index >= rmax * cmax) return;
int c = index / rmax;
int r = index % rmax;
float rA = thetaA[r];
float psi = (rA * (1 - rA)) * omega[r];
weightChange[index] += psi * data[c];//formerly: * LAMBDA
psiOut[r] = psi;
return;
}//kCalcWeightChange1
__global__ void kCalcWeightChange2(float* thetaA, float* omegaError, float* data, int cmax, int rmax,
float* weightChange, float* psiOut) {
int index = getIndex();
if (index >= rmax * cmax) return;
int c = index / rmax;
int r = index % rmax;
float rA = thetaA[r];
float psi = (rA * (1 - rA)) * omegaError[r];
weightChange[index] += data[c] * psi;//formerly: * LAMBDA
psiOut[r] = psi;
return;
}//kCalcWeightChange2
void calcWeightChange1(float* thetaResultA, float* omegaError, float* features, int kmax, int jmax, float* weightChange, float* psiOut) {
dim3 tpb = dim3(BLOCKSIZE);
dim3 bpgij = dim3(((jmax * kmax) + BLOCKSIZE - 1) / BLOCKSIZE);
hipLaunchKernelGGL(( kCalcWeightChange1), dim3(bpgij), dim3(tpb), 0, 0, thetaResultA, omegaError, features, kmax, jmax, weightChange, psiOut);
}//calcWeightChange1
void calcWeightChange2(float* thetaResultA, float* omegaError, float* features, int jmax, int imax, float* weightChange, float* psiOut) {
/*
result: [0:imax)(52), error: [0:imax)(52), data: [0, jmax)(10201), weightChange (outvar) ixj matrix
*/
//calcWeightChange2(dRA, dRE, dF2A, F2SIZEA, RSIZE, dW2D, dPi);
// matMul(handle, dF2A, dW2, dR, 1, F2SIZE + 1, RSIZE);
dim3 tpb = dim3(BLOCKSIZE);
dim3 bpgij = dim3(((imax * jmax) + BLOCKSIZE - 1) / BLOCKSIZE);
hipLaunchKernelGGL(( kCalcWeightChange2), dim3(bpgij), dim3(tpb), 0, 0, thetaResultA, omegaError, features, jmax, imax, weightChange, psiOut);
}//calcWeightChange
void applyWeightChanges(hipblasHandle_t* handle, float* weight, float* delta, int weightSize) {
float alpha = LAMBDA;
hipblasSaxpy(*handle, weightSize, &alpha, delta , 1, weight, 1);
checkCUDAErrorFn("saxpy failed\n", NULL, __LINE__);
}//applyWeightChanges
void clearWeightChanges(float* wDelta, int wDeltaSize) {
hipMemset(wDelta, 0, wDeltaSize * sizeof(float));
checkCUDAErrorFn("CudaMemset failed\n", NULL, __LINE__);
}//clearWeightChanges
//##################################
// HOST MAIN FUNCTIONS
//##################################
void applyAllWeightChanges(hipblasHandle_t* handle) {
applyWeightChanges(handle, dW2, dW2D, W2SIZE);
applyWeightChanges(handle, dW1, dW1D, W1SIZE);
hipDeviceSynchronize();
clearWeightChanges(dW2D, W2SIZE);
clearWeightChanges(dW1D, W1SIZE);
}//applyAllWeightChanges
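	//backPropagate: output-layer deltas first (dW2D, psi_i), then omega_j = W2 * psi_i
	//is pushed back through the weights and used to accumulate the hidden-layer deltas (dW1D).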
void backPropagate(hipblasHandle_t* handle) {
hipblasHandle_t mHandle; bool handling = false;
if (handle == NULL) {
handling = true; handle = &mHandle; hipblasCreate(handle);
}//if
//final layer weight delta calculation
calcWeightChange2(dRA, dRE, dF2A, F2SIZEA, RSIZE, dW2D, dPi);
//calculate Omega_j off the psi_i values
matMul(handle, dW2, dPi, dOj, F2SIZE, RSIZE, 1);
//matMul(handle, dPi, dW2, dOj, 1, RSIZE, F2SIZE);//go the other way because IT CANT HURT I GUESS
checkCUDAErrorFn("matMul failed\n", NULL, __LINE__);
//next-to-last layer weight delta calculation
calcWeightChange1(dF2A, dOj, dF1, F1SIZE, F2SIZE, dW1D, dPj);
checkCUDAErrorFn("calcWeightChange failed\n", NULL, __LINE__);
if (handling) {
hipblasDestroy(*handle);
}//if
}//backPropagate
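	//forwardPropagate: copy the record to the device, run the convolution + max-pool stage
	//into F1, then two fully connected layers (W1 -> F2 -> sigmoid, W2 -> R -> sigmoid),
	//and finally compute the error against the true label vector. Returns the per-class error.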
float_v forwardPropagate(InputData x, float* resultArray, hipblasHandle_t* handle) {
//Make our cublas handle if not handed one
hipblasHandle_t mHandle; bool handling = false;
if (handle == NULL) {
handling = true;
handle = &mHandle;
hipblasCreate(handle);
}//if
float dataPtr[F0SIZE];
float truePtr[RSIZE];
memcpy(dataPtr, x.fData.data(), F0SIZE * sizeof(float));
memcpy(truePtr, x.resultArray.data(), RSIZE * sizeof(float));
//printFloatPic(dataPtr, 101, 101);
//load data into kernel memory
hipMemcpy(dF0, dataPtr, F0SIZE * sizeof(float), hipMemcpyHostToDevice);
checkCUDAErrorFn("hipMemcpy failed\n", NULL, __LINE__);
hipMemcpy(dRT, truePtr, RSIZE * sizeof(float), hipMemcpyHostToDevice);
checkCUDAErrorFn("hipMemcpy failed\n", NULL, __LINE__);
//convolve step
convolveStep(dF0, F0SIZE, dC0, dF1, POOLWIDTH);
checkCUDAErrorFn("convolveStep failed\n", NULL, __LINE__);
//Fully connected layer w/ W1
matMul(handle, dF1, dW1, dF2, 1, F1SIZE, F2SIZE);
checkCUDAErrorFn("matMul failed\n", NULL, __LINE__);
//activate the first results
activateResults(dF2, dF2A, F2SIZE);
checkCUDAErrorFn("activateResults failed\n", NULL, __LINE__);
//Fully connected layer w/ W2
matMul(handle, dF2A, dW2, dR, 1, F2SIZE + 1, RSIZE);
checkCUDAErrorFn("matMul failed\n", NULL, __LINE__);
//Activate results
activateResults(dR, dRA, RSIZE);
checkCUDAErrorFn("activateResults failed\n", NULL, __LINE__);
//calculate error
calculateError(handle, dRA, dRT, dRE, RSIZE);
checkCUDAErrorFn("calcError failed\n", NULL, __LINE__);
hipMemcpy(resultArray, dRA, RSIZE * sizeof(float), hipMemcpyDeviceToHost);
checkCUDAErrorFn("hipMemcpy failed\n", NULL, __LINE__);
if (handling) {
hipblasDestroy(*handle);
}//if
return calcErrorSingle(x, resultArray);
}//forwardPropH
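	//trainWeights: per iteration, run every record forward and backward, accumulating the
	//weight deltas, then apply them all at once (batch update). Energy is logged every 10
	//iterations and training stops early once it drops below GOODENOUGH.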
void trainWeights(InputData_v records, int numIterations, int_v* iterRecord, float_v* errorRecord, bool noRandom) {
printSizes();
hipblasHandle_t handle;
hipblasCreate(&handle);
float results[RSIZE] = {};//floating space for the results to be put
if (!noRandom) {
//initialize random weights between -1 and 1
gpuFillRand(dW1, F1SIZE, F2SIZE, -1.0, 1.0);
gpuFillRand(dW2, F2SIZE, RSIZE, -1.0, 1.0);
}//if
printForwardResults(records);//see our starting point
//starting biases
float fakeBias = 1.0;
//add a bias term
hipMemcpy(dF1 + (F1SIZE - 1), &fakeBias, 1 * sizeof(float), hipMemcpyHostToDevice);
checkCUDAErrorFn("hipMemcpy failed\n", NULL, __LINE__);
hipMemcpy(dF2A + (F2SIZEA - 1), &fakeBias, 1 * sizeof(float), hipMemcpyHostToDevice);
checkCUDAErrorFn("hipMemcpy failed\n", NULL, __LINE__);
std::vector<int> indexVector = std::vector<int>();
for (int i = 0; i < records.size(); i++) indexVector.push_back(i);
timer().startCpuTimer();
for (int iter = 0; iter < numIterations; iter++) {
//for (int iter = 0; true; iter++) {
float_vv errorValues = float_vv();
float energy;
//std::random_shuffle(indexVector.begin(), indexVector.end());//not relevant currently
for (int j = 0; j < records.size(); j++) {
//go forward
int recordNum = indexVector[j];
float_v errorVal = forwardPropagate(records[recordNum], results, &handle);
errorValues.push_back(errorVal);
energy = calcEnergy(errorVal);
//printf("\ti#%04d: r#%02d: Calculated energy is %.8f\n", iter, recordNum, energy);
//go backwards
backPropagate(&handle);
}//for
applyAllWeightChanges(&handle);
if (iter % 10 == 0){
float_v sseError = calcSumSquareErrors(errorValues);
energy = calcEnergy(sseError);
printf("i#%04d: Total energy is %.9f\n", iter, energy);
iterRecord->push_back(iter);
errorRecord->push_back(energy);
if (energy < GOODENOUGH) {
break;//we're probably all trained!
}
}//if
}//for
timer().endCpuTimer();
hipblasDestroy(handle);
}//trainWeights
void printForwardResults(InputData_v allRecords) {
float resultArray[RSIZE];
int_v correctResults = int_v();
for (int i = 0; i < allRecords.size(); i++) {
float_v errorResult = CharacterRecognition::forwardPropagate(allRecords[i], resultArray);
printf("=========RESULT FOR RECORD %d==============\n", i);
bool isCorrect = true;
for (int j = 0; j < RSIZE; j++) {
if (resultArray[j] >= 0.5 && j != i) isCorrect = false;
if (resultArray[j] < 0.5 && j == i) isCorrect = false;
printf("@%02d: %0.2f ", j, resultArray[j]);
if ((j + 1) % 8 == 0) {
printf("\n");
}
}//for
if (isCorrect) correctResults.push_back(1);
else correctResults.push_back(0);
printf("\n");
}//for
printf("*****************\n");
printf("*****SUMMARY*****\n");
printf("*****************\n");
int totalCorrect = 0;
for (int i = 0; i < correctResults.size(); i++) {
if (correctResults[i]) {
printf("\tCorrect for entry %02d: TRUE\n", i);
totalCorrect++;
}//if
else {
printf("\tCorrect for entry %02d: FALSE\n", i);
}//else
}//for
printf("Total Correct: %d\n", totalCorrect);
}//printForwardResults
//##################################
// CONVOLVING
//##################################
//Convolutional layer:
//1. Convolve (into an intermediary)
//2. Activate the intermediary
//3. Max pool down into some feature vector (to be fed into some of the FC layers)
/**
Pools some number of activated convolutions down into a smaller buffer
Does so in blockWidth x blockWidth squares
Wants to spawn a number of threads equal to the number of resultant output "pixels"
*/
__global__ void kmaxPool(float* idata, float* odata, int blockWidth, int idataWidth, int odataWidth) {
int index = getIndex();
if (index >= odataWidth * odataWidth) return;
int oR = index / odataWidth;
int oC = index % odataWidth;
int iR = oR * blockWidth - (blockWidth / 2);
int iC = oC * blockWidth - (blockWidth / 2);
int iindex = iR * idataWidth + iC;
float max = -1.0e40;//stand-in for a minimum
for (int i = 0; i < blockWidth; i++) {
int iOffset = idataWidth * (i - (blockWidth / 2));
for (int j = 0; j < blockWidth; j++) {
max = fmaxf(max, idata[iindex + iOffset + (j - (blockWidth / 2))]);
}//for
}//for
odata[index] = max;
}//kmaxPool
/**
* Does a convolution from one image to another
* A few notes:
* Takes char data in for the input
* Assuming we're running one thread per output pixel, and that we've sized things correctly for our filter
* filter, idata, and odata must all be square
* Also, currently only accepting filter widths of 3
*/
__global__ void kconvolve(filter3 filter, float* idata, float* odata, int odataWidth) {
int index = getIndex();
if (index >= odataWidth * odataWidth) return;
int idataW = odataWidth + 2;
int oR = index / odataWidth;
int oC = index % odataWidth;
int iR = oR + 1;
int iC = oC + 1;
//get ourselves an "idata" index
int iindex = iR * idataW + iC;
float sum = 0;
float relData[9];
//Flips the kernel here
relData[0] = idata[DR(iindex, idataW)];
relData[1] = idata[DC(iindex, idataW)];
relData[2] = idata[DL(iindex, idataW)];
relData[3] = idata[CR(iindex, idataW)];
relData[4] = idata[CC(iindex, idataW)];
relData[5] = idata[CL(iindex, idataW)];
relData[6] = idata[UR(iindex, idataW)];
relData[7] = idata[UC(iindex, idataW)];
relData[8] = idata[UL(iindex, idataW)];
for (int i = 0; i < 9; i++) {
sum += relData[i] * filter.kernel[i];
}//for 9
odata[index] = sum;
}//kconvolve
void convolve(float* idata, float* odata, int oOffset, int odataSize, filter3 kernel) {
dim3 tpb = dim3(BLOCKSIZE);
dim3 bpg = dim3(((odataSize) + BLOCKSIZE - 1) / BLOCKSIZE);
hipLaunchKernelGGL(( kconvolve), dim3(bpg), dim3(tpb), 0, 0, kernel, idata, odata + oOffset, (int)sqrt(odataSize));
checkCUDAErrorFn("kconvolve failed\n", NULL, __LINE__);
}//convolve
/**
Does the forward propagation for convolving stuff
Also max-pools
Returns the size of the output layer (sure why not)
*/
int convolveStep(float* inputLayer, int inputLayerSize, float* outputPoolingLayer, float* outputLayer, int poolWidth) {
int inputLayerWidth = (int)sqrt(inputLayerSize);
int outputPoolingBlockWidth = inputLayerWidth - 2;
int outputPoolingBlockSize = outputPoolingBlockWidth * outputPoolingBlockWidth;
int outputPooledBlockSize = outputPoolingBlockSize / (poolWidth * poolWidth);
int outputPooledBlockWidth = (int)sqrt(outputPooledBlockSize);
int outputLayerSize = NUMFILTERS * outputPooledBlockSize;
//convolve
for (int i = 0; i < NUMFILTERS; i++) {
convolve(inputLayer, outputPoolingLayer, i * outputPoolingBlockSize, outputPoolingBlockSize, allKernels[i]);
}//for
hipDeviceSynchronize();
//pool
dim3 tpb = dim3(BLOCKSIZE);
dim3 bpg = dim3(((outputPooledBlockSize)+BLOCKSIZE - 1) / BLOCKSIZE);
for (int i = 0; i < NUMFILTERS; i++) {
// __global__ void kmaxPool(float* idata, float* odata, int blockWidth, int idataWidth, int odataWidth) {
int iBlockOffset = i * outputPoolingBlockSize;
int oBlockOffset = i * outputPooledBlockSize;
hipLaunchKernelGGL(( kmaxPool), dim3(bpg), dim3(tpb), 0, 0, outputPoolingLayer + iBlockOffset, outputLayer + oBlockOffset, poolWidth, outputPoolingBlockWidth, outputPooledBlockWidth);
checkCUDAErrorFn("kmaxpool failed\n", NULL, __LINE__);
}//for
return outputLayerSize;
}//convolveStep
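	//outputWeights/inputWeights: dump or restore W1 followed by W2 as raw floats in a single
	//binary file, so a trained network can be reloaded without retraining.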
void outputWeights(std::string pathName, bool inText) {
float* w1 = (float*)malloc(W1SIZE * sizeof(float));
float* w2 = (float*)malloc(W2SIZE * sizeof(float));
std::FILE* oF = std::fopen(pathName.c_str(), "wb");
hipMemcpy((void*) w1, dW1, W1SIZE * sizeof(float), hipMemcpyDeviceToHost);
checkCUDAErrorFn("hipMemcpy failed\n", NULL, __LINE__);
hipMemcpy((void*) w2, dW2, W2SIZE * sizeof(float), hipMemcpyDeviceToHost);
checkCUDAErrorFn("hipMemcpy failed\n", NULL, __LINE__);
int totalWritten = 0;
int numWritten = 0;
//weights 1
while (totalWritten < W1SIZE) {
numWritten = std::fwrite(w1 + totalWritten, sizeof(float), W1SIZE - totalWritten, oF);
if (!numWritten) {
printf("Wrote none of them for some reason\n");
exit(0);
}//if
totalWritten += numWritten;
}//while
totalWritten = 0;
//weights 2
while (totalWritten < W2SIZE) {
numWritten = std::fwrite(w2 + totalWritten, sizeof(float), W2SIZE - totalWritten, oF);
if (!numWritten) {
printf("Wrote none of them for some reason\n");
exit(0);
}//if
totalWritten += numWritten;
}//while
int ending = 0;
std::fwrite(&ending, sizeof(int), 1, oF);//ending 0-pad?
std::fflush(oF);
std::fclose(oF);
free(w1);
free(w2);
}//outputWeights
void inputWeights(std::string pathName) {
float* w1 = (float*)malloc(W1SIZE * sizeof(float));
std::FILE* iF = std::fopen(pathName.c_str(), "rb");
int totalRead = 0;
int numRead = 0;
//weights1
while (totalRead < W1SIZE) {
numRead = std::fread(w1 + totalRead, sizeof(float), W1SIZE - totalRead, iF);
if (!numRead) {
printf("Read none of them for some reason, errno %d\n", errno);
exit(0);
}//if
totalRead += numRead;
}//while
float* w2 = (float*)malloc(W2SIZE * sizeof(float));
totalRead = 0;
numRead = 0;
//weights2
while (totalRead < W2SIZE) {
numRead = std::fread(w2 + totalRead, sizeof(float), W2SIZE - totalRead, iF);
if (!numRead) {
printf("Read none of them for some reason, errno %d\n", errno);
exit(0);
}//if
totalRead += numRead;
}//while
hipMemcpy(dW1, w1, W1SIZE * sizeof(float), hipMemcpyHostToDevice);
checkCUDAErrorFn("hipMemcpy failed\n", NULL, __LINE__);
hipMemcpy(dW2, w2, W2SIZE * sizeof(float), hipMemcpyHostToDevice);
checkCUDAErrorFn("hipMemcpy failed\n", NULL, __LINE__);
std::fclose(iF);
free(w1);
free(w2);
}//inputWeights
void testMatMul() {
hipblasHandle_t handle;
hipblasCreate(&handle);
float f2[F2SIZE] = {};
float w2[W2SIZE] = {};
float r[RSIZE] = {};
float rgpu[RSIZE] = {};
gpuFillRand(dF2, 1, F2SIZE);
gpuFillRand(dW2, F2SIZE, RSIZE);
matMul(&handle, dF2, dW2, dR, 1, F2SIZE, RSIZE);
hipMemcpy(rgpu, dR, RSIZE * sizeof(float), hipMemcpyDeviceToHost);
checkCUDAErrorFn("hipMemcpy failed\n", NULL, __LINE__);
hipMemcpy(f2, dF2, F2SIZE * sizeof(float), hipMemcpyDeviceToHost);
checkCUDAErrorFn("hipMemcpy failed\n", NULL, __LINE__);
hipMemcpy(w2, dW2, W2SIZE * sizeof(float), hipMemcpyDeviceToHost);
checkCUDAErrorFn("hipMemcpy failed\n", NULL, __LINE__);
		//cpu-style try the multiplication
for (int i = 0; i < 1; i++) {
for (int j = 0; j < RSIZE; j++) {
float sum = 0;
for (int k = 0; k < F2SIZE; k++) {
sum += f2[i * F2SIZE + k] * w2[k * RSIZE + j];
}//for intermediary
r[i * RSIZE + j] = sum;
}//for end cols
}//for end rows
for (int i = 0; i < RSIZE; i++) {
printf("gpu:%.04f\tcpu:%.04f\n", rgpu[i], r[i]);
}//for
hipblasDestroy(handle);
}//testMatMul
}//CharacterRecognition
| 17ef75883f4983410290c0fff37aefaf856d4bbd.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <math_constants.h>
#include "common.h"
#include "mlp.h"
#define RANDSEED 0x0bad1bad2bad104 //was 104 for the glory run
#define LAMBDA 0.05 //the learning rate //was 0.05 for the glory run
#define GOODENOUGH 0.00001
//These are definitions for index math in the 1d-2d world
#define UL(idx, w) (idx - w - 1)
#define UC(idx, w) (idx - w)
#define UR(idx, w) (idx - w + 1)
#define CL(idx, w) (idx - 1)
#define CC(idx, w) (idx)
#define CR(idx, w) (idx + 1)
#define DL(idx, w) (idx + w - 1)
#define DC(idx, w) (idx + w)
#define DR(idx, w) (idx + w + 1)
namespace CharacterRecognition {
using Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
//##################################
// SIZE DEFINES
//##################################
#define NUMFILTERS 6
#define KERNWIDTH 3
#define POOLWIDTH 3
#define F0SIZE 10201
#define F0WIDTH (101)
#define SINCONVRAWSIZE ((F0WIDTH - (KERNWIDTH - 1)) * (F0WIDTH - (KERNWIDTH - 1)))
#define CONVRAWSIZE (SINCONVRAWSIZE * NUMFILTERS)
#define SINCONVPOOLSIZE (SINCONVRAWSIZE / (POOLWIDTH * POOLWIDTH))
#define CONVPOOLSIZE (SINCONVPOOLSIZE * NUMFILTERS)
#define F1SIZE (CONVPOOLSIZE + 1)
//#define F1SIZE (6535)
#define F2SIZE 156 //was 156 for the glory run
#define F2SIZEA (F2SIZE + 1)
#define W1SIZE (F1SIZE * F2SIZE)
#ifndef RSIZE
#define RSIZE 52
#endif
#define W2SIZE (F2SIZEA * RSIZE)
void printSizes() {
printf("FEATURE VECTORS\n");
printf("\tF0Size:\t%d\n", F0SIZE);
printf("\tF1Size:\t%d\n", F1SIZE);
printf("\tF2Size:\t%d\n", F2SIZE);
printf("\tRSize:\t%d\n", RSIZE);
printf("WEIGHT MATRICES\n");
printf("\tW1Size:\t%d\t(F1Size * F2Size)\n", W1SIZE);
printf("\tW2Size:\t%d\t(F2Size * RSize)\n", W2SIZE);
printf("SINCONVRAWSIZE: %d\n", SINCONVRAWSIZE);
printf("CONVRAWSIZE: %d\n", CONVRAWSIZE);
printf("SINCONVPOOLSIZE: %d\n", SINCONVPOOLSIZE);
printf("CONVPOOLSIZE: %d\n", CONVPOOLSIZE);
}//printSizes
//##################################
// DEVICE POINTER MEMORY
//##################################
float* dF0;//features 0 (orig data)
float* dC0;//convolutional memory for first layer
float* dF1;//features 1
float* dW1;//weights 1
float* dW1D;//delta value for weights 1
float* dPj;//psi_j result matrix
float* dOj;//omega_j result matrix
float* dF2;//features 2
float* dF2A;//features 2 (activated)
float* dW2;//weights 2
float* dW2D;//delta value for weights 2
float* dPi;//psi_i result matrix
float* dR;//result
float* dRA;//result(activated)
float* dRE;//result error
float* dRT;//result (true)
//Convolution kernel initialization
filter3 kern1 = { 1.0 / 16, 1.0 / 8, 1.0 / 16,
1.0 / 8, 1.0 / 4, 1.0 / 8,
1.0 / 16, 1.0 / 8, 1.0 / 16 };//gaussian
filter3 kern2 = { -1, -1, -1, -1, 8, -1, -1, -1, -1 };//outline
filter3 kern3 = { 1, 2, 1, 0, 0, 0, -1, -2, -1 };//sobel top
filter3 kern4 = { -1, 0, 1, -2, 0, 2, -1, 0, 1 };//sobel right
filter3 kern5 = { -1, -2, -1, 0, 0, 0, 1, 2, 1 };//sobel bottom
filter3 kern6 = { 1, 0, -1, 2, 0, -2, 1, 0, -1 };//sobel left
filter3 allKernels[NUMFILTERS] = { kern1, kern2, kern3, kern4, kern5, kern6 };
//##################################
// FUNCTION DELCARATIONS
//##################################
/**
Gets the "index" for the thread
Currently, only supporting single-dimensional block indexes
Computes all relevant x, y, z transformations
*/
__device__ int getIndex();
//##################################
// DEVICE POINTER MALLOC AND FREE
//##################################
void kmallocBuffers() {
cudaMalloc((void**)& dF0, F0SIZE * sizeof(float));
checkCUDAErrorFn("cudaMalloc failed\n", NULL, __LINE__);
cudaMalloc((void**)& dC0, CONVRAWSIZE * sizeof(float));
checkCUDAErrorFn("cudaMalloc failed\n", NULL, __LINE__);
cudaMalloc((void**)& dF1, (F1SIZE + 1) * sizeof(float));
checkCUDAErrorFn("cudaMalloc failed\n", NULL, __LINE__);
cudaMalloc((void**)& dW1, W1SIZE * sizeof(float));
checkCUDAErrorFn("cudaMalloc failed\n", NULL, __LINE__);
cudaMalloc((void**)& dW1D, W1SIZE * sizeof(float));
checkCUDAErrorFn("cudaMalloc failed\n", NULL, __LINE__);
cudaMalloc((void**)& dPj, F2SIZE * sizeof(float));
checkCUDAErrorFn("cudaMalloc failed\n", NULL, __LINE__);
cudaMalloc((void**)& dOj, F2SIZE * sizeof(float));
checkCUDAErrorFn("cudaMalloc failed\n", NULL, __LINE__);
cudaMalloc((void**)& dF2, F2SIZE * sizeof(float));
checkCUDAErrorFn("cudaMalloc failed\n", NULL, __LINE__);
cudaMalloc((void**)& dF2A, F2SIZEA * sizeof(float));
checkCUDAErrorFn("cudaMalloc failed\n", NULL, __LINE__);
cudaMalloc((void**)& dW2, W2SIZE *sizeof(float));
checkCUDAErrorFn("cudaMalloc failed\n", NULL, __LINE__);
cudaMalloc((void**)& dW2D, W2SIZE * sizeof(float));
checkCUDAErrorFn("cudaMalloc failed\n", NULL, __LINE__);
cudaMalloc((void**)& dPi, RSIZE * sizeof(float));
checkCUDAErrorFn("cudaMalloc failed\n", NULL, __LINE__);
cudaMalloc((void**)& dR, RSIZE * sizeof(float));
checkCUDAErrorFn("cudaMalloc failed\n", NULL, __LINE__);
cudaMalloc((void**)& dRE, RSIZE * sizeof(float));
checkCUDAErrorFn("cudaMalloc failed\n", NULL, __LINE__);
cudaMalloc((void**)& dRA, RSIZE * sizeof(float));
checkCUDAErrorFn("cudaMalloc failed\n", NULL, __LINE__);
cudaMalloc((void**)& dRT, RSIZE * sizeof(float));
checkCUDAErrorFn("cudaMalloc failed\n", NULL, __LINE__);
}//kmallocBuffers
void kfreeBuffers() {
cudaFree(dF0);
checkCUDAErrorFn("cudaFree failed\n", NULL, __LINE__);
cudaFree(dC0);
checkCUDAErrorFn("cudaFree failed\n", NULL, __LINE__);
cudaFree(dF1);
checkCUDAErrorFn("cudaFree failed\n", NULL, __LINE__);
cudaFree(dW1);
checkCUDAErrorFn("cudaFree failed\n", NULL, __LINE__);
cudaFree(dW1D);
checkCUDAErrorFn("cudaFree failed\n", NULL, __LINE__);
cudaFree(dPj);
checkCUDAErrorFn("cudaFree failed\n", NULL, __LINE__);
cudaFree(dOj);
checkCUDAErrorFn("cudaFree failed\n", NULL, __LINE__);
cudaFree(dF2);
checkCUDAErrorFn("cudaFree failed\n", NULL, __LINE__);
cudaFree(dF2A);
checkCUDAErrorFn("cudaFree failed\n", NULL, __LINE__);
cudaFree(dW2);
checkCUDAErrorFn("cudaFree failed\n", NULL, __LINE__);
cudaFree(dW2D);
checkCUDAErrorFn("cudaFree failed\n", NULL, __LINE__);
cudaFree(dPi);
checkCUDAErrorFn("cudaFree failed\n", NULL, __LINE__);
cudaFree(dR);
checkCUDAErrorFn("cudaFree failed\n", NULL, __LINE__);
cudaFree(dRE);
checkCUDAErrorFn("cudaFree failed\n", NULL, __LINE__);
cudaFree(dRA);
checkCUDAErrorFn("cudaFree failed\n", NULL, __LINE__);
cudaFree(dRT);
checkCUDAErrorFn("cudaFree failed\n", NULL, __LINE__);
}//kfreeBuffers
//##################################
// DEVICE FUNCTIONS
//##################################
__device__ int getIndex() {
int threadIndex = threadIdx.x + (blockDim.x) * threadIdx.y + (blockDim.y * blockDim.x) * threadIdx.z;
int overallIndex = threadIndex + blockIdx.x * (blockDim.x * blockDim.y * blockDim.z);
return overallIndex;
}//getIndex
//##################################
// DEVICE GLOBAL FUNCTIONS
//##################################
/**
Performs our activation function on our results to put them in the range between 0 and 1
Does so in-place
*/
__global__ void kActivateResults(float* results, float* resultsA, int N) {
int index = getIndex();
if (index >= N) return;
resultsA[index] = 1.0 / (1.0 + expf(-1 * results[index]));
}//activateResults
__global__ void kActivateInverse(float* results, float* resultsIA, int N) {
int index = getIndex();
if (index >= N) return;
//resultsIA[index] = logf(results[index] / (1.0 - results[index]));
float ex = expf(results[index]);
resultsIA[index] = ex / ((ex + 1) * (ex + 1));
}//kActivateInverse
//##################################
// HOST HELPER FUNCTIONS
//##################################
void printWeights() {
float weights[W2SIZE] = {};
cudaMemcpy(weights, dW2, W2SIZE * sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < F2SIZE; i++) {
int iOffset = i * RSIZE;
for (int j = 0; j < RSIZE; j++) {
printf("%.02f ", weights[iOffset + j]);
}//for
printf("\n");
}//for
}//printWeights
void activateResults(float* results, float* resultsActivated, int numResults) {
dim3 tpb = dim3(BLOCKSIZE);
dim3 bpg = dim3((numResults + BLOCKSIZE - 1) / BLOCKSIZE);
kActivateResults<<<bpg, tpb>>>(results, resultsActivated, numResults);
checkCUDAErrorFn("kActivateResults failed\n", NULL, __LINE__);
}//activateResults
void calculateError(cublasHandle_t* handle, float* resultsActivated, float* resultsTrue, float* resultsDiff, int numResults) {
cudaMemcpy(resultsDiff, resultsTrue, numResults * sizeof(float), cudaMemcpyDeviceToDevice);
checkCUDAErrorFn("Cudamemcpy failed\n", NULL, __LINE__);
float alpha = -1.0;
cublasSaxpy(*handle, numResults, &alpha, resultsActivated, 1, resultsDiff, 1);
}//calculateError
__global__ void shiftByFactor(float* A, int N, float mulFactor, float offset) {
int index = getIndex();
		if (index >= N) return;
A[index] = mulFactor * A[index] + offset;
}//shiftByFactor
void gpuFillRand(float* A, int nr_rows_A, int nr_cols_A, float lo, float hi){
curandGenerator_t prng;
curandCreateGenerator(&prng, CURAND_RNG_PSEUDO_XORWOW);
curandSetPseudoRandomGeneratorSeed(prng, (unsigned long long) RANDSEED);
curandGenerateUniform(prng, A, nr_rows_A * nr_cols_A);
//shift the random numbers into the given range
float mulFactor = hi - lo;
float offset = lo;
int numElements = nr_rows_A * nr_cols_A;
dim3 tpb = dim3(BLOCKSIZE);
dim3 bpg = dim3((numElements + BLOCKSIZE - 1) / BLOCKSIZE);
shiftByFactor<<<bpg, tpb>>>(A, numElements, mulFactor, offset);
checkCUDAErrorFn("shiftByFactor failed\n", NULL, __LINE__);
cudaDeviceSynchronize();//safety
}//gpuFillRand
void matMul(cublasHandle_t* handle, const float* A, const float* B, float* C, int m, int k, int n) {
//Since cublas expects column-major storage, our row-major A is read as AT (kxm) and our B as BT (nxk)
//As such, cublas computes BT * AT = (A*B)T (nxm) in column-major terms
//Read back in row-major order, that buffer is exactly A*B (mxn)
//So no explicit transpose of C is needed afterwards
float alpha = 1.0;
float beta = 0.0;
//Future development: put the result into Cswap, transpose into C
cublasSgemm(*handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha, B, n, A, k, &beta, C, n) ;
checkCUDAErrorFn("the internal matrix multiply failed\n", NULL, __LINE__);
//cudaDeviceSynchronize();
//No explicit transpose is needed; see the layout note above
//transpose(C, Cswap, n, m);
}//matMul
//##################################
// ERROR CALCULATIONS (?)
//##################################
float_v calcErrorSingle(InputData record, float* resultArray, float* kResultArray) {
float_v retval = float_v();
float_v trueResult = record.resultArray;
for (int i = 0; i < trueResult.size(); i++) {
float error = trueResult[i] - resultArray[i];
retval.push_back(error);
}//for
//TODO: delete this
//if (kResultArray) {
// cudaMemcpy(kResultArray, retval.data(), trueResult.size() * sizeof(float), cudaMemcpyHostToDevice);
// checkCUDAErrorFn("cudaMemcpy failed\n", NULL, __LINE__);
//}//if
return retval;
}//calcError
float_v calcSumSquareErrors(float_vv errorVals) {
float_v result = float_v(errorVals[0].size(), 0.0f);
for (int i = 0; i < errorVals.size(); i++) {
for (int j = 0; j < errorVals[0].size(); j++) {
result[j] += errorVals[i][j] * errorVals[i][j] / 2.0;
}//for j
}//for i
return result;
}//calcSumSquareErrors
float calcEnergy(float_v errors) {
float sum = 0;
for (int i = 0; i < errors.size(); i++) {
sum += (errors[i] * errors[i]);
}//for
return sum / errors.size();//averaging the energy function?
}//calcEnergy
//##################################
// WEIGHT CHANGES
//##################################
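/**
One thread per (input, output) weight: psi = sigmoid'(activation) * omega is the local error signal,
the weight delta accumulates psi * input, and psi is written to psiOut so the previous layer can
derive its own omega values
*/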
__global__ void kCalcWeightChange1(float* thetaA, float* omega, float* data, int cmax, int rmax,
float* weightChange, float* psiOut) {
int index = getIndex();
if (index >= rmax * cmax) return;
int c = index / rmax;
int r = index % rmax;
float rA = thetaA[r];
float psi = (rA * (1 - rA)) * omega[r];
weightChange[index] += psi * data[c];//formerly: * LAMBDA
psiOut[r] = psi;
return;
}//kCalcWeightChange1
__global__ void kCalcWeightChange2(float* thetaA, float* omegaError, float* data, int cmax, int rmax,
float* weightChange, float* psiOut) {
int index = getIndex();
if (index >= rmax * cmax) return;
int c = index / rmax;
int r = index % rmax;
float rA = thetaA[r];
float psi = (rA * (1 - rA)) * omegaError[r];
weightChange[index] += data[c] * psi;//formerly: * LAMBDA
psiOut[r] = psi;
return;
}//kCalcWeightChange2
void calcWeightChange1(float* thetaResultA, float* omegaError, float* features, int kmax, int jmax, float* weightChange, float* psiOut) {
dim3 tpb = dim3(BLOCKSIZE);
dim3 bpgij = dim3(((jmax * kmax) + BLOCKSIZE - 1) / BLOCKSIZE);
kCalcWeightChange1<<<bpgij, tpb>>>(thetaResultA, omegaError, features, kmax, jmax, weightChange, psiOut);
}//calcWeightChange1
void calcWeightChange2(float* thetaResultA, float* omegaError, float* features, int jmax, int imax, float* weightChange, float* psiOut) {
/*
result: [0:imax)(52), error: [0:imax)(52), data: [0, jmax)(10201), weightChange (outvar) ixj matrix
*/
//calcWeightChange2(dRA, dRE, dF2A, F2SIZEA, RSIZE, dW2D, dPi);
// matMul(handle, dF2A, dW2, dR, 1, F2SIZE + 1, RSIZE);
dim3 tpb = dim3(BLOCKSIZE);
dim3 bpgij = dim3(((imax * jmax) + BLOCKSIZE - 1) / BLOCKSIZE);
kCalcWeightChange2<<<bpgij, tpb>>>(thetaResultA, omegaError, features, jmax, imax, weightChange, psiOut);
}//calcWeightChange
void applyWeightChanges(cublasHandle_t* handle, float* weight, float* delta, int weightSize) {
float alpha = LAMBDA;
cublasSaxpy(*handle, weightSize, &alpha, delta , 1, weight, 1);
checkCUDAErrorFn("saxpy failed\n", NULL, __LINE__);
}//applyWeightChanges
void clearWeightChanges(float* wDelta, int wDeltaSize) {
cudaMemset(wDelta, 0, wDeltaSize * sizeof(float));
checkCUDAErrorFn("CudaMemset failed\n", NULL, __LINE__);
}//clearWeightChanges
//##################################
// HOST MAIN FUNCTIONS
//##################################
void applyAllWeightChanges(cublasHandle_t* handle) {
applyWeightChanges(handle, dW2, dW2D, W2SIZE);
applyWeightChanges(handle, dW1, dW1D, W1SIZE);
cudaDeviceSynchronize();
clearWeightChanges(dW2D, W2SIZE);
clearWeightChanges(dW1D, W1SIZE);
}//applyAllWeightChanges
void backPropagate(cublasHandle_t* handle) {
cublasHandle_t mHandle; bool handling = false;
if (handle == NULL) {
handling = true; handle = &mHandle; cublasCreate(handle);
}//if
//final layer weight delta calculation
calcWeightChange2(dRA, dRE, dF2A, F2SIZEA, RSIZE, dW2D, dPi);
//calculate Omega_j off the psi_i values
matMul(handle, dW2, dPi, dOj, F2SIZE, RSIZE, 1);
//matMul(handle, dPi, dW2, dOj, 1, RSIZE, F2SIZE);//go the other way because IT CANT HURT I GUESS
checkCUDAErrorFn("matMul failed\n", NULL, __LINE__);
//next-to-last layer weight delta calculation
calcWeightChange1(dF2A, dOj, dF1, F1SIZE, F2SIZE, dW1D, dPj);
checkCUDAErrorFn("calcWeightChange failed\n", NULL, __LINE__);
if (handling) {
cublasDestroy(*handle);
}//if
}//backPropagate
float_v forwardPropagate(InputData x, float* resultArray, cublasHandle_t* handle) {
//Make our cublas handle if not handed one
cublasHandle_t mHandle; bool handling = false;
if (handle == NULL) {
handling = true;
handle = &mHandle;
cublasCreate(handle);
}//if
float dataPtr[F0SIZE];
float truePtr[RSIZE];
memcpy(dataPtr, x.fData.data(), F0SIZE * sizeof(float));
memcpy(truePtr, x.resultArray.data(), RSIZE * sizeof(float));
//printFloatPic(dataPtr, 101, 101);
//load data into kernel memory
cudaMemcpy(dF0, dataPtr, F0SIZE * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAErrorFn("cudaMemcpy failed\n", NULL, __LINE__);
cudaMemcpy(dRT, truePtr, RSIZE * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAErrorFn("cudaMemcpy failed\n", NULL, __LINE__);
//convolve step
convolveStep(dF0, F0SIZE, dC0, dF1, POOLWIDTH);
checkCUDAErrorFn("convolveStep failed\n", NULL, __LINE__);
//Fully connected layer w/ W1
matMul(handle, dF1, dW1, dF2, 1, F1SIZE, F2SIZE);
checkCUDAErrorFn("matMul failed\n", NULL, __LINE__);
//activate the first results
activateResults(dF2, dF2A, F2SIZE);
checkCUDAErrorFn("activateResults failed\n", NULL, __LINE__);
//Fully connected layer w/ W2
matMul(handle, dF2A, dW2, dR, 1, F2SIZE + 1, RSIZE);
checkCUDAErrorFn("matMul failed\n", NULL, __LINE__);
//Activate results
activateResults(dR, dRA, RSIZE);
checkCUDAErrorFn("activateResults failed\n", NULL, __LINE__);
//calculate error
calculateError(handle, dRA, dRT, dRE, RSIZE);
checkCUDAErrorFn("calcError failed\n", NULL, __LINE__);
cudaMemcpy(resultArray, dRA, RSIZE * sizeof(float), cudaMemcpyDeviceToHost);
checkCUDAErrorFn("cudaMemcpy failed\n", NULL, __LINE__);
if (handling) {
cublasDestroy(*handle);
}//if
return calcErrorSingle(x, resultArray);
}//forwardPropagate
void trainWeights(InputData_v records, int numIterations, int_v* iterRecord, float_v* errorRecord, bool noRandom) {
printSizes();
cublasHandle_t handle;
cublasCreate(&handle);
float results[RSIZE] = {};//floating space for the results to be put
if (!noRandom) {
//initialize random weights between -1 and 1
gpuFillRand(dW1, F1SIZE, F2SIZE, -1.0, 1.0);
gpuFillRand(dW2, F2SIZE, RSIZE, -1.0, 1.0);
}//if
printForwardResults(records);//see our starting point
//starting biases
float fakeBias = 1.0;
//add a bias term
cudaMemcpy(dF1 + (F1SIZE - 1), &fakeBias, 1 * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAErrorFn("cudaMemcpy failed\n", NULL, __LINE__);
cudaMemcpy(dF2A + (F2SIZEA - 1), &fakeBias, 1 * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAErrorFn("cudaMemcpy failed\n", NULL, __LINE__);
std::vector<int> indexVector = std::vector<int>();
for (int i = 0; i < records.size(); i++) indexVector.push_back(i);
timer().startCpuTimer();
for (int iter = 0; iter < numIterations; iter++) {
//for (int iter = 0; true; iter++) {
float_vv errorValues = float_vv();
float energy;
//std::random_shuffle(indexVector.begin(), indexVector.end());//not relevant currently
for (int j = 0; j < records.size(); j++) {
//go forward
int recordNum = indexVector[j];
float_v errorVal = forwardPropagate(records[recordNum], results, &handle);
errorValues.push_back(errorVal);
energy = calcEnergy(errorVal);
//printf("\ti#%04d: r#%02d: Calculated energy is %.8f\n", iter, recordNum, energy);
//go backwards
backPropagate(&handle);
}//for
applyAllWeightChanges(&handle);
if (iter % 10 == 0){
float_v sseError = calcSumSquareErrors(errorValues);
energy = calcEnergy(sseError);
printf("i#%04d: Total energy is %.9f\n", iter, energy);
iterRecord->push_back(iter);
errorRecord->push_back(energy);
if (energy < GOODENOUGH) {
break;//we're probably all trained!
}
}//if
}//for
timer().endCpuTimer();
cublasDestroy(handle);
}//trainWeights
void printForwardResults(InputData_v allRecords) {
float resultArray[RSIZE];
int_v correctResults = int_v();
for (int i = 0; i < allRecords.size(); i++) {
float_v errorResult = CharacterRecognition::forwardPropagate(allRecords[i], resultArray);
printf("=========RESULT FOR RECORD %d==============\n", i);
bool isCorrect = true;
for (int j = 0; j < RSIZE; j++) {
if (resultArray[j] >= 0.5 && j != i) isCorrect = false;
if (resultArray[j] < 0.5 && j == i) isCorrect = false;
printf("@%02d: %0.2f ", j, resultArray[j]);
if ((j + 1) % 8 == 0) {
printf("\n");
}
}//for
if (isCorrect) correctResults.push_back(1);
else correctResults.push_back(0);
printf("\n");
}//for
printf("*****************\n");
printf("*****SUMMARY*****\n");
printf("*****************\n");
int totalCorrect = 0;
for (int i = 0; i < correctResults.size(); i++) {
if (correctResults[i]) {
printf("\tCorrect for entry %02d: TRUE\n", i);
totalCorrect++;
}//if
else {
printf("\tCorrect for entry %02d: FALSE\n", i);
}//else
}//for
printf("Total Correct: %d\n", totalCorrect);
}//printForwardResults
//##################################
// CONVOLVING
//##################################
//Convolutional layer:
//1. Convolve (into an intermediary)
//2. Activate the intermediary
//3. Max pool down into some feature vector (to be fed into some of the FC layers)
/**
Pools some number of activated convolutions down into a smaller buffer
Does so in blockWidth x blockWidth squares
Wants to spawn a number of threads equal to the number of resultant output "pixels"
*/
__global__ void kmaxPool(float* idata, float* odata, int blockWidth, int idataWidth, int odataWidth) {
int index = getIndex();
if (index >= odataWidth * odataWidth) return;
int oR = index / odataWidth;
int oC = index % odataWidth;
int iR = oR * blockWidth - (blockWidth / 2);
int iC = oC * blockWidth - (blockWidth / 2);
int iindex = iR * idataWidth + iC;
float max = -1.0e40;//stand-in for a minimum
for (int i = 0; i < blockWidth; i++) {
int iOffset = idataWidth * (i - (blockWidth / 2));
for (int j = 0; j < blockWidth; j++) {
max = fmaxf(max, idata[iindex + iOffset + (j - (blockWidth / 2))]);
}//for
}//for
odata[index] = max;
}//kmaxPool
/**
* Does a convolution from one image to another
* A few notes:
* Takes char data in for the input
* Assuming we're running one thread per output pixel, and that we've sized things correctly for our filter
* filter, idata, and odata must all be square
* Also, currently only accepting filter widths of 3
*/
__global__ void kconvolve(filter3 filter, float* idata, float* odata, int odataWidth) {
int index = getIndex();
if (index >= odataWidth * odataWidth) return;
int idataW = odataWidth + 2;
int oR = index / odataWidth;
int oC = index % odataWidth;
int iR = oR + 1;
int iC = oC + 1;
//get ourselves an "idata" index
int iindex = iR * idataW + iC;
float sum = 0;
float relData[9];
//Flips the kernel here
relData[0] = idata[DR(iindex, idataW)];
relData[1] = idata[DC(iindex, idataW)];
relData[2] = idata[DL(iindex, idataW)];
relData[3] = idata[CR(iindex, idataW)];
relData[4] = idata[CC(iindex, idataW)];
relData[5] = idata[CL(iindex, idataW)];
relData[6] = idata[UR(iindex, idataW)];
relData[7] = idata[UC(iindex, idataW)];
relData[8] = idata[UL(iindex, idataW)];
for (int i = 0; i < 9; i++) {
sum += relData[i] * filter.kernel[i];
}//for 9
odata[index] = sum;
}//kconvolve
void convolve(float* idata, float* odata, int oOffset, int odataSize, filter3 kernel) {
dim3 tpb = dim3(BLOCKSIZE);
dim3 bpg = dim3(((odataSize) + BLOCKSIZE - 1) / BLOCKSIZE);
kconvolve<<<bpg, tpb>>>(kernel, idata, odata + oOffset, (int)sqrt(odataSize));
checkCUDAErrorFn("kconvolve failed\n", NULL, __LINE__);
}//convolve
/**
Does the forward propagation for convolving stuff
Also max-pools
Returns the size of the output layer (sure why not)
*/
int convolveStep(float* inputLayer, int inputLayerSize, float* outputPoolingLayer, float* outputLayer, int poolWidth) {
int inputLayerWidth = (int)sqrt(inputLayerSize);
int outputPoolingBlockWidth = inputLayerWidth - 2;
int outputPoolingBlockSize = outputPoolingBlockWidth * outputPoolingBlockWidth;
int outputPooledBlockSize = outputPoolingBlockSize / (poolWidth * poolWidth);
int outputPooledBlockWidth = (int)sqrt(outputPooledBlockSize);
int outputLayerSize = NUMFILTERS * outputPooledBlockSize;
//convolve
for (int i = 0; i < NUMFILTERS; i++) {
convolve(inputLayer, outputPoolingLayer, i * outputPoolingBlockSize, outputPoolingBlockSize, allKernels[i]);
}//for
cudaDeviceSynchronize();
//pool
dim3 tpb = dim3(BLOCKSIZE);
dim3 bpg = dim3(((outputPooledBlockSize)+BLOCKSIZE - 1) / BLOCKSIZE);
for (int i = 0; i < NUMFILTERS; i++) {
// __global__ void kmaxPool(float* idata, float* odata, int blockWidth, int idataWidth, int odataWidth) {
int iBlockOffset = i * outputPoolingBlockSize;
int oBlockOffset = i * outputPooledBlockSize;
kmaxPool<<<bpg, tpb>>>(outputPoolingLayer + iBlockOffset, outputLayer + oBlockOffset, poolWidth, outputPoolingBlockWidth, outputPooledBlockWidth);
checkCUDAErrorFn("kmaxpool failed\n", NULL, __LINE__);
}//for
return outputLayerSize;
}//convolveStep
void outputWeights(std::string pathName, bool inText) {
float* w1 = (float*)malloc(W1SIZE * sizeof(float));
float* w2 = (float*)malloc(W2SIZE * sizeof(float));
std::FILE* oF = std::fopen(pathName.c_str(), "wb");
cudaMemcpy((void*) w1, dW1, W1SIZE * sizeof(float), cudaMemcpyDeviceToHost);
checkCUDAErrorFn("cudaMemcpy failed\n", NULL, __LINE__);
cudaMemcpy((void*) w2, dW2, W2SIZE * sizeof(float), cudaMemcpyDeviceToHost);
checkCUDAErrorFn("cudaMemcpy failed\n", NULL, __LINE__);
int totalWritten = 0;
int numWritten = 0;
//weights 1
while (totalWritten < W1SIZE) {
numWritten = std::fwrite(w1 + totalWritten, sizeof(float), W1SIZE - totalWritten, oF);
if (!numWritten) {
printf("Wrote none of them for some reason\n");
exit(0);
}//if
totalWritten += numWritten;
}//while
totalWritten = 0;
//weights 2
while (totalWritten < W2SIZE) {
numWritten = std::fwrite(w2 + totalWritten, sizeof(float), W2SIZE - totalWritten, oF);
if (!numWritten) {
printf("Wrote none of them for some reason\n");
exit(0);
}//if
totalWritten += numWritten;
}//while
int ending = 0;
std::fwrite(&ending, sizeof(int), 1, oF);//ending 0-pad?
std::fflush(oF);
std::fclose(oF);
free(w1);
free(w2);
}//outputWeights
void inputWeights(std::string pathName) {
float* w1 = (float*)malloc(W1SIZE * sizeof(float));
std::FILE* iF = std::fopen(pathName.c_str(), "rb");
int totalRead = 0;
int numRead = 0;
//weights1
while (totalRead < W1SIZE) {
numRead = std::fread(w1 + totalRead, sizeof(float), W1SIZE - totalRead, iF);
if (!numRead) {
printf("Read none of them for some reason, errno %d\n", errno);
exit(0);
}//if
totalRead += numRead;
}//while
float* w2 = (float*)malloc(W2SIZE * sizeof(float));
totalRead = 0;
numRead = 0;
//weights2
while (totalRead < W2SIZE) {
numRead = std::fread(w2 + totalRead, sizeof(float), W2SIZE - totalRead, iF);
if (!numRead) {
printf("Read none of them for some reason, errno %d\n", errno);
exit(0);
}//if
totalRead += numRead;
}//while
cudaMemcpy(dW1, w1, W1SIZE * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAErrorFn("cudaMemcpy failed\n", NULL, __LINE__);
cudaMemcpy(dW2, w2, W2SIZE * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAErrorFn("cudaMemcpy failed\n", NULL, __LINE__);
std::fclose(iF);
free(w1);
free(w2);
}//inputWeights
void testMatMul() {
cublasHandle_t handle;
cublasCreate(&handle);
float f2[F2SIZE] = {};
float w2[W2SIZE] = {};
float r[RSIZE] = {};
float rgpu[RSIZE] = {};
gpuFillRand(dF2, 1, F2SIZE);
gpuFillRand(dW2, F2SIZE, RSIZE);
matMul(&handle, dF2, dW2, dR, 1, F2SIZE, RSIZE);
cudaMemcpy(rgpu, dR, RSIZE * sizeof(float), cudaMemcpyDeviceToHost);
checkCUDAErrorFn("cudaMemcpy failed\n", NULL, __LINE__);
cudaMemcpy(f2, dF2, F2SIZE * sizeof(float), cudaMemcpyDeviceToHost);
checkCUDAErrorFn("cudaMemcpy failed\n", NULL, __LINE__);
cudaMemcpy(w2, dW2, W2SIZE * sizeof(float), cudaMemcpyDeviceToHost);
checkCUDAErrorFn("cudaMemcpy failed\n", NULL, __LINE__);
//CPU-side reference multiplication for comparison
for (int i = 0; i < 1; i++) {
for (int j = 0; j < RSIZE; j++) {
float sum = 0;
for (int k = 0; k < F2SIZE; k++) {
sum += f2[i * F2SIZE + k] * w2[k * RSIZE + j];
}//for intermediary
r[i * RSIZE + j] = sum;
}//for end cols
}//for end rows
for (int i = 0; i < RSIZE; i++) {
printf("gpu:%.04f\tcpu:%.04f\n", rgpu[i], r[i]);
}//for
cublasDestroy(handle);
}//testMatMul
}//CharacterRecognition
|
14dbf166dbe42bad02709dde18715edba322823c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <time.h>
int *ha, *hb, *hc; // host data
int *hd; // results
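// GPU kernel: each thread computes one element of d from the corresponding elements of a, b and c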
__global__
void add(int *a, int *b, int *c, int *d, int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N*N) {
d[i] = 2 * a[i] + 3 * b[i] / 4 + c[i] / 2;
}
}
//CPU function
void addCPU(int *a,int *b, int *c, int *d, int N){
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
d[i*N+j] = 2 * a[i*N+j] + 3 * b[i*N+j] / 4 + c[i*N+j] / 2;
}
}
}
int main() {
int N = 1000;
int nBytes = N*N*sizeof(int);
//Block size and number
int block_size, block_no;
//memory allocation
ha = (int *) malloc(nBytes);
hb = (int *) malloc(nBytes);
hc = (int *) malloc(nBytes);
hd = (int *) malloc(nBytes);
block_size = 8; //threads per block
block_no = N*N/block_size;
//Work definition
dim3 dimBlock(block_size, 1, 1);
dim3 dimGrid(block_no, 1, 1);
for (int i = 0; i < N*N; ++i) {
ha[i] = i;
hb[i] = i*i;
hc[i] = i*i-2*i;
}
int *da, *db, *dc, *dd;
hipMalloc((void **)&da, N*N*sizeof(int));
hipMalloc((void **)&db, N*N*sizeof(int));
hipMalloc((void **)&dc, N*N*sizeof(int));
hipMalloc((void **)&dd, N*N*sizeof(int));
hipMemcpy(da, ha, N*N*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(db, hb, N*N*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dc, hc, N*N*sizeof(int), hipMemcpyHostToDevice);
clock_t start_d=clock();
hipLaunchKernelGGL(( add), dim3(block_no),dim3(block_size), 0, 0, da, db, dc, dd, N);
hipMemcpy(hd, dd, N*N*sizeof(int), hipMemcpyDeviceToHost);
clock_t end_d = clock();
clock_t start_h = clock();
addCPU(ha, hb, hc, hd, N);
clock_t end_h = clock();
//Time computing
double time_d = (double)(end_d-start_d)/CLOCKS_PER_SEC;
double time_h = (double)(end_h-start_h)/CLOCKS_PER_SEC;
printf("n = %d \t GPU time = %fs \t CPU time = %fs\n", N, time_d, time_h);
hipFree(da);
hipFree(db);
hipFree(dc);
hipFree(dd);
return 0;
}
| 14dbf166dbe42bad02709dde18715edba322823c.cu | #include <stdio.h>
#include <cuda.h>
#include <time.h>
int *ha, *hb, *hc; // host data
int *hd; // results
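// GPU kernel: each thread computes one element of d from the corresponding elements of a, b and c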
__global__
void add(int *a, int *b, int *c, int *d, int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N*N) {
d[i] = 2 * a[i] + 3 * b[i] / 4 + c[i] / 2;
}
}
//CPU function
void addCPU(int *a,int *b, int *c, int *d, int N){
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
d[i*N+j] = 2 * a[i*N+j] + 3 * b[i*N+j] / 4 + c[i*N+j] / 2;
}
}
}
int main() {
int N = 1000;
int nBytes = N*N*sizeof(int);
//Block size and number
int block_size, block_no;
//memory allocation
ha = (int *) malloc(nBytes);
hb = (int *) malloc(nBytes);
hc = (int *) malloc(nBytes);
hd = (int *) malloc(nBytes);
block_size = 8; //threads per block
block_no = N*N/block_size;
//Work definition
dim3 dimBlock(block_size, 1, 1);
dim3 dimGrid(block_no, 1, 1);
for (int i = 0; i < N*N; ++i) {
ha[i] = i;
hb[i] = i*i;
hc[i] = i*i-2*i;
}
int *da, *db, *dc, *dd;
cudaMalloc((void **)&da, N*N*sizeof(int));
cudaMalloc((void **)&db, N*N*sizeof(int));
cudaMalloc((void **)&dc, N*N*sizeof(int));
cudaMalloc((void **)&dd, N*N*sizeof(int));
cudaMemcpy(da, ha, N*N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(db, hb, N*N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dc, hc, N*N*sizeof(int), cudaMemcpyHostToDevice);
clock_t start_d=clock();
add<<<block_no,block_size>>>(da, db, dc, dd, N);
cudaMemcpy(hd, dd, N*N*sizeof(int), cudaMemcpyDeviceToHost);
clock_t end_d = clock();
clock_t start_h = clock();
addCPU(ha, hb, hc, hd, N);
clock_t end_h = clock();
//Time computing
double time_d = (double)(end_d-start_d)/CLOCKS_PER_SEC;
double time_h = (double)(end_h-start_h)/CLOCKS_PER_SEC;
printf("n = %d \t GPU time = %fs \t CPU time = %fs\n", N, time_d, time_h);
cudaFree(da);
cudaFree(db);
cudaFree(dc);
cudaFree(dd);
return 0;
}
|
112ae8b8af6977a11b920537beca7fb71ad519e6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <time.h>
#define TILE_WIDTH 32
using namespace std;
// Matrix multiplication using shared memory (tiled)
__global__ void matrixMulKernelTiled(float *d_M, float *d_N, float *d_P, int width1, int height1, int width2) {
__shared__ float ds_M[TILE_WIDTH][TILE_WIDTH];
__shared__ float ds_N[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
for (int p = 0; p < (width1 + TILE_WIDTH - 1) / TILE_WIDTH; p++) {
//Load the element of matrix 1 that this thread should multiply
if (row < height1 and (p * TILE_WIDTH + tx) < width1) {
ds_M[ty][tx] = d_M[row * width1 + p * TILE_WIDTH + tx];
} else {
//if out of range, pad with zero
ds_M[ty][tx] = 0.0;
}
//Load the element of matrix 2 that this thread should multiply
if ((p * TILE_WIDTH + ty) < width1 and col < width2) {
ds_N[ty][tx] = d_N[(p * TILE_WIDTH + ty) * width2 + col];
} else {
//if out of range, pad with zero
ds_N[ty][tx] = 0.0;
}
__syncthreads();
//Do the multiplication using shared memory
if (row < height1 and col < width2)
for (int k = 0; k < TILE_WIDTH; k++) {
Pvalue += ds_M[ty][k] * ds_N[k][tx];
}
__syncthreads();
}
//Store the results.
if (row < height1 and col < width2)
d_P[row * width2 + col] = Pvalue;
}
//Multiplication on the CPU:
void MatrixMulCPU(float *M, float *N, float *P, int width1, int height1, int width2) {
//The result of each element is accumulated here
int sum = 0;
for (int i = 0; i < height1; i++) {
for (int j = 0; j < width2; j++) {
sum = 0;
for (int k = 0; k < width1; k++)
//Compute the product and accumulate it in the variable
sum += M[i * width1 + k] * N[k * width2 + j];
//Place the values in the result matrix
P[i * width2 + j] = sum;
}
}
}
//Initializes the matrices to be multiplied.
int initValues(float *data, int width, int heigth){
for(int i = 0; i < width*heigth; i++)
data[i] = 1.0;
return 0;
}
int main()
{
clock_t start, end;
float *h_M, *h_N, *h_P,*h_P_d; //Host matrices
float *d_M, *d_N,*d_P; // Device matrices
//Here we set the sizes (height and width) of matrices 1 and 2
int heigth1 = 10;
int width1 = 10;
int heigth2 = 10;
int width2 = 15;
hipError_t error = hipSuccess;
int size1 = width1 * heigth1 * sizeof(float); //Size in bytes of matrix 1
int size2 = width2 * heigth2 * sizeof(float); //Size in bytes of matrix 2
int size3 = width2 * heigth1 * sizeof(float); //Size in bytes of the result matrix
//Allocate memory for the host matrices
h_M = (float*)malloc(size1);
h_N = (float*)malloc(size2);
h_P = (float*)malloc(size3);
h_P_d = (float*)malloc(size3);
if(h_P_d == NULL)
return 0;
//Initialize the matrices
initValues(h_M, width1, heigth1);
initValues(h_N, width2, heigth2);
//GPU procedure:
//Allocate device memory for a matrix of size1 bytes
error = hipMalloc((void**)&d_M,size1);
if(error != hipSuccess){
printf("Error reservando memoria para d_M");
exit(0);
}
//Allocate device memory for a matrix of size2 bytes
error = hipMalloc((void**)&d_N,size2);
if(error != hipSuccess){
printf("Error reservando memoria para d_N");
exit(0);
}
//Allocate device memory for the result matrix of size3 bytes
error = hipMalloc((void**)&d_P,size3);
if(error != hipSuccess){
printf("Error reservando memoria para d_P");
exit(0);
}
//Copy the host matrices to the device with the same dimensions.
error = hipMemcpy(d_M, h_M, size1, hipMemcpyHostToDevice);
if(error != hipSuccess){
printf("Error copiando datos a d_M");
exit(0);
}
error = hipMemcpy(d_N, h_N, size2, hipMemcpyHostToDevice);
if(error != hipSuccess){
printf("Error copiando datos a d_N");
exit(0);
}
int blockSize = 1;
dim3 dimBlock(blockSize,blockSize,1);
dim3 dimGrid(ceil(width2 / float(blockSize)), ceil(heigth1 / float(blockSize)), 1);
// TIMING LOOP
for(int x=1; x<=5;x++)
{
printf ("Ciclo numero %d\n",x);
//multiplication on the CPU
start = clock();
MatrixMulCPU(h_M, h_N, h_P, width1, heigth1, width2); //Invoke the sequential multiplication on the CPU.
end = clock();
double cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Tiempo en CPU: %.10f\n", cpu_time_used);
//End
//Multiplication on the GPU
start = clock();
hipLaunchKernelGGL(( matrixMulKernelTiled), dim3(dimGrid), dim3(dimBlock), 0, 0, d_M, d_N, d_P, width1, heigth1, width2);// Invoke the tiled multiplication kernel.
hipMemcpy(h_P_d,d_P,size3,hipMemcpyDeviceToHost); //Copy the result matrix from the device to the host.
end = clock();
double gpu_time_used = double(end - start) / CLOCKS_PER_SEC;
printf("Tiempo en GPU: %.10f\n",gpu_time_used);
//END
}
hipFree(d_M);
hipFree(d_N);
hipFree(d_P);
return 0;
}
| 112ae8b8af6977a11b920537beca7fb71ad519e6.cu | #include <stdio.h>
#include <iostream>
#include <assert.h>
#include <cuda.h>
#include <time.h>
#define TILE_WIDTH 32
using namespace std;
// Matrix multiplication using shared memory (tiled)
__global__ void matrixMulKernelTiled(float *d_M, float *d_N, float *d_P, int width1, int height1, int width2) {
__shared__ float ds_M[TILE_WIDTH][TILE_WIDTH];
__shared__ float ds_N[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
for (int p = 0; p < (width1 + TILE_WIDTH - 1) / TILE_WIDTH; p++) {
//Load the element of matrix 1 that this thread should multiply
if (row < height1 and (p * TILE_WIDTH + tx) < width1) {
ds_M[ty][tx] = d_M[row * width1 + p * TILE_WIDTH + tx];
} else {
//if out of range, pad with zero
ds_M[ty][tx] = 0.0;
}
//Load the element of matrix 2 that this thread should multiply
if ((p * TILE_WIDTH + ty) < width1 and col < width2) {
ds_N[ty][tx] = d_N[(p * TILE_WIDTH + ty) * width2 + col];
} else {
//if out of range, pad with zero
ds_N[ty][tx] = 0.0;
}
__syncthreads();
//Do the multiplication using shared memory
if (row < height1 and col < width2)
for (int k = 0; k < TILE_WIDTH; k++) {
Pvalue += ds_M[ty][k] * ds_N[k][tx];
}
__syncthreads();
}
//Store the results.
if (row < height1 and col < width2)
d_P[row * width2 + col] = Pvalue;
}
//Multiplication on the CPU:
void MatrixMulCPU(float *M, float *N, float *P, int width1, int height1, int width2) {
//The result of each element is accumulated here
int sum = 0;
for (int i = 0; i < height1; i++) {
for (int j = 0; j < width2; j++) {
sum = 0;
for (int k = 0; k < width1; k++)
//Compute the product and accumulate it in the variable
sum += M[i * width1 + k] * N[k * width2 + j];
//Place the values in the result matrix
P[i * width2 + j] = sum;
}
}
}
//Initializes the matrices to be multiplied.
int initValues(float *data, int width, int heigth){
for(int i = 0; i < width*heigth; i++)
data[i] = 1.0;
return 0;
}
int main()
{
clock_t start, end;
float *h_M, *h_N, *h_P,*h_P_d; //Host matrices
float *d_M, *d_N,*d_P; // Device matrices
//Here we set the sizes (height and width) of matrices 1 and 2
int heigth1 = 10;
int width1 = 10;
int heigth2 = 10;
int width2 = 15;
cudaError_t error = cudaSuccess;
int size1 = width1 * heigth1 * sizeof(float); //Size in bytes of matrix 1
int size2 = width2 * heigth2 * sizeof(float); //Size in bytes of matrix 2
int size3 = width2 * heigth1 * sizeof(float); //Size in bytes of the result matrix
//Allocate memory for the host matrices
h_M = (float*)malloc(size1);
h_N = (float*)malloc(size2);
h_P = (float*)malloc(size3);
h_P_d = (float*)malloc(size3);
if(h_P_d == NULL)
return 0;
//Initialize the matrices
initValues(h_M, width1, heigth1);
initValues(h_N, width2, heigth2);
//GPU procedure:
//Allocate device memory for a matrix of size1 bytes
error = cudaMalloc((void**)&d_M,size1);
if(error != cudaSuccess){
printf("Error reservando memoria para d_M");
exit(0);
}
//Allocate device memory for a matrix of size2 bytes
error = cudaMalloc((void**)&d_N,size2);
if(error != cudaSuccess){
printf("Error reservando memoria para d_N");
exit(0);
}
//Allocate device memory for the result matrix of size3 bytes
error = cudaMalloc((void**)&d_P,size3);
if(error != cudaSuccess){
printf("Error reservando memoria para d_P");
exit(0);
}
//Copy the host matrices to the device with the same dimensions.
error = cudaMemcpy(d_M, h_M, size1, cudaMemcpyHostToDevice);
if(error != cudaSuccess){
printf("Error copiando datos a d_M");
exit(0);
}
error = cudaMemcpy(d_N, h_N, size2, cudaMemcpyHostToDevice);
if(error != cudaSuccess){
printf("Error copiando datos a d_N");
exit(0);
}
int blockSize = 1;
dim3 dimBlock(blockSize,blockSize,1);
dim3 dimGrid(ceil(width2 / float(blockSize)), ceil(heigth1 / float(blockSize)), 1);
// TIMING LOOP
for(int x=1; x<=5;x++)
{
printf ("Ciclo numero %d\n",x);
//multiplication on the CPU
start = clock();
MatrixMulCPU(h_M, h_N, h_P, width1, heigth1, width2); //Invoke the sequential multiplication on the CPU.
end = clock();
double cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Tiempo en CPU: %.10f\n", cpu_time_used);
//End
//Multiplication on the GPU
start = clock();
matrixMulKernelTiled<<<dimGrid, dimBlock>>>(d_M, d_N, d_P, width1, heigth1, width2);// Invoke the tiled multiplication kernel.
cudaMemcpy(h_P_d,d_P,size3,cudaMemcpyDeviceToHost); //Copy the result matrix from the device to the host.
end = clock();
double gpu_time_used = double(end - start) / CLOCKS_PER_SEC;
printf("Tiempo en GPU: %.10f\n",gpu_time_used);
//END
}
cudaFree(d_M);
cudaFree(d_N);
cudaFree(d_P);
return 0;
}
|
6e5b175e7206d5d2bb03f1b0a8c614c47734f6d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "filter.h"
#include <hip/hip_runtime.h>
texture<unsigned char, 2> dataIn;
texture<unsigned char, 2> dataOut;
__constant__ int kernel_3x3[9];
__constant__ int kernel_sum[1];
__constant__ int total_planes[1];
__constant__ int current_plane[1];
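// Applies the 3x3 kernel held in constant memory to one pixel, reading the neighbourhood from whichever
// texture (dataIn or dataOut) currently holds the input, and normalising the result by kernel_sum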
__global__ void filter_3x3_kernel(unsigned char *data, bool dstOut) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * gridDim.x * blockDim.x;
int ul, u, ur, l, c, r, dl, d, dr;
if(dstOut) {
ul = tex2D(dataIn, x - 1, y - 1) * (kernel_3x3[0]);
u = tex2D(dataIn, x, y - 1) * (kernel_3x3[1]);
ur = tex2D(dataIn, x + 1, y - 1) * (kernel_3x3[2]);
l = tex2D(dataIn, x - 1, y) * (kernel_3x3[3]);
c = tex2D(dataIn, x, y) * (kernel_3x3[4]);
r = tex2D(dataIn, x + 1, y) * (kernel_3x3[5]);
dl = tex2D(dataIn, x - 1, y + 1) * (kernel_3x3[6]);
d = tex2D(dataIn, x, y + 1) * (kernel_3x3[7]);
dr = tex2D(dataIn, x + 1, y + 1) * (kernel_3x3[8]);
} else {
ul = tex2D(dataOut, x - 1, y - 1) * (kernel_3x3[0]);
u = tex2D(dataOut, x, y - 1) * (kernel_3x3[1]);
ur = tex2D(dataOut, x + 1, y - 1) * (kernel_3x3[2]);
l = tex2D(dataOut, x - 1, y) * (kernel_3x3[3]);
c = tex2D(dataOut, x, y) * (kernel_3x3[4]);
r = tex2D(dataOut, x + 1, y) * (kernel_3x3[5]);
dl = tex2D(dataOut, x - 1, y + 1) * (kernel_3x3[6]);
d = tex2D(dataOut, x, y + 1) * (kernel_3x3[7]);
dr = tex2D(dataOut, x + 1, y + 1) * (kernel_3x3[8]);
}
data[offset] = (unsigned char)((ul + u + ur + l + c + r + dl + d + dr)/(kernel_sum[0]));
}
__global__ void deinterleave_planes(unsigned char *original, unsigned char *data) {
//each thread extracts one pixel of the current plane
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int out_offset = x + y * gridDim.x * blockDim.x;
int in_offset = x * total_planes[0] + current_plane[0] + y * gridDim.x * blockDim.x * total_planes[0];
data[out_offset] = original[in_offset];
}
__global__ void interleave_planes(unsigned char *data, unsigned char *out) {
//each thread interleave one pixel
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int in_offset = x + y * gridDim.x * blockDim.x;
int out_offset = x * total_planes[0] + current_plane[0] + y * gridDim.x * blockDim.x * total_planes[0];
out[out_offset] = data[in_offset];
}
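// Host entry point for RGB images: each colour plane is deinterleaved into its own buffer, filtered
// ntimes through the ping-pong textures, then interleaved back into the output image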
extern "C" void apply_filter_rgb(image_t *in, filter_t *f, image_t *out, int ntimes) {
int plane, planes, time;
bool dstOut;
planes = 3;
dim3 f_numBlocks(in->width*planes/30, in->height/30);
dim3 numBlocks(in->width/30, in->height/30);
dim3 threadsPerBlock(30, 30);
unsigned char *original_src, *data1, *data2, *final_out;
if(hipMalloc((void **) &original_src, in->height*in->width*planes) != hipSuccess) {
perror("Could not allocate original_src");
return;
}
if(hipMalloc((void **) &data1, in->height*in->width) != hipSuccess) {
perror("Could not allocate data1");
hipFree(original_src);
return;
}
if(hipMalloc((void **) &data2, in->height*in->width) != hipSuccess) {
perror("Could not allocate data2");
hipFree(original_src);
hipFree(data1);
return;
}
if(hipMalloc((void **) &final_out, in->height*in->width*planes) != hipSuccess) {
perror("Could not allocate final_out");
hipFree(original_src);
hipFree(data1);
hipFree(data2);
return;
}
if(hipMemcpy(original_src, in->data[0], in->height*in->width*planes*sizeof(unsigned char), hipMemcpyHostToDevice) != hipSuccess) {
perror("Could copy image to device");
hipFree(original_src);
hipFree(data1);
hipFree(data2);
hipFree(final_out);
return;
}
hipChannelFormatDesc desc = hipCreateChannelDesc<unsigned char>();
if(hipBindTexture2D( NULL, dataIn, data1, desc, in->width, in->height, in->width*sizeof(unsigned char)) != hipSuccess) {
perror("Could not bind texture dataIn");
hipFree(original_src);
hipFree(data1);
hipFree(data2);
hipFree(final_out);
return;
}
if(hipBindTexture2D( NULL, dataOut, data2, desc, in->width, in->height, in->width*sizeof(unsigned char)) != hipSuccess) {
perror("Could not bind texture dataOut");
hipFree(original_src);
hipFree(data1);
hipFree(data2);
hipFree(final_out);
return;
}
if(hipMemcpyToSymbol(kernel_3x3, f->kernel[0], f->rows*f->cols*sizeof(int), 0, hipMemcpyHostToDevice ) != hipSuccess) {
perror("Could not copy kernel to constant");
hipFree(original_src);
hipFree(data1);
hipFree(data2);
hipFree(final_out);
return;
}
if(hipMemcpyToSymbol(kernel_sum, &(f->sum), sizeof(int), 0, hipMemcpyHostToDevice ) != hipSuccess) {
perror("Could not copy kernel sum to constant");
hipFree(original_src);
hipFree(data1);
hipFree(data2);
hipFree(final_out);
return;
}
if(hipMemcpyToSymbol(total_planes, &(planes), sizeof(int), 0, hipMemcpyHostToDevice ) != hipSuccess) {
perror("Could not copy image height to constant");
hipFree(original_src);
hipFree(data1);
hipFree(data2);
hipFree(final_out);
return;
}
for(plane = 0; plane < planes; plane++) {
dstOut = true;
if(hipMemcpyToSymbol(current_plane, &(plane), sizeof(int), 0, hipMemcpyHostToDevice ) != hipSuccess) {
perror("Could not copy image height to constant");
hipFree(original_src);
hipFree(data1);
hipFree(data2);
hipFree(final_out);
return;
}
//deinterleave data
hipLaunchKernelGGL(( deinterleave_planes), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, original_src, data1);
if(hipSuccess != hipGetLastError()) {
perror("deinteleave kernel error");
break;
}
//filter
for(time=0; time < ntimes; time++) {
if(dstOut) {
hipLaunchKernelGGL(( filter_3x3_kernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, data2, dstOut);
} else {
hipLaunchKernelGGL(( filter_3x3_kernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, data1, dstOut);
}
if(hipSuccess != hipGetLastError()) {
perror("filter kernel error");
break;
}
dstOut = !dstOut;
}
//interleave out
if(dstOut) {
hipLaunchKernelGGL(( interleave_planes), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, data1, final_out);
} else {
hipLaunchKernelGGL(( interleave_planes), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, data2, final_out);
}
if(hipSuccess != hipGetLastError()) {
perror("interleave kernel error");
break;
}
}
if(hipMemcpy(out->data[0], final_out, out->height*out->width*planes*sizeof(unsigned char), hipMemcpyDeviceToHost) != hipSuccess) {
perror("hipMemcpy error");
hipFree(original_src);
hipFree(data1);
hipFree(data2);
hipFree(final_out);
return;
}
hipUnbindTexture( dataIn );
hipUnbindTexture( dataOut );
hipFree(original_src);
hipFree(data1);
hipFree(data2);
hipFree(final_out);
}
extern "C" void apply_filter_gs(image_t *in, filter_t *f, image_t *out, int ntimes) {
int time;
bool dstOut = true;
dim3 numBlocks(in->width/30, in->height/30);
dim3 threadsPerBlock(30, 30);
unsigned char *data1, *data2;
if(hipMalloc((void **) &data1, in->height*in->width) != hipSuccess) {
perror("Could not allocate data1");
return;
}
if(hipMalloc((void **) &data2, in->height*in->width) != hipSuccess) {
perror("Could not allocate data2");
hipFree(data1);
return;
}
hipChannelFormatDesc desc = hipCreateChannelDesc<unsigned char>();
if(hipBindTexture2D( NULL, dataIn, data1, desc, in->width, in->height, in->width*sizeof(unsigned char)) != hipSuccess) {
perror("Could not bind texture dataIn");
hipFree(data1);
hipFree(data2);
return;
}
if(hipMemcpy(data1, in->data[0], in->height*in->width*sizeof(unsigned char), hipMemcpyHostToDevice) != hipSuccess) {
perror("Could copy image to device");
hipFree(data1);
hipFree(data2);
return;
}
if(hipBindTexture2D( NULL, dataOut, data2, desc, in->width, in->height, in->width*sizeof(unsigned char)) != hipSuccess) {
perror("Could not bind texture dataOut");
hipFree(data1);
hipFree(data2);
return;
}
if(hipMemcpyToSymbol(kernel_3x3, f->kernel[0], f->rows*f->cols*sizeof(**f->kernel), 0, hipMemcpyHostToDevice ) != hipSuccess) {
perror("Could not copy kernel to constant");
hipFree(data1);
hipFree(data2);
return;
}
int sum = f->sum;
if(hipMemcpyToSymbol(kernel_sum, &(sum), sizeof(int), 0, hipMemcpyHostToDevice ) != hipSuccess) {
perror("Could not copy kernel sum to constant");
hipFree(data1);
hipFree(data2);
return;
}
//filter
for(time=0; time < ntimes; time++) {
if(dstOut) {
hipLaunchKernelGGL(( filter_3x3_kernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, data2, dstOut);
} else {
hipLaunchKernelGGL(( filter_3x3_kernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, data1, dstOut);
}
if(hipSuccess != hipGetLastError()) {
perror("filter kernel error");
break;
}
dstOut = !dstOut;
}
if(dstOut) {
if(hipMemcpy(out->data[0], data1, out->height*out->width*sizeof(unsigned char), hipMemcpyDeviceToHost) != hipSuccess) {
perror("hipMemcpy error");
hipFree(data1);
hipFree(data2);
return;
}
} else {
if(hipMemcpy(out->data[0], data2, out->height*out->width*sizeof(unsigned char), hipMemcpyDeviceToHost) != hipSuccess) {
perror("hipMemcpy error");
hipFree(data1);
hipFree(data2);
return;
}
}
hipUnbindTexture( dataIn );
hipUnbindTexture( dataOut );
hipFree(data1);
hipFree(data2);
}
| 6e5b175e7206d5d2bb03f1b0a8c614c47734f6d9.cu | #include "filter.h"
#include <cuda.h>
texture<unsigned char, 2> dataIn;
texture<unsigned char, 2> dataOut;
__constant__ int kernel_3x3[9];
__constant__ int kernel_sum[1];
__constant__ int total_planes[1];
__constant__ int current_plane[1];
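// Applies the 3x3 kernel held in constant memory to one pixel, reading the neighbourhood from whichever
// texture (dataIn or dataOut) currently holds the input, and normalising the result by kernel_sum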
__global__ void filter_3x3_kernel(unsigned char *data, bool dstOut) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * gridDim.x * blockDim.x;
int ul, u, ur, l, c, r, dl, d, dr;
if(dstOut) {
ul = tex2D(dataIn, x - 1, y - 1) * (kernel_3x3[0]);
u = tex2D(dataIn, x, y - 1) * (kernel_3x3[1]);
ur = tex2D(dataIn, x + 1, y - 1) * (kernel_3x3[2]);
l = tex2D(dataIn, x - 1, y) * (kernel_3x3[3]);
c = tex2D(dataIn, x, y) * (kernel_3x3[4]);
r = tex2D(dataIn, x + 1, y) * (kernel_3x3[5]);
dl = tex2D(dataIn, x - 1, y + 1) * (kernel_3x3[6]);
d = tex2D(dataIn, x, y + 1) * (kernel_3x3[7]);
dr = tex2D(dataIn, x + 1, y + 1) * (kernel_3x3[8]);
} else {
ul = tex2D(dataOut, x - 1, y - 1) * (kernel_3x3[0]);
u = tex2D(dataOut, x, y - 1) * (kernel_3x3[1]);
ur = tex2D(dataOut, x + 1, y - 1) * (kernel_3x3[2]);
l = tex2D(dataOut, x - 1, y) * (kernel_3x3[3]);
c = tex2D(dataOut, x, y) * (kernel_3x3[4]);
r = tex2D(dataOut, x + 1, y) * (kernel_3x3[5]);
dl = tex2D(dataOut, x - 1, y + 1) * (kernel_3x3[6]);
d = tex2D(dataOut, x, y + 1) * (kernel_3x3[7]);
dr = tex2D(dataOut, x + 1, y + 1) * (kernel_3x3[8]);
}
data[offset] = (unsigned char)((ul + u + ur + l + c + r + dl + d + dr)/(kernel_sum[0]));
}
__global__ void deinterleave_planes(unsigned char *original, unsigned char *data) {
//each thread extracts one pixel of the current plane
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int out_offset = x + y * gridDim.x * blockDim.x;
int in_offset = x * total_planes[0] + current_plane[0] + y * gridDim.x * blockDim.x * total_planes[0];
data[out_offset] = original[in_offset];
}
__global__ void interleave_planes(unsigned char *data, unsigned char *out) {
//each thread interleave one pixel
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int in_offset = x + y * gridDim.x * blockDim.x;
int out_offset = x * total_planes[0] + current_plane[0] + y * gridDim.x * blockDim.x * total_planes[0];
out[out_offset] = data[in_offset];
}
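// Host entry point for RGB images: each colour plane is deinterleaved into its own buffer, filtered
// ntimes through the ping-pong textures, then interleaved back into the output image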
extern "C" void apply_filter_rgb(image_t *in, filter_t *f, image_t *out, int ntimes) {
int plane, planes, time;
bool dstOut;
planes = 3;
dim3 f_numBlocks(in->width*planes/30, in->height/30);
dim3 numBlocks(in->width/30, in->height/30);
dim3 threadsPerBlock(30, 30);
unsigned char *original_src, *data1, *data2, *final_out;
if(cudaMalloc((void **) &original_src, in->height*in->width*planes) != cudaSuccess) {
perror("Could not allocate original_src");
return;
}
if(cudaMalloc((void **) &data1, in->height*in->width) != cudaSuccess) {
perror("Could not allocate data1");
cudaFree(original_src);
return;
}
if(cudaMalloc((void **) &data2, in->height*in->width) != cudaSuccess) {
perror("Could not allocate data2");
cudaFree(original_src);
cudaFree(data1);
return;
}
if(cudaMalloc((void **) &final_out, in->height*in->width*planes) != cudaSuccess) {
perror("Could not allocate final_out");
cudaFree(original_src);
cudaFree(data1);
cudaFree(data2);
return;
}
if(cudaMemcpy(original_src, in->data[0], in->height*in->width*planes*sizeof(unsigned char), cudaMemcpyHostToDevice) != cudaSuccess) {
perror("Could copy image to device");
cudaFree(original_src);
cudaFree(data1);
cudaFree(data2);
cudaFree(final_out);
return;
}
cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned char>();
if(cudaBindTexture2D( NULL, dataIn, data1, desc, in->width, in->height, in->width*sizeof(unsigned char)) != cudaSuccess) {
perror("Could not bind texture dataIn");
cudaFree(original_src);
cudaFree(data1);
cudaFree(data2);
cudaFree(final_out);
return;
}
if(cudaBindTexture2D( NULL, dataOut, data2, desc, in->width, in->height, in->width*sizeof(unsigned char)) != cudaSuccess) {
perror("Could not bind texture dataOut");
cudaFree(original_src);
cudaFree(data1);
cudaFree(data2);
cudaFree(final_out);
return;
}
if(cudaMemcpyToSymbol(kernel_3x3, f->kernel[0], f->rows*f->cols*sizeof(int), 0, cudaMemcpyHostToDevice ) != cudaSuccess) {
perror("Could not copy kernel to constant");
cudaFree(original_src);
cudaFree(data1);
cudaFree(data2);
cudaFree(final_out);
return;
}
if(cudaMemcpyToSymbol(kernel_sum, &(f->sum), sizeof(int), 0, cudaMemcpyHostToDevice ) != cudaSuccess) {
perror("Could not copy kernel sum to constant");
cudaFree(original_src);
cudaFree(data1);
cudaFree(data2);
cudaFree(final_out);
return;
}
if(cudaMemcpyToSymbol(total_planes, &(planes), sizeof(int), 0, cudaMemcpyHostToDevice ) != cudaSuccess) {
perror("Could not copy image height to constant");
cudaFree(original_src);
cudaFree(data1);
cudaFree(data2);
cudaFree(final_out);
return;
}
for(plane = 0; plane < planes; plane++) {
dstOut = true;
if(cudaMemcpyToSymbol(current_plane, &(plane), sizeof(int), 0, cudaMemcpyHostToDevice ) != cudaSuccess) {
perror("Could not copy image height to constant");
cudaFree(original_src);
cudaFree(data1);
cudaFree(data2);
cudaFree(final_out);
return;
}
//deinterleave data
deinterleave_planes<<<numBlocks, threadsPerBlock>>>(original_src, data1);
if(cudaSuccess != cudaGetLastError()) {
perror("deinteleave kernel error");
break;
}
//filter
for(time=0; time < ntimes; time++) {
if(dstOut) {
filter_3x3_kernel<<<numBlocks, threadsPerBlock>>>(data2, dstOut);
} else {
filter_3x3_kernel<<<numBlocks, threadsPerBlock>>>(data1, dstOut);
}
if(cudaSuccess != cudaGetLastError()) {
perror("filter kernel error");
break;
}
dstOut = !dstOut;
}
//interleave out
if(dstOut) {
interleave_planes<<<numBlocks, threadsPerBlock>>>(data1, final_out);
} else {
interleave_planes<<<numBlocks, threadsPerBlock>>>(data2, final_out);
}
if(cudaSuccess != cudaGetLastError()) {
perror("interleave kernel error");
break;
}
}
if(cudaMemcpy(out->data[0], final_out, out->height*out->width*planes*sizeof(unsigned char), cudaMemcpyDeviceToHost) != cudaSuccess) {
perror("cudaMemcpy error");
cudaFree(original_src);
cudaFree(data1);
cudaFree(data2);
cudaFree(final_out);
return;
}
cudaUnbindTexture( dataIn );
cudaUnbindTexture( dataOut );
cudaFree(original_src);
cudaFree(data1);
cudaFree(data2);
cudaFree(final_out);
}
extern "C" void apply_filter_gs(image_t *in, filter_t *f, image_t *out, int ntimes) {
int time;
bool dstOut = true;
dim3 numBlocks(in->width/30, in->height/30);
dim3 threadsPerBlock(30, 30);
unsigned char *data1, *data2;
if(cudaMalloc((void **) &data1, in->height*in->width) != cudaSuccess) {
perror("Could not allocate data1");
return;
}
if(cudaMalloc((void **) &data2, in->height*in->width) != cudaSuccess) {
perror("Could not allocate data2");
cudaFree(data1);
return;
}
cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned char>();
if(cudaBindTexture2D( NULL, dataIn, data1, desc, in->width, in->height, in->width*sizeof(unsigned char)) != cudaSuccess) {
perror("Could not bind texture dataIn");
cudaFree(data1);
cudaFree(data2);
return;
}
if(cudaMemcpy(data1, in->data[0], in->height*in->width*sizeof(unsigned char), cudaMemcpyHostToDevice) != cudaSuccess) {
perror("Could copy image to device");
cudaFree(data1);
cudaFree(data2);
return;
}
if(cudaBindTexture2D( NULL, dataOut, data2, desc, in->width, in->height, in->width*sizeof(unsigned char)) != cudaSuccess) {
perror("Could not bind texture dataOut");
cudaFree(data1);
cudaFree(data2);
return;
}
if(cudaMemcpyToSymbol(kernel_3x3, f->kernel[0], f->rows*f->cols*sizeof(**f->kernel), 0, cudaMemcpyHostToDevice ) != cudaSuccess) {
perror("Could not copy kernel to constant");
cudaFree(data1);
cudaFree(data2);
return;
}
int sum = f->sum;
if(cudaMemcpyToSymbol(kernel_sum, &(sum), sizeof(int), 0, cudaMemcpyHostToDevice ) != cudaSuccess) {
perror("Could not copy kernel sum to constant");
cudaFree(data1);
cudaFree(data2);
return;
}
//filter
for(time=0; time < ntimes; time++) {
if(dstOut) {
filter_3x3_kernel<<<numBlocks, threadsPerBlock>>>(data2, dstOut);
} else {
filter_3x3_kernel<<<numBlocks, threadsPerBlock>>>(data1, dstOut);
}
if(cudaSuccess != cudaGetLastError()) {
perror("filter kernel error");
break;
}
dstOut = !dstOut;
}
if(dstOut) {
if(cudaMemcpy(out->data[0], data1, out->height*out->width*sizeof(unsigned char), cudaMemcpyDeviceToHost) != cudaSuccess) {
perror("cudaMemcpy error");
cudaFree(data1);
cudaFree(data2);
return;
}
} else {
if(cudaMemcpy(out->data[0], data2, out->height*out->width*sizeof(unsigned char), cudaMemcpyDeviceToHost) != cudaSuccess) {
perror("cudaMemcpy error");
cudaFree(data1);
cudaFree(data2);
return;
}
}
cudaUnbindTexture( dataIn );
cudaUnbindTexture( dataOut );
cudaFree(data1);
cudaFree(data2);
}
|
13d0f0c7ebc3cf5ecbf232750597287fa64cbc3c.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <Windows.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
// Debug flag
#define CUDA_DEBUG
// Print diagnostic information when a CUDA call fails
#ifdef CUDA_DEBUG
#define CUDA_CHECK_ERROR(err) \
if (err != hipSuccess) { \
printf("Cuda error: %s\n", hipGetErrorString(err)); \
printf("Error in file: %s, line: %i\n", __FILE__, __LINE__); \
}
#else
#define CUDA_CHECK_ERROR(err)
#endif
// Matrix transpose kernel that works directly on global memory (no shared-memory tiling)
// * inputMatrix - pointer to the source matrix
// * outputMatrix - pointer to the result matrix
// * width - width of the source matrix (and height of the result matrix)
// * height - height of the source matrix (and width of the result matrix)
__global__ void transposeMatrixGlobal(float* inputMatrix, float* outputMatrix, int width, int height) {
// Compute the matrix indices
int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
int yIndex = blockDim.y * blockIdx.y + threadIdx.y;
if ((xIndex < width) && (yIndex < height)) {
// Linear index of the element in the source matrix
int inputIdx = xIndex + width * yIndex;
// Linear index of the corresponding element in the result matrix
int outputIdx = yIndex + height * xIndex;
// Store the element
outputMatrix[outputIdx] = inputMatrix[inputIdx];
}
}
#define BLOCK_DIM 16
// Matrix transpose kernel that stages the data through shared memory
// * inputMatrix - pointer to the source matrix
// * outputMatrix - pointer to the result matrix
// * width - width of the source matrix (and height of the result matrix)
// * height - height of the source matrix (and width of the result matrix)
__global__ void transposeMatrixShared(float* inputMatrix, float* outputMatrix, int width, int height) {
__shared__ float temp[BLOCK_DIM][BLOCK_DIM];
// Compute the matrix indices
int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if ((xIndex < width) && (yIndex < height)) {
// Linear index of the element in the source matrix
int idx = yIndex * width + xIndex;
// Copy the element of the source matrix into shared memory
temp[threadIdx.y][threadIdx.x] = inputMatrix[idx];
}
// Synchronize all threads in the block
__syncthreads();
xIndex = blockIdx.y * blockDim.y + threadIdx.x;
yIndex = blockIdx.x * blockDim.x + threadIdx.y;
if ((xIndex < height) && (yIndex < width)) {
// Linear index of the element in the result matrix
int idx = yIndex * height + xIndex;
// Write the transposed element from shared memory
outputMatrix[idx] = temp[threadIdx.x][threadIdx.y];
}
}
// Matrix transpose performed on the CPU
__host__ void transposeMatrixCPU(float *inputMatrix, float *outputMatrix, int width, int height) {
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
outputMatrix[x * height + y] = inputMatrix[y * width + x];
}
}
}
// Write a matrix to a text file
__host__ void printMatrixToFile(char* fileName, float* matrix, int width, int height) {
FILE *file = fopen(fileName, "wt");
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
fprintf(file, "%.0f\t", matrix[y * width + x]);
}
fprintf(file, "\n");
}
fclose(file);
}
#define GPU_SLOW 1
#define GPU_FAST 2
#define CPU 3
// Number of timing iterations
#define ITERATIONS 20
__host__ int main() {
// Width and height of the matrix
int width = 2048, height = 1536;
// Number of elements in the matrix
int matrixSize = width * height;
// Number of bytes occupied by the matrix
int byteSize = matrixSize * sizeof(float);
// Allocate host memory for the matrices
float* inputMatrix = new float[matrixSize];
float* outputMatrix = new float[matrixSize];
// Fill the source matrix with data
for (int i = 0; i < matrixSize; i++)
inputMatrix[i] = i;
// Choose how the transposed matrix will be computed
printf("Select compute mode: 1 - Slow GPU, 2 - Fast GPU, 3 - CPU\n");
int mode;
scanf("%i", &mode);
// Write the source matrix to a file
printMatrixToFile("before.txt", inputMatrix, width, height);
// If only the CPU is used
if (mode == CPU) {
int start = GetTickCount();
for (int i = 0; i < ITERATIONS; i++) {
transposeMatrixCPU(inputMatrix, outputMatrix, width, height);
}
// Print the CPU execution time (in milliseconds)
printf("CPU compute time: %i\n", GetTickCount() - start);
}
// Otherwise compute on the GPU
else {
float *devInputMatrix, *devOutputMatrix;
// Allocate global device memory for the data
CUDA_CHECK_ERROR(hipMalloc((void**)&devInputMatrix, byteSize));
CUDA_CHECK_ERROR(hipMalloc((void**)&devOutputMatrix, byteSize));
// Copy the source matrix from the host to the device
CUDA_CHECK_ERROR(hipMemcpy(devInputMatrix, inputMatrix, byteSize, hipMemcpyHostToDevice));
// Kernel launch configuration
dim3 gridSize = dim3(width / BLOCK_DIM, height / BLOCK_DIM, 1);
dim3 blockSize = dim3(BLOCK_DIM, BLOCK_DIM, 1);
hipEvent_t start, stop;
// Create events for synchronizing with and timing the GPU
CUDA_CHECK_ERROR(hipEventCreate(&start));
CUDA_CHECK_ERROR(hipEventCreate(&stop));
// Mark the start of the GPU computation
hipEventRecord(start, 0);
// Use the version without shared memory
if (mode == GPU_SLOW) {
for (int i = 0; i < ITERATIONS; i++) {
hipLaunchKernelGGL(( transposeMatrixGlobal), dim3(gridSize), dim3(blockSize), 0, 0, devInputMatrix, devOutputMatrix, width, height);
}
}
// Use the version with shared memory
else if (mode == GPU_FAST) {
for (int i = 0; i < ITERATIONS; i++) {
hipLaunchKernelGGL(( transposeMatrixShared), dim3(gridSize), dim3(blockSize), 0, 0, devInputMatrix, devOutputMatrix, width, height);
}
}
// Mark the end of the computation
hipEventRecord(stop, 0);
// Wait for the computation to finish
hipEventSynchronize(stop);
// Compute the GPU running time
float time = 0;
hipEventElapsedTime(&time, start, stop);
// Print the computation time to the console
printf("GPU compute time: %.0f\n", time);
// Copy the result from the device to the host
CUDA_CHECK_ERROR(hipMemcpy(outputMatrix, devOutputMatrix, byteSize, hipMemcpyDeviceToHost));
// Free resources on the device
CUDA_CHECK_ERROR(hipFree(devInputMatrix));
CUDA_CHECK_ERROR(hipFree(devOutputMatrix));
CUDA_CHECK_ERROR(hipEventDestroy(start));
CUDA_CHECK_ERROR(hipEventDestroy(stop));
}
// Write the result matrix to a file
printMatrixToFile("after.txt", outputMatrix, height, width);
// Free host memory
delete[] inputMatrix, outputMatrix;
return 0;
}
| 13d0f0c7ebc3cf5ecbf232750597287fa64cbc3c.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <Windows.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
// Debug flag
#define CUDA_DEBUG
// Print diagnostic information when a CUDA call fails
#ifdef CUDA_DEBUG
#define CUDA_CHECK_ERROR(err) \
if (err != cudaSuccess) { \
printf("Cuda error: %s\n", cudaGetErrorString(err)); \
printf("Error in file: %s, line: %i\n", __FILE__, __LINE__); \
}
#else
#define CUDA_CHECK_ERROR(err)
#endif
// Matrix transpose kernel that works directly on global memory (no shared-memory tiling)
// * inputMatrix - pointer to the source matrix
// * outputMatrix - pointer to the result matrix
// * width - width of the source matrix (and height of the result matrix)
// * height - height of the source matrix (and width of the result matrix)
__global__ void transposeMatrixGlobal(float* inputMatrix, float* outputMatrix, int width, int height) {
// Compute the matrix indices
int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
int yIndex = blockDim.y * blockIdx.y + threadIdx.y;
if ((xIndex < width) && (yIndex < height)) {
// Linear index of the element in the source matrix
int inputIdx = xIndex + width * yIndex;
// Linear index of the corresponding element in the result matrix
int outputIdx = yIndex + height * xIndex;
// Store the element
outputMatrix[outputIdx] = inputMatrix[inputIdx];
}
}
#define BLOCK_DIM 16
// Matrix transpose kernel that stages the data through shared memory
// * inputMatrix - pointer to the source matrix
// * outputMatrix - pointer to the result matrix
// * width - width of the source matrix (and height of the result matrix)
// * height - height of the source matrix (and width of the result matrix)
__global__ void transposeMatrixShared(float* inputMatrix, float* outputMatrix, int width, int height) {
__shared__ float temp[BLOCK_DIM][BLOCK_DIM];
// Compute the matrix indices
int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
if ((xIndex < width) && (yIndex < height)) {
// Linear index of the element in the source matrix
int idx = yIndex * width + xIndex;
//Copy the element of the source matrix into shared memory
temp[threadIdx.y][threadIdx.x] = inputMatrix[idx];
}
//Synchronize all threads in the block
__syncthreads();
xIndex = blockIdx.y * blockDim.y + threadIdx.x;
yIndex = blockIdx.x * blockDim.x + threadIdx.y;
if ((xIndex < height) && (yIndex < width)) {
// Linear index of the element in the result matrix
int idx = yIndex * height + xIndex;
//Write the transposed element from shared memory
outputMatrix[idx] = temp[threadIdx.x][threadIdx.y];
}
}
// Matrix transpose performed on the CPU
__host__ void transposeMatrixCPU(float *inputMatrix, float *outputMatrix, int width, int height) {
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
outputMatrix[x * height + y] = inputMatrix[y * width + x];
}
}
}
// Write a matrix to a text file
__host__ void printMatrixToFile(char* fileName, float* matrix, int width, int height) {
FILE *file = fopen(fileName, "wt");
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
fprintf(file, "%.0f\t", matrix[y * width + x]);
}
fprintf(file, "\n");
}
fclose(file);
}
#define GPU_SLOW 1
#define GPU_FAST 2
#define CPU 3
// Number of timing iterations
#define ITERATIONS 20
__host__ int main() {
// Width and height of the matrix
int width = 2048, height = 1536;
// Number of elements in the matrix
int matrixSize = width * height;
// Number of bytes occupied by the matrix
int byteSize = matrixSize * sizeof(float);
//Allocate host memory for the matrices
float* inputMatrix = new float[matrixSize];
float* outputMatrix = new float[matrixSize];
// Fill the source matrix with data
for (int i = 0; i < matrixSize; i++)
inputMatrix[i] = i;
// Choose how to compute the transposed matrix
printf("Select compute mode: 1 - Slow GPU, 2 - Fast GPU, 3 - CPU\n");
int mode;
scanf("%i", &mode);
// Write the source matrix to a file
printMatrixToFile("before.txt", inputMatrix, width, height);
// If only the CPU is used
if (mode == CPU) {
int start = GetTickCount();
for (int i = 0; i < ITERATIONS; i++) {
transposeMatrixCPU(inputMatrix, outputMatrix, width, height);
}
// Print the CPU execution time (in milliseconds)
printf("CPU compute time: %i\n", GetTickCount() - start);
}
// Otherwise compute on the GPU
else {
float *devInputMatrix, *devOutputMatrix;
// Allocate global memory on the device for the data
CUDA_CHECK_ERROR(cudaMalloc((void**)&devInputMatrix, byteSize));
CUDA_CHECK_ERROR(cudaMalloc((void**)&devOutputMatrix, byteSize));
// Copy the source matrix from the host to the device
CUDA_CHECK_ERROR(cudaMemcpy(devInputMatrix, inputMatrix, byteSize, cudaMemcpyHostToDevice));
// Kernel launch configuration
dim3 gridSize = dim3(width / BLOCK_DIM, height / BLOCK_DIM, 1);
dim3 blockSize = dim3(BLOCK_DIM, BLOCK_DIM, 1);
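// Note: this grid covers the whole matrix only when width and height are multiples of BLOCK_DIM (2048 and 1536 both are)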
cudaEvent_t start, stop;
// Create events for synchronization and for timing the GPU
CUDA_CHECK_ERROR(cudaEventCreate(&start));
CUDA_CHECK_ERROR(cudaEventCreate(&stop));
// Mark the start of the GPU computation
cudaEventRecord(start, 0);
// Use the version without shared memory
if (mode == GPU_SLOW) {
for (int i = 0; i < ITERATIONS; i++) {
transposeMatrixGlobal<<<gridSize, blockSize>>>(devInputMatrix, devOutputMatrix, width, height);
}
}
// Use the version with shared memory
else if (mode == GPU_FAST) {
for (int i = 0; i < ITERATIONS; i++) {
transposeMatrixShared<<<gridSize, blockSize>>>(devInputMatrix, devOutputMatrix, width, height);
}
}
// Mark the end of the computation
cudaEventRecord(stop, 0);
// Synchronize with the end of the computation
cudaEventSynchronize(stop);
// Compute the GPU execution time
float time = 0;
cudaEventElapsedTime(&time, start, stop);
// Print the computation time to the console
printf("GPU compute time: %.0f\n", time);
// Copy the result from the device to the host
CUDA_CHECK_ERROR(cudaMemcpy(outputMatrix, devOutputMatrix, byteSize, cudaMemcpyDeviceToHost));
// Release resources on the GPU
CUDA_CHECK_ERROR(cudaFree(devInputMatrix));
CUDA_CHECK_ERROR(cudaFree(devOutputMatrix));
CUDA_CHECK_ERROR(cudaEventDestroy(start));
CUDA_CHECK_ERROR(cudaEventDestroy(stop));
}
// Write the result matrix to a file
printMatrixToFile("after.txt", outputMatrix, height, width);
// Free memory on the host
delete[] inputMatrix;
delete[] outputMatrix;
return 0;
}
|
a45673c4f82ace83640fd0aa32925c46cf71c0cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <cstdlib>
#include <cstdio>
#include <cassert>
#include <iostream>
__global__ void array_manipulation_kernel(int* a, int n) {
unsigned int index;
index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < n)
a[index] *= 2;
}
class ArrayManipulation {
public:
int arrayLength;
explicit ArrayManipulation(int arrayLength);
void initArray(int* array) const;
void run(int numGrids, int numThreads) const;
void displayResult(int* array, int* resultArray) const;
void checkResult(const int* array, const int* resultArray) const;
};
ArrayManipulation::ArrayManipulation(int arrayLength) {
this->arrayLength = arrayLength;
}
void ArrayManipulation::initArray(int *array) const {
for(int i = 0; i < this->arrayLength; i++)
array[i] = rand() % 100;
}
void ArrayManipulation::displayResult(int *array, int* resultArray) const {
for(int i = 0; i < this->arrayLength; i++)
printf("%d * 2 = %d\n", array[i], resultArray[i]);
}
void ArrayManipulation::checkResult(const int *array, const int* resultArray) const {
for(int i = 0; i < this->arrayLength; i++)
assert(resultArray[i] == array[i] * 2);
printf("Program Executed Successfully");
}
void ArrayManipulation::run(int numGrids, int numThreads) const {
int deviceId;
hipGetDevice(&deviceId);
printf("GPU Device ID: %d\n", deviceId);
printf("CPU Device ID: %d\n\n", hipCpuDeviceId);
int * hostArray, * resultArray, * deviceArray;
size_t arrayBytes = sizeof(int) * this->arrayLength;
hipHostMalloc(&hostArray, arrayBytes);
hipHostMalloc(&resultArray, arrayBytes);
hipMalloc(&deviceArray, arrayBytes);
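// hostArray and resultArray are pinned (page-locked) host buffers; deviceArray lives in GPU global memory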
initArray(hostArray);
hipMemcpy(deviceArray, hostArray, arrayBytes, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( array_manipulation_kernel), dim3(numGrids), dim3(numThreads), 0, 0, deviceArray, arrayLength);
hipDeviceSynchronize();
hipMemcpy(resultArray, deviceArray, arrayBytes, hipMemcpyDeviceToHost);
displayResult(hostArray, resultArray);
checkResult(hostArray, resultArray);
hipHostFree(hostArray);
hipHostFree(resultArray);
hipFree(deviceArray);
}
int main() {
ArrayManipulation program(16);
program.run(1, 16);
}
| a45673c4f82ace83640fd0aa32925c46cf71c0cb.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdlib>
#include <cstdio>
#include <cassert>
#include <iostream>
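// Kernel: each thread doubles one element of the array in place (the index check guards against excess threads)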
__global__ void array_manipulation_kernel(int* a, int n) {
unsigned int index;
index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < n)
a[index] *= 2;
}
class ArrayManipulation {
public:
int arrayLength;
explicit ArrayManipulation(int arrayLength);
void initArray(int* array) const;
void run(int numGrids, int numThreads) const;
void displayResult(int* array, int* resultArray) const;
void checkResult(const int* array, const int* resultArray) const;
};
ArrayManipulation::ArrayManipulation(int arrayLength) {
this->arrayLength = arrayLength;
}
void ArrayManipulation::initArray(int *array) const {
for(int i = 0; i < this->arrayLength; i++)
array[i] = rand() % 100;
}
void ArrayManipulation::displayResult(int *array, int* resultArray) const {
for(int i = 0; i < this->arrayLength; i++)
printf("%d * 2 = %d\n", array[i], resultArray[i]);
}
void ArrayManipulation::checkResult(const int *array, const int* resultArray) const {
for(int i = 0; i < this->arrayLength; i++)
assert(resultArray[i] == array[i] * 2);
printf("Program Executed Successfully");
}
void ArrayManipulation::run(int numGrids, int numThreads) const {
int deviceId;
cudaGetDevice(&deviceId);
printf("GPU Device ID: %d\n", deviceId);
printf("CPU Device ID: %d\n\n", cudaCpuDeviceId);
int * hostArray, * resultArray, * deviceArray;
size_t arrayBytes = sizeof(int) * this->arrayLength;
cudaMallocHost(&hostArray, arrayBytes);
cudaMallocHost(&resultArray, arrayBytes);
cudaMalloc(&deviceArray, arrayBytes);
initArray(hostArray);
cudaMemcpy(deviceArray, hostArray, arrayBytes, cudaMemcpyHostToDevice);
array_manipulation_kernel<<<numGrids, numThreads>>>(deviceArray, arrayLength);
cudaDeviceSynchronize();
cudaMemcpy(resultArray, deviceArray, arrayBytes, cudaMemcpyDeviceToHost);
displayResult(hostArray, resultArray);
checkResult(hostArray, resultArray);
cudaFreeHost(hostArray);
cudaFreeHost(resultArray);
cudaFree(deviceArray);
}
int main() {
ArrayManipulation program(16);
program.run(1, 16);
}
|
1194b81b4f7839e3be9f09d202f32086c3bd097e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ParticlesCuda.h"
__global__ void particleUpdate(
const float4* __restrict__ positions,
float4* __restrict__ positionOut,
float4* __restrict__ velocity,
const MinMaxDataCuda* staticColliders,
const float dt,
const float3 gravity,
const float3 position,
const float3 dimension,
const size_t numberOfParticles,
const size_t numberOfColliders,
SimulationData simData)
{
const uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numberOfParticles)
return;
float fluidDamp = 0.0;
float particleSize = simData.interactionRadius * 0.1f;
float3 worldAABBmin = make_float3(particleSize);
float3 worldAABBmax = dimension - particleSize;
float3 particlePosition = make_float3(positions[index]);
float3 particleVelocity = make_float3(velocity[index]);
float3 particlePressure = calculatePressure(positions, velocity, index, particlePosition, particleVelocity, numberOfParticles, simData);
particlePosition -= position;
// gravity
particleVelocity += (gravity + particlePressure) * dt;
// *** g
float3 deltaVelocity = particleVelocity * dt;
float3 sizeOffset = normalize(particleVelocity) * particleSize;
float3 newPos = particlePosition + deltaVelocity;
// static collision
for (int i = 0; i < numberOfColliders; i++)
{
MinMaxDataCuda currentAABB = staticColliders[i];
float3 intersection;
float fraction;
bool result = false;
result = LineAABBIntersection(currentAABB, particlePosition, newPos + sizeOffset, intersection, fraction);
if (result == false)
continue;
if (intersection.x == currentAABB.max.x || intersection.x == currentAABB.min.x)
particleVelocity.x *= -fluidDamp;
else if (intersection.y == currentAABB.max.y || intersection.y == currentAABB.min.y)
particleVelocity.y *= -fluidDamp;
else if (intersection.z == currentAABB.max.z || intersection.z == currentAABB.min.z)
particleVelocity.z *= -fluidDamp;
newPos = intersection;
break;
}
// *** sc
// bounding box collision
float3 tmpVel = particleVelocity;
for (int i = 0; i < 3; ++i)
{
if ((dim(newPos, i) > dim(worldAABBmax, i) && dim(tmpVel, i) > 0.0) // max boundary
|| (dim(newPos, i) < dim(worldAABBmin, i) && dim(tmpVel, i) < 0.0) // min boundary
)
{
dim(tmpVel, i) *= -fluidDamp;
}
}
particleVelocity = tmpVel;
// *** bbc
particlePosition += particleVelocity * dt;
positionOut[index] = make_float4(particlePosition + position, length(particleVelocity));
velocity[index] = make_float4(particleVelocity, 0.0f);
}
__device__ __host__ float3 calculatePressure(const float4* __restrict__ position, const float4* __restrict__ velocity, uint index, float3 pos, float3 vel, uint numberOfParticles, SimulationData simData)
{
float3 pressureVec = make_float3(0.f);
float3 viscosityVec = pressureVec;
float influence = 0.f;
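// All-pairs neighbor loop: accumulate pressure and viscosity contributions from every other particle within interactionRadius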
for (uint i = 0; i < numberOfParticles; i++)
{
if (index == i)
continue;
float3 dirVec = pos - make_float3(position[i]);
float dist = length(dirVec);//TODO: maybe use half_length
if (dist > simData.interactionRadius)
continue;
float3 dirVecN = normalize(dirVec);
float moveDir = dot(vel - make_float3(velocity[i]), dirVecN);
float distRel = 1.0f - dist / simData.interactionRadius;
float sqx = distRel * distRel;
influence += 1.0f;
// viscosity
if (true || moveDir > 0)
{
float factor = sqx * (simData.viscosity * moveDir);
float3 impulse = factor * dirVecN;
viscosityVec -= impulse;
}
// *** v
float pressure = sqx * simData.pressureMultiplier;
pressureVec += (pressure - simData.restPressure) * dirVecN;
}
//compress viscosity TODO: fix the root of this problem and not just limit it manually
if (influence > 0.f)
{
viscosityVec = viscosityVec / influence;
}
if (length(viscosityVec) > 100.0)
viscosityVec = normalize(viscosityVec) * 100.0;
//*** lv
return pressureVec + viscosityVec;
}
void cudaParticleUpdate(
float4* positions,
float4* positionOut,
float4* velocity,
MinMaxDataCuda* staticColliders,
const float dt,
const float3 gravity,
const float3 position,
const float3 dimension,
const size_t numberOfParticles,
const size_t numberOfColliders,
SimulationData simData)
{
hipDeviceProp_t devProp;
int device;
hipGetDevice(&device);
hipGetDeviceProperties(&devProp, device);
size_t num = 1;
size_t threads = numberOfParticles;
size_t maxThreads = devProp.maxThreadsPerBlock;
if (numberOfParticles > maxThreads)
{
num = (size_t)ceilf(float(numberOfParticles) / maxThreads);
threads = maxThreads;
}
hipLaunchKernelGGL(( particleUpdate) , dim3(num), dim3(threads) , 0, 0, positions, positionOut, velocity, staticColliders, dt, gravity, dimension, position, numberOfParticles, numberOfColliders, simData);
}
| 1194b81b4f7839e3be9f09d202f32086c3bd097e.cu | #include "ParticlesCuda.h"
__global__ void particleUpdate(
const float4* __restrict__ positions,
float4* __restrict__ positionOut,
float4* __restrict__ velocity,
const MinMaxDataCuda* staticColliders,
const float dt,
const float3 gravity,
const float3 position,
const float3 dimension,
const size_t numberOfParticles,
const size_t numberOfColliders,
SimulationData simData)
{
const uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numberOfParticles)
return;
float fluidDamp = 0.0;
float particleSize = simData.interactionRadius * 0.1f;
float3 worldAABBmin = make_float3(particleSize);
float3 worldAABBmax = dimension - particleSize;
float3 particlePosition = make_float3(positions[index]);
float3 particleVelocity = make_float3(velocity[index]);
float3 particlePressure = calculatePressure(positions, velocity, index, particlePosition, particleVelocity, numberOfParticles, simData);
particlePosition -= position;
// gravity
particleVelocity += (gravity + particlePressure) * dt;
// *** g
float3 deltaVelocity = particleVelocity * dt;
float3 sizeOffset = normalize(particleVelocity) * particleSize;
float3 newPos = particlePosition + deltaVelocity;
// static collision
for (int i = 0; i < numberOfColliders; i++)
{
MinMaxDataCuda currentAABB = staticColliders[i];
float3 intersection;
float fraction;
bool result = false;
result = LineAABBIntersection(currentAABB, particlePosition, newPos + sizeOffset, intersection, fraction);
if (result == false)
continue;
if (intersection.x == currentAABB.max.x || intersection.x == currentAABB.min.x)
particleVelocity.x *= -fluidDamp;
else if (intersection.y == currentAABB.max.y || intersection.y == currentAABB.min.y)
particleVelocity.y *= -fluidDamp;
else if (intersection.z == currentAABB.max.z || intersection.z == currentAABB.min.z)
particleVelocity.z *= -fluidDamp;
newPos = intersection;
break;
}
// *** sc
// bounding box collision
float3 tmpVel = particleVelocity;
for (int i = 0; i < 3; ++i)
{
if ((dim(newPos, i) > dim(worldAABBmax, i) && dim(tmpVel, i) > 0.0) // max boundary
|| (dim(newPos, i) < dim(worldAABBmin, i) && dim(tmpVel, i) < 0.0) // min boundary
)
{
dim(tmpVel, i) *= -fluidDamp;
}
}
particleVelocity = tmpVel;
// *** bbc
particlePosition += particleVelocity * dt;
positionOut[index] = make_float4(particlePosition + position, length(particleVelocity));
velocity[index] = make_float4(particleVelocity, 0.0f);
}
__device__ __host__ float3 calculatePressure(const float4* __restrict__ position, const float4* __restrict__ velocity, uint index, float3 pos, float3 vel, uint numberOfParticles, SimulationData simData)
{
float3 pressureVec = make_float3(0.f);
float3 viscosityVec = pressureVec;
float influence = 0.f;
for (uint i = 0; i < numberOfParticles; i++)
{
if (index == i)
continue;
float3 dirVec = pos - make_float3(position[i]);
float dist = length(dirVec);//TODO: maybe use half_length
if (dist > simData.interactionRadius)
continue;
float3 dirVecN = normalize(dirVec);
float moveDir = dot(vel - make_float3(velocity[i]), dirVecN);
float distRel = 1.0f - dist / simData.interactionRadius;
float sqx = distRel * distRel;
influence += 1.0f;
// viscosity
if (true || moveDir > 0)
{
float factor = sqx * (simData.viscosity * moveDir);
float3 impulse = factor * dirVecN;
viscosityVec -= impulse;
}
// *** v
float pressure = sqx * simData.pressureMultiplier;
pressureVec += (pressure - simData.restPressure) * dirVecN;
}
//compress viscosity TODO: fix the root of this problem and not just limit it manually
if (influence > 0.f)
{
viscosityVec = viscosityVec / influence;
}
if (length(viscosityVec) > 100.0)
viscosityVec = normalize(viscosityVec) * 100.0;
//*** lv
return pressureVec + viscosityVec;
}
void cudaParticleUpdate(
float4* positions,
float4* positionOut,
float4* velocity,
MinMaxDataCuda* staticColliders,
const float dt,
const float3 gravity,
const float3 position,
const float3 dimension,
const size_t numberOfParticles,
const size_t numberOfColliders,
SimulationData simData)
{
cudaDeviceProp devProp;
int device;
cudaGetDevice(&device);
cudaGetDeviceProperties(&devProp, device);
size_t num = 1;
size_t threads = numberOfParticles;
size_t maxThreads = devProp.maxThreadsPerBlock;
if (numberOfParticles > maxThreads)
{
num = (size_t)ceilf(float(numberOfParticles) / maxThreads);
threads = maxThreads;
}
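// Launch one thread per particle; the block size is capped at the device's maxThreadsPerBlock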
particleUpdate <<< num, threads >>> (positions, positionOut, velocity, staticColliders, dt, gravity, dimension, position, numberOfParticles, numberOfColliders, simData);
}
|
57c3a08a995c0062d8207e459b849b6ca1056539.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Number threads per block
int NTPB=512;
//Num blocks is determined by size of zeropadded 2^n size array
int numBlocks = (ndimp+NTPB-1) / NTPB;
//Shared memory
int smemSize = NTPB * sizeof(double);
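// one double of shared memory per thread, used by the block-level max reduction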
//Array to store maximum values for reduction in host memory
double *h_cmax = (double*)malloc(numBlocks*sizeof(double));
hipMalloc((void**)&d_cmax, numBlocks*sizeof(double));
//Array to store maximum values for reduction in GPU global memory
hipMalloc((void**)&d_bmax, numBlocks*sizeof(double));
//set maximum value to zero and update values in GPU memory
(*p)->cmax=0.0;
hipMemcpy(*d_p, *p, sizeof(struct params), hipMemcpyHostToDevice);
//copy speeds and temporary values to device memory
hipLaunchKernelGGL(( copytotemp_parallel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, *d_p, *d_wd, *d_wtemp,cfast);
int i=0;
//find the maximum in each block
for(i=0;i<numBlocks;i++)
h_cmax[i]=0;
hipMemcpy(d_bmax, h_cmax, numBlocks*sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( reductionmax_parallel), dim3(numBlocks),dim3(NTPB),smemSize, 0, d_bmax,*d_wtemp,ndimp);
hipDeviceSynchronize();
hipMemcpy(h_cmax, d_bmax, numBlocks*sizeof(double), hipMemcpyDeviceToHost);
//compare the maxima for all of the blocks and determine maximum value
for( i=0;i<numBlocks;i++)
if(h_cmax[i]>((*p)->cmax)) ((*p)->cmax)=h_cmax[i];
//determine maximum value
hipMemcpy(*d_wtemp, ((*wd)+(soundspeed*dimp)), dimp*sizeof(real), hipMemcpyHostToDevice);
| 57c3a08a995c0062d8207e459b849b6ca1056539.cu | //Number threads per block
int NTPB=512;
//Num blocks is determined by size of zeropadded 2^n size array
int numBlocks = (ndimp+NTPB-1) / NTPB;
//Shared memory
int smemSize = NTPB * sizeof(double);
//Array to store maximum values for reduction in host memory
double *h_cmax = (double*)malloc(numBlocks*sizeof(double));
cudaMalloc((void**)&d_cmax, numBlocks*sizeof(double));
//Array to store maximum values for reduction in GPU global memory
cudaMalloc((void**)&d_bmax, numBlocks*sizeof(double));
//set maximum value to zero and update values in GPU memory
(*p)->cmax=0.0;
cudaMemcpy(*d_p, *p, sizeof(struct params), cudaMemcpyHostToDevice);
//copy speeds and temporary values to device memory
copytotemp_parallel<<<numBlocks, numThreadsPerBlock>>>(*d_p, *d_wd, *d_wtemp,cfast);
int i=0;
//find the maximum in each block
for(i=0;i<numBlocks;i++)
h_cmax[i]=0;
cudaMemcpy(d_bmax, h_cmax, numBlocks*sizeof(double), cudaMemcpyHostToDevice);
reductionmax_parallel<<<numBlocks,NTPB,smemSize>>>(d_bmax,*d_wtemp,ndimp);
cudaThreadSynchronize();
cudaMemcpy(h_cmax, d_bmax, numBlocks*sizeof(double), cudaMemcpyDeviceToHost);
//compare the maxima for all of the blocks and determine maximum value
for( i=0;i<numBlocks;i++)
if(h_cmax[i]>((*p)->cmax)) ((*p)->cmax)=h_cmax[i];
//determine maximum value
cudaMemcpy(*d_wtemp, ((*wd)+(soundspeed*dimp)), dimp*sizeof(real), cudaMemcpyHostToDevice);
|
5f022c90120be39763b9d7d9c86be178061321cc.hip | // !!! This is a file automatically generated by hipify!!!
// -*- C++ -*-
// -*- coding: utf-8 -*-
//
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// michael a.g. aïvázis
// california institute of technology
// (c) 1998-2010 all rights reserved
//
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// memxchng.cu
#include <hip/hip_runtime.h>
#include <assert.h>
// manipulate the host array
void scale_host(float* array, float scale, int N) {
// loop over all array elements and multiply them by 2
for (int idx=0; idx<N; idx++) {
array[idx] *= scale;
}
return;
}
// and here is the corresponding code for the GPU
__global__ void scale_dev(float* array, float scale, int N) {
// this thread is responsible for one element of the array
// compute its offset using the block geometry builtins
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// make sure we don't go past the last one
if (idx < N) {
// do the arithmetic
array[idx] *= scale;
}
return;
}
int main(int argc, char* argv[]) {
const int N = 12;
// allocate some buffers on the host
float* send_host = (float *) malloc(N*sizeof(float));
float* recv_host = (float *) malloc(N*sizeof(float));
// allocate matching ones on the device
float* array_dev;
hipMalloc((void **) &array_dev, N*sizeof(float));
// and initialize the host data
for (int i=0; i<N; i++) {
send_host[i] = 2.0f + i*i;
recv_host[i] = 0.0f;
}
// send the data from the host to the device
hipMemcpy(array_dev, send_host, N*sizeof(float), hipMemcpyHostToDevice);
// set up the device execution context for our threads
// each thread will take care of one element
int blockSz = 4; // 4 threads per block
// compute the number of blocks needed
int nBlocks = N/blockSz;
// adjust up to make sure we cover the entire array
if (N % blockSz) {
nBlocks++;
}
// scale the array on the device
float scale = 2.0f;
hipLaunchKernelGGL(( scale_dev) , dim3(nBlocks), dim3(blockSz), 0, 0, array_dev, scale, N);
// scale the input array on the host
scale_host(send_host, scale, N);
// get it back on the host
hipMemcpy(recv_host, array_dev, N*sizeof(float), hipMemcpyDeviceToHost);
// check the result
for (int i=0; i<N; i++) {
assert(send_host[i] == recv_host[i]);
}
// free the buffers;
hipFree(array_dev);
free(send_host); free(recv_host);
return 0;
}
// end of file
| 5f022c90120be39763b9d7d9c86be178061321cc.cu | // -*- C++ -*-
// -*- coding: utf-8 -*-
//
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// michael a.g. aïvázis
// california institute of technology
// (c) 1998-2010 all rights reserved
//
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// memxchng.cu
#include <cuda.h>
#include <assert.h>
// manipulate the host array
void scale_host(float* array, float scale, int N) {
// loop over all array elements and multiply them by 2
for (int idx=0; idx<N; idx++) {
array[idx] *= scale;
}
return;
}
// and here is the corresponding code for the GPU
__global__ void scale_dev(float* array, float scale, int N) {
// this thread is responsible for one element of the array
// compute its offset using the block geometry builtins
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// make sure we don't go past the last one
if (idx < N) {
// do the arithmetic
array[idx] *= scale;
}
return;
}
int main(int argc, char* argv[]) {
const int N = 12;
// allocate some buffers on the host
float* send_host = (float *) malloc(N*sizeof(float));
float* recv_host = (float *) malloc(N*sizeof(float));
// allocate matching ones on the device
float* array_dev;
cudaMalloc((void **) &array_dev, N*sizeof(float));
// and initialize the host data
for (int i=0; i<N; i++) {
send_host[i] = 2.0f + i*i;
recv_host[i] = 0.0f;
}
// send the data from the host to the device
cudaMemcpy(array_dev, send_host, N*sizeof(float), cudaMemcpyHostToDevice);
// set up the device execution context for our threads
// each thread will take care of one element
int blockSz = 4; // 4 threads per block
// compute the number of blocks needed
int nBlocks = N/blockSz;
// adjust up to make sure we cover the entire array
if (N % blockSz) {
nBlocks++;
}
// scale the array on the device
float scale = 2.0f;
scale_dev <<<nBlocks, blockSz>>> (array_dev, scale, N);
// scale the input array on the host
scale_host(send_host, scale, N);
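// the host copy is scaled with the same factor so the device result can be verified element by element below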
// get it back on the host
cudaMemcpy(recv_host, array_dev, N*sizeof(float), cudaMemcpyDeviceToHost);
// check the result
for (int i=0; i<N; i++) {
assert(send_host[i] == recv_host[i]);
}
// free the buffers;
cudaFree(array_dev);
free(send_host); free(recv_host);
return 0;
}
// end of file
|
convolve_separable.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <af/defines.h>
#include <backend.hpp>
#include <dispatch.hpp>
#include <Param.hpp>
#include <debug_cuda.hpp>
#include <math.hpp>
#include <convolve.hpp>
namespace cuda
{
namespace kernel
{
static const dim_type THREADS_X = 16;
static const dim_type THREADS_Y = 16;
// below shared MAX_*_LEN's are calculated based on
// a maximum shared memory configuration of 48KB per block
// considering complex types as well
static const dim_type MAX_SCONV_FILTER_LEN = 31;
// we shall declare the maximum size required of above all three cases
// and re-use the same constant memory locations for every case
__constant__ char sFilter[2*THREADS_Y*(2*(MAX_SCONV_FILTER_LEN-1)+THREADS_X)*sizeof(double)];
template<typename T, typename accType, dim_type conv_dim, bool expand, dim_type fLen>
__global__
void convolve2_separable(Param<T> out, CParam<T> signal, dim_type nBBS0, dim_type nBBS1)
{
const dim_type smem_len = (conv_dim==0 ?
(THREADS_X+2*(fLen-1))* THREADS_Y:
(THREADS_Y+2*(fLen-1))* THREADS_X);
__shared__ T shrdMem[smem_len];
const dim_type radius = fLen-1;
const dim_type padding = 2*radius;
const dim_type s0 = signal.strides[0];
const dim_type s1 = signal.strides[1];
const dim_type d0 = signal.dims[0];
const dim_type d1 = signal.dims[1];
const dim_type shrdLen = THREADS_X + (conv_dim==0 ? padding : 0);
unsigned b2 = blockIdx.x/nBBS0;
unsigned b3 = blockIdx.y/nBBS1;
T *dst = (T *)out.ptr + (b2*out.strides[2] + b3*out.strides[3]);
const T *src = (const T *)signal.ptr + (b2*signal.strides[2] + b3*signal.strides[3]);
const accType *impulse = (const accType *)sFilter;
dim_type lx = threadIdx.x;
dim_type ly = threadIdx.y;
dim_type ox = THREADS_X * (blockIdx.x-b2*nBBS0) + lx;
dim_type oy = THREADS_Y * (blockIdx.y-b3*nBBS1) + ly;
dim_type gx = ox;
dim_type gy = oy;
// below if-else statement is based on template parameter
if (conv_dim==0) {
gx += (expand ? 0 : fLen>>1);
dim_type endX = ((fLen-1)<<1) + THREADS_X;
#pragma unroll
for(dim_type lx = threadIdx.x, glb_x = gx; lx<endX; lx += THREADS_X, glb_x += THREADS_X) {
dim_type i = glb_x - radius;
dim_type j = gy;
bool is_i = i>=0 && i<d0;
bool is_j = j>=0 && j<d1;
shrdMem[ly*shrdLen+lx] = (is_i && is_j ? src[i*s0 + j*s1] : scalar<T>(0));
}
} else if (conv_dim==1) {
gy += (expand ? 0 : fLen>>1);
dim_type endY = ((fLen-1)<<1) + THREADS_Y;
#pragma unroll
for(dim_type ly = threadIdx.y, glb_y = gy; ly<endY; ly += THREADS_Y, glb_y += THREADS_Y) {
dim_type i = gx;
dim_type j = glb_y - radius;
bool is_i = i>=0 && i<d0;
bool is_j = j>=0 && j<d1;
shrdMem[ly*shrdLen+lx] = (is_i && is_j ? src[i*s0 + j*s1] : scalar<T>(0));
}
}
__syncthreads();
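// After the barrier each thread computes a dot product of fLen staged samples with the constant-memory filter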
if (ox<out.dims[0] && oy<out.dims[1]) {
// below conditional statement is based on template parameter
dim_type i = (conv_dim==0 ? lx : ly) + radius;
accType accum = scalar<accType>(0);
#pragma unroll
for(dim_type f=0; f<fLen; ++f) {
accType f_val = impulse[f];
// below conditional statement is based on template parameter
dim_type s_idx = (conv_dim==0 ? (ly*shrdLen+(i-f)) : ((i-f)*shrdLen+lx));
T s_val = shrdMem[s_idx];
accum = accum + s_val*f_val;
}
dst[oy*out.strides[1]+ox] = (T)accum;
}
}
template<typename T, typename aT, dim_type cDim, bool expand, dim_type f>
void conv2Helper(dim3 blks, dim3 thrds, Param<T> out, CParam<T> sig, dim_type nBBS0, dim_type nBBS1)
{
hipLaunchKernelGGL(HIP_KERNEL_NAME(convolve2_separable<T, aT, cDim, expand, f>), dim3(blks), dim3(thrds), 0, 0, out, sig, nBBS0, nBBS1);
}
template<typename T, typename accType, dim_type conv_dim, bool expand>
void convolve2(Param<T> out, CParam<T> signal, CParam<accType> filter)
{
dim_type fLen = filter.dims[0] * filter.dims[1] * filter.dims[2] * filter.dims[3];
if(fLen > kernel::MAX_SCONV_FILTER_LEN) {
// call upon fft
CUDA_NOT_SUPPORTED();
}
dim3 threads(THREADS_X, THREADS_Y);
dim_type blk_x = divup(out.dims[0], threads.x);
dim_type blk_y = divup(out.dims[1], threads.y);
dim3 blocks(blk_x*signal.dims[2], blk_y*signal.dims[3]);
// FIX ME: if the filter array is strided, direct copy of symbols
// might cause issues
CUDA_CHECK(hipMemcpyToSymbol(kernel::sFilter, filter.ptr, fLen*sizeof(accType), 0, hipMemcpyDeviceToDevice));
switch(fLen) {
case 2: conv2Helper<T, accType, conv_dim, expand, 2>(blocks, threads, out, signal, blk_x, blk_y); break;
case 3: conv2Helper<T, accType, conv_dim, expand, 3>(blocks, threads, out, signal, blk_x, blk_y); break;
case 4: conv2Helper<T, accType, conv_dim, expand, 4>(blocks, threads, out, signal, blk_x, blk_y); break;
case 5: conv2Helper<T, accType, conv_dim, expand, 5>(blocks, threads, out, signal, blk_x, blk_y); break;
case 6: conv2Helper<T, accType, conv_dim, expand, 6>(blocks, threads, out, signal, blk_x, blk_y); break;
case 7: conv2Helper<T, accType, conv_dim, expand, 7>(blocks, threads, out, signal, blk_x, blk_y); break;
case 8: conv2Helper<T, accType, conv_dim, expand, 8>(blocks, threads, out, signal, blk_x, blk_y); break;
case 9: conv2Helper<T, accType, conv_dim, expand, 9>(blocks, threads, out, signal, blk_x, blk_y); break;
case 10: conv2Helper<T, accType, conv_dim, expand, 10>(blocks, threads, out, signal, blk_x, blk_y); break;
case 11: conv2Helper<T, accType, conv_dim, expand, 11>(blocks, threads, out, signal, blk_x, blk_y); break;
case 12: conv2Helper<T, accType, conv_dim, expand, 12>(blocks, threads, out, signal, blk_x, blk_y); break;
case 13: conv2Helper<T, accType, conv_dim, expand, 13>(blocks, threads, out, signal, blk_x, blk_y); break;
case 14: conv2Helper<T, accType, conv_dim, expand, 14>(blocks, threads, out, signal, blk_x, blk_y); break;
case 15: conv2Helper<T, accType, conv_dim, expand, 15>(blocks, threads, out, signal, blk_x, blk_y); break;
case 16: conv2Helper<T, accType, conv_dim, expand, 16>(blocks, threads, out, signal, blk_x, blk_y); break;
case 17: conv2Helper<T, accType, conv_dim, expand, 17>(blocks, threads, out, signal, blk_x, blk_y); break;
case 18: conv2Helper<T, accType, conv_dim, expand, 18>(blocks, threads, out, signal, blk_x, blk_y); break;
case 19: conv2Helper<T, accType, conv_dim, expand, 19>(blocks, threads, out, signal, blk_x, blk_y); break;
case 20: conv2Helper<T, accType, conv_dim, expand, 20>(blocks, threads, out, signal, blk_x, blk_y); break;
case 21: conv2Helper<T, accType, conv_dim, expand, 21>(blocks, threads, out, signal, blk_x, blk_y); break;
case 22: conv2Helper<T, accType, conv_dim, expand, 22>(blocks, threads, out, signal, blk_x, blk_y); break;
case 23: conv2Helper<T, accType, conv_dim, expand, 23>(blocks, threads, out, signal, blk_x, blk_y); break;
case 24: conv2Helper<T, accType, conv_dim, expand, 24>(blocks, threads, out, signal, blk_x, blk_y); break;
case 25: conv2Helper<T, accType, conv_dim, expand, 25>(blocks, threads, out, signal, blk_x, blk_y); break;
case 26: conv2Helper<T, accType, conv_dim, expand, 26>(blocks, threads, out, signal, blk_x, blk_y); break;
case 27: conv2Helper<T, accType, conv_dim, expand, 27>(blocks, threads, out, signal, blk_x, blk_y); break;
case 28: conv2Helper<T, accType, conv_dim, expand, 28>(blocks, threads, out, signal, blk_x, blk_y); break;
case 29: conv2Helper<T, accType, conv_dim, expand, 29>(blocks, threads, out, signal, blk_x, blk_y); break;
case 30: conv2Helper<T, accType, conv_dim, expand, 30>(blocks, threads, out, signal, blk_x, blk_y); break;
case 31: conv2Helper<T, accType, conv_dim, expand, 31>(blocks, threads, out, signal, blk_x, blk_y); break;
default: CUDA_NOT_SUPPORTED();
}
POST_LAUNCH_CHECK();
}
#define INSTANTIATE(T, accType) \
template void convolve2<T, accType, 0, true >(Param<T> out, CParam<T> signal, CParam<accType> filter); \
template void convolve2<T, accType, 0, false>(Param<T> out, CParam<T> signal, CParam<accType> filter); \
template void convolve2<T, accType, 1, true >(Param<T> out, CParam<T> signal, CParam<accType> filter); \
template void convolve2<T, accType, 1, false>(Param<T> out, CParam<T> signal, CParam<accType> filter); \
INSTANTIATE(cdouble, cdouble)
INSTANTIATE(cfloat , cfloat)
INSTANTIATE(double , double)
INSTANTIATE(float , float)
INSTANTIATE(uint , float)
INSTANTIATE(int , float)
INSTANTIATE(uchar , float)
INSTANTIATE(char , float)
}
}
| convolve_separable.cu | /*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <af/defines.h>
#include <backend.hpp>
#include <dispatch.hpp>
#include <Param.hpp>
#include <debug_cuda.hpp>
#include <math.hpp>
#include <convolve.hpp>
namespace cuda
{
namespace kernel
{
static const dim_type THREADS_X = 16;
static const dim_type THREADS_Y = 16;
// below shared MAX_*_LEN's are calculated based on
// a maximum shared memory configuration of 48KB per block
// considering complex types as well
static const dim_type MAX_SCONV_FILTER_LEN = 31;
// we shall declare the maximum size required of above all three cases
// and re-use the same constant memory locations for every case
__constant__ char sFilter[2*THREADS_Y*(2*(MAX_SCONV_FILTER_LEN-1)+THREADS_X)*sizeof(double)];
template<typename T, typename accType, dim_type conv_dim, bool expand, dim_type fLen>
__global__
void convolve2_separable(Param<T> out, CParam<T> signal, dim_type nBBS0, dim_type nBBS1)
{
const dim_type smem_len = (conv_dim==0 ?
(THREADS_X+2*(fLen-1))* THREADS_Y:
(THREADS_Y+2*(fLen-1))* THREADS_X);
__shared__ T shrdMem[smem_len];
const dim_type radius = fLen-1;
const dim_type padding = 2*radius;
const dim_type s0 = signal.strides[0];
const dim_type s1 = signal.strides[1];
const dim_type d0 = signal.dims[0];
const dim_type d1 = signal.dims[1];
const dim_type shrdLen = THREADS_X + (conv_dim==0 ? padding : 0);
unsigned b2 = blockIdx.x/nBBS0;
unsigned b3 = blockIdx.y/nBBS1;
T *dst = (T *)out.ptr + (b2*out.strides[2] + b3*out.strides[3]);
const T *src = (const T *)signal.ptr + (b2*signal.strides[2] + b3*signal.strides[3]);
const accType *impulse = (const accType *)sFilter;
dim_type lx = threadIdx.x;
dim_type ly = threadIdx.y;
dim_type ox = THREADS_X * (blockIdx.x-b2*nBBS0) + lx;
dim_type oy = THREADS_Y * (blockIdx.y-b3*nBBS1) + ly;
dim_type gx = ox;
dim_type gy = oy;
// below if-else statement is based on template parameter
if (conv_dim==0) {
gx += (expand ? 0 : fLen>>1);
dim_type endX = ((fLen-1)<<1) + THREADS_X;
#pragma unroll
for(dim_type lx = threadIdx.x, glb_x = gx; lx<endX; lx += THREADS_X, glb_x += THREADS_X) {
dim_type i = glb_x - radius;
dim_type j = gy;
bool is_i = i>=0 && i<d0;
bool is_j = j>=0 && j<d1;
shrdMem[ly*shrdLen+lx] = (is_i && is_j ? src[i*s0 + j*s1] : scalar<T>(0));
}
} else if (conv_dim==1) {
gy += (expand ? 0 : fLen>>1);
dim_type endY = ((fLen-1)<<1) + THREADS_Y;
#pragma unroll
for(dim_type ly = threadIdx.y, glb_y = gy; ly<endY; ly += THREADS_Y, glb_y += THREADS_Y) {
dim_type i = gx;
dim_type j = glb_y - radius;
bool is_i = i>=0 && i<d0;
bool is_j = j>=0 && j<d1;
shrdMem[ly*shrdLen+lx] = (is_i && is_j ? src[i*s0 + j*s1] : scalar<T>(0));
}
}
__syncthreads();
if (ox<out.dims[0] && oy<out.dims[1]) {
// below conditional statement is based on template parameter
dim_type i = (conv_dim==0 ? lx : ly) + radius;
accType accum = scalar<accType>(0);
#pragma unroll
for(dim_type f=0; f<fLen; ++f) {
accType f_val = impulse[f];
// below conditional statement is based on template parameter
dim_type s_idx = (conv_dim==0 ? (ly*shrdLen+(i-f)) : ((i-f)*shrdLen+lx));
T s_val = shrdMem[s_idx];
accum = accum + s_val*f_val;
}
dst[oy*out.strides[1]+ox] = (T)accum;
}
}
template<typename T, typename aT, dim_type cDim, bool expand, dim_type f>
void conv2Helper(dim3 blks, dim3 thrds, Param<T> out, CParam<T> sig, dim_type nBBS0, dim_type nBBS1)
{
(convolve2_separable<T, aT, cDim, expand, f>)<<<blks, thrds>>>(out, sig, nBBS0, nBBS1);
}
template<typename T, typename accType, dim_type conv_dim, bool expand>
void convolve2(Param<T> out, CParam<T> signal, CParam<accType> filter)
{
dim_type fLen = filter.dims[0] * filter.dims[1] * filter.dims[2] * filter.dims[3];
if(fLen > kernel::MAX_SCONV_FILTER_LEN) {
// call upon fft
CUDA_NOT_SUPPORTED();
}
dim3 threads(THREADS_X, THREADS_Y);
dim_type blk_x = divup(out.dims[0], threads.x);
dim_type blk_y = divup(out.dims[1], threads.y);
dim3 blocks(blk_x*signal.dims[2], blk_y*signal.dims[3]);
// FIX ME: if the filter array is strided, direct copy of symbols
// might cause issues
CUDA_CHECK(cudaMemcpyToSymbol(kernel::sFilter, filter.ptr, fLen*sizeof(accType), 0, cudaMemcpyDeviceToDevice));
switch(fLen) {
case 2: conv2Helper<T, accType, conv_dim, expand, 2>(blocks, threads, out, signal, blk_x, blk_y); break;
case 3: conv2Helper<T, accType, conv_dim, expand, 3>(blocks, threads, out, signal, blk_x, blk_y); break;
case 4: conv2Helper<T, accType, conv_dim, expand, 4>(blocks, threads, out, signal, blk_x, blk_y); break;
case 5: conv2Helper<T, accType, conv_dim, expand, 5>(blocks, threads, out, signal, blk_x, blk_y); break;
case 6: conv2Helper<T, accType, conv_dim, expand, 6>(blocks, threads, out, signal, blk_x, blk_y); break;
case 7: conv2Helper<T, accType, conv_dim, expand, 7>(blocks, threads, out, signal, blk_x, blk_y); break;
case 8: conv2Helper<T, accType, conv_dim, expand, 8>(blocks, threads, out, signal, blk_x, blk_y); break;
case 9: conv2Helper<T, accType, conv_dim, expand, 9>(blocks, threads, out, signal, blk_x, blk_y); break;
case 10: conv2Helper<T, accType, conv_dim, expand, 10>(blocks, threads, out, signal, blk_x, blk_y); break;
case 11: conv2Helper<T, accType, conv_dim, expand, 11>(blocks, threads, out, signal, blk_x, blk_y); break;
case 12: conv2Helper<T, accType, conv_dim, expand, 12>(blocks, threads, out, signal, blk_x, blk_y); break;
case 13: conv2Helper<T, accType, conv_dim, expand, 13>(blocks, threads, out, signal, blk_x, blk_y); break;
case 14: conv2Helper<T, accType, conv_dim, expand, 14>(blocks, threads, out, signal, blk_x, blk_y); break;
case 15: conv2Helper<T, accType, conv_dim, expand, 15>(blocks, threads, out, signal, blk_x, blk_y); break;
case 16: conv2Helper<T, accType, conv_dim, expand, 16>(blocks, threads, out, signal, blk_x, blk_y); break;
case 17: conv2Helper<T, accType, conv_dim, expand, 17>(blocks, threads, out, signal, blk_x, blk_y); break;
case 18: conv2Helper<T, accType, conv_dim, expand, 18>(blocks, threads, out, signal, blk_x, blk_y); break;
case 19: conv2Helper<T, accType, conv_dim, expand, 19>(blocks, threads, out, signal, blk_x, blk_y); break;
case 20: conv2Helper<T, accType, conv_dim, expand, 20>(blocks, threads, out, signal, blk_x, blk_y); break;
case 21: conv2Helper<T, accType, conv_dim, expand, 21>(blocks, threads, out, signal, blk_x, blk_y); break;
case 22: conv2Helper<T, accType, conv_dim, expand, 22>(blocks, threads, out, signal, blk_x, blk_y); break;
case 23: conv2Helper<T, accType, conv_dim, expand, 23>(blocks, threads, out, signal, blk_x, blk_y); break;
case 24: conv2Helper<T, accType, conv_dim, expand, 24>(blocks, threads, out, signal, blk_x, blk_y); break;
case 25: conv2Helper<T, accType, conv_dim, expand, 25>(blocks, threads, out, signal, blk_x, blk_y); break;
case 26: conv2Helper<T, accType, conv_dim, expand, 26>(blocks, threads, out, signal, blk_x, blk_y); break;
case 27: conv2Helper<T, accType, conv_dim, expand, 27>(blocks, threads, out, signal, blk_x, blk_y); break;
case 28: conv2Helper<T, accType, conv_dim, expand, 28>(blocks, threads, out, signal, blk_x, blk_y); break;
case 29: conv2Helper<T, accType, conv_dim, expand, 29>(blocks, threads, out, signal, blk_x, blk_y); break;
case 30: conv2Helper<T, accType, conv_dim, expand, 30>(blocks, threads, out, signal, blk_x, blk_y); break;
case 31: conv2Helper<T, accType, conv_dim, expand, 31>(blocks, threads, out, signal, blk_x, blk_y); break;
default: CUDA_NOT_SUPPORTED();
}
POST_LAUNCH_CHECK();
}
#define INSTANTIATE(T, accType) \
template void convolve2<T, accType, 0, true >(Param<T> out, CParam<T> signal, CParam<accType> filter); \
template void convolve2<T, accType, 0, false>(Param<T> out, CParam<T> signal, CParam<accType> filter); \
template void convolve2<T, accType, 1, true >(Param<T> out, CParam<T> signal, CParam<accType> filter); \
template void convolve2<T, accType, 1, false>(Param<T> out, CParam<T> signal, CParam<accType> filter); \
INSTANTIATE(cdouble, cdouble)
INSTANTIATE(cfloat , cfloat)
INSTANTIATE(double , double)
INSTANTIATE(float , float)
INSTANTIATE(uint , float)
INSTANTIATE(int , float)
INSTANTIATE(uchar , float)
INSTANTIATE(char , float)
}
}
|
787bb9b882a57188772a9755303a71ab88ef1c8f.hip | // !!! This is a file automatically generated by hipify!!!
#include <thread>
#include <iostream>
#include <stdlib.h>
#include <fstream>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
/* Switch of minimal spanning tree algorithms */
/* Note: we will migrate the cuda implementation to PyTorch in the next version */
//#define MST_PRIM
//#define MST_KRUSKAL
#define MST_BORUVKA
#ifdef MST_PRIM
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/prim_minimum_spanning_tree.hpp>
#endif
#ifdef MST_KRUSKAL
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/kruskal_min_spanning_tree.hpp>
#endif
#ifdef MST_BORUVKA
#include "boruvka.hpp"
#endif
#ifndef MST_BORUVKA
using namespace boost;
typedef adjacency_list <vecS, vecS, undirectedS, no_property,
property < edge_weight_t, float > > Graph;
typedef graph_traits < Graph >::edge_descriptor Edge;
typedef graph_traits < Graph >::vertex_descriptor Vertex;
typedef std::pair<int, int> E;
#endif
static void forward_kernel(int * edge_index, float * edge_weight, int * edge_out, int vertex_count, int edge_count){
#ifdef MST_BORUVKA
struct Graph * g = createGraph(vertex_count, edge_count);
for (int i = 0; i < edge_count; ++i){
g->edge[i].src = edge_index[i * 2];
g->edge[i].dest = edge_index[i * 2 + 1];
g->edge[i].weight = edge_weight[i];
}
#else
Graph g(vertex_count);
for (int i = 0; i < edge_count; ++i)
boost::add_edge((int)edge_index[i * 2], (int)edge_index[i * 2 + 1],
edge_weight[i], g);
#endif
#ifdef MST_PRIM
std::vector < graph_traits < Graph >::vertex_descriptor > p(num_vertices(g));
prim_minimum_spanning_tree(g, &(p[0]));
int * edge_out_ptr = edge_out;
for (std::size_t i = 0; i != p.size(); ++i)
if (p[i] != i) {
*(edge_out_ptr++) = i;
*(edge_out_ptr++) = p[i];
}
#endif
#ifdef MST_KRUSKAL
std::vector < Edge > spanning_tree;
kruskal_minimum_spanning_tree(g, std::back_inserter(spanning_tree));
float * edge_out_ptr = edge_out;
for (std::vector < Edge >::iterator ei = spanning_tree.begin();
ei != spanning_tree.end(); ++ei){
*(edge_out_ptr++) = source(*ei, g);
*(edge_out_ptr++) = target(*ei, g);
}
#endif
#ifdef MST_BORUVKA
boruvkaMST(g, edge_out);
delete[] g->edge;
delete[] g;
#endif
}
at::Tensor mst_forward(
const at::Tensor & edge_index_tensor,
const at::Tensor & edge_weight_tensor,
int vertex_count){
unsigned batch_size = edge_index_tensor.size(0);
unsigned edge_count = edge_index_tensor.size(1);
auto edge_index_cpu = edge_index_tensor.cpu();
auto edge_weight_cpu = edge_weight_tensor.cpu();
auto edge_out_cpu = at::empty({batch_size, vertex_count - 1, 2}, edge_index_cpu.options());
int * edge_out = edge_out_cpu.contiguous().data_ptr<int>();
int * edge_index = edge_index_cpu.contiguous().data_ptr<int>();
float * edge_weight = edge_weight_cpu.contiguous().data_ptr<float>();
// Loop for batch
std::thread pids[batch_size];
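// one CPU worker thread per batch element; each builds its own graph and computes its MST independently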
for (unsigned i = 0; i < batch_size; i++){
auto edge_index_iter = edge_index + i * edge_count * 2;
auto edge_weight_iter = edge_weight + i * edge_count;
auto edge_out_iter = edge_out + i * (vertex_count - 1) * 2;
pids[i] = std::thread(forward_kernel, edge_index_iter, edge_weight_iter, edge_out_iter, vertex_count, edge_count);
}
for (unsigned i = 0; i < batch_size; i++){
pids[i].join();
}
auto edge_out_tensor = edge_out_cpu.to(edge_index_tensor.device());
return edge_out_tensor;
}
| 787bb9b882a57188772a9755303a71ab88ef1c8f.cu | #include <thread>
#include <iostream>
#include <stdlib.h>
#include <fstream>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
/* Switch of minimal spanning tree algorithms */
/* Note: we will migrate the cuda implementation to PyTorch in the next version */
//#define MST_PRIM
//#define MST_KRUSKAL
#define MST_BORUVKA
#ifdef MST_PRIM
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/prim_minimum_spanning_tree.hpp>
#endif
#ifdef MST_KRUSKAL
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/kruskal_min_spanning_tree.hpp>
#endif
#ifdef MST_BORUVKA
#include "boruvka.hpp"
#endif
#ifndef MST_BORUVKA
using namespace boost;
typedef adjacency_list <vecS, vecS, undirectedS, no_property,
property < edge_weight_t, float > > Graph;
typedef graph_traits < Graph >::edge_descriptor Edge;
typedef graph_traits < Graph >::vertex_descriptor Vertex;
typedef std::pair<int, int> E;
#endif
static void forward_kernel(int * edge_index, float * edge_weight, int * edge_out, int vertex_count, int edge_count){
#ifdef MST_BORUVKA
struct Graph * g = createGraph(vertex_count, edge_count);
for (int i = 0; i < edge_count; ++i){
g->edge[i].src = edge_index[i * 2];
g->edge[i].dest = edge_index[i * 2 + 1];
g->edge[i].weight = edge_weight[i];
}
#else
Graph g(vertex_count);
for (int i = 0; i < edge_count; ++i)
boost::add_edge((int)edge_index[i * 2], (int)edge_index[i * 2 + 1],
edge_weight[i], g);
#endif
#ifdef MST_PRIM
std::vector < graph_traits < Graph >::vertex_descriptor > p(num_vertices(g));
prim_minimum_spanning_tree(g, &(p[0]));
int * edge_out_ptr = edge_out;
for (std::size_t i = 0; i != p.size(); ++i)
if (p[i] != i) {
*(edge_out_ptr++) = i;
*(edge_out_ptr++) = p[i];
}
#endif
#ifdef MST_KRUSKAL
std::vector < Edge > spanning_tree;
kruskal_minimum_spanning_tree(g, std::back_inserter(spanning_tree));
float * edge_out_ptr = edge_out;
for (std::vector < Edge >::iterator ei = spanning_tree.begin();
ei != spanning_tree.end(); ++ei){
*(edge_out_ptr++) = source(*ei, g);
*(edge_out_ptr++) = target(*ei, g);
}
#endif
#ifdef MST_BORUVKA
boruvkaMST(g, edge_out);
delete[] g->edge;
delete[] g;
#endif
}
at::Tensor mst_forward(
const at::Tensor & edge_index_tensor,
const at::Tensor & edge_weight_tensor,
int vertex_count){
unsigned batch_size = edge_index_tensor.size(0);
unsigned edge_count = edge_index_tensor.size(1);
auto edge_index_cpu = edge_index_tensor.cpu();
auto edge_weight_cpu = edge_weight_tensor.cpu();
auto edge_out_cpu = at::empty({batch_size, vertex_count - 1, 2}, edge_index_cpu.options());
int * edge_out = edge_out_cpu.contiguous().data_ptr<int>();
int * edge_index = edge_index_cpu.contiguous().data_ptr<int>();
float * edge_weight = edge_weight_cpu.contiguous().data_ptr<float>();
// Loop for batch
std::thread pids[batch_size];
for (unsigned i = 0; i < batch_size; i++){
auto edge_index_iter = edge_index + i * edge_count * 2;
auto edge_weight_iter = edge_weight + i * edge_count;
auto edge_out_iter = edge_out + i * (vertex_count - 1) * 2;
pids[i] = std::thread(forward_kernel, edge_index_iter, edge_weight_iter, edge_out_iter, vertex_count, edge_count);
}
for (unsigned i = 0; i < batch_size; i++){
pids[i].join();
}
auto edge_out_tensor = edge_out_cpu.to(edge_index_tensor.device());
return edge_out_tensor;
}
|
7aa69b406ff057baf0abc4015ac598cf24133dcd.hip | // !!! This is a file automatically generated by hipify!!!
/* 18645 Spring 2019 Mini project
* Seam Carving with Cuda
* Author: kaiyuan1
*/
#ifndef SEAM_CARVING_CUDA_SC_CUDA_CU
#define SEAM_CARVING_CUDA_SC_CUDA_CU
#include <cstdlib>
#include <cstring>
#include <algorithm>
#include <cstdint>
#include <limits>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_cooperative_groups.h>
#include "sc_cuda.h"
#define err(format, ...) do { fprintf(stderr, format, ##__VA_ARGS__); exit(1); } while (0)
#define THREAD 128
inline void checkCuda(hipError_t e) {
if (e != hipSuccess) {
err("CUDA Error: %s\n", hipGetErrorString(e));
}
}
inline void checkLastCudaError() {
checkCuda(hipGetLastError());
}
__device__ inline static
unsigned diffRGB(RGBA p1, RGBA p2) {
return abs(int(p1.r) - int(p2.r)) +
abs(int(p1.g) - int(p2.g)) +
abs(int(p1.b) - int(p2.b));
}
__global__ static
void searchPath(RGBA* img, unsigned h, unsigned w, unsigned dim, unsigned *trace, unsigned *diff) {
unsigned j = blockIdx.x * blockDim.x + threadIdx.x;
if (j >= w) return;
cooperative_groups::grid_group g = cooperative_groups::this_grid();
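// cooperative grid group: lets every block synchronize after each row of the DP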
for (unsigned i = 1; i < h; ++i) {
unsigned t = j;
unsigned e = diffRGB(img[i*dim + j], img[(i-1)*dim + j]);
unsigned en = 1;
unsigned d = diff[(i-1)*dim + j];
if (j != 0) {
e += diffRGB(img[i*dim + j], img[(i-1)*dim + j-1]);
en ++;
unsigned pd = diff[(i-1)*dim + j-1];
if (pd < d) {
d = pd;
t = j - 1;
}
}
if (j != w-1) {
e += diffRGB(img[i*dim + j], img[(i-1)*dim + j+1]);
en ++;
unsigned nd = diff[(i-1)*dim + j+1];
if (nd < d) {
d = nd;
t = j + 1;
}
}
if (i != h-1) {
e += diffRGB(img[i*dim + j], img[(i+1)*dim + j]);
en ++;
}
diff[i*dim + j] = d + e / en;
trace[i*dim + j] = t;
// sync among blocks
g.sync();
}
}
__global__ static
void removeMin(RGBA* in, RGBA* out, unsigned* from, unsigned w, unsigned dim) {
unsigned j = blockIdx.y * blockDim.x + threadIdx.x;
if (j >= w) return;
unsigned i = blockIdx.x;
out[i*dim + j] = (j < from[i])?in[i*dim + j]:in[i*dim + j + 1];
}
__global__ static
void flipImg(RGBA* in, RGBA* out, unsigned w, unsigned dim) {
unsigned j = blockIdx.y * blockDim.x + threadIdx.x;
if (j >= w) return;
unsigned i = blockIdx.x;
out[j*dim + i] = in[i*dim + j];
}
inline static void shrink(RGBA *img0, RGBA *img1,
RGBA *&inImg, RGBA *&outImg,
bool &use0,
unsigned iW,
unsigned tW,
unsigned H,
unsigned dim,
unsigned *deviceTrace, unsigned *deviceDiff, unsigned *deviceFrom,
unsigned **hostTrace, unsigned *hostDiff, unsigned *hostFrom) {
for (auto W = iW; W > tW; --W, use0 = !use0) {
if (use0) {
inImg = img0;
outImg = img1;
} else {
inImg = img1;
outImg = img0;
}
unsigned n = (W + THREAD - 1) / THREAD;
// DP
void *args[] = {
(void*) &inImg, (void*) &H, (void*) &W, (void*) &dim,
(void*) &deviceTrace, (void*) &deviceDiff
};
// Use hipLaunchCooperativeKernel instead of <<< ... >>> for cooperative_groups APIs
checkCuda(hipLaunchCooperativeKernel((void*)searchPath, n, THREAD, args));
hipDeviceSynchronize(); checkLastCudaError();
// find minimum, use CPU
checkCuda(hipMemcpy(hostDiff, &(deviceDiff[(H-1) * dim]), W *sizeof(unsigned), hipMemcpyDeviceToHost));
checkCuda(hipMemcpy(hostTrace[0], deviceTrace, H * dim *sizeof(unsigned), hipMemcpyDeviceToHost));
unsigned min = std::numeric_limits<unsigned>::max();
unsigned idx = 0;
for (unsigned j = 0; j < W; ++j) {
if (hostDiff[j] <= min) {
min = hostDiff[j];
idx = j;
}
}
// retrace, use CPU
hostFrom[H-1] = idx;
for (auto i = H-1; i >= 1; --i) {
hostFrom[i-1] = hostTrace[i][hostFrom[i]];
}
checkCuda(hipMemcpy(deviceFrom, hostFrom, H * sizeof(unsigned), hipMemcpyHostToDevice));
// remove deleted, use Cuda
n = (W + THREAD - 2) / THREAD;
dim3 gridSize (H, n);
hipLaunchKernelGGL(( removeMin) , dim3(gridSize), dim3(THREAD) , 0, 0, inImg, outImg, deviceFrom, W-1, dim);
hipDeviceSynchronize(); checkLastCudaError();
}
}
void cudaSC(RGBA **inImg, unsigned inW, unsigned inH,
RGBA ** outImg, unsigned outW, unsigned outH) {
// data alloc for cuda
unsigned dim = std::max(inW, inH);
RGBA *deviceImg0, *deviceImg1, *deviceIn, *deviceOut;
unsigned *deviceTrace, *deviceDiff, *deviceFrom, **hostTrace, *hostDiff, *hostFrom;
checkCuda(hipMalloc(&deviceImg0, dim * dim * sizeof(RGBA)));
checkCuda(hipMalloc(&deviceImg1, dim * dim * sizeof(RGBA)));
checkCuda(hipMalloc(&deviceTrace, dim * dim * sizeof(unsigned)));
checkCuda(hipMalloc(&deviceDiff, dim * dim * sizeof(unsigned)));
checkCuda(hipMalloc(&deviceFrom, dim * sizeof(unsigned)));
new2D(hostTrace, dim, dim, unsigned);
hostDiff = new unsigned[dim];
hostFrom = new unsigned[dim];
// data init for cuda
for (unsigned i = 0; i < inH; ++i)
checkCuda(hipMemcpy(&(deviceImg0[i*dim]), inImg[i], inW*sizeof(RGBA), hipMemcpyHostToDevice));
checkCuda(hipMemcpy(deviceImg1, deviceImg0, dim * dim * sizeof(RGBA), hipMemcpyDeviceToDevice));
checkCuda(hipMemset(deviceTrace, 0, dim * dim * sizeof(unsigned)));
checkCuda(hipMemset(deviceDiff, 0, dim * dim * sizeof(unsigned)));
bool use0 = true;
deviceIn = deviceImg0;
deviceOut = deviceImg1;
// shrink width on Cuda
shrink(deviceImg0, deviceImg1, deviceIn, deviceOut,
use0, inW, outW, inH, dim,
deviceTrace, deviceDiff, deviceFrom, hostTrace, hostDiff, hostFrom);
if (inH > outH) {
// reset arrays
checkCuda(hipMemset(deviceTrace, 0, dim * dim * sizeof(unsigned)));
checkCuda(hipMemset(deviceDiff, 0, dim * dim * sizeof(unsigned)));
// flip x and y axis
unsigned n = (outW + THREAD - 1)/THREAD;
hipLaunchKernelGGL(( flipImg) , dim3(dim3(inH, n)), dim3(THREAD) , 0, 0, deviceOut, deviceIn, outW, dim);
use0 = !use0;
// shrink height on Cuda
shrink(deviceImg0, deviceImg1, deviceIn, deviceOut,
use0, inH, outH, outW, dim,
deviceTrace, deviceDiff, deviceFrom, hostTrace, hostDiff, hostFrom);
// flip back
n = (outH + THREAD - 1)/THREAD;
hipLaunchKernelGGL(( flipImg) , dim3(dim3(outW, n)), dim3(THREAD) , 0, 0, deviceOut, deviceIn, outH, dim);
deviceOut = deviceIn;
}
// copy to outImg
for (unsigned i = 0; i < outH; ++i)
checkCuda(hipMemcpy(outImg[i], &(deviceOut[i*dim]), outW*sizeof(RGBA), hipMemcpyDeviceToHost));
// free
checkCuda(hipFree(deviceImg0));
checkCuda(hipFree(deviceImg1));
checkCuda(hipFree(deviceTrace));
checkCuda(hipFree(deviceDiff));
checkCuda(hipFree(deviceFrom));
delete[] hostTrace[0];
delete[] hostTrace;
delete[] hostDiff;
delete[] hostFrom;
}
#endif //SEAM_CARVING_CUDA_SC_CUDA_CU | 7aa69b406ff057baf0abc4015ac598cf24133dcd.cu | /* 18645 Spring 2019 Mini project
* Seam Carving with Cuda
* Author: kaiyuan1
*/
#ifndef SEAM_CARVING_CUDA_SC_CUDA_CU
#define SEAM_CARVING_CUDA_SC_CUDA_CU
#include <cstdlib>
#include <cstring>
#include <algorithm>
#include <cstdint>
#include <limits>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cooperative_groups.h>
#include "sc_cuda.h"
#define err(format, ...) do { fprintf(stderr, format, ##__VA_ARGS__); exit(1); } while (0)
#define THREAD 128
inline void checkCuda(cudaError_t e) {
if (e != cudaSuccess) {
err("CUDA Error: %s\n", cudaGetErrorString(e));
}
}
inline void checkLastCudaError() {
checkCuda(cudaGetLastError());
}
__device__ inline static
unsigned diffRGB(RGBA p1, RGBA p2) {
return abs(int(p1.r) - int(p2.r)) +
abs(int(p1.g) - int(p2.g)) +
abs(int(p1.b) - int(p2.b));
}
__global__ static
void searchPath(RGBA* img, unsigned h, unsigned w, unsigned dim, unsigned *trace, unsigned *diff) {
unsigned j = blockIdx.x * blockDim.x + threadIdx.x;
if (j >= w) return;
cooperative_groups::grid_group g = cooperative_groups::this_grid();
for (unsigned i = 1; i < h; ++i) {
unsigned t = j;
unsigned e = diffRGB(img[i*dim + j], img[(i-1)*dim + j]);
unsigned en = 1;
unsigned d = diff[(i-1)*dim + j];
if (j != 0) {
e += diffRGB(img[i*dim + j], img[(i-1)*dim + j-1]);
en ++;
unsigned pd = diff[(i-1)*dim + j-1];
if (pd < d) {
d = pd;
t = j - 1;
}
}
if (j != w-1) {
e += diffRGB(img[i*dim + j], img[(i-1)*dim + j+1]);
en ++;
unsigned nd = diff[(i-1)*dim + j+1];
if (nd < d) {
d = nd;
t = j + 1;
}
}
if (i != h-1) {
e += diffRGB(img[i*dim + j], img[(i+1)*dim + j]);
en ++;
}
diff[i*dim + j] = d + e / en;
trace[i*dim + j] = t;
// sync among blocks
g.sync();
}
}
__global__ static
void removeMin(RGBA* in, RGBA* out, unsigned* from, unsigned w, unsigned dim) {
unsigned j = blockIdx.y * blockDim.x + threadIdx.x;
if (j >= w) return;
unsigned i = blockIdx.x;
out[i*dim + j] = (j < from[i])?in[i*dim + j]:in[i*dim + j + 1];
}
__global__ static
void flipImg(RGBA* in, RGBA* out, unsigned w, unsigned dim) {
unsigned j = blockIdx.y * blockDim.x + threadIdx.x;
if (j >= w) return;
unsigned i = blockIdx.x;
out[j*dim + i] = in[i*dim + j];
}
inline static void shrink(RGBA *img0, RGBA *img1,
RGBA *&inImg, RGBA *&outImg,
bool &use0,
unsigned iW,
unsigned tW,
unsigned H,
unsigned dim,
unsigned *deviceTrace, unsigned *deviceDiff, unsigned *deviceFrom,
unsigned **hostTrace, unsigned *hostDiff, unsigned *hostFrom) {
for (auto W = iW; W > tW; --W, use0 = !use0) {
if (use0) {
inImg = img0;
outImg = img1;
} else {
inImg = img1;
outImg = img0;
}
unsigned n = (W + THREAD - 1) / THREAD;
// DP
void *args[] = {
(void*) &inImg, (void*) &H, (void*) &W, (void*) &dim,
(void*) &deviceTrace, (void*) &deviceDiff
};
// Use cudaLaunchCooperativeKernel instead of <<< ... >>> for cooperative_groups APIs
checkCuda(cudaLaunchCooperativeKernel((void*)searchPath, n, THREAD, args));
cudaDeviceSynchronize(); checkLastCudaError();
// find minimum, use CPU
checkCuda(cudaMemcpy(hostDiff, &(deviceDiff[(H-1) * dim]), W *sizeof(unsigned), cudaMemcpyDeviceToHost));
checkCuda(cudaMemcpy(hostTrace[0], deviceTrace, H * dim *sizeof(unsigned), cudaMemcpyDeviceToHost));
unsigned min = std::numeric_limits<unsigned>::max();
unsigned idx = 0;
for (unsigned j = 0; j < W; ++j) {
if (hostDiff[j] <= min) {
min = hostDiff[j];
idx = j;
}
}
// retrace, use CPU
hostFrom[H-1] = idx;
for (auto i = H-1; i >= 1; --i) {
hostFrom[i-1] = hostTrace[i][hostFrom[i]];
}
checkCuda(cudaMemcpy(deviceFrom, hostFrom, H * sizeof(unsigned), cudaMemcpyHostToDevice));
// remove deleted, use Cuda
n = (W + THREAD - 2) / THREAD;
dim3 gridSize (H, n);
removeMin <<< gridSize, THREAD >>> (inImg, outImg, deviceFrom, W-1, dim);
cudaDeviceSynchronize(); checkLastCudaError();
}
}
void cudaSC(RGBA **inImg, unsigned inW, unsigned inH,
RGBA ** outImg, unsigned outW, unsigned outH) {
// data alloc for cuda
unsigned dim = std::max(inW, inH);
RGBA *deviceImg0, *deviceImg1, *deviceIn, *deviceOut;
unsigned *deviceTrace, *deviceDiff, *deviceFrom, **hostTrace, *hostDiff, *hostFrom;
checkCuda(cudaMalloc(&deviceImg0, dim * dim * sizeof(RGBA)));
checkCuda(cudaMalloc(&deviceImg1, dim * dim * sizeof(RGBA)));
checkCuda(cudaMalloc(&deviceTrace, dim * dim * sizeof(unsigned)));
checkCuda(cudaMalloc(&deviceDiff, dim * dim * sizeof(unsigned)));
checkCuda(cudaMalloc(&deviceFrom, dim * sizeof(unsigned)));
new2D(hostTrace, dim, dim, unsigned);
hostDiff = new unsigned[dim];
hostFrom = new unsigned[dim];
// data init for cuda
for (unsigned i = 0; i < inH; ++i)
checkCuda(cudaMemcpy(&(deviceImg0[i*dim]), inImg[i], inW*sizeof(RGBA), cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(deviceImg1, deviceImg0, dim * dim * sizeof(RGBA), cudaMemcpyDeviceToDevice));
checkCuda(cudaMemset(deviceTrace, 0, dim * dim * sizeof(unsigned)));
checkCuda(cudaMemset(deviceDiff, 0, dim * dim * sizeof(unsigned)));
bool use0 = true;
deviceIn = deviceImg0;
deviceOut = deviceImg1;
// shrink width on Cuda
shrink(deviceImg0, deviceImg1, deviceIn, deviceOut,
use0, inW, outW, inH, dim,
deviceTrace, deviceDiff, deviceFrom, hostTrace, hostDiff, hostFrom);
if (inH > outH) {
// reset arrays
checkCuda(cudaMemset(deviceTrace, 0, dim * dim * sizeof(unsigned)));
checkCuda(cudaMemset(deviceDiff, 0, dim * dim * sizeof(unsigned)));
// flip x and y axis
unsigned n = (outW + THREAD - 1)/THREAD;
flipImg <<< dim3(inH, n), THREAD >>> (deviceOut, deviceIn, outW, dim);
use0 = !use0;
// shrink height on Cuda
shrink(deviceImg0, deviceImg1, deviceIn, deviceOut,
use0, inH, outH, outW, dim,
deviceTrace, deviceDiff, deviceFrom, hostTrace, hostDiff, hostFrom);
// flip back
n = (outH + THREAD - 1)/THREAD;
flipImg <<< dim3(outW, n), THREAD >>> (deviceOut, deviceIn, outH, dim);
deviceOut = deviceIn;
}
// copy to outImg
for (unsigned i = 0; i < outH; ++i)
checkCuda(cudaMemcpy(outImg[i], &(deviceOut[i*dim]), outW*sizeof(RGBA), cudaMemcpyDeviceToHost));
// free
checkCuda(cudaFree(deviceImg0));
checkCuda(cudaFree(deviceImg1));
checkCuda(cudaFree(deviceTrace));
checkCuda(cudaFree(deviceDiff));
checkCuda(cudaFree(deviceFrom));
delete[] hostTrace[0];
delete[] hostTrace;
delete[] hostDiff;
delete[] hostFrom;
}
#endif //SEAM_CARVING_CUDA_SC_CUDA_CU |
1a7a084f718f064adc7e44c6efc46dc4e975f6f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Hologram generating algorithms for CUDA Devices
Copyright 2009, 2010, 2011, 2012 Martin Persson
[email protected]
Small edits by Lloyd Russell 2016
This file is part of GenerateHologramCUDA.
GenerateHologramCUDA is free software: you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
GenerateHologramCUDA is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with GenerateHologramCUDA. If not, see <http://www.gnu.org/licenses/>.
The function "GenerateHologram" contains two different algorithms for
hologram generation. The last parameter in the function call selects which
one to use:
0: Complex addition of "Lenses and Prisms", no optimization (3D)
1: Weighted Gerchberg-Saxton algorithm using Fresnel propagation (3D)
2: Weighted Gerchberg-Saxton algorithm using Fast Fourier Transforms (2D)
(0) produces optimal holograms for 1 or 2 traps and is significantly faster.
(0) is automatically selected if the number of spots is < 3.
Fresnel propagation based algorithm (1) described in:
Roberto Di Leonardo, Francesca Ianni, and Giancarlo Ruocco
"Computer generation of optimal holograms for optical trap arrays"
Opt. Express 15, 1913-1922 (2007)
The original algorithm has been modified to allow variable spot amplitudes
Naming convention for variables:
The prefix indicates where data is located
In host functions: h = host memory
d = device memory
c = constant memory
In global functions: g = global memory
s = shared memory
c = constant memory
no prefix = registers
The suffix indicates the data type, no suffix usually indicates an integer
Possible improvements:
* Improve convergence of the GS algorithms for 2 spots. *done
* Compensate spot intensities for distance from center of field. *done
* Put all arguments for device functions and trap positions in constant memory. *done
(Requires all functions to be moved into the same file or the use of some
workaround found on nVidia forum)
* Put pSLMstart and aLaser in texture memory (may not improve performance on Fermi devices)
* Use "zero-copy" to transfer pSLM to host.
* Rename functions and variables for consistency and readability
* Allow variable spot phases for Lenses and Prisms
*/
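/*
 Minimal host-side usage sketch (illustrative only; the spot values, iteration
 count and buffer handling below are assumptions, not part of the original
 project; h_checkData is not referenced in this file, so a null pointer is
 passed here):
 float *h_pSLMstart = new float[SLM_SIZE*SLM_SIZE](); //initial SLM phases, zeroed
 uint16_t *h_pSLM = new uint16_t[SLM_SIZE*SLM_SIZE]; //resulting hologram
 float x[3] = {10.f, -10.f, 0.f}, y[3] = {0.f, 5.f, -5.f};
 float z[3] = {0.f, 0.f, 0.f}, I[3] = {1.f, 1.f, 1.f};
 float Iobt[3*50];
 startCUDA(h_pSLMstart, 0); //allocate GPU buffers on device 0
 GenerateHologram(NULL, h_pSLM, x, y, z, I, 3, 50, Iobt, 1); //3 spots, 50 iterations, method 1 (Fresnel WGS)
 stopCUDA(); //free GPU buffers
 Note: with fewer than 3 spots GenerateHologram falls back to method 0 (Lenses and Prisms).
*/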
//#define M_CUDA_DEBUG //activates a number of custom debug macros//
float dt_milliseconds;
hipEvent_t start, stop;
//Includes
#include <stdlib.h>
#include <stdio.h>
#include "stdint.h"
#include <string.h>
#include <math.h>
#include <hipfft.h>
#ifndef M_PI
#define M_PI 3.14159265358979323846f
#endif
#define MAX_SPOTS 512 //decrease this if your GPU keeps running out of memory, was 1024
#define BLOCK_SIZE 2048 //should be a power of 2, was 512
#define SLM_SIZE 2048
#if ((SLM_SIZE==16)||(SLM_SIZE==32)||(SLM_SIZE==64)||(SLM_SIZE==128)||(SLM_SIZE==256)||(SLM_SIZE==512)||(SLM_SIZE==1024)||(SLM_SIZE==2048))
#define SLMPOW2 //Uses bitwise modulo operations if the SLM size is a power of 2
#endif
// forward declarations
__global__ void ApplyCorrections(uint16_t *g_pSLM_uc, uint16_t *g_LUT, float *d_AberrationCorr_f, float *d_LUTPolCoeff_f);
__global__ void LensesAndPrisms(uint16_t *g_SLMuc, uint16_t *g_LUT, float *d_AberrationCorr_f, float *d_LUTPolCoeff_f);
__global__ void calculateIobtained(uint16_t *g_pSLM_uc, float *g_Iobtained);
__global__ void PropagateToSLM_Fresnel(float *g_spotRe_f, float *g_spotIm_f, float *g_pSLM2pi, float *g_weights, int iteration, float *g_pSLMstart, float *g_amps,
bool getpSLM65535, uint16_t *g_pSLM65535_uc, uint16_t *g_LUT, float *g_AberrationCorr_f, float *g_LUTPolCoeff_f);
__global__ void PropagateToSLMDC_Fresnel(float *g_pSpot, float *g_wSpot, hipfftComplex *g_cSLM_cc, float *g_pSLM_f, int iteration, float *g_pSLMstart, bool getpSLM65535,
uint16_t *g_pSLM65535_uc);
__global__ void setActiveRegionToZero(hipfftComplex *g_Farfield);
__global__ void PropagateToSpotPositions_Fresnel(float *g_pSLM2pi, float *g_spotRe_f, float *g_spotIm_f);
__global__ void PropagateToSpotPositionsDC_Fresnel(float *g_pSLM_f, float *g_obtainedPhase, float *g_weights, float *g_Iobtained, int iteration);
__global__ void ReplaceAmpsSLM_FFT(float *g_aLaser, hipfftComplex *g_cAmp, float *g_pSLMstart, bool getpSLM65535, uint16_t *g_pSLM65535_uc, uint16_t *g_LUT, float *g_AberrationCorr_f, float *g_LUTPolCoeff_f);
__global__ void ReplaceAmpsSpots_FFT(hipfftComplex *g_cSpotAmp_cc, hipfftComplex *g_cSpotAmpNew_cc, int iteration, float *g_Iobtained, float *g_weight, bool last_iteration);
__global__ void ReplaceAmpsSpotsDC_FFT(hipfftComplex *g_cSpotAmp_cc, hipfftComplex *g_cSpotAmpNew_cc, int iteration, float *g_Iobtained, float *g_weight, bool last_iteration);
__global__ void XYtoIndex();
__global__ void f2uc(uint16_t *uc, float *f, int N_pixels, uint16_t *g_LUT, int use_linLUT, int data_w);
__global__ void uc2f(float *f, uint16_t *uc, int N);
__global__ void p2c(hipfftComplex *g_c, float *g_p, int M);
inline int computeAndCopySpotData(float *h_I, float *x, float *y, float *z, int N_spots, int method);
// Custom debug macros
#define M_CHECK_ERROR() mCheckError(__LINE__, __FILE__)
#define M_SAFE_CALL(errcode) mSafeCall(errcode, __LINE__, __FILE__)
#define M_CUFFT_SAFE_CALL(cuffterror) mCufftSafeCall(cuffterror, __LINE__, __FILE__)
#define M_DISPLAY_DATA_F(data, length) mDisplayDataF(data, length, __LINE__)
#define M_DISPLAY_DATA_UC(data, length) mDisplayDataUC(data, length, __LINE__)
#define M_DISPLAY_DATA_CC(data, length) mDisplayDataCC(data, length, __LINE__)
#define M_DISPLAY_DATA_I(data, length) mDisplayDataI(data, length, __LINE__)
inline void mSafeCall(hipError_t status, int line, const char *file);
inline void mCufftSafeCall(hipfftResult_t status, int line, const char *file);
inline void mCheckError(int line, const char *file);
inline void mDisplayDataF(float *d_data, int length, int line);
inline void mDisplayDataCC(hipfftComplex *d_data, int length, int line);
inline void mDisplayDataUC(uint16_t *d_data, int length, int line);
inline void mDisplayDataI(int *d_data, int length, int line);
//Global declaration
float *d_x, *d_y, *d_z, *d_I; //trap coordinates and intensity in GPU memory
float *d_pSLM_f; //the optimized pSpot pattern, float [-pi, pi]
float *d_weights, *d_Iobtained, *d_desiredAmp; //used h_weights and calculated amplitudes for each spot and each iteration
float *d_pSLMstart_f; //Initial pSpot pattern [-pi, pi]
float *d_spotRe_f, *d_spotIm_f;
float *d_AberrationCorr_f = NULL;
float *d_LUTPolCoeff_f = NULL;
float SLMsizef = (float)SLM_SIZE;
int N_PolLUTCoeff = 0;
int n_blocks_Phi, memsize_SLM_f, memsize_SLMuc, memsize_spotsf, data_w, N_pixels, N_iterations_last;
float h_desiredAmp[MAX_SPOTS];
int h_spotIndex[MAX_SPOTS];
uint16_t *d_pSLM_uc; //The optimized pSpot pattern, uint16_t, the one sent to the SLM [0, 65535]
uint16_t *h_LUT_uc;
uint16_t *d_LUT_uc = NULL;
int maxThreads_device;
bool ApplyLUT_b = false, EnableSLM_b = false, UseAberrationCorr_b = false, UsePolLUT_b = false, saveI_b = false, useRPC_b = false, useDC_b = false;
float alphaRPC_f = 10;
char CUDAmessage[100];
hipError_t status;
float *d_aLaserFFT, *d_LUT_coeff;
hipfftHandle plan;
hipfftComplex *d_FFTo_cc, *d_FFTd_cc, *d_SLM_cc;
int *d_spot_index, memsize_SLMcc;
int borderWidthDC_i;
float *d_obtainedPhase;
//Constant memory declarations
__device__ __constant__ int c_data_w[1];
__device__ __constant__ float c_data_w_f[1];
__device__ __constant__ int c_half_w[1];
__device__ __constant__ float c_half_w_f[1];
__device__ __constant__ int c_N_pixels[1];
__device__ __constant__ float c_N_pixels_f[1];
__device__ __constant__ float c_SLMpitch_f[1];
__device__ __constant__ bool c_useDC_b[1];
__device__ __constant__ int c_DCborderWidth[1];
__device__ __constant__ bool c_useRPC_b[1];
__device__ __constant__ float c_alphaRPC_f[1];
__device__ __constant__ bool c_saveI_b[1];
__device__ __constant__ int c_log2data_w[1];
__device__ __constant__ float c_x[MAX_SPOTS];
__device__ __constant__ float c_y[MAX_SPOTS];
__device__ __constant__ float c_z[MAX_SPOTS];
__device__ __constant__ float c_desiredAmp[MAX_SPOTS];
__device__ __constant__ int c_spotIndex[MAX_SPOTS];
__device__ __constant__ int c_N_spots[1];
//Public dll functions
//Generate a hologram
extern "C" __declspec(dllexport) int GenerateHologram(float *h_checkData, uint16_t *h_pSLM_uc, float *x_spots, float *y_spots, float *z_spots, float *I_spots, int N_spots, int N_iterations, float *h_Iobtained, int method)//, float* gpuTime)
{
//*gpuTime = 0;
//float deltaTime = 0;
if (N_spots > MAX_SPOTS)
N_spots = MAX_SPOTS;
else if (N_spots < 1)
method = 100;
else if (N_spots < 3)
method = 0;
memsize_spotsf = N_spots*sizeof(float);
method = computeAndCopySpotData(I_spots, x_spots, y_spots, z_spots, N_spots, method); //sets method to -1 if N_spots == 0.
switch (method) {
case 0:
//////
//Generate the hologram using "Lenses and Prisms"
//////
hipLaunchKernelGGL(( LensesAndPrisms), dim3(n_blocks_Phi), dim3(BLOCK_SIZE) , 0, 0, d_pSLM_uc, d_LUT_uc, d_AberrationCorr_f, d_LUTPolCoeff_f);
M_CHECK_ERROR();
hipDeviceSynchronize();
M_CHECK_ERROR();
if (saveI_b)
{
hipLaunchKernelGGL(( calculateIobtained), dim3(N_spots), dim3(SLM_SIZE), 0, 0, d_pSLM_uc, d_Iobtained);
M_CHECK_ERROR();
hipDeviceSynchronize();
M_SAFE_CALL(hipMemcpy(h_Iobtained, d_Iobtained, N_spots*sizeof(float), hipMemcpyDeviceToHost));
}
M_SAFE_CALL(hipMemcpy(h_pSLM_uc, d_pSLM_uc, memsize_SLMuc, hipMemcpyDeviceToHost));
break;
case 1:
//Generate hologram using Fresnel propagation
//Uncomment this to start with pre-calculated hologram:
//hipMemcpy(d_pSLM_uc, h_pSLM_uc, memsize_SLMuc, hipMemcpyHostToDevice);
//hipDeviceSynchronize();
//uc2f<<< n_blocks_Phi, BLOCK_SIZE >>>(d_pSLM_f, d_pSLM_uc, N_pixels);
/*hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipEventSynchronize(start);*/
for (int l=0; l<N_iterations; l++)
{
//Propagate to the spot positions
if (useDC_b)
{
M_CUFFT_SAFE_CALL(hipfftExecC2C(plan, d_SLM_cc, d_FFTo_cc, HIPFFT_FORWARD));
M_CHECK_ERROR();
hipLaunchKernelGGL(( PropagateToSpotPositionsDC_Fresnel), dim3(N_spots), dim3(SLM_SIZE), 0, 0, d_pSLM_f, d_obtainedPhase, d_weights, d_Iobtained, l); //this function is very slow
M_CHECK_ERROR();
hipLaunchKernelGGL(( setActiveRegionToZero), dim3(SLM_SIZE), dim3(SLM_SIZE) , 0, 0, d_FFTo_cc);
}
else
hipLaunchKernelGGL(( PropagateToSpotPositions_Fresnel), dim3(N_spots), dim3(SLM_SIZE), 0, 0, d_pSLM_f, d_spotRe_f, d_spotIm_f);
M_CHECK_ERROR();
hipDeviceSynchronize();
//Propagate to the SLM plane
if (useDC_b)
{
M_CUFFT_SAFE_CALL(hipfftExecC2C(plan, d_FFTo_cc, d_SLM_cc, HIPFFT_BACKWARD));
hipDeviceSynchronize();
hipLaunchKernelGGL(( PropagateToSLMDC_Fresnel), dim3(n_blocks_Phi), dim3(BLOCK_SIZE) , 0, 0, d_obtainedPhase, d_weights, d_SLM_cc, d_pSLM_f, l, d_pSLMstart_f, (l==(N_iterations-1)), d_pSLM_uc);
}
else
{
hipLaunchKernelGGL(( PropagateToSLM_Fresnel), dim3(n_blocks_Phi), dim3(BLOCK_SIZE) , 0, 0, d_spotRe_f, d_spotIm_f, d_pSLM_f, d_weights, l, d_pSLMstart_f, d_Iobtained, (l==(N_iterations-1)), d_pSLM_uc, d_LUT_uc, d_AberrationCorr_f, d_LUTPolCoeff_f);
}
M_CHECK_ERROR();
hipDeviceSynchronize();
}
/*hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&deltaTime, start, stop);
*gpuTime = deltaTime; */
if (saveI_b)
M_SAFE_CALL(hipMemcpy(h_Iobtained, d_Iobtained, N_spots*(N_iterations)*sizeof(float), hipMemcpyDeviceToHost));
else
M_SAFE_CALL(hipMemcpy(h_Iobtained, d_weights, N_spots*(N_iterations)*sizeof(float), hipMemcpyDeviceToHost));
M_SAFE_CALL(hipMemcpy(h_pSLM_uc, d_pSLM_uc, memsize_SLMuc, hipMemcpyDeviceToHost));
break;
case 2:
//generate hologram using fast fourier transforms
//Uncomment this to start with pre-calculated hologram:
//hipMemcpy(d_pSLM_uc, h_pSLM_uc, memsize_SLMuc, hipMemcpyHostToDevice);
//hipDeviceSynchronize();
//p_uc2c_cc_shift<<< n_blocks_Phi, BLOCK_SIZE >>>(d_SLM_cc, d_pSLM_uc, N_pixels, data_w);
//M_DISPLAY_DATA_CC(d_SLM_cc, 100);
M_SAFE_CALL(hipMemcpy(d_desiredAmp, h_desiredAmp, memsize_spotsf, hipMemcpyHostToDevice));
M_SAFE_CALL(hipMemset(d_FFTd_cc, 0, memsize_SLMcc));
M_CHECK_ERROR();
hipDeviceSynchronize();
for (int l=0; l<N_iterations; l++)
{
// Transform to trapping plane
M_CUFFT_SAFE_CALL(hipfftExecC2C(plan, d_SLM_cc, d_FFTo_cc, HIPFFT_FORWARD));
hipDeviceSynchronize();
// Copy phases for spot indices in d_FFTo_cc to d_FFTd_cc
if (useDC_b)
hipLaunchKernelGGL(( ReplaceAmpsSpotsDC_FFT) , dim3(n_blocks_Phi), dim3(BLOCK_SIZE) , 0, 0, d_FFTo_cc, d_FFTd_cc, l, d_Iobtained, d_weights, (l==(N_iterations-1)));
else
hipLaunchKernelGGL(( ReplaceAmpsSpots_FFT) , dim3(1), dim3(N_spots) , 0, 0, d_FFTo_cc, d_FFTd_cc, l, d_Iobtained, d_weights, (l==(N_iterations-1)));
M_CHECK_ERROR();
hipDeviceSynchronize();
//Transform back to SLM plane
M_CUFFT_SAFE_CALL(hipfftExecC2C(plan, d_FFTd_cc, d_SLM_cc, HIPFFT_BACKWARD));
hipDeviceSynchronize();
//M_DISPLAY_DATA_CC(d_SLM_cc, 100);
// Set amplitudes in d_SLM to the laser amplitude profile
hipLaunchKernelGGL(( ReplaceAmpsSLM_FFT) , dim3(n_blocks_Phi), dim3(BLOCK_SIZE) , 0, 0, d_aLaserFFT, d_SLM_cc, d_pSLMstart_f, (l==(N_iterations-1)), d_pSLM_uc, d_LUT_uc, d_AberrationCorr_f, d_LUTPolCoeff_f);
M_CHECK_ERROR();
//M_DISPLAY_DATA_CC(d_SLM_cc, 100);
hipDeviceSynchronize();
}
if (saveI_b)
M_SAFE_CALL(hipMemcpy(h_Iobtained, d_Iobtained, N_spots*(N_iterations)*sizeof(float), hipMemcpyDeviceToHost));
else
M_SAFE_CALL(hipMemcpy(h_Iobtained, d_weights, N_spots*(N_iterations)*sizeof(float), hipMemcpyDeviceToHost));
M_SAFE_CALL(hipMemcpy(h_pSLM_uc, d_pSLM_uc, memsize_SLMuc, hipMemcpyDeviceToHost));
break;
default:
break;
}
//Handle CUDA errors
status = hipGetLastError();
return status;
}
//Allocate GPU memory
extern "C" __declspec(dllexport) int startCUDA(float *h_pSLMstart, int deviceId)
{
//Make sure GPU with desired deviceId exists, set deviceId to 0 if not
int deviceCount=0;
if (hipGetDeviceCount(&deviceCount)!=0)
if (deviceId>=deviceCount)
{
deviceId=0;
}
M_SAFE_CALL(hipSetDevice(deviceId));
hipDeviceProp_t deviceProp;
M_SAFE_CALL(hipGetDeviceProperties(&deviceProp, deviceId));
maxThreads_device = deviceProp.maxThreadsPerBlock;
borderWidthDC_i = 0;
int MaxIterations = 1000;
data_w = SLM_SIZE;
hipMemcpyToSymbol(c_data_w, &data_w, sizeof(int), 0, hipMemcpyHostToDevice);
float data_w_f = (float)data_w;
hipMemcpyToSymbol(c_data_w_f, &data_w_f, sizeof(float), 0, hipMemcpyHostToDevice);
int half_w = (int)(data_w/2);
hipMemcpyToSymbol(c_half_w, &half_w, sizeof(int), 0, hipMemcpyHostToDevice);
float half_w_f = (float)data_w/2.0f;
hipMemcpyToSymbol(c_half_w_f, &half_w_f, sizeof(float), 0, hipMemcpyHostToDevice);
N_pixels = data_w * data_w;
hipMemcpyToSymbol(c_N_pixels, &N_pixels, sizeof(int), 0, hipMemcpyHostToDevice);
float N_pixels_f = (float)N_pixels;
hipMemcpyToSymbol(c_N_pixels_f, &N_pixels_f, sizeof(float), 0, hipMemcpyHostToDevice);
int logN = (int)(log2(data_w_f));
hipMemcpyToSymbol(c_log2data_w, &logN, sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(c_useRPC_b, &useRPC_b, sizeof(bool), 0, hipMemcpyHostToDevice);
float SLMpitch_f = 1.0f/data_w_f;
hipMemcpyToSymbol(c_SLMpitch_f, &SLMpitch_f, sizeof(float), 0, hipMemcpyHostToDevice);
N_iterations_last = 10;
memsize_spotsf = MAX_SPOTS * sizeof(float);
memsize_SLM_f = N_pixels * sizeof(float);
memsize_SLMuc = N_pixels * sizeof(uint16_t);
memsize_SLMcc = N_pixels * sizeof(hipfftComplex);
n_blocks_Phi = (N_pixels/BLOCK_SIZE + (N_pixels%BLOCK_SIZE == 0 ? 0:1));
//memory allocations for all methods
M_SAFE_CALL(hipMalloc((void**)&d_x, memsize_spotsf ));
M_SAFE_CALL(hipMalloc((void**)&d_y, memsize_spotsf ));
M_SAFE_CALL(hipMalloc((void**)&d_z, memsize_spotsf ));
M_SAFE_CALL(hipMalloc((void**)&d_I, memsize_spotsf ));
M_SAFE_CALL(hipMalloc((void**)&d_desiredAmp, memsize_spotsf ));
M_SAFE_CALL(hipMalloc((void**)&d_weights, MAX_SPOTS*(MaxIterations+1)*sizeof(float)));
M_SAFE_CALL(hipMalloc((void**)&d_Iobtained, MAX_SPOTS*MaxIterations*sizeof(float)));
M_SAFE_CALL(hipMalloc((void**)&d_obtainedPhase, memsize_spotsf ));
M_SAFE_CALL(hipMalloc((void**)&d_spotRe_f, memsize_spotsf ));
M_SAFE_CALL(hipMalloc((void**)&d_spotIm_f, memsize_spotsf ));
int data_w_pow2 = pow(2, ceil(log((float)data_w)/log(2.0f)));
M_SAFE_CALL(hipMalloc((void**)&d_pSLM_f, data_w_pow2*data_w_pow2*sizeof(float)));//the size of d_pSLM_f must be a power of 2 for the summation algorithm to work
M_SAFE_CALL(hipMemset(d_pSLM_f, 0, data_w_pow2*data_w_pow2*sizeof(float)));
M_SAFE_CALL(hipMalloc((void**)&d_pSLMstart_f, memsize_SLM_f));
M_SAFE_CALL(hipMalloc((void**)&d_pSLM_uc, memsize_SLMuc));
M_SAFE_CALL(hipMemset(d_pSLMstart_f, 0, N_pixels*sizeof(float)));
M_SAFE_CALL(hipMemcpy(d_pSLM_f, h_pSLMstart, N_pixels*sizeof(float), hipMemcpyHostToDevice));
//memory allocations etc. for all FFT based Gerchberg-Saxton
M_SAFE_CALL(hipMalloc((void**)&d_spot_index, MAX_SPOTS * sizeof(int)));
M_SAFE_CALL(hipMalloc((void**)&d_FFTd_cc, memsize_SLMcc));
M_SAFE_CALL(hipMalloc((void**)&d_FFTo_cc, memsize_SLMcc));
M_SAFE_CALL(hipMalloc((void**)&d_SLM_cc, memsize_SLMcc));
M_SAFE_CALL(hipDeviceSynchronize());
hipLaunchKernelGGL(( p2c) , dim3(n_blocks_Phi), dim3(BLOCK_SIZE) , 0, 0, d_SLM_cc, d_pSLM_f, N_pixels);
M_CHECK_ERROR();
hipDeviceSynchronize();
M_CUFFT_SAFE_CALL(hipfftPlan2d(&plan, data_w, data_w, HIPFFT_C2C));
float *h_aLaserFFT = (float *)malloc(memsize_SLM_f);
status = hipGetLastError();
return status;
}
//Free GPU memory and shut down SLM
extern "C" __declspec(dllexport) int stopCUDA()
{
M_SAFE_CALL(hipFree(d_x));
M_SAFE_CALL(hipFree(d_y));
M_SAFE_CALL(hipFree(d_z));
M_SAFE_CALL(hipFree(d_I));
M_SAFE_CALL(hipFree(d_weights));
M_SAFE_CALL(hipFree(d_Iobtained));
M_SAFE_CALL(hipFree(d_pSLM_f));
M_SAFE_CALL(hipFree(d_pSLMstart_f));
M_SAFE_CALL(hipFree(d_pSLM_uc));
M_SAFE_CALL(hipFree(d_FFTd_cc));
M_SAFE_CALL(hipFree(d_FFTo_cc));
M_SAFE_CALL(hipFree(d_SLM_cc));
M_CUFFT_SAFE_CALL(hipfftDestroy(plan));
hipDeviceReset();
status = hipGetLastError();
return status;
}
//Device functions
__device__ float uc2phase(float uc)
{
return (float)uc*2.0f*M_PI/65536.0f - M_PI;
}
__device__ uint16_t phase2uc(float phase2pi)
{
return (uint16_t)floor((phase2pi + M_PI)*65536.0f / (2.0f * M_PI));
}
__device__ int phase2int32(float phase2pi)
{
return (int)floor((phase2pi + M_PI)*65536.0f / (2.0f * M_PI));
}
__device__ float ApplyAberrationCorrection(float pSpot, float correction)
{
pSpot = pSpot - correction; //apply correction
return (pSpot - (2.0f*M_PI) * floor((pSpot+M_PI) / (2.0f*M_PI))); //apply mod([-pi, pi], pSpot)
}
__device__ int getXint(int index)
{
#ifdef SLMPOW2
int X_int = index&(c_data_w[0]-1);
#else
float X_int= index%c_data_w[0];
#endif
return X_int;
}
__device__ int getYint(int index, int X_int)
{
#ifdef SLMPOW2
int Y_int = (index-X_int)>>c_log2data_w[0];
#else
int Y_int = (float)(floor((float)index/c_data_w_f[0]));
#endif
return Y_int;
}
__device__ int fftshift(int idx, int X, int Y)
{
if (X < c_half_w[0])
{
if (Y < c_half_w[0])
{
return idx + (c_data_w[0] * c_half_w[0]) + c_half_w[0];
}
else
{
return idx - (c_data_w[0] * c_half_w[0]) + c_half_w[0];
}
}
else
{
if (Y < c_half_w[0])
{
return idx + (c_data_w[0] * c_half_w[0]) - c_half_w[0];
}
else
{
return idx - (c_data_w[0] * c_half_w[0]) - c_half_w[0];
}
}
}
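//Warp-synchronous final reduction: the last 32 partial sums are folded without
//__syncthreads(), relying on the threads of one warp executing in lockstep;
//the volatile qualifiers keep the compiler from caching the shared-memory
//values in registers between steps.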
__device__ void warpReduceC(volatile float *s_Vre, volatile float *s_Vim, int tid)
{
s_Vre[tid] += s_Vre[tid + 32];
s_Vim[tid] += s_Vim[tid + 32];
s_Vre[tid] += s_Vre[tid + 16];
s_Vim[tid] += s_Vim[tid + 16];
s_Vre[tid] += s_Vre[tid + 8];
s_Vim[tid] += s_Vim[tid + 8];
s_Vre[tid] += s_Vre[tid + 4];
s_Vim[tid] += s_Vim[tid + 4];
s_Vre[tid] += s_Vre[tid + 2];
s_Vim[tid] += s_Vim[tid + 2];
s_Vre[tid] += s_Vre[tid + 1];
s_Vim[tid] += s_Vim[tid + 1];
}
inline int computeAndCopySpotData(float *h_I, float *x, float *y, float *z, int N_spots, int method)
{
//float Isum = 0.0f;
//for (int i = 0; i<N_spots; i++)
// Isum += h_I[i];
for (int j = 0; j<N_spots; j++)
{
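//Compensate the requested amplitude for the sinc envelope of the square SLM
//pixels: multiplying by 1/sinc(pi*x/N) and 1/sinc(pi*y/N) boosts spots far
//from the zeroth order so they reach the requested intensity.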
float sincx_rec = (x[j]==0)? 1.0f:((M_PI*x[j]/SLMsizef)/sinf(M_PI*x[j]/SLMsizef));
float sincy_rec = (y[j]==0)? 1.0f:((M_PI*y[j]/SLMsizef)/sinf(M_PI*y[j]/SLMsizef));
h_desiredAmp[j] = (h_I[j] <= 0.0f) ? 1.0f:(sincx_rec * sincy_rec * sqrtf(h_I[j]/100)*SLMsizef*SLMsizef);
if (method == 2)
h_spotIndex[j] = ((int)(x[j])&(data_w-1)) + ((int)(y[j])&(data_w-1))* data_w;
}
hipMemcpyToSymbol(c_x, x, N_spots*sizeof(float), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(c_y, y, N_spots*sizeof(float), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(c_z, z, N_spots*sizeof(float), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(c_desiredAmp, h_desiredAmp, N_spots*sizeof(float), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(c_N_spots, &N_spots, sizeof(int), 0, hipMemcpyHostToDevice);
if (method == 2)
hipMemcpyToSymbol(c_spotIndex, h_spotIndex, N_spots*sizeof(int), 0, hipMemcpyHostToDevice);
if (N_spots == 0)
method = -1;
return method;
}
//Apply corrections to precalculated hologram
__global__ void ApplyCorrections(uint16_t *g_pSLM_uc, uint16_t *g_LUT, float *g_AberrationCorr_f, float *g_LUTPolCoeff_f)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float pSLM2pi_f = uc2phase(g_pSLM_uc[idx]);
g_pSLM_uc[idx] = phase2uc(pSLM2pi_f);
}
//Calculate hologram using "Lenses and Prisms"
__global__ void LensesAndPrisms(uint16_t *g_SLMuc, uint16_t *g_LUT, float *d_AberrationCorr_f, float *d_LUTPolCoeff_f)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < c_N_pixels[0])
{
//get pixel coordinates
int X_int = getXint(idx);
int Y_int = getYint(idx, X_int);
float X = c_SLMpitch_f[0]*(X_int - c_half_w_f[0]);
float Y = c_SLMpitch_f[0]*(Y_int - c_half_w_f[0]);
float phase2pi;
float SLMre = 0.0f;
float SLMim = 0.0f;
for (int ii=0; ii<c_N_spots[0]; ++ii)
{
//add variable phases to function call
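//lens term (quadratic in X,Y, scaled by the axial position z) plus prism term
//(linear in X,Y, scaled by the lateral position x,y) for spot ii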
phase2pi = M_PI * c_z[ii] * (X*X + Y*Y) + 2.0f * M_PI * (X * (c_x[ii]) + Y * (c_y[ii]) );
SLMre = SLMre + c_desiredAmp[ii] * cosf(phase2pi);
SLMim = SLMim + c_desiredAmp[ii] * sinf(phase2pi);
}
phase2pi = atan2f(SLMim, SLMre); // [-pi,pi]
g_SLMuc[idx] = phase2uc(phase2pi);
}
}
__global__ void calculateIobtained(uint16_t *g_pSLM_uc, float *g_Iobtained)
{
int blockSize = c_data_w[0];
int spot_number = blockIdx.x;
int tid = threadIdx.x;
int i = tid;
__shared__ float s_Vre[SLM_SIZE];
__shared__ float s_Vim[SLM_SIZE];
s_Vre[tid] = 0.0f;
s_Vim[tid] = 0.0f;
float X = c_SLMpitch_f[0] * ((float)tid - c_half_w_f[0]);
float Y = - c_SLMpitch_f[0] * c_half_w_f[0];
float pSLM_1;
float p;
while (i < c_N_pixels[0])
{
pSLM_1 = 2.0f*M_PI*(float)g_pSLM_uc[i]/65535.0f - M_PI;
p = pSLM_1 - M_PI * (c_z[spot_number] * (X*X + Y*Y) + 2 * (X * c_x[spot_number] + Y * c_y[spot_number]));
s_Vre[tid] += cosf(p);
s_Vim[tid] += sinf(p);
i += blockSize;
Y += c_SLMpitch_f[0];
}
/*__syncthreads();
if (tid < 512)
{
s_Vre[tid] += s_Vre[tid + 512];
s_Vim[tid] += s_Vim[tid + 512];
} */
__syncthreads();
if ((tid < 256)&&(SLM_SIZE>256))
{
s_Vre[tid] += s_Vre[tid + 256];
s_Vim[tid] += s_Vim[tid + 256];
}
__syncthreads();
if (tid < 128)
{
s_Vre[tid] += s_Vre[tid + 128];
s_Vim[tid] += s_Vim[tid + 128];
}
__syncthreads();
if (tid < 64)
{
s_Vre[tid] += s_Vre[tid + 64];
s_Vim[tid] += s_Vim[tid + 64];
}
__syncthreads();
if (tid < 32)
warpReduceC(s_Vre, s_Vim, tid);
if (tid == 0)
{
float spotRe_f = s_Vre[0] / c_N_pixels_f[0]; //512!
float spotIm_f = s_Vim[0] / c_N_pixels_f[0];
float amp = hypotf(spotRe_f, spotIm_f);
g_Iobtained[spot_number] = amp*amp;
}
}
__global__ void calculateIandPhase(uint16_t *g_pSLM_uc, float *g_Iobtained, float *g_Pobtained)
{
int blockSize = c_data_w[0];
int spot_number = blockIdx.x;
int tid = threadIdx.x;
int i = tid;
__shared__ float s_Vre[SLM_SIZE];
__shared__ float s_Vim[SLM_SIZE];
s_Vre[tid] = 0.0f;
s_Vim[tid] = 0.0f;
float X = c_SLMpitch_f[0] * ((float)tid - c_half_w_f[0]);
float Y = - c_SLMpitch_f[0] * c_half_w_f[0];
float pSLM_1;
float p;
while (i < c_N_pixels[0])
{
pSLM_1 = 2.0f*M_PI*(float)g_pSLM_uc[i]/65535.0f - M_PI;
p = pSLM_1 - M_PI * (c_z[spot_number] * (X*X + Y*Y) + 2 * (X * c_x[spot_number] + Y * c_y[spot_number]));
s_Vre[tid] += cosf(p+2*M_PI*c_z[spot_number]);
s_Vim[tid] += sinf(p+2*M_PI*c_z[spot_number]);
i += blockSize;
Y += c_SLMpitch_f[0];
}
/*__syncthreads();
if (tid < 512)
{
s_Vre[tid] += s_Vre[tid + 512];
s_Vim[tid] += s_Vim[tid + 512];
} */
__syncthreads();
if ((tid < 256)&&(SLM_SIZE>256))
{
s_Vre[tid] += s_Vre[tid + 256];
s_Vim[tid] += s_Vim[tid + 256];
}
__syncthreads();
if (tid < 128)
{
s_Vre[tid] += s_Vre[tid + 128];
s_Vim[tid] += s_Vim[tid + 128];
}
__syncthreads();
if (tid < 64)
{
s_Vre[tid] += s_Vre[tid + 64];
s_Vim[tid] += s_Vim[tid + 64];
}
__syncthreads();
if (tid < 32)
warpReduceC(s_Vre, s_Vim, tid);
if (tid == 0)
{
float spotRe_f = s_Vre[0] / c_N_pixels_f[0]; //512!
float spotIm_f = s_Vim[0] / c_N_pixels_f[0];
float amp = hypotf(spotRe_f, spotIm_f);
g_Pobtained[spot_number] = atan2f(spotIm_f , spotRe_f);
g_Iobtained[spot_number] = amp*amp;
}
}
//Functions for GS with Fresnel propagation
//Propagate from the SLM to the spot positions using Fresnel summation
//works only for blocksize = SLMsize
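//One block per spot: each thread integrates one SLM column, the partial sums
//are then combined by a shared-memory tree reduction and the warp-synchronous
//tail in warpReduceC, giving the complex field at the spot position.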
__global__ void PropagateToSpotPositions_Fresnel(float *g_pSLM2pi, float *g_spotRe_f, float *g_spotIm_f)
{
int spot_number = blockIdx.x;
int tid = threadIdx.x;
int i = tid;
__shared__ float s_Vre[SLM_SIZE];
__shared__ float s_Vim[SLM_SIZE];
s_Vre[tid] = 0.0f;
s_Vim[tid] = 0.0f;
int blockSize = blockDim.x;
float X = c_SLMpitch_f[0] * ((float)tid - c_half_w_f[0]);
float Y = - c_SLMpitch_f[0] * c_half_w_f[0];
float p;
while (i < c_N_pixels[0])
{
p = g_pSLM2pi[i] - M_PI * (c_z[spot_number] * (X*X + Y*Y) + 2.0f * (X * c_x[spot_number] + Y * c_y[spot_number]));
s_Vre[tid] += cosf(p);
s_Vim[tid] += sinf(p);
i += blockSize;
Y += c_SLMpitch_f[0];
}
/*__syncthreads();
if (tid < 512)
{
s_Vre[tid] += s_Vre[tid + 512];
s_Vim[tid] += s_Vim[tid + 512];
} */
__syncthreads();
if ((tid < 256)&&(SLM_SIZE>256))
{
s_Vre[tid] += s_Vre[tid + 256];
s_Vim[tid] += s_Vim[tid + 256];
}
__syncthreads();
if (tid < 128)
{
s_Vre[tid] += s_Vre[tid + 128];
s_Vim[tid] += s_Vim[tid + 128];
}
__syncthreads();
if (tid < 64)
{
s_Vre[tid] += s_Vre[tid + 64];
s_Vim[tid] += s_Vim[tid + 64];
}
__syncthreads();
if (tid < 32)
warpReduceC(s_Vre, s_Vim, tid);
if (tid == 0)
{
g_spotRe_f[spot_number] = s_Vre[0];// / c_N_pixels_f[0];
g_spotIm_f[spot_number] = s_Vim[0];// / c_N_pixels_f[0];
}
}
//Propagate from the SLM to the spot positions using Fresnel summation
//works only for blocksize = SLMsize
__global__ void PropagateToSpotPositionsDC_Fresnel(float *g_pSLM_f, float *g_obtainedPhase, float *g_weights, float *obtainedI, int iteration)
{
int spot_number = blockIdx.x;
int tid = threadIdx.x;
int i = tid;
__shared__ float s_Vre[SLM_SIZE];
__shared__ float s_Vim[SLM_SIZE];
float X, Y;
float p;
s_Vre[tid] = 0.0f;
s_Vim[tid] = 0.0f;
int X_int = getXint(i);
X = c_SLMpitch_f[0]*(X_int - c_half_w_f[0]);
Y = -0.5f;
while (i < c_N_pixels[0])
{
p = g_pSLM_f[i] - M_PI * (c_z[spot_number] * (X*X + Y*Y) + 2.0f * (X * c_x[spot_number] + Y * c_y[spot_number]));
s_Vre[tid] += cosf(p);
s_Vim[tid] += sinf(p);
Y += c_SLMpitch_f[0];
i += SLM_SIZE;
}
__syncthreads();
if ((tid < 256)&&(SLM_SIZE>256))
{
s_Vre[tid] += s_Vre[tid + 256];
s_Vim[tid] += s_Vim[tid + 256];
}
__syncthreads();
if (tid < 128)
{
s_Vre[tid] += s_Vre[tid + 128];
s_Vim[tid] += s_Vim[tid + 128];
}
__syncthreads();
if (tid < 64)
{
s_Vre[tid] += s_Vre[tid + 64];
s_Vim[tid] += s_Vim[tid + 64];
}
__syncthreads();
if (tid < 32)
warpReduceC(s_Vre, s_Vim, tid);
if (tid == 0)
{
g_obtainedPhase[spot_number] = atan2f(s_Vim[0], s_Vre[0]);
float obtainedAmp = hypotf(s_Vre[0], s_Vim[0]);
float desiredAmp = c_desiredAmp[spot_number];
if (iteration != 0)
{
g_weights[spot_number + c_N_spots[0]*iteration] = g_weights[spot_number + c_N_spots[0]*(iteration-1)] * (desiredAmp / obtainedAmp);
}
else
{
//obtainedAmp = (obtainedAmp<0.5f) ? 0.5f : obtainedAmp;
g_weights[spot_number] = desiredAmp/c_N_pixels_f[0];
}
if (c_saveI_b[0])
obtainedI[spot_number + c_N_spots[0]*iteration] = obtainedAmp*obtainedAmp/(desiredAmp*desiredAmp);//(c_N_pixels_f[0]*c_N_pixels_f[0]);
}
}
//Obtain phases in SLM plane
__global__ void PropagateToSLM_Fresnel(float *g_spotRe_f, float *g_spotIm_f, float *g_pSLM2pi, float *g_weights, int iteration, float *g_pSLMstart, float *g_Iobtained, bool getpSLM65535, uint16_t *g_pSLM65535_uc,
uint16_t *g_LUT, float *g_AberrationCorr_f, float *g_LUTPolCoeff_f)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int tid = threadIdx.x;
__shared__ float s_aSpot[MAX_SPOTS], s_aSpotsMean, s_weight[MAX_SPOTS], s_pSpot[MAX_SPOTS];
float reSLM = 0.0f, imSLM = 0.0f, pSLM2pi_f = 0.0f;
if (idx<c_N_pixels[0])
{
if (tid<c_N_spots[0])
{
float spotRe_f = g_spotRe_f[tid];
float spotIm_f = g_spotIm_f[tid];
s_pSpot[tid] = atan2f(spotIm_f, spotRe_f);
s_aSpot[tid] = hypotf(spotRe_f, spotIm_f)/c_desiredAmp[tid];
if (iteration != 0)
s_weight[tid] = g_weights[tid + iteration*c_N_spots[0]];
else
{
s_aSpot[tid] = (s_aSpot[tid]<0.5f) ? 0.5f : s_aSpot[tid];
s_weight[tid] = c_desiredAmp[tid];
}
}
__syncthreads();
//compute weights
if (tid==0)
{
float s_aSpot_sum = 0.0f;
for (int jj=0; jj<c_N_spots[0];jj++)
{
s_aSpot_sum += s_aSpot[jj];
}
s_aSpotsMean = s_aSpot_sum / (float)c_N_spots[0];
}
__syncthreads();
if (tid<c_N_spots[0])
{
s_weight[tid] = s_weight[tid] * s_aSpotsMean / s_aSpot[tid];
if (!getpSLM65535) //Copy weights to use as initial value next run
g_weights[tid + c_N_spots[0]*(iteration+1)] = s_weight[tid];
//else
// g_weights[tid] = s_weight[tid]; //Transferring weights to next run may give diverging weights
if (c_saveI_b[0])
g_Iobtained[tid + c_N_spots[0]*iteration] = s_aSpot[tid]*s_aSpot[tid]; //may be excluded, used for monitoring only
}
__syncthreads();
//get pixel coordinates
int X_int = getXint(idx);
int Y_int = getYint(idx, X_int);
float X = c_SLMpitch_f[0]*(X_int - c_half_w_f[0]);
float Y = c_SLMpitch_f[0]*(Y_int - c_half_w_f[0]);
//compute SLM pSpot by summing contribution from all spots
for (int k=0; k<c_N_spots[0]; k++)
{
float delta = M_PI * c_z[k] * (X*X + Y*Y) + 2.0f * M_PI * (X * c_x[k] + Y * c_y[k]);
reSLM += s_weight[k] * cosf(s_pSpot[k] + delta);
imSLM += s_weight[k] * sinf(s_pSpot[k] + delta);
}
pSLM2pi_f = atan2f(imSLM, reSLM);
if (c_useRPC_b[0]) //Apply RPC (restricted Phase Change)
{
float pSLMstart = g_pSLMstart[idx];
if (fabs(pSLM2pi_f - pSLMstart) > c_alphaRPC_f[0])
pSLM2pi_f = pSLMstart;
if (getpSLM65535)
g_pSLMstart[idx] = pSLM2pi_f;
}
if (getpSLM65535) //Compute final SLM phases and write to global memory...
g_pSLM65535_uc[idx] = phase2uc(pSLM2pi_f);
g_pSLM2pi[idx] = pSLM2pi_f; //...or write intermediate pSpot to global memory
}
}
//Obtain phases in SLM plane
__global__ void PropagateToSLMDC_Fresnel(float *g_pSpot, float *g_wSpot, hipfftComplex *g_cSLM_cc, float *g_pSLM_f, int iteration, float *g_pSLMstart, bool getpSLM65535,
uint16_t *g_pSLM65535_uc)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int tid = threadIdx.x;
__shared__ float s_weight[MAX_SPOTS], s_pSpot[MAX_SPOTS];
float reSLM = 0.0f, imSLM = 0.0f, pSLM2pi_f = 0.0f;
if (idx<c_N_pixels[0])
{
if (tid<c_N_spots[0])
{
s_pSpot[tid] = g_pSpot[tid];
s_weight[tid] = g_wSpot[tid+c_N_spots[0]*iteration];
}
__syncthreads();
//get pixel coordinates
int X_int = getXint(idx);
int Y_int = getYint(idx, X_int);
int shiftedidx = fftshift(idx, X_int, Y_int);
float X = c_SLMpitch_f[0]*(X_int - c_half_w_f[0]);
float Y = c_SLMpitch_f[0]*(Y_int - c_half_w_f[0]);
//compute SLM pSpot by summing contribution from all spots
for (int k=0; k<c_N_spots[0]; k++)
{
float delta = M_PI * c_z[k] * (X*X + Y*Y) + 2.0f * M_PI * (X * c_x[k] + Y * c_y[k]);
reSLM += s_weight[k] * cosf(s_pSpot[k] + delta);
imSLM += s_weight[k] * sinf(s_pSpot[k] + delta);
}
hipfftComplex cSLM_cc = g_cSLM_cc[shiftedidx];
reSLM += cSLM_cc.x/c_N_pixels_f[0];
imSLM += cSLM_cc.y/c_N_pixels_f[0];
pSLM2pi_f = atan2f(imSLM, reSLM);
if (c_useRPC_b[0]) //Apply RPC (restricted Phase Change)
{
float pSLMstart = g_pSLMstart[shiftedidx];
if (fabs(pSLM2pi_f - pSLMstart) > c_alphaRPC_f[0])
pSLM2pi_f = pSLMstart;
if (getpSLM65535)
g_pSLMstart[shiftedidx] = pSLM2pi_f;
}
g_pSLM_f[idx] = pSLM2pi_f;
g_cSLM_cc[shiftedidx].x = cosf(pSLM2pi_f);
g_cSLM_cc[shiftedidx].y = sinf(pSLM2pi_f);
if (getpSLM65535) //Compute final SLM phases and write to global memory...
g_pSLM65535_uc[idx] = phase2uc(pSLM2pi_f);
}
}
//Clear inside the DC frame
__global__ void setActiveRegionToZero(hipfftComplex *g_Farfield_cc) //this only works if blocksize = nblocks = SLM_SIZE
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int idx = bid * blockDim.x + tid;
if (((tid < (c_half_w[0] - c_DCborderWidth[0]))||(tid > ((c_half_w[0]-1) + c_DCborderWidth[0])))&&((bid < (c_half_w[0] - c_DCborderWidth[0]))||(bid > ((c_half_w[0]-1) + c_DCborderWidth[0]))))
{
g_Farfield_cc[idx].x = 0.0f;
g_Farfield_cc[idx].y = 0.0f;
}
}
//Functions for GS with FFT propagation
//Compute the phase in SLM pixels and set amplitude to unity or Laser amp
__global__ void ReplaceAmpsSLM_FFT(float *g_aLaser, hipfftComplex *g_cAmp, float *g_pSLMstart, bool getpSLM65535, uint16_t *g_pSLM65535_uc,
uint16_t *g_LUT, float *g_AberrationCorr_f, float *g_LUTPolCoeff_f)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<c_N_pixels[0])
{
float aLaser = 1.0f;//g_aLaser[idx];
hipfftComplex cAmp = g_cAmp[idx];
float pSLM2pi_f = atan2f(cAmp.y, cAmp.x);
if (c_useRPC_b[0])
{
float pSLMstart = g_pSLMstart[idx];
if (fabs(pSLM2pi_f - pSLMstart) > c_alphaRPC_f[0])
pSLM2pi_f = pSLMstart;
}
if (getpSLM65535)
{
if (c_useRPC_b[0])
g_pSLMstart[idx] = pSLM2pi_f;
//float phase65535;
int X_int = getXint(idx);
int Y_int = getYint(idx, X_int);
int shiftedidx = fftshift(idx, X_int, Y_int);
g_pSLM65535_uc[shiftedidx] = phase2uc(pSLM2pi_f);
}
g_cAmp[idx].x = aLaser*cosf(pSLM2pi_f);
g_cAmp[idx].y = aLaser*sinf(pSLM2pi_f);
}
__syncthreads();
}
//Adjust amplitudes in spot positions
__global__ void ReplaceAmpsSpots_FFT(hipfftComplex *g_cSpotAmp_cc, hipfftComplex *g_cSpotAmpNew_cc, int iteration, float *g_Iobtained, float *g_weight, bool last_iteration)
{
int tid = threadIdx.x;
int spotIndex;
float pSpot;
__shared__ float s_aSpot[MAX_SPOTS], s_ISpotsMeanSq;
float weight;
hipfftComplex cSpotAmp_cc;
if (tid<c_N_spots[0])
{
spotIndex = c_spotIndex[tid];
cSpotAmp_cc = g_cSpotAmp_cc[spotIndex];
pSpot = atan2f(cSpotAmp_cc.y, cSpotAmp_cc.x);
s_aSpot[tid] = hypotf(cSpotAmp_cc.x, cSpotAmp_cc.y)/c_desiredAmp[tid];
if (iteration != 0)
weight = g_weight[tid + iteration*c_N_spots[0]];
else
{
s_aSpot[tid] = (s_aSpot[tid]<0.5f) ? 0.5f : s_aSpot[tid];
weight = c_desiredAmp[tid];
}
}
__syncthreads();
//compute weights
if (tid==0)
{
float ISpot_sum = 0.0f;
for (int jj=0; jj<c_N_spots[0];jj++)
{
ISpot_sum += s_aSpot[jj]*s_aSpot[jj];
}
s_ISpotsMeanSq = sqrtf(ISpot_sum / (float)c_N_spots[0]); //cast to float to avoid integer division
}
__syncthreads();
if (tid<c_N_spots[0])
{
weight = weight * s_ISpotsMeanSq / s_aSpot[tid];
cSpotAmp_cc.x = cosf(pSpot) * weight;
cSpotAmp_cc.y = sinf(pSpot) * weight;
g_cSpotAmpNew_cc[spotIndex] = cSpotAmp_cc;
if (last_iteration)
g_weight[tid] = weight;
else
g_weight[c_N_spots[0] * (iteration + 1) + tid] = weight;
if (c_saveI_b[0])
g_Iobtained[c_N_spots[0] * (iteration) + tid] = s_aSpot[tid]*s_aSpot[tid];
}
}
//Adjust amplitudes in spot positions
__global__ void ReplaceAmpsSpotsDC_FFT(hipfftComplex *g_cSpotAmp_cc, hipfftComplex *g_cSpotAmpNew_cc, int iteration, float *g_Iobtained, float *g_weight, bool last_iteration)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int spotIndex;
float pSpot;
//__shared__ float s_aSpot[MAX_SPOTS], s_ISpotsMeanSq;
float weight;
hipfftComplex cSpotAmp_cc;
if (idx<c_N_spots[0])
{
spotIndex = c_spotIndex[idx];
cSpotAmp_cc = g_cSpotAmp_cc[spotIndex];
pSpot = atan2f(cSpotAmp_cc.y, cSpotAmp_cc.x);
float aSpot = hypotf(cSpotAmp_cc.x, cSpotAmp_cc.y)/c_desiredAmp[idx];
if (iteration != 0)
weight = g_weight[idx + iteration*c_N_spots[0]];
else
{
aSpot = (aSpot<0.5f) ? 0.5f : aSpot; //should it be like this with DC?
weight = c_desiredAmp[idx]/(c_N_pixels_f[0]);
}
weight = weight / aSpot;
cSpotAmp_cc.x = cosf(pSpot) * weight;
cSpotAmp_cc.y = sinf(pSpot) * weight;
g_cSpotAmpNew_cc[spotIndex] = cSpotAmp_cc;
if (last_iteration)
g_weight[idx] = weight;
else
g_weight[c_N_spots[0] * (iteration + 1) + idx] = weight;
if (c_saveI_b[0])
g_Iobtained[c_N_spots[0] * (iteration) + idx] = aSpot*aSpot;
}
int X_int = getXint(idx);
int Y_int = getYint(idx, X_int);
if (((X_int > (c_half_w[0] - c_DCborderWidth[0]))&&(X_int < ((c_half_w[0]-1) + c_DCborderWidth[0])))||((Y_int > (c_half_w[0] - c_DCborderWidth[0]))&&(Y_int < ((c_half_w[0]-1) + c_DCborderWidth[0]))))
{
g_cSpotAmpNew_cc[idx].x = g_cSpotAmp_cc[idx].x/(c_N_pixels_f[0]);
g_cSpotAmpNew_cc[idx].y = g_cSpotAmp_cc[idx].y/(c_N_pixels_f[0]);
}
}
//Misc help functions
__global__ void testfunc(float *testdata)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
testdata[idx] = idx;
}
//Convert from uint16_t [0, 65535] to float [-pi, pi]
__global__ void uc2f(float *f, uint16_t *uc, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<N)
{
f[idx] = uc[idx]*2.0f*M_PI/65536.0f - M_PI;
}
}
//Calculate complex from phases
__global__ void p2c(hipfftComplex *g_c, float *g_p, int M)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<M)
{
float pSpot = g_p[idx];
g_c[idx].x = cosf(pSpot);
g_c[idx].y = sinf(pSpot);
}
__syncthreads();
}
//Calculate amplitudes from complex
__global__ void c_cc2a_f(float *g_a, hipfftComplex *g_c, int M)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<M)
{
g_a[idx] = hypotf(g_c[idx].x, g_c[idx].y);
}
__syncthreads();
}
//Calculate phases from complex
__global__ void c_cc2p_cc(hipfftComplex *g_p, hipfftComplex *g_c, int M)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<M)
{
g_p[idx].x = atan2f(g_c[idx].y, g_c[idx].x);
g_p[idx].y = 0.0f;
}
__syncthreads();
}
//Calculate phases from complex
__global__ void c_cc2p_f(float *g_p, hipfftComplex *g_c, int M)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<M)
{
g_p[idx] = atan2f(g_c[idx].y, g_c[idx].x);
}
__syncthreads();
}
//Copy real part from complex
__global__ void c_cc2re_f(float *g_p, hipfftComplex *g_c, int M)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<M)
{
g_p[idx] = g_c[idx].x;
}
__syncthreads();
}
//Copy imaginary part from complex
__global__ void c_cc2im_f(float *g_p, hipfftComplex *g_c, int M)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<M)
{
g_p[idx] = g_c[idx].y;
}
__syncthreads();
}
//Custom debug functions
inline void mSafeCall(hipError_t status, int line, const char *file)
{
#ifdef M_CUDA_DEBUG
do
{
if(status != hipSuccess)
{
char CUDAmessage[200] = "CUDA says: ";
strcat(CUDAmessage, hipGetErrorString(status));
sprintf(CUDAmessage, "%s\non line: %d\n", CUDAmessage, line);
//AfxMessageBox(CUDAmessage);
if (status != hipSuccess)
exit(-1);
}
hipDeviceSynchronize();
status = hipGetLastError();
if(status!=hipSuccess)
{
char CUDAmessage[200] = "CUDA failed after sychronization:\n";
strcat(CUDAmessage, hipGetErrorString(status));
sprintf(CUDAmessage, "%s\non line: %d\n", CUDAmessage, line);
//AfxMessageBox(CUDAmessage);
exit(-1);
}
}while(0);
#endif
return;
}
inline void mCufftSafeCall(hipfftResult_t status, int line, const char *file)
{
#ifdef M_CUDA_DEBUG
if(status != HIPFFT_SUCCESS)
{
char CUDAmessage[200] = "CUFFT error, CUDA says:\n ";
switch (status) {
case HIPFFT_INVALID_PLAN: strcat(CUDAmessage,"HIPFFT_INVALID_PLAN\n");break;
case HIPFFT_ALLOC_FAILED: strcat(CUDAmessage,"HIPFFT_ALLOC_FAILED\n");break;
case HIPFFT_INVALID_TYPE: strcat(CUDAmessage,"HIPFFT_INVALID_TYPE\n");break;
case HIPFFT_INVALID_VALUE: strcat(CUDAmessage,"HIPFFT_INVALID_VALUE\n");break;
case HIPFFT_INTERNAL_ERROR: strcat(CUDAmessage,"HIPFFT_INTERNAL_ERROR\n");break;
case HIPFFT_EXEC_FAILED: strcat(CUDAmessage,"HIPFFT_EXEC_FAILED\n");break;
case HIPFFT_SETUP_FAILED: strcat(CUDAmessage,"HIPFFT_SETUP_FAILED\n");break;
case HIPFFT_INVALID_SIZE: strcat(CUDAmessage,"HIPFFT_INVALID_SIZE\n");break;
case HIPFFT_UNALIGNED_DATA: strcat(CUDAmessage,"HIPFFT_UNALIGNED_DATA\n");break;
default: strcat(CUDAmessage,"CUFFT Unknown error code\n");
}
sprintf(CUDAmessage, "%son line: %d\nin file: %s", CUDAmessage, line, file);
//AfxMessageBox(CUDAmessage);
exit(-1);
}
hipDeviceSynchronize();
hipError_t status2 = hipGetLastError();
if(status2!=hipSuccess)
{
char CUDAmessage[200] = "CUDA failed after sychronization:\n";
strcat(CUDAmessage, hipGetErrorString(status2));
sprintf(CUDAmessage, "%s\non line: %d\n", CUDAmessage, line);
//AfxMessageBox(CUDAmessage);
exit(-1);
}
#endif
return;
}
inline void mCheckError(int line, const char *file)
{
#ifdef M_CUDA_DEBUG
do
{
hipError_t status = hipGetLastError();
if(status!=hipSuccess)
{
char CUDAmessage[200] = "CUDA says: ";
strcat(CUDAmessage, hipGetErrorString(status));
sprintf(CUDAmessage, "%s\non line: %d\n", CUDAmessage, line);
//AfxMessageBox(CUDAmessage);
exit(-1);
}
hipDeviceSynchronize();
status = hipGetLastError();
if(status!=hipSuccess)
{
char CUDAmessage[200] = "CUDA failed after sychronization:\n";
strcat(CUDAmessage, hipGetErrorString(status));
sprintf(CUDAmessage, "%s\non line: %d\n", CUDAmessage, line);
//AfxMessageBox(CUDAmessage);
exit(-1);
}
}while(0);
#endif
return;
}
inline void mDisplayDataF(float *d_data, int length, int line)
{
#ifdef M_CUDA_DEBUG
do
{
int maxlength = 50;
float *h_data;
length = (length<=maxlength) ? length : maxlength;
char MessageString[1000];
h_data = (float*)malloc(length * sizeof (float));
M_SAFE_CALL(hipMemcpy(h_data, d_data, length*sizeof(float), hipMemcpyDeviceToHost));
sprintf(MessageString, "Line: %d\nData: ", line);
for (int ii = 0;ii<length;++ii)
{
sprintf(MessageString, "%s %f", MessageString, h_data[ii]);
}
//AfxMessageBox(MessageString, MB_ICONINFORMATION);
free(h_data);
}while(0);
#endif
return;
}
inline void mDisplayDataCC(hipfftComplex *d_data, int length, int line)
{
#ifdef M_CUDA_DEBUG
do
{
int maxlength = 25;
hipfftComplex *h_data;
length = (length<=maxlength) ? length : maxlength;
char MessageString[1000];
h_data = (hipfftComplex*)malloc(length * sizeof (hipfftComplex));
M_SAFE_CALL(hipMemcpy(h_data, d_data, length*sizeof(hipfftComplex), hipMemcpyDeviceToHost));
sprintf(MessageString, "Line: %d\nData: ", line);
for (int ii = 0;ii<length;++ii)
{
sprintf(MessageString, "%s re: %f im: %f", MessageString, h_data[ii].x, h_data[ii].y);
}
//AfxMessageBox(MessageString, MB_ICONINFORMATION);
free(h_data);
}while(0);
#endif
return;
}
inline void mDisplayDataUC(uint16_t *d_data, int length, int line)
{
#ifdef M_CUDA_DEBUG
do
{
int maxlength = 50;
uint16_t *h_data;
length = (length<=maxlength) ? length : maxlength;
char MessageString[1000];
h_data = (uint16_t*)malloc(length * sizeof (uint16_t));
M_SAFE_CALL(hipMemcpy(h_data, d_data, length*sizeof(uint16_t), hipMemcpyDeviceToHost));
sprintf(MessageString, "Line: %d\nData: ", line);
for (int ii = 0;ii<length;++ii)
{
sprintf(MessageString, "%s %hhu", MessageString, h_data[ii]);
}
//AfxMessageBox(MessageString, MB_ICONINFORMATION);
free(h_data);
}while(0);
#endif
return;
}
inline void mDisplayDataI(int *d_data, int length, int line)
{
#ifdef M_CUDA_DEBUG
do
{
int maxlength = 50;
int *h_data;
length = (length<=maxlength) ? length : maxlength;
char MessageString[1000];
h_data = (int*)malloc(length * sizeof (int));
M_SAFE_CALL(hipMemcpy(h_data, d_data, length*sizeof(int), hipMemcpyDeviceToHost));
sprintf(MessageString, "Line: %d\nData: ", line);
for (int ii = 0;ii<length;++ii)
{
sprintf(MessageString, "%s %d", MessageString, h_data[ii]);
}
//AfxMessageBox(MessageString, MB_ICONINFORMATION);
free(h_data);
}while(0);
#endif
return;
}
//Calculate amplitudes in positions given by x, y, and z from a given hologram
extern "C" __declspec(dllexport) int GetIandPhase(float *x_spots, float *y_spots, float *z_spots, float *h_pSLM_uc, int N_spots_all, int data_w, float *h_I_obt, float *h_Phase_obt)
{
float *d_Iobtained_all;
float *d_Pobtained_all;
hipMalloc((void**)&d_Iobtained_all, N_spots_all*sizeof(float) );
hipMalloc((void**)&d_Pobtained_all, N_spots_all*sizeof(float) );
hipMemcpy(d_pSLM_uc, h_pSLM_uc, memsize_SLMuc, hipMemcpyHostToDevice);
int offset = 0;
int N_spots_rem = N_spots_all;
int N_spots_this;
while (N_spots_rem > 0)
{
N_spots_this = (N_spots_rem > MAX_SPOTS) ? MAX_SPOTS : N_spots_rem;
hipMemcpyToSymbol(c_x, x_spots+offset, N_spots_this*sizeof(float), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(c_y, y_spots+offset, N_spots_this*sizeof(float), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(c_z, z_spots+offset, N_spots_this*sizeof(float), 0, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( calculateIandPhase), dim3(N_spots_this), dim3(512), 0, 0, d_pSLM_uc, d_Iobtained_all+offset, d_Pobtained_all+offset);
//calculateIobtained(uint16_t *g_pSLM_uc, float *g_Iobtained)
hipDeviceSynchronize();
N_spots_rem -= MAX_SPOTS;
offset += MAX_SPOTS;
}
hipMemcpy(h_I_obt, d_Iobtained_all, N_spots_all*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(h_Phase_obt, d_Pobtained_all, N_spots_all*sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_Iobtained_all);
hipFree(d_Pobtained_all);
status = hipGetLastError();
return status;
}
| 1a7a084f718f064adc7e44c6efc46dc4e975f6f5.cu | /*
Hologram generating algorithms for CUDA Devices
Copyright 2009, 2010, 2011, 2012 Martin Persson
[email protected]
Small edits by Lloyd Russell 2016
This file is part of GenerateHologramCUDA.
GenerateHologramCUDA is free software: you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
GenerateHologramCUDA is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with GenerateHologramCUDA. If not, see <http://www.gnu.org/licenses/>.
The function "GenerateHologram" contains two different algorithms for
hologram generation. The last parameter in the function call selects which
one to use:
0: Complex addition of "Lenses and Prisms", no optimization (3D)
1: Weighted Gerchberg-Saxton algorithm using Fresnel propagation (3D)
2: Weighted Gerchberg-Saxton algorithm using Fast Fourier Transforms (2D)
(0) produces optimal holograms for 1 or 2 traps and is significantly faster.
(0) is automatically selected if the number of spots is < 3.
Fresnel propagation based algorithm (1) described in:
Roberto Di Leonardo, Francesca Ianni, and Giancarlo Ruocco
"Computer generation of optimal holograms for optical trap arrays"
Opt. Express 15, 1913-1922 (2007)
The original algorithm has been modified to allow variable spot amplitudes
Naming convention for variables:
The prefix indicates where data is located
In host functions: h = host memory
d = device memory
c = constant memory
In global functions: g = global memory
s = shared memory
c = constant memory
no prefix = registers
The suffix indicates the data type, no suffix usually indicates an integer
Possible improvements:
* Improve convergence of the GS algorithms for 2 spots. *done
* Compensate spot intensities for distance from center of field. *done
* Put all arguments for device functions and trap positions in constant memory. *done
(Requires all functions to be moved into the same file or the use of some
workaround found on nVidia forum)
* Put pSLMstart and aLaser in texture memory (may not improve performance on Fermi devices)
* Use "zero-copy" to transfer pSLM to host.
* Rename functions and variables for consistency and readability
* Allow variable spot phases for Lenses and Prisms
*/
//#define M_CUDA_DEBUG //activates a number of custom debug macros//
float dt_milliseconds;
cudaEvent_t start, stop;
//Includes
#include <stdlib.h>
#include <stdio.h>
#include "stdint.h"
#include <string.h>
#include <math.h>
#include <cufft.h>
#ifndef M_PI
#define M_PI 3.14159265358979323846f
#endif
#define MAX_SPOTS 512 //decrease this if your GPU keeps running out of memory, was 1024
#define BLOCK_SIZE 2048 //should be a power of 2, was 512
#define SLM_SIZE 2048
#if ((SLM_SIZE==16)||(SLM_SIZE==32)||(SLM_SIZE==64)||(SLM_SIZE==128)||(SLM_SIZE==256)||(SLM_SIZE==512)||(SLM_SIZE==1024)||(SLM_SIZE==2048))
#define SLMPOW2 //Uses bitwise modulo operations if the SLM size is a power of 2
#endif
// forward declarations
__global__ void ApplyCorrections(uint16_t *g_pSLM_uc, uint16_t *g_LUT, float *d_AberrationCorr_f, float *d_LUTPolCoeff_f);
__global__ void LensesAndPrisms(uint16_t *g_SLMuc, uint16_t *g_LUT, float *d_AberrationCorr_f, float *d_LUTPolCoeff_f);
__global__ void calculateIobtained(uint16_t *g_pSLM_uc, float *g_Iobtained);
__global__ void PropagateToSLM_Fresnel(float *g_spotRe_f, float *g_spotIm_f, float *g_pSLM2pi, float *g_weights, int iteration, float *g_pSLMstart, float *g_amps,
bool getpSLM65535, uint16_t *g_pSLM65535_uc, uint16_t *g_LUT, float *g_AberrationCorr_f, float *g_LUTPolCoeff_f);
__global__ void PropagateToSLMDC_Fresnel(float *g_pSpot, float *g_wSpot, cufftComplex *g_cSLM_cc, float *g_pSLM_f, int iteration, float *g_pSLMstart, bool getpSLM65535,
uint16_t *g_pSLM65535_uc);
__global__ void setActiveRegionToZero(cufftComplex *g_Farfield);
__global__ void PropagateToSpotPositions_Fresnel(float *g_pSLM2pi, float *g_spotRe_f, float *g_spotIm_f);
__global__ void PropagateToSpotPositionsDC_Fresnel(float *g_pSLM_f, float *g_obtainedPhase, float *g_weights, float *g_Iobtained, int iteration);
__global__ void ReplaceAmpsSLM_FFT(float *g_aLaser, cufftComplex *g_cAmp, float *g_pSLMstart, bool getpSLM65535, uint16_t *g_pSLM65535_uc, uint16_t *g_LUT, float *g_AberrationCorr_f, float *g_LUTPolCoeff_f);
__global__ void ReplaceAmpsSpots_FFT(cufftComplex *g_cSpotAmp_cc, cufftComplex *g_cSpotAmpNew_cc, int iteration, float *g_Iobtained, float *g_weight, bool last_iteration);
__global__ void ReplaceAmpsSpotsDC_FFT(cufftComplex *g_cSpotAmp_cc, cufftComplex *g_cSpotAmpNew_cc, int iteration, float *g_Iobtained, float *g_weight, bool last_iteration);
__global__ void XYtoIndex();
__global__ void f2uc(uint16_t *uc, float *f, int N_pixels, uint16_t *g_LUT, int use_linLUT, int data_w);
__global__ void uc2f(float *f, uint16_t *uc, int N);
__global__ void p2c(cufftComplex *g_c, float *g_p, int M);
inline int computeAndCopySpotData(float *h_I, float *x, float *y, float *z, int N_spots, int method);
// Custom debug macros
#define M_CHECK_ERROR() mCheckError(__LINE__, __FILE__)
#define M_SAFE_CALL(errcode) mSafeCall(errcode, __LINE__, __FILE__)
#define M_CUFFT_SAFE_CALL(cuffterror) mCufftSafeCall(cuffterror, __LINE__, __FILE__)
#define M_DISPLAY_DATA_F(data, length) mDisplayDataF(data, length, __LINE__)
#define M_DISPLAY_DATA_UC(data, length) mDisplayDataUC(data, length, __LINE__)
#define M_DISPLAY_DATA_CC(data, length) mDisplayDataCC(data, length, __LINE__)
#define M_DISPLAY_DATA_I(data, length) mDisplayDataI(data, length, __LINE__)
inline void mSafeCall(cudaError_t status, int line, const char *file);
inline void mCufftSafeCall(cufftResult_t status, int line, const char *file);
inline void mCheckError(int line, const char *file);
inline void mDisplayDataF(float *d_data, int length, int line);
inline void mDisplayDataCC(cufftComplex *d_data, int length, int line);
inline void mDisplayDataUC(uint16_t *d_data, int length, int line);
inline void mDisplayDataI(int *d_data, int length, int line);
//Global declaration
float *d_x, *d_y, *d_z, *d_I; //trap coordinates and intensity in GPU memory
float *d_pSLM_f; //the optimized pSpot pattern, float [-pi, pi]
float *d_weights, *d_Iobtained, *d_desiredAmp; //used h_weights and calculated amplitudes for each spot and each iteration
float *d_pSLMstart_f; //Initial pSpot pattern [-pi, pi]
float *d_spotRe_f, *d_spotIm_f;
float *d_AberrationCorr_f = NULL;
float *d_LUTPolCoeff_f = NULL;
float SLMsizef = (float)SLM_SIZE;
int N_PolLUTCoeff = 0;
int n_blocks_Phi, memsize_SLM_f, memsize_SLMuc, memsize_spotsf, data_w, N_pixels, N_iterations_last;
float h_desiredAmp[MAX_SPOTS];
int h_spotIndex[MAX_SPOTS];
uint16_t *d_pSLM_uc; //The optimized pSpot pattern, uint16_t, the one sent to the SLM [0, 65535]
uint16_t *h_LUT_uc;
uint16_t *d_LUT_uc = NULL;
int maxThreads_device;
bool ApplyLUT_b = false, EnableSLM_b = false, UseAberrationCorr_b = false, UsePolLUT_b = false, saveI_b = false, useRPC_b = false, useDC_b = false;
float alphaRPC_f = 10;
char CUDAmessage[100];
cudaError_t status;
float *d_aLaserFFT, *d_LUT_coeff;
cufftHandle plan;
cufftComplex *d_FFTo_cc, *d_FFTd_cc, *d_SLM_cc;
int *d_spot_index, memsize_SLMcc;
int borderWidthDC_i;
float *d_obtainedPhase;
//Constant memory declarations
__device__ __constant__ int c_data_w[1];
__device__ __constant__ float c_data_w_f[1];
__device__ __constant__ int c_half_w[1];
__device__ __constant__ float c_half_w_f[1];
__device__ __constant__ int c_N_pixels[1];
__device__ __constant__ float c_N_pixels_f[1];
__device__ __constant__ float c_SLMpitch_f[1];
__device__ __constant__ bool c_useDC_b[1];
__device__ __constant__ int c_DCborderWidth[1];
__device__ __constant__ bool c_useRPC_b[1];
__device__ __constant__ float c_alphaRPC_f[1];
__device__ __constant__ bool c_saveI_b[1];
__device__ __constant__ int c_log2data_w[1];
__device__ __constant__ float c_x[MAX_SPOTS];
__device__ __constant__ float c_y[MAX_SPOTS];
__device__ __constant__ float c_z[MAX_SPOTS];
__device__ __constant__ float c_desiredAmp[MAX_SPOTS];
__device__ __constant__ int c_spotIndex[MAX_SPOTS];
__device__ __constant__ int c_N_spots[1];
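//Per-spot data (positions, desired amplitudes and, for the FFT method, pixel indices) is kept in constant memory so every kernel can read it without extra global-memory traffic; MAX_SPOTS bounds the number of simultaneous spots.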
//Public dll functions
//Generate a hologram
extern "C" __declspec(dllexport) int GenerateHologram(float *h_checkData, uint16_t *h_pSLM_uc, float *x_spots, float *y_spots, float *z_spots, float *I_spots, int N_spots, int N_iterations, float *h_Iobtained, int method)//, float* gpuTime)
{
//*gpuTime = 0;
//float deltaTime = 0;
if (N_spots > MAX_SPOTS)
N_spots = MAX_SPOTS;
else if (N_spots < 1)
method = 100;
else if (N_spots < 3)
method = 0;
memsize_spotsf = N_spots*sizeof(float);
method = computeAndCopySpotData(I_spots, x_spots, y_spots, z_spots, N_spots, method); //sets method to -1 if N_spots == 0.
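//method selects the algorithm: 0 = direct "Lenses and Prisms" superposition, 1 = Gerchberg-Saxton with Fresnel summation, 2 = FFT-based Gerchberg-Saxton. Any other value (e.g. when no spots are requested) falls through to the default case and no hologram is computed.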
switch (method) {
case 0:
//////
//Generate the hologram using "Lenses and Prisms"
//////
LensesAndPrisms<<< n_blocks_Phi, BLOCK_SIZE >>>(d_pSLM_uc, d_LUT_uc, d_AberrationCorr_f, d_LUTPolCoeff_f);
M_CHECK_ERROR();
cudaDeviceSynchronize();
M_CHECK_ERROR();
if (saveI_b)
{
calculateIobtained<<< N_spots, SLM_SIZE>>>(d_pSLM_uc, d_Iobtained);
M_CHECK_ERROR();
cudaDeviceSynchronize();
M_SAFE_CALL(cudaMemcpy(h_Iobtained, d_Iobtained, N_spots*sizeof(float), cudaMemcpyDeviceToHost));
}
M_SAFE_CALL(cudaMemcpy(h_pSLM_uc, d_pSLM_uc, memsize_SLMuc, cudaMemcpyDeviceToHost));
break;
case 1:
//Generate hologram using Fresnel propagation
//Uncomment this to start with pre-calculated hologram:
//cudaMemcpy(d_pSLM_uc, h_pSLM_uc, memsize_SLMuc, cudaMemcpyHostToDevice);
//cudaDeviceSynchronize();
//uc2f<<< n_blocks_Phi, BLOCK_SIZE >>>(d_pSLM_f, d_pSLM_uc, N_pixels);
/*cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaEventSynchronize(start);*/
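//Gerchberg-Saxton loop with direct Fresnel summation: each iteration propagates the current SLM phase to the spot positions, updates the per-spot weights from the obtained amplitudes, and propagates back to the SLM plane to form the next phase pattern.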
for (int l=0; l<N_iterations; l++)
{
//Propagate to the spot positions
if (useDC_b)
{
M_CUFFT_SAFE_CALL(cufftExecC2C(plan, d_SLM_cc, d_FFTo_cc, CUFFT_FORWARD));
M_CHECK_ERROR();
PropagateToSpotPositionsDC_Fresnel<<< N_spots, SLM_SIZE>>>(d_pSLM_f, d_obtainedPhase, d_weights, d_Iobtained, l); //this function is very slow
M_CHECK_ERROR();
setActiveRegionToZero<<< SLM_SIZE, SLM_SIZE >>>(d_FFTo_cc);
}
else
PropagateToSpotPositions_Fresnel<<< N_spots, SLM_SIZE>>>(d_pSLM_f, d_spotRe_f, d_spotIm_f);
M_CHECK_ERROR();
cudaDeviceSynchronize();
//Propagate to the SLM plane
if (useDC_b)
{
M_CUFFT_SAFE_CALL(cufftExecC2C(plan, d_FFTo_cc, d_SLM_cc, CUFFT_INVERSE));
cudaDeviceSynchronize();
PropagateToSLMDC_Fresnel<<< n_blocks_Phi, BLOCK_SIZE >>>(d_obtainedPhase, d_weights, d_SLM_cc, d_pSLM_f, l, d_pSLMstart_f, (l==(N_iterations-1)), d_pSLM_uc);
}
else
{
PropagateToSLM_Fresnel<<< n_blocks_Phi, BLOCK_SIZE >>>(d_spotRe_f, d_spotIm_f, d_pSLM_f, d_weights, l, d_pSLMstart_f, d_Iobtained, (l==(N_iterations-1)), d_pSLM_uc, d_LUT_uc, d_AberrationCorr_f, d_LUTPolCoeff_f);
}
M_CHECK_ERROR();
cudaDeviceSynchronize();
}
/*cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&deltaTime, start, stop);
*gpuTime = deltaTime; */
if (saveI_b)
M_SAFE_CALL(cudaMemcpy(h_Iobtained, d_Iobtained, N_spots*(N_iterations)*sizeof(float), cudaMemcpyDeviceToHost));
else
M_SAFE_CALL(cudaMemcpy(h_Iobtained, d_weights, N_spots*(N_iterations)*sizeof(float), cudaMemcpyDeviceToHost));
M_SAFE_CALL(cudaMemcpy(h_pSLM_uc, d_pSLM_uc, memsize_SLMuc, cudaMemcpyDeviceToHost));
break;
case 2:
//generate hologram using fast fourier transforms
//Uncomment this to start with pre-calculated hologram:
//cudaMemcpy(d_pSLM_uc, h_pSLM_uc, memsize_SLMuc, cudaMemcpyHostToDevice);
//cudaDeviceSynchronize();
//p_uc2c_cc_shift<<< n_blocks_Phi, BLOCK_SIZE >>>(d_SLM_cc, d_pSLM_uc, N_pixels, data_w);
//M_DISPLAY_DATA_CC(d_SLM_cc, 100);
M_SAFE_CALL(cudaMemcpy(d_desiredAmp, h_desiredAmp, memsize_spotsf, cudaMemcpyHostToDevice));
M_SAFE_CALL(cudaMemset(d_FFTd_cc, 0, memsize_SLMcc));
M_CHECK_ERROR();
cudaDeviceSynchronize();
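//FFT-based Gerchberg-Saxton loop: the forward FFT gives the field in the trapping plane, ReplaceAmpsSpots(DC)_FFT keeps the phase at the spot pixels but replaces their amplitudes with the weighted targets, the inverse FFT returns to the SLM plane, and ReplaceAmpsSLM_FFT restores the laser amplitude there.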
for (int l=0; l<N_iterations; l++)
{
// Transform to trapping plane
M_CUFFT_SAFE_CALL(cufftExecC2C(plan, d_SLM_cc, d_FFTo_cc, CUFFT_FORWARD));
cudaDeviceSynchronize();
// Copy phases for spot indices in d_FFTo_cc to d_FFTd_cc
if (useDC_b)
ReplaceAmpsSpotsDC_FFT <<< n_blocks_Phi, BLOCK_SIZE >>> (d_FFTo_cc, d_FFTd_cc, l, d_Iobtained, d_weights, (l==(N_iterations-1)));
else
ReplaceAmpsSpots_FFT <<< 1, N_spots >>> (d_FFTo_cc, d_FFTd_cc, l, d_Iobtained, d_weights, (l==(N_iterations-1)));
M_CHECK_ERROR();
cudaDeviceSynchronize();
//Transform back to SLM plane
M_CUFFT_SAFE_CALL(cufftExecC2C(plan, d_FFTd_cc, d_SLM_cc, CUFFT_INVERSE));
cudaDeviceSynchronize();
//M_DISPLAY_DATA_CC(d_SLM_cc, 100);
// Set amplitudes in d_SLM to the laser amplitude profile
ReplaceAmpsSLM_FFT <<< n_blocks_Phi, BLOCK_SIZE >>> (d_aLaserFFT, d_SLM_cc, d_pSLMstart_f, (l==(N_iterations-1)), d_pSLM_uc, d_LUT_uc, d_AberrationCorr_f, d_LUTPolCoeff_f);
M_CHECK_ERROR();
//M_DISPLAY_DATA_CC(d_SLM_cc, 100);
cudaDeviceSynchronize();
}
if (saveI_b)
M_SAFE_CALL(cudaMemcpy(h_Iobtained, d_Iobtained, N_spots*(N_iterations)*sizeof(float), cudaMemcpyDeviceToHost));
else
M_SAFE_CALL(cudaMemcpy(h_Iobtained, d_weights, N_spots*(N_iterations)*sizeof(float), cudaMemcpyDeviceToHost));
M_SAFE_CALL(cudaMemcpy(h_pSLM_uc, d_pSLM_uc, memsize_SLMuc, cudaMemcpyDeviceToHost));
break;
default:
break;
}
//Handle CUDA errors
status = cudaGetLastError();
return status;
}
//Allocate GPU memory
extern "C" __declspec(dllexport) int startCUDA(float *h_pSLMstart, int deviceId)
{
//Make sure GPU with desired deviceId exists, set deviceId to 0 if not
int deviceCount=0;
if (cudaGetDeviceCount(&deviceCount) == cudaSuccess)
{
if (deviceId >= deviceCount)
deviceId = 0;
}
M_SAFE_CALL(cudaSetDevice(deviceId));
cudaDeviceProp deviceProp;
M_SAFE_CALL(cudaGetDeviceProperties(&deviceProp, deviceId));
maxThreads_device = deviceProp.maxThreadsPerBlock;
borderWidthDC_i = 0;
int MaxIterations = 1000;
data_w = SLM_SIZE;
cudaMemcpyToSymbol(c_data_w, &data_w, sizeof(int), 0, cudaMemcpyHostToDevice);
float data_w_f = (float)data_w;
cudaMemcpyToSymbol(c_data_w_f, &data_w_f, sizeof(float), 0, cudaMemcpyHostToDevice);
int half_w = (int)(data_w/2);
cudaMemcpyToSymbol(c_half_w, &half_w, sizeof(int), 0, cudaMemcpyHostToDevice);
float half_w_f = (float)data_w/2.0f;
cudaMemcpyToSymbol(c_half_w_f, &half_w_f, sizeof(float), 0, cudaMemcpyHostToDevice);
N_pixels = data_w * data_w;
cudaMemcpyToSymbol(c_N_pixels, &N_pixels, sizeof(int), 0, cudaMemcpyHostToDevice);
float N_pixels_f = (float)N_pixels;
cudaMemcpyToSymbol(c_N_pixels_f, &N_pixels_f, sizeof(float), 0, cudaMemcpyHostToDevice);
int logN = (int)(log2(data_w_f));
cudaMemcpyToSymbol(c_log2data_w, &logN, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_useRPC_b, &useRPC_b, sizeof(bool), 0, cudaMemcpyHostToDevice);
float SLMpitch_f = 1.0f/data_w_f;
cudaMemcpyToSymbol(c_SLMpitch_f, &SLMpitch_f, sizeof(float), 0, cudaMemcpyHostToDevice);
N_iterations_last = 10;
memsize_spotsf = MAX_SPOTS * sizeof(float);
memsize_SLM_f = N_pixels * sizeof(float);
memsize_SLMuc = N_pixels * sizeof(uint16_t);
memsize_SLMcc = N_pixels * sizeof(cufftComplex);
n_blocks_Phi = (N_pixels/BLOCK_SIZE + (N_pixels%BLOCK_SIZE == 0 ? 0:1));
//memory allocations for all methods
M_SAFE_CALL(cudaMalloc((void**)&d_x, memsize_spotsf ));
M_SAFE_CALL(cudaMalloc((void**)&d_y, memsize_spotsf ));
M_SAFE_CALL(cudaMalloc((void**)&d_z, memsize_spotsf ));
M_SAFE_CALL(cudaMalloc((void**)&d_I, memsize_spotsf ));
M_SAFE_CALL(cudaMalloc((void**)&d_desiredAmp, memsize_spotsf ));
M_SAFE_CALL(cudaMalloc((void**)&d_weights, MAX_SPOTS*(MaxIterations+1)*sizeof(float)));
M_SAFE_CALL(cudaMalloc((void**)&d_Iobtained, MAX_SPOTS*MaxIterations*sizeof(float)));
M_SAFE_CALL(cudaMalloc((void**)&d_obtainedPhase, memsize_spotsf ));
M_SAFE_CALL(cudaMalloc((void**)&d_spotRe_f, memsize_spotsf ));
M_SAFE_CALL(cudaMalloc((void**)&d_spotIm_f, memsize_spotsf ));
int data_w_pow2 = pow(2, ceil(log((float)data_w)/log(2.0f)));
M_SAFE_CALL(cudaMalloc((void**)&d_pSLM_f, data_w_pow2*data_w_pow2*sizeof(float)));//the size of d_pSLM_f must be a power of 2 for the summation algorithm to work
M_SAFE_CALL(cudaMemset(d_pSLM_f, 0, data_w_pow2*data_w_pow2*sizeof(float)));
M_SAFE_CALL(cudaMalloc((void**)&d_pSLMstart_f, memsize_SLM_f));
M_SAFE_CALL(cudaMalloc((void**)&d_pSLM_uc, memsize_SLMuc));
M_SAFE_CALL(cudaMemset(d_pSLMstart_f, 0, N_pixels*sizeof(float)));
M_SAFE_CALL(cudaMemcpy(d_pSLM_f, h_pSLMstart, N_pixels*sizeof(float), cudaMemcpyHostToDevice));
//memory allocations etc. for all FFT based Gerchberg-Saxton
M_SAFE_CALL(cudaMalloc((void**)&d_spot_index, MAX_SPOTS * sizeof(int)));
M_SAFE_CALL(cudaMalloc((void**)&d_FFTd_cc, memsize_SLMcc));
M_SAFE_CALL(cudaMalloc((void**)&d_FFTo_cc, memsize_SLMcc));
M_SAFE_CALL(cudaMalloc((void**)&d_SLM_cc, memsize_SLMcc));
M_SAFE_CALL(cudaDeviceSynchronize());
p2c <<< n_blocks_Phi, BLOCK_SIZE >>>(d_SLM_cc, d_pSLM_f, N_pixels);
M_CHECK_ERROR();
cudaDeviceSynchronize();
M_CUFFT_SAFE_CALL(cufftPlan2d(&plan, data_w, data_w, CUFFT_C2C));
float *h_aLaserFFT = (float *)malloc(memsize_SLM_f);
status = cudaGetLastError();
return status;
}
//Free GPU memory and shut down SLM
extern "C" __declspec(dllexport) int stopCUDA()
{
M_SAFE_CALL(cudaFree(d_x));
M_SAFE_CALL(cudaFree(d_y));
M_SAFE_CALL(cudaFree(d_z));
M_SAFE_CALL(cudaFree(d_I));
M_SAFE_CALL(cudaFree(d_weights));
M_SAFE_CALL(cudaFree(d_Iobtained));
M_SAFE_CALL(cudaFree(d_pSLM_f));
M_SAFE_CALL(cudaFree(d_pSLMstart_f));
M_SAFE_CALL(cudaFree(d_pSLM_uc));
M_SAFE_CALL(cudaFree(d_FFTd_cc));
M_SAFE_CALL(cudaFree(d_FFTo_cc));
M_SAFE_CALL(cudaFree(d_SLM_cc));
M_CUFFT_SAFE_CALL(cufftDestroy(plan));
cudaDeviceReset();
status = cudaGetLastError();
return status;
}
//Device functions
__device__ float uc2phase(float uc)
{
return (float)uc*2.0f*M_PI/65536.0f - M_PI;
}
__device__ uint16_t phase2uc(float phase2pi)
{
return (uint16_t)floor((phase2pi + M_PI)*65536.0f / (2.0f * M_PI));
}
__device__ int phase2int32(float phase2pi)
{
return (int)floor((phase2pi + M_PI)*65536.0f / (2.0f * M_PI));
}
__device__ float ApplyAberrationCorrection(float pSpot, float correction)
{
pSpot = pSpot - correction; //apply correction
return (pSpot - (2.0f*M_PI) * floor((pSpot+M_PI) / (2.0f*M_PI))); //apply mod([-pi, pi], pSpot)
}
__device__ int getXint(int index)
{
#ifdef SLMPOW2
int X_int = index&(c_data_w[0]-1);
#else
int X_int = index % c_data_w[0];
#endif
return X_int;
}
__device__ int getYint(int index, int X_int)
{
#ifdef SLMPOW2
int Y_int = (index-X_int)>>c_log2data_w[0];
#else
int Y_int = (int)floorf((float)index / c_data_w_f[0]);
#endif
return Y_int;
}
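//Maps a pixel index to its FFT-shifted counterpart by swapping quadrants (offsetting by half the array width in x and y); the operation is its own inverse and puts the zero spatial frequency at the centre of the array.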
__device__ int fftshift(int idx, int X, int Y)
{
if (X < c_half_w[0])
{
if (Y < c_half_w[0])
{
return idx + (c_data_w[0] * c_half_w[0]) + c_half_w[0];
}
else
{
return idx - (c_data_w[0] * c_half_w[0]) + c_half_w[0];
}
}
else
{
if (Y < c_half_w[0])
{
return idx + (c_data_w[0] * c_half_w[0]) - c_half_w[0];
}
else
{
return idx - (c_data_w[0] * c_half_w[0]) - c_half_w[0];
}
}
}
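//Final warp of the shared-memory reduction: the volatile qualifier forces reloads between steps and the code relies on implicit warp-synchronous execution instead of __syncthreads. Note that this assumption is no longer guaranteed on architectures with independent thread scheduling (Volta and later) without __syncwarp().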
__device__ void warpReduceC(volatile float *s_Vre, volatile float *s_Vim, int tid)
{
s_Vre[tid] += s_Vre[tid + 32];
s_Vim[tid] += s_Vim[tid + 32];
s_Vre[tid] += s_Vre[tid + 16];
s_Vim[tid] += s_Vim[tid + 16];
s_Vre[tid] += s_Vre[tid + 8];
s_Vim[tid] += s_Vim[tid + 8];
s_Vre[tid] += s_Vre[tid + 4];
s_Vim[tid] += s_Vim[tid + 4];
s_Vre[tid] += s_Vre[tid + 2];
s_Vim[tid] += s_Vim[tid + 2];
s_Vre[tid] += s_Vre[tid + 1];
s_Vim[tid] += s_Vim[tid + 1];
}
inline int computeAndCopySpotData(float *h_I, float *x, float *y, float *z, int N_spots, int method)
{
//float Isum = 0.0f;
//for (int i = 0; i<N_spots; i++)
// Isum += h_I[i];
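//Pre-compensate each desired amplitude with reciprocal sinc factors: off-axis spots are attenuated by the sinc-shaped envelope of the pixelated SLM, so their targets are raised by (pi*x/N)/sin(pi*x/N) in each direction. The sqrtf converts the requested intensity (divided by 100, i.e. treated as a percentage) into an amplitude.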
for (int j = 0; j<N_spots; j++)
{
float sincx_rec = (x[j]==0)? 1.0f:((M_PI*x[j]/SLMsizef)/sinf(M_PI*x[j]/SLMsizef));
float sincy_rec = (y[j]==0)? 1.0f:((M_PI*y[j]/SLMsizef)/sinf(M_PI*y[j]/SLMsizef));
h_desiredAmp[j] = (h_I[j] <= 0.0f) ? 1.0f:(sincx_rec * sincy_rec * sqrtf(h_I[j]/100)*SLMsizef*SLMsizef);
if (method == 2)
h_spotIndex[j] = ((int)(x[j])&(data_w-1)) + ((int)(y[j])&(data_w-1))* data_w;
}
cudaMemcpyToSymbol(c_x, x, N_spots*sizeof(float), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_y, y, N_spots*sizeof(float), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_z, z, N_spots*sizeof(float), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_desiredAmp, h_desiredAmp, N_spots*sizeof(float), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_N_spots, &N_spots, sizeof(int), 0, cudaMemcpyHostToDevice);
if (method == 2)
cudaMemcpyToSymbol(c_spotIndex, h_spotIndex, N_spots*sizeof(int), 0, cudaMemcpyHostToDevice);
if (N_spots == 0)
method = -1;
return method;
}
//Apply corrections to precalculated hologram
__global__ void ApplyCorrections(uint16_t *g_pSLM_uc, uint16_t *g_LUT, float *g_AberrationCorr_f, float *g_LUTPolCoeff_f)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float pSLM2pi_f = uc2phase(g_pSLM_uc[idx]);
g_pSLM_uc[idx] = phase2uc(pSLM2pi_f);
}
//Calculate hologram using "Lenses and Prisms"
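//Each spot contributes a prism (linear phase 2*pi*(x*X + y*Y), lateral displacement) plus a Fresnel lens (quadratic phase pi*z*(X*X + Y*Y), axial displacement). The contributions are summed per pixel with the desired amplitudes and only the argument of the complex sum is kept as the SLM phase.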
__global__ void LensesAndPrisms(uint16_t *g_SLMuc, uint16_t *g_LUT, float *d_AberrationCorr_f, float *d_LUTPolCoeff_f)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < c_N_pixels[0])
{
//get pixel coordinates
int X_int = getXint(idx);
int Y_int = getYint(idx, X_int);
float X = c_SLMpitch_f[0]*(X_int - c_half_w_f[0]);
float Y = c_SLMpitch_f[0]*(Y_int - c_half_w_f[0]);
float phase2pi;
float SLMre = 0.0f;
float SLMim = 0.0f;
for (int ii=0; ii<c_N_spots[0]; ++ii)
{
//add variable phases to function call
phase2pi = M_PI * c_z[ii] * (X*X + Y*Y) + 2.0f * M_PI * (X * (c_x[ii]) + Y * (c_y[ii]) );
SLMre = SLMre + c_desiredAmp[ii] * cosf(phase2pi);
SLMim = SLMim + c_desiredAmp[ii] * sinf(phase2pi);
}
phase2pi = atan2f(SLMim, SLMre); // [-pi,pi]
g_SLMuc[idx] = phase2uc(phase2pi);
}
}
__global__ void calculateIobtained(uint16_t *g_pSLM_uc, float *g_Iobtained)
{
int blockSize = c_data_w[0];
int spot_number = blockIdx.x;
int tid = threadIdx.x;
int i = tid;
__shared__ float s_Vre[SLM_SIZE];
__shared__ float s_Vim[SLM_SIZE];
s_Vre[tid] = 0.0f;
s_Vim[tid] = 0.0f;
float X = c_SLMpitch_f[0] * ((float)tid - c_half_w_f[0]);
float Y = - c_SLMpitch_f[0] * c_half_w_f[0];
float pSLM_1;
float p;
while (i < c_N_pixels[0])
{
pSLM_1 = 2.0f*M_PI*(float)g_pSLM_uc[i]/65535.0f - M_PI;
p = pSLM_1 - M_PI * (c_z[spot_number] * (X*X + Y*Y) + 2 * (X * c_x[spot_number] + Y * c_y[spot_number]));
s_Vre[tid] += cosf(p);
s_Vim[tid] += sinf(p);
i += blockSize;
Y += c_SLMpitch_f[0];
}
/*__syncthreads();
if (tid < 512)
{
s_Vre[tid] += s_Vre[tid + 512];
s_Vim[tid] += s_Vim[tid + 512];
} */
__syncthreads();
if ((tid < 256)&&(SLM_SIZE>256))
{
s_Vre[tid] += s_Vre[tid + 256];
s_Vim[tid] += s_Vim[tid + 256];
}
__syncthreads();
if (tid < 128)
{
s_Vre[tid] += s_Vre[tid + 128];
s_Vim[tid] += s_Vim[tid + 128];
}
__syncthreads();
if (tid < 64)
{
s_Vre[tid] += s_Vre[tid + 64];
s_Vim[tid] += s_Vim[tid + 64];
}
__syncthreads();
if (tid < 32)
warpReduceC(s_Vre, s_Vim, tid);
if (tid == 0)
{
float spotRe_f = s_Vre[0] / c_N_pixels_f[0]; //512!
float spotIm_f = s_Vim[0] / c_N_pixels_f[0];
float amp = hypotf(spotRe_f, spotIm_f);
g_Iobtained[spot_number] = amp*amp;
}
}
__global__ void calculateIandPhase(uint16_t *g_pSLM_uc, float *g_Iobtained, float *g_Pobtained)
{
int blockSize = c_data_w[0];
int spot_number = blockIdx.x;
int tid = threadIdx.x;
int i = tid;
__shared__ float s_Vre[SLM_SIZE];
__shared__ float s_Vim[SLM_SIZE];
s_Vre[tid] = 0.0f;
s_Vim[tid] = 0.0f;
float X = c_SLMpitch_f[0] * ((float)tid - c_half_w_f[0]);
float Y = - c_SLMpitch_f[0] * c_half_w_f[0];
float pSLM_1;
float p;
while (i < c_N_pixels[0])
{
pSLM_1 = 2.0f*M_PI*(float)g_pSLM_uc[i]/65535.0f - M_PI;
p = pSLM_1 - M_PI * (c_z[spot_number] * (X*X + Y*Y) + 2 * (X * c_x[spot_number] + Y * c_y[spot_number]));
s_Vre[tid] += cosf(p+2*M_PI*c_z[spot_number]);
s_Vim[tid] += sinf(p+2*M_PI*c_z[spot_number]);
i += blockSize;
Y += c_SLMpitch_f[0];
}
/*__syncthreads();
if (tid < 512)
{
s_Vre[tid] += s_Vre[tid + 512];
s_Vim[tid] += s_Vim[tid + 512];
} */
__syncthreads();
if ((tid < 256)&&(SLM_SIZE>256))
{
s_Vre[tid] += s_Vre[tid + 256];
s_Vim[tid] += s_Vim[tid + 256];
}
__syncthreads();
if (tid < 128)
{
s_Vre[tid] += s_Vre[tid + 128];
s_Vim[tid] += s_Vim[tid + 128];
}
__syncthreads();
if (tid < 64)
{
s_Vre[tid] += s_Vre[tid + 64];
s_Vim[tid] += s_Vim[tid + 64];
}
__syncthreads();
if (tid < 32)
warpReduceC(s_Vre, s_Vim, tid);
if (tid == 0)
{
float spotRe_f = s_Vre[0] / c_N_pixels_f[0]; //512!
float spotIm_f = s_Vim[0] / c_N_pixels_f[0];
float amp = hypotf(spotRe_f, spotIm_f);
g_Pobtained[spot_number] = atan2f(spotIm_f , spotRe_f);
g_Iobtained[spot_number] = amp*amp;
}
}
//Functions for GS with Fresnel propagation
//Propagate from the SLM to the spot positions using Fresnel summation
//works only for blocksize = SLMsize
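//One block per spot and one thread per SLM column: each thread walks down its column accumulating the complex field at the spot, and the partial sums are then combined by the shared-memory reduction below.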
__global__ void PropagateToSpotPositions_Fresnel(float *g_pSLM2pi, float *g_spotRe_f, float *g_spotIm_f)
{
int spot_number = blockIdx.x;
int tid = threadIdx.x;
int i = tid;
__shared__ float s_Vre[SLM_SIZE];
__shared__ float s_Vim[SLM_SIZE];
s_Vre[tid] = 0.0f;
s_Vim[tid] = 0.0f;
int blockSize = blockDim.x;
float X = c_SLMpitch_f[0] * ((float)tid - c_half_w_f[0]);
float Y = - c_SLMpitch_f[0] * c_half_w_f[0];
float p;
while (i < c_N_pixels[0])
{
p = g_pSLM2pi[i] - M_PI * (c_z[spot_number] * (X*X + Y*Y) + 2.0f * (X * c_x[spot_number] + Y * c_y[spot_number]));
s_Vre[tid] += cosf(p);
s_Vim[tid] += sinf(p);
i += blockSize;
Y += c_SLMpitch_f[0];
}
/*__syncthreads();
if (tid < 512)
{
s_Vre[tid] += s_Vre[tid + 512];
s_Vim[tid] += s_Vim[tid + 512];
} */
__syncthreads();
if ((tid < 256)&&(SLM_SIZE>256))
{
s_Vre[tid] += s_Vre[tid + 256];
s_Vim[tid] += s_Vim[tid + 256];
}
__syncthreads();
if (tid < 128)
{
s_Vre[tid] += s_Vre[tid + 128];
s_Vim[tid] += s_Vim[tid + 128];
}
__syncthreads();
if (tid < 64)
{
s_Vre[tid] += s_Vre[tid + 64];
s_Vim[tid] += s_Vim[tid + 64];
}
__syncthreads();
if (tid < 32)
warpReduceC(s_Vre, s_Vim, tid);
if (tid == 0)
{
g_spotRe_f[spot_number] = s_Vre[0];// / c_N_pixels_f[0];
g_spotIm_f[spot_number] = s_Vim[0];// / c_N_pixels_f[0];
}
}
//Propagate from the SLM to the spot positions using Fresnel summation
//works only for blocksize = SLMsize
__global__ void PropagateToSpotPositionsDC_Fresnel(float *g_pSLM_f, float *g_obtainedPhase, float *g_weights, float *obtainedI, int iteration)
{
int spot_number = blockIdx.x;
int tid = threadIdx.x;
int i = tid;
__shared__ float s_Vre[SLM_SIZE];
__shared__ float s_Vim[SLM_SIZE];
float X, Y;
float p;
s_Vre[tid] = 0.0f;
s_Vim[tid] = 0.0f;
int X_int = getXint(i);
X = c_SLMpitch_f[0]*(X_int - c_half_w_f[0]);
Y = -0.5f;
while (i < c_N_pixels[0])
{
p = g_pSLM_f[i] - M_PI * (c_z[spot_number] * (X*X + Y*Y) + 2.0f * (X * c_x[spot_number] + Y * c_y[spot_number]));
s_Vre[tid] += cosf(p);
s_Vim[tid] += sinf(p);
Y += c_SLMpitch_f[0];
i += SLM_SIZE;
}
__syncthreads();
if ((tid < 256)&&(SLM_SIZE>256))
{
s_Vre[tid] += s_Vre[tid + 256];
s_Vim[tid] += s_Vim[tid + 256];
}
__syncthreads();
if (tid < 128)
{
s_Vre[tid] += s_Vre[tid + 128];
s_Vim[tid] += s_Vim[tid + 128];
}
__syncthreads();
if (tid < 64)
{
s_Vre[tid] += s_Vre[tid + 64];
s_Vim[tid] += s_Vim[tid + 64];
}
__syncthreads();
if (tid < 32)
warpReduceC(s_Vre, s_Vim, tid);
if (tid == 0)
{
g_obtainedPhase[spot_number] = atan2f(s_Vim[0], s_Vre[0]);
float obtainedAmp = hypotf(s_Vre[0], s_Vim[0]);
float desiredAmp = c_desiredAmp[spot_number];
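//Weighted Gerchberg-Saxton update: each spot's weight is scaled by desiredAmp/obtainedAmp so that spots that came out too dim are boosted in the next iteration; in iteration 0 the weight is simply initialised from the desired amplitude.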
if (iteration != 0)
{
g_weights[spot_number + c_N_spots[0]*iteration] = g_weights[spot_number + c_N_spots[0]*(iteration-1)] * (desiredAmp / obtainedAmp);
}
else
{
//obtainedAmp = (obtainedAmp<0.5f) ? 0.5f : obtainedAmp;
g_weights[spot_number] = desiredAmp/c_N_pixels_f[0];
}
if (c_saveI_b[0])
obtainedI[spot_number + c_N_spots[0]*iteration] = obtainedAmp*obtainedAmp/(desiredAmp*desiredAmp);//(c_N_pixels_f[0]*c_N_pixels_f[0]);
}
}
//Obtain phases in SLM plane
__global__ void PropagateToSLM_Fresnel(float *g_spotRe_f, float *g_spotIm_f, float *g_pSLM2pi, float *g_weights, int iteration, float *g_pSLMstart, float *g_Iobtained, bool getpSLM65535, uint16_t *g_pSLM65535_uc,
uint16_t *g_LUT, float *g_AberrationCorr_f, float *g_LUTPolCoeff_f)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int tid = threadIdx.x;
__shared__ float s_aSpot[MAX_SPOTS], s_aSpotsMean, s_weight[MAX_SPOTS], s_pSpot[MAX_SPOTS];
float reSLM = 0.0f, imSLM = 0.0f, pSLM2pi_f = 0.0f;
if (idx<c_N_pixels[0])
{
if (tid<c_N_spots[0])
{
float spotRe_f = g_spotRe_f[tid];
float spotIm_f = g_spotIm_f[tid];
s_pSpot[tid] = atan2f(spotIm_f, spotRe_f);
s_aSpot[tid] = hypotf(spotRe_f, spotIm_f)/c_desiredAmp[tid];
if (iteration != 0)
s_weight[tid] = g_weights[tid + iteration*c_N_spots[0]];
else
{
s_aSpot[tid] = (s_aSpot[tid]<0.5f) ? 0.5f : s_aSpot[tid];
s_weight[tid] = c_desiredAmp[tid];
}
}
__syncthreads();
//compute weights
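//GSW weighting: each spot weight is rescaled by (mean spot amplitude)/(this spot's amplitude), pushing the spots towards equal relative intensity over the iterations.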
if (tid==0)
{
float s_aSpot_sum = 0.0f;
for (int jj=0; jj<c_N_spots[0];jj++)
{
s_aSpot_sum += s_aSpot[jj];
}
s_aSpotsMean = s_aSpot_sum / (float)c_N_spots[0];
}
__syncthreads();
if (tid<c_N_spots[0])
{
s_weight[tid] = s_weight[tid] * s_aSpotsMean / s_aSpot[tid];
if (!getpSLM65535) //Copy weights to use as initial value next run
g_weights[tid + c_N_spots[0]*(iteration+1)] = s_weight[tid];
//else
// g_weights[tid] = s_weight[tid]; //Transferring weights to next run may give diverging weights
if (c_saveI_b[0])
g_Iobtained[tid + c_N_spots[0]*iteration] = s_aSpot[tid]*s_aSpot[tid]; //may be excluded, used for monitoring only
}
__syncthreads();
//get pixel coordinates
int X_int = getXint(idx);
int Y_int = getYint(idx, X_int);
float X = c_SLMpitch_f[0]*(X_int - c_half_w_f[0]);
float Y = c_SLMpitch_f[0]*(Y_int - c_half_w_f[0]);
//compute SLM pSpot by summing contribution from all spots
for (int k=0; k<c_N_spots[0]; k++)
{
float delta = M_PI * c_z[k] * (X*X + Y*Y) + 2.0f * M_PI * (X * c_x[k] + Y * c_y[k]);
reSLM += s_weight[k] * cosf(s_pSpot[k] + delta);
imSLM += s_weight[k] * sinf(s_pSpot[k] + delta);
}
pSLM2pi_f = atan2f(imSLM, reSLM);
if (c_useRPC_b[0]) //Apply RPC (restricted Phase Change)
{
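//Restricted Phase Change: if the new phase differs from the stored phase by more than alphaRPC the old value is kept, which limits how much each pixel may change between successive holograms.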
float pSLMstart = g_pSLMstart[idx];
if (fabs(pSLM2pi_f - pSLMstart) > c_alphaRPC_f[0])
pSLM2pi_f = pSLMstart;
if (getpSLM65535)
g_pSLMstart[idx] = pSLM2pi_f;
}
if (getpSLM65535) //Compute final SLM phases and write to global memory...
g_pSLM65535_uc[idx] = phase2uc(pSLM2pi_f);
g_pSLM2pi[idx] = pSLM2pi_f; //...or write intermediate pSpot to global memory
}
}
//Obtain phases in SLM plane
__global__ void PropagateToSLMDC_Fresnel(float *g_pSpot, float *g_wSpot, cufftComplex *g_cSLM_cc, float *g_pSLM_f, int iteration, float *g_pSLMstart, bool getpSLM65535,
uint16_t *g_pSLM65535_uc)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int tid = threadIdx.x;
__shared__ float s_weight[MAX_SPOTS], s_pSpot[MAX_SPOTS];
float reSLM = 0.0f, imSLM = 0.0f, pSLM2pi_f = 0.0f;
if (idx<c_N_pixels[0])
{
if (tid<c_N_spots[0])
{
s_pSpot[tid] = g_pSpot[tid];
s_weight[tid] = g_wSpot[tid+c_N_spots[0]*iteration];
}
__syncthreads();
//get pixel coordinates
int X_int = getXint(idx);
int Y_int = getYint(idx, X_int);
int shiftedidx = fftshift(idx, X_int, Y_int);
float X = c_SLMpitch_f[0]*(X_int - c_half_w_f[0]);
float Y = c_SLMpitch_f[0]*(Y_int - c_half_w_f[0]);
//compute SLM pSpot by summing contribution from all spots
for (int k=0; k<c_N_spots[0]; k++)
{
float delta = M_PI * c_z[k] * (X*X + Y*Y) + 2.0f * M_PI * (X * c_x[k] + Y * c_y[k]);
reSLM += s_weight[k] * cosf(s_pSpot[k] + delta);
imSLM += s_weight[k] * sinf(s_pSpot[k] + delta);
}
cufftComplex cSLM_cc = g_cSLM_cc[shiftedidx];
reSLM += cSLM_cc.x/c_N_pixels_f[0];
imSLM += cSLM_cc.y/c_N_pixels_f[0];
pSLM2pi_f = atan2f(imSLM, reSLM);
if (c_useRPC_b[0]) //Apply RPC (restricted Phase Change)
{
float pSLMstart = g_pSLMstart[shiftedidx];
if (fabs(pSLM2pi_f - pSLMstart) > c_alphaRPC_f[0])
pSLM2pi_f = pSLMstart;
if (getpSLM65535)
g_pSLMstart[shiftedidx] = pSLM2pi_f;
}
g_pSLM_f[idx] = pSLM2pi_f;
g_cSLM_cc[shiftedidx].x = cosf(pSLM2pi_f);
g_cSLM_cc[shiftedidx].y = sinf(pSLM2pi_f);
if (getpSLM65535) //Compute final SLM phases and write to global memory...
g_pSLM65535_uc[idx] = phase2uc(pSLM2pi_f);
}
}
//Clear inside the DC frame
__global__ void setActiveRegionToZero(cufftComplex *g_Farfield_cc) //this only works if blocksize = nblocks = SLMsize = 512
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int idx = bid * blockDim.x + tid;
if (((tid < (c_half_w[0] - c_DCborderWidth[0]))||(tid > ((c_half_w[0]-1) + c_DCborderWidth[0])))&&((bid < (c_half_w[0] - c_DCborderWidth[0]))||(bid > ((c_half_w[0]-1) + c_DCborderWidth[0]))))
{
g_Farfield_cc[idx].x = 0.0f;
g_Farfield_cc[idx].y = 0.0f;
}
}
//Functions for GS with FFT propagation
//Compute the phase in SLM pixels and set amplitude to unity or Laser amp
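//SLM-plane constraint of the Gerchberg-Saxton iteration: keep only the phase of the back-propagated field and reset the amplitude to the laser profile (hard-coded to 1.0 here).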
__global__ void ReplaceAmpsSLM_FFT(float *g_aLaser, cufftComplex *g_cAmp, float *g_pSLMstart, bool getpSLM65535, uint16_t *g_pSLM65535_uc,
uint16_t *g_LUT, float *g_AberrationCorr_f, float *g_LUTPolCoeff_f)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<c_N_pixels[0])
{
float aLaser = 1.0f;//g_aLaser[idx];
cufftComplex cAmp = g_cAmp[idx];
float pSLM2pi_f = atan2f(cAmp.y, cAmp.x);
if (c_useRPC_b[0])
{
float pSLMstart = g_pSLMstart[idx];
if (fabs(pSLM2pi_f - pSLMstart) > c_alphaRPC_f[0])
pSLM2pi_f = pSLMstart;
}
if (getpSLM65535)
{
if (c_useRPC_b[0])
g_pSLMstart[idx] = pSLM2pi_f;
//float phase65535;
int X_int = getXint(idx);
int Y_int = getYint(idx, X_int);
int shiftedidx = fftshift(idx, X_int, Y_int);
g_pSLM65535_uc[shiftedidx] = phase2uc(pSLM2pi_f);
}
g_cAmp[idx].x = aLaser*cosf(pSLM2pi_f);
g_cAmp[idx].y = aLaser*sinf(pSLM2pi_f);
}
__syncthreads();
}
//Adjust amplitudes in spot positions
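//Far-field constraint: only the pixels listed in c_spotIndex are touched; their phase is kept and their amplitude is replaced by the weighted target, while the rest of d_FFTd_cc stays at the zero it was memset to before the iteration loop.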
__global__ void ReplaceAmpsSpots_FFT(cufftComplex *g_cSpotAmp_cc, cufftComplex *g_cSpotAmpNew_cc, int iteration, float *g_Iobtained, float *g_weight, bool last_iteration)
{
int tid = threadIdx.x;
int spotIndex;
float pSpot;
__shared__ float s_aSpot[MAX_SPOTS], s_ISpotsMeanSq;
float weight;
cufftComplex cSpotAmp_cc;
if (tid<c_N_spots[0])
{
spotIndex = c_spotIndex[tid];
cSpotAmp_cc = g_cSpotAmp_cc[spotIndex];
pSpot = atan2f(cSpotAmp_cc.y, cSpotAmp_cc.x);
s_aSpot[tid] = hypotf(cSpotAmp_cc.x, cSpotAmp_cc.y)/c_desiredAmp[tid];
if (iteration != 0)
weight = g_weight[tid + iteration*c_N_spots[0]];
else
{
s_aSpot[tid] = (s_aSpot[tid]<0.5f) ? 0.5f : s_aSpot[tid];
weight = c_desiredAmp[tid];
}
}
__syncthreads();
//compute weights
if (tid==0)
{
float ISpot_sum = 0.0f;
for (int jj=0; jj<c_N_spots[0];jj++)
{
ISpot_sum += s_aSpot[jj]*s_aSpot[jj];
}
s_ISpotsMeanSq = sqrtf(ISpot_sum / (float)c_N_spots[0]); //RMS of the normalized spot amplitudes
}
__syncthreads();
if (tid<c_N_spots[0])
{
weight = weight * s_ISpotsMeanSq / s_aSpot[tid];
cSpotAmp_cc.x = cosf(pSpot) * weight;
cSpotAmp_cc.y = sinf(pSpot) * weight;
g_cSpotAmpNew_cc[spotIndex] = cSpotAmp_cc;
if (last_iteration)
g_weight[tid] = weight;
else
g_weight[c_N_spots[0] * (iteration + 1) + tid] = weight;
if (c_saveI_b[0])
g_Iobtained[c_N_spots[0] * (iteration) + tid] = s_aSpot[tid]*s_aSpot[tid];
}
}
//Adjust amplitudes in spot positions
__global__ void ReplaceAmpsSpotsDC_FFT(cufftComplex *g_cSpotAmp_cc, cufftComplex *g_cSpotAmpNew_cc, int iteration, float *g_Iobtained, float *g_weight, bool last_iteration)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int spotIndex;
float pSpot;
//__shared__ float s_aSpot[MAX_SPOTS], s_ISpotsMeanSq;
float weight;
cufftComplex cSpotAmp_cc;
if (idx<c_N_spots[0])
{
spotIndex = c_spotIndex[idx];
cSpotAmp_cc = g_cSpotAmp_cc[spotIndex];
pSpot = atan2f(cSpotAmp_cc.y, cSpotAmp_cc.x);
float aSpot = hypotf(cSpotAmp_cc.x, cSpotAmp_cc.y)/c_desiredAmp[idx];
if (iteration != 0)
weight = g_weight[idx + iteration*c_N_spots[0]];
else
{
aSpot = (aSpot<0.5f) ? 0.5f : aSpot; //should it be like this for the DC case?
weight = c_desiredAmp[idx]/(c_N_pixels_f[0]);
}
weight = weight / aSpot;
cSpotAmp_cc.x = cosf(pSpot) * weight;
cSpotAmp_cc.y = sinf(pSpot) * weight;
g_cSpotAmpNew_cc[spotIndex] = cSpotAmp_cc;
if (last_iteration)
g_weight[idx] = weight;
else
g_weight[c_N_spots[0] * (iteration + 1) + idx] = weight;
if (c_saveI_b[0])
g_Iobtained[c_N_spots[0] * (iteration) + idx] = aSpot*aSpot;
}
int X_int = getXint(idx);
int Y_int = getYint(idx, X_int);
if (((X_int > (c_half_w[0] - c_DCborderWidth[0]))&&(X_int < ((c_half_w[0]-1) + c_DCborderWidth[0])))||((Y_int > (c_half_w[0] - c_DCborderWidth[0]))&&(Y_int < ((c_half_w[0]-1) + c_DCborderWidth[0]))))
{
g_cSpotAmpNew_cc[idx].x = g_cSpotAmp_cc[idx].x/(c_N_pixels_f[0]);
g_cSpotAmpNew_cc[idx].y = g_cSpotAmp_cc[idx].y/(c_N_pixels_f[0]);
}
}
//Misc help functions
__global__ void testfunc(float *testdata)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
testdata[idx] = idx;
}
//Convert from uint16_t [0, 65535] to float [-pi, pi]
__global__ void uc2f(float *f, uint16_t *uc, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<N)
{
f[idx] = uc[idx]*2.0f*M_PI/65536.0f - M_PI;
}
}
//Calculate complex from phases
__global__ void p2c(cufftComplex *g_c, float *g_p, int M)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<M)
{
float pSpot = g_p[idx];
g_c[idx].x = cosf(pSpot);
g_c[idx].y = sinf(pSpot);
}
__syncthreads();
}
//Calculate amplitudes from complex
__global__ void c_cc2a_f(float *g_a, cufftComplex *g_c, int M)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<M)
{
g_a[idx] = hypotf(g_c[idx].x, g_c[idx].y);
}
__syncthreads();
}
//Calculate phases from complex
__global__ void c_cc2p_cc(cufftComplex *g_p, cufftComplex *g_c, int M)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<M)
{
g_p[idx].x = atan2f(g_c[idx].y, g_c[idx].x);
g_p[idx].y = 0.0f;
}
__syncthreads();
}
//Calculate phases from complex
__global__ void c_cc2p_f(float *g_p, cufftComplex *g_c, int M)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<M)
{
g_p[idx] = atan2f(g_c[idx].y, g_c[idx].x);
}
__syncthreads();
}
//Copy real part from complex
__global__ void c_cc2re_f(float *g_p, cufftComplex *g_c, int M)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<M)
{
g_p[idx] = g_c[idx].x;
}
__syncthreads();
}
//Copy imaginary part from complex
__global__ void c_cc2im_f(float *g_p, cufftComplex *g_c, int M)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<M)
{
g_p[idx] = g_c[idx].y;
}
__syncthreads();
}
//Custom debug functions
inline void mSafeCall(cudaError_t status, int line, const char *file)
{
#ifdef M_CUDA_DEBUG
do
{
if(status != cudaSuccess)
{
char CUDAmessage[200] = "CUDA says: ";
strcat(CUDAmessage, cudaGetErrorString(status));
sprintf(CUDAmessage, "%s\non line: %d\n", CUDAmessage, line);
//AfxMessageBox(CUDAmessage);
if (status != CUFFT_SUCCESS)
exit(-1);
}
cudaDeviceSynchronize();
status = cudaGetLastError();
if(status!=cudaSuccess)
{
char CUDAmessage[200] = "CUDA failed after sychronization:\n";
strcat(CUDAmessage, cudaGetErrorString(status));
sprintf(CUDAmessage, "%s\non line: %d\n", CUDAmessage, line);
//AfxMessageBox(CUDAmessage);
exit(-1);
}
}while(0);
#endif
return;
}
inline void mCufftSafeCall(cufftResult_t status, int line, const char *file)
{
#ifdef M_CUDA_DEBUG
if(status != CUFFT_SUCCESS)
{
char CUDAmessage[200] = "CUFFT error, CUDA says:\n ";
switch (status) {
case CUFFT_INVALID_PLAN: strcat(CUDAmessage,"CUFFT_INVALID_PLAN\n");break;
case CUFFT_ALLOC_FAILED: strcat(CUDAmessage,"CUFFT_ALLOC_FAILED\n");break;
case CUFFT_INVALID_TYPE: strcat(CUDAmessage,"CUFFT_INVALID_TYPE\n");break;
case CUFFT_INVALID_VALUE: strcat(CUDAmessage,"CUFFT_INVALID_VALUE\n");break;
case CUFFT_INTERNAL_ERROR: strcat(CUDAmessage,"CUFFT_INTERNAL_ERROR\n");break;
case CUFFT_EXEC_FAILED: strcat(CUDAmessage,"CUFFT_EXEC_FAILED\n");break;
case CUFFT_SETUP_FAILED: strcat(CUDAmessage,"CUFFT_SETUP_FAILED\n");break;
case CUFFT_INVALID_SIZE: strcat(CUDAmessage,"CUFFT_INVALID_SIZE\n");break;
case CUFFT_UNALIGNED_DATA: strcat(CUDAmessage,"CUFFT_UNALIGNED_DATA\n");break;
default: strcat(CUDAmessage,"CUFFT Unknown error code\n");
}
sprintf(CUDAmessage, "%son line: %d\nin file: %s", CUDAmessage, line, file);
//AfxMessageBox(CUDAmessage);
exit(-1);
}
cudaDeviceSynchronize();
cudaError_t status2 = cudaGetLastError();
if(status2!=cudaSuccess)
{
char CUDAmessage[200] = "CUDA failed after sychronization:\n";
strcat(CUDAmessage, cudaGetErrorString(status2));
sprintf(CUDAmessage, "%s\non line: %d\n", CUDAmessage, line);
//AfxMessageBox(CUDAmessage);
exit(-1);
}
#endif
return;
}
inline void mCheckError(int line, const char *file)
{
#ifdef M_CUDA_DEBUG
do
{
cudaError_t status = cudaGetLastError();
if(status!=cudaSuccess)
{
char CUDAmessage[200] = "CUDA says: ";
strcat(CUDAmessage, cudaGetErrorString(status));
sprintf(CUDAmessage, "%s\non line: %d\n", CUDAmessage, line);
//AfxMessageBox(CUDAmessage);
exit(-1);
}
cudaDeviceSynchronize();
status = cudaGetLastError();
if(status!=cudaSuccess)
{
char CUDAmessage[200] = "CUDA failed after sychronization:\n";
strcat(CUDAmessage, cudaGetErrorString(status));
sprintf(CUDAmessage, "%s\non line: %d\n", CUDAmessage, line);
//AfxMessageBox(CUDAmessage);
exit(-1);
}
}while(0);
#endif
return;
}
inline void mDisplayDataF(float *d_data, int length, int line)
{
#ifdef M_CUDA_DEBUG
do
{
int maxlength = 50;
float *h_data;
length = (length<=maxlength) ? length : maxlength;
char MessageString[1000];
h_data = (float*)malloc(length * sizeof (float));
M_SAFE_CALL(cudaMemcpy(h_data, d_data, length*sizeof(float), cudaMemcpyDeviceToHost));
sprintf(MessageString, "Line: %d\nData: ", line);
for (int ii = 0;ii<length;++ii)
{
sprintf(MessageString, "%s %f", MessageString, h_data[ii]);
}
//AfxMessageBox(MessageString, MB_ICONINFORMATION);
free(h_data);
}while(0);
#endif
return;
}
inline void mDisplayDataCC(cufftComplex *d_data, int length, int line)
{
#ifdef M_CUDA_DEBUG
do
{
int maxlength = 25;
cufftComplex *h_data;
length = (length<=maxlength) ? length : maxlength;
char MessageString[1000];
h_data = (cufftComplex*)malloc(length * sizeof (cufftComplex));
M_SAFE_CALL(cudaMemcpy(h_data, d_data, length*sizeof(cufftComplex), cudaMemcpyDeviceToHost));
sprintf(MessageString, "Line: %d\nData: ", line);
for (int ii = 0;ii<length;++ii)
{
sprintf(MessageString, "%s re: %f im: %f", MessageString, h_data[ii].x, h_data[ii].y);
}
//AfxMessageBox(MessageString, MB_ICONINFORMATION);
free(h_data);
}while(0);
#endif
return;
}
inline void mDisplayDataUC(uint16_t *d_data, int length, int line)
{
#ifdef M_CUDA_DEBUG
do
{
int maxlength = 50;
uint16_t *h_data;
length = (length<=maxlength) ? length : maxlength;
char MessageString[1000];
h_data = (uint16_t*)malloc(length * sizeof (uint16_t));
M_SAFE_CALL(cudaMemcpy(h_data, d_data, length*sizeof(uint16_t), cudaMemcpyDeviceToHost));
sprintf(MessageString, "Line: %d\nData: ", line);
for (int ii = 0;ii<length;++ii)
{
sprintf(MessageString, "%s %hhu", MessageString, h_data[ii]);
}
//AfxMessageBox(MessageString, MB_ICONINFORMATION);
free(h_data);
}while(0);
#endif
return;
}
inline void mDisplayDataI(int *d_data, int length, int line)
{
#ifdef M_CUDA_DEBUG
do
{
int maxlength = 50;
int *h_data;
length = (length<=maxlength) ? length : maxlength;
char MessageString[1000];
h_data = (int*)malloc(length * sizeof (int));
M_SAFE_CALL(cudaMemcpy(h_data, d_data, length*sizeof(int), cudaMemcpyDeviceToHost));
sprintf(MessageString, "Line: %d\nData: ", line);
for (int ii = 0;ii<length;++ii)
{
sprintf(MessageString, "%s %d", MessageString, h_data[ii]);
}
//AfxMessageBox(MessageString, MB_ICONINFORMATION);
free(h_data);
}while(0);
#endif
return;
}
//Calculate amplitudes in positions given by x, y, and z from a given hologram
extern "C" __declspec(dllexport) int GetIandPhase(float *x_spots, float *y_spots, float *z_spots, float *h_pSLM_uc, int N_spots_all, int data_w, float *h_I_obt, float *h_Phase_obt)
{
float *d_Iobtained_all;
float *d_Pobtained_all;
cudaMalloc((void**)&d_Iobtained_all, N_spots_all*sizeof(float) );
cudaMalloc((void**)&d_Pobtained_all, N_spots_all*sizeof(float) );
cudaMemcpy(d_pSLM_uc, h_pSLM_uc, memsize_SLMuc, cudaMemcpyHostToDevice);
int offset = 0;
int N_spots_rem = N_spots_all;
int N_spots_this;
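//The spots are processed in batches of MAX_SPOTS because the coordinates are passed through constant memory; note that the launch below uses a hard-coded block size of 512 threads, which assumes SLM_SIZE = 512.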
while (N_spots_rem > 0)
{
N_spots_this = (N_spots_rem > MAX_SPOTS) ? MAX_SPOTS : N_spots_rem;
cudaMemcpyToSymbol(c_x, x_spots+offset, N_spots_this*sizeof(float), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_y, y_spots+offset, N_spots_this*sizeof(float), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_z, z_spots+offset, N_spots_this*sizeof(float), 0, cudaMemcpyHostToDevice);
calculateIandPhase<<< N_spots_this, 512>>>(d_pSLM_uc, d_Iobtained_all+offset, d_Pobtained_all+offset);
//calculateIobtained(uint16_t *g_pSLM_uc, float *g_Iobtained)
cudaDeviceSynchronize();
N_spots_rem -= MAX_SPOTS;
offset += MAX_SPOTS;
}
cudaMemcpy(h_I_obt, d_Iobtained_all, N_spots_all*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(h_Phase_obt, d_Pobtained_all, N_spots_all*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_Iobtained_all);
cudaFree(d_Pobtained_all);
status = cudaGetLastError();
return status;
}
|
4e2fcfeb4dae04ccbaaf231d6b17508b9012e374.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
*
* Copyright 2010-2011 Duane Merrill
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* For more information, see our Google Code project site:
* http://code.google.com/p/back40computing/
*
******************************************************************************/
/******************************************************************************
* Tuning tool for establishing optimal scan granularity configuration types
******************************************************************************/
#include <stdio.h>
// Scan includes
#include <b40c/util/arch_dispatch.cuh>
#include <b40c/scan/problem_type.cuh>
#include <b40c/scan/problem_config.cuh>
#include <b40c/scan/enactor.cuh>
#include <b40c/util/cuda_properties.cuh>
#include <b40c/util/numeric_traits.cuh>
#include <b40c/util/parameter_generation.cuh>
// Test utils
#include "b40c_test_util.h"
using namespace b40c;
/******************************************************************************
* Defines, constants, globals, and utility types
******************************************************************************/
#ifndef TUNE_ARCH
#define TUNE_ARCH (200)
#endif
#ifndef TUNE_SIZE
#define TUNE_SIZE (4)
#endif
bool g_verbose;
int g_max_ctas = 0;
int g_iterations = 1;
bool g_verify;
template <typename T>
struct Sum
{
static __host__ __device__ __forceinline__ T BinaryOp(const T &a, const T &b)
{
return a + b;
}
static __host__ __device__ __forceinline__ T Identity()
{
return 0;
}
};
template <typename T>
struct Max
{
static __host__ __device__ __forceinline__ T BinaryOp(const T &a, const T &b)
{
return (a > b) ? a : b;
}
static __host__ __device__ __forceinline__ T Identity()
{
return 0;
}
};
/******************************************************************************
* Utility routines
******************************************************************************/
/**
* Displays the commandline usage for this tool
*/
void Usage()
{
printf("\ntune_scan [--device=<device index>] [--v] [--i=<num-iterations>] "
"[--max-ctas=<max-thread-blocks>] [--n=<num-elements>]\n");
printf("\n");
printf("\t--v\tDisplays verbose configuration to the console.\n");
printf("\n");
printf("\t--i\tPerforms the scan operation <num-iterations> times\n");
printf("\t\t\ton the device. Re-copies original input each time. Default = 1\n");
printf("\n");
printf("\t--n\tThe number of elements to comprise the sample problem\n");
printf("\t\t\tDefault = 512\n");
printf("\n");
}
/******************************************************************************
* Tuning Parameter Enumerations and Ranges
******************************************************************************/
/**
* Enumerated tuning params
*/
enum TuningParam {
PARAM_BEGIN,
UPSWEEP_LOG_THREADS,
UPSWEEP_LOG_LOAD_VEC_SIZE,
UPSWEEP_LOG_LOADS_PER_TILE,
DOWNSWEEP_LOG_THREADS,
DOWNSWEEP_LOG_LOAD_VEC_SIZE,
DOWNSWEEP_LOG_LOADS_PER_TILE,
DOWNSWEEP_LOG_RAKING_THREADS,
PARAM_END,
// Parameters below here are currently not part of the tuning sweep
UNIFORM_GRID_SIZE,
// These can be tuned, but we're currently not compelled to
UNIFORM_SMEM_ALLOCATION,
OVERSUBSCRIBED_GRID_SIZE,
READ_MODIFIER,
WRITE_MODIFIER,
// Derive these from the others above
UPSWEEP_MAX_CTA_OCCUPANCY,
DOWNSWEEP_MAX_CTA_OCCUPANCY,
LOG_SCHEDULE_GRANULARITY,
// General performance is insensitive to the spine kernel params
// because it's only a single-CTA: we'll just use reasonable defaults
SPINE_LOG_THREADS,
SPINE_LOG_LOAD_VEC_SIZE,
SPINE_LOG_LOADS_PER_TILE,
SPINE_LOG_RAKING_THREADS
};
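// All tunable quantities are expressed as log2 values; the Ranges specializations below give the [MIN, MAX] interval that the compile-time sweep enumerates for each parameter between PARAM_BEGIN and PARAM_END.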
/**
* Encapsulation structure for
* - Wrapping problem type and storage
* - Providing call-back for parameter-list generation
*/
template <typename T, typename OpType>
class TuneEnactor : public scan::Enactor
{
public:
T *d_dest;
T *d_src;
T *h_data;
T *h_reference;
size_t num_elements;
/**
* Ranges for the tuning params
*/
template <typename ParamList, int PARAM> struct Ranges;
// READ_MODIFIER
template <typename ParamList>
struct Ranges<ParamList, READ_MODIFIER> {
enum {
MIN = util::io::ld::NONE,
MAX = ((TUNE_ARCH < 200) || (util::NumericTraits<T>::REPRESENTATION == util::NOT_A_NUMBER)) ? util::io::ld::NONE : util::io::ld::LIMIT - 1 // No type modifiers for pre-Fermi or non-builtin types
};
};
// WRITE_MODIFIER
template <typename ParamList>
struct Ranges<ParamList, WRITE_MODIFIER> {
enum {
MIN = util::io::st::NONE,
MAX = ((TUNE_ARCH < 200) || (util::NumericTraits<T>::REPRESENTATION == util::NOT_A_NUMBER)) ? util::io::st::NONE : util::io::st::LIMIT - 1 // No type modifiers for pre-Fermi or non-builtin types
};
};
// UNIFORM_SMEM_ALLOCATION
template <typename ParamList>
struct Ranges<ParamList, UNIFORM_SMEM_ALLOCATION> {
enum {
MIN = 0,
MAX = 1
};
};
// UNIFORM_GRID_SIZE
template <typename ParamList>
struct Ranges<ParamList, UNIFORM_GRID_SIZE> {
enum {
MIN = 0,
MAX = 1
};
};
// OVERSUBSCRIBED_GRID_SIZE
template <typename ParamList>
struct Ranges<ParamList, OVERSUBSCRIBED_GRID_SIZE> {
enum {
MIN = 0,
MAX = 1
};
};
// UPSWEEP_LOG_THREADS
template <typename ParamList>
struct Ranges<ParamList, UPSWEEP_LOG_THREADS> {
enum {
MIN = B40C_LOG_WARP_THREADS(TUNE_ARCH),
MAX = B40C_LOG_CTA_THREADS(TUNE_ARCH)
};
};
// UPSWEEP_LOG_LOAD_VEC_SIZE
template <typename ParamList>
struct Ranges<ParamList, UPSWEEP_LOG_LOAD_VEC_SIZE> {
enum {
MIN = 0,
MAX = 2
};
};
// UPSWEEP_LOG_LOADS_PER_TILE
template <typename ParamList>
struct Ranges<ParamList, UPSWEEP_LOG_LOADS_PER_TILE> {
enum {
MIN = 0,
MAX = 2
};
};
// DOWNSWEEP_LOG_THREADS
template <typename ParamList>
struct Ranges<ParamList, DOWNSWEEP_LOG_THREADS> {
enum {
MIN = B40C_LOG_WARP_THREADS(TUNE_ARCH),
MAX = B40C_LOG_CTA_THREADS(TUNE_ARCH)
};
};
// DOWNSWEEP_LOG_LOAD_VEC_SIZE
template <typename ParamList>
struct Ranges<ParamList, DOWNSWEEP_LOG_LOAD_VEC_SIZE> {
enum {
MIN = 0,
MAX = 2
};
};
// DOWNSWEEP_LOG_LOADS_PER_TILE
template <typename ParamList>
struct Ranges<ParamList, DOWNSWEEP_LOG_LOADS_PER_TILE> {
enum {
MIN = 0,
MAX = 2
};
};
// DOWNSWEEP_LOG_RAKING_THREADS
template <typename ParamList>
struct Ranges<ParamList, DOWNSWEEP_LOG_RAKING_THREADS> {
enum {
MIN = B40C_LOG_WARP_THREADS(TUNE_ARCH),
MAX = util::Access<ParamList, DOWNSWEEP_LOG_THREADS>::VALUE
};
};
/**
* Constructor
*/
TuneEnactor(size_t num_elements) :
scan::Enactor(), d_dest(NULL), d_src(NULL), h_data(NULL), h_reference(NULL), num_elements(num_elements) {}
/**
* Timed scan for applying a specific granularity configuration type
*/
template <typename ProblemConfig>
void TimedScan()
{
printf("%lu, ", (unsigned long) sizeof(T));
ProblemConfig::Print();
fflush(stdout);
// Perform a single iteration to allocate any memory if needed, prime code caches, etc.
this->ENACTOR_DEBUG = g_verbose;
if (this->template Enact<ProblemConfig>(d_dest, d_src, num_elements, g_max_ctas)) {
exit(1);
}
this->ENACTOR_DEBUG = false;
// Perform the timed number of iterations
hipEvent_t start_event, stop_event;
hipEventCreate(&start_event);
hipEventCreate(&stop_event);
double elapsed = 0;
float duration = 0;
for (int i = 0; i < g_iterations; i++) {
// Start cuda timing record
hipEventRecord(start_event, 0);
// Call the scan API routine
if (this->template Enact<ProblemConfig>(d_dest, d_src, num_elements, g_max_ctas)) {
exit(1);
}
// End cuda timing record
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&duration, start_event, stop_event);
elapsed += (double) duration;
// Flushes any stdio from the GPU
hipDeviceSynchronize();
}
// Display timing information
double avg_runtime = elapsed / g_iterations;
double throughput = 0.0;
if (avg_runtime > 0.0) throughput = ((double) num_elements) / avg_runtime / 1000.0 / 1000.0;
printf(", %f, %f, %f, ",
avg_runtime, throughput, throughput * sizeof(T) * 3);
fflush(stdout);
// Clean up events
hipEventDestroy(start_event);
hipEventDestroy(stop_event);
if (g_verify) {
// Copy out data
if (util::B40CPerror(hipMemcpy(h_data, d_dest, sizeof(T) * num_elements, hipMemcpyDeviceToHost),
"TimedScan hipMemcpy d_dest failed: ", __FILE__, __LINE__)) exit(1);
// Verify solution
CompareResults<T>(h_data, h_reference, num_elements, true);
printf("\n");
}
fflush(stdout);
}
template <typename ProblemConfig, bool VALID>
struct LaunchValidConfig
{
static void Invoke(TuneEnactor *detail)
{
detail->TimedScan<ProblemConfig>();
}
};
template <typename ProblemConfig>
struct LaunchValidConfig <ProblemConfig, false>
{
static void Invoke(TuneEnactor *detail) {}
};
/**
* Callback invoked by parameter-list generation
*/
template <typename ParamList>
void Invoke()
{
const int C_READ_MODIFIER =
// util::Access<ParamList, READ_MODIFIER>::VALUE; // These can be tuned, but we're currently not compelled to
util::io::ld::NONE;
const int C_WRITE_MODIFIER =
// util::Access<ParamList, WRITE_MODIFIER>::VALUE; // These can be tuned, but we're currently not compelled to
util::io::ld::NONE;
const int C_UNIFORM_SMEM_ALLOCATION =
// util::Access<ParamList, UNIFORM_SMEM_ALLOCATION>::VALUE;
0;
const int C_UNIFORM_GRID_SIZE =
// util::Access<ParamList, UNIFORM_GRID_SIZE>::VALUE;
0;
const int C_OVERSUBSCRIBED_GRID_SIZE =
// util::Access<ParamList, OVERSUBSCRIBED_GRID_SIZE>::VALUE;
0;
const int C_UPSWEEP_LOG_THREADS =
util::Access<ParamList, UPSWEEP_LOG_THREADS>::VALUE;
const int C_UPSWEEP_LOG_LOAD_VEC_SIZE =
util::Access<ParamList, UPSWEEP_LOG_LOAD_VEC_SIZE>::VALUE;
const int C_UPSWEEP_LOG_LOADS_PER_TILE =
util::Access<ParamList, UPSWEEP_LOG_LOADS_PER_TILE>::VALUE;
const int C_UPSWEEP_MAX_CTA_OCCUPANCY =
// util::Access<ParamList, UPSWEEP_MAX_CTA_OCCUPANCY>::VALUE;
B40C_SM_CTAS(TUNE_ARCH);
const int C_DOWNSWEEP_LOG_THREADS =
util::Access<ParamList, DOWNSWEEP_LOG_THREADS>::VALUE;
const int C_DOWNSWEEP_LOG_LOAD_VEC_SIZE =
util::Access<ParamList, DOWNSWEEP_LOG_LOAD_VEC_SIZE>::VALUE;
const int C_DOWNSWEEP_LOG_LOADS_PER_TILE =
util::Access<ParamList, DOWNSWEEP_LOG_LOADS_PER_TILE>::VALUE;
const int C_DOWNSWEEP_LOG_RAKING_THREADS =
util::Access<ParamList, DOWNSWEEP_LOG_RAKING_THREADS>::VALUE;
// B40C_LOG_WARP_THREADS(TUNE_ARCH);
const int C_DOWNSWEEP_MAX_CTA_OCCUPANCY =
// util::Access<ParamList, DOWNSWEEP_MAX_CTA_OCCUPANCY>::VALUE;
B40C_SM_CTAS(TUNE_ARCH);
const int C_UPSWEEP_LOG_SCHEDULE_GRANULARITY =
C_UPSWEEP_LOG_LOADS_PER_TILE +
C_UPSWEEP_LOG_LOAD_VEC_SIZE +
C_UPSWEEP_LOG_THREADS;
const int C_DOWNSWEEP_LOG_SCHEDULE_GRANULARITY =
C_DOWNSWEEP_LOG_LOADS_PER_TILE +
C_DOWNSWEEP_LOG_LOAD_VEC_SIZE +
C_DOWNSWEEP_LOG_THREADS;
// TODO: figure out if we should use min here instead
const int C_LOG_SCHEDULE_GRANULARITY = B40C_MAX(
C_UPSWEEP_LOG_SCHEDULE_GRANULARITY,
C_DOWNSWEEP_LOG_SCHEDULE_GRANULARITY);
// General performance is insensitive to the spine config because it's only a single CTA:
// simply use reasonable defaults
const int C_SPINE_LOG_THREADS = 8;
const int C_SPINE_LOG_LOAD_VEC_SIZE = 0;
const int C_SPINE_LOG_LOADS_PER_TILE = 1;
const int C_SPINE_LOG_RAKING_THREADS = B40C_LOG_WARP_THREADS(TUNE_ARCH);
// Establish the problem type
const bool EXCLUSIVE = true;
typedef scan::ProblemType<
T,
size_t,
EXCLUSIVE,
OpType::BinaryOp,
OpType::Identity> ProblemType;
// Establish the granularity configuration type
typedef scan::ProblemConfig <
ProblemType,
TUNE_ARCH,
(util::io::ld::CacheModifier) C_READ_MODIFIER,
(util::io::st::CacheModifier) C_WRITE_MODIFIER,
C_UNIFORM_SMEM_ALLOCATION,
C_UNIFORM_GRID_SIZE,
C_OVERSUBSCRIBED_GRID_SIZE,
C_LOG_SCHEDULE_GRANULARITY,
C_UPSWEEP_MAX_CTA_OCCUPANCY,
C_UPSWEEP_LOG_THREADS,
C_UPSWEEP_LOG_LOAD_VEC_SIZE,
C_UPSWEEP_LOG_LOADS_PER_TILE,
C_SPINE_LOG_THREADS,
C_SPINE_LOG_LOAD_VEC_SIZE,
C_SPINE_LOG_LOADS_PER_TILE,
C_SPINE_LOG_RAKING_THREADS,
C_DOWNSWEEP_MAX_CTA_OCCUPANCY,
C_DOWNSWEEP_LOG_THREADS,
C_DOWNSWEEP_LOG_LOAD_VEC_SIZE,
C_DOWNSWEEP_LOG_LOADS_PER_TILE,
C_DOWNSWEEP_LOG_RAKING_THREADS> ProblemConfig;
LaunchValidConfig<ProblemConfig, ProblemConfig::VALID>::Invoke(this);
}
};
/**
* Creates an example scan problem and then dispatches the problem
* to the GPU for the given number of iterations, displaying runtime information.
*/
template<typename T, typename OpType>
void TestScan(size_t num_elements)
{
// Allocate storage and enactor
typedef TuneEnactor<T, OpType> Detail;
Detail detail(num_elements);
if (util::B40CPerror(hipMalloc((void**) &detail.d_src, sizeof(T) * num_elements),
"TimedScan hipMalloc d_src failed: ", __FILE__, __LINE__)) exit(1);
if (util::B40CPerror(hipMalloc((void**) &detail.d_dest, sizeof(T) * num_elements),
"TimedScan hipMalloc d_dest failed: ", __FILE__, __LINE__)) exit(1);
if ((detail.h_data = (T*) malloc(sizeof(T) * num_elements)) == NULL) {
fprintf(stderr, "Host malloc of problem data failed\n");
exit(1);
}
if ((detail.h_reference = (T*) malloc(sizeof(T) * num_elements)) == NULL) {
fprintf(stderr, "Host malloc of problem data failed\n");
exit(1);
}
detail.h_reference[0] = OpType::Identity();
for (size_t i = 0; i < num_elements; ++i) {
// util::RandomBits<T>(h_data[i], 0);
detail.h_data[i] = i;
detail.h_reference[i] = (i == 0) ?
OpType::Identity() :
OpType::BinaryOp(detail.h_reference[i - 1], detail.h_data[i - 1]);
}
// Move a fresh copy of the problem into device storage
if (util::B40CPerror(hipMemcpy(detail.d_src, detail.h_data, sizeof(T) * num_elements, hipMemcpyHostToDevice),
"TimedScan hipMemcpy d_src failed: ", __FILE__, __LINE__)) exit(1);
// Run the timing tests
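// ParamListSweep expands every combination of the tuning parameters between PARAM_BEGIN and PARAM_END (using the Ranges bounds above) and calls Invoke<ParamList>() on the enactor for each one, so a separate ProblemConfig is instantiated and timed per combination.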
util::ParamListSweep<
Detail,
PARAM_BEGIN + 1,
PARAM_END,
Detail::template Ranges>::template Invoke<util::EmptyTuple>(detail);
// Free allocated memory
if (detail.d_src) hipFree(detail.d_src);
if (detail.d_dest) hipFree(detail.d_dest);
// Free our allocated host memory
if (detail.h_data) free(detail.h_data);
if (detail.h_reference) free(detail.h_reference);
}
/******************************************************************************
* Main
******************************************************************************/
int main(int argc, char** argv)
{
CommandLineArgs args(argc, argv);
DeviceInit(args);
//srand(time(NULL));
srand(0); // presently deterministic
size_t num_elements = 1024;
// Check command line arguments
if (args.CheckCmdLineFlag("help")) {
Usage();
return 0;
}
args.GetCmdLineArgument("i", g_iterations);
args.GetCmdLineArgument("n", num_elements);
args.GetCmdLineArgument("max-ctas", g_max_ctas);
g_verbose = args.CheckCmdLineFlag("v");
g_verify = args.CheckCmdLineFlag("verify");
util::CudaProperties cuda_props;
printf("Test Scan: %d iterations, %lu elements", g_iterations, (unsigned long) num_elements);
printf("\nCodeGen: \t[device_sm_version: %d, kernel_ptx_version: %d]\n\n",
cuda_props.device_sm_version, cuda_props.kernel_ptx_version);
printf("sizeof(T), READ_MODIFIER, WRITE_MODIFIER, UNIFORM_SMEM_ALLOCATION, UNIFORM_GRID_SIZE, OVERSUBSCRIBED_GRID_SIZE, LOG_SCHEDULE_GRANULARITY, "
"UPSWEEP_MAX_CTA_OCCUPANCY, UPSWEEP_LOG_THREADS, UPSWEEP_LOG_LOAD_VEC_SIZE, UPSWEEP_LOG_LOADS_PER_TILE, "
"SPINE_LOG_THREADS, SPINE_LOG_LOAD_VEC_SIZE, SPINE_LOG_LOADS_PER_TILE, SPINE_LOG_RAKING_THREADS, "
"DOWNSWEEP_MAX_CTA_OCCUPANCY, DOWNSWEEP_LOG_THREADS, DOWNSWEEP_LOG_LOAD_VEC_SIZE, DOWNSWEEP_LOG_LOADS_PER_TILE, DOWNSWEEP_LOG_RAKING_THREADS, "
"elapsed time (ms), throughput (10^9 items/s), bandwidth (10^9 B/s), Correctness\n");
// Execute test(s)
#if TUNE_SIZE == 1
typedef unsigned char T;
TestScan<T, Sum<T> >(num_elements * 4);
#elif TUNE_SIZE == 2
typedef unsigned short T;
TestScan<T, Sum<T> >(num_elements * 2);
#elif TUNE_SIZE == 4
typedef unsigned int T;
TestScan<T, Sum<T> >(num_elements);
#elif TUNE_SIZE == 8
typedef unsigned long long T;
TestScan<T, Sum<T> >(num_elements / 2);
#endif
return 0;
}
| 4e2fcfeb4dae04ccbaaf231d6b17508b9012e374.cu | /******************************************************************************
*
* Copyright 2010-2011 Duane Merrill
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* For more information, see our Google Code project site:
* http://code.google.com/p/back40computing/
*
******************************************************************************/
/******************************************************************************
* Tuning tool for establishing optimal scan granularity configuration types
******************************************************************************/
#include <stdio.h>
// Scan includes
#include <b40c/util/arch_dispatch.cuh>
#include <b40c/scan/problem_type.cuh>
#include <b40c/scan/problem_config.cuh>
#include <b40c/scan/enactor.cuh>
#include <b40c/util/cuda_properties.cuh>
#include <b40c/util/numeric_traits.cuh>
#include <b40c/util/parameter_generation.cuh>
// Test utils
#include "b40c_test_util.h"
using namespace b40c;
/******************************************************************************
* Defines, constants, globals, and utility types
******************************************************************************/
#ifndef TUNE_ARCH
#define TUNE_ARCH (200)
#endif
#ifndef TUNE_SIZE
#define TUNE_SIZE (4)
#endif
bool g_verbose;
int g_max_ctas = 0;
int g_iterations = 1;
bool g_verify;
template <typename T>
struct Sum
{
static __host__ __device__ __forceinline__ T BinaryOp(const T &a, const T &b)
{
return a + b;
}
static __host__ __device__ __forceinline__ T Identity()
{
return 0;
}
};
template <typename T>
struct Max
{
static __host__ __device__ __forceinline__ T BinaryOp(const T &a, const T &b)
{
return (a > b) ? a : b;
}
static __host__ __device__ __forceinline__ T Identity()
{
return 0;
}
};
/******************************************************************************
* Utility routines
******************************************************************************/
/**
* Displays the commandline usage for this tool
*/
void Usage()
{
printf("\ntune_scan [--device=<device index>] [--v] [--i=<num-iterations>] "
"[--max-ctas=<max-thread-blocks>] [--n=<num-elements>]\n");
printf("\n");
printf("\t--v\tDisplays verbose configuration to the console.\n");
printf("\n");
printf("\t--i\tPerforms the scan operation <num-iterations> times\n");
printf("\t\t\ton the device. Re-copies original input each time. Default = 1\n");
printf("\n");
printf("\t--n\tThe number of elements to comprise the sample problem\n");
printf("\t\t\tDefault = 512\n");
printf("\n");
}
/******************************************************************************
* Tuning Parameter Enumerations and Ranges
******************************************************************************/
/**
* Enumerated tuning params
*/
enum TuningParam {
PARAM_BEGIN,
UPSWEEP_LOG_THREADS,
UPSWEEP_LOG_LOAD_VEC_SIZE,
UPSWEEP_LOG_LOADS_PER_TILE,
DOWNSWEEP_LOG_THREADS,
DOWNSWEEP_LOG_LOAD_VEC_SIZE,
DOWNSWEEP_LOG_LOADS_PER_TILE,
DOWNSWEEP_LOG_RAKING_THREADS,
PARAM_END,
// Parameters below here are currently not part of the tuning sweep
UNIFORM_GRID_SIZE,
// These can be tuned, but we're currently not compelled to
UNIFORM_SMEM_ALLOCATION,
OVERSUBSCRIBED_GRID_SIZE,
READ_MODIFIER,
WRITE_MODIFIER,
// Derive these from the others above
UPSWEEP_MAX_CTA_OCCUPANCY,
DOWNSWEEP_MAX_CTA_OCCUPANCY,
LOG_SCHEDULE_GRANULARITY,
// General performance is insensitive to the spine kernel params
// because it's only a single-CTA: we'll just use reasonable defaults
SPINE_LOG_THREADS,
SPINE_LOG_LOAD_VEC_SIZE,
SPINE_LOG_LOADS_PER_TILE,
SPINE_LOG_RAKING_THREADS
};
/**
* Encapsulation structure for
* - Wrapping problem type and storage
* - Providing call-back for parameter-list generation
*/
template <typename T, typename OpType>
class TuneEnactor : public scan::Enactor
{
public:
T *d_dest;
T *d_src;
T *h_data;
T *h_reference;
size_t num_elements;
/**
* Ranges for the tuning params
*/
template <typename ParamList, int PARAM> struct Ranges;
// READ_MODIFIER
template <typename ParamList>
struct Ranges<ParamList, READ_MODIFIER> {
enum {
MIN = util::io::ld::NONE,
MAX = ((TUNE_ARCH < 200) || (util::NumericTraits<T>::REPRESENTATION == util::NOT_A_NUMBER)) ? util::io::ld::NONE : util::io::ld::LIMIT - 1 // No type modifiers for pre-Fermi or non-builtin types
};
};
// WRITE_MODIFIER
template <typename ParamList>
struct Ranges<ParamList, WRITE_MODIFIER> {
enum {
MIN = util::io::st::NONE,
MAX = ((TUNE_ARCH < 200) || (util::NumericTraits<T>::REPRESENTATION == util::NOT_A_NUMBER)) ? util::io::st::NONE : util::io::st::LIMIT - 1 // No type modifiers for pre-Fermi or non-builtin types
};
};
// UNIFORM_SMEM_ALLOCATION
template <typename ParamList>
struct Ranges<ParamList, UNIFORM_SMEM_ALLOCATION> {
enum {
MIN = 0,
MAX = 1
};
};
// UNIFORM_GRID_SIZE
template <typename ParamList>
struct Ranges<ParamList, UNIFORM_GRID_SIZE> {
enum {
MIN = 0,
MAX = 1
};
};
// OVERSUBSCRIBED_GRID_SIZE
template <typename ParamList>
struct Ranges<ParamList, OVERSUBSCRIBED_GRID_SIZE> {
enum {
MIN = 0,
MAX = 1
};
};
// UPSWEEP_LOG_THREADS
template <typename ParamList>
struct Ranges<ParamList, UPSWEEP_LOG_THREADS> {
enum {
MIN = B40C_LOG_WARP_THREADS(TUNE_ARCH),
MAX = B40C_LOG_CTA_THREADS(TUNE_ARCH)
};
};
// UPSWEEP_LOG_LOAD_VEC_SIZE
template <typename ParamList>
struct Ranges<ParamList, UPSWEEP_LOG_LOAD_VEC_SIZE> {
enum {
MIN = 0,
MAX = 2
};
};
// UPSWEEP_LOG_LOADS_PER_TILE
template <typename ParamList>
struct Ranges<ParamList, UPSWEEP_LOG_LOADS_PER_TILE> {
enum {
MIN = 0,
MAX = 2
};
};
// DOWNSWEEP_LOG_THREADS
template <typename ParamList>
struct Ranges<ParamList, DOWNSWEEP_LOG_THREADS> {
enum {
MIN = B40C_LOG_WARP_THREADS(TUNE_ARCH),
MAX = B40C_LOG_CTA_THREADS(TUNE_ARCH)
};
};
// DOWNSWEEP_LOG_LOAD_VEC_SIZE
template <typename ParamList>
struct Ranges<ParamList, DOWNSWEEP_LOG_LOAD_VEC_SIZE> {
enum {
MIN = 0,
MAX = 2
};
};
// DOWNSWEEP_LOG_LOADS_PER_TILE
template <typename ParamList>
struct Ranges<ParamList, DOWNSWEEP_LOG_LOADS_PER_TILE> {
enum {
MIN = 0,
MAX = 2
};
};
// DOWNSWEEP_LOG_RAKING_THREADS
template <typename ParamList>
struct Ranges<ParamList, DOWNSWEEP_LOG_RAKING_THREADS> {
enum {
MIN = B40C_LOG_WARP_THREADS(TUNE_ARCH),
MAX = util::Access<ParamList, DOWNSWEEP_LOG_THREADS>::VALUE
};
};
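// Each Ranges<> specialization above bounds one tuning parameter. The
// util::ParamListSweep call in TestScan() instantiates TimedScan() for every
// point in the cross product of these [MIN, MAX] ranges at compile time, so
// widening any range multiplies the number of generated configurations (and
// the build time) accordingly.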
/**
* Constructor
*/
TuneEnactor(size_t num_elements) :
scan::Enactor(), d_dest(NULL), d_src(NULL), h_data(NULL), h_reference(NULL), num_elements(num_elements) {}
/**
* Timed scan for applying a specific granularity configuration type
*/
template <typename ProblemConfig>
void TimedScan()
{
printf("%lu, ", (unsigned long) sizeof(T));
ProblemConfig::Print();
fflush(stdout);
// Perform a single iteration to allocate any memory if needed, prime code caches, etc.
this->ENACTOR_DEBUG = g_verbose;
if (this->template Enact<ProblemConfig>(d_dest, d_src, num_elements, g_max_ctas)) {
exit(1);
}
this->ENACTOR_DEBUG = false;
// Perform the timed number of iterations
cudaEvent_t start_event, stop_event;
cudaEventCreate(&start_event);
cudaEventCreate(&stop_event);
double elapsed = 0;
float duration = 0;
for (int i = 0; i < g_iterations; i++) {
// Start cuda timing record
cudaEventRecord(start_event, 0);
// Call the scan API routine
if (this->template Enact<ProblemConfig>(d_dest, d_src, num_elements, g_max_ctas)) {
exit(1);
}
// End cuda timing record
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&duration, start_event, stop_event);
elapsed += (double) duration;
// Flushes any stdio from the GPU
cudaThreadSynchronize();
}
// Display timing information
double avg_runtime = elapsed / g_iterations;
double throughput = 0.0;
if (avg_runtime > 0.0) throughput = ((double) num_elements) / avg_runtime / 1000.0 / 1000.0;
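// avg_runtime is in milliseconds, so the expression above yields throughput in
// units of 10^9 items/s (matching the CSV header printed in main). The
// bandwidth column multiplies by sizeof(T) * 3, presumably counting one read
// by the upsweep pass plus one read and one write by the downsweep pass per
// element.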
printf(", %f, %f, %f, ",
avg_runtime, throughput, throughput * sizeof(T) * 3);
fflush(stdout);
// Clean up events
cudaEventDestroy(start_event);
cudaEventDestroy(stop_event);
if (g_verify) {
// Copy out data
if (util::B40CPerror(cudaMemcpy(h_data, d_dest, sizeof(T) * num_elements, cudaMemcpyDeviceToHost),
"TimedScan cudaMemcpy d_dest failed: ", __FILE__, __LINE__)) exit(1);
// Verify solution
CompareResults<T>(h_data, h_reference, num_elements, true);
printf("\n");
}
fflush(stdout);
}
template <typename ProblemConfig, bool VALID>
struct LaunchValidConfig
{
static void Invoke(TuneEnactor *detail)
{
detail->TimedScan<ProblemConfig>();
}
};
template <typename ProblemConfig>
struct LaunchValidConfig <ProblemConfig, false>
{
static void Invoke(TuneEnactor *detail) {}
};
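// The <ProblemConfig, false> specialization above turns tuning points whose
// ProblemConfig::VALID evaluates to false into no-ops, so the sweep can
// instantiate every parameter combination without tripping over
// configurations the target architecture cannot support.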
/**
* Callback invoked by parameter-list generation
*/
template <typename ParamList>
void Invoke()
{
const int C_READ_MODIFIER =
// util::Access<ParamList, READ_MODIFIER>::VALUE; // These can be tuned, but we're currently not compelled to
util::io::ld::NONE;
const int C_WRITE_MODIFIER =
// util::Access<ParamList, WRITE_MODIFIER>::VALUE; // These can be tuned, but we're currently not compelled to
util::io::st::NONE; // store-modifier namespace, matching the (util::io::st::CacheModifier) cast below
const int C_UNIFORM_SMEM_ALLOCATION =
// util::Access<ParamList, UNIFORM_SMEM_ALLOCATION>::VALUE;
0;
const int C_UNIFORM_GRID_SIZE =
// util::Access<ParamList, UNIFORM_GRID_SIZE>::VALUE;
0;
const int C_OVERSUBSCRIBED_GRID_SIZE =
// util::Access<ParamList, OVERSUBSCRIBED_GRID_SIZE>::VALUE;
0;
const int C_UPSWEEP_LOG_THREADS =
util::Access<ParamList, UPSWEEP_LOG_THREADS>::VALUE;
const int C_UPSWEEP_LOG_LOAD_VEC_SIZE =
util::Access<ParamList, UPSWEEP_LOG_LOAD_VEC_SIZE>::VALUE;
const int C_UPSWEEP_LOG_LOADS_PER_TILE =
util::Access<ParamList, UPSWEEP_LOG_LOADS_PER_TILE>::VALUE;
const int C_UPSWEEP_MAX_CTA_OCCUPANCY =
// util::Access<ParamList, UPSWEEP_MAX_CTA_OCCUPANCY>::VALUE;
B40C_SM_CTAS(TUNE_ARCH);
const int C_DOWNSWEEP_LOG_THREADS =
util::Access<ParamList, DOWNSWEEP_LOG_THREADS>::VALUE;
const int C_DOWNSWEEP_LOG_LOAD_VEC_SIZE =
util::Access<ParamList, DOWNSWEEP_LOG_LOAD_VEC_SIZE>::VALUE;
const int C_DOWNSWEEP_LOG_LOADS_PER_TILE =
util::Access<ParamList, DOWNSWEEP_LOG_LOADS_PER_TILE>::VALUE;
const int C_DOWNSWEEP_LOG_RAKING_THREADS =
util::Access<ParamList, DOWNSWEEP_LOG_RAKING_THREADS>::VALUE;
// B40C_LOG_WARP_THREADS(TUNE_ARCH);
const int C_DOWNSWEEP_MAX_CTA_OCCUPANCY =
// util::Access<ParamList, DOWNSWEEP_MAX_CTA_OCCUPANCY>::VALUE;
B40C_SM_CTAS(TUNE_ARCH);
const int C_UPSWEEP_LOG_SCHEDULE_GRANULARITY =
C_UPSWEEP_LOG_LOADS_PER_TILE +
C_UPSWEEP_LOG_LOAD_VEC_SIZE +
C_UPSWEEP_LOG_THREADS;
const int C_DOWNSWEEP_LOG_SCHEDULE_GRANULARITY =
C_DOWNSWEEP_LOG_LOADS_PER_TILE +
C_DOWNSWEEP_LOG_LOAD_VEC_SIZE +
C_DOWNSWEEP_LOG_THREADS;
// TODO: figure out if we should use min here instead
const int C_LOG_SCHEDULE_GRANULARITY = B40C_MAX(
C_UPSWEEP_LOG_SCHEDULE_GRANULARITY,
C_DOWNSWEEP_LOG_SCHEDULE_GRANULARITY);
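// 2^LOG_SCHEDULE_GRANULARITY is the tile size, in elements, handed out per CTA
// iteration (loads-per-tile * load vector size * threads). Both candidates are
// powers of two, so taking the max keeps the scheduling granularity a multiple
// of both the upsweep and downsweep tile sizes.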
// General performance is insensitive to spine config it's only a single-CTA:
// simply use reasonable defaults
const int C_SPINE_LOG_THREADS = 8;
const int C_SPINE_LOG_LOAD_VEC_SIZE = 0;
const int C_SPINE_LOG_LOADS_PER_TILE = 1;
const int C_SPINE_LOG_RAKING_THREADS = B40C_LOG_WARP_THREADS(TUNE_ARCH);
// Establish the problem type
const bool EXCLUSIVE = true;
typedef scan::ProblemType<
T,
size_t,
EXCLUSIVE,
OpType::BinaryOp,
OpType::Identity> ProblemType;
// Establish the granularity configuration type
typedef scan::ProblemConfig <
ProblemType,
TUNE_ARCH,
(util::io::ld::CacheModifier) C_READ_MODIFIER,
(util::io::st::CacheModifier) C_WRITE_MODIFIER,
C_UNIFORM_SMEM_ALLOCATION,
C_UNIFORM_GRID_SIZE,
C_OVERSUBSCRIBED_GRID_SIZE,
C_LOG_SCHEDULE_GRANULARITY,
C_UPSWEEP_MAX_CTA_OCCUPANCY,
C_UPSWEEP_LOG_THREADS,
C_UPSWEEP_LOG_LOAD_VEC_SIZE,
C_UPSWEEP_LOG_LOADS_PER_TILE,
C_SPINE_LOG_THREADS,
C_SPINE_LOG_LOAD_VEC_SIZE,
C_SPINE_LOG_LOADS_PER_TILE,
C_SPINE_LOG_RAKING_THREADS,
C_DOWNSWEEP_MAX_CTA_OCCUPANCY,
C_DOWNSWEEP_LOG_THREADS,
C_DOWNSWEEP_LOG_LOAD_VEC_SIZE,
C_DOWNSWEEP_LOG_LOADS_PER_TILE,
C_DOWNSWEEP_LOG_RAKING_THREADS> ProblemConfig;
LaunchValidConfig<ProblemConfig, ProblemConfig::VALID>::Invoke(this);
}
};
/**
* Creates an example scan problem and then dispatches the problem
* to the GPU for the given number of iterations, displaying runtime information.
*/
template<typename T, typename OpType>
void TestScan(size_t num_elements)
{
// Allocate storage and enactor
typedef TuneEnactor<T, OpType> Detail;
Detail detail(num_elements);
if (util::B40CPerror(cudaMalloc((void**) &detail.d_src, sizeof(T) * num_elements),
"TimedScan cudaMalloc d_src failed: ", __FILE__, __LINE__)) exit(1);
if (util::B40CPerror(cudaMalloc((void**) &detail.d_dest, sizeof(T) * num_elements),
"TimedScan cudaMalloc d_dest failed: ", __FILE__, __LINE__)) exit(1);
if ((detail.h_data = (T*) malloc(sizeof(T) * num_elements)) == NULL) {
fprintf(stderr, "Host malloc of problem data failed\n");
exit(1);
}
if ((detail.h_reference = (T*) malloc(sizeof(T) * num_elements)) == NULL) {
fprintf(stderr, "Host malloc of problem data failed\n");
exit(1);
}
detail.h_reference[0] = OpType::Identity();
for (size_t i = 0; i < num_elements; ++i) {
// util::RandomBits<T>(h_data[i], 0);
detail.h_data[i] = i;
detail.h_reference[i] = (i == 0) ?
OpType::Identity() :
OpType::BinaryOp(detail.h_reference[i - 1], detail.h_data[i - 1]);
}
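// The loop above builds the host reference as an exclusive scan of h_data
// (EXCLUSIVE is set to true in TuneEnactor::Invoke()); CompareResults checks
// the GPU output against it when --verify is specified.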
// Move a fresh copy of the problem into device storage
if (util::B40CPerror(cudaMemcpy(detail.d_src, detail.h_data, sizeof(T) * num_elements, cudaMemcpyHostToDevice),
"TimedScan cudaMemcpy d_src failed: ", __FILE__, __LINE__)) exit(1);
// Run the timing tests
util::ParamListSweep<
Detail,
PARAM_BEGIN + 1,
PARAM_END,
Detail::template Ranges>::template Invoke<util::EmptyTuple>(detail);
// Free allocated memory
if (detail.d_src) cudaFree(detail.d_src);
if (detail.d_dest) cudaFree(detail.d_dest);
// Free our allocated host memory
if (detail.h_data) free(detail.h_data);
if (detail.h_reference) free(detail.h_reference);
}
/******************************************************************************
* Main
******************************************************************************/
int main(int argc, char** argv)
{
CommandLineArgs args(argc, argv);
DeviceInit(args);
//srand(time(NULL));
srand(0); // presently deterministic
size_t num_elements = 1024;
// Check command line arguments
if (args.CheckCmdLineFlag("help")) {
Usage();
return 0;
}
args.GetCmdLineArgument("i", g_iterations);
args.GetCmdLineArgument("n", num_elements);
args.GetCmdLineArgument("max-ctas", g_max_ctas);
g_verbose = args.CheckCmdLineFlag("v");
g_verify = args.CheckCmdLineFlag("verify");
util::CudaProperties cuda_props;
printf("Test Scan: %d iterations, %lu elements", g_iterations, (unsigned long) num_elements);
printf("\nCodeGen: \t[device_sm_version: %d, kernel_ptx_version: %d]\n\n",
cuda_props.device_sm_version, cuda_props.kernel_ptx_version);
printf("sizeof(T), READ_MODIFIER, WRITE_MODIFIER, UNIFORM_SMEM_ALLOCATION, UNIFORM_GRID_SIZE, OVERSUBSCRIBED_GRID_SIZE, LOG_SCHEDULE_GRANULARITY, "
"UPSWEEP_MAX_CTA_OCCUPANCY, UPSWEEP_LOG_THREADS, UPSWEEP_LOG_LOAD_VEC_SIZE, UPSWEEP_LOG_LOADS_PER_TILE, "
"SPINE_LOG_THREADS, SPINE_LOG_LOAD_VEC_SIZE, SPINE_LOG_LOADS_PER_TILE, SPINE_LOG_RAKING_THREADS, "
"DOWNSWEEP_MAX_CTA_OCCUPANCY, DOWNSWEEP_LOG_THREADS, DOWNSWEEP_LOG_LOAD_VEC_SIZE, DOWNSWEEP_LOG_LOADS_PER_TILE, DOWNSWEEP_LOG_RAKING_THREADS, "
"elapsed time (ms), throughput (10^9 items/s), bandwidth (10^9 B/s), Correctness\n");
// Execute test(s)
#if TUNE_SIZE == 1
typedef unsigned char T;
TestScan<T, Sum<T> >(num_elements * 4);
#elif TUNE_SIZE == 2
typedef unsigned short T;
TestScan<T, Sum<T> >(num_elements * 2);
#elif TUNE_SIZE == 4
typedef unsigned int T;
TestScan<T, Sum<T> >(num_elements);
#elif TUNE_SIZE == 8
typedef unsigned long long T;
TestScan<T, Sum<T> >(num_elements / 2);
#endif
return 0;
}
|
2d6e4758f0fb6608964c310ca0fa7c678ea7da72.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include "globals.h"
#include "cuda_functions.h"
#include "cuda_math.h"
/*
 * The L-versions of the RHS have to be run with
* - the L-version of the derivatives
* i.e.: derDev1xL instead of derDev1x
* - the L-version of the grid
* i.e.: h_gridL[0] instead of h_grid[0]
*/
__device__ myprec *d_workY1;
__device__ myprec *d_workY2;
__device__ myprec *d_workZ1;
__device__ myprec *d_workZ2;
/* The whole RHS in the X direction is calculated in RHSDeviceSharedFlxX thanks to the beneficial memory layout that allows to use small pencils */
/* For the Y and Z direction, fluxes require a small pencil discretization while the rest of the RHS can be calculated on large pencils which speed
* up significantly the computation. Therefore 5 streams are used
* stream 0 -> complete X RHS (in RHSDeviceSharedFlxX) (small pencil grid)
* stream 1 -> viscous terms and pressure terms in Y (in RHSDeviceFullYL) (large pencil grid)
* stream 2 -> viscous terms and pressure terms in Z (in RHSDeviceFullZL) (large pencil grid)
* stream 3 -> advective fluxes in Y direction (in FLXDeviceY) (small pencil transposed grid)
* stream 4 -> advective fluxes in Z direction (in FLXDeviceZ) (small pencil transposed grid)*/
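/* Illustrative host-side launch pattern implied by the decomposition above.
 * This is a sketch only: the stream objects, grid/block dimensions and the
 * argument lists are placeholders, not taken from this file.
 *
 *   hipStream_t s[5];
 *   for (int i = 0; i < 5; i++) hipStreamCreate(&s[i]);
 *   hipLaunchKernelGGL(RHSDeviceSharedFlxX, gridX,  blockX,  0, s[0], ...args...);
 *   hipLaunchKernelGGL(RHSDeviceFullYL,     gridYL, blockYL, 0, s[1], ...args...);
 *   hipLaunchKernelGGL(RHSDeviceFullZL,     gridZL, blockZL, 0, s[2], ...args...);
 *   hipLaunchKernelGGL(FLXDeviceY,          gridYT, blockYT, 0, s[3], ...args...);
 *   hipLaunchKernelGGL(FLXDeviceZ,          gridZT, blockZT, 0, s[4], ...args...);
 *   hipDeviceSynchronize();
 */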
__global__ void RHSDeviceSharedFlxX(myprec *rX, myprec *uX, myprec *vX, myprec *wX, myprec *eX,
myprec *r, myprec *u, myprec *v, myprec *w, myprec *h ,
myprec *t, myprec *p, myprec *mu, myprec *lam,
myprec *sij[9], myprec *dil) {
Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y);
id.mkidX();
int si = id.i + stencilSize; // local i for shared memory access + halo offset
int sj = id.tiy; // local j for shared memory access
myprec rXtmp=0;
myprec uXtmp=0;
myprec vXtmp=0;
myprec wXtmp=0;
myprec eXtmp=0;
myprec wrk1=0;
myprec wrk2=0;
__shared__ myprec s_r[sPencils][mx+stencilSize*2];
__shared__ myprec s_u[sPencils][mx+stencilSize*2];
__shared__ myprec s_v[sPencils][mx+stencilSize*2];
__shared__ myprec s_w[sPencils][mx+stencilSize*2];
__shared__ myprec s_h[sPencils][mx+stencilSize*2];
__shared__ myprec s_t[sPencils][mx+stencilSize*2];
__shared__ myprec s_p[sPencils][mx+stencilSize*2];
__shared__ myprec s_m[sPencils][mx+stencilSize*2];
__shared__ myprec s_l[sPencils][mx+stencilSize*2];
__shared__ myprec s_wrk[sPencils][mx+stencilSize*2];
s_r[sj][si] = r[id.g];
s_u[sj][si] = u[id.g];
s_v[sj][si] = v[id.g];
s_w[sj][si] = w[id.g];
s_h[sj][si] = h[id.g];
s_t[sj][si] = t[id.g];
s_p[sj][si] = p[id.g];
s_m[sj][si] = mu[id.g];
s_l[sj][si] = lam[id.g];
__syncthreads();
// fill in periodic images in shared memory array
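// Threads with id.i < stencilSize do double duty: they copy the last
// stencilSize interior points of the pencil into the left halo and replicate
// the first stencilSize points into the right halo, which is what makes the
// x direction periodic.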
if (id.i < stencilSize) {
s_r[sj][si-stencilSize] = s_r[sj][si+mx-stencilSize];
s_r[sj][si+mx] = s_r[sj][si];
s_u[sj][si-stencilSize] = s_u[sj][si+mx-stencilSize];
s_u[sj][si+mx] = s_u[sj][si];
s_v[sj][si-stencilSize] = s_v[sj][si+mx-stencilSize];
s_v[sj][si+mx] = s_v[sj][si];
s_w[sj][si-stencilSize] = s_w[sj][si+mx-stencilSize];
s_w[sj][si+mx] = s_w[sj][si];
s_h[sj][si-stencilSize] = s_h[sj][si+mx-stencilSize];
s_h[sj][si+mx] = s_h[sj][si];
s_t[sj][si-stencilSize] = s_t[sj][si+mx-stencilSize];
s_t[sj][si+mx] = s_t[sj][si];
s_p[sj][si-stencilSize] = s_p[sj][si+mx-stencilSize];
s_p[sj][si+mx] = s_p[sj][si];
s_m[sj][si-stencilSize] = s_m[sj][si+mx-stencilSize];
s_m[sj][si+mx] = s_m[sj][si];
s_l[sj][si-stencilSize] = s_l[sj][si+mx-stencilSize];
s_l[sj][si+mx] = s_l[sj][si];
}
__syncthreads();
// viscous fluxes derivative
derDevSharedV2x(&wrk1,s_u[sj],si);
uXtmp = wrk1*s_m[sj][si];
derDevSharedV2x(&wrk1,s_v[sj],si);
vXtmp = wrk1*s_m[sj][si];
derDevSharedV2x(&wrk1,s_w[sj],si);
wXtmp = wrk1*s_m[sj][si];
derDevSharedV2x(&wrk1,s_t[sj],si);
eXtmp = wrk1*s_l[sj][si];
__syncthreads();
derDevSharedV1x(&wrk2,s_l[sj],si); //wrk2 = d (lam) dx
derDevSharedV1x(&wrk1,s_t[sj],si); //wrk1 = d (t) dx
eXtmp = eXtmp + wrk1*wrk2;
//Adding here the terms d (mu) dx * sxj; (lambda in case of h in rhse);
derDevSharedV1x(&wrk2,s_m[sj],si); //wrk2 = d (mu) dx
uXtmp = uXtmp + wrk2*sij[0][id.g];
vXtmp = vXtmp + wrk2*sij[1][id.g];
wXtmp = wXtmp + wrk2*sij[2][id.g];
// split advection terms
//Adding here the terms - d (ru phi) dx;
fluxQuadSharedx(&wrk1,s_r[sj],s_u[sj],si);
rXtmp = wrk1;
__syncthreads();
fluxCubeSharedx(&wrk1,s_r[sj],s_u[sj],s_u[sj],si);
uXtmp = uXtmp + wrk1;
__syncthreads();
fluxCubeSharedx(&wrk1,s_r[sj],s_u[sj],s_v[sj],si);
vXtmp = vXtmp + wrk1;
__syncthreads();
fluxCubeSharedx(&wrk1,s_r[sj],s_u[sj],s_w[sj],si);
wXtmp = wXtmp + wrk1;
__syncthreads();
fluxCubeSharedx(&wrk1,s_r[sj],s_u[sj],s_h[sj],si);
eXtmp = eXtmp + wrk1;
__syncthreads();
// pressure and dilation derivatives
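// Besides -dp/dx, the momentum equation picks up + mu/3 * d(dil)/dx below;
// assuming dil holds the velocity divergence, this is the dilatational part of
// the Newtonian viscous stress divergence under Stokes' hypothesis
// (constant-mu form).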
s_wrk[sj][si] = dil[id.g];
__syncthreads();
if (id.i < stencilSize) {
s_wrk[sj][si-stencilSize] = s_wrk[sj][si+mx-stencilSize];
s_wrk[sj][si+mx] = s_wrk[sj][si];
}
__syncthreads();
derDevSharedV1x(&wrk2,s_wrk[sj],si);
derDevShared1x(&wrk1,s_p[sj],si);
uXtmp = uXtmp - wrk1 + s_m[sj][si]*wrk2*1.0/3.0;
//viscous dissipation
s_wrk[sj][si] = s_m[sj][si]*(
s_u[sj][si]*( sij[0][id.g] ) +
s_v[sj][si]*( sij[1][id.g] ) +
s_w[sj][si]*( sij[2][id.g] )
);
__syncthreads();
if (id.i < stencilSize) {
s_wrk[sj][si-stencilSize] = s_wrk[sj][si+mx-stencilSize];
s_wrk[sj][si+mx] = s_wrk[sj][si];
}
__syncthreads();
derDevSharedV1x(&wrk2,s_wrk[sj],si);
rX[id.g] = rXtmp;
uX[id.g] = uXtmp;
vX[id.g] = vXtmp;
wX[id.g] = wXtmp;
eX[id.g] = eXtmp + wrk2;
}
__global__ void RHSDeviceSharedFlxY(myprec *rY, myprec *uY, myprec *vY, myprec *wY, myprec *eY,
myprec *r, myprec *u, myprec *v, myprec *w, myprec *h ,
myprec *t, myprec *p, myprec *mu, myprec *lam,
myprec *sij[9], myprec *dil) {
Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y);
id.mkidYFlx();
int si = id.j + stencilSize; // local i for shared memory access + halo offset
int sj = id.tiy; // local j for shared memory access
myprec rYtmp=0;
myprec uYtmp=0;
myprec vYtmp=0;
myprec wYtmp=0;
myprec eYtmp=0;
myprec wrk1=0;
myprec wrk2=0;
__shared__ myprec s_r[sPencils][my+stencilSize*2];
__shared__ myprec s_u[sPencils][my+stencilSize*2];
__shared__ myprec s_v[sPencils][my+stencilSize*2];
__shared__ myprec s_w[sPencils][my+stencilSize*2];
__shared__ myprec s_h[sPencils][my+stencilSize*2];
__shared__ myprec s_t[sPencils][my+stencilSize*2];
__shared__ myprec s_p[sPencils][my+stencilSize*2];
__shared__ myprec s_m[sPencils][my+stencilSize*2];
__shared__ myprec s_l[sPencils][my+stencilSize*2];
__shared__ myprec s_wrk[sPencils][my+stencilSize*2];
s_r[sj][si] = r[id.g];
s_u[sj][si] = u[id.g];
s_v[sj][si] = v[id.g];
s_w[sj][si] = w[id.g];
s_h[sj][si] = h[id.g];
s_t[sj][si] = t[id.g];
s_p[sj][si] = p[id.g];
s_m[sj][si] = mu[id.g];
s_l[sj][si] = lam[id.g];
__syncthreads();
// fill in periodic images in shared memory array
if (id.j < stencilSize) {
s_r[sj][si-stencilSize] = s_r[sj][si+my-stencilSize];
s_r[sj][si+my] = s_r[sj][si];
s_u[sj][si-stencilSize] = s_u[sj][si+my-stencilSize];
s_u[sj][si+my] = s_u[sj][si];
s_v[sj][si-stencilSize] = s_v[sj][si+my-stencilSize];
s_v[sj][si+my] = s_v[sj][si];
s_w[sj][si-stencilSize] = s_w[sj][si+my-stencilSize];
s_w[sj][si+my] = s_w[sj][si];
s_h[sj][si-stencilSize] = s_h[sj][si+my-stencilSize];
s_h[sj][si+my] = s_h[sj][si];
s_t[sj][si-stencilSize] = s_t[sj][si+my-stencilSize];
s_t[sj][si+my] = s_t[sj][si];
s_p[sj][si-stencilSize] = s_p[sj][si+my-stencilSize];
s_p[sj][si+my] = s_p[sj][si];
s_m[sj][si-stencilSize] = s_m[sj][si+my-stencilSize];
s_m[sj][si+my] = s_m[sj][si];
s_l[sj][si-stencilSize] = s_l[sj][si+my-stencilSize];
s_l[sj][si+my] = s_l[sj][si];
}
__syncthreads();
// viscous fluxes derivative
derDevSharedV2y(&wrk1,s_u[sj],si);
uYtmp = wrk1*s_m[sj][si];
derDevSharedV2y(&wrk1,s_v[sj],si);
vYtmp = wrk1*s_m[sj][si];
derDevSharedV2y(&wrk1,s_w[sj],si);
wYtmp = wrk1*s_m[sj][si];
derDevSharedV2y(&wrk1,s_t[sj],si);
eYtmp = wrk1*s_l[sj][si];
__syncthreads();
derDevSharedV1y(&wrk2,s_l[sj],si); //wrk2 = d (lam) dy
derDevSharedV1y(&wrk1,s_t[sj],si); //wrk1 = d (t) dy
eYtmp = eYtmp + wrk1*wrk2;
//Adding here the terms d (mu) dy * syj; (lambda in case of h in rhse);
derDevSharedV1y(&wrk2,s_m[sj],si); //wrk2 = d (mu) dy
uYtmp = uYtmp + wrk2*sij[3][id.g];
vYtmp = vYtmp + wrk2*sij[4][id.g];
wYtmp = wYtmp + wrk2*sij[5][id.g];
// split advection terms
//Adding here the terms - d (ru phi) dy;
fluxQuadSharedy(&wrk1,s_r[sj],s_v[sj],si);
rYtmp = wrk1;
__syncthreads();
fluxCubeSharedy(&wrk1,s_r[sj],s_v[sj],s_u[sj],si);
uYtmp = uYtmp + wrk1;
__syncthreads();
fluxCubeSharedy(&wrk1,s_r[sj],s_v[sj],s_v[sj],si);
vYtmp = vYtmp + wrk1;
__syncthreads();
fluxCubeSharedy(&wrk1,s_r[sj],s_v[sj],s_w[sj],si);
wYtmp = wYtmp + wrk1;
__syncthreads();
fluxCubeSharedy(&wrk1,s_r[sj],s_v[sj],s_h[sj],si);
eYtmp = eYtmp + wrk1;
__syncthreads();
// pressure and dilation derivatives
s_wrk[sj][si] = dil[id.g];
__syncthreads();
if (id.j < stencilSize) {
s_wrk[sj][si-stencilSize] = s_wrk[sj][si+my-stencilSize];
s_wrk[sj][si+my] = s_wrk[sj][si];
}
__syncthreads();
derDevSharedV1y(&wrk2,s_wrk[sj],si);
derDevShared1y(&wrk1,s_p[sj],si);
vYtmp = vYtmp - wrk1 + s_m[sj][si]*wrk2*1.0/3.0;
//viscous dissipation
s_wrk[sj][si] = s_m[sj][si]*(
s_u[sj][si]*( sij[3][id.g] ) +
s_v[sj][si]*( sij[4][id.g] ) +
s_w[sj][si]*( sij[5][id.g] )
);
__syncthreads();
if (id.j < stencilSize) {
s_wrk[sj][si-stencilSize] = s_wrk[sj][si+my-stencilSize];
s_wrk[sj][si+my] = s_wrk[sj][si];
}
__syncthreads();
derDevSharedV1y(&wrk2,s_wrk[sj],si);
rY[id.g] = rYtmp;
uY[id.g] = uYtmp;
vY[id.g] = vYtmp;
wY[id.g] = wYtmp;
eY[id.g] = eYtmp + wrk2;
}
__global__ void RHSDeviceSharedFlxZ(myprec *rZ, myprec *uZ, myprec *vZ, myprec *wZ, myprec *eZ,
myprec *r, myprec *u, myprec *v, myprec *w, myprec *h ,
myprec *t, myprec *p, myprec *mu, myprec *lam,
myprec *sij[9], myprec *dil) {
Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y);
id.mkidZFlx();
int si = id.k + stencilSize; // local i for shared memory access + halo offset
int sj = id.tiy; // local j for shared memory access
myprec rZtmp=0;
myprec uZtmp=0;
myprec vZtmp=0;
myprec wZtmp=0;
myprec eZtmp=0;
myprec wrk1=0;
myprec wrk2=0;
__shared__ myprec s_r[sPencils][mz+stencilSize*2];
__shared__ myprec s_u[sPencils][mz+stencilSize*2];
__shared__ myprec s_v[sPencils][mz+stencilSize*2];
__shared__ myprec s_w[sPencils][mz+stencilSize*2];
__shared__ myprec s_h[sPencils][mz+stencilSize*2];
__shared__ myprec s_t[sPencils][mz+stencilSize*2];
__shared__ myprec s_p[sPencils][mz+stencilSize*2];
__shared__ myprec s_m[sPencils][mz+stencilSize*2];
__shared__ myprec s_l[sPencils][mz+stencilSize*2];
__shared__ myprec s_wrk[sPencils][mz+stencilSize*2];
s_r[sj][si] = r[id.g];
s_u[sj][si] = u[id.g];
s_v[sj][si] = v[id.g];
s_w[sj][si] = w[id.g];
s_h[sj][si] = h[id.g];
s_t[sj][si] = t[id.g];
s_p[sj][si] = p[id.g];
s_m[sj][si] = mu[id.g];
s_l[sj][si] = lam[id.g];
__syncthreads();
// fill in periodic images in shared memory array
if (id.k < stencilSize) {
s_r[sj][si-stencilSize] = s_r[sj][si+mz-stencilSize];
s_r[sj][si+mz] = s_r[sj][si];
s_u[sj][si-stencilSize] = s_u[sj][si+mz-stencilSize];
s_u[sj][si+mz] = s_u[sj][si];
s_v[sj][si-stencilSize] = s_v[sj][si+mz-stencilSize];
s_v[sj][si+mz] = s_v[sj][si];
s_w[sj][si-stencilSize] = s_w[sj][si+mz-stencilSize];
s_w[sj][si+mz] = s_w[sj][si];
s_h[sj][si-stencilSize] = s_h[sj][si+mz-stencilSize];
s_h[sj][si+mz] = s_h[sj][si];
s_t[sj][si-stencilSize] = s_t[sj][si+mz-stencilSize];
s_t[sj][si+mz] = s_t[sj][si];
s_p[sj][si-stencilSize] = s_p[sj][si+mz-stencilSize];
s_p[sj][si+mz] = s_p[sj][si];
s_m[sj][si-stencilSize] = s_m[sj][si+mz-stencilSize];
s_m[sj][si+mz] = s_m[sj][si];
s_l[sj][si-stencilSize] = s_l[sj][si+mz-stencilSize];
s_l[sj][si+mz] = s_l[sj][si];
}
__syncthreads();
// viscous fluxes derivative
derDevSharedV2z(&wrk1,s_u[sj],si);
uZtmp = wrk1*s_m[sj][si];
derDevSharedV2z(&wrk1,s_v[sj],si);
vZtmp = wrk1*s_m[sj][si];
derDevSharedV2z(&wrk1,s_w[sj],si);
wZtmp = wrk1*s_m[sj][si];
derDevSharedV2z(&wrk1,s_t[sj],si);
eZtmp = wrk1*s_l[sj][si];
__syncthreads();
derDevSharedV1z(&wrk2,s_l[sj],si); //wrk2 = d (lam) dz
derDevSharedV1z(&wrk1,s_t[sj],si); //wrk1 = d (t) dz
eZtmp = eZtmp + wrk1*wrk2;
//Adding here the terms d (mu) dz * szj; (lambda in case of h in rhse);
derDevSharedV1z(&wrk2,s_m[sj],si); //wrk2 = d (mu) dz
uZtmp = uZtmp + wrk2*sij[6][id.g];
vZtmp = vZtmp + wrk2*sij[7][id.g];
wZtmp = wZtmp + wrk2*sij[8][id.g];
// split advection terms
//Adding here the terms - d (ru phi) dz;
fluxQuadSharedz(&wrk1,s_r[sj],s_w[sj],si);
rZtmp = wrk1;
__syncthreads();
fluxCubeSharedz(&wrk1,s_r[sj],s_w[sj],s_u[sj],si);
uZtmp = uZtmp + wrk1;
__syncthreads();
fluxCubeSharedz(&wrk1,s_r[sj],s_w[sj],s_v[sj],si);
vZtmp = vZtmp + wrk1;
__syncthreads();
fluxCubeSharedz(&wrk1,s_r[sj],s_w[sj],s_w[sj],si);
wZtmp = wZtmp + wrk1;
__syncthreads();
fluxCubeSharedz(&wrk1,s_r[sj],s_w[sj],s_h[sj],si);
eZtmp = eZtmp + wrk1;
__syncthreads();
// pressure and dilation derivatives
s_wrk[sj][si] = dil[id.g];
__syncthreads();
if (id.k < stencilSize) {
s_wrk[sj][si-stencilSize] = s_wrk[sj][si+mz-stencilSize];
s_wrk[sj][si+mz] = s_wrk[sj][si];
}
__syncthreads();
derDevSharedV1z(&wrk2,s_wrk[sj],si);
derDevShared1z(&wrk1,s_p[sj],si);
wZtmp = wZtmp - wrk1 + s_m[sj][si]*wrk2*1.0/3.0;
//viscous dissipation
s_wrk[sj][si] = s_m[sj][si]*(
s_u[sj][si]*( sij[6][id.g] ) +
s_v[sj][si]*( sij[7][id.g] ) +
s_w[sj][si]*( sij[8][id.g] )
);
__syncthreads();
if (id.k < stencilSize) {
s_wrk[sj][si-stencilSize] = s_wrk[sj][si+mz-stencilSize];
s_wrk[sj][si+mz] = s_wrk[sj][si];
}
__syncthreads();
derDevSharedV1z(&wrk2,s_wrk[sj],si);
rZ[id.g] = rZtmp;
uZ[id.g] = uZtmp;
vZ[id.g] = vZtmp;
wZ[id.g] = wZtmp;
eZ[id.g] = eZtmp + wrk2;
}
__global__ void RHSDeviceFullYL(myprec *rY, myprec *uY, myprec *vY, myprec *wY, myprec *eY,
myprec *r, myprec *u, myprec *v, myprec *w, myprec *h ,
myprec *t, myprec *p, myprec *mu, myprec *lam,
myprec *sij[9], myprec *dil) {
Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y);
int sum = id.biy * mx * my + id.bix*id.bdx + id.tix;
derDevV1yL(d_workY1,u,id);
derDevV1yL(uY,d_workY1,id);
__syncthreads();
derDevV1yL(d_workY1,v,id);
derDevV1yL(vY,d_workY1,id);
__syncthreads();
derDevV1yL(d_workY1,w,id);
derDevV1yL(wY,d_workY1,id);
__syncthreads();
derDevV1yL(d_workY1 ,t,id);
derDevV1yL(eY,d_workY1 ,id);
__syncthreads();
derDevV1yL(d_workY2,lam,id); //d_work2 = d (lam) dy
for (int j = id.tiy; j < my; j += id.bdy) {
int glb = sum + j * mx ;
uY[glb] = uY[glb]*mu[glb];
vY[glb] = vY[glb]*mu[glb];
wY[glb] = wY[glb]*mu[glb];
eY[glb] = eY[glb]*lam[glb]+ d_workY1[glb]*d_workY2[glb];
}
__syncthreads();
//Adding here the terms d (phi) dy * ( d (mu) dy); (lambda in case of t in rhse);
derDevV1yL(d_workY2,mu,id); //d_work2 = d (mu) dy
for (int j = id.tiy; j < my; j += id.bdy) {
int glb = sum + j * mx ;
uY[glb] = uY[glb] + d_workY2[glb]*sij[3][glb];
vY[glb] = vY[glb] + d_workY2[glb]*sij[4][glb];
wY[glb] = wY[glb] + d_workY2[glb]*sij[5][glb];
}
// pressure derivative and dilation derivative
__syncthreads();
derDevV1yL(d_workY2, p,id);
derDevV1yL(d_workY1,dil,id);
for (int j = id.tiy; j < my; j += id.bdy) {
int glb = sum + j * mx ;
vY[glb] = vY[glb] - d_workY2[glb] + mu[glb]*d_workY1[glb]*1.0/3.0;
}
//viscous dissipation
for (int j = id.tiy; j < my; j += id.bdy) {
int glb = sum + j * mx ;
d_workY2[glb] = mu[glb]*(
u[glb]*( sij[3][glb] ) +
v[glb]*( sij[4][glb] ) +
w[glb]*( sij[5][glb] )
);
}
__syncthreads();
derDevV1yL(d_workY1,d_workY2,id);
for (int j = id.tiy; j < my; j += id.bdy) {
int glb = sum + j * mx ;
eY[glb] = eY[glb] + d_workY1[glb];
}
}
__global__ void RHSDeviceFullZL(myprec *rZ, myprec *uZ, myprec *vZ, myprec *wZ, myprec *eZ,
myprec *r, myprec *u, myprec *v, myprec *w, myprec *h ,
myprec *t, myprec *p, myprec *mu, myprec *lam,
myprec *sij[9], myprec *dil) {
Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y);
int sum = id.biy * mx + id.bix*id.bdx + id.tix;
derDevV1zL(d_workZ1,u,id);
derDevV1zL(uZ,d_workZ1,id);
__syncthreads();
derDevV1zL(d_workZ1,v,id);
derDevV1zL(vZ,d_workZ1,id);
__syncthreads();
derDevV1zL(d_workZ1,w,id);
derDevV1zL(wZ,d_workZ1,id);
__syncthreads();
derDevV1zL(d_workZ1 ,t,id);
derDevV1zL(eZ,d_workZ1 ,id);
__syncthreads();
derDevV1zL(d_workZ2,lam,id); //d_work2 = d (lam) dz
__syncthreads();
for (int k = id.tiy; k < mz; k += id.bdy) {
int glb = k * mx * my + sum;
uZ[glb] = uZ[glb]*mu[glb];
vZ[glb] = vZ[glb]*mu[glb];
wZ[glb] = wZ[glb]*mu[glb];
eZ[glb] = eZ[glb]*lam[glb] + d_workZ2[glb]*d_workZ1[glb];
}
__syncthreads();
//Adding here the terms d (phi) dz * ( d (mu) dz -0.5 * rw); (lambda in case of h in rhse);
derDevV1zL(d_workZ2,mu,id); //d_work2 = d (mu) dz
for (int k = id.tiy; k < mz; k += id.bdy) {
int glb = k * mx * my + sum;
uZ[glb] = uZ[glb] + d_workZ2[glb]*sij[6][glb];
vZ[glb] = vZ[glb] + d_workZ2[glb]*sij[7][glb];
wZ[glb] = wZ[glb] + d_workZ2[glb]*sij[8][glb];
}
// pressure derivative and dilation derivative
__syncthreads();
derDevV1zL(d_workZ2,p ,id);
derDevV1zL(d_workZ1,dil,id);
for (int k = id.tiy; k < mz; k += id.bdy) {
int glb = k * mx * my + sum;
wZ[glb] = wZ[glb] - d_workZ2[glb] + mu[glb]*d_workZ1[glb]*1.0/3.0;
}
//viscous dissipation
for (int k = id.tiy; k < mz; k += id.bdy) {
int glb = k * mx * my + sum;
d_workZ2[glb] = mu[glb]*(
u[glb]*( sij[6][glb] ) +
v[glb]*( sij[7][glb] ) +
w[glb]*( sij[8][glb] )
);
}
__syncthreads();
derDevV1zL(d_workZ1,d_workZ2,id); // d_workZ1 = d (viscous work) dz
for (int k = id.tiy; k < mz; k += id.bdy) {
int glb = k * mx * my + sum;
eZ[glb] = eZ[glb] + d_workZ1[glb];
}
}
__global__ void FLXDeviceY(myprec *rY, myprec *uY, myprec *vY, myprec *wY, myprec *eY,
myprec *r, myprec *u, myprec *v, myprec *w, myprec *h ,
myprec *t, myprec *p, myprec *mu, myprec *lam,
myprec *sij[9], myprec *dil) {
Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y);
id.mkidYFlx();
int si = id.j + stencilSize; // local i for shared memory access + halo offset
int sj = id.tiy; // local j for shared memory access
myprec wrk1=0;
__shared__ myprec s_r[sPencils][my+stencilSize*2];
__shared__ myprec s_u[sPencils][my+stencilSize*2];
__shared__ myprec s_v[sPencils][my+stencilSize*2];
__shared__ myprec s_w[sPencils][my+stencilSize*2];
__shared__ myprec s_h[sPencils][my+stencilSize*2];
s_r[sj][si] = r[id.g];
s_u[sj][si] = u[id.g];
s_v[sj][si] = v[id.g];
s_w[sj][si] = w[id.g];
s_h[sj][si] = h[id.g];
__syncthreads();
// fill in periodic images in shared memory array
if (id.j < stencilSize) {
s_r[sj][si-stencilSize] = s_r[sj][si+my-stencilSize];
s_r[sj][si+my] = s_r[sj][si];
s_u[sj][si-stencilSize] = s_u[sj][si+my-stencilSize];
s_u[sj][si+my] = s_u[sj][si];
s_v[sj][si-stencilSize] = s_v[sj][si+my-stencilSize];
s_v[sj][si+my] = s_v[sj][si];
s_w[sj][si-stencilSize] = s_w[sj][si+my-stencilSize];
s_w[sj][si+my] = s_w[sj][si];
s_h[sj][si-stencilSize] = s_h[sj][si+my-stencilSize];
s_h[sj][si+my] = s_h[sj][si];
}
__syncthreads();
fluxQuadSharedG(&wrk1,s_r[sj],s_v[sj],si,d_dy);
rY[id.g] = wrk1;
__syncthreads();
fluxCubeSharedG(&wrk1,s_r[sj],s_v[sj],s_u[sj],si,d_dy);
uY[id.g] = wrk1;
__syncthreads();
fluxCubeSharedG(&wrk1,s_r[sj],s_v[sj],s_v[sj],si,d_dy);
vY[id.g] = wrk1;
__syncthreads();
fluxCubeSharedG(&wrk1,s_r[sj],s_v[sj],s_w[sj],si,d_dy);
wY[id.g] = wrk1;
__syncthreads();
fluxCubeSharedG(&wrk1,s_r[sj],s_v[sj],s_h[sj],si,d_dy);
eY[id.g] = wrk1;
__syncthreads();
}
__global__ void FLXDeviceZ(myprec *rZ, myprec *uZ, myprec *vZ, myprec *wZ, myprec *eZ,
myprec *r, myprec *u, myprec *v, myprec *w, myprec *h ,
myprec *t, myprec *p, myprec *mu, myprec *lam,
myprec *sij[9], myprec *dil) {
Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y);
id.mkidZFlx();
int si = id.k + stencilSize; // local i for shared memory access + halo offset
int sj = id.tiy; // local j for shared memory access
myprec wrk1=0;
__shared__ myprec s_r[sPencils][mz+stencilSize*2];
__shared__ myprec s_u[sPencils][mz+stencilSize*2];
__shared__ myprec s_v[sPencils][mz+stencilSize*2];
__shared__ myprec s_w[sPencils][mz+stencilSize*2];
__shared__ myprec s_h[sPencils][mz+stencilSize*2];
s_r[sj][si] = r[id.g];
s_u[sj][si] = u[id.g];
s_v[sj][si] = v[id.g];
s_w[sj][si] = w[id.g];
s_h[sj][si] = h[id.g];
__syncthreads();
// fill in periodic images in shared memory array
if (id.k < stencilSize) {
s_r[sj][si-stencilSize] = s_r[sj][si+mz-stencilSize];
s_r[sj][si+mz] = s_r[sj][si];
s_u[sj][si-stencilSize] = s_u[sj][si+mz-stencilSize];
s_u[sj][si+mz] = s_u[sj][si];
s_v[sj][si-stencilSize] = s_v[sj][si+mz-stencilSize];
s_v[sj][si+mz] = s_v[sj][si];
s_w[sj][si-stencilSize] = s_w[sj][si+mz-stencilSize];
s_w[sj][si+mz] = s_w[sj][si];
s_h[sj][si-stencilSize] = s_h[sj][si+mz-stencilSize];
s_h[sj][si+mz] = s_h[sj][si];
}
__syncthreads();
//Adding here the terms - d (ru phi) dz;
fluxQuadSharedG(&wrk1,s_r[sj],s_w[sj],si,d_dz);
rZ[id.g] = wrk1;
__syncthreads();
fluxCubeSharedG(&wrk1,s_r[sj],s_w[sj],s_u[sj],si,d_dz);
uZ[id.g] = wrk1;
__syncthreads();
fluxCubeSharedG(&wrk1,s_r[sj],s_w[sj],s_v[sj],si,d_dz);
vZ[id.g] = wrk1;
__syncthreads();
fluxCubeSharedG(&wrk1,s_r[sj],s_w[sj],s_w[sj],si,d_dz);
wZ[id.g] = wrk1;
__syncthreads();
fluxCubeSharedG(&wrk1,s_r[sj],s_w[sj],s_h[sj],si,d_dz);
eZ[id.g] = wrk1;
__syncthreads();
}
__device__ void initRHS() {
checkCudaDev( hipMalloc((void**)&d_workY1,mx*my*mz*sizeof(myprec)) );
checkCudaDev( hipMalloc((void**)&d_workY2,mx*my*mz*sizeof(myprec)) );
checkCudaDev( hipMalloc((void**)&d_workZ1,mx*my*mz*sizeof(myprec)) );
checkCudaDev( hipMalloc((void**)&d_workZ2,mx*my*mz*sizeof(myprec)) );
}
__device__ void clearRHS() {
checkCudaDev( hipFree(d_workY1) );
checkCudaDev( hipFree(d_workY2) );
checkCudaDev( hipFree(d_workZ1) );
checkCudaDev( hipFree(d_workZ2) );
}
| 2d6e4758f0fb6608964c310ca0fa7c678ea7da72.cu |
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include "globals.h"
#include "cuda_functions.h"
#include "cuda_math.h"
/*
 * The L-versions of the RHS have to be run with
* - the L-version of the derivatives
* i.e.: derDev1xL instead of derDev1x
* - the L-version of the grid
* i.e.: h_gridL[0] instead of h_grid[0]
*/
__device__ myprec *d_workY1;
__device__ myprec *d_workY2;
__device__ myprec *d_workZ1;
__device__ myprec *d_workZ2;
/* The whole RHS in the X direction is calculated in RHSDeviceSharedFlxX thanks to the beneficial memory layout that allows to use small pencils */
/* For the Y and Z direction, fluxes require a small pencil discretization while the rest of the RHS can be calculated on large pencils which speed
* up significantly the computation. Therefore 5 streams are used
* stream 0 -> complete X RHS (in RHSDeviceSharedFlxX) (small pencil grid)
* stream 1 -> viscous terms and pressure terms in Y (in RHSDeviceFullYL) (large pencil grid)
* stream 2 -> viscous terms and pressure terms in Z (in RHSDeviceFullZL) (large pencil grid)
* stream 3 -> advective fluxes in Y direction (in FLXDeviceY) (small pencil transposed grid)
* stream 4 -> advective fluxes in Z direction (in FLXDeviceZ) (small pencil transposed grid)*/
__global__ void RHSDeviceSharedFlxX(myprec *rX, myprec *uX, myprec *vX, myprec *wX, myprec *eX,
myprec *r, myprec *u, myprec *v, myprec *w, myprec *h ,
myprec *t, myprec *p, myprec *mu, myprec *lam,
myprec *sij[9], myprec *dil) {
Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y);
id.mkidX();
int si = id.i + stencilSize; // local i for shared memory access + halo offset
int sj = id.tiy; // local j for shared memory access
myprec rXtmp=0;
myprec uXtmp=0;
myprec vXtmp=0;
myprec wXtmp=0;
myprec eXtmp=0;
myprec wrk1=0;
myprec wrk2=0;
__shared__ myprec s_r[sPencils][mx+stencilSize*2];
__shared__ myprec s_u[sPencils][mx+stencilSize*2];
__shared__ myprec s_v[sPencils][mx+stencilSize*2];
__shared__ myprec s_w[sPencils][mx+stencilSize*2];
__shared__ myprec s_h[sPencils][mx+stencilSize*2];
__shared__ myprec s_t[sPencils][mx+stencilSize*2];
__shared__ myprec s_p[sPencils][mx+stencilSize*2];
__shared__ myprec s_m[sPencils][mx+stencilSize*2];
__shared__ myprec s_l[sPencils][mx+stencilSize*2];
__shared__ myprec s_wrk[sPencils][mx+stencilSize*2];
s_r[sj][si] = r[id.g];
s_u[sj][si] = u[id.g];
s_v[sj][si] = v[id.g];
s_w[sj][si] = w[id.g];
s_h[sj][si] = h[id.g];
s_t[sj][si] = t[id.g];
s_p[sj][si] = p[id.g];
s_m[sj][si] = mu[id.g];
s_l[sj][si] = lam[id.g];
__syncthreads();
// fill in periodic images in shared memory array
if (id.i < stencilSize) {
s_r[sj][si-stencilSize] = s_r[sj][si+mx-stencilSize];
s_r[sj][si+mx] = s_r[sj][si];
s_u[sj][si-stencilSize] = s_u[sj][si+mx-stencilSize];
s_u[sj][si+mx] = s_u[sj][si];
s_v[sj][si-stencilSize] = s_v[sj][si+mx-stencilSize];
s_v[sj][si+mx] = s_v[sj][si];
s_w[sj][si-stencilSize] = s_w[sj][si+mx-stencilSize];
s_w[sj][si+mx] = s_w[sj][si];
s_h[sj][si-stencilSize] = s_h[sj][si+mx-stencilSize];
s_h[sj][si+mx] = s_h[sj][si];
s_t[sj][si-stencilSize] = s_t[sj][si+mx-stencilSize];
s_t[sj][si+mx] = s_t[sj][si];
s_p[sj][si-stencilSize] = s_p[sj][si+mx-stencilSize];
s_p[sj][si+mx] = s_p[sj][si];
s_m[sj][si-stencilSize] = s_m[sj][si+mx-stencilSize];
s_m[sj][si+mx] = s_m[sj][si];
s_l[sj][si-stencilSize] = s_l[sj][si+mx-stencilSize];
s_l[sj][si+mx] = s_l[sj][si];
}
__syncthreads();
// viscous fluxes derivative
derDevSharedV2x(&wrk1,s_u[sj],si);
uXtmp = wrk1*s_m[sj][si];
derDevSharedV2x(&wrk1,s_v[sj],si);
vXtmp = wrk1*s_m[sj][si];
derDevSharedV2x(&wrk1,s_w[sj],si);
wXtmp = wrk1*s_m[sj][si];
derDevSharedV2x(&wrk1,s_t[sj],si);
eXtmp = wrk1*s_l[sj][si];
__syncthreads();
derDevSharedV1x(&wrk2,s_l[sj],si); //wrk2 = d (lam) dx
derDevSharedV1x(&wrk1,s_t[sj],si); //wrk1 = d (t) dx
eXtmp = eXtmp + wrk1*wrk2;
//Adding here the terms d (mu) dx * sxj; (lambda in case of h in rhse);
derDevSharedV1x(&wrk2,s_m[sj],si); //wrk2 = d (mu) dx
uXtmp = uXtmp + wrk2*sij[0][id.g];
vXtmp = vXtmp + wrk2*sij[1][id.g];
wXtmp = wXtmp + wrk2*sij[2][id.g];
// split advection terms
//Adding here the terms - d (ru phi) dx;
fluxQuadSharedx(&wrk1,s_r[sj],s_u[sj],si);
rXtmp = wrk1;
__syncthreads();
fluxCubeSharedx(&wrk1,s_r[sj],s_u[sj],s_u[sj],si);
uXtmp = uXtmp + wrk1;
__syncthreads();
fluxCubeSharedx(&wrk1,s_r[sj],s_u[sj],s_v[sj],si);
vXtmp = vXtmp + wrk1;
__syncthreads();
fluxCubeSharedx(&wrk1,s_r[sj],s_u[sj],s_w[sj],si);
wXtmp = wXtmp + wrk1;
__syncthreads();
fluxCubeSharedx(&wrk1,s_r[sj],s_u[sj],s_h[sj],si);
eXtmp = eXtmp + wrk1;
__syncthreads();
// pressure and dilation derivatives
s_wrk[sj][si] = dil[id.g];
__syncthreads();
if (id.i < stencilSize) {
s_wrk[sj][si-stencilSize] = s_wrk[sj][si+mx-stencilSize];
s_wrk[sj][si+mx] = s_wrk[sj][si];
}
__syncthreads();
derDevSharedV1x(&wrk2,s_wrk[sj],si);
derDevShared1x(&wrk1,s_p[sj],si);
uXtmp = uXtmp - wrk1 + s_m[sj][si]*wrk2*1.0/3.0;
//viscous dissipation
s_wrk[sj][si] = s_m[sj][si]*(
s_u[sj][si]*( sij[0][id.g] ) +
s_v[sj][si]*( sij[1][id.g] ) +
s_w[sj][si]*( sij[2][id.g] )
);
__syncthreads();
if (id.i < stencilSize) {
s_wrk[sj][si-stencilSize] = s_wrk[sj][si+mx-stencilSize];
s_wrk[sj][si+mx] = s_wrk[sj][si];
}
__syncthreads();
derDevSharedV1x(&wrk2,s_wrk[sj],si);
rX[id.g] = rXtmp;
uX[id.g] = uXtmp;
vX[id.g] = vXtmp;
wX[id.g] = wXtmp;
eX[id.g] = eXtmp + wrk2;
}
__global__ void RHSDeviceSharedFlxY(myprec *rY, myprec *uY, myprec *vY, myprec *wY, myprec *eY,
myprec *r, myprec *u, myprec *v, myprec *w, myprec *h ,
myprec *t, myprec *p, myprec *mu, myprec *lam,
myprec *sij[9], myprec *dil) {
Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y);
id.mkidYFlx();
int si = id.j + stencilSize; // local i for shared memory access + halo offset
int sj = id.tiy; // local j for shared memory access
myprec rYtmp=0;
myprec uYtmp=0;
myprec vYtmp=0;
myprec wYtmp=0;
myprec eYtmp=0;
myprec wrk1=0;
myprec wrk2=0;
__shared__ myprec s_r[sPencils][my+stencilSize*2];
__shared__ myprec s_u[sPencils][my+stencilSize*2];
__shared__ myprec s_v[sPencils][my+stencilSize*2];
__shared__ myprec s_w[sPencils][my+stencilSize*2];
__shared__ myprec s_h[sPencils][my+stencilSize*2];
__shared__ myprec s_t[sPencils][my+stencilSize*2];
__shared__ myprec s_p[sPencils][my+stencilSize*2];
__shared__ myprec s_m[sPencils][my+stencilSize*2];
__shared__ myprec s_l[sPencils][my+stencilSize*2];
__shared__ myprec s_wrk[sPencils][my+stencilSize*2];
s_r[sj][si] = r[id.g];
s_u[sj][si] = u[id.g];
s_v[sj][si] = v[id.g];
s_w[sj][si] = w[id.g];
s_h[sj][si] = h[id.g];
s_t[sj][si] = t[id.g];
s_p[sj][si] = p[id.g];
s_m[sj][si] = mu[id.g];
s_l[sj][si] = lam[id.g];
__syncthreads();
// fill in periodic images in shared memory array
if (id.j < stencilSize) {
s_r[sj][si-stencilSize] = s_r[sj][si+my-stencilSize];
s_r[sj][si+my] = s_r[sj][si];
s_u[sj][si-stencilSize] = s_u[sj][si+my-stencilSize];
s_u[sj][si+my] = s_u[sj][si];
s_v[sj][si-stencilSize] = s_v[sj][si+my-stencilSize];
s_v[sj][si+my] = s_v[sj][si];
s_w[sj][si-stencilSize] = s_w[sj][si+my-stencilSize];
s_w[sj][si+my] = s_w[sj][si];
s_h[sj][si-stencilSize] = s_h[sj][si+my-stencilSize];
s_h[sj][si+my] = s_h[sj][si];
s_t[sj][si-stencilSize] = s_t[sj][si+my-stencilSize];
s_t[sj][si+my] = s_t[sj][si];
s_p[sj][si-stencilSize] = s_p[sj][si+my-stencilSize];
s_p[sj][si+my] = s_p[sj][si];
s_m[sj][si-stencilSize] = s_m[sj][si+my-stencilSize];
s_m[sj][si+my] = s_m[sj][si];
s_l[sj][si-stencilSize] = s_l[sj][si+my-stencilSize];
s_l[sj][si+my] = s_l[sj][si];
}
__syncthreads();
// viscous fluxes derivative
derDevSharedV2y(&wrk1,s_u[sj],si);
uYtmp = wrk1*s_m[sj][si];
derDevSharedV2y(&wrk1,s_v[sj],si);
vYtmp = wrk1*s_m[sj][si];
derDevSharedV2y(&wrk1,s_w[sj],si);
wYtmp = wrk1*s_m[sj][si];
derDevSharedV2y(&wrk1,s_t[sj],si);
eYtmp = wrk1*s_l[sj][si];
__syncthreads();
derDevSharedV1y(&wrk2,s_l[sj],si); //wrk2 = d (lam) dy
derDevSharedV1y(&wrk1,s_t[sj],si); //wrk1 = d (t) dy
eYtmp = eYtmp + wrk1*wrk2;
//Adding here the terms d (mu) dy * syj; (lambda in case of h in rhse);
derDevSharedV1y(&wrk2,s_m[sj],si); //wrk2 = d (mu) dy
uYtmp = uYtmp + wrk2*sij[3][id.g];
vYtmp = vYtmp + wrk2*sij[4][id.g];
wYtmp = wYtmp + wrk2*sij[5][id.g];
// split advection terms
//Adding here the terms - d (ru phi) dy;
fluxQuadSharedy(&wrk1,s_r[sj],s_v[sj],si);
rYtmp = wrk1;
__syncthreads();
fluxCubeSharedy(&wrk1,s_r[sj],s_v[sj],s_u[sj],si);
uYtmp = uYtmp + wrk1;
__syncthreads();
fluxCubeSharedy(&wrk1,s_r[sj],s_v[sj],s_v[sj],si);
vYtmp = vYtmp + wrk1;
__syncthreads();
fluxCubeSharedy(&wrk1,s_r[sj],s_v[sj],s_w[sj],si);
wYtmp = wYtmp + wrk1;
__syncthreads();
fluxCubeSharedy(&wrk1,s_r[sj],s_v[sj],s_h[sj],si);
eYtmp = eYtmp + wrk1;
__syncthreads();
// pressure and dilation derivatives
s_wrk[sj][si] = dil[id.g];
__syncthreads();
if (id.j < stencilSize) {
s_wrk[sj][si-stencilSize] = s_wrk[sj][si+my-stencilSize];
s_wrk[sj][si+my] = s_wrk[sj][si];
}
__syncthreads();
derDevSharedV1y(&wrk2,s_wrk[sj],si);
derDevShared1y(&wrk1,s_p[sj],si);
vYtmp = vYtmp - wrk1 + s_m[sj][si]*wrk2*1.0/3.0;
//viscous dissipation
s_wrk[sj][si] = s_m[sj][si]*(
s_u[sj][si]*( sij[3][id.g] ) +
s_v[sj][si]*( sij[4][id.g] ) +
s_w[sj][si]*( sij[5][id.g] )
);
__syncthreads();
if (id.j < stencilSize) {
s_wrk[sj][si-stencilSize] = s_wrk[sj][si+my-stencilSize];
s_wrk[sj][si+my] = s_wrk[sj][si];
}
__syncthreads();
derDevSharedV1y(&wrk2,s_wrk[sj],si);
rY[id.g] = rYtmp;
uY[id.g] = uYtmp;
vY[id.g] = vYtmp;
wY[id.g] = wYtmp;
eY[id.g] = eYtmp + wrk2;
}
__global__ void RHSDeviceSharedFlxZ(myprec *rZ, myprec *uZ, myprec *vZ, myprec *wZ, myprec *eZ,
myprec *r, myprec *u, myprec *v, myprec *w, myprec *h ,
myprec *t, myprec *p, myprec *mu, myprec *lam,
myprec *sij[9], myprec *dil) {
Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y);
id.mkidZFlx();
int si = id.k + stencilSize; // local i for shared memory access + halo offset
int sj = id.tiy; // local j for shared memory access
myprec rZtmp=0;
myprec uZtmp=0;
myprec vZtmp=0;
myprec wZtmp=0;
myprec eZtmp=0;
myprec wrk1=0;
myprec wrk2=0;
__shared__ myprec s_r[sPencils][mz+stencilSize*2];
__shared__ myprec s_u[sPencils][mz+stencilSize*2];
__shared__ myprec s_v[sPencils][mz+stencilSize*2];
__shared__ myprec s_w[sPencils][mz+stencilSize*2];
__shared__ myprec s_h[sPencils][mz+stencilSize*2];
__shared__ myprec s_t[sPencils][mz+stencilSize*2];
__shared__ myprec s_p[sPencils][mz+stencilSize*2];
__shared__ myprec s_m[sPencils][mz+stencilSize*2];
__shared__ myprec s_l[sPencils][mz+stencilSize*2];
__shared__ myprec s_wrk[sPencils][mz+stencilSize*2];
s_r[sj][si] = r[id.g];
s_u[sj][si] = u[id.g];
s_v[sj][si] = v[id.g];
s_w[sj][si] = w[id.g];
s_h[sj][si] = h[id.g];
s_t[sj][si] = t[id.g];
s_p[sj][si] = p[id.g];
s_m[sj][si] = mu[id.g];
s_l[sj][si] = lam[id.g];
__syncthreads();
// fill in periodic images in shared memory array
if (id.k < stencilSize) {
s_r[sj][si-stencilSize] = s_r[sj][si+mz-stencilSize];
s_r[sj][si+mz] = s_r[sj][si];
s_u[sj][si-stencilSize] = s_u[sj][si+mz-stencilSize];
s_u[sj][si+mz] = s_u[sj][si];
s_v[sj][si-stencilSize] = s_v[sj][si+mz-stencilSize];
s_v[sj][si+mz] = s_v[sj][si];
s_w[sj][si-stencilSize] = s_w[sj][si+mz-stencilSize];
s_w[sj][si+mz] = s_w[sj][si];
s_h[sj][si-stencilSize] = s_h[sj][si+mz-stencilSize];
s_h[sj][si+mz] = s_h[sj][si];
s_t[sj][si-stencilSize] = s_t[sj][si+mz-stencilSize];
s_t[sj][si+mz] = s_t[sj][si];
s_p[sj][si-stencilSize] = s_p[sj][si+mz-stencilSize];
s_p[sj][si+mz] = s_p[sj][si];
s_m[sj][si-stencilSize] = s_m[sj][si+mz-stencilSize];
s_m[sj][si+mz] = s_m[sj][si];
s_l[sj][si-stencilSize] = s_l[sj][si+mz-stencilSize];
s_l[sj][si+mz] = s_l[sj][si];
}
__syncthreads();
// viscous fluxes derivative
derDevSharedV2z(&wrk1,s_u[sj],si);
uZtmp = wrk1*s_m[sj][si];
derDevSharedV2z(&wrk1,s_v[sj],si);
vZtmp = wrk1*s_m[sj][si];
derDevSharedV2z(&wrk1,s_w[sj],si);
wZtmp = wrk1*s_m[sj][si];
derDevSharedV2z(&wrk1,s_t[sj],si);
eZtmp = wrk1*s_l[sj][si];
__syncthreads();
derDevSharedV1z(&wrk2,s_l[sj],si); //wrk2 = d (lam) dz
derDevSharedV1z(&wrk1,s_t[sj],si); //wrk1 = d (t) dz
eZtmp = eZtmp + wrk1*wrk2;
//Adding here the terms d (mu) dz * szj; (lambda in case of h in rhse);
derDevSharedV1z(&wrk2,s_m[sj],si); //wrk2 = d (mu) dz
uZtmp = uZtmp + wrk2*sij[6][id.g];
vZtmp = vZtmp + wrk2*sij[7][id.g];
wZtmp = wZtmp + wrk2*sij[8][id.g];
// split advection terms
//Adding here the terms - d (ru phi) dz;
fluxQuadSharedz(&wrk1,s_r[sj],s_w[sj],si);
rZtmp = wrk1;
__syncthreads();
fluxCubeSharedz(&wrk1,s_r[sj],s_w[sj],s_u[sj],si);
uZtmp = uZtmp + wrk1;
__syncthreads();
fluxCubeSharedz(&wrk1,s_r[sj],s_w[sj],s_v[sj],si);
vZtmp = vZtmp + wrk1;
__syncthreads();
fluxCubeSharedz(&wrk1,s_r[sj],s_w[sj],s_w[sj],si);
wZtmp = wZtmp + wrk1;
__syncthreads();
fluxCubeSharedz(&wrk1,s_r[sj],s_w[sj],s_h[sj],si);
eZtmp = eZtmp + wrk1;
__syncthreads();
// pressure and dilation derivatives
s_wrk[sj][si] = dil[id.g];
__syncthreads();
if (id.k < stencilSize) {
s_wrk[sj][si-stencilSize] = s_wrk[sj][si+mz-stencilSize];
s_wrk[sj][si+mz] = s_wrk[sj][si];
}
__syncthreads();
derDevSharedV1z(&wrk2,s_wrk[sj],si);
derDevShared1z(&wrk1,s_p[sj],si);
wZtmp = wZtmp - wrk1 + s_m[sj][si]*wrk2*1.0/3.0;
//viscous dissipation
s_wrk[sj][si] = s_m[sj][si]*(
s_u[sj][si]*( sij[6][id.g] ) +
s_v[sj][si]*( sij[7][id.g] ) +
s_w[sj][si]*( sij[8][id.g] )
);
__syncthreads();
if (id.k < stencilSize) {
s_wrk[sj][si-stencilSize] = s_wrk[sj][si+mz-stencilSize];
s_wrk[sj][si+mz] = s_wrk[sj][si];
}
__syncthreads();
derDevSharedV1z(&wrk2,s_wrk[sj],si);
rZ[id.g] = rZtmp;
uZ[id.g] = uZtmp;
vZ[id.g] = vZtmp;
wZ[id.g] = wZtmp;
eZ[id.g] = eZtmp + wrk2;
}
__global__ void RHSDeviceFullYL(myprec *rY, myprec *uY, myprec *vY, myprec *wY, myprec *eY,
myprec *r, myprec *u, myprec *v, myprec *w, myprec *h ,
myprec *t, myprec *p, myprec *mu, myprec *lam,
myprec *sij[9], myprec *dil) {
Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y);
int sum = id.biy * mx * my + id.bix*id.bdx + id.tix;
derDevV1yL(d_workY1,u,id);
derDevV1yL(uY,d_workY1,id);
__syncthreads();
derDevV1yL(d_workY1,v,id);
derDevV1yL(vY,d_workY1,id);
__syncthreads();
derDevV1yL(d_workY1,w,id);
derDevV1yL(wY,d_workY1,id);
__syncthreads();
derDevV1yL(d_workY1 ,t,id);
derDevV1yL(eY,d_workY1 ,id);
__syncthreads();
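// Second derivatives on the large pencils are built as two first-derivative
// passes: d_workY1 = d(phi)/dy, then the output array gets d/dy of that.
// After the last pass d_workY1 still holds d(t)/dy, which is reused in the
// d(lam)/dy * d(t)/dy chain-rule term in the loop below.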
derDevV1yL(d_workY2,lam,id); //d_work2 = d (lam) dy
for (int j = id.tiy; j < my; j += id.bdy) {
int glb = sum + j * mx ;
uY[glb] = uY[glb]*mu[glb];
vY[glb] = vY[glb]*mu[glb];
wY[glb] = wY[glb]*mu[glb];
eY[glb] = eY[glb]*lam[glb]+ d_workY1[glb]*d_workY2[glb];
}
__syncthreads();
//Adding here the terms d (phi) dy * ( d (mu) dy); (lambda in case of t in rhse);
derDevV1yL(d_workY2,mu,id); //d_work2 = d (mu) dy
for (int j = id.tiy; j < my; j += id.bdy) {
int glb = sum + j * mx ;
uY[glb] = uY[glb] + d_workY2[glb]*sij[3][glb];
vY[glb] = vY[glb] + d_workY2[glb]*sij[4][glb];
wY[glb] = wY[glb] + d_workY2[glb]*sij[5][glb];
}
// pressure derivative and dilation derivative
__syncthreads();
derDevV1yL(d_workY2, p,id);
derDevV1yL(d_workY1,dil,id);
for (int j = id.tiy; j < my; j += id.bdy) {
int glb = sum + j * mx ;
vY[glb] = vY[glb] - d_workY2[glb] + mu[glb]*d_workY1[glb]*1.0/3.0;
}
//viscous dissipation
for (int j = id.tiy; j < my; j += id.bdy) {
int glb = sum + j * mx ;
d_workY2[glb] = mu[glb]*(
u[glb]*( sij[3][glb] ) +
v[glb]*( sij[4][glb] ) +
w[glb]*( sij[5][glb] )
);
}
__syncthreads();
derDevV1yL(d_workY1,d_workY2,id);
for (int j = id.tiy; j < my; j += id.bdy) {
int glb = sum + j * mx ;
eY[glb] = eY[glb] + d_workY1[glb];
}
}
__global__ void RHSDeviceFullZL(myprec *rZ, myprec *uZ, myprec *vZ, myprec *wZ, myprec *eZ,
myprec *r, myprec *u, myprec *v, myprec *w, myprec *h ,
myprec *t, myprec *p, myprec *mu, myprec *lam,
myprec *sij[9], myprec *dil) {
Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y);
int sum = id.biy * mx + id.bix*id.bdx + id.tix;
derDevV1zL(d_workZ1,u,id);
derDevV1zL(uZ,d_workZ1,id);
__syncthreads();
derDevV1zL(d_workZ1,v,id);
derDevV1zL(vZ,d_workZ1,id);
__syncthreads();
derDevV1zL(d_workZ1,w,id);
derDevV1zL(wZ,d_workZ1,id);
__syncthreads();
derDevV1zL(d_workZ1 ,t,id);
derDevV1zL(eZ,d_workZ1 ,id);
__syncthreads();
derDevV1zL(d_workZ2,lam,id); //d_work2 = d (lam) dz
__syncthreads();
for (int k = id.tiy; k < mz; k += id.bdy) {
int glb = k * mx * my + sum;
uZ[glb] = uZ[glb]*mu[glb];
vZ[glb] = vZ[glb]*mu[glb];
wZ[glb] = wZ[glb]*mu[glb];
eZ[glb] = eZ[glb]*lam[glb] + d_workZ2[glb]*d_workZ1[glb];
}
__syncthreads();
//Adding here the terms d (phi) dz * ( d (mu) dz -0.5 * rw); (lambda in case of h in rhse);
derDevV1zL(d_workZ2,mu,id); //d_work2 = d (mu) dz
for (int k = id.tiy; k < mz; k += id.bdy) {
int glb = k * mx * my + sum;
uZ[glb] = uZ[glb] + d_workZ2[glb]*sij[6][glb];
vZ[glb] = vZ[glb] + d_workZ2[glb]*sij[7][glb];
wZ[glb] = wZ[glb] + d_workZ2[glb]*sij[8][glb];
}
// pressure derivative and dilation derivative
__syncthreads();
derDevV1zL(d_workZ2,p ,id);
derDevV1zL(d_workZ1,dil,id);
for (int k = id.tiy; k < mz; k += id.bdy) {
int glb = k * mx * my + sum;
wZ[glb] = wZ[glb] - d_workZ2[glb] + mu[glb]*d_workZ1[glb]*1.0/3.0;
}
//viscous dissipation
for (int k = id.tiy; k < mz; k += id.bdy) {
int glb = k * mx * my + sum;
d_workZ2[glb] = mu[glb]*(
u[glb]*( sij[6][glb] ) +
v[glb]*( sij[7][glb] ) +
w[glb]*( sij[8][glb] )
);
}
__syncthreads();
derDevV1zL(d_workZ1,d_workZ2,id); // d_workZ1 = d (viscous work) dz
for (int k = id.tiy; k < mz; k += id.bdy) {
int glb = k * mx * my + sum;
eZ[glb] = eZ[glb] + d_workZ1[glb];
}
}
__global__ void FLXDeviceY(myprec *rY, myprec *uY, myprec *vY, myprec *wY, myprec *eY,
myprec *r, myprec *u, myprec *v, myprec *w, myprec *h ,
myprec *t, myprec *p, myprec *mu, myprec *lam,
myprec *sij[9], myprec *dil) {
Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y);
id.mkidYFlx();
int si = id.j + stencilSize; // local i for shared memory access + halo offset
int sj = id.tiy; // local j for shared memory access
myprec wrk1=0;
__shared__ myprec s_r[sPencils][my+stencilSize*2];
__shared__ myprec s_u[sPencils][my+stencilSize*2];
__shared__ myprec s_v[sPencils][my+stencilSize*2];
__shared__ myprec s_w[sPencils][my+stencilSize*2];
__shared__ myprec s_h[sPencils][my+stencilSize*2];
s_r[sj][si] = r[id.g];
s_u[sj][si] = u[id.g];
s_v[sj][si] = v[id.g];
s_w[sj][si] = w[id.g];
s_h[sj][si] = h[id.g];
__syncthreads();
// fill in periodic images in shared memory array
if (id.j < stencilSize) {
s_r[sj][si-stencilSize] = s_r[sj][si+my-stencilSize];
s_r[sj][si+my] = s_r[sj][si];
s_u[sj][si-stencilSize] = s_u[sj][si+my-stencilSize];
s_u[sj][si+my] = s_u[sj][si];
s_v[sj][si-stencilSize] = s_v[sj][si+my-stencilSize];
s_v[sj][si+my] = s_v[sj][si];
s_w[sj][si-stencilSize] = s_w[sj][si+my-stencilSize];
s_w[sj][si+my] = s_w[sj][si];
s_h[sj][si-stencilSize] = s_h[sj][si+my-stencilSize];
s_h[sj][si+my] = s_h[sj][si];
}
__syncthreads();
fluxQuadSharedG(&wrk1,s_r[sj],s_v[sj],si,d_dy);
rY[id.g] = wrk1;
__syncthreads();
fluxCubeSharedG(&wrk1,s_r[sj],s_v[sj],s_u[sj],si,d_dy);
uY[id.g] = wrk1;
__syncthreads();
fluxCubeSharedG(&wrk1,s_r[sj],s_v[sj],s_v[sj],si,d_dy);
vY[id.g] = wrk1;
__syncthreads();
fluxCubeSharedG(&wrk1,s_r[sj],s_v[sj],s_w[sj],si,d_dy);
wY[id.g] = wrk1;
__syncthreads();
fluxCubeSharedG(&wrk1,s_r[sj],s_v[sj],s_h[sj],si,d_dy);
eY[id.g] = wrk1;
__syncthreads();
}
__global__ void FLXDeviceZ(myprec *rZ, myprec *uZ, myprec *vZ, myprec *wZ, myprec *eZ,
myprec *r, myprec *u, myprec *v, myprec *w, myprec *h ,
myprec *t, myprec *p, myprec *mu, myprec *lam,
myprec *sij[9], myprec *dil) {
Indices id(threadIdx.x,threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y);
id.mkidZFlx();
int si = id.k + stencilSize; // local i for shared memory access + halo offset
int sj = id.tiy; // local j for shared memory access
myprec wrk1=0;
__shared__ myprec s_r[sPencils][mz+stencilSize*2];
__shared__ myprec s_u[sPencils][mz+stencilSize*2];
__shared__ myprec s_v[sPencils][mz+stencilSize*2];
__shared__ myprec s_w[sPencils][mz+stencilSize*2];
__shared__ myprec s_h[sPencils][mz+stencilSize*2];
s_r[sj][si] = r[id.g];
s_u[sj][si] = u[id.g];
s_v[sj][si] = v[id.g];
s_w[sj][si] = w[id.g];
s_h[sj][si] = h[id.g];
__syncthreads();
// fill in periodic images in shared memory array
if (id.k < stencilSize) {
s_r[sj][si-stencilSize] = s_r[sj][si+mz-stencilSize];
s_r[sj][si+mz] = s_r[sj][si];
s_u[sj][si-stencilSize] = s_u[sj][si+mz-stencilSize];
s_u[sj][si+mz] = s_u[sj][si];
s_v[sj][si-stencilSize] = s_v[sj][si+mz-stencilSize];
s_v[sj][si+mz] = s_v[sj][si];
s_w[sj][si-stencilSize] = s_w[sj][si+mz-stencilSize];
s_w[sj][si+mz] = s_w[sj][si];
s_h[sj][si-stencilSize] = s_h[sj][si+mz-stencilSize];
s_h[sj][si+mz] = s_h[sj][si];
}
__syncthreads();
//Adding here the terms - d (ru phi) dz;
fluxQuadSharedG(&wrk1,s_r[sj],s_w[sj],si,d_dz);
rZ[id.g] = wrk1;
__syncthreads();
fluxCubeSharedG(&wrk1,s_r[sj],s_w[sj],s_u[sj],si,d_dz);
uZ[id.g] = wrk1;
__syncthreads();
fluxCubeSharedG(&wrk1,s_r[sj],s_w[sj],s_v[sj],si,d_dz);
vZ[id.g] = wrk1;
__syncthreads();
fluxCubeSharedG(&wrk1,s_r[sj],s_w[sj],s_w[sj],si,d_dz);
wZ[id.g] = wrk1;
__syncthreads();
fluxCubeSharedG(&wrk1,s_r[sj],s_w[sj],s_h[sj],si,d_dz);
eZ[id.g] = wrk1;
__syncthreads();
}
__device__ void initRHS() {
checkCudaDev( cudaMalloc((void**)&d_workY1,mx*my*mz*sizeof(myprec)) );
checkCudaDev( cudaMalloc((void**)&d_workY2,mx*my*mz*sizeof(myprec)) );
checkCudaDev( cudaMalloc((void**)&d_workZ1,mx*my*mz*sizeof(myprec)) );
checkCudaDev( cudaMalloc((void**)&d_workZ2,mx*my*mz*sizeof(myprec)) );
}
__device__ void clearRHS() {
checkCudaDev( cudaFree(d_workY1) );
checkCudaDev( cudaFree(d_workY2) );
checkCudaDev( cudaFree(d_workZ1) );
checkCudaDev( cudaFree(d_workZ2) );
}
|
77aab003c593e9fe7486de90cc3abb133d0244f2.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2020 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// CUDA sample demonstrating a Double precision GEMM computation using the Warp
// Matrix Multiply and Accumulate API introduced in CUDA 11.0.
// In this program, the compute_dgemm kernel computes the result of a matrix multiplication
// and addition: D = alpha * A * B + beta * C. The dimensions of both C and D matrices
// are M_GLOBAL x N_GLOBAL. The A matrix is M_GLOBAL x K_GLOBAL (row-major), the B matrix
// is K_GLOBAL x N_GLOBAL (column-major).
// In that kernel, each CTA computes one 64 x 64 tile of the resulting matrix
// per iteration. When the tile is computed, the CTA stores it to the global memory
// and begins a new iteration, selecting a new 64 x 64 tile to compute.
// Each CTA consists of eight warps. For the 64 x 64 tile, each warp computes eight
// 8 x 8 subtiles, organized in a 2 x 4 two-dimensional array.
// Warps compute the 8 x 8 subtiles using nvcuda::wmma::mma_sync operations by
// moving through the K_GLOBAL dimension of the A and B matrices and accumulating
// the intermediate result in the local thread state.
// There are a number of simple optimizations used in the algorithm:
// - The CTA copies the 64 x 64 tile of the C matrix from the global memory to
// shared memory. After that is done, each warp loads the C matrix fragments from
// shared memory, thus avoiding a random global memory access.
// - On each internal iteration, the CTA copies a portion of the A and B matrices from
// global memory to shared memory. After that, all warps in the CTA reuse the A and B
// data from shared memory, thus reducing the number of data copies from global memory.
// - The portions of the A and B matrices are stored in shared memory with an additional
// padding (skew) to reduce the number of shared memory access bank conflicts.
// (See a detailed explanation near the SKEW_DOUBLE macro definition.)
// - When the CTA finishes computing the tiles of the resulting matrix, each warp stores
// its subtiles to shared memory. The CTA then copies the shared memory contents to
// global memory, again avoiding redundant random global memory accesses.
// - Note that the CTA tile size is chosen to maximize the GPU register utilization,
// but carefully enough to avoid local memory use.
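// Concrete sizes for the configuration below: the MMA tile is 8 x 8 x 4 (FP64) and
// M_TILES = N_TILES = K_TILES = 1024, so the GEMM is 8192 x 8192 x 4096. Each CTA has
// 8 warps (256 threads) and owns one 64 x 64 tile of D, so the result is covered by
// (8192 / 64)^2 = 16384 tiles, walked by the persistent grid of SM-count * 2 blocks
// launched from main().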
#include <assert.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <mma.h>
#include <cuda_pipeline.h>
#include <hip/hip_cooperative_groups.h>
#include <cooperative_groups/memcpy_async.h>
#include <cuda/std/type_traits>
#include <cuda/barrier>
// Switch for choosing cpp interface for cuda pipeline
// vs primitives interface.
#define USE_CPP_API 0
// helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
// Externally configurable parameters.
#ifndef CPU_DEBUG
// Set this to 1 to verify the correctness of the GPU-computed matrix.
#define CPU_DEBUG 0
#endif
#ifndef SHARED_MEMORY_LIMIT_64K
// Set this to 0 to use more than 64 Kb of shared memory to cache data, to
// improve the performance of the computations on GPU.
// Note that you need a GPU that can have more than 64 Kb of shared memory
// per multiprocessor.
#define SHARED_MEMORY_LIMIT_64K 0
#endif
// GPU configuration.
#define WARP_SIZE 32
// MMA matrix tile dimensions.
#define M 8
#define N 8
#define K 4
// GEMM configuration.
#define M_TILES 1024
#define N_TILES 1024
#define K_TILES 1024
#define M_GLOBAL (M * M_TILES)
#define N_GLOBAL (N * N_TILES)
#define K_GLOBAL (K * K_TILES)
#define C_LAYOUT wmma::mem_row_major
// Implementation constants.
#define WARPS_PER_BLOCK 8
#define THREADS_PER_BLOCK (WARP_SIZE * WARPS_PER_BLOCK)
#if SHARED_MEMORY_LIMIT_64K
// With only 64 Kb of shared memory available, we could fit 8x16-tile chunks of both
// the A and B matrix data, each (M = 8) * (K = 4) * 8 * (CHUNK_K = 16) * sizeof(double) = 32 Kb.
// But that leaves no room for the 4 Kb of total skew overhead, without which the performance
// would be severely impacted. So we choose to reduce the chunk size in half,
// i.e. the amount of A and B matrix data we cache in shared memory.
// Accordingly, this doubles the number of outer iterations across the global K
// dimension, which only slightly impacts the performance.
#define CHUNK_K 8
#else
#define CHUNK_K 16
#endif
#define CHUNK_LINE_BYTES (CHUNK_K * K * sizeof(double))
#define WARP_COPY_BYTES (WARP_SIZE * sizeof(int4))
#define CHUNK_COPY_LINES_PER_WARP (WARP_COPY_BYTES / CHUNK_LINE_BYTES)
#define CHUNK_COPY_LINE_LANES (WARP_SIZE / CHUNK_COPY_LINES_PER_WARP)
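// Worked numbers, assuming the default CHUNK_K = 16 selected above: one chunk line is
// 16 * 4 * 8 = 512 bytes and a warp moves 32 * 16 = 512 bytes per int4 transaction, so
// CHUNK_COPY_LINES_PER_WARP = 1 and CHUNK_COPY_LINE_LANES = 32 (the whole warp copies a
// single line at a time). With SHARED_MEMORY_LIMIT_64K (CHUNK_K = 8) the warp instead
// splits into two halves of 16 lanes, each copying its own 256-byte line.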
#define BLOCK_ROW_WARPS 2
#define BLOCK_COL_WARPS 4
#define WARP_ROW_TILES 4
#define WARP_COL_TILES 2
#define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS)
#define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS)
#define GLOBAL_MEM_STRIDE N_GLOBAL
#define SHMEM_STRIDE (N * BLOCK_ROW_TILES)
#define SHMEM_OFFSET (N * WARP_ROW_TILES)
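// Worked numbers for the configuration above: BLOCK_ROW_TILES = 4 * 2 = 8 and
// BLOCK_COL_TILES = 2 * 4 = 8, so a CTA tile is (8 * 8) x (8 * 8) = 64 x 64 doubles.
// SHMEM_STRIDE is therefore 64 doubles (one row of the C/D tile) and SHMEM_OFFSET is
// 32 doubles, the width of the 2 x 4 group of 8 x 8 subtiles owned by a single warp.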
// The macro below is used to shift rows of the A matrix and columns of the B matrix
// in shared memory to minimize possible bank conflicts.
// Before performing the nvcuda::wmma::mma_sync operation, the warp must load the matrix
// data using the nvcuda::wmma::load_matrix_sync operation. Although the memory access pattern
// is not specified for that function, each lane in the warp can read one or multiple matrix
// elements from different matrix rows or columns.
// For shared memory, such access can result in bank conflicts if different rows / columns
// of the matrix map to the same bank. By shifting each row and column by a few bytes, we
// make sure that they map to different banks, thus reducing the number of possible bank
// conflicts.
// A shift of 4 eight-byte "double" elements is chosen as the minimum possible shift because
// we must keep each row and column 256-bit aligned, as required by nvcuda::wmma::load_matrix_sync.
#define SKEW_DOUBLE 4
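// Worked number: with CHUNK_K = 16 a padded shared-memory row holds 16 * 4 + 4 = 68
// doubles (544 bytes), so consecutive rows start at byte offsets 0 / 32 / 64 / 96 within a
// 128-byte bank window instead of all starting at the same offset, while every row stays
// 32-byte (256-bit) aligned.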
#define checkKernelErrors(expr) do { \
expr; \
\
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
printf("Line %d: '%s' failed: %s\n", __LINE__, # expr, hipGetErrorString(__err)); \
abort(); \
} \
} while(0)
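// Intended use, as in main() below: wrap the whole launch expression, e.g.
// checkKernelErrors((hipLaunchKernelGGL(compute_dgemm, grid, block, SHMEM_SZ, 0, A, B, C, D, alpha, beta)));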
enum kernels
{
dmma_shmem_gemm_async_copy = 0, // DMMA shmem using kernel with async_copy
dmma_shmem_gemm_cg_async_copy = 1, // DMMA shmem using kernel with cooperative groups async_copy
dmma_shmem_gemm = 2, // DMMA shmem using kernel normal copy (without async_copy).
simple_dmma_gemm = 3 // DMMA non-shmem using simple kernel.
};
const char* kernelNames[] = {"compute_dgemm_async_copy", "compute_dgemm_cg_async_copy",
"compute_dgemm", "simple_wmma_gemm"};
using namespace nvcuda;
namespace nvcuda_namespace = nvcuda::experimental;
namespace cg = cooperative_groups;
__host__ void init_host_matrices(double *a, double *b, double *c)
{
for (int i = 0; i < M_GLOBAL; i++) {
for (int j = 0; j < K_GLOBAL; j++) {
a[i*K_GLOBAL+j] = (double) (rand() % 3);
}
}
for (int i = 0; i < N_GLOBAL; i++) {
for (int j = 0; j < K_GLOBAL; j++) {
b[i*K_GLOBAL+j] = (double) (rand() % 3);
}
}
for (int t = 0; t < M_GLOBAL * N_GLOBAL; t++) {
c[t] = (double) (rand() % 3);
}
}
__global__ void compute_dgemm(const double *A, const double *B, const double *C, double *D, double alpha, double beta)
{
#if __CUDA_ARCH__ >= 800
extern __shared__ double shmem[][CHUNK_K * K + SKEW_DOUBLE];
// Warp and lane identification.
const unsigned int warpId = threadIdx.x / WARP_SIZE;
const unsigned int laneId = threadIdx.x % WARP_SIZE;
// Offset in shared memory from which the B matrix is stored.
const size_t shmem_idx_b_off = BLOCK_COL_TILES * M;
// This pointer is used to access the C and D matrix tiles this warp computes.
double *shmem_warp_tile_ptr = (double*)&shmem[0][0] + (warpId / BLOCK_ROW_WARPS) * SHMEM_STRIDE * N * BLOCK_ROW_WARPS + (warpId % BLOCK_ROW_WARPS) * SHMEM_OFFSET;
// This pointer is used to stream the C and D matrices block-wide tile to and from shared memory.
double *shmem_warp_stream_ptr = (double*)&shmem[0][0] + warpId * SHMEM_STRIDE * N;
// Adjust the beta scaler, as it'll be multiplied by alpha at the end of
// each tile computation. Technically this is not generally correct (may result
// in a loss of precision). Zero still needs to be specially handled though.
beta /= alpha;
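// After this adjustment the math below computes, per element,
// alpha * ( (beta/alpha) * C + sum_k A*B ) = beta * C + alpha * A * B,
// because the C fragments are multiplied by the adjusted beta right after they are
// loaded and the final accumulators are multiplied by alpha just before the store.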
// Each CTA slides along the 64 x 64 tiles from the top left corner of the matrix to the
// right and down, and selects the next tile to compute. Once there's no such tile,
// all warps in this CTA exit.
for(unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
const unsigned int block_tile_i = ((block_pos * BLOCK_ROW_TILES) / N_TILES) * (BLOCK_COL_TILES);
const unsigned int block_tile_j = (block_pos * BLOCK_COL_TILES) % N_TILES;
// Stop when there are no more D matrix tiles to compute in this CTA.
if (block_tile_i >= M_TILES) {
break;
}
// This warp's pointer to the C matrix data that it copies from global memory to shared memory.
const size_t gmem_idx = (block_tile_i + warpId) * M * GLOBAL_MEM_STRIDE + block_tile_j * N;
const double *src_gmem_warp_stream_ptr = &C[gmem_idx];
// Stream multiple C tiles to shared memory.
#pragma unroll
for (int i = 0; i < N; i++) {
*((int4 *)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId) =
*((int4 *)(src_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId);
}
__syncthreads();
// These fragments will accumulate the result of A and B matrix fragment multiplications
// along the K_GLOBAL dimension.
wmma::fragment<wmma::accumulator, M, N, K, double> c[WARP_COL_TILES][WARP_ROW_TILES];
// Load the C matrix tiles into fragments from shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
const double *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N;
wmma::load_matrix_sync(c[i][j], tile_ptr, SHMEM_STRIDE, C_LAYOUT);
}
}
__syncthreads();
// Scale the C matrix.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
#pragma unroll
for (int t = 0; t < c[i][j].num_elements; t++) {
c[i][j].x[t] *= beta;
}
}
}
// Select what warp copies what matrix to shared memory.
// Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix.
const double *warp_ptr = (warpId < (WARPS_PER_BLOCK/2)) ? (&A[block_tile_i * M * K_GLOBAL] + M * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2) :
(&B[block_tile_j * N * K_GLOBAL] + N * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2);
// Go through the global K dimension by a fixed step at a time.
#pragma unroll
for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) {
// Copy slices of the A and B matrices to shared memory.
// The first half of the warps in the CTA copy the A matrix, the rest copy the B matrix.
size_t shmem_idx = warpId < (WARPS_PER_BLOCK/2) ? (M * (warpId % (WARPS_PER_BLOCK/2)) * 2) :
(N * (warpId % (WARPS_PER_BLOCK/2)) * 2 + shmem_idx_b_off);
// First half of the warp copies the first row / column of the matrix,
// the second half of the warp copies the next.
const double *lane_ptr = warp_ptr + tile_k * K + (laneId / CHUNK_COPY_LINE_LANES) * K_GLOBAL;
// Shift the second half of the warp to the next row / column in the shared memory.
shmem_idx += laneId / CHUNK_COPY_LINE_LANES;
#pragma unroll
for(int i = 0; i < ((WARP_SIZE/2) / CHUNK_COPY_LINES_PER_WARP); i++) {
// Copy 16 bytes at once in each lane.
*((int4*)&shmem[shmem_idx][0] + (laneId % CHUNK_COPY_LINE_LANES)) = *((int4*)lane_ptr + (laneId % CHUNK_COPY_LINE_LANES));
// Advance the global memory pointer and the shared memory index.
lane_ptr = lane_ptr + K_GLOBAL * CHUNK_COPY_LINES_PER_WARP;
shmem_idx += CHUNK_COPY_LINES_PER_WARP;
}
__syncthreads();
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int k_step = 0; k_step < CHUNK_K; k_step++) {
wmma::fragment<wmma::matrix_a, M, N, K, double, wmma::row_major> a[WARP_COL_TILES];
wmma::fragment<wmma::matrix_b, M, N, K, double, wmma::col_major> b[WARP_ROW_TILES];
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId/2) * M * 2 + (i * M);
const double *tile_ptr = &shmem[shmem_idx_a][k_step * K];
wmma::load_matrix_sync(a[i], tile_ptr, K * CHUNK_K + SKEW_DOUBLE);
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be reused
// against the other A matrix fragments.
size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId%2) + (j * N);
const double *tile_ptr = &shmem[shmem_idx_b][k_step * K];
wmma::load_matrix_sync(b[j], tile_ptr, K * CHUNK_K + SKEW_DOUBLE);
}
wmma::mma_sync(c[i][j], a[i], b[j], c[i][j]);
}
}
}
__syncthreads();
}
// Store the D fragments to shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
// Uniform, point-wise transformations of ALL fragment elements by ALL threads in the
// warp are well-defined even though element indices within fragment storage are not defined.
#pragma unroll
for (int t = 0; t < c[i][j].num_elements; t++)
c[i][j].x[t] *= alpha;
double *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N;
wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT);
}
}
__syncthreads();
// Now that shared memory contains all the D tiles, stream them to global memory.
double *dst_gmem_warp_stream_ptr = &D[gmem_idx];
#pragma unroll
for (int i = 0; i < N; i++) {
*((int4*)(dst_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId) =
*((int4*)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId);
}
__syncthreads();
}
#endif
}
__global__ void compute_dgemm_async_copy(const double *A, const double *B, const double *C, double *D, double alpha, double beta)
{
#if __CUDA_ARCH__ >= 800
extern __shared__ double shmem[][CHUNK_K * K + SKEW_DOUBLE];
// Warp and lane identification.
const unsigned int warpId = threadIdx.x / WARP_SIZE;
const unsigned int laneId = threadIdx.x % WARP_SIZE;
// Offset in shared memory from which the B matrix is stored.
const size_t shmem_idx_b_off = BLOCK_COL_TILES * M;
// This pointer is used to access the C and D matrix tiles this warp computes.
double *shmem_warp_tile_ptr = (double*)&shmem[0][0] + (warpId/BLOCK_ROW_WARPS) * SHMEM_STRIDE * N * BLOCK_ROW_WARPS + (warpId % BLOCK_ROW_WARPS) * SHMEM_OFFSET;
// This pointer is used to stream the C and D matrices block-wide tile to and from shared memory.
double *shmem_warp_stream_ptr = (double*)&shmem[0][0] + warpId * SHMEM_STRIDE * N;
// Adjust the beta scaler, as it'll be multiplied by alpha at the end of
// each tile computation. Technically this is not generally correct (may result
// in a loss of precision). Zero still needs to be specially handled though.
beta /= alpha;
// Each CTA slides along the 64 x 64 tiles from the top left corner of the matrix to the
// right and down, and selects the next tile to compute. Once there's no such tile,
// all warps in this CTA exit.
#if USE_CPP_API
nvcuda_namespace::pipeline pipe;
#endif
for(unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
const unsigned int block_tile_i = ((block_pos * BLOCK_ROW_TILES) / N_TILES) * (BLOCK_COL_TILES);
const unsigned int block_tile_j = (block_pos * BLOCK_COL_TILES) % N_TILES;
// Stop when there are no more D matrix tiles to compute in this CTA.
if (block_tile_i >= M_TILES) {
break;
}
// This warp's pointer to the C matrix data that it copies from global memory to shared memory.
const size_t gmem_idx = (block_tile_i + warpId) * M * GLOBAL_MEM_STRIDE + block_tile_j * N;
const double *src_gmem_warp_stream_ptr = &C[gmem_idx];
// Stream multiple C tiles to shared memory.
#pragma unroll
for (int i = 0; i < N; i++) {
#if USE_CPP_API
nvcuda_namespace::memcpy_async(*((int4*)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId),
*((int4*)(src_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId),
pipe);
pipe.commit();
#else
__pipeline_memcpy_async((reinterpret_cast<int4*>(&shmem_warp_stream_ptr[(SHMEM_STRIDE * i)])) + laneId,
(reinterpret_cast<const int4*>(&src_gmem_warp_stream_ptr[(GLOBAL_MEM_STRIDE * i)])) + laneId,
sizeof(int4));
__pipeline_commit();
#endif
}
// Now wait for all the above issued 8 batches to complete.
#if USE_CPP_API
pipe.wait_prior<0>();
#else
__pipeline_wait_prior(0);
#endif
__syncthreads();
// These fragments will accumulate the result of A and B matrix fragment multiplications
// along the K_GLOBAL dimension.
wmma::fragment<wmma::accumulator, M, N, K, double> c[WARP_COL_TILES][WARP_ROW_TILES];
// Load the C matrix tiles into fragments from shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
const double *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N;
wmma::load_matrix_sync(c[i][j], tile_ptr, SHMEM_STRIDE, C_LAYOUT);
}
}
__syncthreads();
// Scale the C matrix.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
#pragma unroll
for (int t = 0; t < c[i][j].num_elements; t++) {
c[i][j].x[t] *= beta;
}
}
}
// Select what warp copies what matrix to shared memory.
// Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix.
const double *warp_ptr = (warpId < (WARPS_PER_BLOCK/2)) ? (&A[block_tile_i * M * K_GLOBAL] + M * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2) :
(&B[block_tile_j * N * K_GLOBAL] + N * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2);
// Go through the global K dimension by a fixed step at a time.
#pragma unroll
for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) {
// Copy slices of the A and B matrices to shared memory.
// The first half of the warps in the CTA copy the A matrix, the rest copy the B matrix.
size_t shmem_idx = warpId < (WARPS_PER_BLOCK/2) ? (M * (warpId % (WARPS_PER_BLOCK/2)) * 2) :
(N * (warpId % (WARPS_PER_BLOCK/2)) * 2 + shmem_idx_b_off);
// First half of the warp copies the first row / column of the matrix,
// the second half of the warp copies the next.
const double *lane_ptr = warp_ptr + tile_k * K + (laneId / CHUNK_COPY_LINE_LANES) * K_GLOBAL;
// Shift the second half of the warp to the next row / column in the shared memory.
shmem_idx += laneId / CHUNK_COPY_LINE_LANES;
#pragma unroll
for(int i = 0; i < ((WARP_SIZE/2) / CHUNK_COPY_LINES_PER_WARP); i++) {
// Copy 16 bytes at once in each lane.
#if USE_CPP_API
nvcuda_namespace::memcpy_async(*((int4*)&shmem[shmem_idx][0] + (laneId % CHUNK_COPY_LINE_LANES)),
*((int4*)lane_ptr + (laneId % CHUNK_COPY_LINE_LANES)), pipe);
pipe.commit();
#else
__pipeline_memcpy_async((int4*)&shmem[shmem_idx][0] + (laneId % CHUNK_COPY_LINE_LANES),
(int4*)lane_ptr + (laneId % CHUNK_COPY_LINE_LANES), sizeof(int4));
__pipeline_commit();
#endif
// Advance the global memory pointer and the shared memory index.
lane_ptr = lane_ptr + K_GLOBAL * CHUNK_COPY_LINES_PER_WARP;
shmem_idx += CHUNK_COPY_LINES_PER_WARP;
}
#if USE_CPP_API
pipe.wait_prior<0>();
#else
__pipeline_wait_prior(0);
#endif
__syncthreads();
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int k_step = 0; k_step < CHUNK_K; k_step++) {
wmma::fragment<wmma::matrix_a, M, N, K, double, wmma::row_major> a[WARP_COL_TILES];
wmma::fragment<wmma::matrix_b, M, N, K, double, wmma::col_major> b[WARP_ROW_TILES];
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId/2) * M * 2 + (i * M);
const double *tile_ptr = &shmem[shmem_idx_a][k_step * K];
wmma::load_matrix_sync(a[i], tile_ptr, K * CHUNK_K + SKEW_DOUBLE);
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be reused
// against the other A matrix fragments.
size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId%2) + (j * N);
const double *tile_ptr = &shmem[shmem_idx_b][k_step * K];
wmma::load_matrix_sync(b[j], tile_ptr, K * CHUNK_K + SKEW_DOUBLE);
}
wmma::mma_sync(c[i][j], a[i], b[j], c[i][j]);
}
}
}
__syncthreads();
}
// Store the D fragments to shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
// Uniform, point-wise transformations of ALL fragment elements by ALL threads in the
// warp are well-defined even though element indices within fragment storage are not defined.
#pragma unroll
for (int t = 0; t < c[i][j].num_elements; t++)
c[i][j].x[t] *= alpha;
double *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N;
wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT);
}
}
__syncthreads();
// Now that shared memory contains all the D tiles, stream them to global memory.
double *dst_gmem_warp_stream_ptr = &D[gmem_idx];
#pragma unroll
for (int i = 0; i < N; i++) {
*((int4*)(dst_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId) =
*((int4*)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId);
}
__syncthreads();
}
#endif
}
__global__ void compute_dgemm_cg_async_copy(const double *A, const double *B, const double *C, double *D, double alpha, double beta)
{
#if __CUDA_ARCH__ >= 800
extern __shared__ double shmem[][CHUNK_K * K + SKEW_DOUBLE];
auto cta = cg::this_thread_block();
auto tile32 = cg::tiled_partition<32>(cta);
constexpr int tileChunkCopySize = WARP_SIZE / CHUNK_COPY_LINES_PER_WARP;
auto tileChunkCopy = cg::tiled_partition<tileChunkCopySize>(cta);
// Warp and lane identification.
const unsigned int warpId = threadIdx.x / WARP_SIZE;
const unsigned int laneId = threadIdx.x % WARP_SIZE;
// Offset in shared memory from which the B matrix is stored.
const size_t shmem_idx_b_off = BLOCK_COL_TILES * M;
// This pointer is used to access the C and D matrix tiles this warp computes.
double *shmem_warp_tile_ptr = (double*)&shmem[0][0] + (warpId/2) * SHMEM_STRIDE * N * 2 + (warpId%2) * SHMEM_OFFSET;
// This pointer is used to stream the C and D matrices block-wide tile to and from shared memory.
double *shmem_warp_stream_ptr = (double*)&shmem[0][0] + warpId * SHMEM_STRIDE * N;
// Adjust the beta scaler, as it'll be multiplied by alpha at the end of
// each tile computation. Technically this is not generally correct (may result
// in a loss of precision). Zero still needs to be specially handled though.
beta /= alpha;
// Each CTA slides along the 64 x 64 tiles from the top left corner of the matrix to the
// right and down, and selects the next tile to compute. Once there's no such tile,
// all warps in this CTA exit.
for(unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
const unsigned int block_tile_i = ((block_pos * BLOCK_ROW_TILES) / N_TILES) * (BLOCK_COL_TILES);
const unsigned int block_tile_j = (block_pos * BLOCK_COL_TILES) % N_TILES;
// Stop when there are no more D matrix tiles to compute in this CTA.
if (block_tile_i >= M_TILES) {
break;
}
// This warp's pointer to the C matrix data that it copies from global memory to shared memory.
const size_t gmem_idx = (block_tile_i + warpId) * M * GLOBAL_MEM_STRIDE + block_tile_j * N;
const double *src_gmem_warp_stream_ptr = &C[gmem_idx];
// Stream multiple C tiles to shared memory.
#pragma unroll
for (int i = 0; i < N; i++) {
auto dst_ptr = &shmem_warp_stream_ptr[(SHMEM_STRIDE * i)];
auto src_ptr = &src_gmem_warp_stream_ptr[(GLOBAL_MEM_STRIDE * i)];
cg::memcpy_async(tile32, dst_ptr, src_ptr, cuda::aligned_size_t<32>{tile32.size() * sizeof(int4)});
}
cg::wait(cta);
// These fragments will accumulate the result of A and B matrix fragment multiplications
// along the K_GLOBAL dimension.
wmma::fragment<wmma::accumulator, M, N, K, double> c[WARP_COL_TILES][WARP_ROW_TILES];
// Load the C matrix tiles into fragments from shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
const double *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N;
wmma::load_matrix_sync(c[i][j], tile_ptr, SHMEM_STRIDE, C_LAYOUT);
}
}
cg::wait(cta);
// Scale the C matrix.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
#pragma unroll
for (int t = 0; t < c[i][j].num_elements; t++) {
c[i][j].x[t] *= beta;
}
}
}
// Select what warp copies what matrix to shared memory.
// Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix.
const double *warp_ptr = (warpId < 4) ? (&A[block_tile_i * M * K_GLOBAL] + M * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2) :
(&B[block_tile_j * N * K_GLOBAL] + N * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2);
// Go through the global K dimension by a fixed step at a time.
#pragma unroll
for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) {
// Copy slices of the A and B matrices to shared memory.
// The first half of the warps in the CTA copy the A matrix, the rest copy the B matrix.
size_t shmem_idx = warpId < (WARPS_PER_BLOCK/2) ? (M * (warpId % (WARPS_PER_BLOCK/2)) * 2) :
(N * (warpId % (WARPS_PER_BLOCK/2)) * 2 + shmem_idx_b_off);
// First half of the warp copies the first row / column of the matrix,
// the second half of the warp copies the next.
auto lane_ptr = warp_ptr + tile_k * K + (laneId / CHUNK_COPY_LINE_LANES) * K_GLOBAL;
// Shift the second half of the warp to the next row / column in the shared memory.
shmem_idx += laneId / CHUNK_COPY_LINE_LANES;
#pragma unroll
for(int i = 0; i < ((WARP_SIZE/2) / CHUNK_COPY_LINES_PER_WARP); i++) {
// Copy 16 bytes at once in each lane.
auto dst_ptr = &shmem[shmem_idx][0];
auto src_ptr = lane_ptr;
cg::memcpy_async(tileChunkCopy, dst_ptr, src_ptr, cuda::aligned_size_t<32>{tile32.size() * sizeof(int4)});
// Advance the global memory pointer and the shared memory index.
lane_ptr = lane_ptr + K_GLOBAL * CHUNK_COPY_LINES_PER_WARP;
shmem_idx += CHUNK_COPY_LINES_PER_WARP;
}
cg::wait(cta);
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int k_step = 0; k_step < CHUNK_K; k_step++) {
wmma::fragment<wmma::matrix_a, M, N, K, double, wmma::row_major> a[WARP_COL_TILES];
wmma::fragment<wmma::matrix_b, M, N, K, double, wmma::col_major> b[WARP_ROW_TILES];
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId/2) * M * 2 + (i * M);
const double *tile_ptr = &shmem[shmem_idx_a][k_step * K];
wmma::load_matrix_sync(a[i], tile_ptr, K * CHUNK_K + SKEW_DOUBLE);
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be reused
// against the other A matrix fragments.
size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId%2) + (j * N);
const double *tile_ptr = &shmem[shmem_idx_b][k_step * K];
wmma::load_matrix_sync(b[j], tile_ptr, K * CHUNK_K + SKEW_DOUBLE);
}
wmma::mma_sync(c[i][j], a[i], b[j], c[i][j]);
}
}
}
cg::wait(cta);
}
// Store the D fragments to shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
// Uniform, point-wise transformations of ALL fragment elements by ALL threads in the
// warp are well-defined even though element indices within fragment storage are not defined.
#pragma unroll
for (int t = 0; t < c[i][j].num_elements; t++)
c[i][j].x[t] *= alpha;
double *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N;
wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT);
}
}
cg::wait(cta);
// Now that shared memory contains all the D tiles, stream them to global memory.
double *dst_gmem_warp_stream_ptr = &D[gmem_idx];
#pragma unroll
for (int i = 0; i < N; i++) {
*((int4*)(dst_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId) =
*((int4*)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId);
}
cg::wait(cta);
}
#endif
}
// Performs an MxNxK DGEMM (C=alpha*A*B + beta*C) assuming:
// 1) Matrices are packed in memory.
// 2) M, N and K are multiples of 8, 8 and 4 respectively.
// 3) A is row major, B is column major matrix.
// Note: This is a less performant version of the compute_dgemm kernel. It is designed for
// demonstration purposes only to show the CUDA WMMA API use without relying on
// availability of the shared memory.
__global__ void simple_wmma_gemm(double *a, double *b, double *c, double *d, int m_ld, int n_ld, int k_ld, double alpha, double beta)
{
#if __CUDA_ARCH__ >= 800
// Leading dimensions. Packed with no transpositions.
int lda = k_ld;
int ldb = k_ld;
int ldc = n_ld;
// Tile using a 2D grid
int warpM = (blockIdx.x * blockDim.x + threadIdx.x) / warpSize;
int warpN = (blockIdx.y * blockDim.y + threadIdx.y);
// Declare the fragments
wmma::fragment<wmma::matrix_a, M, N, K, double, wmma::row_major> a_frag;
wmma::fragment<wmma::matrix_b, M, N, K, double, wmma::col_major> b_frag;
wmma::fragment<wmma::accumulator, M, N, K, double> acc_frag;
wmma::fragment<wmma::accumulator, M, N, K, double> c_frag;
wmma::fill_fragment(acc_frag, 0.0f);
// Loop over k
for (int i = 0; i < k_ld; i += K) {
int aCol = i;
int aRow = warpM * M;
int bCol = warpN * N;
int bRow = i;
// Bounds checking
if (aRow < m_ld && aCol < k_ld && bRow < k_ld && bCol < n_ld) {
// Load the inputs
wmma::load_matrix_sync(a_frag, a + aCol + aRow * lda, lda);
wmma::load_matrix_sync(b_frag, b + bRow + bCol * ldb, ldb);
// Perform the matrix multiplication
wmma::mma_sync(acc_frag, a_frag, b_frag, acc_frag);
}
}
// Load in the current value of C, scale it by beta, and add to it our result scaled by alpha
int cCol = warpN * N;
int cRow = warpM * M;
if (cRow < m_ld && cCol < n_ld) {
wmma::load_matrix_sync(c_frag, c + cCol + cRow * ldc, ldc, wmma::mem_row_major);
for(int i=0; i < c_frag.num_elements; i++) {
c_frag.x[i] = alpha * acc_frag.x[i] + beta * c_frag.x[i];
}
// Store the output
wmma::store_matrix_sync(d + cCol + cRow * ldc, c_frag, ldc, wmma::mem_row_major);
}
#endif
}
__host__ void matMultiplyOnHost(double *A, double *B, double *C,
float alpha, float beta,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
for (int i = 0; i < numCRows; i++) {
for (int j = 0; j < numCColumns; j++) {
double temp = 0.0;
for (int k = 0; k < numAColumns; k++) {
// B matrix is column major. A matrix is row major.
temp += A[i * numAColumns + k] * B[j * numBRows + k];
}
C[i*numCColumns + j] = temp * alpha + beta * C[i * numCColumns + j];
}
}
}
int main(int argc, char **argv)
{
printf("Initializing...\n");
int dev = findCudaDevice(argc, (const char **)argv);
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));
// Double precision Tensor cores require a GPU of Ampere (SM8X) architecture or higher.
if (deviceProp.major < 8) {
printf("dmmaTensorCoreGemm requires SM 8.0 or higher. Exiting...\n");
exit(EXIT_WAIVED);
}
printf("M: %d (%d x %d)\n", M_GLOBAL, M, M_TILES);
printf("N: %d (%d x %d)\n", N_GLOBAL, N, N_TILES);
printf("K: %d (%d x %d)\n", K_GLOBAL, K, K_TILES);
double *A_h = NULL;
double *B_h = NULL;
double *C_h = NULL;
#if CPU_DEBUG
double *result_hD = NULL;
double *result_host = NULL;
#endif
A_h = (double*) malloc(sizeof(double) * M_GLOBAL * K_GLOBAL);
B_h = (double*) malloc(sizeof(double) * K_GLOBAL * N_GLOBAL);
C_h = (double*) malloc(sizeof(double) * M_GLOBAL * N_GLOBAL);
#if CPU_DEBUG
result_hD = (double*) malloc(sizeof(double) * M_GLOBAL * N_GLOBAL);
result_host = (double*) malloc(sizeof(double) * M_GLOBAL * N_GLOBAL);
#endif
double *A = NULL;
double *B = NULL;
double *C = NULL;
double *D = NULL;
checkCudaErrors(hipMalloc((void**)&A, sizeof(double) * M_GLOBAL * K_GLOBAL));
checkCudaErrors(hipMalloc((void**)&B, sizeof(double) * N_GLOBAL * K_GLOBAL));
checkCudaErrors(hipMalloc((void**)&C, sizeof(double) * M_GLOBAL * N_GLOBAL));
checkCudaErrors(hipMalloc((void**)&D, sizeof(double) * M_GLOBAL * N_GLOBAL));
assert(((unsigned long long)A) % 128 == 0);
assert(((unsigned long long)B) % 128 == 0);
assert(((unsigned long long)C) % 128 == 0);
assert(((unsigned long long)D) % 128 == 0);
init_host_matrices(A_h, B_h, C_h);
printf("Preparing data for GPU...\n");
checkCudaErrors(hipMemcpy(A, A_h, sizeof(double) * M_GLOBAL * K_GLOBAL, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(B, B_h, sizeof(double) * N_GLOBAL * K_GLOBAL, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(C, C_h, sizeof(double) * M_GLOBAL * N_GLOBAL, hipMemcpyHostToDevice));
checkCudaErrors(hipMemset(D, 0, sizeof(double) * M_GLOBAL * N_GLOBAL));
enum {
// Compute the right amount of shared memory to request.
// We need shared memory to hold per-CTA C and D matrix tiles, and to cache per-CTA chunks
// of the A and B matrices. Therefore, the right amount to request is the maximum of those
// two numbers.
SHMEM_SZ = MAX(sizeof(double) * (BLOCK_COL_TILES * M) * (CHUNK_K * K + SKEW_DOUBLE) * 2,
M * (BLOCK_ROW_WARPS * WARP_ROW_TILES) * N * (BLOCK_COL_WARPS * WARP_COL_TILES) * sizeof(double))
};
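// With the default CHUNK_K = 16 the first operand is 8 * 64 * 68 * 2 = 69632 bytes
// (the A and B chunks, skew included) and the second is 8 * 8 * 8 * 8 * 8 = 32768 bytes
// (the 64 x 64 C/D tile), so SHMEM_SZ = 69632 bytes (68 Kb); with
// SHARED_MEMORY_LIMIT_64K it drops to 8 * 64 * 36 * 2 = 36864 bytes (36 Kb).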
printf("Required shared memory size: %lu Kb\n", SHMEM_SZ / 1024UL);
const double alpha = 1.1f;
const double beta = 1.2f;
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipEventRecord(start));
kernels selected_kernel = dmma_shmem_gemm_async_copy;
// kernel to run - default (dmma_shmem_gemm_async_copy == 0)
if (checkCmdLineFlag(argc, (const char **)argv, "kernel")) {
int kernel_number = getCmdLineArgumentInt(argc, (const char **)argv, "kernel");
if (kernel_number < 4)
{
selected_kernel = (kernels)kernel_number;
}
else
{
printf("Error: kernel number should be between 0 to 3, you have entered %d\n", kernel_number);
exit(EXIT_FAILURE);
}
}
// If enough shared memory available on the GPU use high performant kernel
if ((deviceProp.sharedMemPerMultiprocessor >= SHMEM_SZ) && (selected_kernel != simple_dmma_gemm))
{
printf("Computing using high performance kernel = %d - %s\n", selected_kernel, kernelNames[selected_kernel]);
switch (selected_kernel)
{
case dmma_shmem_gemm_async_copy :
default:
checkCudaErrors(hipFuncSetAttribute(compute_dgemm_async_copy, hipFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ));
checkKernelErrors((hipLaunchKernelGGL(compute_dgemm_async_copy, dim3(deviceProp.multiProcessorCount*2), dim3(THREADS_PER_BLOCK), SHMEM_SZ, 0, A, B, C, D, alpha, beta)));
break;
case dmma_shmem_gemm_cg_async_copy :
checkCudaErrors(hipFuncSetAttribute(compute_dgemm_cg_async_copy, hipFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ));
checkKernelErrors((hipLaunchKernelGGL(compute_dgemm_cg_async_copy, dim3(deviceProp.multiProcessorCount*2), dim3(THREADS_PER_BLOCK), SHMEM_SZ, 0, A, B, C, D, alpha, beta)));
break;
case dmma_shmem_gemm :
checkCudaErrors(hipFuncSetAttribute(compute_dgemm, hipFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ));
checkKernelErrors((hipLaunchKernelGGL(compute_dgemm, dim3(deviceProp.multiProcessorCount*2), dim3(THREADS_PER_BLOCK), SHMEM_SZ, 0, A, B, C, D, alpha, beta)));
break;
}
#if CPU_DEBUG
checkCudaErrors(hipMemcpy(result_hD, D, sizeof(double)*M_GLOBAL*N_GLOBAL, hipMemcpyDeviceToHost));
#endif
}
else
{
dim3 gridDim;
dim3 blockDim;
// blockDim.x must be a multiple of warpSize
// 128x4 means we have 16 warps and a block computes a 32x32 output tile
blockDim.x = 128;
blockDim.y = 4;
gridDim.x = (M_GLOBAL + (M * blockDim.x / 32 - 1)) / (M * blockDim.x / 32);
gridDim.y = (N_GLOBAL + N * blockDim.y - 1) / (N * blockDim.y);
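// Worked numbers: with M = N = 8 each block covers (8 * 128 / 32) = 32 rows and
// (8 * 4) = 32 columns of D, so for the 8192 x 8192 problem gridDim is 256 x 256.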
printf("Computing... using simple_wmma_gemm kernel\n");
hipLaunchKernelGGL(simple_wmma_gemm, dim3(gridDim), dim3(blockDim), 0, 0, A, B, C, D, M_GLOBAL, N_GLOBAL, K_GLOBAL, alpha, beta);
#if CPU_DEBUG
checkCudaErrors(hipMemcpy(result_hD, D, sizeof(double) * M_GLOBAL * N_GLOBAL, hipMemcpyDeviceToHost));
#endif
}
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
#if CPU_DEBUG
printf("Verifying correctness of the computations...\n");
memcpy(result_host, C_h, sizeof(double) * M_GLOBAL * N_GLOBAL);
matMultiplyOnHost(A_h, B_h, result_host,
alpha, beta,
M_GLOBAL, K_GLOBAL,
K_GLOBAL, N_GLOBAL,
M_GLOBAL, N_GLOBAL);
size_t number_of_matches = 0;
for (int i = 0; i < N_GLOBAL*M_GLOBAL; i++) {
if (fabs(result_hD[i] - result_host[i]) > 0.1f)
{
printf("mismatch i=%d result_hD=%f result_host=%f\n", i, result_hD[i], result_host[i]);
break;
}
else
{
number_of_matches++;
}
}
printf("number_of_matches = %zu out of = %d \n", number_of_matches, N_GLOBAL*M_GLOBAL);
free(result_hD);
free(result_host);
#endif
float milliseconds = 0;
checkCudaErrors(hipEventElapsedTime(&milliseconds, start, stop));
printf("Time: %f ms\n", milliseconds);
printf("FP64 TFLOPS: %.2f\n", (((double)M_GLOBAL * N_GLOBAL * K_GLOBAL * 2)/(milliseconds/1000.)) / 1e12);
free(A_h);
free(B_h);
free(C_h);
checkCudaErrors(hipFree((void*)A));
checkCudaErrors(hipFree((void*)B));
checkCudaErrors(hipFree((void*)C));
checkCudaErrors(hipFree((void*)D));
return 0;
}
| 77aab003c593e9fe7486de90cc3abb133d0244f2.cu | /*
* Copyright 1993-2020 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// CUDA sample demonstrating a Double precision GEMM computation using the Warp
// Matrix Multiply and Accumulate API introduced in CUDA 11.0.
// In this program, the compute_dgemm kernel computes the result of a matrix multiplication
// and addition: D = alpha * A * B + beta * C. The dimensions of both C and D matrices
// are M_GLOBAL x N_GLOBAL. The A matrix is M_GLOBAL x K_GLOBAL (row-major), the B matrix
// is K_GLOBAL x N_GLOBAL (column-major).
// In that kernel, each CTA computes one 64 x 64 tile of the resulting matrix
// per iteration. When the tile is computed, the CTA stores it to the global memory
// and begins a new iteration, selecting a new 64 x 64 tile to compute.
// Each CTA consists of eight warps. For the 64 x 64 tile, each warp computes eight
// 8 x 8 subtiles, organized in a 2 x 4 two-dimensional array.
// Warps compute the 8 x 8 subtiles using nvcuda::wmma::mma_sync operations by
// moving through the K_GLOBAL dimension of the A and B matrices and accumulating
// the intermediate result in the local thread state.
// There are a number of simple optimizations used in the algorithm:
// - The CTA copies the 64 x 64 tile of the C matrix from the global memory to
// shared memory. After that is done, each warp loads the C matrix fragments from
// shared memory, thus avoiding a random global memory access.
// - On each internal iteration, the CTA copies a portion of the A and B matrices from
// global memory to shared memory. After that, all warps in the CTA reuse the A and B
// data from shared memory, thus reducing the number of data copies from global memory.
// - The portions of the A and B matrices are stored in shared memory with an additional
// padding (skew) to reduce the number of shared memory access bank conflicts.
// (See a detailed explanation near the SKEW_DOUBLE macro definition.)
// - When the CTA finishes computing the tiles of the resulting matrix, each warp stores
// its subtiles to shared memory. The CTA then copies the shared memory contents to
// global memory, again avoiding redundant random global memory accesses.
// - Note that the CTA tile size is chosen to maximize the GPU register utilization,
// but carefully enough to avoid local memory use.
#include <assert.h>
#include <stdio.h>
#include <cuda.h>
#include <mma.h>
#include <cuda_pipeline.h>
#include <cooperative_groups.h>
#include <cooperative_groups/memcpy_async.h>
#include <cuda/std/type_traits>
#include <cuda/barrier>
// Switch for choosing cpp interface for cuda pipeline
// vs primitives interface.
#define USE_CPP_API 0
// helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
// Externally configurable parameters.
#ifndef CPU_DEBUG
// Set this to 1 to verify the correctness of the GPU-computed matrix.
#define CPU_DEBUG 0
#endif
#ifndef SHARED_MEMORY_LIMIT_64K
// Set this to 0 to use more than 64 Kb of shared memory to cache data, to
// improve the performance of the computations on GPU.
// Note that you need a GPU that can have more than 64 Kb of shared memory
// per multiprocessor.
#define SHARED_MEMORY_LIMIT_64K 0
#endif
// GPU configuration.
#define WARP_SIZE 32
// MMA matrix tile dimensions.
#define M 8
#define N 8
#define K 4
// GEMM configuration.
#define M_TILES 1024
#define N_TILES 1024
#define K_TILES 1024
#define M_GLOBAL (M * M_TILES)
#define N_GLOBAL (N * N_TILES)
#define K_GLOBAL (K * K_TILES)
#define C_LAYOUT wmma::mem_row_major
// Implementation constants.
#define WARPS_PER_BLOCK 8
#define THREADS_PER_BLOCK (WARP_SIZE * WARPS_PER_BLOCK)
#if SHARED_MEMORY_LIMIT_64K
// With only 64 Kb of shared memory available, we could fit 8x16-tile chunks of both
// the A and B matrix data, each (M = 8) * (K = 4) * 8 * (CHUNK_K = 16) * sizeof(double) = 32 Kb.
// But that leaves no room for the 4 Kb of total skew overhead, without which the performance
// would be severely impacted. So we choose to reduce the chunk size in half,
// i.e. the amount of A and B matrix data we cache in shared memory.
// Accordingly, this doubles the number of outer iterations across the global K
// dimension, which only slightly impacts the performance.
#define CHUNK_K 8
#else
#define CHUNK_K 16
#endif
#define CHUNK_LINE_BYTES (CHUNK_K * K * sizeof(double))
#define WARP_COPY_BYTES (WARP_SIZE * sizeof(int4))
#define CHUNK_COPY_LINES_PER_WARP (WARP_COPY_BYTES / CHUNK_LINE_BYTES)
#define CHUNK_COPY_LINE_LANES (WARP_SIZE / CHUNK_COPY_LINES_PER_WARP)
#define BLOCK_ROW_WARPS 2
#define BLOCK_COL_WARPS 4
#define WARP_ROW_TILES 4
#define WARP_COL_TILES 2
#define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS)
#define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS)
#define GLOBAL_MEM_STRIDE N_GLOBAL
#define SHMEM_STRIDE (N * BLOCK_ROW_TILES)
#define SHMEM_OFFSET (N * WARP_ROW_TILES)
// The macro below is used to shift rows of the A matrix and columns of the B matrix
// in shared memory to minimize possible bank conflicts.
// Before performing the nvcuda::wmma::mma_sync operation, the warp must load the matrix
// data using the nvcuda::wmma::load_matrix_sync operation. Although the memory access pattern
// is not specified for that function, each lane in the warp can read one or multiple matrix
// elements from different matrix rows or columns.
// For shared memory, such access can result in bank conflicts if different rows / columns
// of the matrix map to the same bank. By shifting each row and column by a few bytes, we
// make sure that they map to different banks, thus reducing the number of possible bank
// conflicts.
// A shift of 4 eight-byte "double" elements is chosen as the minimum possible shift because
// we must keep each row and column 256-bit aligned, as required by nvcuda::wmma::load_matrix_sync.
#define SKEW_DOUBLE 4
#define checkKernelErrors(expr) do { \
expr; \
\
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
printf("Line %d: '%s' failed: %s\n", __LINE__, # expr, cudaGetErrorString(__err)); \
abort(); \
} \
} while(0)
enum kernels
{
dmma_shmem_gemm_async_copy = 0, // DMMA shmem using kernel with async_copy
dmma_shmem_gemm_cg_async_copy = 1, // DMMA shmem using kernel with cooperative groups async_copy
dmma_shmem_gemm = 2, // DMMA shmem using kernel normal copy (without async_copy).
simple_dmma_gemm = 3 // DMMA non-shmem using simple kernel.
};
const char* kernelNames[] = {"compute_dgemm_async_copy", "compute_dgemm_cg_async_copy",
"compute_dgemm", "simple_wmma_gemm"};
using namespace nvcuda;
namespace nvcuda_namespace = nvcuda::experimental;
namespace cg = cooperative_groups;
__host__ void init_host_matrices(double *a, double *b, double *c)
{
for (int i = 0; i < M_GLOBAL; i++) {
for (int j = 0; j < K_GLOBAL; j++) {
a[i*K_GLOBAL+j] = (double) (rand() % 3);
}
}
for (int i = 0; i < N_GLOBAL; i++) {
for (int j = 0; j < K_GLOBAL; j++) {
b[i*K_GLOBAL+j] = (double) (rand() % 3);
}
}
for (int t = 0; t < M_GLOBAL * N_GLOBAL; t++) {
c[t] = (double) (rand() % 3);
}
}
__global__ void compute_dgemm(const double *A, const double *B, const double *C, double *D, double alpha, double beta)
{
#if __CUDA_ARCH__ >= 800
extern __shared__ double shmem[][CHUNK_K * K + SKEW_DOUBLE];
// Warp and lane identification.
const unsigned int warpId = threadIdx.x / WARP_SIZE;
const unsigned int laneId = threadIdx.x % WARP_SIZE;
// Offset in shared memory from which the B matrix is stored.
const size_t shmem_idx_b_off = BLOCK_COL_TILES * M;
// This pointer is used to access the C and D matrix tiles this warp computes.
double *shmem_warp_tile_ptr = (double*)&shmem[0][0] + (warpId / BLOCK_ROW_WARPS) * SHMEM_STRIDE * N * BLOCK_ROW_WARPS + (warpId % BLOCK_ROW_WARPS) * SHMEM_OFFSET;
// This pointer is used to stream the C and D matrices block-wide tile to and from shared memory.
double *shmem_warp_stream_ptr = (double*)&shmem[0][0] + warpId * SHMEM_STRIDE * N;
// Adjust the beta scaler, as it'll be multiplied by alpha at the end of
// each tile computation. Technically this is not generally correct (may result
// in a loss of precision). Zero still needs to be specially handled though.
beta /= alpha;
// Each CTA slides along the 64 x 64 tiles from the top left corner of the matrix to the
// right and down, and selects the next tile to compute. Once there's no such tile,
// all warps in this CTA exit.
for(unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
const unsigned int block_tile_i = ((block_pos * BLOCK_ROW_TILES) / N_TILES) * (BLOCK_COL_TILES);
const unsigned int block_tile_j = (block_pos * BLOCK_COL_TILES) % N_TILES;
// Stop when there are no more D matrix tiles to compute in this CTA.
if (block_tile_i >= M_TILES) {
break;
}
// This warp's pointer to the C matrix data that it copies from global memory to shared memory.
const size_t gmem_idx = (block_tile_i + warpId) * M * GLOBAL_MEM_STRIDE + block_tile_j * N;
const double *src_gmem_warp_stream_ptr = &C[gmem_idx];
// Stream multiple C tiles to shared memory.
#pragma unroll
for (int i = 0; i < N; i++) {
*((int4 *)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId) =
*((int4 *)(src_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId);
}
__syncthreads();
// These fragments will accumulate the result of A and B matrix fragment multiplications
// along the K_GLOBAL dimension.
wmma::fragment<wmma::accumulator, M, N, K, double> c[WARP_COL_TILES][WARP_ROW_TILES];
// Load the C matrix tiles into fragments from shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
const double *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N;
wmma::load_matrix_sync(c[i][j], tile_ptr, SHMEM_STRIDE, C_LAYOUT);
}
}
__syncthreads();
// Scale the C matrix.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
#pragma unroll
for (int t = 0; t < c[i][j].num_elements; t++) {
c[i][j].x[t] *= beta;
}
}
}
// Select what warp copies what matrix to shared memory.
// Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix.
const double *warp_ptr = (warpId < (WARPS_PER_BLOCK/2)) ? (&A[block_tile_i * M * K_GLOBAL] + M * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2) :
(&B[block_tile_j * N * K_GLOBAL] + N * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2);
// Go through the global K dimension by a fixed step at a time.
#pragma unroll
for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) {
// Copy slices of the A and B matrices to shared memory.
// The first half of the warps in the CTA copy the A matrix, the rest copy the B matrix.
size_t shmem_idx = warpId < (WARPS_PER_BLOCK/2) ? (M * (warpId % (WARPS_PER_BLOCK/2)) * 2) :
(N * (warpId % (WARPS_PER_BLOCK/2)) * 2 + shmem_idx_b_off);
// First half of the warp copies the first row / column of the matrix,
// the second half of the warp copies the next.
const double *lane_ptr = warp_ptr + tile_k * K + (laneId / CHUNK_COPY_LINE_LANES) * K_GLOBAL;
// Shift the second half of the warp to the next row / column in the shared memory.
shmem_idx += laneId / CHUNK_COPY_LINE_LANES;
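// With the default CHUNK_K = 16, CHUNK_COPY_LINES_PER_WARP = 1 and
// CHUNK_COPY_LINE_LANES = 32, so laneId / CHUNK_COPY_LINE_LANES is 0 for every lane,
// the whole warp copies one 512-byte line per iteration, and the loop below runs
// 16 / 1 = 16 times: each warp stages 16 of the 64 rows of its A or B chunk.
// (The half-warp split described above applies to the CHUNK_K = 8 configuration.)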
#pragma unroll
for(int i = 0; i < ((WARP_SIZE/2) / CHUNK_COPY_LINES_PER_WARP); i++) {
// Copy 16 bytes at once in each lane.
*((int4*)&shmem[shmem_idx][0] + (laneId % CHUNK_COPY_LINE_LANES)) = *((int4*)lane_ptr + (laneId % CHUNK_COPY_LINE_LANES));
// Advance the global memory pointer and the shared memory index.
lane_ptr = lane_ptr + K_GLOBAL * CHUNK_COPY_LINES_PER_WARP;
shmem_idx += CHUNK_COPY_LINES_PER_WARP;
}
__syncthreads();
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int k_step = 0; k_step < CHUNK_K; k_step++) {
wmma::fragment<wmma::matrix_a, M, N, K, double, wmma::row_major> a[WARP_COL_TILES];
wmma::fragment<wmma::matrix_b, M, N, K, double, wmma::col_major> b[WARP_ROW_TILES];
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId/2) * M * 2 + (i * M);
const double *tile_ptr = &shmem[shmem_idx_a][k_step * K];
wmma::load_matrix_sync(a[i], tile_ptr, K * CHUNK_K + SKEW_DOUBLE);
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be reused
// against the other A matrix fragments.
size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId%2) + (j * N);
const double *tile_ptr = &shmem[shmem_idx_b][k_step * K];
wmma::load_matrix_sync(b[j], tile_ptr, K * CHUNK_K + SKEW_DOUBLE);
}
wmma::mma_sync(c[i][j], a[i], b[j], c[i][j]);
}
}
}
__syncthreads();
}
// Store the D fragments to shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
// Uniform, point-wise transformations of ALL fragment elements by ALL threads in the
// warp are well-defined even though element indices within fragment storage are not defined.
#pragma unroll
for (int t = 0; t < c[i][j].num_elements; t++)
c[i][j].x[t] *= alpha;
double *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N;
wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT);
}
}
__syncthreads();
// Now that shared memory contains all the D tiles, stream them to global memory.
double *dst_gmem_warp_stream_ptr = &D[gmem_idx];
#pragma unroll
for (int i = 0; i < N; i++) {
*((int4*)(dst_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId) =
*((int4*)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId);
}
__syncthreads();
}
#endif
}
__global__ void compute_dgemm_async_copy(const double *A, const double *B, const double *C, double *D, double alpha, double beta)
{
#if __CUDA_ARCH__ >= 800
extern __shared__ double shmem[][CHUNK_K * K + SKEW_DOUBLE];
// Warp and lane identification.
const unsigned int warpId = threadIdx.x / WARP_SIZE;
const unsigned int laneId = threadIdx.x % WARP_SIZE;
// Offset in shared memory from which the B matrix is stored.
const size_t shmem_idx_b_off = BLOCK_COL_TILES * M;
// This pointer is used to access the C and D matrix tiles this warp computes.
double *shmem_warp_tile_ptr = (double*)&shmem[0][0] + (warpId/BLOCK_ROW_WARPS) * SHMEM_STRIDE * N * BLOCK_ROW_WARPS + (warpId % BLOCK_ROW_WARPS) * SHMEM_OFFSET;
// This pointer is used to stream the C and D matrices block-wide tile to and from shared memory.
double *shmem_warp_stream_ptr = (double*)&shmem[0][0] + warpId * SHMEM_STRIDE * N;
// Adjust the beta scaler, as it'll be multiplied by alpha at the end of
// each tile computation. Technically this is not generally correct (may result
// in a loss of precision). Zero still needs to be specially handled though.
beta /= alpha;
// Each CTA slides along the 64 x 64 tiles from the top left corner of the matrix to the
// right and down, and selects the next tile to compute. Once there's no such tile,
// all warps in this CTA exit.
#if USE_CPP_API
nvcuda_namespace::pipeline pipe;
#endif
for(unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
const unsigned int block_tile_i = ((block_pos * BLOCK_ROW_TILES) / N_TILES) * (BLOCK_COL_TILES);
const unsigned int block_tile_j = (block_pos * BLOCK_COL_TILES) % N_TILES;
// Stop when there are no more D matrix tiles to compute in this CTA.
if (block_tile_i >= M_TILES) {
break;
}
// This warp's pointer to the C matrix data that it copies from global memory to shared memory.
const size_t gmem_idx = (block_tile_i + warpId) * M * GLOBAL_MEM_STRIDE + block_tile_j * N;
const double *src_gmem_warp_stream_ptr = &C[gmem_idx];
// Stream multiple C tiles to shared memory.
#pragma unroll
for (int i = 0; i < N; i++) {
#if USE_CPP_API
nvcuda_namespace::memcpy_async(*((int4*)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId),
*((int4*)(src_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId),
pipe);
pipe.commit();
#else
__pipeline_memcpy_async((reinterpret_cast<int4*>(&shmem_warp_stream_ptr[(SHMEM_STRIDE * i)])) + laneId,
(reinterpret_cast<const int4*>(&src_gmem_warp_stream_ptr[(GLOBAL_MEM_STRIDE * i)])) + laneId,
sizeof(int4));
__pipeline_commit();
#endif
}
// Now wait for all the above issued 8 batches to complete.
#if USE_CPP_API
pipe.wait_prior<0>();
#else
__pipeline_wait_prior(0);
#endif
__syncthreads();
// These fragments will accumulate the result of A and B matrix fragment multiplications
// along the K_GLOBAL dimension.
wmma::fragment<wmma::accumulator, M, N, K, double> c[WARP_COL_TILES][WARP_ROW_TILES];
// Load the C matrix tiles into fragments from shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
const double *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N;
wmma::load_matrix_sync(c[i][j], tile_ptr, SHMEM_STRIDE, C_LAYOUT);
}
}
__syncthreads();
// Scale the C matrix.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
#pragma unroll
for (int t = 0; t < c[i][j].num_elements; t++) {
c[i][j].x[t] *= beta;
}
}
}
// Select what warp copies what matrix to shared memory.
// Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix.
const double *warp_ptr = (warpId < (WARPS_PER_BLOCK/2)) ? (&A[block_tile_i * M * K_GLOBAL] + M * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2) :
(&B[block_tile_j * N * K_GLOBAL] + N * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2);
// Go through the global K dimension by a fixed step at a time.
#pragma unroll
for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) {
// Copy slices of the A and B matrices to shared memory.
// The first half of the warps in the CTA copy the A matrix, the rest copy the B matrix.
size_t shmem_idx = warpId < (WARPS_PER_BLOCK/2) ? (M * (warpId % (WARPS_PER_BLOCK/2)) * 2) :
(N * (warpId % (WARPS_PER_BLOCK/2)) * 2 + shmem_idx_b_off);
// First half of the warp copies the first row / column of the matrix,
// the second half of the warp copies the next.
const double *lane_ptr = warp_ptr + tile_k * K + (laneId / CHUNK_COPY_LINE_LANES) * K_GLOBAL;
// Shift the second half of the warp to the next row / column in the shared memory.
shmem_idx += laneId / CHUNK_COPY_LINE_LANES;
#pragma unroll
for(int i = 0; i < ((WARP_SIZE/2) / CHUNK_COPY_LINES_PER_WARP); i++) {
// Copy 16 bytes at once in each lane.
#if USE_CPP_API
nvcuda_namespace::memcpy_async(*((int4*)&shmem[shmem_idx][0] + (laneId % CHUNK_COPY_LINE_LANES)),
*((int4*)lane_ptr + (laneId % CHUNK_COPY_LINE_LANES)), pipe);
pipe.commit();
#else
__pipeline_memcpy_async((int4*)&shmem[shmem_idx][0] + (laneId % CHUNK_COPY_LINE_LANES),
(int4*)lane_ptr + (laneId % CHUNK_COPY_LINE_LANES), sizeof(int4));
__pipeline_commit();
#endif
// Advance the global memory pointer and the shared memory index.
lane_ptr = lane_ptr + K_GLOBAL * CHUNK_COPY_LINES_PER_WARP;
shmem_idx += CHUNK_COPY_LINES_PER_WARP;
}
#if USE_CPP_API
pipe.wait_prior<0>();
#else
__pipeline_wait_prior(0);
#endif
__syncthreads();
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int k_step = 0; k_step < CHUNK_K; k_step++) {
wmma::fragment<wmma::matrix_a, M, N, K, double, wmma::row_major> a[WARP_COL_TILES];
wmma::fragment<wmma::matrix_b, M, N, K, double, wmma::col_major> b[WARP_ROW_TILES];
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId/2) * M * 2 + (i * M);
const double *tile_ptr = &shmem[shmem_idx_a][k_step * K];
wmma::load_matrix_sync(a[i], tile_ptr, K * CHUNK_K + SKEW_DOUBLE);
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be reused
// against the other A matrix fragments.
size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId%2) + (j * N);
const double *tile_ptr = &shmem[shmem_idx_b][k_step * K];
wmma::load_matrix_sync(b[j], tile_ptr, K * CHUNK_K + SKEW_DOUBLE);
}
wmma::mma_sync(c[i][j], a[i], b[j], c[i][j]);
}
}
}
__syncthreads();
}
// Store the D fragments to shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
// Uniform, point-wise transformations of ALL fragment elements by ALL threads in the
// warp are well-defined even though element indices within fragment storage are not defined.
#pragma unroll
for (int t = 0; t < c[i][j].num_elements; t++)
c[i][j].x[t] *= alpha;
double *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N;
wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT);
}
}
__syncthreads();
// Now that shared memory contains all the D tiles, stream them to global memory.
double *dst_gmem_warp_stream_ptr = &D[gmem_idx];
#pragma unroll
for (int i = 0; i < N; i++) {
*((int4*)(dst_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId) =
*((int4*)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId);
}
__syncthreads();
}
#endif
}
__global__ void compute_dgemm_cg_async_copy(const double *A, const double *B, const double *C, double *D, double alpha, double beta)
{
#if __CUDA_ARCH__ >= 800
extern __shared__ double shmem[][CHUNK_K * K + SKEW_DOUBLE];
auto cta = cg::this_thread_block();
auto tile32 = cg::tiled_partition<32>(cta);
constexpr int tileChunkCopySize = WARP_SIZE / CHUNK_COPY_LINES_PER_WARP;
auto tileChunkCopy = cg::tiled_partition<tileChunkCopySize>(cta);
// Warp and lane identification.
const unsigned int warpId = threadIdx.x / WARP_SIZE;
const unsigned int laneId = threadIdx.x % WARP_SIZE;
// Offset in shared memory from which the B matrix is stored.
const size_t shmem_idx_b_off = BLOCK_COL_TILES * M;
// This pointer is used to access the C and D matrix tiles this warp computes.
double *shmem_warp_tile_ptr = (double*)&shmem[0][0] + (warpId/2) * SHMEM_STRIDE * N * 2 + (warpId%2) * SHMEM_OFFSET;
// This pointer is used to stream the C and D matrices block-wide tile to and from shared memory.
double *shmem_warp_stream_ptr = (double*)&shmem[0][0] + warpId * SHMEM_STRIDE * N;
// Adjust the beta scalar, as it'll be multiplied by alpha at the end of
// each tile computation. Technically this is not generally correct (may result
// in a loss of precision). Zero still needs to be specially handled though.
beta /= alpha;
// Each CTA slides along the 64 x 64 tiles from the top left corner of the matrix to the
// right and down, and selects the next tile to compute. Once there's no such tile,
// all warps in this CTA exit.
for(unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
const unsigned int block_tile_i = ((block_pos * BLOCK_ROW_TILES) / N_TILES) * (BLOCK_COL_TILES);
const unsigned int block_tile_j = (block_pos * BLOCK_COL_TILES) % N_TILES;
// Stop when there are no more D matrix tiles to compute in this CTA.
if (block_tile_i >= M_TILES) {
break;
}
// This warp's pointer to the C matrix data to copy memory from to shared memory.
const size_t gmem_idx = (block_tile_i + warpId) * M * GLOBAL_MEM_STRIDE + block_tile_j * N;
const double *src_gmem_warp_stream_ptr = &C[gmem_idx];
// Stream multiple C tiles to shared memory.
#pragma unroll
for (int i = 0; i < N; i++) {
auto dst_ptr = &shmem_warp_stream_ptr[(SHMEM_STRIDE * i)];
auto src_ptr = &src_gmem_warp_stream_ptr[(GLOBAL_MEM_STRIDE * i)];
cg::memcpy_async(tile32, dst_ptr, src_ptr, cuda::aligned_size_t<32>{tile32.size() * sizeof(int4)});
}
cg::wait(cta);
// These fragments will accumulate the result of A and B matrix fragment multiplications
// along the K_GLOBAL dimension.
wmma::fragment<wmma::accumulator, M, N, K, double> c[WARP_COL_TILES][WARP_ROW_TILES];
// Load the C matrix tiles into fragments from shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
const double *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N;
wmma::load_matrix_sync(c[i][j], tile_ptr, SHMEM_STRIDE, C_LAYOUT);
}
}
cg::wait(cta);
// Scale the C matrix.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
#pragma unroll
for (int t = 0; t < c[i][j].num_elements; t++) {
c[i][j].x[t] *= beta;
}
}
}
// Select what warp copies what matrix to shared memory.
// Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix.
const double *warp_ptr = (warpId < 4) ? (&A[block_tile_i * M * K_GLOBAL] + M * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2) :
(&B[block_tile_j * N * K_GLOBAL] + N * K_GLOBAL * (warpId % (WARPS_PER_BLOCK/2)) * 2);
// Go through the global K dimension by a fixed step at a time.
#pragma unroll
for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) {
// Copy slices of the A and B matrices to shared memory.
// The first half of the warps in the CTA copy the A matrix, the rest copy the B matrix.
size_t shmem_idx = warpId < (WARPS_PER_BLOCK/2) ? (M * (warpId % (WARPS_PER_BLOCK/2)) * 2) :
(N * (warpId % (WARPS_PER_BLOCK/2)) * 2 + shmem_idx_b_off);
// First half of the warp copies the first row / column of the matrix,
// the second half of the warp copies the next.
auto lane_ptr = warp_ptr + tile_k * K + (laneId / CHUNK_COPY_LINE_LANES) * K_GLOBAL;
// Shift the second half of the warp to the next row / column in the shared memory.
shmem_idx += laneId / CHUNK_COPY_LINE_LANES;
#pragma unroll
for(int i = 0; i < ((WARP_SIZE/2) / CHUNK_COPY_LINES_PER_WARP); i++) {
// Copy 16 bytes at once in each lane.
auto dst_ptr = &shmem[shmem_idx][0];
auto src_ptr = lane_ptr;
cg::memcpy_async(tileChunkCopy, dst_ptr, src_ptr, cuda::aligned_size_t<32>{tile32.size() * sizeof(int4)});
// Advance the global memory pointer and the shared memory index.
lane_ptr = lane_ptr + K_GLOBAL * CHUNK_COPY_LINES_PER_WARP;
shmem_idx += CHUNK_COPY_LINES_PER_WARP;
}
cg::wait(cta);
// Compute a grid of C matrix tiles in each warp.
#pragma unroll
for (int k_step = 0; k_step < CHUNK_K; k_step++) {
wmma::fragment<wmma::matrix_a, M, N, K, double, wmma::row_major> a[WARP_COL_TILES];
wmma::fragment<wmma::matrix_b, M, N, K, double, wmma::col_major> b[WARP_ROW_TILES];
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
size_t shmem_idx_a = (warpId/2) * M * 2 + (i * M);
const double *tile_ptr = &shmem[shmem_idx_a][k_step * K];
wmma::load_matrix_sync(a[i], tile_ptr, K * CHUNK_K + SKEW_DOUBLE);
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
if (i == 0) {
// Load the B matrix fragment once, because it is going to be reused
// against the other A matrix fragments.
size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId%2) + (j * N);
const double *tile_ptr = &shmem[shmem_idx_b][k_step * K];
wmma::load_matrix_sync(b[j], tile_ptr, K * CHUNK_K + SKEW_DOUBLE);
}
wmma::mma_sync(c[i][j], a[i], b[j], c[i][j]);
}
}
}
cg::wait(cta);
}
// Store the D fragments to shared memory.
#pragma unroll
for (int i = 0; i < WARP_COL_TILES; i++) {
#pragma unroll
for (int j = 0; j < WARP_ROW_TILES; j++) {
// Uniform, point-wise transformations of ALL fragment elements by ALL threads in the
// warp are well-defined even though element indices within fragment storage are not defined.
#pragma unroll
for (int t = 0; t < c[i][j].num_elements; t++)
c[i][j].x[t] *= alpha;
double *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * N + j * N;
wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT);
}
}
cg::wait(cta);
// Now that shared memory contains all the D tiles, stream them to global memory.
double *dst_gmem_warp_stream_ptr = &D[gmem_idx];
#pragma unroll
for (int i = 0; i < N; i++) {
*((int4*)(dst_gmem_warp_stream_ptr + GLOBAL_MEM_STRIDE * i) + laneId) =
*((int4*)(shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId);
}
cg::wait(cta);
}
#endif
}
// Performs an MxNxK DGEMM (C=alpha*A*B + beta*C) assuming:
// 1) Matrices are packed in memory.
// 2) M, N and K are multiples of 8, 8 and 4 respectively.
// 3) A is row major, B is column major matrix.
// Note: This is a less performant version of the compute_dgemm kernel. It is designed for
// demonstration purposes only to show the CUDA WMMA API use without relying on
// availability of the shared memory.
__global__ void simple_wmma_gemm(double *a, double *b, double *c, double *d, int m_ld, int n_ld, int k_ld, double alpha, double beta)
{
#if __CUDA_ARCH__ >= 800
// Leading dimensions. Packed with no transpositions.
int lda = k_ld;
int ldb = k_ld;
int ldc = n_ld;
// Tile using a 2D grid
int warpM = (blockIdx.x * blockDim.x + threadIdx.x) / warpSize;
int warpN = (blockIdx.y * blockDim.y + threadIdx.y);
// Declare the fragments
wmma::fragment<wmma::matrix_a, M, N, K, double, wmma::row_major> a_frag;
wmma::fragment<wmma::matrix_b, M, N, K, double, wmma::col_major> b_frag;
wmma::fragment<wmma::accumulator, M, N, K, double> acc_frag;
wmma::fragment<wmma::accumulator, M, N, K, double> c_frag;
wmma::fill_fragment(acc_frag, 0.0f);
// Loop over k
for (int i = 0; i < k_ld; i += K) {
int aCol = i;
int aRow = warpM * M;
int bCol = warpN * N;
int bRow = i;
// Bounds checking
if (aRow < m_ld && aCol < k_ld && bRow < k_ld && bCol < n_ld) {
// Load the inputs
wmma::load_matrix_sync(a_frag, a + aCol + aRow * lda, lda);
wmma::load_matrix_sync(b_frag, b + bRow + bCol * ldb, ldb);
// Perform the matrix multiplication
wmma::mma_sync(acc_frag, a_frag, b_frag, acc_frag);
}
}
// Load in the current value of c, scale it by beta, and add it to our result scaled by alpha
int cCol = warpN * N;
int cRow = warpM * M;
if (cRow < m_ld && cCol < n_ld) {
wmma::load_matrix_sync(c_frag, c + cCol + cRow * ldc, ldc, wmma::mem_row_major);
for(int i=0; i < c_frag.num_elements; i++) {
c_frag.x[i] = alpha * acc_frag.x[i] + beta * c_frag.x[i];
}
// Store the output
wmma::store_matrix_sync(d + cCol + cRow * ldc, c_frag, ldc, wmma::mem_row_major);
}
#endif
}
__host__ void matMultiplyOnHost(double *A, double *B, double *C,
float alpha, float beta,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
for (int i = 0; i < numCRows; i++) {
for (int j = 0; j < numCColumns; j++) {
double temp = 0.0;
for (int k = 0; k < numAColumns; k++) {
// B matrix is column major. A matrix is row major.
temp += A[i * numAColumns + k] * B[j * numBRows + k];
}
C[i*numCColumns + j] = temp * alpha + beta * C[i * numCColumns + j];
}
}
}
int main(int argc, char **argv)
{
printf("Initializing...\n");
int dev = findCudaDevice(argc, (const char **)argv);
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev));
// Double precision Tensor cores require a GPU of Ampere (SM8X) architecture or higher.
if (deviceProp.major < 8) {
printf("dmmaTensorCoreGemm requires SM 8.0 or higher. Exiting...\n");
exit(EXIT_WAIVED);
}
printf("M: %d (%d x %d)\n", M_GLOBAL, M, M_TILES);
printf("N: %d (%d x %d)\n", N_GLOBAL, N, N_TILES);
printf("K: %d (%d x %d)\n", K_GLOBAL, K, K_TILES);
double *A_h = NULL;
double *B_h = NULL;
double *C_h = NULL;
#if CPU_DEBUG
double *result_hD = NULL;
double *result_host = NULL;
#endif
A_h = (double*) malloc(sizeof(double) * M_GLOBAL * K_GLOBAL);
B_h = (double*) malloc(sizeof(double) * K_GLOBAL * N_GLOBAL);
C_h = (double*) malloc(sizeof(double) * M_GLOBAL * N_GLOBAL);
#if CPU_DEBUG
result_hD = (double*) malloc(sizeof(double) * M_GLOBAL * N_GLOBAL);
result_host = (double*) malloc(sizeof(double) * M_GLOBAL * N_GLOBAL);
#endif
double *A = NULL;
double *B = NULL;
double *C = NULL;
double *D = NULL;
checkCudaErrors(cudaMalloc((void**)&A, sizeof(double) * M_GLOBAL * K_GLOBAL));
checkCudaErrors(cudaMalloc((void**)&B, sizeof(double) * N_GLOBAL * K_GLOBAL));
checkCudaErrors(cudaMalloc((void**)&C, sizeof(double) * M_GLOBAL * N_GLOBAL));
checkCudaErrors(cudaMalloc((void**)&D, sizeof(double) * M_GLOBAL * N_GLOBAL));
assert(((unsigned long long)A) % 128 == 0);
assert(((unsigned long long)B) % 128 == 0);
assert(((unsigned long long)C) % 128 == 0);
assert(((unsigned long long)D) % 128 == 0);
init_host_matrices(A_h, B_h, C_h);
printf("Preparing data for GPU...\n");
checkCudaErrors(cudaMemcpy(A, A_h, sizeof(double) * M_GLOBAL * K_GLOBAL, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(B, B_h, sizeof(double) * N_GLOBAL * K_GLOBAL, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(C, C_h, sizeof(double) * M_GLOBAL * N_GLOBAL, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemset(D, 0, sizeof(double) * M_GLOBAL * N_GLOBAL));
enum {
// Compute the right amount of shared memory to request.
// We need shared memory to hold per-CTA C and D matrix tiles, and to cache per-CTA chunks
// of the A and B matrices. Therefore, the right amount to request is the maximum of those
// two numbers.
SHMEM_SZ = MAX(sizeof(double) * (BLOCK_COL_TILES * M) * (CHUNK_K * K + SKEW_DOUBLE) * 2,
M * (BLOCK_ROW_WARPS * WARP_ROW_TILES) * N * (BLOCK_COL_WARPS * WARP_COL_TILES) * sizeof(double))
};
printf("Required shared memory size: %lu Kb\n", SHMEM_SZ / 1024UL);
const double alpha = 1.1f;
const double beta = 1.2f;
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaEventRecord(start));
kernels selected_kernel = dmma_shmem_gemm_async_copy;
// kernel to run - default (dmma_shmem_gemm_async_copy == 0)
if (checkCmdLineFlag(argc, (const char **)argv, "kernel")) {
int kernel_number = getCmdLineArgumentInt(argc, (const char **)argv, "kernel");
if (kernel_number < 4)
{
selected_kernel = (kernels)kernel_number;
}
else
{
printf("Error: kernel number should be between 0 to 3, you have entered %d\n", kernel_number);
exit(EXIT_FAILURE);
}
}
// If enough shared memory is available on the GPU, use the high-performance kernel
if ((deviceProp.sharedMemPerMultiprocessor >= SHMEM_SZ) && (selected_kernel != simple_dmma_gemm))
{
printf("Computing using high performance kernel = %d - %s\n", selected_kernel, kernelNames[selected_kernel]);
switch (selected_kernel)
{
case dmma_shmem_gemm_async_copy :
default:
checkCudaErrors(cudaFuncSetAttribute(compute_dgemm_async_copy, cudaFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ));
checkKernelErrors((compute_dgemm_async_copy<<<deviceProp.multiProcessorCount*2, THREADS_PER_BLOCK, SHMEM_SZ>>>(A, B, C, D, alpha, beta)));
break;
case dmma_shmem_gemm_cg_async_copy :
checkCudaErrors(cudaFuncSetAttribute(compute_dgemm_cg_async_copy, cudaFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ));
checkKernelErrors((compute_dgemm_cg_async_copy<<<deviceProp.multiProcessorCount*2, THREADS_PER_BLOCK, SHMEM_SZ>>>(A, B, C, D, alpha, beta)));
break;
case dmma_shmem_gemm :
checkCudaErrors(cudaFuncSetAttribute(compute_dgemm, cudaFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ));
checkKernelErrors((compute_dgemm<<<deviceProp.multiProcessorCount*2, THREADS_PER_BLOCK, SHMEM_SZ>>>(A, B, C, D, alpha, beta)));
break;
}
#if CPU_DEBUG
checkCudaErrors(cudaMemcpy(result_hD, D, sizeof(double)*M_GLOBAL*N_GLOBAL, cudaMemcpyDeviceToHost));
#endif
}
else
{
dim3 gridDim;
dim3 blockDim;
// blockDim.x must be a multiple of warpSize
// 128x4 means we have 16 warps and a block computes a 64x64 output tile
blockDim.x = 128;
blockDim.y = 4;
gridDim.x = (M_GLOBAL + (M * blockDim.x / 32 - 1)) / (M * blockDim.x / 32);
gridDim.y = (N_GLOBAL + N * blockDim.y - 1) / (N * blockDim.y);
printf("Computing... using simple_wmma_gemm kernel\n");
simple_wmma_gemm<<<gridDim, blockDim>>>(A, B, C, D, M_GLOBAL, N_GLOBAL, K_GLOBAL, alpha, beta);
#if CPU_DEBUG
checkCudaErrors(cudaMemcpy(result_hD, D, sizeof(double) * M_GLOBAL * N_GLOBAL, cudaMemcpyDeviceToHost));
#endif
}
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
#if CPU_DEBUG
printf("Verifying correctness of the computations...\n");
memcpy(result_host, C_h, sizeof(double) * M_GLOBAL * N_GLOBAL);
matMultiplyOnHost(A_h, B_h, result_host,
alpha, beta,
M_GLOBAL, K_GLOBAL,
K_GLOBAL, N_GLOBAL,
M_GLOBAL, N_GLOBAL);
size_t number_of_matches = 0;
for (int i = 0; i < N_GLOBAL*M_GLOBAL; i++) {
if (fabs(result_hD[i] - result_host[i]) > 0.1f)
{
printf("mismatch i=%d result_hD=%f result_host=%f\n", i, result_hD[i], result_host[i]);
break;
}
else
{
number_of_matches++;
}
}
printf("number_of_matches = %zu out of = %d \n", number_of_matches, N_GLOBAL*M_GLOBAL);
free(result_hD);
free(result_host);
#endif
float milliseconds = 0;
checkCudaErrors(cudaEventElapsedTime(&milliseconds, start, stop));
printf("Time: %f ms\n", milliseconds);
printf("FP64 TFLOPS: %.2f\n", (((double)M_GLOBAL * N_GLOBAL * K_GLOBAL * 2)/(milliseconds/1000.)) / 1e12);
free(A_h);
free(B_h);
free(C_h);
checkCudaErrors(cudaFree((void*)A));
checkCudaErrors(cudaFree((void*)B));
checkCudaErrors(cudaFree((void*)C));
checkCudaErrors(cudaFree((void*)D));
return 0;
}
|
03fc80a4294e8c8e8d0a56111da08e5ae7639b01.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <stdlib.h>
#include <vector>
#include <random>
using namespace std;
void printNeighbours(unordered_map<long, unordered_set<long>> neighbours) {
for (auto& n : neighbours) {
cout << n.first << ": ";
for (auto& s : n.second) {
cout << s << " ";
}
cout << endl;
}
}
void printWalks(long** walks, int numNodes, int walkPerNode, int walkLength) {
for (int i = 0; i < numNodes * walkPerNode; i++) {
for (int j = 0; j < walkLength; j++) {
cout << walks[i][j] << " ";
}
cout << endl;
}
}
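// Picks the next node of a biased second-order random walk (node2vec-style weighting):
// from curNode, stepping back to prevNode has weight 1/p, stepping to a neighbour that
// prevNode also links to has weight 1, and any other neighbour has weight 1/q. On the
// first step (prevNode == -1) a neighbour is chosen uniformly at random.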
long findNextNode(unordered_map<long, unordered_set<long>> neighbours, long curNode, long prevNode, double p, double q) {
// persistent engine: re-creating a default-seeded engine on every call would make
// dist(generator) return the same draw for identical weights each time
static default_random_engine generator;
vector<long> curNeighbours;
curNeighbours.insert(curNeighbours.end(), neighbours[curNode].begin(), neighbours[curNode].end());
if (prevNode == -1) {
return curNeighbours[rand() % curNeighbours.size()];
} else {
unordered_set<long> prevNeighbours = neighbours[prevNode];
vector<double> weights(curNeighbours.size());
for (int i = 0; i < weights.size(); i++) {
long nextNode = curNeighbours[i];
if (nextNode == prevNode) {
weights[i] = 1.0 / p;
} else if (prevNeighbours.find(nextNode) != prevNeighbours.end()) {
weights[i] = 1.0;
} else {
weights[i] = 1.0 / q;
}
}
discrete_distribution<int> dist(weights.begin(), weights.end());
return curNeighbours[dist(generator)];
}
}
long** generateWalks(unordered_map<long, unordered_set<long>> neighbours, int walkPerNode, int walkLength, double p, double q) {
long** walks = new long*[walkPerNode * neighbours.size()];
int counter = 0;
for (auto& neighbour : neighbours) {
long node = neighbour.first;
for (int i = 0; i < walkPerNode; i++) {
walks[counter + i] = new long[walkLength];
walks[counter + i][0] = node;
long curNode = node;
long prevNode = -1;
for (int j = 1; j < walkLength; j++) {
long nextNode = findNextNode(neighbours, curNode, prevNode, p, q);
walks[counter + i][j] = nextNode;
prevNode = curNode;
curNode = nextNode;
}
}
counter += walkPerNode;
}
return walks;
}
int main(int argc, char* argv[]) {
string fileName;
int walkPerNode = 1;
int walkLength = 10;
double p = 1;
double q = 2;
if (argc == 2) {
fileName = argv[1];
} else {
cerr << "Invalid argument, must provide path to edge list" << endl;
return 1;
}
unordered_map<long, unordered_set<long>> neighbours = {};
ifstream in(fileName);
long s, e;
while (in >> s >> e) {
if (s == e) {
cerr << "Loop edge is not supported" << endl;
return 1;
} else if (s <= 0 || e <= 0) {
cerr << "node must be greater than or equal to 0" << endl;
return 1;
}
if (neighbours.find(s) == neighbours.end()) {
unordered_set<long> emptySet = {};
neighbours[s] = emptySet;
}
if (neighbours.find(e) == neighbours.end()) {
unordered_set<long> emptySet = {};
neighbours[e] = emptySet;
}
neighbours[s].insert(e);
neighbours[e].insert(s);
}
long** cuNeighbours;
hipMallocManaged(&cuNeighbours, neighbours.size() * sizeof(long*));
long** walks = generateWalks(neighbours, walkPerNode, walkLength, p, q);
printWalks(walks, neighbours.size(), walkPerNode, walkLength);
// clean up
for (int i = 0; i < walkPerNode * neighbours.size(); i++) {
delete[] walks[i];
}
delete[] walks;
}
| 03fc80a4294e8c8e8d0a56111da08e5ae7639b01.cu | #include <iostream>
#include <fstream>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <stdlib.h>
#include <vector>
#include <random>
using namespace std;
void printNeighbours(unordered_map<long, unordered_set<long>> neighbours) {
for (auto& n : neighbours) {
cout << n.first << ": ";
for (auto& s : n.second) {
cout << s << " ";
}
cout << endl;
}
}
void printWalks(long** walks, int numNodes, int walkPerNode, int walkLength) {
for (int i = 0; i < numNodes * walkPerNode; i++) {
for (int j = 0; j < walkLength; j++) {
cout << walks[i][j] << " ";
}
cout << endl;
}
}
long findNextNode(unordered_map<long, unordered_set<long>> neighbours, long curNode, long prevNode, double p, double q) {
// persistent engine: re-creating a default-seeded engine on every call would make
// dist(generator) return the same draw for identical weights each time
static default_random_engine generator;
vector<long> curNeighbours;
curNeighbours.insert(curNeighbours.end(), neighbours[curNode].begin(), neighbours[curNode].end());
if (prevNode == -1) {
return curNeighbours[rand() % curNeighbours.size()];
} else {
unordered_set<long> prevNeighbours = neighbours[prevNode];
vector<double> weights(curNeighbours.size());
for (int i = 0; i < weights.size(); i++) {
long nextNode = curNeighbours[i];
if (nextNode == prevNode) {
weights[i] = 1.0 / p;
} else if (prevNeighbours.find(nextNode) != prevNeighbours.end()) {
weights[i] = 1.0;
} else {
weights[i] = 1.0 / q;
}
}
discrete_distribution<int> dist(weights.begin(), weights.end());
return curNeighbours[dist(generator)];
}
}
long** generateWalks(unordered_map<long, unordered_set<long>> neighbours, int walkPerNode, int walkLength, double p, double q) {
long** walks = new long*[walkPerNode * neighbours.size()];
int counter = 0;
for (auto& neighbour : neighbours) {
long node = neighbour.first;
for (int i = 0; i < walkPerNode; i++) {
walks[counter + i] = new long[walkLength];
walks[counter + i][0] = node;
long curNode = node;
long prevNode = -1;
for (int j = 1; j < walkLength; j++) {
long nextNode = findNextNode(neighbours, curNode, prevNode, p, q);
walks[counter + i][j] = nextNode;
prevNode = curNode;
curNode = nextNode;
}
}
counter += walkPerNode;
}
return walks;
}
int main(int argc, char* argv[]) {
string fileName;
int walkPerNode = 1;
int walkLength = 10;
double p = 1;
double q = 2;
if (argc == 2) {
fileName = argv[1];
} else {
cerr << "Invalid argument, must provide path to edge list" << endl;
return 1;
}
unordered_map<long, unordered_set<long>> neighbours = {};
ifstream in(fileName);
long s, e;
while (in >> s >> e) {
if (s == e) {
cerr << "Loop edge is not supported" << endl;
return 1;
} else if (s <= 0 || e <= 0) {
cerr << "node must be greater than or equal to 0" << endl;
return 1;
}
if (neighbours.find(s) == neighbours.end()) {
unordered_set<long> emptySet = {};
neighbours[s] = emptySet;
}
if (neighbours.find(e) == neighbours.end()) {
unordered_set<long> emptySet = {};
neighbours[e] = emptySet;
}
neighbours[s].insert(e);
neighbours[e].insert(s);
}
long** cuNeighbours;
cudaMallocManaged(&cuNeighbours, neighbours.size() * sizeof(long*));
long** walks = generateWalks(neighbours, walkPerNode, walkLength, p, q);
printWalks(walks, neighbours.size(), walkPerNode, walkLength);
// clean up
for (int i = 0; i < walkPerNode * neighbours.size(); i++) {
delete[] walks[i];
}
delete[] walks;
}
|
f75f79549c29e5f601398f96337f8aa1f86aa245.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if GOOGLE_CUDA
#include "ew_op_gpu.h"
#include <stdio.h>
template <typename TI, typename T>
__global__ void __launch_bounds__(1024) embedding_lookup(T* Y, const TI* __restrict__ I, const T* __restrict__ W, int C, uint K, int nIdx, uint sizeY)
{
#pragma unroll 1
for (uint idxY = blockIdx.x*1024 + threadIdx.x; idxY < sizeY; idxY += gridDim.x*1024)
{
uint idx = idxY / K;
uint k = idxY % K;
if (idx < nIdx)
{
int emb = __ldg(add_ptr_u(I, idx));
float w = load(add_ptr_u(W, emb*K + k), 0, emb >= 0 && emb < C);
store(add_ptr_u(Y, idxY), w);
}
}
}
template <typename TI, typename TG>
__global__ void __launch_bounds__(1024) embedding_lookup_grad(float* DW, const TI* __restrict__ I, const TG* __restrict__ DY, int C, uint K, int nIdx, uint sizeY)
{
#pragma unroll 1
for (uint idxY = blockIdx.x*1024 + threadIdx.x; idxY < sizeY; idxY += gridDim.x*1024)
{
uint idx = idxY / K;
uint k = idxY % K;
if (idx < nIdx)
{
int emb = __ldg(add_ptr_u(I, idx));
if (emb >= 0 && emb < C)
{
float dy = load(add_ptr_u(DY, idxY));
atomicRed(add_ptr_u(DW, emb*K + k), dy);
}
}
}
}
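// bfe: extract the single bit of val at position pos (PTX bit-field extract).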
__device__ __forceinline__ uint bfe(uint val, int pos)
{
uint bit;
asm ("bfe.u32 %0, %1, %2, 1;" : "=r"(bit) : "r"(val), "r"(pos) );
return bit;
}
typedef struct __align__(8) EmbMap
{
int iIdx;
int iEmb;
} EmbMap;
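// Gradient kernel for the "sorted" path: each block loads a window of embedding indices
// into shared memory, bitonic-sorts them so that duplicate indices become adjacent, and
// then accumulates the matching dY rows in registers, issuing one atomic reduction per
// run of identical indices instead of one per lookup. This reduces atomic contention when
// the same embedding row is referenced many times.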
template <typename TI, typename TG, int UNROLL>
__global__ void sorted_embedding_lookup_grad(float* DW, const TI* __restrict__ I, const TG* __restrict__ DY, int nIdx, int C, int K, int Exp)
{
extern __shared__ EmbMap emb_map[];
int tid = threadIdx.x;
int bid = blockIdx.x;
EmbMap init;
init.iIdx = bid*blockDim.x + tid;
if (init.iIdx < nIdx)
{
init.iEmb = __ldg(add_ptr_u(I, init.iIdx));
if (init.iEmb < 0 || init.iEmb >= C)
init.iEmb = -1;
}
else
init.iEmb = -1;
emb_map[tid] = init;
__syncthreads();
// Bitonic sort the embedding indices to reduce atomic add contention.
for (int i = 1; i <= Exp; ++i)
{
int j;
#pragma unroll 1
for (j = i - 1; j >= 5; --j)
{
// when the comparison stride is 32 or greater,
// use half of warps and uniform shared memory access to make comparisons
if (tid < blockDim.x/2)
{
// figure out the a and b indexes for the "butterfly" compare operation
uint m = (tid >> j) << (j + 1);
uint r = tid & ((1 << j) - 1);
uint a = m + r;
uint b = a + (1 << j);
bool d = bfe(a, i) != 0;
EmbMap A = emb_map[a];
EmbMap B = emb_map[b];
if((B.iEmb > A.iEmb) ^ d)
{
EmbMap t = A;
A = B;
B = t;
}
emb_map[a] = A;
emb_map[b] = B;
}
__syncthreads();
}
// When the comparison stride is less than 32,
// use all warps and shfl_xor operations to make comparisons in registers
// Load shared to registers
EmbMap A = emb_map[tid];
#pragma unroll 5
while (j >= 0)
{
EmbMap B;
B.iEmb = shfl_xor(A.iEmb, 1 << j);
B.iIdx = shfl_xor(A.iIdx, 1 << j);
bool d = bfe(tid, i) != bfe(tid, j--);
// in the case of equality we want both shuffle lanes to not swap
if(((B.iEmb > A.iEmb) ^ d) && B.iEmb != A.iEmb)
A = B;
}
// Load final register values back to shared.
emb_map[tid] = A;
__syncthreads();
}
int k = blockIdx.y*256 + (tid & 31);
#pragma unroll 1
for(int t = 0; t < 256 && k < K; t += UNROLL*32, k += UNROLL*32)
{
int iMap = tid & -32;
int iPrev = emb_map[iMap].iEmb;
float dw[UNROLL] = {0};
#pragma unroll 1
for (int iTile = 0; iTile < 32 && iPrev != -1; ++iTile, ++iMap)
{
EmbMap curr = emb_map[iMap];
// atomicRed gradient if we hit a new emb index
if (curr.iEmb != iPrev)
{
float* DW_ = add_ptr_u(DW, iPrev*K + k);
for (int i = 0; i < UNROLL; ++i)
atomicRed(DW_, dw[i], i*32, k + i*32 < K);
for (int i = 0; i < UNROLL; ++i)
dw[i] = 0.0f;
}
// grab and accumulate this gradient if valid
if (curr.iEmb != -1)
{
const TG* DY_ = add_ptr_u(DY, curr.iIdx*K + k);
for (int i = 0; i < UNROLL; ++i)
dw[i] += load(DY_, i*32, k + i*32 < K);
}
iPrev = curr.iEmb;
}
// Final atomicRed in case tile size was full 32
if (iPrev != -1)
{
float* DW_ = add_ptr_u(DW, iPrev*K + k);
for (int i = 0; i < UNROLL; ++i)
atomicRed(DW_, dw[i], i*32, k + i*32 < K);
}
}
}
template <typename TI, typename T>
bool EmbeddingLookup(hipStream_t stream, int SMs, T* y, const TI* idx, const T* w, int nIdx, int C, int K)
{
uint sizeY = nIdx*K;
uint grid = sizeY > SMs*1024 ? SMs*2 : SMs;
hipLaunchKernelGGL(( embedding_lookup<TI,T>), dim3(grid),dim3(1024),0,stream, y, idx, w, C, K, nIdx, sizeY);
return true;
}
template bool EmbeddingLookup<int,float>(hipStream_t stream, int SMs, float* y, const int* idx, const float* w, int nIdx, int C, int K);
template bool EmbeddingLookup<int,ehalf>(hipStream_t stream, int SMs, ehalf* y, const int* idx, const ehalf* w, int nIdx, int C, int K);
template bool EmbeddingLookup<int,bhalf>(hipStream_t stream, int SMs, bhalf* y, const int* idx, const bhalf* w, int nIdx, int C, int K);
template bool EmbeddingLookup<ushort,float>(hipStream_t stream, int SMs, float* y, const ushort* idx, const float* w, int nIdx, int C, int K);
template bool EmbeddingLookup<ushort,ehalf>(hipStream_t stream, int SMs, ehalf* y, const ushort* idx, const ehalf* w, int nIdx, int C, int K);
template bool EmbeddingLookup<ushort,bhalf>(hipStream_t stream, int SMs, bhalf* y, const ushort* idx, const bhalf* w, int nIdx, int C, int K);
template bool EmbeddingLookup<unsigned char,float>(hipStream_t stream, int SMs, float* y, const unsigned char* idx, const float* w, int nIdx, int C, int K);
template bool EmbeddingLookup<unsigned char,ehalf>(hipStream_t stream, int SMs, ehalf* y, const unsigned char* idx, const ehalf* w, int nIdx, int C, int K);
template bool EmbeddingLookup<unsigned char,bhalf>(hipStream_t stream, int SMs, bhalf* y, const unsigned char* idx, const bhalf* w, int nIdx, int C, int K);
template <typename TI, typename TG>
bool EmbeddingLookupGrad(hipStream_t stream, int SMs, float* dw, const TI* idx, const TG* dy, int nIdx, int C, int K, bool sorted)
{
hipMemsetAsync(dw, 0, sizeof(float) * (size_t)C * K, stream); // zero the full C*K float buffer (hipMemsetAsync takes a byte count)
if (sorted)
{
int exp;
if (nIdx > (SMs << 11)) exp = 10;
else if (nIdx > (SMs << 10)) exp = 9;
else if (nIdx > (SMs << 9)) exp = 8;
else if (nIdx > (SMs << 8)) exp = 7;
else exp = 6;
int threads = 1 << exp;
int shared = threads * 8;
int gridI = (nIdx >> exp) + ((nIdx & (threads-1)) != 0);
int gridK = CEIL_DIV(K, 256);
dim3 grid(gridI, gridK);
if (K > 64)
hipLaunchKernelGGL(( sorted_embedding_lookup_grad<TI,TG,4>), dim3(grid),dim3(threads),shared,stream, dw, idx, dy, nIdx, C, K, exp);
else if (K > 32)
hipLaunchKernelGGL(( sorted_embedding_lookup_grad<TI,TG,2>), dim3(grid),dim3(threads),shared,stream, dw, idx, dy, nIdx, C, K, exp);
else
hipLaunchKernelGGL(( sorted_embedding_lookup_grad<TI,TG,1>), dim3(grid),dim3(threads),shared,stream, dw, idx, dy, nIdx, C, K, exp);
}
else
{
uint sizeY = nIdx*K;
uint grid = sizeY > SMs*1024 ? SMs*2 : SMs;
hipLaunchKernelGGL(( embedding_lookup_grad<TI,TG>), dim3(grid),dim3(1024),0,stream, dw, idx, dy, C, K, nIdx, sizeY);
}
return true;
}
template bool EmbeddingLookupGrad<int,float>(hipStream_t stream, int SMs, float* dw, const int* idx, const float* dy, int nIdx, int C, int K, bool sorted);
template bool EmbeddingLookupGrad<int,ehalf>(hipStream_t stream, int SMs, float* dw, const int* idx, const ehalf* dy, int nIdx, int C, int K, bool sorted);
template bool EmbeddingLookupGrad<int,bhalf>(hipStream_t stream, int SMs, float* dw, const int* idx, const bhalf* dy, int nIdx, int C, int K, bool sorted);
template bool EmbeddingLookupGrad<ushort,float>(hipStream_t stream, int SMs, float* dw, const ushort* idx, const float* dy, int nIdx, int C, int K, bool sorted);
template bool EmbeddingLookupGrad<ushort,ehalf>(hipStream_t stream, int SMs, float* dw, const ushort* idx, const ehalf* dy, int nIdx, int C, int K, bool sorted);
template bool EmbeddingLookupGrad<ushort,bhalf>(hipStream_t stream, int SMs, float* dw, const ushort* idx, const bhalf* dy, int nIdx, int C, int K, bool sorted);
template bool EmbeddingLookupGrad<unsigned char,float>(hipStream_t stream, int SMs, float* dw, const unsigned char* idx, const float* dy, int nIdx, int C, int K, bool sorted);
template bool EmbeddingLookupGrad<unsigned char,ehalf>(hipStream_t stream, int SMs, float* dw, const unsigned char* idx, const ehalf* dy, int nIdx, int C, int K, bool sorted);
template bool EmbeddingLookupGrad<unsigned char,bhalf>(hipStream_t stream, int SMs, float* dw, const unsigned char* idx, const bhalf* dy, int nIdx, int C, int K, bool sorted);
#endif
| f75f79549c29e5f601398f96337f8aa1f86aa245.cu | #if GOOGLE_CUDA
#include "ew_op_gpu.h"
#include <stdio.h>
template <typename TI, typename T>
__global__ void __launch_bounds__(1024) embedding_lookup(T* Y, const TI* __restrict__ I, const T* __restrict__ W, int C, uint K, int nIdx, uint sizeY)
{
#pragma unroll 1
for (uint idxY = blockIdx.x*1024 + threadIdx.x; idxY < sizeY; idxY += gridDim.x*1024)
{
uint idx = idxY / K;
uint k = idxY % K;
if (idx < nIdx)
{
int emb = __ldg(add_ptr_u(I, idx));
float w = load(add_ptr_u(W, emb*K + k), 0, emb >= 0 && emb < C);
store(add_ptr_u(Y, idxY), w);
}
}
}
template <typename TI, typename TG>
__global__ void __launch_bounds__(1024) embedding_lookup_grad(float* DW, const TI* __restrict__ I, const TG* __restrict__ DY, int C, uint K, int nIdx, uint sizeY)
{
#pragma unroll 1
for (uint idxY = blockIdx.x*1024 + threadIdx.x; idxY < sizeY; idxY += gridDim.x*1024)
{
uint idx = idxY / K;
uint k = idxY % K;
if (idx < nIdx)
{
int emb = __ldg(add_ptr_u(I, idx));
if (emb >= 0 && emb < C)
{
float dy = load(add_ptr_u(DY, idxY));
atomicRed(add_ptr_u(DW, emb*K + k), dy);
}
}
}
}
__device__ __forceinline__ uint bfe(uint val, int pos)
{
uint bit;
asm ("bfe.u32 %0, %1, %2, 1;" : "=r"(bit) : "r"(val), "r"(pos) );
return bit;
}
typedef struct __align__(8) EmbMap
{
int iIdx;
int iEmb;
} EmbMap;
template <typename TI, typename TG, int UNROLL>
__global__ void sorted_embedding_lookup_grad(float* DW, const TI* __restrict__ I, const TG* __restrict__ DY, int nIdx, int C, int K, int Exp)
{
extern __shared__ EmbMap emb_map[];
int tid = threadIdx.x;
int bid = blockIdx.x;
EmbMap init;
init.iIdx = bid*blockDim.x + tid;
if (init.iIdx < nIdx)
{
init.iEmb = __ldg(add_ptr_u(I, init.iIdx));
if (init.iEmb < 0 || init.iEmb >= C)
init.iEmb = -1;
}
else
init.iEmb = -1;
emb_map[tid] = init;
__syncthreads();
// Bittonic sort the embedding indicies to allow reduced atomic add contention.
for (int i = 1; i <= Exp; ++i)
{
int j;
#pragma unroll 1
for (j = i - 1; j >= 5; --j)
{
// when the comparison stride is 32 or greater,
// use half of warps and uniform shared memory access to make comparisons
if (tid < blockDim.x/2)
{
// figure out the a and b indexes for the "butterfly" compare operation
uint m = (tid >> j) << (j + 1);
uint r = tid & ((1 << j) - 1);
uint a = m + r;
uint b = a + (1 << j);
bool d = bfe(a, i) != 0;
EmbMap A = emb_map[a];
EmbMap B = emb_map[b];
if((B.iEmb > A.iEmb) ^ d)
{
EmbMap t = A;
A = B;
B = t;
}
emb_map[a] = A;
emb_map[b] = B;
}
__syncthreads();
}
// When the comparison stride is less than 32,
// use all warps and shfl_xor operations to make comparisons in registers
// Load shared to registers
EmbMap A = emb_map[tid];
#pragma unroll 5
while (j >= 0)
{
EmbMap B;
B.iEmb = shfl_xor(A.iEmb, 1 << j);
B.iIdx = shfl_xor(A.iIdx, 1 << j);
bool d = bfe(tid, i) != bfe(tid, j--);
// in the case of equality we want both shuffle lanes to not swap
if(((B.iEmb > A.iEmb) ^ d) && B.iEmb != A.iEmb)
A = B;
}
// Load final register values back to shared.
emb_map[tid] = A;
__syncthreads();
}
int k = blockIdx.y*256 + (tid & 31);
#pragma unroll 1
for(int t = 0; t < 256 && k < K; t += UNROLL*32, k += UNROLL*32)
{
int iMap = tid & -32;
int iPrev = emb_map[iMap].iEmb;
float dw[UNROLL] = {0};
#pragma unroll 1
for (int iTile = 0; iTile < 32 && iPrev != -1; ++iTile, ++iMap)
{
EmbMap curr = emb_map[iMap];
// atomicRed gradient if we hit a new emb index
if (curr.iEmb != iPrev)
{
float* DW_ = add_ptr_u(DW, iPrev*K + k);
for (int i = 0; i < UNROLL; ++i)
atomicRed(DW_, dw[i], i*32, k + i*32 < K);
for (int i = 0; i < UNROLL; ++i)
dw[i] = 0.0f;
}
// grab and accumulate this gradient if valid
if (curr.iEmb != -1)
{
const TG* DY_ = add_ptr_u(DY, curr.iIdx*K + k);
for (int i = 0; i < UNROLL; ++i)
dw[i] += load(DY_, i*32, k + i*32 < K);
}
iPrev = curr.iEmb;
}
// Final atomicRed in case tile size was full 32
if (iPrev != -1)
{
float* DW_ = add_ptr_u(DW, iPrev*K + k);
for (int i = 0; i < UNROLL; ++i)
atomicRed(DW_, dw[i], i*32, k + i*32 < K);
}
}
}
template <typename TI, typename T>
bool EmbeddingLookup(CUstream stream, int SMs, T* y, const TI* idx, const T* w, int nIdx, int C, int K)
{
uint sizeY = nIdx*K;
uint grid = sizeY > SMs*1024 ? SMs*2 : SMs;
embedding_lookup<TI,T><<<grid,1024,0,stream>>>(y, idx, w, C, K, nIdx, sizeY);
return true;
}
template bool EmbeddingLookup<int,float>(CUstream stream, int SMs, float* y, const int* idx, const float* w, int nIdx, int C, int K);
template bool EmbeddingLookup<int,ehalf>(CUstream stream, int SMs, ehalf* y, const int* idx, const ehalf* w, int nIdx, int C, int K);
template bool EmbeddingLookup<int,bhalf>(CUstream stream, int SMs, bhalf* y, const int* idx, const bhalf* w, int nIdx, int C, int K);
template bool EmbeddingLookup<ushort,float>(CUstream stream, int SMs, float* y, const ushort* idx, const float* w, int nIdx, int C, int K);
template bool EmbeddingLookup<ushort,ehalf>(CUstream stream, int SMs, ehalf* y, const ushort* idx, const ehalf* w, int nIdx, int C, int K);
template bool EmbeddingLookup<ushort,bhalf>(CUstream stream, int SMs, bhalf* y, const ushort* idx, const bhalf* w, int nIdx, int C, int K);
template bool EmbeddingLookup<unsigned char,float>(CUstream stream, int SMs, float* y, const unsigned char* idx, const float* w, int nIdx, int C, int K);
template bool EmbeddingLookup<unsigned char,ehalf>(CUstream stream, int SMs, ehalf* y, const unsigned char* idx, const ehalf* w, int nIdx, int C, int K);
template bool EmbeddingLookup<unsigned char,bhalf>(CUstream stream, int SMs, bhalf* y, const unsigned char* idx, const bhalf* w, int nIdx, int C, int K);
template <typename TI, typename TG>
bool EmbeddingLookupGrad(CUstream stream, int SMs, float* dw, const TI* idx, const TG* dy, int nIdx, int C, int K, bool sorted)
{
cuMemsetD32Async((CUdeviceptr)dw, 0, C*K, stream);
if (sorted)
{
int exp;
if (nIdx > (SMs << 11)) exp = 10;
else if (nIdx > (SMs << 10)) exp = 9;
else if (nIdx > (SMs << 9)) exp = 8;
else if (nIdx > (SMs << 8)) exp = 7;
else exp = 6;
int threads = 1 << exp;
int shared = threads * 8;
int gridI = (nIdx >> exp) + ((nIdx & (threads-1)) != 0);
int gridK = CEIL_DIV(K, 256);
dim3 grid(gridI, gridK);
if (K > 64)
sorted_embedding_lookup_grad<TI,TG,4><<<grid,threads,shared,stream>>>(dw, idx, dy, nIdx, C, K, exp);
else if (K > 32)
sorted_embedding_lookup_grad<TI,TG,2><<<grid,threads,shared,stream>>>(dw, idx, dy, nIdx, C, K, exp);
else
sorted_embedding_lookup_grad<TI,TG,1><<<grid,threads,shared,stream>>>(dw, idx, dy, nIdx, C, K, exp);
}
else
{
uint sizeY = nIdx*K;
uint grid = sizeY > SMs*1024 ? SMs*2 : SMs;
embedding_lookup_grad<TI,TG><<<grid,1024,0,stream>>>(dw, idx, dy, C, K, nIdx, sizeY);
}
return true;
}
template bool EmbeddingLookupGrad<int,float>(CUstream stream, int SMs, float* dw, const int* idx, const float* dy, int nIdx, int C, int K, bool sorted);
template bool EmbeddingLookupGrad<int,ehalf>(CUstream stream, int SMs, float* dw, const int* idx, const ehalf* dy, int nIdx, int C, int K, bool sorted);
template bool EmbeddingLookupGrad<int,bhalf>(CUstream stream, int SMs, float* dw, const int* idx, const bhalf* dy, int nIdx, int C, int K, bool sorted);
template bool EmbeddingLookupGrad<ushort,float>(CUstream stream, int SMs, float* dw, const ushort* idx, const float* dy, int nIdx, int C, int K, bool sorted);
template bool EmbeddingLookupGrad<ushort,ehalf>(CUstream stream, int SMs, float* dw, const ushort* idx, const ehalf* dy, int nIdx, int C, int K, bool sorted);
template bool EmbeddingLookupGrad<ushort,bhalf>(CUstream stream, int SMs, float* dw, const ushort* idx, const bhalf* dy, int nIdx, int C, int K, bool sorted);
template bool EmbeddingLookupGrad<unsigned char,float>(CUstream stream, int SMs, float* dw, const unsigned char* idx, const float* dy, int nIdx, int C, int K, bool sorted);
template bool EmbeddingLookupGrad<unsigned char,ehalf>(CUstream stream, int SMs, float* dw, const unsigned char* idx, const ehalf* dy, int nIdx, int C, int K, bool sorted);
template bool EmbeddingLookupGrad<unsigned char,bhalf>(CUstream stream, int SMs, float* dw, const unsigned char* idx, const bhalf* dy, int nIdx, int C, int K, bool sorted);
#endif
|
db27cc07721ac2022d954b52205b020713d957b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _CUDA_KERNEL_H_
#define _CUDA_KERNEL_H_
#include "../common/globals.h"
__device__ double* g_data[2];
__device__ int maximum_index;
__device__ int data_size[2];
__device__ double* g_weights[2];
__device__ double lambda;
__device__ int max_p_index;
__device__ double max_p;
__device__ int max_q_index;
__device__ double max_q;
__device__ double dot_xi_yi; // <x_i, y_i >
__device__ double dot_xi_xi; // <x_i, x_i >
__device__ double dot_yi_yi; // <y_i, y_i >
__device__ double* distance;
__device__ double* rho;
__device__ double* dot_xi_x;
__device__ double* dot_yi_x;
__device__ double* dot_xi_y;
__device__ double* dot_yi_y;
__device__ double* dot_same[2];
__device__ double* get_element(int id, int set);
__device__ struct svm_parameter param;
__device__ double dot(double* px, int xstride, double *py, int ystride)
{
double sum = 0.0;
int i;
for(i=0; i< maximum_index; i++)
{
sum += px[i*xstride] * py[i*ystride];
}
return sum;
}
inline double powi(double base, int times)
{
double tmp = base, ret = 1.0;
int t;
for(t=times; t>0; t/=2)
{
if(t%2==1) ret*=tmp;
tmp = tmp * tmp;
}
return ret;
}
__device__ double kernel_linear(int set1, int element1, int set2, int element2) //todo: implement as a template
{
double* px = &(g_data[set1][ element1 ]);
double* py = &(g_data[set2][ element2 ]);
double ret = dot(px, data_size[set1], py, data_size[set2] );
if(set1 == set2 && element1 == element2)
ret += 1.0/param.C;
return ret;
}
__device__ double power(double base, int exponent) { //todo: compute more efficiently? (squaring, bit shifts)
int i;
double res = base;
for(i=0;i<exponent;i++)
{
res *= base;
}
return res;
}
__device__ double kernel_poly(int set1, int element1, int set2, int element2)
{
double* px = &(g_data[set1][ element1 ]);
double* py = &(g_data[set2][ element2 ]);
double ret = power(param.gamma*dot(px, data_size[set1], py, data_size[set2] )+param.coef0,param.degree);
if(set1 == set2 && element1 == element2)
ret += 1.0/param.C;
return ret;
}
__device__ double kernel_rbf(int set1, int element1, int set2, int element2)
{
double* px = &(g_data[set1][ element1 ]);
double* py = &(g_data[set2][ element2 ]);
double sumxx = 0.0;
double sumxy = 0.0;
double sumyy = 0.0;
int xstride = data_size[set1];
int ystride = data_size[set2];
int i;
for(i=0; i< maximum_index; i++)
{
sumxy += px[i*xstride] * py[i*ystride];
sumxx += px[i*xstride] * px[i*xstride];
sumyy += py[i*ystride] * py[i*ystride];
}
double dots = (sumxx + sumyy) - 2*sumxy; //todo: precompute dot(x,x)?
double wgamma = -param.gamma*dots;
double wexp = exp(wgamma);
if(set1 == set2 && element1 == element2)
wexp += 1.0/param.C;
return wexp;
}
__device__ double kernel_sigmoid(int set1, int element1, int set2, int element2)
{
double* px = &(g_data[set1][ element1 ]);
double* py = &(g_data[set2][ element2 ]);
double ret = tanh(param.gamma*dot(px, data_size[set1], py, data_size[set2])+param.coef0);
if(set1 == set2 && element1 == element2)
ret += 1.0/param.C;
return ret;
}
__device__ double kernel(int set1, int element1, int set2, int element2)
{
switch(param.kernel_type)
{
case POLY:
return kernel_poly(set1, element1, set2, element2);
case RBF:
return kernel_rbf(set1, element1, set2, element2);
case SIGMOID:
return kernel_sigmoid(set1, element1, set2, element2);
// case PRECOMPUTED:
// return kernel_precomputed(set1, element1, set2, element2);
case LINEAR:
default:
return kernel_linear(set1, element1, set2, element2);
}
}
__device__ int find_max(int p, double *dot_yi_x, double* dot_xi_x, double dot_xi_yi, double dot_xi_xi, double *max_p)
{
// find max
int max_p_index = -1;
*max_p = -1000000000.0; //todo: find a HUGE_VAL equivalent for CUDA
int i;
for (i=0; i<data_size[p]; i++)
{
double sum = dot_yi_x[i] - dot_xi_x[i] - dot_xi_yi + dot_xi_xi;
if(sum > *max_p)
{
*max_p = sum;
max_p_index = i;
}
}
return max_p_index;
}
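// reduce6: block-wide max reduction that also tracks the index of the maximum (unrolled
// for a compile-time blockSize). On the first pass (first == 1) each thread evaluates the
// improvement value of one data point of `set` directly from the cached dot products; on
// later passes it reduces the per-block partial results left in g_data_value/g_data_index.
// Block 0 additionally publishes its result to max_p/max_p_index (set 0) or
// max_q/max_q_index (set 1) and to the distance array.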
template <unsigned int blockSize>
//todo: make set and first template parameters?
__global__ void reduce6(int *g_data_index, double *g_data_value, unsigned int set, int first, int data_size1)
{
__shared__ int sdata_index[blockSize];
__shared__ double sdata_value[blockSize];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + tid;
if(first == 1)
{
double value1;
if(set == 0)
value1 = dot_yi_x[i] - dot_xi_x[i] - dot_xi_yi + dot_xi_xi;
else
value1 = dot_xi_y[i] - dot_yi_y[i] - dot_xi_yi + dot_yi_yi;
if(i < data_size[set])
{
if( i + blockSize < data_size[set])
{
double value2;
if(set == 0)
value2 = dot_yi_x[i+blockSize] - dot_xi_x[i+blockSize] - dot_xi_yi + dot_xi_xi;
else
value2 = dot_xi_y[i+blockSize] - dot_yi_y[i+blockSize] - dot_xi_yi + dot_yi_yi;
if(value1 > value2)
{
sdata_value[tid] = value1;
sdata_index[tid] = i;
}
else
{
sdata_value[tid] = value2;
sdata_index[tid] = i+blockSize;
}
} else
{
sdata_value[tid] = value1;
sdata_index[tid] = i;
}
} else {
sdata_value[tid] = -100000000000.0; //todo: find a proper max value constant
sdata_index[tid] = -1;
}
} else
{
if( i + blockSize < data_size1)
{
double value1 = g_data_value[i];
double value2 = g_data_value[i+blockSize];
if(value1>value2)
{
sdata_value[tid] = value1;
sdata_index[tid] = g_data_index[i];
}
else
{
sdata_value[tid] = value2;
sdata_index[tid] = g_data_index[i+blockSize];
}
} else
{
sdata_value[tid] = g_data_value[i];
sdata_index[tid] = g_data_index[i];
}
}
__syncthreads();
if (blockSize >= 512)
{
if (tid < 256)
{
if(sdata_value[tid] < sdata_value[tid + 256])
{
sdata_value[tid] = sdata_value[tid + 256];
sdata_index[tid] = sdata_index[tid + 256];
}
}
__syncthreads();
}
if (blockSize >= 256)
{
if (tid < 128)
{
if(sdata_value[tid] < sdata_value[tid + 128])
{
sdata_value[tid] = sdata_value[tid + 128];
sdata_index[tid] = sdata_index[tid + 128];
}
}
__syncthreads();
}
if (blockSize >= 128)
{
if (tid < 64)
{
if(sdata_value[tid] < sdata_value[tid + 64])
{
sdata_value[tid] = sdata_value[tid + 64];
sdata_index[tid] = sdata_index[tid + 64];
}
}
__syncthreads();
}
#ifdef __DEVICE_EMULATION__
if (blockSize >= 64)
{
if (tid < 32)
{
if(sdata_value[tid] < sdata_value[tid + 32])
{
sdata_value[tid] = sdata_value[tid + 32];
sdata_index[tid] = sdata_index[tid + 32];
}
}
__syncthreads();
}
if (blockSize >= 32)
{
if (tid < 16)
{
if(sdata_value[tid] < sdata_value[tid + 16])
{
sdata_value[tid] = sdata_value[tid + 16];
sdata_index[tid] = sdata_index[tid + 16];
}
}
__syncthreads();
}
if (blockSize >= 16)
{
if (tid < 8)
{
if(sdata_value[tid] < sdata_value[tid + 8])
{
sdata_value[tid] = sdata_value[tid + 8];
sdata_index[tid] = sdata_index[tid + 8];
}
}
__syncthreads();
}
if (blockSize >= 8)
{
if (tid < 4)
{
if(sdata_value[tid] < sdata_value[tid + 4])
{
sdata_value[tid] = sdata_value[tid + 4];
sdata_index[tid] = sdata_index[tid + 4];
}
}
__syncthreads();
}
if (blockSize >= 4)
{
if (tid < 2)
{
if(sdata_value[tid] < sdata_value[tid + 2])
{
sdata_value[tid] = sdata_value[tid + 2];
sdata_index[tid] = sdata_index[tid + 2];
}
}
__syncthreads();
}
if (blockSize >= 2)
{
if (tid < 1)
{
if(sdata_value[tid] < sdata_value[tid + 1])
{
sdata_value[tid] = sdata_value[tid + 1];
sdata_index[tid] = sdata_index[tid + 1];
}
}
__syncthreads();
}
#else
if (tid < 32)
{
if (blockSize >= 64)
{
if(sdata_value[tid] < sdata_value[tid + 32])
{
sdata_value[tid] = sdata_value[tid + 32];
sdata_index[tid] = sdata_index[tid + 32];
}
}
if (blockSize >= 32)
{
if(sdata_value[tid] < sdata_value[tid + 16])
{
sdata_value[tid] = sdata_value[tid + 16];
sdata_index[tid] = sdata_index[tid + 16];
}
}
if (blockSize >= 16)
{
if(sdata_value[tid] < sdata_value[tid + 8])
{
sdata_value[tid] = sdata_value[tid + 8];
sdata_index[tid] = sdata_index[tid + 8];
}
}
if (blockSize >= 8)
{
if(sdata_value[tid] < sdata_value[tid + 4])
{
sdata_value[tid] = sdata_value[tid + 4];
sdata_index[tid] = sdata_index[tid + 4];
}
}
if (blockSize >= 4)
{
if(sdata_value[tid] < sdata_value[tid + 2])
{
sdata_value[tid] = sdata_value[tid + 2];
sdata_index[tid] = sdata_index[tid + 2];
}
}
if (blockSize >= 2)
{
if(sdata_value[tid] < sdata_value[tid + 1])
{
sdata_value[tid] = sdata_value[tid + 1];
sdata_index[tid] = sdata_index[tid + 1];
}
}
}
#endif
if (tid == 0)
{
g_data_index[blockIdx.x] = sdata_index[0];
g_data_value[blockIdx.x] = sdata_value[0];
if(blockIdx.x==0)
{
if(set == 0)
{
max_p = sdata_value[0];
distance[1] = max_p;
max_p_index = sdata_index[0];
}
else
{
max_q = sdata_value[0];
distance[2] = max_q;
max_q_index = sdata_index[0];
}
}
}
}
__device__ double compute_zaehler(double dot_xi_yi, double* dot_yi_x, double* dot_xi_x, int p, int max_p_index )
{
//todo: same vector, can be precomputed.
double zaehler = dot_xi_yi - dot_yi_x[max_p_index] - dot_xi_x[max_p_index] + dot_same[p][max_p_index];//kernel(p,max_p_index, p, max_p_index);
return zaehler;
}
__device__ double compute_nenner(double dot_xi_xi, double* dot_xi_x, int p, int max_p_index)
{
double nenner = dot_xi_xi - 2* dot_xi_x[max_p_index] + dot_same[p][max_p_index];//kernel(p, max_p_index, p, max_p_index);
return nenner;
}
__device__ void add_to_weights(double* weights, double lambda, int index, int set)
{
int i;
for (i=0; i<data_size[set]; i++)
{
if (i!=index)
weights[i] *= lambda;
else
weights[i] = weights[i]*lambda + (1.0 - lambda)*1;
}
}
__device__ double update_xi_xi(double dot_xi_xi, double* dot_xi_x, int p, int max_p_index, double lambda)
{
dot_xi_xi = lambda * lambda * dot_xi_xi
+ 2 * lambda * (1.0 - lambda) * dot_xi_x[max_p_index]
//todo: cache the dot product of the vector with itself
+ (1.0 - lambda)*(1.0 - lambda)*dot_same[p][max_p_index];//kernel(p, max_p_index, p ,max_p_index );
return dot_xi_xi;
}
__device__ double update_xi_yi(double dot_xi_yi, double* dot_yi_x, int max_p_index, double lambda)
{
dot_xi_yi = lambda * dot_xi_yi + (1.0 - lambda) * dot_yi_x[max_p_index];
return dot_xi_yi;
}
__device__ void update_xi_x(double* dot_xi_x, int p, int p2, int max_p_index, double lambda, double* computed_kernels , int tid)
{
if( (tid < data_size[0] && p2 == 0) || ( tid >= data_size[0] && p2 == 1 ) )
{
int offset = p2 * data_size[0];
//(p, max_p_index, p2, i);
dot_xi_x[tid - offset]= dot_xi_x[tid - offset] * lambda + (1.0 - lambda) * computed_kernels[ tid ];
}
}
// cache begin
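// Simple LRU cache for rows of the kernel matrix held in device memory:
// look_up_table maps a data-point id to its cache slot (-1 if not cached),
// reverse_look_up_table maps a slot back to the id it currently holds, and
// circular_array keeps the slots ordered from most recently used (ca_first) to
// least recently used (ca_last, the eviction candidate); ca_free_pos is the next
// slot that has never been occupied.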
__device__ int nr_of_cache_entries;
__device__ int nr_of_elements;
__device__ double* data;
__device__ int* look_up_table; // translates data id to cache position
// translates cache positions to id
__device__ int* reverse_look_up_table;
__device__ int* circular_array; // stores the order in which cache positions were inserted
__device__ int ca_first;
__device__ int ca_last;
__device__ int ca_free_pos; // tracks the next position that has not yet been occupied
__device__ bool ca_cachemiss;
__global__ void cuda_cache_init(int g_nr_of_cache_entries, int g_nr_of_elements,
int *g_look_up_table, int* g_reverse_look_up_table, int* g_circular_array, double* g_data_cache)
{
// initialize the cache
look_up_table = g_look_up_table;
reverse_look_up_table = g_reverse_look_up_table;
circular_array = g_circular_array;
data = g_data_cache;
nr_of_cache_entries = g_nr_of_cache_entries;
nr_of_elements = g_nr_of_elements;
// init pointer
ca_first = 0;
ca_last = nr_of_cache_entries - 1;
ca_free_pos = 0;
for(int i=0; i<data_size[0]+data_size[1]; i++)
{
look_up_table[i] = -1;
}
}
__device__ void ca_add(int id)
{
// clean up look up table
int last_id = reverse_look_up_table[ circular_array[ca_last] ];
if(circular_array[ca_last] != -1) // check whether all cache slots are already occupied
{
//pos = look_up_table[ last_id ];
look_up_table[ last_id ] = -1;
}
else
{
circular_array[ca_last] = ca_free_pos;
ca_free_pos++;
}
//circular_array[ca_last] = pos;
ca_first = ca_last;
ca_last = ca_last - 1;
if(ca_last<0) ca_last = nr_of_cache_entries - 1;
reverse_look_up_table[circular_array[ca_first]] = id;
look_up_table[id] = circular_array[ca_first];
}
__device__ void ca_bring_forward(int pos)
{
// printf("bring_fordward. enter. pos = %d\n", pos);
int current = ca_first;
int pos_temp = circular_array[current];
int pos_temp2 = -1;
// int i;
// printf("circular array: ");
// for(i=0; i< nr_of_cache_entries; i++)
// printf(" %d: %d - ", i, circular_array[i]);
// printf("\n");
// printf("lut: ");
// for(i=0; i< nr_of_elements; i++)
// printf(" %d: %d - ", i, look_up_table[i]);
// printf("\n");
// printf("first = %d last = %d \n", ca_first, ca_last);
do
{
// printf("bring_fordward. cycle. \n");
pos_temp2 = pos_temp;
current = current + 1;
if(current>=nr_of_cache_entries) current = 0;
pos_temp = circular_array[current];
// printf("current = %d last = %d pt = %d, pt2 = %d\n", current, last, pos_temp, pos_temp2);
circular_array[current] = pos_temp2;
// printf("circular array 2: ");
// for(i=0; i< nr_of_cache_entries; i++)
// printf(" %d: %d - ", i, circular_array[i]);
// printf("\n");
} while( pos_temp != pos);
circular_array[ca_first] = pos;
// printf("circular array 3: ");
// for(i=0; i< nr_of_cache_entries; i++)
// printf(" %d: %d - ", i, circular_array[i]);
// printf("\n");
}
__global__ void cuda_cache_update()
{
int idset;
if(max_p > max_q)
{
idset = max_p_index;
} else
{
idset = max_q_index + data_size[0];
}
if( look_up_table[idset] == -1 )
{
ca_add(idset);
ca_cachemiss = true;
//get_data(id, set, circular_array[ca_first]);
//printf("cache miss, id = %d, set = %d\n", id, set);
} //cache hit
else
{
//printf("cache hit\n");
if(look_up_table[idset] != circular_array[ca_first])
{
ca_bring_forward(look_up_table[idset]);
}
ca_cachemiss = false;
}
}
__global__ void cuda_kernel_computekernels_cache()
{
int tid = threadIdx.x + blockDim.x*blockIdx.x;
// in case a few more threads than necessary were launched
if(tid < data_size[0] + data_size[1])
{
int t_set;
int t_element;
if(tid < data_size[0])
t_set = 0;
else
t_set = 1;
t_element = tid - (t_set) * data_size[0];
if (max_p >= max_q)
{
if( ca_cachemiss == true )
data[circular_array[ca_first] * nr_of_elements + tid] = kernel(0, max_p_index, t_set, t_element);
//todo: add caching
double* computed_kernels = &data[circular_array[ca_first] * nr_of_elements];
update_xi_x(dot_xi_x, 0, 0, max_p_index, lambda, computed_kernels, tid);
update_xi_x(dot_xi_y, 0, 1, max_p_index, lambda, computed_kernels, tid);
}
else
{
if( ca_cachemiss == true )
data[circular_array[ca_first] * nr_of_elements + tid] = kernel(1, max_q_index, t_set, t_element);
double* computed_kernels = &data[circular_array[ca_first] * nr_of_elements];
update_xi_x(dot_yi_y, 1, 1, max_q_index, lambda, computed_kernels, tid);
update_xi_x(dot_yi_x, 1, 0, max_q_index, lambda, computed_kernels, tid);
}
}
}
// cache end
__global__ void cuda_kernel_init_pointer(double* g_data0, double* g_data1 , int g_maximum_index, int g_data0_size, int g_data1_size, double* g_weights0, double* g_weights1 ,
double *g_dot_xi_x, double *g_dot_yi_x, double *g_dot_xi_y, double *g_dot_yi_y,
double* g_dot_same0, double* g_dot_same1, double* g_distance, double* g_rho, struct svm_parameter g_param)
{
dot_xi_x = g_dot_xi_x;
dot_yi_x = g_dot_yi_x;
dot_xi_y = g_dot_xi_y;
dot_yi_y = g_dot_yi_y;
dot_same[0] = g_dot_same0;
dot_same[1] = g_dot_same1;
//todo: pass the correct arrays directly
g_data[0] = g_data0;
g_data[1] = g_data1;
maximum_index = g_maximum_index;
data_size[0] = g_data0_size;
data_size[1] = g_data1_size;
g_weights[0] = g_weights0;
g_weights[1] = g_weights1;
distance = g_distance;
rho = g_rho;
param = g_param;
}
__global__ void cuda_kernel_init_kernel()
{
int tid = threadIdx.x + blockDim.x*blockIdx.x;
// in case a few more threads than necessary were launched
if(tid < data_size[0] + data_size[1])
{
int t_set;
int t_element;
if(tid < data_size[0])
t_set = 0;
else
t_set = 1;
t_element = tid - (t_set) * data_size[0];
g_weights[t_set][t_element] = 0.0;
// initialize
if(t_set == 0)
{
dot_xi_x[t_element]=kernel(0, 0, t_set, t_element);
dot_yi_x[t_element]=kernel(1, 0, t_set, t_element);
} else
{
dot_xi_y[t_element]=kernel(0, 0, t_set, t_element);
dot_yi_y[t_element]=kernel(1, 0, t_set, t_element);
}
dot_same[t_set][t_element]=kernel(t_set, t_element, t_set, t_element);
}
}
__global__ void cuda_kernel_init_findmax()
{
g_weights[0][0] = 1.0;
g_weights[1][0] = 1.0;
dot_xi_xi = kernel(0, 0, 0, 0);
dot_xi_yi = kernel(0, 0, 1, 0);
dot_yi_yi = kernel(1, 0, 1, 0);
// find max
//max_p_index = find_max(0, dot_yi_x, dot_xi_x, dot_xi_yi, dot_xi_xi, &max_p);
//max_q_index = find_max(1, dot_xi_y, dot_yi_y, dot_xi_yi, dot_yi_yi, &max_q);
}
__global__ void cuda_kernel_updateWeights()
{
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if (max_p >= max_q)
{
if(tid < data_size[0])
{
g_weights[0][tid] *= lambda;
}
if(tid == max_p_index)
{
g_weights[0][max_p_index] += 1.0 - lambda;
}
} else {
if(tid < data_size[1])
{
g_weights[1][tid] *= lambda;
}
if(tid == max_q_index)
{
g_weights[1][max_q_index] += 1.0 - lambda;
}
//if(tid == 1691)
// g_weights[1][1691] = 1234.56;
}
}
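// Computes the step size lambda for moving the winning set's current point towards the
// selected vertex v (max_p_index or max_q_index): lambda = <x - v, y - v> / ||x - v||^2
// (with x the moving point and y the other set's point), reset to 0 if it falls outside
// [0, 1], then updates the cached dot products, the squared hull distance and rho.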
__global__ void cuda_kernel_lambda()
{
if (max_p >= max_q)
{
double zaehler = compute_zaehler(dot_xi_yi, dot_yi_x, dot_xi_x, 0, max_p_index);
double nenner = compute_nenner(dot_xi_xi, dot_xi_x, 0, max_p_index);
lambda = zaehler / nenner;
if(zaehler == 0.0 && nenner == 0.0) lambda = 0.0;
if(lambda < 0.0) lambda = 0.0;
if(lambda > 1.0) lambda = 0.0;
//add_to_weights(g_weights[0], lambda, max_p_index, 0);
// update dotproducts
dot_xi_xi = update_xi_xi(dot_xi_xi, dot_xi_x, 0, max_p_index, lambda);
dot_xi_yi = update_xi_yi(dot_xi_yi, dot_yi_x, max_p_index, lambda);
}
else
{
double zaehler = compute_zaehler(dot_xi_yi, dot_xi_y, dot_yi_y, 1, max_q_index);
double nenner = compute_nenner(dot_yi_yi, dot_yi_y, 1, max_q_index);
lambda = zaehler / nenner;
if(zaehler == 0.0 && nenner == 0.0) lambda = 0.0;
if(lambda < 0.0) lambda = 0.0;
if(lambda > 1.0) lambda = 0.0;
//g_temp[0] = lambda;
//add_to_weights(g_weights[1], lambda, max_q_index, 1);
// update dotproducts
dot_yi_yi = update_xi_xi(dot_yi_yi, dot_yi_y, 1, max_q_index, lambda);
dot_xi_yi = update_xi_yi(dot_xi_yi, dot_xi_y, max_q_index, lambda);
}
distance[0] = dot_xi_xi + dot_yi_yi - 2 * dot_xi_yi;
*rho = dot_xi_yi - dot_xi_xi - (dot_xi_xi + dot_yi_yi - 2 * dot_xi_yi)/2;
}
__global__ void cuda_kernel_distance()
{
// find max
max_p_index = find_max(0, dot_yi_x, dot_xi_x, dot_xi_yi, dot_xi_xi, &max_p);
max_q_index = find_max(1, dot_xi_y, dot_yi_y, dot_xi_yi, dot_yi_yi, &max_q);
//duality gap
// absolute duality gap
//todo: return this value to the host
//double adg = max_p + max_q;
//printf("max_p = %f max_q = %f ", max_p, max_q);
//printf("adg = %f ", adg);
// relative duality gap
// adg / ||p-q||^2 - adg
// adg / <p-q, p-q> - adg
//double distance = dot_xi_xi + dot_yi_yi - 2 * dot_xi_yi;
//double rdg_nenner = distance - adg;
//double rdg;
//if (rdg_nenner <= 0)
//{
//printf("set huge value... ");
// rdg = 100000000000.0; // todo: HUGE_VAL;
//}
//else
//{
// rdg = adg / rdg_nenner;
//}
//printf("<x-y,x-y> = %e " , distance);
//printf("adg = %e " , adg);
//printf("rdg = %e \n", rdg);
//print_weights(x_weights, prob[0]);
//print_weights(y_weights, prob[1]);
//rho = - dot_xi_yi + dot_xi_xi - (dot_xi_xi + dot_yi_yi - 2 * dot_xi_yi)/2;
//double rho = dot_xi_yi - dot_xi_xi - (dot_xi_xi + dot_yi_yi - 2 * dot_xi_yi)/2;
//printf("xi_xi = %f yi_yi = %f xi_yi = %f \n", dot_xi_xi, dot_yi_yi, dot_xi_yi);
}
#endif // #ifndef _CUDA_KERNEL_H_
| db27cc07721ac2022d954b52205b020713d957b6.cu | #ifndef _CUDA_KERNEL_H_
#define _CUDA_KERNEL_H_
#include "../common/globals.h"
__device__ double* g_data[2];
__device__ int maximum_index;
__device__ int data_size[2];
__device__ double* g_weights[2];
__device__ double lambda;
__device__ int max_p_index;
__device__ double max_p;
__device__ int max_q_index;
__device__ double max_q;
__device__ double dot_xi_yi; // <x_i, y_i >
__device__ double dot_xi_xi; // <x_i, x_i >
__device__ double dot_yi_yi; // <y_i, y_i >
__device__ double* distance;
__device__ double* rho;
__device__ double* dot_xi_x;
__device__ double* dot_yi_x;
__device__ double* dot_xi_y;
__device__ double* dot_yi_y;
__device__ double* dot_same[2];
__device__ double* get_element(int id, int set);
__device__ struct svm_parameter param;
__device__ double dot(double* px, int xstride, double *py, int ystride)
{
double sum = 0.0;
int i;
for(i=0; i< maximum_index; i++)
{
sum += px[i*xstride] * py[i*ystride];
}
return sum;
}
inline double powi(double base, int times)
{
double tmp = base, ret = 1.0;
int t;
for(t=times; t>0; t/=2)
{
if(t%2==1) ret*=tmp;
tmp = tmp * tmp;
}
return ret;
}
__device__ double kernel_linear(int set1, int element1, int set2, int element2) //todo: implement as a template
{
double* px = &(g_data[set1][ element1 ]);
double* py = &(g_data[set2][ element2 ]);
double ret = dot(px, data_size[set1], py, data_size[set2] );
if(set1 == set2 && element1 == element2)
ret += 1.0/param.C;
return ret;
}
__device__ double power(double base, int exponent) { //todo: compute this more efficiently? (squaring, bit shifts)
// note: res starts at base and is multiplied by base `exponent` times, so this returns base^(exponent+1), not base^exponent
int i;
double res = base;
for(i=0;i<exponent;i++)
{
res *= base;
}
return res;
}
__device__ double kernel_poly(int set1, int element1, int set2, int element2)
{
double* px = &(g_data[set1][ element1 ]);
double* py = &(g_data[set2][ element2 ]);
double ret = power(param.gamma*dot(px, data_size[set1], py, data_size[set2] )+param.coef0,param.degree);
if(set1 == set2 && element1 == element2)
ret += 1.0/param.C;
return ret;
}
__device__ double kernel_rbf(int set1, int element1, int set2, int element2)
{
double* px = &(g_data[set1][ element1 ]);
double* py = &(g_data[set2][ element2 ]);
double sumxx = 0.0;
double sumxy = 0.0;
double sumyy = 0.0;
int xstride = data_size[set1];
int ystride = data_size[set2];
int i;
for(i=0; i< maximum_index; i++)
{
sumxy += px[i*xstride] * py[i*ystride];
sumxx += px[i*xstride] * px[i*xstride];
sumyy += py[i*ystride] * py[i*ystride];
}
double dots = (sumxx + sumyy) - 2*sumxy; //todo: precompute dot(x,x)??
double wgamma = -param.gamma*dots;
double wexp = exp(wgamma);
if(set1 == set2 && element1 == element2)
wexp += 1.0/param.C;
return wexp;
}
__device__ double kernel_sigmoid(int set1, int element1, int set2, int element2)
{
double* px = &(g_data[set1][ element1 ]);
double* py = &(g_data[set2][ element2 ]);
double ret = tanh(param.gamma*dot(px, data_size[set1], py, data_size[set2])+param.coef0);
if(set1 == set2 && element1 == element2)
ret += 1.0/param.C;
return ret;
}
__device__ double kernel(int set1, int element1, int set2, int element2)
{
switch(param.kernel_type)
{
case POLY:
return kernel_poly(set1, element1, set2, element2);
case RBF:
return kernel_rbf(set1, element1, set2, element2);
case SIGMOID:
return kernel_sigmoid(set1, element1, set2, element2);
// case PRECOMPUTED:
// return kernel_precomputed(set1, element1, set2, element2);
case LINEAR:
default:
return kernel_linear(set1, element1, set2, element2);
}
}
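// All kernel_* variants above add 1.0/param.C on the diagonal
// (set1 == set2 && element1 == element2). Adding 1/C to the diagonal of the
// kernel matrix is the standard way to fold a 2-norm soft margin into a
// hard-margin problem; that reading is ours, the code only shows the extra
// term. For the linear kernel this amounts to
//
// K(x_i, x_j) = <x_i, x_j> + (i == j ? 1.0/C : 0.0)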
__device__ int find_max(int p, double *dot_yi_x, double* dot_xi_x, double dot_xi_yi, double dot_xi_xi, double *max_p)
{
// find max
int max_p_index = -1;
*max_p = -1000000000.0; //todo: find a HUGE_VAL equivalent for CUDA
int i;
for (i=0; i<data_size[p]; i++)
{
double sum = dot_yi_x[i] - dot_xi_x[i] - dot_xi_yi + dot_xi_xi;
if(sum > *max_p)
{
*max_p = sum;
max_p_index = i;
}
}
return max_p_index;
}
template <unsigned int blockSize>
//todo: make set and first template parameters?
__global__ void reduce6(int *g_data_index, double *g_data_value, unsigned int set, int first, int data_size1)
{
__shared__ int sdata_index[blockSize];
__shared__ double sdata_value[blockSize];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + tid;
if(first == 1)
{
double value1;
if(set == 0)
value1 = dot_yi_x[i] - dot_xi_x[i] - dot_xi_yi + dot_xi_xi;
else
value1 = dot_xi_y[i] - dot_yi_y[i] - dot_xi_yi + dot_yi_yi;
if(i < data_size[set])
{
if( i + blockSize < data_size[set])
{
double value2;
if(set == 0)
value2 = dot_yi_x[i+blockSize] - dot_xi_x[i+blockSize] - dot_xi_yi + dot_xi_xi;
else
value2 = dot_xi_y[i+blockSize] - dot_yi_y[i+blockSize] - dot_xi_yi + dot_yi_yi;
if(value1 > value2)
{
sdata_value[tid] = value1;
sdata_index[tid] = i;
}
else
{
sdata_value[tid] = value2;
sdata_index[tid] = i+blockSize;
}
} else
{
sdata_value[tid] = value1;
sdata_index[tid] = i;
}
} else {
sdata_value[tid] = -100000000000.0; //todo: find a proper max_val constant
sdata_index[tid] = -1;
}
} else
{
if( i + blockSize < data_size1)
{
double value1 = g_data_value[i];
double value2 = g_data_value[i+blockSize];
if(value1>value2)
{
sdata_value[tid] = value1;
sdata_index[tid] = g_data_index[i];
}
else
{
sdata_value[tid] = value2;
sdata_index[tid] = g_data_index[i+blockSize];
}
} else
{
sdata_value[tid] = g_data_value[i];
sdata_index[tid] = g_data_index[i];
}
}
__syncthreads();
if (blockSize >= 512)
{
if (tid < 256)
{
if(sdata_value[tid] < sdata_value[tid + 256])
{
sdata_value[tid] = sdata_value[tid + 256];
sdata_index[tid] = sdata_index[tid + 256];
}
}
__syncthreads();
}
if (blockSize >= 256)
{
if (tid < 128)
{
if(sdata_value[tid] < sdata_value[tid + 128])
{
sdata_value[tid] = sdata_value[tid + 128];
sdata_index[tid] = sdata_index[tid + 128];
}
}
__syncthreads();
}
if (blockSize >= 128)
{
if (tid < 64)
{
if(sdata_value[tid] < sdata_value[tid + 64])
{
sdata_value[tid] = sdata_value[tid + 64];
sdata_index[tid] = sdata_index[tid + 64];
}
}
__syncthreads();
}
#ifdef __DEVICE_EMULATION__
if (blockSize >= 64)
{
if (tid < 32)
{
if(sdata_value[tid] < sdata_value[tid + 32])
{
sdata_value[tid] = sdata_value[tid + 32];
sdata_index[tid] = sdata_index[tid + 32];
}
}
__syncthreads();
}
if (blockSize >= 32)
{
if (tid < 16)
{
if(sdata_value[tid] < sdata_value[tid + 16])
{
sdata_value[tid] = sdata_value[tid + 16];
sdata_index[tid] = sdata_index[tid + 16];
}
}
__syncthreads();
}
if (blockSize >= 16)
{
if (tid < 8)
{
if(sdata_value[tid] < sdata_value[tid + 8])
{
sdata_value[tid] = sdata_value[tid + 8];
sdata_index[tid] = sdata_index[tid + 8];
}
}
__syncthreads();
}
if (blockSize >= 8)
{
if (tid < 4)
{
if(sdata_value[tid] < sdata_value[tid + 4])
{
sdata_value[tid] = sdata_value[tid + 4];
sdata_index[tid] = sdata_index[tid + 4];
}
}
__syncthreads();
}
if (blockSize >= 4)
{
if (tid < 2)
{
if(sdata_value[tid] < sdata_value[tid + 2])
{
sdata_value[tid] = sdata_value[tid + 2];
sdata_index[tid] = sdata_index[tid + 2];
}
}
__syncthreads();
}
if (blockSize >= 2)
{
if (tid < 1)
{
if(sdata_value[tid] < sdata_value[tid + 1])
{
sdata_value[tid] = sdata_value[tid + 1];
sdata_index[tid] = sdata_index[tid + 1];
}
}
__syncthreads();
}
#else
if (tid < 32)
{
if (blockSize >= 64)
{
if(sdata_value[tid] < sdata_value[tid + 32])
{
sdata_value[tid] = sdata_value[tid + 32];
sdata_index[tid] = sdata_index[tid + 32];
}
}
if (blockSize >= 32)
{
if(sdata_value[tid] < sdata_value[tid + 16])
{
sdata_value[tid] = sdata_value[tid + 16];
sdata_index[tid] = sdata_index[tid + 16];
}
}
if (blockSize >= 16)
{
if(sdata_value[tid] < sdata_value[tid + 8])
{
sdata_value[tid] = sdata_value[tid + 8];
sdata_index[tid] = sdata_index[tid + 8];
}
}
if (blockSize >= 8)
{
if(sdata_value[tid] < sdata_value[tid + 4])
{
sdata_value[tid] = sdata_value[tid + 4];
sdata_index[tid] = sdata_index[tid + 4];
}
}
if (blockSize >= 4)
{
if(sdata_value[tid] < sdata_value[tid + 2])
{
sdata_value[tid] = sdata_value[tid + 2];
sdata_index[tid] = sdata_index[tid + 2];
}
}
if (blockSize >= 2)
{
if(sdata_value[tid] < sdata_value[tid + 1])
{
sdata_value[tid] = sdata_value[tid + 1];
sdata_index[tid] = sdata_index[tid + 1];
}
}
}
#endif
if (tid == 0)
{
g_data_index[blockIdx.x] = sdata_index[0];
g_data_value[blockIdx.x] = sdata_value[0];
if(blockIdx.x==0)
{
if(set == 0)
{
max_p = sdata_value[0];
distance[1] = max_p;
max_p_index = sdata_index[0];
}
else
{
max_q = sdata_value[0];
distance[2] = max_q;
max_q_index = sdata_index[0];
}
}
}
}
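// reduce6 above is a block-wise argmax reduction. On the first pass
// (first == 1) each thread recomputes its candidate score from the cached dot
// products, compares the element pair (i, i + blockSize) while filling shared
// memory, and the tree reduction keeps the larger value together with its
// index; passes with first == 0 then reduce the per-block partial results.
// Block 0 of every pass also mirrors its result into max_p/max_q,
// max_p_index/max_q_index and distance[1]/distance[2], so after the final pass
// they hold the global argmax. A rough host-side driver for the first pass
// could look like this (sketch only; d_idx, d_val, n and nblocks are
// illustrative names, the real launch code is not part of this file):
//
// int nblocks = (n + 511) / 512; // each block consumes 2 * blockSize elements
// reduce6<256><<<nblocks, 256>>>(d_idx, d_val, /*set=*/0, /*first=*/1, n);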
__device__ double compute_zaehler(double dot_xi_yi, double* dot_yi_x, double* dot_xi_x, int p, int max_p_index )
{
//todo: same-vector dot product, can be precomputed.
double zaehler = dot_xi_yi - dot_yi_x[max_p_index] - dot_xi_x[max_p_index] + dot_same[p][max_p_index];//kernel(p,max_p_index, p, max_p_index);
return zaehler;
}
__device__ double compute_nenner(double dot_xi_xi, double* dot_xi_x, int p, int max_p_index)
{
double nenner = dot_xi_xi - 2* dot_xi_x[max_p_index] + dot_same[p][max_p_index];//kernel(p, max_p_index, p, max_p_index);
return nenner;
}
__device__ void add_to_weights(double* weights, double lambda, int index, int set)
{
int i;
for (i=0; i<data_size[set]; i++)
{
if (i!=index)
weights[i] *= lambda;
else
weights[i] = weights[i]*lambda + (1.0 - lambda)*1;
}
}
__device__ double update_xi_xi(double dot_xi_xi, double* dot_xi_x, int p, int max_p_index, double lambda)
{
dot_xi_xi = lambda * lambda * dot_xi_xi
+ 2 * lambda * (1.0 - lambda) * dot_xi_x[max_p_index]
//todo: cache the dot product of the vector with itself
+ (1.0 - lambda)*(1.0 - lambda)*dot_same[p][max_p_index];//kernel(p, max_p_index, p ,max_p_index );
return dot_xi_xi;
}
__device__ double update_xi_yi(double dot_xi_yi, double* dot_yi_x, int max_p_index, double lambda)
{
dot_xi_yi = lambda * dot_xi_yi + (1.0 - lambda) * dot_yi_x[max_p_index];
return dot_xi_yi;
}
__device__ void update_xi_x(double* dot_xi_x, int p, int p2, int max_p_index, double lambda, double* computed_kernels , int tid)
{
if( (tid < data_size[0] && p2 == 0) || ( tid >= data_size[0] && p2 == 1 ) )
{
int offset = p2 * data_size[0];
//(p, max_p_index, p2, i);
dot_xi_x[tid - offset]= dot_xi_x[tid - offset] * lambda + (1.0 - lambda) * computed_kernels[ tid ];
}
}
// cache: begin
__device__ int nr_of_cache_entries;
__device__ int nr_of_elements;
__device__ double* data;
__device__ int* look_up_table; // translates data id to cache position
// translates cache positions to id
__device__ int* reverse_look_up_table;
__device__ int* circular_array; // safes order in which cache pos was inserted
__device__ int ca_first;
__device__ int ca_last;
__device__ int ca_free_pos; // safes which pos has no yet been occupied
__device__ bool ca_cachemiss;
__global__ void cuda_cache_init(int g_nr_of_cache_entries, int g_nr_of_elements,
int *g_look_up_table, int* g_reverse_look_up_table, int* g_circular_array, double* g_data_cache)
{
// initialize the cache
look_up_table = g_look_up_table;
reverse_look_up_table = g_reverse_look_up_table;
circular_array = g_circular_array;
data = g_data_cache;
nr_of_cache_entries = g_nr_of_cache_entries;
nr_of_elements = g_nr_of_elements;
// init pointer
ca_first = 0;
ca_last = nr_of_cache_entries - 1;
ca_free_pos = 0;
for(int i=0; i<data_size[0]+data_size[1]; i++)
{
look_up_table[i] = -1;
}
}
__device__ void ca_add(int id)
{
// clean up look up table
int last_id = reverse_look_up_table[ circular_array[ca_last] ];
if(circular_array[ca_last] != -1) //check whether every cache slot is already occupied
{
//pos = look_up_table[ last_id ];
look_up_table[ last_id ] = -1;
}
else
{
circular_array[ca_last] = ca_free_pos;
ca_free_pos++;
}
//circular_array[ca_last] = pos;
ca_first = ca_last;
ca_last = ca_last - 1;
if(ca_last<0) ca_last = nr_of_cache_entries - 1;
reverse_look_up_table[circular_array[ca_first]] = id;
look_up_table[id] = circular_array[ca_first];
}
__device__ void ca_bring_forward(int pos)
{
// printf("bring_fordward. enter. pos = %d\n", pos);
int current = ca_first;
int pos_temp = circular_array[current];
int pos_temp2 = -1;
// int i;
// printf("circular array: ");
// for(i=0; i< nr_of_cache_entries; i++)
// printf(" %d: %d - ", i, circular_array[i]);
// printf("\n");
// printf("lut: ");
// for(i=0; i< nr_of_elements; i++)
// printf(" %d: %d - ", i, look_up_table[i]);
// printf("\n");
// printf("first = %d last = %d \n", ca_first, ca_last);
do
{
// printf("bring_fordward. cycle. \n");
pos_temp2 = pos_temp;
current = current + 1;
if(current>=nr_of_cache_entries) current = 0;
pos_temp = circular_array[current];
// printf("current = %d last = %d pt = %d, pt2 = %d\n", current, last, pos_temp, pos_temp2);
circular_array[current] = pos_temp2;
// printf("circular array 2: ");
// for(i=0; i< nr_of_cache_entries; i++)
// printf(" %d: %d - ", i, circular_array[i]);
// printf("\n");
} while( pos_temp != pos);
circular_array[ca_first] = pos;
// printf("circular array 3: ");
// for(i=0; i< nr_of_cache_entries; i++)
// printf(" %d: %d - ", i, circular_array[i]);
// printf("\n");
}
__global__ void cuda_cache_update()
{
int idset;
if(max_p > max_q)
{
idset = max_p_index;
} else
{
idset = max_q_index + data_size[0];
}
if( look_up_table[idset] == -1 )
{
ca_add(idset);
ca_cachemiss = true;
//get_data(id, set, circular_array[ca_first]);
//printf("cache miss, id = %d, set = %d\n", id, set);
} //cache hit
else
{
//printf("cache hit\n");
if(look_up_table[idset] != circular_array[ca_first])
{
ca_bring_forward(look_up_table[idset]);
}
ca_cachemiss = false;
}
}
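// The functions above implement a small LRU cache over rows of precomputed
// kernel values: look_up_table maps a sample id to its slot in `data`,
// reverse_look_up_table maps slots back to ids, and circular_array keeps the
// slots ordered from most recently used (ca_first) to least recently used
// (ca_last). On a miss, ca_add either claims a still-free slot or evicts the
// oldest one, and ca_cachemiss makes cuda_kernel_computekernels_cache below
// recompute that kernel row; on a hit, ca_bring_forward rotates the slot to
// the front so it will be evicted last.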
__global__ void cuda_kernel_computekernels_cache()
{
int tid = threadIdx.x + blockDim.x*blockIdx.x;
// in case a few more threads than necessary were launched
if(tid < data_size[0] + data_size[1])
{
int t_set;
int t_element;
if(tid < data_size[0])
t_set = 0;
else
t_set = 1;
t_element = tid - (t_set) * data_size[0];
if (max_p >= max_q)
{
if( ca_cachemiss == true )
data[circular_array[ca_first] * nr_of_elements + tid] = kernel(0, max_p_index, t_set, t_element);
//todo: integrate caching
double* computed_kernels = &data[circular_array[ca_first] * nr_of_elements];
update_xi_x(dot_xi_x, 0, 0, max_p_index, lambda, computed_kernels, tid);
update_xi_x(dot_xi_y, 0, 1, max_p_index, lambda, computed_kernels, tid);
}
else
{
if( ca_cachemiss == true )
data[circular_array[ca_first] * nr_of_elements + tid] = kernel(1, max_q_index, t_set, t_element);
double* computed_kernels = &data[circular_array[ca_first] * nr_of_elements];
update_xi_x(dot_yi_y, 1, 1, max_q_index, lambda, computed_kernels, tid);
update_xi_x(dot_yi_x, 1, 0, max_q_index, lambda, computed_kernels, tid);
}
}
}
// cache: end
__global__ void cuda_kernel_init_pointer(double* g_data0, double* g_data1 , int g_maximum_index, int g_data0_size, int g_data1_size, double* g_weights0, double* g_weights1 ,
double *g_dot_xi_x, double *g_dot_yi_x, double *g_dot_xi_y, double *g_dot_yi_y,
double* g_dot_same0, double* g_dot_same1, double* g_distance, double* g_rho, struct svm_parameter g_param)
{
dot_xi_x = g_dot_xi_x;
dot_yi_x = g_dot_yi_x;
dot_xi_y = g_dot_xi_y;
dot_yi_y = g_dot_yi_y;
dot_same[0] = g_dot_same0;
dot_same[1] = g_dot_same1;
//todo: send the right arrays in the first place
g_data[0] = g_data0;
g_data[1] = g_data1;
maximum_index = g_maximum_index;
data_size[0] = g_data0_size;
data_size[1] = g_data1_size;
g_weights[0] = g_weights0;
g_weights[1] = g_weights1;
distance = g_distance;
rho = g_rho;
param = g_param;
}
__global__ void cuda_kernel_init_kernel()
{
int tid = threadIdx.x + blockDim.x*blockIdx.x;
// in case a few more threads than necessary were launched
if(tid < data_size[0] + data_size[1])
{
int t_set;
int t_element;
if(tid < data_size[0])
t_set = 0;
else
t_set = 1;
t_element = tid - (t_set) * data_size[0];
g_weights[t_set][t_element] = 0.0;
// initialize
if(t_set == 0)
{
dot_xi_x[t_element]=kernel(0, 0, t_set, t_element);
dot_yi_x[t_element]=kernel(1, 0, t_set, t_element);
} else
{
dot_xi_y[t_element]=kernel(0, 0, t_set, t_element);
dot_yi_y[t_element]=kernel(1, 0, t_set, t_element);
}
dot_same[t_set][t_element]=kernel(t_set, t_element, t_set, t_element);
}
}
__global__ void cuda_kernel_init_findmax()
{
g_weights[0][0] = 1.0;
g_weights[1][0] = 1.0;
dot_xi_xi = kernel(0, 0, 0, 0);
dot_xi_yi = kernel(0, 0, 1, 0);
dot_yi_yi = kernel(1, 0, 1, 0);
// find max
//max_p_index = find_max(0, dot_yi_x, dot_xi_x, dot_xi_yi, dot_xi_xi, &max_p);
//max_q_index = find_max(1, dot_xi_y, dot_yi_y, dot_xi_yi, dot_yi_yi, &max_q);
}
__global__ void cuda_kernel_updateWeights()
{
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if (max_p >= max_q)
{
if(tid < data_size[0])
{
g_weights[0][tid] *= lambda;
}
if(tid == max_p_index)
{
g_weights[0][max_p_index] += 1.0 - lambda;
}
} else {
if(tid < data_size[1])
{
g_weights[1][tid] *= lambda;
}
if(tid == max_q_index)
{
g_weights[1][max_q_index] += 1.0 - lambda;
}
//if(tid == 1691)
// g_weights[1][1691] = 1234.56;
}
}
__global__ void cuda_kernel_lambda()
{
if (max_p >= max_q)
{
double zaehler = compute_zaehler(dot_xi_yi, dot_yi_x, dot_xi_x, 0, max_p_index);
double nenner = compute_nenner(dot_xi_xi, dot_xi_x, 0, max_p_index);
lambda = zaehler / nenner;
if(zaehler == 0.0 && nenner == 0.0) lambda = 0.0;
if(lambda < 0.0) lambda = 0.0;
if(lambda > 1.0) lambda = 0.0;
//add_to_weights(g_weights[0], lambda, max_p_index, 0);
// update dotproducts
dot_xi_xi = update_xi_xi(dot_xi_xi, dot_xi_x, 0, max_p_index, lambda);
dot_xi_yi = update_xi_yi(dot_xi_yi, dot_yi_x, max_p_index, lambda);
}
else
{
double zaehler = compute_zaehler(dot_xi_yi, dot_xi_y, dot_yi_y, 1, max_q_index);
double nenner = compute_nenner(dot_yi_yi, dot_yi_y, 1, max_q_index);
lambda = zaehler / nenner;
if(zaehler == 0.0 && nenner == 0.0) lambda = 0.0;
if(lambda < 0.0) lambda = 0.0;
if(lambda > 1.0) lambda = 0.0;
//g_temp[0] = lambda;
//add_to_weights(g_weights[1], lambda, max_q_index, 1);
// update dotproducts
dot_yi_yi = update_xi_xi(dot_yi_yi, dot_yi_y, 1, max_q_index, lambda);
dot_xi_yi = update_xi_yi(dot_xi_yi, dot_xi_y, max_q_index, lambda);
}
distance[0] = dot_xi_xi + dot_yi_yi - 2 * dot_xi_yi;
*rho = dot_xi_yi - dot_xi_xi - (dot_xi_xi + dot_yi_yi - 2 * dot_xi_yi)/2;
}
__global__ void cuda_kernel_distance()
{
// find max
max_p_index = find_max(0, dot_yi_x, dot_xi_x, dot_xi_yi, dot_xi_xi, &max_p);
max_q_index = find_max(1, dot_xi_y, dot_yi_y, dot_xi_yi, dot_yi_yi, &max_q);
//duality gap
// absolute duality gap
//todo: return this value to the host
//double adg = max_p + max_q;
//printf("max_p = %f max_q = %f ", max_p, max_q);
//printf("adg = %f ", adg);
// relative duality gap
// adg / ||p-q||^2 - adg
// adg / <p-q, p-q> - adg
//double distance = dot_xi_xi + dot_yi_yi - 2 * dot_xi_yi;
//double rdg_nenner = distance - adg;
//double rdg;
//if (rdg_nenner <= 0)
//{
//printf("set huge value... ");
// rdg = 100000000000.0; // todo: HUGE_VAL;
//}
//else
//{
// rdg = adg / rdg_nenner;
//}
//printf("<x-y,x-y> = %e " , distance);
//printf("adg = %e " , adg);
//printf("rdg = %e \n", rdg);
//print_weights(x_weights, prob[0]);
//print_weights(y_weights, prob[1]);
//rho = - dot_xi_yi + dot_xi_xi - (dot_xi_xi + dot_yi_yi - 2 * dot_xi_yi)/2;
//double rho = dot_xi_yi - dot_xi_xi - (dot_xi_xi + dot_yi_yi - 2 * dot_xi_yi)/2;
//printf("xi_xi = %f yi_yi = %f xi_yi = %f \n", dot_xi_xi, dot_yi_yi, dot_xi_yi);
}
#endif // #ifndef _CUDA_KERNEL_H_
|
e6a874f40739850153e24ee159cc7e1df36f59d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
__global__ void div_strided_float(int n,int xOffset,int yOffset, float *dx,float *dy,int incx,int incy,float *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= xOffset && i >= yOffset && i % incx == 0 && i % incy == 0)
result[i] = dy[i] / dx[i];
}
} | e6a874f40739850153e24ee159cc7e1df36f59d2.cu | #include "includes.h"
extern "C"
__global__ void div_strided_float(int n,int xOffset,int yOffset, float *dx,float *dy,int incx,int incy,float *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= xOffset && i >= yOffset && i % incx == 0 && i % incy == 0)
result[i] = dy[i] / dx[i];
}
} |
0819747580b2ba01a769ce13123b04a0db42ed0f.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "cutil.h"
#include "HOGEngine.h"
#include "HOGUtils.h"
#include "HOGSVMSlider.h"
texture<float, 1, hipReadModeElementType> texSVM;
hipArray *svmArray = 0;
hipChannelFormatDesc channelDescSVM;
extern int scaleCount;
extern int hNumberOfWindowsX, hNumberOfWindowsY;
extern int hNumberOfBlockPerWindowX, hNumberOfBlockPerWindowY;
extern int rNumberOfWindowsX, rNumberOfWindowsY;
extern __shared__ float1 allSharedF1[];
float svmBias;
void DeviceAllocHOGSVMMemory(void) {
cutilSafeCall(hipMallocArray(&svmArray, &channelDescSVM,
HOG.svmWeightsCount, 1));
}
void CopyInHOGSVM(void) {
cutilSafeCall(cudaMemcpyToArrayAsync(svmArray, 0, 0, HOG.svmWeights,
HOG.svmWeightsCount * sizeof(float), hipMemcpyHostToDevice, stream));
cutilSafeCall(hipStreamSynchronize(stream));
}
void DeviceFreeHOGSVMMemory(void) {
cutilSafeCall(hipFreeArray(svmArray));
svmArray = NULL;
}
void InitSVM() {
channelDescSVM = hipCreateChannelDesc<float>();
svmBias = HOG.svmBias;
}
void CloseSVM() {}
__global__ void linearSVMEvaluation(float1* svmScores, float svmBias,
float1* blockHistograms, int noHistogramBins, int windowSizeX,
int windowSizeY, int hogBlockCountX, int hogBlockCountY, int cellSizeX,
int cellSizeY, int numberOfBlockPerWindowX, int numberOfBlockPerWindowY,
int blockSizeX, int blockSizeY, int alignedBlockDimX, int scaleId,
int scaleCount, int hNumberOfWindowsX, int hNumberOfWindowsY, int width,
int height) {
int i;
int texPos;
float1 localValue;
float texValue;
float1* smem = (float1*) allSharedF1;
int gmemPosWindow, gmemPosInWindow, gmemPosInWindowDown, smemLocalPos,
smemTargetPos;
int gmemStride = hogBlockCountX * noHistogramBins * blockSizeX;
gmemPosWindow = blockIdx.x * noHistogramBins * blockSizeX + blockIdx.y *
blockSizeY * gmemStride;
gmemPosInWindow = gmemPosWindow + threadIdx.x;
smemLocalPos = threadIdx.x;
int val1 = (blockSizeY * blockSizeX * noHistogramBins) *
numberOfBlockPerWindowY;
int val2 = blockSizeX * noHistogramBins;
localValue.x = 0;
if (blockIdx.x == 10 && blockIdx.y == 8) {
int asasasa;
asasasa = 0;
asasasa++;
}
for (i = 0; i < blockSizeY * numberOfBlockPerWindowY; i++) {
gmemPosInWindowDown = gmemPosInWindow + i * gmemStride;
texPos = threadIdx.x % val2 + i * val2 + threadIdx.x / val2 * val1;
texValue = tex1D(texSVM, texPos);
localValue.x += blockHistograms[gmemPosInWindowDown].x * texValue;
}
smem[smemLocalPos] = localValue;
__syncthreads();
for(unsigned int s = alignedBlockDimX >> 1; s > 0; s >>= 1) {
if (threadIdx.x < s && (threadIdx.x + s) < blockDim.x) {
smemTargetPos = threadIdx.x + s;
smem[smemLocalPos].x += smem[smemTargetPos].x;
}
__syncthreads();
}
if (threadIdx.x == 0) {
smem[smemLocalPos].x -= svmBias;
svmScores[blockIdx.x + blockIdx.y * hNumberOfWindowsX + scaleId *
hNumberOfWindowsX * hNumberOfWindowsY] = smem[smemLocalPos];
}
if (blockIdx.x == 10 && blockIdx.y == 8) {
int asasasa;
asasasa = 0;
asasasa++;
}
}
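// Each thread block above scores one detection window: every thread
// accumulates the dot product between its slice of the window's block
// histograms and the SVM weights read from the texture, the partial sums are
// combined with a shared-memory tree reduction (alignedBlockDimX is the thread
// count rounded to a power of two), and thread 0 subtracts the SVM bias and
// stores the final value, i.e. score(window) = <w, HOG(window)> - b.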
void ResetSVMScores(float1* svmScores) {
cutilSafeCall(hipMemsetAsync(svmScores, 0, sizeof(float) * scaleCount *
hNumberOfWindowsX * hNumberOfWindowsY));
cutilSafeCall(hipStreamSynchronize(stream));
}
void LinearSVMEvaluation(float1* svmScores, float1* blockHistograms,
int noHistogramBins, int windowSizeX, int windowSizeY, int cellSizeX,
int cellSizeY, int blockSizeX, int blockSizeY, int hogBlockCountX,
int hogBlockCountY, int scaleId, int width, int height) {
rNumberOfWindowsX = (width - windowSizeX) / cellSizeX + 1;
rNumberOfWindowsY = (height - windowSizeY) / cellSizeY + 1;
dim3 threadCount = dim3(noHistogramBins * blockSizeX *
hNumberOfBlockPerWindowX);
dim3 blockCount = dim3(rNumberOfWindowsX, rNumberOfWindowsY);
int alignedBlockDimX = iClosestPowerOfTwo(noHistogramBins * blockSizeX *
hNumberOfBlockPerWindowX);
cutilSafeCall(hipBindTextureToArray(texSVM, svmArray, channelDescSVM));
hipLaunchKernelGGL(( linearSVMEvaluation), dim3(blockCount), dim3(threadCount), noHistogramBins * blockSizeX *
hNumberOfBlockPerWindowX * sizeof(float1), stream, svmScores, svmBias,
blockHistograms, noHistogramBins, windowSizeX, windowSizeY, hogBlockCountX,
hogBlockCountY, cellSizeX, cellSizeY, hNumberOfBlockPerWindowX,
hNumberOfBlockPerWindowY, blockSizeX, blockSizeY, alignedBlockDimX,
scaleId, scaleCount, hNumberOfWindowsX, hNumberOfWindowsY, width, height);
cutilSafeCall(hipStreamSynchronize(stream));
cutilSafeCall(hipUnbindTexture(texSVM));
}
| 0819747580b2ba01a769ce13123b04a0db42ed0f.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "cutil.h"
#include "HOGEngine.h"
#include "HOGUtils.h"
#include "HOGSVMSlider.h"
texture<float, 1, cudaReadModeElementType> texSVM;
cudaArray *svmArray = 0;
cudaChannelFormatDesc channelDescSVM;
extern int scaleCount;
extern int hNumberOfWindowsX, hNumberOfWindowsY;
extern int hNumberOfBlockPerWindowX, hNumberOfBlockPerWindowY;
extern int rNumberOfWindowsX, rNumberOfWindowsY;
extern __shared__ float1 allSharedF1[];
float svmBias;
void DeviceAllocHOGSVMMemory(void) {
cutilSafeCall(cudaMallocArray(&svmArray, &channelDescSVM,
HOG.svmWeightsCount, 1));
}
void CopyInHOGSVM(void) {
cutilSafeCall(cudaMemcpyToArrayAsync(svmArray, 0, 0, HOG.svmWeights,
HOG.svmWeightsCount * sizeof(float), cudaMemcpyHostToDevice, stream));
cutilSafeCall(cudaStreamSynchronize(stream));
}
void DeviceFreeHOGSVMMemory(void) {
cutilSafeCall(cudaFreeArray(svmArray));
svmArray = NULL;
}
void InitSVM() {
channelDescSVM = cudaCreateChannelDesc<float>();
svmBias = HOG.svmBias;
}
void CloseSVM() {}
__global__ void linearSVMEvaluation(float1* svmScores, float svmBias,
float1* blockHistograms, int noHistogramBins, int windowSizeX,
int windowSizeY, int hogBlockCountX, int hogBlockCountY, int cellSizeX,
int cellSizeY, int numberOfBlockPerWindowX, int numberOfBlockPerWindowY,
int blockSizeX, int blockSizeY, int alignedBlockDimX, int scaleId,
int scaleCount, int hNumberOfWindowsX, int hNumberOfWindowsY, int width,
int height) {
int i;
int texPos;
float1 localValue;
float texValue;
float1* smem = (float1*) allSharedF1;
int gmemPosWindow, gmemPosInWindow, gmemPosInWindowDown, smemLocalPos,
smemTargetPos;
int gmemStride = hogBlockCountX * noHistogramBins * blockSizeX;
gmemPosWindow = blockIdx.x * noHistogramBins * blockSizeX + blockIdx.y *
blockSizeY * gmemStride;
gmemPosInWindow = gmemPosWindow + threadIdx.x;
smemLocalPos = threadIdx.x;
int val1 = (blockSizeY * blockSizeX * noHistogramBins) *
numberOfBlockPerWindowY;
int val2 = blockSizeX * noHistogramBins;
localValue.x = 0;
if (blockIdx.x == 10 && blockIdx.y == 8) {
int asasasa;
asasasa = 0;
asasasa++;
}
for (i = 0; i < blockSizeY * numberOfBlockPerWindowY; i++) {
gmemPosInWindowDown = gmemPosInWindow + i * gmemStride;
texPos = threadIdx.x % val2 + i * val2 + threadIdx.x / val2 * val1;
texValue = tex1D(texSVM, texPos);
localValue.x += blockHistograms[gmemPosInWindowDown].x * texValue;
}
smem[smemLocalPos] = localValue;
__syncthreads();
for(unsigned int s = alignedBlockDimX >> 1; s > 0; s >>= 1) {
if (threadIdx.x < s && (threadIdx.x + s) < blockDim.x) {
smemTargetPos = threadIdx.x + s;
smem[smemLocalPos].x += smem[smemTargetPos].x;
}
__syncthreads();
}
if (threadIdx.x == 0) {
smem[smemLocalPos].x -= svmBias;
svmScores[blockIdx.x + blockIdx.y * hNumberOfWindowsX + scaleId *
hNumberOfWindowsX * hNumberOfWindowsY] = smem[smemLocalPos];
}
if (blockIdx.x == 10 && blockIdx.y == 8) {
int asasasa;
asasasa = 0;
asasasa++;
}
}
void ResetSVMScores(float1* svmScores) {
cutilSafeCall(cudaMemsetAsync(svmScores, 0, sizeof(float) * scaleCount *
hNumberOfWindowsX * hNumberOfWindowsY));
cutilSafeCall(cudaStreamSynchronize(stream));
}
void LinearSVMEvaluation(float1* svmScores, float1* blockHistograms,
int noHistogramBins, int windowSizeX, int windowSizeY, int cellSizeX,
int cellSizeY, int blockSizeX, int blockSizeY, int hogBlockCountX,
int hogBlockCountY, int scaleId, int width, int height) {
rNumberOfWindowsX = (width - windowSizeX) / cellSizeX + 1;
rNumberOfWindowsY = (height - windowSizeY) / cellSizeY + 1;
dim3 threadCount = dim3(noHistogramBins * blockSizeX *
hNumberOfBlockPerWindowX);
dim3 blockCount = dim3(rNumberOfWindowsX, rNumberOfWindowsY);
int alignedBlockDimX = iClosestPowerOfTwo(noHistogramBins * blockSizeX *
hNumberOfBlockPerWindowX);
cutilSafeCall(cudaBindTextureToArray(texSVM, svmArray, channelDescSVM));
linearSVMEvaluation<<<blockCount, threadCount, noHistogramBins * blockSizeX *
hNumberOfBlockPerWindowX * sizeof(float1), stream>>>(svmScores, svmBias,
blockHistograms, noHistogramBins, windowSizeX, windowSizeY, hogBlockCountX,
hogBlockCountY, cellSizeX, cellSizeY, hNumberOfBlockPerWindowX,
hNumberOfBlockPerWindowY, blockSizeX, blockSizeY, alignedBlockDimX,
scaleId, scaleCount, hNumberOfWindowsX, hNumberOfWindowsY, width, height);
cutilSafeCall(cudaStreamSynchronize(stream));
cutilSafeCall(cudaUnbindTexture(texSVM));
}
|
be1bd52da52a96388d88c56d3ecd8759de7ef46b.hip | // !!! This is a file automatically generated by hipify!!!
/**
* fdtd2d.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <assert.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#define SMALL_FLOAT_VAL 0.00000001f
double rtclock() {
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday(&Tp, &Tzp);
if (stat != 0)
printf("Error return from gettimeofday: %d", stat);
return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
}
float absVal(float a) {
if (a < 0) {
return (a * -1);
} else {
return a;
}
}
float percentDiff(double val1, double val2) {
if ((absVal(val1) < 0.01) && (absVal(val2) < 0.01)) {
return 0.0f;
}
else {
return 100.0f *
(absVal(absVal(val1 - val2) / absVal(val1 + SMALL_FLOAT_VAL)));
}
}
// define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 10.05
#define GPU_DEVICE 0
/* Problem size */
#define tmax 500
#define NX 2048
#define NY 2048
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_arrays(DATA_TYPE *_fict_, DATA_TYPE *ex, DATA_TYPE *ey,
DATA_TYPE *hz) {
int i, j;
for (i = 0; i < tmax; i++) {
_fict_[i] = (DATA_TYPE)i;
}
for (i = 0; i < NX; i++) {
for (j = 0; j < NY; j++) {
ex[i * NY + j] = ((DATA_TYPE)i * (j + 1) + 1) / NX;
ey[i * NY + j] = ((DATA_TYPE)(i - 1) * (j + 2) + 2) / NX;
hz[i * NY + j] = ((DATA_TYPE)(i - 9) * (j + 4) + 3) / NX;
}
}
}
void runFdtd(DATA_TYPE *_fict_, DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz) {
int t, i, j;
for (t = 0; t < tmax; t++) {
for (j = 0; j < NY; j++) {
ey[0 * NY + j] = _fict_[t];
}
for (i = 1; i < NX; i++) {
for (j = 0; j < NY; j++) {
ey[i * NY + j] =
ey[i * NY + j] - 0.5 * (hz[i * NY + j] - hz[(i - 1) * NY + j]);
}
}
for (i = 0; i < NX; i++) {
for (j = 1; j < NY; j++) {
ex[i * (NY + 1) + j] = ex[i * (NY + 1) + j] -
0.5 * (hz[i * NY + j] - hz[i * NY + (j - 1)]);
}
}
for (i = 0; i < NX; i++) {
for (j = 0; j < NY; j++) {
hz[i * NY + j] =
hz[i * NY + j] -
0.7 * (ex[i * (NY + 1) + (j + 1)] - ex[i * (NY + 1) + j] +
ey[(i + 1) * NY + j] - ey[i * NY + j]);
}
}
}
}
void compareResults(DATA_TYPE *hz1, DATA_TYPE *hz2) {
int i, j, fail;
fail = 0;
for (i = 0; i < NX; i++) {
for (j = 0; j < NY; j++) {
if (percentDiff(hz1[i * NY + j], hz2[i * NY + j]) >
PERCENT_DIFF_ERROR_THRESHOLD) {
fail++;
}
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
"Percent: %d\n",
PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init() {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n", GPU_DEVICE, deviceProp.name);
hipSetDevice(GPU_DEVICE);
}
__global__ void fdtd_step1_kernel(DATA_TYPE *_fict_, DATA_TYPE *ex,
DATA_TYPE *ey, DATA_TYPE *hz, int t) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NX) && (j < NY)) {
if (i == 0) {
ey[i * NY + j] = _fict_[t];
} else {
ey[i * NY + j] =
ey[i * NY + j] - 0.5f * (hz[i * NY + j] - hz[(i - 1) * NY + j]);
}
}
}
__global__ void fdtd_step2_kernel(DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz,
int t) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NX) && (j < NY) && (j > 0)) {
ex[i * (NY + 1) + j] =
ex[i * (NY + 1) + j] - 0.5f * (hz[i * NY + j] - hz[i * NY + (j - 1)]);
}
}
__global__ void fdtd_step3_kernel(DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz,
int t) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NX) && (j < NY)) {
hz[i * NY + j] = hz[i * NY + j] -
0.7f * (ex[i * (NY + 1) + (j + 1)] - ex[i * (NY + 1) + j] +
ey[(i + 1) * NY + j] - ey[i * NY + j]);
}
}
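// The three kernels above reproduce one time step of the CPU reference
// runFdtd(), one thread per (i, j) grid point and one kernel per sweep:
//
// ey[i][j] -= 0.5 * (hz[i][j] - hz[i-1][j]) (row 0 is fed from _fict_[t])
// ex[i][j] -= 0.5 * (hz[i][j] - hz[i][j-1])
// hz[i][j] -= 0.7 * (ex[i][j+1] - ex[i][j] + ey[i+1][j] - ey[i][j])
//
// The device synchronisation between launches in fdtdCuda() below is what
// enforces the sweep order, since each sweep reads the previous one's output.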
void fdtdCuda(DATA_TYPE *_fict_, DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz,
DATA_TYPE *hz_outputFromGpu) {
double t_start, t_end;
DATA_TYPE *_fict_gpu;
DATA_TYPE *ex_gpu;
DATA_TYPE *ey_gpu;
DATA_TYPE *hz_gpu;
hipMalloc((void **)&_fict_gpu, sizeof(DATA_TYPE) * tmax);
hipMalloc((void **)&ex_gpu, sizeof(DATA_TYPE) * NX * (NY + 1));
hipMalloc((void **)&ey_gpu, sizeof(DATA_TYPE) * (NX + 1) * NY);
hipMalloc((void **)&hz_gpu, sizeof(DATA_TYPE) * NX * NY);
hipMemcpy(_fict_gpu, _fict_, sizeof(DATA_TYPE) * tmax,
hipMemcpyHostToDevice);
hipMemcpy(ex_gpu, ex, sizeof(DATA_TYPE) * NX * (NY + 1),
hipMemcpyHostToDevice);
hipMemcpy(ey_gpu, ey, sizeof(DATA_TYPE) * (NX + 1) * NY,
hipMemcpyHostToDevice);
hipMemcpy(hz_gpu, hz, sizeof(DATA_TYPE) * NX * NY, hipMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)ceil(((float)NY) / ((float)block.x)),
(size_t)ceil(((float)NX) / ((float)block.y)));
t_start = rtclock();
for (int t = 0; t < tmax; t++) {
hipLaunchKernelGGL(( fdtd_step1_kernel), dim3(grid), dim3(block), 0, 0, _fict_gpu, ex_gpu, ey_gpu, hz_gpu, t);
hipDeviceSynchronize();
hipLaunchKernelGGL(( fdtd_step2_kernel), dim3(grid), dim3(block), 0, 0, ex_gpu, ey_gpu, hz_gpu, t);
hipDeviceSynchronize();
hipLaunchKernelGGL(( fdtd_step3_kernel), dim3(grid), dim3(block), 0, 0, ex_gpu, ey_gpu, hz_gpu, t);
hipDeviceSynchronize();
}
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
hipMemcpy(hz_outputFromGpu, hz_gpu, sizeof(DATA_TYPE) * NX * NY,
hipMemcpyDeviceToHost);
hipFree(_fict_gpu);
hipFree(ex_gpu);
hipFree(ey_gpu);
hipFree(hz_gpu);
}
int main() {
double t_start, t_end;
DATA_TYPE *_fict_;
DATA_TYPE *ex;
DATA_TYPE *ey;
DATA_TYPE *hz;
DATA_TYPE *hz_outputFromGpu;
_fict_ = (DATA_TYPE *)malloc(tmax * sizeof(DATA_TYPE));
ex = (DATA_TYPE *)malloc(NX * (NY + 1) * sizeof(DATA_TYPE));
ey = (DATA_TYPE *)malloc((NX + 1) * NY * sizeof(DATA_TYPE));
hz = (DATA_TYPE *)malloc(NX * NY * sizeof(DATA_TYPE));
hz_outputFromGpu = (DATA_TYPE *)malloc(NX * NY * sizeof(DATA_TYPE));
init_arrays(_fict_, ex, ey, hz);
GPU_argv_init();
fdtdCuda(_fict_, ex, ey, hz, hz_outputFromGpu);
t_start = rtclock();
runFdtd(_fict_, ex, ey, hz);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(hz, hz_outputFromGpu);
free(_fict_);
free(ex);
free(ey);
free(hz);
free(hz_outputFromGpu);
return 0;
}
| be1bd52da52a96388d88c56d3ecd8759de7ef46b.cu | /**
* fdtd2d.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <assert.h>
#include <cuda.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#define SMALL_FLOAT_VAL 0.00000001f
double rtclock() {
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday(&Tp, &Tzp);
if (stat != 0)
printf("Error return from gettimeofday: %d", stat);
return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
}
float absVal(float a) {
if (a < 0) {
return (a * -1);
} else {
return a;
}
}
float percentDiff(double val1, double val2) {
if ((absVal(val1) < 0.01) && (absVal(val2) < 0.01)) {
return 0.0f;
}
else {
return 100.0f *
(absVal(absVal(val1 - val2) / absVal(val1 + SMALL_FLOAT_VAL)));
}
}
// define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 10.05
#define GPU_DEVICE 0
/* Problem size */
#define tmax 500
#define NX 2048
#define NY 2048
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_arrays(DATA_TYPE *_fict_, DATA_TYPE *ex, DATA_TYPE *ey,
DATA_TYPE *hz) {
int i, j;
for (i = 0; i < tmax; i++) {
_fict_[i] = (DATA_TYPE)i;
}
for (i = 0; i < NX; i++) {
for (j = 0; j < NY; j++) {
ex[i * NY + j] = ((DATA_TYPE)i * (j + 1) + 1) / NX;
ey[i * NY + j] = ((DATA_TYPE)(i - 1) * (j + 2) + 2) / NX;
hz[i * NY + j] = ((DATA_TYPE)(i - 9) * (j + 4) + 3) / NX;
}
}
}
void runFdtd(DATA_TYPE *_fict_, DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz) {
int t, i, j;
for (t = 0; t < tmax; t++) {
for (j = 0; j < NY; j++) {
ey[0 * NY + j] = _fict_[t];
}
for (i = 1; i < NX; i++) {
for (j = 0; j < NY; j++) {
ey[i * NY + j] =
ey[i * NY + j] - 0.5 * (hz[i * NY + j] - hz[(i - 1) * NY + j]);
}
}
for (i = 0; i < NX; i++) {
for (j = 1; j < NY; j++) {
ex[i * (NY + 1) + j] = ex[i * (NY + 1) + j] -
0.5 * (hz[i * NY + j] - hz[i * NY + (j - 1)]);
}
}
for (i = 0; i < NX; i++) {
for (j = 0; j < NY; j++) {
hz[i * NY + j] =
hz[i * NY + j] -
0.7 * (ex[i * (NY + 1) + (j + 1)] - ex[i * (NY + 1) + j] +
ey[(i + 1) * NY + j] - ey[i * NY + j]);
}
}
}
}
void compareResults(DATA_TYPE *hz1, DATA_TYPE *hz2) {
int i, j, fail;
fail = 0;
for (i = 0; i < NX; i++) {
for (j = 0; j < NY; j++) {
if (percentDiff(hz1[i * NY + j], hz2[i * NY + j]) >
PERCENT_DIFF_ERROR_THRESHOLD) {
fail++;
}
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
"Percent: %d\n",
PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init() {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n", GPU_DEVICE, deviceProp.name);
cudaSetDevice(GPU_DEVICE);
}
__global__ void fdtd_step1_kernel(DATA_TYPE *_fict_, DATA_TYPE *ex,
DATA_TYPE *ey, DATA_TYPE *hz, int t) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NX) && (j < NY)) {
if (i == 0) {
ey[i * NY + j] = _fict_[t];
} else {
ey[i * NY + j] =
ey[i * NY + j] - 0.5f * (hz[i * NY + j] - hz[(i - 1) * NY + j]);
}
}
}
__global__ void fdtd_step2_kernel(DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz,
int t) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NX) && (j < NY) && (j > 0)) {
ex[i * (NY + 1) + j] =
ex[i * (NY + 1) + j] - 0.5f * (hz[i * NY + j] - hz[i * NY + (j - 1)]);
}
}
__global__ void fdtd_step3_kernel(DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz,
int t) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NX) && (j < NY)) {
hz[i * NY + j] = hz[i * NY + j] -
0.7f * (ex[i * (NY + 1) + (j + 1)] - ex[i * (NY + 1) + j] +
ey[(i + 1) * NY + j] - ey[i * NY + j]);
}
}
void fdtdCuda(DATA_TYPE *_fict_, DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz,
DATA_TYPE *hz_outputFromGpu) {
double t_start, t_end;
DATA_TYPE *_fict_gpu;
DATA_TYPE *ex_gpu;
DATA_TYPE *ey_gpu;
DATA_TYPE *hz_gpu;
cudaMalloc((void **)&_fict_gpu, sizeof(DATA_TYPE) * tmax);
cudaMalloc((void **)&ex_gpu, sizeof(DATA_TYPE) * NX * (NY + 1));
cudaMalloc((void **)&ey_gpu, sizeof(DATA_TYPE) * (NX + 1) * NY);
cudaMalloc((void **)&hz_gpu, sizeof(DATA_TYPE) * NX * NY);
cudaMemcpy(_fict_gpu, _fict_, sizeof(DATA_TYPE) * tmax,
cudaMemcpyHostToDevice);
cudaMemcpy(ex_gpu, ex, sizeof(DATA_TYPE) * NX * (NY + 1),
cudaMemcpyHostToDevice);
cudaMemcpy(ey_gpu, ey, sizeof(DATA_TYPE) * (NX + 1) * NY,
cudaMemcpyHostToDevice);
cudaMemcpy(hz_gpu, hz, sizeof(DATA_TYPE) * NX * NY, cudaMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)ceil(((float)NY) / ((float)block.x)),
(size_t)ceil(((float)NX) / ((float)block.y)));
t_start = rtclock();
for (int t = 0; t < tmax; t++) {
fdtd_step1_kernel<<<grid, block>>>(_fict_gpu, ex_gpu, ey_gpu, hz_gpu, t);
cudaDeviceSynchronize();
fdtd_step2_kernel<<<grid, block>>>(ex_gpu, ey_gpu, hz_gpu, t);
cudaDeviceSynchronize();
fdtd_step3_kernel<<<grid, block>>>(ex_gpu, ey_gpu, hz_gpu, t);
cudaDeviceSynchronize();
}
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
cudaMemcpy(hz_outputFromGpu, hz_gpu, sizeof(DATA_TYPE) * NX * NY,
cudaMemcpyDeviceToHost);
cudaFree(_fict_gpu);
cudaFree(ex_gpu);
cudaFree(ey_gpu);
cudaFree(hz_gpu);
}
int main() {
double t_start, t_end;
DATA_TYPE *_fict_;
DATA_TYPE *ex;
DATA_TYPE *ey;
DATA_TYPE *hz;
DATA_TYPE *hz_outputFromGpu;
_fict_ = (DATA_TYPE *)malloc(tmax * sizeof(DATA_TYPE));
ex = (DATA_TYPE *)malloc(NX * (NY + 1) * sizeof(DATA_TYPE));
ey = (DATA_TYPE *)malloc((NX + 1) * NY * sizeof(DATA_TYPE));
hz = (DATA_TYPE *)malloc(NX * NY * sizeof(DATA_TYPE));
hz_outputFromGpu = (DATA_TYPE *)malloc(NX * NY * sizeof(DATA_TYPE));
init_arrays(_fict_, ex, ey, hz);
GPU_argv_init();
fdtdCuda(_fict_, ex, ey, hz, hz_outputFromGpu);
t_start = rtclock();
runFdtd(_fict_, ex, ey, hz);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(hz, hz_outputFromGpu);
free(_fict_);
free(ex);
free(ey);
free(hz);
free(hz_outputFromGpu);
return 0;
}
|
2877e7611dcc580c98b7239153ad9cddbb7222d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "chainerx/cuda/cuda_device.h"
#include <cmath>
#include <cstdint>
#include <hip/hip_runtime.h>
#include "chainerx/array.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/elementwise.cuh"
#include "chainerx/cuda/numeric.cuh"
#include "chainerx/cuda/op_regist.h"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
#include "chainerx/numeric.h"
#include "chainerx/routines/math.h"
#include "chainerx/scalar.h"
namespace chainerx {
namespace cuda {
namespace {
template <typename T>
struct SinImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Sin(x); }
};
class CudaSinOp : public SinOp {
public:
void Call(const Array& x, const Array& out) override {
Device& device = x.device();
device.CheckDevicesCompatible(x, out);
CudaSetDeviceScope scope{device.index()};
const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype());
VisitFloatingPointDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, T>(SinImpl<T>{}, x_cast, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(SinOp, CudaSinOp);
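// Every op in the rest of this file repeats the CudaSinOp pattern: a small
// elementwise functor that calls the matching cuda::* device function, a
// Call() that casts the input to the output dtype and dispatches over the
// floating-point dtypes, and a CHAINERX_REGISTER_OP_CUDA registration. As an
// illustration, a hypothetical additional op would only swap the functor body
// (TanhImpl and cuda::Tanh below are placeholders, not something this file
// provides):
//
// template <typename T>
// struct TanhImpl {
// using CudaType = cuda_internal::DataType<T>;
// __device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Tanh(x); }
// };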
template <typename T>
struct CosImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Cos(x); }
};
class CudaCosOp : public CosOp {
public:
void Call(const Array& x, const Array& out) override {
Device& device = x.device();
device.CheckDevicesCompatible(x, out);
CudaSetDeviceScope scope{device.index()};
const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype());
VisitFloatingPointDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, T>(CosImpl<T>{}, x_cast, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(CosOp, CudaCosOp);
template <typename T>
struct TanImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Tan(x); }
};
class CudaTanOp : public TanOp {
public:
void Call(const Array& x, const Array& out) override {
Device& device = x.device();
device.CheckDevicesCompatible(x, out);
CudaSetDeviceScope scope{device.index()};
const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype());
VisitFloatingPointDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, T>(TanImpl<T>{}, x_cast, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(TanOp, CudaTanOp);
template <typename T>
struct ArcsinImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Arcsin(x); }
};
class CudaArcsinOp : public ArcsinOp {
public:
void Call(const Array& x, const Array& out) override {
Device& device = x.device();
device.CheckDevicesCompatible(x, out);
CudaSetDeviceScope scope{device.index()};
const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype());
VisitFloatingPointDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, T>(ArcsinImpl<T>{}, x_cast, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(ArcsinOp, CudaArcsinOp);
template <typename T>
struct ArccosImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Arccos(x); }
};
class CudaArccosOp : public ArccosOp {
public:
void Call(const Array& x, const Array& out) override {
Device& device = x.device();
device.CheckDevicesCompatible(x, out);
CudaSetDeviceScope scope{device.index()};
const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype());
VisitFloatingPointDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, T>(ArccosImpl<T>{}, x_cast, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(ArccosOp, CudaArccosOp);
template <typename T>
struct ArctanImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Arctan(x); }
};
class CudaArctanOp : public ArctanOp {
public:
void Call(const Array& x, const Array& out) override {
Device& device = x.device();
device.CheckDevicesCompatible(x, out);
CudaSetDeviceScope scope{device.index()};
const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype());
VisitFloatingPointDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, T>(ArctanImpl<T>{}, x_cast, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(ArctanOp, CudaArctanOp);
template <typename T>
struct SinhImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Sinh(x); }
};
class CudaSinhOp : public SinhOp {
public:
void Call(const Array& x, const Array& out) override {
Device& device = x.device();
device.CheckDevicesCompatible(x, out);
CudaSetDeviceScope scope{device.index()};
const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype());
VisitFloatingPointDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, T>(SinhImpl<T>{}, x_cast, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(SinhOp, CudaSinhOp);
template <typename T>
struct CoshImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Cosh(x); }
};
class CudaCoshOp : public CoshOp {
public:
void Call(const Array& x, const Array& out) override {
Device& device = x.device();
device.CheckDevicesCompatible(x, out);
CudaSetDeviceScope scope{device.index()};
const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype());
VisitFloatingPointDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, T>(CoshImpl<T>{}, x_cast, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(CoshOp, CudaCoshOp);
template <typename T>
struct ArcsinhImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Arcsinh(x); }
};
class CudaArcsinhOp : public ArcsinhOp {
public:
void Call(const Array& x, const Array& out) override {
Device& device = x.device();
device.CheckDevicesCompatible(x, out);
CudaSetDeviceScope scope{device.index()};
const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype());
VisitFloatingPointDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, T>(ArcsinhImpl<T>{}, x_cast, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(ArcsinhOp, CudaArcsinhOp);
template <typename T>
struct ArccoshImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Arccosh(x); }
};
class CudaArccoshOp : public ArccoshOp {
public:
void Call(const Array& x, const Array& out) override {
Device& device = x.device();
device.CheckDevicesCompatible(x, out);
CudaSetDeviceScope scope{device.index()};
const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype());
VisitFloatingPointDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, T>(ArccoshImpl<T>{}, x_cast, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(ArccoshOp, CudaArccoshOp);
} // namespace
} // namespace cuda
} // namespace chainerx
| 2877e7611dcc580c98b7239153ad9cddbb7222d4.cu | #include "chainerx/cuda/cuda_device.h"
#include <cmath>
#include <cstdint>
#include <cuda_runtime.h>
#include "chainerx/array.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/elementwise.cuh"
#include "chainerx/cuda/numeric.cuh"
#include "chainerx/cuda/op_regist.h"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
#include "chainerx/numeric.h"
#include "chainerx/routines/math.h"
#include "chainerx/scalar.h"
namespace chainerx {
namespace cuda {
namespace {
template <typename T>
struct SinImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Sin(x); }
};
class CudaSinOp : public SinOp {
public:
void Call(const Array& x, const Array& out) override {
Device& device = x.device();
device.CheckDevicesCompatible(x, out);
CudaSetDeviceScope scope{device.index()};
const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype());
VisitFloatingPointDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, T>(SinImpl<T>{}, x_cast, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(SinOp, CudaSinOp);
template <typename T>
struct CosImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Cos(x); }
};
class CudaCosOp : public CosOp {
public:
void Call(const Array& x, const Array& out) override {
Device& device = x.device();
device.CheckDevicesCompatible(x, out);
CudaSetDeviceScope scope{device.index()};
const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype());
VisitFloatingPointDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, T>(CosImpl<T>{}, x_cast, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(CosOp, CudaCosOp);
template <typename T>
struct TanImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Tan(x); }
};
class CudaTanOp : public TanOp {
public:
void Call(const Array& x, const Array& out) override {
Device& device = x.device();
device.CheckDevicesCompatible(x, out);
CudaSetDeviceScope scope{device.index()};
const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype());
VisitFloatingPointDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, T>(TanImpl<T>{}, x_cast, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(TanOp, CudaTanOp);
template <typename T>
struct ArcsinImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Arcsin(x); }
};
class CudaArcsinOp : public ArcsinOp {
public:
void Call(const Array& x, const Array& out) override {
Device& device = x.device();
device.CheckDevicesCompatible(x, out);
CudaSetDeviceScope scope{device.index()};
const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype());
VisitFloatingPointDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, T>(ArcsinImpl<T>{}, x_cast, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(ArcsinOp, CudaArcsinOp);
template <typename T>
struct ArccosImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Arccos(x); }
};
class CudaArccosOp : public ArccosOp {
public:
void Call(const Array& x, const Array& out) override {
Device& device = x.device();
device.CheckDevicesCompatible(x, out);
CudaSetDeviceScope scope{device.index()};
const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype());
VisitFloatingPointDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, T>(ArccosImpl<T>{}, x_cast, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(ArccosOp, CudaArccosOp);
template <typename T>
struct ArctanImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Arctan(x); }
};
class CudaArctanOp : public ArctanOp {
public:
void Call(const Array& x, const Array& out) override {
Device& device = x.device();
device.CheckDevicesCompatible(x, out);
CudaSetDeviceScope scope{device.index()};
const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype());
VisitFloatingPointDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, T>(ArctanImpl<T>{}, x_cast, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(ArctanOp, CudaArctanOp);
template <typename T>
struct SinhImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Sinh(x); }
};
class CudaSinhOp : public SinhOp {
public:
void Call(const Array& x, const Array& out) override {
Device& device = x.device();
device.CheckDevicesCompatible(x, out);
CudaSetDeviceScope scope{device.index()};
const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype());
VisitFloatingPointDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, T>(SinhImpl<T>{}, x_cast, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(SinhOp, CudaSinhOp);
template <typename T>
struct CoshImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Cosh(x); }
};
class CudaCoshOp : public CoshOp {
public:
void Call(const Array& x, const Array& out) override {
Device& device = x.device();
device.CheckDevicesCompatible(x, out);
CudaSetDeviceScope scope{device.index()};
const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype());
VisitFloatingPointDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, T>(CoshImpl<T>{}, x_cast, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(CoshOp, CudaCoshOp);
template <typename T>
struct ArcsinhImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Arcsinh(x); }
};
class CudaArcsinhOp : public ArcsinhOp {
public:
void Call(const Array& x, const Array& out) override {
Device& device = x.device();
device.CheckDevicesCompatible(x, out);
CudaSetDeviceScope scope{device.index()};
const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype());
VisitFloatingPointDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, T>(ArcsinhImpl<T>{}, x_cast, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(ArcsinhOp, CudaArcsinhOp);
template <typename T>
struct ArccoshImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Arccosh(x); }
};
class CudaArccoshOp : public ArccoshOp {
public:
void Call(const Array& x, const Array& out) override {
Device& device = x.device();
device.CheckDevicesCompatible(x, out);
CudaSetDeviceScope scope{device.index()};
const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype());
VisitFloatingPointDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Elementwise<const T, T>(ArccoshImpl<T>{}, x_cast, out);
});
}
};
CHAINERX_REGISTER_OP_CUDA(ArccoshOp, CudaArccoshOp);
} // namespace
} // namespace cuda
} // namespace chainerx
|
7814c82913999250a8dc5b51f8c91b33883ee15f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <quda_internal.h>
#include <gauge_field_order.h>
namespace quda {
template <typename Order, int nDim>
struct ExtractGhostExArg {
Order order;
int dim;
int X[nDim];
int R[nDim];
int surfaceCB[nDim];
int A0[nDim];
int A1[nDim];
int B0[nDim];
int B1[nDim];
int C0[nDim];
int C1[nDim];
int fBody[nDim][nDim];
int fBuf[nDim][nDim];
int localParity[nDim];
ExtractGhostExArg(const Order &order, int dim, const int *X_, const int *R_,
const int *surfaceCB_,
const int *A0_, const int *A1_, const int *B0_, const int *B1_,
const int *C0_, const int *C1_, const int fBody_[nDim][nDim],
const int fBuf_[nDim][nDim], const int *localParity_)
: order(order), dim(dim) {
for (int d=0; d<nDim; d++) {
X[d] = X_[d];
R[d] = R_[d];
surfaceCB[d] = surfaceCB_[d];
A0[d] = A0_[d];
A1[d] = A1_[d];
B0[d] = B0_[d];
B1[d] = B1_[d];
C0[d] = C0_[d];
C1[d] = C1_[d];
for (int e=0; e<nDim; e++) {
fBody[d][e] = fBody_[d][e];
fBuf[d][e] = fBuf_[d][e];
}
localParity[d] = localParity_[d];
}
}
};
template <typename Float, int length, typename Arg>
__device__ __host__ void extractor(Arg &arg, int dir, int a, int b,
int c, int d, int g, int parity) {
typename mapper<Float>::type u[length];
int &dim = arg.dim;
int srcIdx = (a*arg.fBody[dim][0] + b*arg.fBody[dim][1] +
c*arg.fBody[dim][2] + d*arg.fBody[dim][3]) >> 1;
int dstIdx = (a*arg.fBuf[dim][0] + b*arg.fBuf[dim][1] +
c*arg.fBuf[dim][2] + (d-(dir?arg.X[dim]:arg.R[dim]))*arg.fBuf[dim][3]) >> 1;
// load the ghost element from the bulk
arg.order.load(u, srcIdx, g, parity);
// need dir dependence in write
// srcIdx is used here to determine boundary condition
arg.order.saveGhostEx(u, dstIdx, srcIdx, dir, dim, g,
(parity+arg.localParity[dim])&1, arg.R);
}
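  /*
     Indexing note (informal): fBody[dim][*] and fBuf[dim][*] appear to hold
     per-axis strides for the bulk lattice and for the ghost buffer of the
     chosen dimension, and the trailing ">> 1" turns the full site index into a
     checkerboard (even/odd) index, which is why an explicit parity is also
     passed to load()/saveGhostEx().
  */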
template <typename Float, int length, typename Arg>
__device__ __host__ void injector(Arg &arg, int dir, int a, int b,
int c, int d, int g, int parity) {
typename mapper<Float>::type u[length];
int &dim = arg.dim;
int srcIdx = (a*arg.fBuf[dim][0] + b*arg.fBuf[dim][1] +
c*arg.fBuf[dim][2] + (d-dir*(arg.X[dim]+arg.R[dim]))*arg.fBuf[dim][3]) >> 1;
int dstIdx = (a*arg.fBody[dim][0] + b*arg.fBody[dim][1] +
c*arg.fBody[dim][2] + d*arg.fBody[dim][3]) >> 1;
// need dir dependence in read
// dstIdx is used here to determine boundary condition
arg.order.loadGhostEx(u, srcIdx, dstIdx, dir, dim, g,
(parity+arg.localParity[dim])&1, arg.R);
arg.order.save(u, dstIdx, g, parity); // save the ghost element into the bulk
}
/**
Generic CPU gauge ghost extraction and packing
     NB This routine is specialized to four dimensions
*/
template <typename Float, int length, int nDim, typename Order, bool extract>
void extractGhostEx(ExtractGhostExArg<Order,nDim> arg) {
typedef typename mapper<Float>::type RegType;
int dim = arg.dim;
for (int parity=0; parity<2; parity++) {
// the following 4-way loop means this is specialized for 4 dimensions
// dir = 0 backwards, dir = 1 forwards
for (int dir = 0; dir<2; dir++) {
int D0 = extract ? dir*arg.X[dim] + (1-dir)*arg.R[dim] : dir*(arg.X[dim] + arg.R[dim]);
for (int d=D0; d<D0+arg.R[dim]; d++) {
for (int a=arg.A0[dim]; a<arg.A1[dim]; a++) { // loop over the interior surface
for (int b=arg.B0[dim]; b<arg.B1[dim]; b++) { // loop over the interior surface
for (int c=arg.C0[dim]; c<arg.C1[dim]; c++) { // loop over the interior surface
for (int g=0; g<arg.order.geometry; g++) {
// we only do the extraction for parity we are currently working on
int oddness = (a+b+c+d) & 1;
if (oddness == parity) {
if (extract) extractor<Float,length>(arg, dir, a, b, c, d, g, parity);
else injector<Float,length>(arg, dir, a, b, c, d, g, parity);
} // oddness == parity
} // g
} // c
} // b
} // a
} // d
} // dir
} // parity
}
  /**
     Generic GPU gauge ghost extraction and packing
     NB This routine is specialized to four dimensions
     FIXME this implementation will have two-way warp divergence
  */
template <typename Float, int length, int nDim, typename Order, bool extract>
__global__ void extractGhostExKernel(ExtractGhostExArg<Order,nDim> arg) {
typedef typename mapper<Float>::type RegType;
int dim = arg.dim;
// parallelize over parity and dir using block or grid
/*for (int parity=0; parity<2; parity++) {*/
{
int parity = blockIdx.z;
// the following 4-way loop means this is specialized for 4 dimensions
// dir = 0 backwards, dir = 1 forwards
//for (int dir = 0; dir<2; dir++) {
{
int dir = blockIdx.y;
// this will have two-warp divergence since we only do work on
// one parity but parity alternates between threads
// linear index used for writing into ghost buffer
int X = blockIdx.x * blockDim.x + threadIdx.x;
int dA = arg.A1[dim]-arg.A0[dim];
int dB = arg.B1[dim]-arg.B0[dim];
int dC = arg.C1[dim]-arg.C0[dim];
int D0 = extract ? dir*arg.X[dim] + (1-dir)*arg.R[dim] : dir*(arg.X[dim] + arg.R[dim]);
if (X >= arg.R[dim]*dA*dB*dC*arg.order.geometry) return;
// thread order is optimized to maximize coalescing
// X = (((g*R + d) * dA + a)*dB + b)*dC + c
int gdab = X / dC;
int c = arg.C0[dim] + X - gdab*dC;
int gda = gdab / dB;
int b = arg.B0[dim] + gdab - gda *dB;
int gd = gda / dA;
int a = arg.A0[dim] + gda - gd *dA;
int g = gd / arg.R[dim];
int d = D0 + gd - g *arg.R[dim];
// we only do the extraction for parity we are currently working on
int oddness = (a+b+c+d) & 1;
if (oddness == parity) {
if (extract) extractor<Float,length>(arg, dir, a, b, c, d, g, parity);
else injector<Float,length>(arg, dir, a, b, c, d, g, parity);
} // oddness == parity
} // dir
} // parity
}
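  /*
     Worked example of the inverse of X = (((g*R + d') * dA + a')*dB + b')*dC + c'
     used above: with dA = dB = dC = 4, R = 2 and X = 37, the successive
     divisions give c' = 37 % 4 = 1, b' = (37/4) % 4 = 1, a' = (9/4) % 4 = 2,
     d' = (2/4) % 2 = 0 and g = 0; adding the A0/B0/C0/D0 offsets then yields
     the absolute lattice coordinates.
  */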
template <typename Float, int length, int nDim, typename Order>
class ExtractGhostEx : Tunable {
ExtractGhostExArg<Order,nDim> arg;
int size;
bool extract;
const GaugeField &meta;
QudaFieldLocation location;
private:
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0 ;}
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return size; }
public:
ExtractGhostEx(ExtractGhostExArg<Order,nDim> &arg, bool extract,
const GaugeField &meta, QudaFieldLocation location)
: arg(arg), extract(extract), meta(meta), location(location) {
int dA = arg.A1[arg.dim]-arg.A0[arg.dim];
int dB = arg.B1[arg.dim]-arg.B0[arg.dim];
int dC = arg.C1[arg.dim]-arg.C0[arg.dim];
size = arg.R[arg.dim]*dA*dB*dC*arg.order.geometry;
writeAuxString("prec=%lu,stride=%d,extract=%d,dimension=%d",
sizeof(Float),arg.order.stride, extract, arg.dim);
}
virtual ~ExtractGhostEx() { ; }
void apply(const hipStream_t &stream) {
if (extract) {
if (location==QUDA_CPU_FIELD_LOCATION) {
extractGhostEx<Float,length,nDim,Order,true>(arg);
} else {
#if (__COMPUTE_CAPABILITY__ >= 200)
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
tp.grid.y = 2;
tp.grid.z = 2;
hipLaunchKernelGGL(( extractGhostExKernel<Float,length,nDim,Order,true>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
#else
errorQuda("extractGhostEx not supported on pre-Fermi architecture");
#endif
}
} else { // we are injecting
if (location==QUDA_CPU_FIELD_LOCATION) {
extractGhostEx<Float,length,nDim,Order,false>(arg);
} else {
#if (__COMPUTE_CAPABILITY__ >= 200)
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
tp.grid.y = 2;
tp.grid.z = 2;
hipLaunchKernelGGL(( extractGhostExKernel<Float,length,nDim,Order,false>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
#else
errorQuda("extractGhostEx not supported on pre-Fermi architecture");
#endif
}
}
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
std::string paramString(const TuneParam ¶m) const { // Don't bother printing the grid dim.
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const { return 0; }
long long bytes() const { return 2 * 2 * 2 * size * arg.order.Bytes(); } // 2 for i/o
};
/**
     Generic gauge ghost extraction and packing (dispatches to the CPU loop or the GPU kernel)
     NB This routine is specialized to four dimensions
@param E the extended gauge dimensions
@param R array holding the radius of the extended region
@param extract Whether we are extracting or injecting the ghost zone
*/
template <typename Float, int length, typename Order>
void extractGhostEx(Order order, const int dim, const int *surfaceCB, const int *E,
const int *R, bool extract, const GaugeField &u, QudaFieldLocation location) {
const int nDim = 4;
    //loop variables: a, b, c with a the most significant and c the least significant
    //A0, B0, C0 the minimum value
    //A1, B1, C1 the maximum value
int X[nDim]; // compute interior dimensions
for (int d=0; d<nDim; d++) X[d] = E[d] - 2*R[d];
//..........x..........y............z.............t
int A0[nDim] = {R[3], R[3], R[3], 0};
int A1[nDim] = {X[3]+R[3], X[3]+R[3], X[3]+R[3], X[2]+2*R[2]};
int B0[nDim] = {R[2], R[2], 0, 0};
int B1[nDim] = {X[2]+R[2], X[2]+R[2], X[1]+2*R[1], X[1]+2*R[1]};
int C0[nDim] = {R[1], 0, 0, 0};
int C1[nDim] = {X[1]+R[1], X[0]+2*R[0], X[0]+2*R[0], X[0]+2*R[0]};
int fSrc[nDim][nDim] = {
{E[2]*E[1]*E[0], E[1]*E[0], E[0], 1},
{E[2]*E[1]*E[0], E[1]*E[0], 1, E[0]},
{E[2]*E[1]*E[0], E[0], 1, E[1]*E[0]},
{E[1]*E[0], E[0], 1, E[2]*E[1]*E[0]}
};
int fBuf[nDim][nDim]={
{E[2]*E[1], E[1], 1, E[3]*E[2]*E[1]},
{E[2]*E[0], E[0], 1, E[3]*E[2]*E[0]},
{E[1]*E[0], E[0], 1, E[3]*E[1]*E[0]},
{E[1]*E[0], E[0], 1, E[2]*E[1]*E[0]}
};
//set the local processor parity
//switching odd and even ghost gauge when that dimension size is odd
//only switch if X[dir] is odd and the gridsize in that dimension is greater than 1
// FIXME - I don't understand this, shouldn't it be commDim(dim) == 0 ?
int localParity[nDim];
for (int d=0; d<nDim; d++)
localParity[dim] = ((X[dim] % 2 ==1) && (commDim(dim) > 1)) ? 1 : 0;
// localParity[dim] = (X[dim]%2==0 || commDim(dim)) ? 0 : 1;
ExtractGhostExArg<Order, nDim> arg(order, dim, X, R, surfaceCB, A0, A1, B0, B1,
C0, C1, fSrc, fBuf, localParity);
ExtractGhostEx<Float,length,nDim,Order> extractor(arg, extract, u, location);
extractor.apply(0);
if (location == QUDA_CUDA_FIELD_LOCATION) {
hipDeviceSynchronize(); // need to sync before we commence any communication
checkCudaError();
}
}
/** This is the template driver for extractGhost */
template <typename Float>
void extractGhostEx(const GaugeField &u, int dim, const int *R, Float **Ghost, bool extract) {
const int length = 18;
QudaFieldLocation location =
(typeid(u)==typeid(cudaGaugeField)) ? QUDA_CUDA_FIELD_LOCATION : QUDA_CPU_FIELD_LOCATION;
if (u.isNative()) {
if (u.Reconstruct() == QUDA_RECONSTRUCT_NO) {
if (typeid(Float)==typeid(short) && u.LinkType() == QUDA_ASQTAD_FAT_LINKS) {
extractGhostEx<short,length>(FloatNOrder<short,length,2,19>(u, 0, (short**)Ghost),
dim, u.SurfaceCB(), u.X(), R, extract, u, location);
} else {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G;
extractGhostEx<Float,length>(G(u, 0, Ghost),
dim, u.SurfaceCB(), u.X(), R, extract, u, location);
}
} else if (u.Reconstruct() == QUDA_RECONSTRUCT_12) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type G;
extractGhostEx<Float,length>(G(u, 0, Ghost),
dim, u.SurfaceCB(), u.X(), R, extract, u, location);
} else if (u.Reconstruct() == QUDA_RECONSTRUCT_8) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type G;
extractGhostEx<Float,length>(G(u, 0, Ghost),
dim, u.SurfaceCB(), u.X(), R, extract, u, location);
} else if (u.Reconstruct() == QUDA_RECONSTRUCT_13) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_13>::type G;
extractGhostEx<Float,length>(G(u, 0, Ghost),
dim, u.SurfaceCB(), u.X(), R, extract, u, location);
} else if (u.Reconstruct() == QUDA_RECONSTRUCT_9) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_13>::type G;
extractGhostEx<Float,length>(G(u, 0, Ghost),
dim, u.SurfaceCB(), u.X(), R, extract, u, location);
}
} else if (u.Order() == QUDA_QDP_GAUGE_ORDER) {
#ifdef BUILD_QDP_INTERFACE
extractGhostEx<Float,length>(QDPOrder<Float,length>(u, 0, Ghost),
dim, u.SurfaceCB(), u.X(), R, extract, u, location);
#else
errorQuda("QDP interface has not been built\n");
#endif
} else if (u.Order() == QUDA_QDPJIT_GAUGE_ORDER) {
#ifdef BUILD_QDPJIT_INTERFACE
extractGhostEx<Float,length>(QDPJITOrder<Float,length>(u, 0, Ghost),
dim, u.SurfaceCB(), u.X(), R, extract, u, location);
#else
errorQuda("QDPJIT interface has not been built\n");
#endif
} else if (u.Order() == QUDA_CPS_WILSON_GAUGE_ORDER) {
#ifdef BUILD_CPS_INTERFACE
extractGhostEx<Float,length>(CPSOrder<Float,length>(u, 0, Ghost),
dim, u.SurfaceCB(), u.X(), R, extract, u, location);
#else
errorQuda("CPS interface has not been built\n");
#endif
} else if (u.Order() == QUDA_MILC_GAUGE_ORDER) {
#ifdef BUILD_MILC_INTERFACE
extractGhostEx<Float,length>(MILCOrder<Float,length>(u, 0, Ghost),
dim, u.SurfaceCB(), u.X(), R, extract, u, location);
#else
errorQuda("MILC interface has not been built\n");
#endif
} else if (u.Order() == QUDA_BQCD_GAUGE_ORDER) {
#ifdef BUILD_BQCD_INTERFACE
extractGhostEx<Float,length>(BQCDOrder<Float,length>(u, 0, Ghost),
dim, u.SurfaceCB(), u.X(), R, extract, u, location);
#else
errorQuda("BQCD interface has not been built\n");
#endif
} else if (u.Order() == QUDA_TIFR_GAUGE_ORDER) {
#ifdef BUILD_TIFR_INTERFACE
extractGhostEx<Float,length>(TIFROrder<Float,length>(u, 0, Ghost),
dim, u.SurfaceCB(), u.X(), R, extract, u, location);
#else
errorQuda("TIFR interface has not been built\n");
#endif
} else {
errorQuda("Gauge field %d order not supported", u.Order());
}
}
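  /* Dispatch summary: for native fields the reconstruction type (NO/12/8/13/9)
     selects the gauge_mapper accessor (with a special 19-reconstruct path for
     half-precision fat links), while non-native fields go through the
     QDP/QDPJIT/CPS/MILC/BQCD/TIFR orders, each guarded by its build flag; all
     branches end up in the same templated extractGhostEx() call above. */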
void extractExtendedGaugeGhost(const GaugeField &u, int dim, const int *R,
void **ghost, bool extract) {
if (u.Precision() == QUDA_DOUBLE_PRECISION) {
extractGhostEx(u, dim, R, (double**)ghost, extract);
} else if (u.Precision() == QUDA_SINGLE_PRECISION) {
extractGhostEx(u, dim, R, (float**)ghost, extract);
} else if (u.Precision() == QUDA_HALF_PRECISION) {
extractGhostEx(u, dim, R, (short**)ghost, extract);
} else {
errorQuda("Unknown precision type %d", u.Precision());
}
}
} // namespace quda
| 7814c82913999250a8dc5b51f8c91b33883ee15f.cu | #include <quda_internal.h>
#include <gauge_field_order.h>
namespace quda {
template <typename Order, int nDim>
struct ExtractGhostExArg {
Order order;
int dim;
int X[nDim];
int R[nDim];
int surfaceCB[nDim];
int A0[nDim];
int A1[nDim];
int B0[nDim];
int B1[nDim];
int C0[nDim];
int C1[nDim];
int fBody[nDim][nDim];
int fBuf[nDim][nDim];
int localParity[nDim];
ExtractGhostExArg(const Order &order, int dim, const int *X_, const int *R_,
const int *surfaceCB_,
const int *A0_, const int *A1_, const int *B0_, const int *B1_,
const int *C0_, const int *C1_, const int fBody_[nDim][nDim],
const int fBuf_[nDim][nDim], const int *localParity_)
: order(order), dim(dim) {
for (int d=0; d<nDim; d++) {
X[d] = X_[d];
R[d] = R_[d];
surfaceCB[d] = surfaceCB_[d];
A0[d] = A0_[d];
A1[d] = A1_[d];
B0[d] = B0_[d];
B1[d] = B1_[d];
C0[d] = C0_[d];
C1[d] = C1_[d];
for (int e=0; e<nDim; e++) {
fBody[d][e] = fBody_[d][e];
fBuf[d][e] = fBuf_[d][e];
}
localParity[d] = localParity_[d];
}
}
};
template <typename Float, int length, typename Arg>
__device__ __host__ void extractor(Arg &arg, int dir, int a, int b,
int c, int d, int g, int parity) {
typename mapper<Float>::type u[length];
int &dim = arg.dim;
int srcIdx = (a*arg.fBody[dim][0] + b*arg.fBody[dim][1] +
c*arg.fBody[dim][2] + d*arg.fBody[dim][3]) >> 1;
int dstIdx = (a*arg.fBuf[dim][0] + b*arg.fBuf[dim][1] +
c*arg.fBuf[dim][2] + (d-(dir?arg.X[dim]:arg.R[dim]))*arg.fBuf[dim][3]) >> 1;
// load the ghost element from the bulk
arg.order.load(u, srcIdx, g, parity);
// need dir dependence in write
// srcIdx is used here to determine boundary condition
arg.order.saveGhostEx(u, dstIdx, srcIdx, dir, dim, g,
(parity+arg.localParity[dim])&1, arg.R);
}
template <typename Float, int length, typename Arg>
__device__ __host__ void injector(Arg &arg, int dir, int a, int b,
int c, int d, int g, int parity) {
typename mapper<Float>::type u[length];
int &dim = arg.dim;
int srcIdx = (a*arg.fBuf[dim][0] + b*arg.fBuf[dim][1] +
c*arg.fBuf[dim][2] + (d-dir*(arg.X[dim]+arg.R[dim]))*arg.fBuf[dim][3]) >> 1;
int dstIdx = (a*arg.fBody[dim][0] + b*arg.fBody[dim][1] +
c*arg.fBody[dim][2] + d*arg.fBody[dim][3]) >> 1;
// need dir dependence in read
// dstIdx is used here to determine boundary condition
arg.order.loadGhostEx(u, srcIdx, dstIdx, dir, dim, g,
(parity+arg.localParity[dim])&1, arg.R);
arg.order.save(u, dstIdx, g, parity); // save the ghost element into the bulk
}
/**
Generic CPU gauge ghost extraction and packing
     NB This routine is specialized to four dimensions
*/
template <typename Float, int length, int nDim, typename Order, bool extract>
void extractGhostEx(ExtractGhostExArg<Order,nDim> arg) {
typedef typename mapper<Float>::type RegType;
int dim = arg.dim;
for (int parity=0; parity<2; parity++) {
// the following 4-way loop means this is specialized for 4 dimensions
// dir = 0 backwards, dir = 1 forwards
for (int dir = 0; dir<2; dir++) {
int D0 = extract ? dir*arg.X[dim] + (1-dir)*arg.R[dim] : dir*(arg.X[dim] + arg.R[dim]);
for (int d=D0; d<D0+arg.R[dim]; d++) {
for (int a=arg.A0[dim]; a<arg.A1[dim]; a++) { // loop over the interior surface
for (int b=arg.B0[dim]; b<arg.B1[dim]; b++) { // loop over the interior surface
for (int c=arg.C0[dim]; c<arg.C1[dim]; c++) { // loop over the interior surface
for (int g=0; g<arg.order.geometry; g++) {
// we only do the extraction for parity we are currently working on
int oddness = (a+b+c+d) & 1;
if (oddness == parity) {
if (extract) extractor<Float,length>(arg, dir, a, b, c, d, g, parity);
else injector<Float,length>(arg, dir, a, b, c, d, g, parity);
} // oddness == parity
} // g
} // c
} // b
} // a
} // d
} // dir
} // parity
}
  /**
     Generic GPU gauge ghost extraction and packing
     NB This routine is specialized to four dimensions
     FIXME this implementation will have two-way warp divergence
  */
template <typename Float, int length, int nDim, typename Order, bool extract>
__global__ void extractGhostExKernel(ExtractGhostExArg<Order,nDim> arg) {
typedef typename mapper<Float>::type RegType;
int dim = arg.dim;
// parallelize over parity and dir using block or grid
/*for (int parity=0; parity<2; parity++) {*/
{
int parity = blockIdx.z;
// the following 4-way loop means this is specialized for 4 dimensions
// dir = 0 backwards, dir = 1 forwards
//for (int dir = 0; dir<2; dir++) {
{
int dir = blockIdx.y;
// this will have two-warp divergence since we only do work on
// one parity but parity alternates between threads
// linear index used for writing into ghost buffer
int X = blockIdx.x * blockDim.x + threadIdx.x;
int dA = arg.A1[dim]-arg.A0[dim];
int dB = arg.B1[dim]-arg.B0[dim];
int dC = arg.C1[dim]-arg.C0[dim];
int D0 = extract ? dir*arg.X[dim] + (1-dir)*arg.R[dim] : dir*(arg.X[dim] + arg.R[dim]);
if (X >= arg.R[dim]*dA*dB*dC*arg.order.geometry) return;
// thread order is optimized to maximize coalescing
// X = (((g*R + d) * dA + a)*dB + b)*dC + c
int gdab = X / dC;
int c = arg.C0[dim] + X - gdab*dC;
int gda = gdab / dB;
int b = arg.B0[dim] + gdab - gda *dB;
int gd = gda / dA;
int a = arg.A0[dim] + gda - gd *dA;
int g = gd / arg.R[dim];
int d = D0 + gd - g *arg.R[dim];
// we only do the extraction for parity we are currently working on
int oddness = (a+b+c+d) & 1;
if (oddness == parity) {
if (extract) extractor<Float,length>(arg, dir, a, b, c, d, g, parity);
else injector<Float,length>(arg, dir, a, b, c, d, g, parity);
} // oddness == parity
} // dir
} // parity
}
template <typename Float, int length, int nDim, typename Order>
class ExtractGhostEx : Tunable {
ExtractGhostExArg<Order,nDim> arg;
int size;
bool extract;
const GaugeField &meta;
QudaFieldLocation location;
private:
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0 ;}
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return size; }
public:
ExtractGhostEx(ExtractGhostExArg<Order,nDim> &arg, bool extract,
const GaugeField &meta, QudaFieldLocation location)
: arg(arg), extract(extract), meta(meta), location(location) {
int dA = arg.A1[arg.dim]-arg.A0[arg.dim];
int dB = arg.B1[arg.dim]-arg.B0[arg.dim];
int dC = arg.C1[arg.dim]-arg.C0[arg.dim];
size = arg.R[arg.dim]*dA*dB*dC*arg.order.geometry;
writeAuxString("prec=%lu,stride=%d,extract=%d,dimension=%d",
sizeof(Float),arg.order.stride, extract, arg.dim);
}
virtual ~ExtractGhostEx() { ; }
void apply(const cudaStream_t &stream) {
if (extract) {
if (location==QUDA_CPU_FIELD_LOCATION) {
extractGhostEx<Float,length,nDim,Order,true>(arg);
} else {
#if (__COMPUTE_CAPABILITY__ >= 200)
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
tp.grid.y = 2;
tp.grid.z = 2;
extractGhostExKernel<Float,length,nDim,Order,true>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
#else
errorQuda("extractGhostEx not supported on pre-Fermi architecture");
#endif
}
} else { // we are injecting
if (location==QUDA_CPU_FIELD_LOCATION) {
extractGhostEx<Float,length,nDim,Order,false>(arg);
} else {
#if (__COMPUTE_CAPABILITY__ >= 200)
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
tp.grid.y = 2;
tp.grid.z = 2;
extractGhostExKernel<Float,length,nDim,Order,false>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
#else
errorQuda("extractGhostEx not supported on pre-Fermi architecture");
#endif
}
}
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
std::string paramString(const TuneParam ¶m) const { // Don't bother printing the grid dim.
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const { return 0; }
long long bytes() const { return 2 * 2 * 2 * size * arg.order.Bytes(); } // 2 for i/o
};
/**
     Generic gauge ghost extraction and packing (dispatches to the CPU loop or the GPU kernel)
     NB This routine is specialized to four dimensions
@param E the extended gauge dimensions
@param R array holding the radius of the extended region
@param extract Whether we are extracting or injecting the ghost zone
*/
template <typename Float, int length, typename Order>
void extractGhostEx(Order order, const int dim, const int *surfaceCB, const int *E,
const int *R, bool extract, const GaugeField &u, QudaFieldLocation location) {
const int nDim = 4;
    //loop variables: a, b, c with a the most significant and c the least significant
    //A0, B0, C0 the minimum value
    //A1, B1, C1 the maximum value
int X[nDim]; // compute interior dimensions
for (int d=0; d<nDim; d++) X[d] = E[d] - 2*R[d];
//..........x..........y............z.............t
int A0[nDim] = {R[3], R[3], R[3], 0};
int A1[nDim] = {X[3]+R[3], X[3]+R[3], X[3]+R[3], X[2]+2*R[2]};
int B0[nDim] = {R[2], R[2], 0, 0};
int B1[nDim] = {X[2]+R[2], X[2]+R[2], X[1]+2*R[1], X[1]+2*R[1]};
int C0[nDim] = {R[1], 0, 0, 0};
int C1[nDim] = {X[1]+R[1], X[0]+2*R[0], X[0]+2*R[0], X[0]+2*R[0]};
int fSrc[nDim][nDim] = {
{E[2]*E[1]*E[0], E[1]*E[0], E[0], 1},
{E[2]*E[1]*E[0], E[1]*E[0], 1, E[0]},
{E[2]*E[1]*E[0], E[0], 1, E[1]*E[0]},
{E[1]*E[0], E[0], 1, E[2]*E[1]*E[0]}
};
int fBuf[nDim][nDim]={
{E[2]*E[1], E[1], 1, E[3]*E[2]*E[1]},
{E[2]*E[0], E[0], 1, E[3]*E[2]*E[0]},
{E[1]*E[0], E[0], 1, E[3]*E[1]*E[0]},
{E[1]*E[0], E[0], 1, E[2]*E[1]*E[0]}
};
//set the local processor parity
//switching odd and even ghost gauge when that dimension size is odd
//only switch if X[dir] is odd and the gridsize in that dimension is greater than 1
// FIXME - I don't understand this, shouldn't it be commDim(dim) == 0 ?
int localParity[nDim];
for (int d=0; d<nDim; d++)
localParity[dim] = ((X[dim] % 2 ==1) && (commDim(dim) > 1)) ? 1 : 0;
// localParity[dim] = (X[dim]%2==0 || commDim(dim)) ? 0 : 1;
ExtractGhostExArg<Order, nDim> arg(order, dim, X, R, surfaceCB, A0, A1, B0, B1,
C0, C1, fSrc, fBuf, localParity);
ExtractGhostEx<Float,length,nDim,Order> extractor(arg, extract, u, location);
extractor.apply(0);
if (location == QUDA_CUDA_FIELD_LOCATION) {
cudaDeviceSynchronize(); // need to sync before we commence any communication
checkCudaError();
}
}
/** This is the template driver for extractGhost */
template <typename Float>
void extractGhostEx(const GaugeField &u, int dim, const int *R, Float **Ghost, bool extract) {
const int length = 18;
QudaFieldLocation location =
(typeid(u)==typeid(cudaGaugeField)) ? QUDA_CUDA_FIELD_LOCATION : QUDA_CPU_FIELD_LOCATION;
if (u.isNative()) {
if (u.Reconstruct() == QUDA_RECONSTRUCT_NO) {
if (typeid(Float)==typeid(short) && u.LinkType() == QUDA_ASQTAD_FAT_LINKS) {
extractGhostEx<short,length>(FloatNOrder<short,length,2,19>(u, 0, (short**)Ghost),
dim, u.SurfaceCB(), u.X(), R, extract, u, location);
} else {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G;
extractGhostEx<Float,length>(G(u, 0, Ghost),
dim, u.SurfaceCB(), u.X(), R, extract, u, location);
}
} else if (u.Reconstruct() == QUDA_RECONSTRUCT_12) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type G;
extractGhostEx<Float,length>(G(u, 0, Ghost),
dim, u.SurfaceCB(), u.X(), R, extract, u, location);
} else if (u.Reconstruct() == QUDA_RECONSTRUCT_8) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type G;
extractGhostEx<Float,length>(G(u, 0, Ghost),
dim, u.SurfaceCB(), u.X(), R, extract, u, location);
} else if (u.Reconstruct() == QUDA_RECONSTRUCT_13) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_13>::type G;
extractGhostEx<Float,length>(G(u, 0, Ghost),
dim, u.SurfaceCB(), u.X(), R, extract, u, location);
} else if (u.Reconstruct() == QUDA_RECONSTRUCT_9) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_13>::type G;
extractGhostEx<Float,length>(G(u, 0, Ghost),
dim, u.SurfaceCB(), u.X(), R, extract, u, location);
}
} else if (u.Order() == QUDA_QDP_GAUGE_ORDER) {
#ifdef BUILD_QDP_INTERFACE
extractGhostEx<Float,length>(QDPOrder<Float,length>(u, 0, Ghost),
dim, u.SurfaceCB(), u.X(), R, extract, u, location);
#else
errorQuda("QDP interface has not been built\n");
#endif
} else if (u.Order() == QUDA_QDPJIT_GAUGE_ORDER) {
#ifdef BUILD_QDPJIT_INTERFACE
extractGhostEx<Float,length>(QDPJITOrder<Float,length>(u, 0, Ghost),
dim, u.SurfaceCB(), u.X(), R, extract, u, location);
#else
errorQuda("QDPJIT interface has not been built\n");
#endif
} else if (u.Order() == QUDA_CPS_WILSON_GAUGE_ORDER) {
#ifdef BUILD_CPS_INTERFACE
extractGhostEx<Float,length>(CPSOrder<Float,length>(u, 0, Ghost),
dim, u.SurfaceCB(), u.X(), R, extract, u, location);
#else
errorQuda("CPS interface has not been built\n");
#endif
} else if (u.Order() == QUDA_MILC_GAUGE_ORDER) {
#ifdef BUILD_MILC_INTERFACE
extractGhostEx<Float,length>(MILCOrder<Float,length>(u, 0, Ghost),
dim, u.SurfaceCB(), u.X(), R, extract, u, location);
#else
errorQuda("MILC interface has not been built\n");
#endif
} else if (u.Order() == QUDA_BQCD_GAUGE_ORDER) {
#ifdef BUILD_BQCD_INTERFACE
extractGhostEx<Float,length>(BQCDOrder<Float,length>(u, 0, Ghost),
dim, u.SurfaceCB(), u.X(), R, extract, u, location);
#else
errorQuda("BQCD interface has not been built\n");
#endif
} else if (u.Order() == QUDA_TIFR_GAUGE_ORDER) {
#ifdef BUILD_TIFR_INTERFACE
extractGhostEx<Float,length>(TIFROrder<Float,length>(u, 0, Ghost),
dim, u.SurfaceCB(), u.X(), R, extract, u, location);
#else
errorQuda("TIFR interface has not been built\n");
#endif
} else {
errorQuda("Gauge field %d order not supported", u.Order());
}
}
void extractExtendedGaugeGhost(const GaugeField &u, int dim, const int *R,
void **ghost, bool extract) {
if (u.Precision() == QUDA_DOUBLE_PRECISION) {
extractGhostEx(u, dim, R, (double**)ghost, extract);
} else if (u.Precision() == QUDA_SINGLE_PRECISION) {
extractGhostEx(u, dim, R, (float**)ghost, extract);
} else if (u.Precision() == QUDA_HALF_PRECISION) {
extractGhostEx(u, dim, R, (short**)ghost, extract);
} else {
errorQuda("Unknown precision type %d", u.Precision());
}
}
} // namespace quda
|
b13756a4a22c592296f20b06bc889210094e0232.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "LinkedCellsImpl.h"
#include "Integration/Leapfrog.h"
#include <cmath>
#include <Domain/Hilbert.h>
#include <Domain/Hilbert3D.h>
#ifndef _OPENMP
#error("Cuda code requires OpenMP")
#endif
/*
__device__ void calculateKernelInner(int NA, int NB, Particle *cellA, Particle *cellB) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idx < NA && idy < NB) {
auto p = cellA[idx];
auto q = cellB[idy];
if (p != q) {
SpringForce::interact(p, q);
}
}
}*/
struct GPULayout {
Particle *deviceParticles = nullptr;
Particle *deviceHaloParticles = nullptr;
int *deviceInner = nullptr;
int *devicePairOffsets = nullptr;
Cell *deviceCells = nullptr;
int size = 0;
Particle *resultParticles = nullptr;
};
__global__ void
calculateKernel(Cell *cells, Particle *particles, Particle *haloParticles, int *inner, int *pairOffsets) {
int cellIdx = inner[blockIdx.x];
auto &cell = cells[cellIdx];
int offset = pairOffsets[blockIdx.y];
auto &otherCell = cells[cellIdx + offset];
int idx = threadIdx.x;
int idy = threadIdx.y;
if (idx < cell.size && idy < otherCell.size) {
int pi = cell.data[idx];
int qi = otherCell.data[idy];
if (pi != qi) {
Particle *p;
Particle *q;
if (pi >= 0) {
p = &particles[pi];
} else {
p = &haloParticles[-pi - 1];
}
if (qi >= 0) {
q = &particles[qi];
} else {
q = &haloParticles[-qi - 1];
}
SpringForce::interact(*p, *q);
p->modified = true;
}
}
}
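// Note on the cell contents used above: a non-negative entry i in cell.data
// refers to particles[i] owned by this process, while a negative entry encodes
// a halo copy, with -i - 1 giving the slot in haloParticles (e.g. -3 maps to
// haloParticles[2]).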
__global__ void iterateKernel(Cell *cells, Particle *particles, int *inner) {
int cellIdx = inner[blockIdx.x];
auto &cell = cells[cellIdx];
int idx = threadIdx.x;
if (idx < cell.size) {
int pi = cell.data[idx];
SpringForce::calculate(particles[pi]);
}
}
__global__ void preKernel(Cell *cells, Particle *particles, int *inner) {
int cellIdx = inner[blockIdx.x];
auto &cell = cells[cellIdx];
int idx = threadIdx.x;
if (idx < cell.size) {
int pi = cell.data[idx];
Leapfrog::doStepPreForce(particles[pi]);
}
}
__global__ void postKernel(Cell *cells, Particle *particles, int *inner) {
int cellIdx = inner[blockIdx.x];
auto &cell = cells[cellIdx];
int idx = threadIdx.x;
if (idx < cell.size) {
int pi = cell.data[idx];
Leapfrog::doStepPostForce(particles[pi]);
}
}
void LinkedCellsImpl::prepareComputation() {
#pragma omp parallel for
for (int devId = 0; devId < GPU_N; ++devId) {
CudaSafeCall(hipSetDevice(devId));
int N = this->cells.size();
CudaSafeCall(hipMalloc((void **) &this->layout[devId].deviceHaloParticles, sizeof(Particle) * N));
// Copy cells to device
CudaSafeCall(hipMemcpy(this->layout[devId].deviceCells, this->cells.data(), sizeof(Cell) * N, hipMemcpyHostToDevice));
N = this->haloParticles.size();
// Copy halo particles to device
CudaSafeCall(hipMemcpy(this->layout[devId].deviceHaloParticles, this->haloParticles.data(),
sizeof(Particle) * N, hipMemcpyHostToDevice));
}
}
void LinkedCellsImpl::finalizeComputation() {
#pragma omp parallel for
for (int devId = 0; devId < GPU_N; ++devId) {
CudaSafeCall(hipSetDevice(devId));
int N = this->particles.size();
// Copy particles from device to host to allow output to access the data
CudaSafeCall(hipMemcpy(
//this->particles.data(),
this->layout[devId].resultParticles,
this->layout[devId].deviceParticles,
sizeof(Particle) * N,
hipMemcpyDeviceToHost));
CudaSafeCall(hipFree(this->layout[devId].deviceHaloParticles));
}
for (int devId = 0; devId < GPU_N; ++devId) {
CudaSafeCall(hipSetDevice(devId));
CudaSafeCall(hipDeviceSynchronize());
}
// reduce result
for (int devId = 0; devId < GPU_N; ++devId) {
for (unsigned long i = 0; i < this->particles.size(); ++i) {
if (this->layout[devId].resultParticles[i].modified) {
this->particles[i].x = this->layout[devId].resultParticles[i].x;
this->particles[i].v = this->layout[devId].resultParticles[i].v;
}
}
}
}
void LinkedCellsImpl::iteratePairs() {
#pragma omp parallel for
for (int devId = 0; devId < GPU_N; ++devId) {
CudaSafeCall(hipSetDevice(devId));
int NInner = this->layout[devId].size;
int NPairOffsets = this->pairOffsets.size();
dim3 blocks(NInner, NPairOffsets);
dim3 threadsPerBlock(MAXCELLPARTICLE, MAXCELLPARTICLE);
calculateKernel << < blocks, threadsPerBlock >> >
(this->layout[devId].deviceCells, this->layout[devId].deviceParticles, this->layout[devId].deviceHaloParticles,
this->layout[devId].deviceInner, this->layout[devId].devicePairOffsets);
CudaCheckError();
}
}
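// Launch shape used in iteratePairs(): one block per (inner cell, neighbour
// offset) pair and a MAXCELLPARTICLE x MAXCELLPARTICLE thread tile, so each
// particle pair between the two cells gets its own thread; threads beyond the
// actual occupancy of either cell simply do nothing.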
void LinkedCellsImpl::iterate() {
#pragma omp parallel for
for (int devId = 0; devId < GPU_N; ++devId) {
CudaSafeCall(hipSetDevice(devId));
int NInner = this->layout[devId].size;
iterateKernel << < NInner, MAXCELLPARTICLE>> >
(this->layout[devId].deviceCells, this->layout[devId].deviceParticles, this->layout[devId].deviceInner);
CudaCheckError();
}
}
void LinkedCellsImpl::preStep() {
#pragma omp parallel for
for (int devId = 0; devId < GPU_N; ++devId) {
CudaSafeCall(hipSetDevice(devId));
int NInner = this->layout[devId].size;
preKernel << < NInner, MAXCELLPARTICLE >> >
(this->layout[devId].deviceCells, this->layout[devId].deviceParticles, this->layout[devId].deviceInner);
CudaCheckError();
}
}
void LinkedCellsImpl::postStep() {
#pragma omp parallel for
for (int devId = 0; devId < GPU_N; ++devId) {
CudaSafeCall(hipSetDevice(devId));
int NInner = this->layout[devId].size;
postKernel << < NInner, MAXCELLPARTICLE>> >
(this->layout[devId].deviceCells, this->layout[devId].deviceParticles, this->layout[devId].deviceInner);
CudaCheckError();
}
}
LinkedCellsImpl::LinkedCellsImpl(Domain &domain, Vector cellSizeTarget, std::vector<Particle> &particles) : LinkedCells(
domain, cellSizeTarget, particles) {
// Get number of available devices
CudaSafeCall(hipGetDeviceCount(&GPU_N));
//GPU_N -= GPU_N % 2;
printf("CUDA-capable device count: %i\n", GPU_N);
this->layout = new GPULayout[GPU_N];
IntVector numInner = numCells;
numInner.x -= 2;
numInner.y -= 2;
numInner.z -= 2;
if(numInner.z == 1){
std::cout << "Init 2D Hilbert\n";
this->decomp = new Hilbert(inner, numInner);
} else {
std::cout << "Init 3D Hilbert\n";
this->decomp = new Hilbert3D(inner, numInner);
}
#pragma omp parallel for
for (int devId = 0; devId < GPU_N; ++devId) {
std::cout << "Init device " << devId << "\n";
CudaSafeCall(hipSetDevice(devId));
// Copy particles to device
std::cout << "Copy particles\n";
int N = this->particles.size();
CudaSafeCall(hipMalloc((void **) &this->layout[devId].deviceParticles, sizeof(Particle) * N));
CudaSafeCall(
hipMemcpy(this->layout[devId].deviceParticles, this->particles.data(), sizeof(Particle) * N,
hipMemcpyHostToDevice));
CudaSafeCall(hipHostMalloc((void **) &this->layout[devId].resultParticles, sizeof(Particle) * N));
std::cout << "Copy inner\n";
N = this->inner.size();
// Copy inner cell indices to device
CudaSafeCall(hipMalloc((void **) &this->layout[devId].deviceInner, sizeof(int) * N));
CudaSafeCall(hipMemcpy(this->layout[devId].deviceInner, this->inner.data(), sizeof(int) * N,
hipMemcpyHostToDevice));
this->layout[devId].size = N;
std::cout << "Copy offsets\n";
N = this->pairOffsets.size();
// Copy pairOffsets to device
CudaSafeCall(hipMalloc((void **) &this->layout[devId].devicePairOffsets, sizeof(int) * N));
CudaSafeCall(
hipMemcpy(this->layout[devId].devicePairOffsets, this->pairOffsets.data(), sizeof(int) * N,
hipMemcpyHostToDevice));
std::cout << "Malloc cells\n";
CudaSafeCall(hipMalloc((void **) &this->layout[devId].deviceCells, sizeof(Cell) * this->cells.size()));
}
}
LinkedCellsImpl::~LinkedCellsImpl() {
for (int devId = 0; devId < GPU_N; ++devId) {
CudaSafeCall(hipSetDevice(devId));
CudaSafeCall(hipFree(this->layout[devId].deviceParticles));
        CudaSafeCall(hipHostFree(this->layout[devId].resultParticles)); // pinned host memory from hipHostMalloc
CudaSafeCall(hipFree(this->layout[devId].deviceInner));
CudaSafeCall(hipFree(this->layout[devId].devicePairOffsets));
CudaSafeCall(hipFree(this->layout[devId].deviceCells));
}
    delete[] this->layout; // allocated with new GPULayout[GPU_N]
}
void LinkedCellsImpl::updateDecomp() {
// Assert square boundary
// assert(this->numCells.x % 2 == 0 && this->numCells.y == this->numCells.x && this->numCells.z == this->numCells.x);
// TODO Set inner cells for each device
#ifdef DYNDD
auto ordered = this->decomp->ordered();
int partSize = ceil(1.0 * this->particles.size() / GPU_N);
int offset = 0;
for (int devId = 0; devId < GPU_N; ++devId) {
CudaSafeCall(hipSetDevice(devId));
CudaSafeCall(hipFree(this->layout[devId].deviceInner));
int upperCellIndex = offset;
int particleCount = 0;
while(particleCount < partSize && upperCellIndex < ordered.size()){
auto cellIdx = ordered.at(upperCellIndex);
particleCount += cells.at(cellIdx).size;
upperCellIndex++;
}
int N = upperCellIndex-offset;
// Copy inner cell indices to device
CudaSafeCall(hipMalloc((void **) &this->layout[devId].deviceInner, sizeof(int) * N));
CudaSafeCall(hipMemcpy(this->layout[devId].deviceInner, &ordered.data()[offset], sizeof(int) * N,
hipMemcpyHostToDevice));
this->layout[devId].size = N;
offset = upperCellIndex;
}
#else
int partSize = ceil(1.0 * this->inner.size() / GPU_N);
int offset = 0;
int remaining = this->inner.size();
for (int devId = 0; devId < GPU_N; ++devId) {
int N = partSize < remaining ? partSize : remaining;
CudaSafeCall(hipSetDevice(devId));
// Copy inner cell indices to device
CudaSafeCall(hipFree(this->layout[devId].deviceInner));
CudaSafeCall(hipMalloc((void **) &this->layout[devId].deviceInner, sizeof(int) * N));
CudaSafeCall(hipMemcpy(this->layout[devId].deviceInner, &this->inner.data()[offset], sizeof(int) * N,
hipMemcpyHostToDevice));
this->layout[devId].size = N;
offset += N;
remaining -= N;
}
/*for (int devId = 0; devId < GPU_N; ++devId) {
hipStreamSynchronize(this->layout[devId].stream);
}*/
assert(remaining == 0);
#endif
}
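// Decomposition note: with DYNDD defined, cells are walked in Hilbert-curve
// order and split so that each GPU receives roughly particles.size()/GPU_N
// particles; without it, the inner cell list is cut into GPU_N contiguous,
// equally sized chunks regardless of how many particles each cell holds.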
void LinkedCellsImpl::init() {
} | b13756a4a22c592296f20b06bc889210094e0232.cu | #include "LinkedCellsImpl.h"
#include "Integration/Leapfrog.h"
#include <cmath>
#include <Domain/Hilbert.h>
#include <Domain/Hilbert3D.h>
#ifndef _OPENMP
#error("Cuda code requires OpenMP")
#endif
/*
__device__ void calculateKernelInner(int NA, int NB, Particle *cellA, Particle *cellB) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idx < NA && idy < NB) {
auto p = cellA[idx];
auto q = cellB[idy];
if (p != q) {
SpringForce::interact(p, q);
}
}
}*/
struct GPULayout {
Particle *deviceParticles = nullptr;
Particle *deviceHaloParticles = nullptr;
int *deviceInner = nullptr;
int *devicePairOffsets = nullptr;
Cell *deviceCells = nullptr;
int size = 0;
Particle *resultParticles = nullptr;
};
__global__ void
calculateKernel(Cell *cells, Particle *particles, Particle *haloParticles, int *inner, int *pairOffsets) {
int cellIdx = inner[blockIdx.x];
auto &cell = cells[cellIdx];
int offset = pairOffsets[blockIdx.y];
auto &otherCell = cells[cellIdx + offset];
int idx = threadIdx.x;
int idy = threadIdx.y;
if (idx < cell.size && idy < otherCell.size) {
int pi = cell.data[idx];
int qi = otherCell.data[idy];
if (pi != qi) {
Particle *p;
Particle *q;
if (pi >= 0) {
p = &particles[pi];
} else {
p = &haloParticles[-pi - 1];
}
if (qi >= 0) {
q = &particles[qi];
} else {
q = &haloParticles[-qi - 1];
}
SpringForce::interact(*p, *q);
p->modified = true;
}
}
}
__global__ void iterateKernel(Cell *cells, Particle *particles, int *inner) {
int cellIdx = inner[blockIdx.x];
auto &cell = cells[cellIdx];
int idx = threadIdx.x;
if (idx < cell.size) {
int pi = cell.data[idx];
SpringForce::calculate(particles[pi]);
}
}
__global__ void preKernel(Cell *cells, Particle *particles, int *inner) {
int cellIdx = inner[blockIdx.x];
auto &cell = cells[cellIdx];
int idx = threadIdx.x;
if (idx < cell.size) {
int pi = cell.data[idx];
Leapfrog::doStepPreForce(particles[pi]);
}
}
__global__ void postKernel(Cell *cells, Particle *particles, int *inner) {
int cellIdx = inner[blockIdx.x];
auto &cell = cells[cellIdx];
int idx = threadIdx.x;
if (idx < cell.size) {
int pi = cell.data[idx];
Leapfrog::doStepPostForce(particles[pi]);
}
}
void LinkedCellsImpl::prepareComputation() {
#pragma omp parallel for
for (int devId = 0; devId < GPU_N; ++devId) {
CudaSafeCall(cudaSetDevice(devId));
int N = this->cells.size();
CudaSafeCall(cudaMalloc((void **) &this->layout[devId].deviceHaloParticles, sizeof(Particle) * N));
// Copy cells to device
CudaSafeCall(cudaMemcpy(this->layout[devId].deviceCells, this->cells.data(), sizeof(Cell) * N, cudaMemcpyHostToDevice));
N = this->haloParticles.size();
// Copy halo particles to device
CudaSafeCall(cudaMemcpy(this->layout[devId].deviceHaloParticles, this->haloParticles.data(),
sizeof(Particle) * N, cudaMemcpyHostToDevice));
}
}
void LinkedCellsImpl::finalizeComputation() {
#pragma omp parallel for
for (int devId = 0; devId < GPU_N; ++devId) {
CudaSafeCall(cudaSetDevice(devId));
int N = this->particles.size();
// Copy particles from device to host to allow output to access the data
CudaSafeCall(cudaMemcpy(
//this->particles.data(),
this->layout[devId].resultParticles,
this->layout[devId].deviceParticles,
sizeof(Particle) * N,
cudaMemcpyDeviceToHost));
CudaSafeCall(cudaFree(this->layout[devId].deviceHaloParticles));
}
for (int devId = 0; devId < GPU_N; ++devId) {
CudaSafeCall(cudaSetDevice(devId));
CudaSafeCall(cudaDeviceSynchronize());
}
// reduce result
for (int devId = 0; devId < GPU_N; ++devId) {
for (unsigned long i = 0; i < this->particles.size(); ++i) {
if (this->layout[devId].resultParticles[i].modified) {
this->particles[i].x = this->layout[devId].resultParticles[i].x;
this->particles[i].v = this->layout[devId].resultParticles[i].v;
}
}
}
}
void LinkedCellsImpl::iteratePairs() {
#pragma omp parallel for
for (int devId = 0; devId < GPU_N; ++devId) {
CudaSafeCall(cudaSetDevice(devId));
int NInner = this->layout[devId].size;
int NPairOffsets = this->pairOffsets.size();
dim3 blocks(NInner, NPairOffsets);
dim3 threadsPerBlock(MAXCELLPARTICLE, MAXCELLPARTICLE);
calculateKernel << < blocks, threadsPerBlock >> >
(this->layout[devId].deviceCells, this->layout[devId].deviceParticles, this->layout[devId].deviceHaloParticles,
this->layout[devId].deviceInner, this->layout[devId].devicePairOffsets);
CudaCheckError();
}
}
void LinkedCellsImpl::iterate() {
#pragma omp parallel for
for (int devId = 0; devId < GPU_N; ++devId) {
CudaSafeCall(cudaSetDevice(devId));
int NInner = this->layout[devId].size;
iterateKernel << < NInner, MAXCELLPARTICLE>> >
(this->layout[devId].deviceCells, this->layout[devId].deviceParticles, this->layout[devId].deviceInner);
CudaCheckError();
}
}
void LinkedCellsImpl::preStep() {
#pragma omp parallel for
for (int devId = 0; devId < GPU_N; ++devId) {
CudaSafeCall(cudaSetDevice(devId));
int NInner = this->layout[devId].size;
preKernel << < NInner, MAXCELLPARTICLE >> >
(this->layout[devId].deviceCells, this->layout[devId].deviceParticles, this->layout[devId].deviceInner);
CudaCheckError();
}
}
void LinkedCellsImpl::postStep() {
#pragma omp parallel for
for (int devId = 0; devId < GPU_N; ++devId) {
CudaSafeCall(cudaSetDevice(devId));
int NInner = this->layout[devId].size;
postKernel << < NInner, MAXCELLPARTICLE>> >
(this->layout[devId].deviceCells, this->layout[devId].deviceParticles, this->layout[devId].deviceInner);
CudaCheckError();
}
}
LinkedCellsImpl::LinkedCellsImpl(Domain &domain, Vector cellSizeTarget, std::vector<Particle> &particles) : LinkedCells(
domain, cellSizeTarget, particles) {
// Get number of available devices
CudaSafeCall(cudaGetDeviceCount(&GPU_N));
//GPU_N -= GPU_N % 2;
printf("CUDA-capable device count: %i\n", GPU_N);
this->layout = new GPULayout[GPU_N];
IntVector numInner = numCells;
numInner.x -= 2;
numInner.y -= 2;
numInner.z -= 2;
if(numInner.z == 1){
std::cout << "Init 2D Hilbert\n";
this->decomp = new Hilbert(inner, numInner);
} else {
std::cout << "Init 3D Hilbert\n";
this->decomp = new Hilbert3D(inner, numInner);
}
#pragma omp parallel for
for (int devId = 0; devId < GPU_N; ++devId) {
std::cout << "Init device " << devId << "\n";
CudaSafeCall(cudaSetDevice(devId));
// Copy particles to device
std::cout << "Copy particles\n";
int N = this->particles.size();
CudaSafeCall(cudaMalloc((void **) &this->layout[devId].deviceParticles, sizeof(Particle) * N));
CudaSafeCall(
cudaMemcpy(this->layout[devId].deviceParticles, this->particles.data(), sizeof(Particle) * N,
cudaMemcpyHostToDevice));
CudaSafeCall(cudaMallocHost((void **) &this->layout[devId].resultParticles, sizeof(Particle) * N));
std::cout << "Copy inner\n";
N = this->inner.size();
// Copy inner cell indices to device
CudaSafeCall(cudaMalloc((void **) &this->layout[devId].deviceInner, sizeof(int) * N));
CudaSafeCall(cudaMemcpy(this->layout[devId].deviceInner, this->inner.data(), sizeof(int) * N,
cudaMemcpyHostToDevice));
this->layout[devId].size = N;
std::cout << "Copy offsets\n";
N = this->pairOffsets.size();
// Copy pairOffsets to device
CudaSafeCall(cudaMalloc((void **) &this->layout[devId].devicePairOffsets, sizeof(int) * N));
CudaSafeCall(
cudaMemcpy(this->layout[devId].devicePairOffsets, this->pairOffsets.data(), sizeof(int) * N,
cudaMemcpyHostToDevice));
std::cout << "Malloc cells\n";
CudaSafeCall(cudaMalloc((void **) &this->layout[devId].deviceCells, sizeof(Cell) * this->cells.size()));
}
}
LinkedCellsImpl::~LinkedCellsImpl() {
for (int devId = 0; devId < GPU_N; ++devId) {
CudaSafeCall(cudaSetDevice(devId));
CudaSafeCall(cudaFree(this->layout[devId].deviceParticles));
        CudaSafeCall(cudaFreeHost(this->layout[devId].resultParticles)); // pinned host memory from cudaMallocHost
CudaSafeCall(cudaFree(this->layout[devId].deviceInner));
CudaSafeCall(cudaFree(this->layout[devId].devicePairOffsets));
CudaSafeCall(cudaFree(this->layout[devId].deviceCells));
}
    delete[] this->layout; // allocated with new GPULayout[GPU_N]
}
void LinkedCellsImpl::updateDecomp() {
// Assert square boundary
// assert(this->numCells.x % 2 == 0 && this->numCells.y == this->numCells.x && this->numCells.z == this->numCells.x);
// TODO Set inner cells for each device
#ifdef DYNDD
auto ordered = this->decomp->ordered();
int partSize = ceil(1.0 * this->particles.size() / GPU_N);
int offset = 0;
for (int devId = 0; devId < GPU_N; ++devId) {
CudaSafeCall(cudaSetDevice(devId));
CudaSafeCall(cudaFree(this->layout[devId].deviceInner));
int upperCellIndex = offset;
int particleCount = 0;
while(particleCount < partSize && upperCellIndex < ordered.size()){
auto cellIdx = ordered.at(upperCellIndex);
particleCount += cells.at(cellIdx).size;
upperCellIndex++;
}
int N = upperCellIndex-offset;
// Copy inner cell indices to device
CudaSafeCall(cudaMalloc((void **) &this->layout[devId].deviceInner, sizeof(int) * N));
CudaSafeCall(cudaMemcpy(this->layout[devId].deviceInner, &ordered.data()[offset], sizeof(int) * N,
cudaMemcpyHostToDevice));
this->layout[devId].size = N;
offset = upperCellIndex;
}
#else
int partSize = ceil(1.0 * this->inner.size() / GPU_N);
int offset = 0;
int remaining = this->inner.size();
for (int devId = 0; devId < GPU_N; ++devId) {
int N = partSize < remaining ? partSize : remaining;
CudaSafeCall(cudaSetDevice(devId));
// Copy inner cell indices to device
CudaSafeCall(cudaFree(this->layout[devId].deviceInner));
CudaSafeCall(cudaMalloc((void **) &this->layout[devId].deviceInner, sizeof(int) * N));
CudaSafeCall(cudaMemcpy(this->layout[devId].deviceInner, &this->inner.data()[offset], sizeof(int) * N,
cudaMemcpyHostToDevice));
this->layout[devId].size = N;
offset += N;
remaining -= N;
}
/*for (int devId = 0; devId < GPU_N; ++devId) {
cudaStreamSynchronize(this->layout[devId].stream);
}*/
assert(remaining == 0);
#endif
}
void LinkedCellsImpl::init() {
} |
222755b655c5a4d4959d5372423cb71d1aa05479.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "globals.h"
__device__ int currentNumberOfNodes = INIT_NUMBER_OF_NODES;
__device__ int numRem[NUMBER_OF_DAYS];
__device__ float numUnInf[NUMBER_OF_DAYS];
__device__ float numLat[NUMBER_OF_DAYS];
__device__ float numInf[NUMBER_OF_DAYS];
__device__ float numInc[NUMBER_OF_DAYS];
__device__ float numAsym[NUMBER_OF_DAYS];
__device__ float numRec[NUMBER_OF_DAYS];
// Kernel that executes on the CUDA device
__global__ void node(Node * nodeInfoList, int seed)
{
/* threadIdx represents the ID of the node */
int i,j;
int tx = threadIdx.x + blockDim.x * blockIdx.x;
int numberOfNeighborsToLookAt;
int neighborIndex, index;
int numberOfDays = 0;
int storedNodeStatus[MAX_NUMBER_OF_NODES];
hiprandState_t state;
/* we have to initialize the state */
hiprand_init(seed+(tx*34), 0, 0, &state);
/* continues to loop until the number of days the simulation is set to run */
while(numberOfDays < NUMBER_OF_DAYS) {
for(i = 0; i < MAX_NUMBER_OF_NODES;i++)
{
storedNodeStatus[i] = nodeInfoList[i].nodeStatus;
}
__syncthreads();
/* if thread is not active skip thread activity until everyone else is done */
if(nodeInfoList[tx].isActive == 1)
{
if (nodeInfoList[tx].nodeStatus == UNINFECTED)
{
/* 0 to Num Neighbors -1 */
numberOfNeighborsToLookAt = hiprand(&state) % (nodeInfoList[tx].numberOfNeighbors);
while( numberOfNeighborsToLookAt > -1 )
{
if ( nodeInfoList[tx].nodeStatus == UNINFECTED)
{
do {
neighborIndex = hiprand(&state) % currentNumberOfNodes;
if(nodeInfoList[tx].neighborId[neighborIndex] == -1)
neighborIndex = -1;
} while (neighborIndex == -1);
if(storedNodeStatus[neighborIndex] == INFECTIOUS || storedNodeStatus[neighborIndex] == ASYMPT)
{
if(hiprand(&state) % 100 < 90)
{
nodeInfoList[tx].nodeStatus = LATENT;
nodeInfoList[tx].dayInfected = numberOfDays;
}
if(hiprand(&state) % 100 < 10)
{
nodeInfoList[tx].nodeStatus = INCUBATION;
nodeInfoList[tx].dayInfected = numberOfDays;
}
}
}
numberOfNeighborsToLookAt--;
}
}
}
__syncthreads();
if(nodeInfoList[tx].isActive == 1)
{
// a chance for node deletion
if ( (float) (hiprand(&state) % 800) < 2 )
{
nodeInfoList[tx].isActive = 0;
nodeInfoList[tx].id = 0;
nodeInfoList[tx].nodeStatus = UNINFECTED;
for(i = 0; i < MAX_NUMBER_OF_NEIGHBORS; i++)
nodeInfoList[tx].neighborId[i] = -1;
nodeInfoList[tx].numberOfNeighbors = 0;
atomicAdd(¤tNumberOfNodes,-1);
}
}
__syncthreads();
if(tx == 0) {
numRem[numberOfDays] = currentNumberOfNodes;
}
if(nodeInfoList[tx].isActive == 1) {
switch(nodeInfoList[tx].nodeStatus)
{
case UNINFECTED:
atomicAdd(&numUnInf[numberOfDays],1);
break;
case LATENT:
atomicAdd(&numLat[numberOfDays],1);
break;
case INCUBATION:
atomicAdd(&numInc[numberOfDays],1);
break;
case INFECTIOUS:
atomicAdd(&numInf[numberOfDays],1);
break;
case ASYMPT:
atomicAdd(&numAsym[numberOfDays],1);
break;
case RECOVERED:
atomicAdd(&numRec[numberOfDays],1);
break;
default:
break;
}
}
numberOfDays++;
if(nodeInfoList[tx].isActive == 1)
{
switch(nodeInfoList[tx].nodeStatus)
{
case UNINFECTED:
break;
case LATENT:
if((numberOfDays) - nodeInfoList[tx].dayInfected >= 2)
{
nodeInfoList[tx].nodeStatus = INFECTIOUS;
}
break;
case INCUBATION:
if((numberOfDays) - nodeInfoList[tx].dayInfected >= 1)
{
nodeInfoList[tx].nodeStatus = ASYMPT;
}
break;
case INFECTIOUS:
if((numberOfDays) - nodeInfoList[tx].dayInfected >= 5)
{
if( hiprand(&state) % 100 < (((numberOfDays) - nodeInfoList[tx].dayInfected)-5)*10 + 70)
nodeInfoList[tx].nodeStatus = RECOVERED;
}
break;
case ASYMPT:
if((numberOfDays) - nodeInfoList[tx].dayInfected >= 3)
{
if( hiprand(&state) % 100 < (((numberOfDays) - nodeInfoList[tx].dayInfected)-3)*10 + 70)
nodeInfoList[tx].nodeStatus = RECOVERED;
}
break;
case RECOVERED:
break;
default:
break;
}
}
__syncthreads();
if(nodeInfoList[tx].isActive == 0) {
// a chance for node addition
if ( ( (float) (hiprand(&state) % 600) < 2.0 ) )
{
nodeInfoList[tx].isActive = 1;
nodeInfoList[tx].nodeStatus = UNINFECTED;
nodeInfoList[tx].numberOfNeighbors = (hiprand(&state) % (MAX_NUMBER_OF_NEIGHBORS + 1));
for(j = 0; j < MAX_NUMBER_OF_NODES; j++)
nodeInfoList[tx].neighborId[j] = -1;
for(j = 0; j < MAX_NUMBER_OF_NEIGHBORS; j++)
{
do {
index = hiprand(&state) % currentNumberOfNodes;
if(nodeInfoList[tx].neighborId[index] != -1)
index = -1;
} while (index == -1);
nodeInfoList[tx].neighborId[index] = 1;
}
atomicAdd(¤tNumberOfNodes,1);
}
}
__syncthreads();
}
}
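/* State machine implemented by the kernel above (per simulated day):
   UNINFECTED nodes sample random neighbours and may turn LATENT (90% draw) or
   INCUBATION (10% draw) on contact with an INFECTIOUS/ASYMPT node; LATENT
   becomes INFECTIOUS after 2 days and INCUBATION becomes ASYMPT after 1 day;
   INFECTIOUS and ASYMPT nodes recover with a probability starting at 70% after
   5 and 3 days respectively and growing by 10% per additional day. Nodes are
   also removed from, or added to, the graph with small per-day probabilities. */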
__global__ void initGraph(Node * nodeInfoList, int seed) {
int tx = threadIdx.x + blockDim.x * blockIdx.x;
hiprandState_t state;
int j, index;
hiprand_init(seed+(tx*56), 0, 0, &state);
if(tx < INIT_NUMBER_OF_NODES) {
nodeInfoList[tx].isActive = 1;
nodeInfoList[tx].numberOfNeighbors = (hiprand(&state) % (MAX_NUMBER_OF_NEIGHBORS));
if (nodeInfoList[tx].numberOfNeighbors < MIN_NUMBER_OF_NEIGHBORS)
nodeInfoList[tx].numberOfNeighbors = MIN_NUMBER_OF_NEIGHBORS;
for(j = 0; j < MAX_NUMBER_OF_NODES; j++)
nodeInfoList[tx].neighborId[j] = -1;
for(j = 0; j < MAX_NUMBER_OF_NEIGHBORS; j++)
{
do {
index = hiprand(&state) % INIT_NUMBER_OF_NODES;
if(nodeInfoList[tx].neighborId[index] != -1)
index = -1;
} while (index == -1);
nodeInfoList[tx].neighborId[index] = 1;
}
} else {
nodeInfoList[tx].isActive = 0;
nodeInfoList[tx].numberOfNeighbors = -1;
}
if(tx == 0)
{
nodeInfoList[tx].nodeStatus = LATENT;
nodeInfoList[tx].dayInfected = 0;
for(j = 0; j < NUMBER_OF_DAYS; j++) {
numRem[j] = 0;
numUnInf[j] = 0;
numLat[j] = 0;
numInf[j] = 0;
numInc[j] = 0;
numAsym[j] = 0;
numRec[j] = 0;
}
} else {
nodeInfoList[tx].nodeStatus = UNINFECTED;
nodeInfoList[tx].dayInfected = -1;
}
}
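/* initGraph seeds the outbreak by marking node 0 as LATENT on day 0 and
   zeroing the per-day counters; every slot below INIT_NUMBER_OF_NODES becomes
   an active node with a random neighbour set (at least
   MIN_NUMBER_OF_NEIGHBORS), and the remaining slots stay inactive so the node
   kernel can activate them later as newly added nodes. */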
__global__ void printingRes()
{
int numberOfDays = 0;
int nodeNumber;
if(threadIdx.x == 0)
{
while(numberOfDays < 30) {
nodeNumber = numRem[numberOfDays];
numUnInf[numberOfDays] /= nodeNumber;
numLat[numberOfDays] /= nodeNumber;
numInf[numberOfDays] /= nodeNumber;
numInc[numberOfDays] /= nodeNumber;
numAsym[numberOfDays] /= nodeNumber;
numRec[numberOfDays] /= nodeNumber;
numUnInf[numberOfDays] *= 100;
numLat[numberOfDays] *= 100;
numInf[numberOfDays] *= 100;
numInc[numberOfDays] *= 100;
numAsym[numberOfDays] *= 100;
numRec[numberOfDays] *= 100;
printf("\n \nDay %d Number of Nodes: %d\n",numberOfDays,numRem[numberOfDays]);
printf("Percent Uninfected: %f, Num Latent %f, Num Inf %f, Num Inc %f, Num Asym %f, Num Rec %f\n", numUnInf[numberOfDays],
numLat[numberOfDays], numInf[numberOfDays], numInc[numberOfDays], numAsym[numberOfDays],
numRec[numberOfDays]);
numberOfDays++;
}
}
}
// main routine that executes on the host
int main(void)
{
Node * hostNodeInfoList = (Node *) malloc(MAX_NUMBER_OF_NODES*(sizeof(Node)));
Node * deviceNodeInfoList;
hipMalloc( (void **) &deviceNodeInfoList,(MAX_NUMBER_OF_NODES)* sizeof(Node));
hipMemcpy(deviceNodeInfoList, hostNodeInfoList, (MAX_NUMBER_OF_NODES) * sizeof(Node), hipMemcpyHostToDevice);
dim3 DimGrid( (int) ceil(MAX_NUMBER_OF_NODES/512.0),1,1);
dim3 DimBlock(512,1,1);
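  // Grid sizing: one thread per node slot (active or not), in blocks of 512;
  // inactive slots mostly wait at the barriers until the node-addition branch
  // of the kernel gives them a chance to join the graph.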
hipLaunchKernelGGL(( initGraph), dim3(DimGrid),dim3(DimBlock), 0, 0, deviceNodeInfoList, time(NULL));
hipDeviceSynchronize();
hipLaunchKernelGGL(( node), dim3(DimGrid),dim3(DimBlock), 0, 0, deviceNodeInfoList, time(NULL));
hipDeviceSynchronize();
hipLaunchKernelGGL(( printingRes), dim3(1),dim3(1), 0, 0, );
hipDeviceSynchronize();
free(hostNodeInfoList);
hipFree(deviceNodeInfoList);
return 0;
}
| 222755b655c5a4d4959d5372423cb71d1aa05479.cu | #include "globals.h"
__device__ int currentNumberOfNodes = INIT_NUMBER_OF_NODES;
__device__ int numRem[NUMBER_OF_DAYS];
__device__ float numUnInf[NUMBER_OF_DAYS];
__device__ float numLat[NUMBER_OF_DAYS];
__device__ float numInf[NUMBER_OF_DAYS];
__device__ float numInc[NUMBER_OF_DAYS];
__device__ float numAsym[NUMBER_OF_DAYS];
__device__ float numRec[NUMBER_OF_DAYS];
// Kernel that executes on the CUDA device
__global__ void node(Node * nodeInfoList, int seed)
{
/* threadIdx represents the ID of the node */
int i,j;
int tx = threadIdx.x + blockDim.x * blockIdx.x;
int numberOfNeighborsToLookAt;
int neighborIndex, index;
int numberOfDays = 0;
int storedNodeStatus[MAX_NUMBER_OF_NODES];
curandState_t state;
/* we have to initialize the state */
curand_init(seed+(tx*34), 0, 0, &state);
/* continues to loop until the number of days the simulation is set to run */
while(numberOfDays < NUMBER_OF_DAYS) {
for(i = 0; i < MAX_NUMBER_OF_NODES;i++)
{
storedNodeStatus[i] = nodeInfoList[i].nodeStatus;
}
__syncthreads();
/* if thread is not active skip thread activity until everyone else is done */
if(nodeInfoList[tx].isActive == 1)
{
if (nodeInfoList[tx].nodeStatus == UNINFECTED)
{
/* 0 to Num Neighbors -1 */
numberOfNeighborsToLookAt = curand(&state) % (nodeInfoList[tx].numberOfNeighbors);
while( numberOfNeighborsToLookAt > -1 )
{
if ( nodeInfoList[tx].nodeStatus == UNINFECTED)
{
do {
neighborIndex = curand(&state) % currentNumberOfNodes;
if(nodeInfoList[tx].neighborId[neighborIndex] == -1)
neighborIndex = -1;
} while (neighborIndex == -1);
if(storedNodeStatus[neighborIndex] == INFECTIOUS || storedNodeStatus[neighborIndex] == ASYMPT)
{
if(curand(&state) % 100 < 90)
{
nodeInfoList[tx].nodeStatus = LATENT;
nodeInfoList[tx].dayInfected = numberOfDays;
}
if(curand(&state) % 100 < 10)
{
nodeInfoList[tx].nodeStatus = INCUBATION;
nodeInfoList[tx].dayInfected = numberOfDays;
}
}
}
numberOfNeighborsToLookAt--;
}
}
}
__syncthreads();
if(nodeInfoList[tx].isActive == 1)
{
// a chance for node deletion
if ( (float) (curand(&state) % 800) < 2 )
{
nodeInfoList[tx].isActive = 0;
nodeInfoList[tx].id = 0;
nodeInfoList[tx].nodeStatus = UNINFECTED;
for(i = 0; i < MAX_NUMBER_OF_NEIGHBORS; i++)
nodeInfoList[tx].neighborId[i] = -1;
nodeInfoList[tx].numberOfNeighbors = 0;
atomicAdd(&currentNumberOfNodes,-1);
}
}
__syncthreads();
if(tx == 0) {
numRem[numberOfDays] = currentNumberOfNodes;
}
if(nodeInfoList[tx].isActive == 1) {
switch(nodeInfoList[tx].nodeStatus)
{
case UNINFECTED:
atomicAdd(&numUnInf[numberOfDays],1);
break;
case LATENT:
atomicAdd(&numLat[numberOfDays],1);
break;
case INCUBATION:
atomicAdd(&numInc[numberOfDays],1);
break;
case INFECTIOUS:
atomicAdd(&numInf[numberOfDays],1);
break;
case ASYMPT:
atomicAdd(&numAsym[numberOfDays],1);
break;
case RECOVERED:
atomicAdd(&numRec[numberOfDays],1);
break;
default:
break;
}
}
numberOfDays++;
if(nodeInfoList[tx].isActive == 1)
{
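/* Disease-progression rules used in the switch below: LATENT becomes INFECTIOUS after 2 days and
   INCUBATION becomes ASYMPT after 1 day; INFECTIOUS nodes can recover from day 5 and ASYMPT nodes
   from day 3, with a recovery chance of 70% that grows by 10% for each extra day. */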
switch(nodeInfoList[tx].nodeStatus)
{
case UNINFECTED:
break;
case LATENT:
if((numberOfDays) - nodeInfoList[tx].dayInfected >= 2)
{
nodeInfoList[tx].nodeStatus = INFECTIOUS;
}
break;
case INCUBATION:
if((numberOfDays) - nodeInfoList[tx].dayInfected >= 1)
{
nodeInfoList[tx].nodeStatus = ASYMPT;
}
break;
case INFECTIOUS:
if((numberOfDays) - nodeInfoList[tx].dayInfected >= 5)
{
if( curand(&state) % 100 < (((numberOfDays) - nodeInfoList[tx].dayInfected)-5)*10 + 70)
nodeInfoList[tx].nodeStatus = RECOVERED;
}
break;
case ASYMPT:
if((numberOfDays) - nodeInfoList[tx].dayInfected >= 3)
{
if( curand(&state) % 100 < (((numberOfDays) - nodeInfoList[tx].dayInfected)-3)*10 + 70)
nodeInfoList[tx].nodeStatus = RECOVERED;
}
break;
case RECOVERED:
break;
default:
break;
}
}
__syncthreads();
if(nodeInfoList[tx].isActive == 0) {
// a chance for node addition
if ( ( (float) (curand(&state) % 600) < 2.0 ) )
{
nodeInfoList[tx].isActive = 1;
nodeInfoList[tx].nodeStatus = UNINFECTED;
nodeInfoList[tx].numberOfNeighbors = (curand(&state) % (MAX_NUMBER_OF_NEIGHBORS + 1));
for(j = 0; j < MAX_NUMBER_OF_NODES; j++)
nodeInfoList[tx].neighborId[j] = -1;
for(j = 0; j < MAX_NUMBER_OF_NEIGHBORS; j++)
{
do {
index = curand(&state) % currentNumberOfNodes;
if(nodeInfoList[tx].neighborId[index] != -1)
index = -1;
} while (index == -1);
nodeInfoList[tx].neighborId[index] = 1;
}
atomicAdd(&currentNumberOfNodes,1);
}
}
__syncthreads();
}
}
__global__ void initGraph(Node * nodeInfoList, int seed) {
int tx = threadIdx.x + blockDim.x * blockIdx.x;
curandState_t state;
int j, index;
curand_init(seed+(tx*56), 0, 0, &state);
if(tx < INIT_NUMBER_OF_NODES) {
nodeInfoList[tx].isActive = 1;
nodeInfoList[tx].numberOfNeighbors = (curand(&state) % (MAX_NUMBER_OF_NEIGHBORS));
if (nodeInfoList[tx].numberOfNeighbors < MIN_NUMBER_OF_NEIGHBORS)
nodeInfoList[tx].numberOfNeighbors = MIN_NUMBER_OF_NEIGHBORS;
for(j = 0; j < MAX_NUMBER_OF_NODES; j++)
nodeInfoList[tx].neighborId[j] = -1;
for(j = 0; j < MAX_NUMBER_OF_NEIGHBORS; j++)
{
do {
index = curand(&state) % INIT_NUMBER_OF_NODES;
if(nodeInfoList[tx].neighborId[index] != -1)
index = -1;
} while (index == -1);
nodeInfoList[tx].neighborId[index] = 1;
}
} else {
nodeInfoList[tx].isActive = 0;
nodeInfoList[tx].numberOfNeighbors = -1;
}
if(tx == 0)
{
nodeInfoList[tx].nodeStatus = LATENT;
nodeInfoList[tx].dayInfected = 0;
for(j = 0; j < NUMBER_OF_DAYS; j++) {
numRem[j] = 0;
numUnInf[j] = 0;
numLat[j] = 0;
numInf[j] = 0;
numInc[j] = 0;
numAsym[j] = 0;
numRec[j] = 0;
}
} else {
nodeInfoList[tx].nodeStatus = UNINFECTED;
nodeInfoList[tx].dayInfected = -1;
}
}
__global__ void printingRes()
{
int numberOfDays = 0;
int nodeNumber;
if(threadIdx.x == 0)
{
while(numberOfDays < 30) {
nodeNumber = numRem[numberOfDays];
numUnInf[numberOfDays] /= nodeNumber;
numLat[numberOfDays] /= nodeNumber;
numInf[numberOfDays] /= nodeNumber;
numInc[numberOfDays] /= nodeNumber;
numAsym[numberOfDays] /= nodeNumber;
numRec[numberOfDays] /= nodeNumber;
numUnInf[numberOfDays] *= 100;
numLat[numberOfDays] *= 100;
numInf[numberOfDays] *= 100;
numInc[numberOfDays] *= 100;
numAsym[numberOfDays] *= 100;
numRec[numberOfDays] *= 100;
printf("\n \nDay %d Number of Nodes: %d\n",numberOfDays,numRem[numberOfDays]);
printf("Percent Uninfected: %f, Num Latent %f, Num Inf %f, Num Inc %f, Num Asym %f, Num Rec %f\n", numUnInf[numberOfDays],
numLat[numberOfDays], numInf[numberOfDays], numInc[numberOfDays], numAsym[numberOfDays],
numRec[numberOfDays]);
numberOfDays++;
}
}
}
// main routine that executes on the host
int main(void)
{
Node * hostNodeInfoList = (Node *) malloc(MAX_NUMBER_OF_NODES*(sizeof(Node)));
Node * deviceNodeInfoList;
cudaMalloc( (void **) &deviceNodeInfoList,(MAX_NUMBER_OF_NODES)* sizeof(Node));
cudaMemcpy(deviceNodeInfoList, hostNodeInfoList, (MAX_NUMBER_OF_NODES) * sizeof(Node), cudaMemcpyHostToDevice);
dim3 DimGrid( (int) ceil(MAX_NUMBER_OF_NODES/512.0),1,1);
dim3 DimBlock(512,1,1);
initGraph<<<DimGrid,DimBlock>>>(deviceNodeInfoList, time(NULL));
cudaDeviceSynchronize();
node<<<DimGrid,DimBlock>>>(deviceNodeInfoList, time(NULL));
cudaDeviceSynchronize();
printingRes<<<1,1>>>();
cudaDeviceSynchronize();
free(hostNodeInfoList);
cudaFree(deviceNodeInfoList);
return 0;
}
|
19b4d1d560d2218f6130da7bde3316d5c87b922b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample implements a conjugate gradient solver on GPU
* using CUBLAS and CUSPARSE
*
*/
// includes, system
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include<fstream>
#include"Util.h"
#include"SparseSolver.h"
/* Using updated (v2) interfaces to cublas */
#include <hipblas.h>
#include <hip/hip_runtime.h>
#include <hipsparse.h>
#include <hip/hip_cooperative_groups.h>
// Utilities and system includes
#include <helper_cuda.h> // helper function CUDA error checking and initialization
#include <helper_functions.h> // helper for shared functions common to CUDA Samples
namespace cg = cooperative_groups;
const char *sSDKNames = "conjugateGradientCudaGraphs";
#ifndef WITH_GRAPH
#define WITH_GRAPH 1
#endif
/* genTridiag: generate a random tridiagonal symmetric matrix */
//void genTridiag(int *I, int *J, float *val, int N, int nz) {
// I[0] = 0, J[0] = 0, J[1] = 1;
// val[0] = (float)rand() / RAND_MAX + 10.0f;
// val[1] = (float)rand() / RAND_MAX;
// int start;
//
// for (int i = 1; i < N; i++) {
// if (i > 1) {
// I[i] = I[i - 1] + 3;
// } else {
// I[1] = 2;
// }
//
// start = (i - 1) * 3 + 2;
// J[start] = i - 1;
// J[start + 1] = i;
//
// if (i < N - 1) {
// J[start + 2] = i + 1;
// }
//
// val[start] = val[start - 1];
// val[start + 1] = (float)rand() / RAND_MAX + 10.0f;
//
// if (i < N - 1) {
// val[start + 2] = (float)rand() / RAND_MAX;
// }
// }
//
// I[N] = nz;
//}
__global__ void initVectors(float *rhs, float *x, int N) {
size_t gid = blockIdx.x * blockDim.x + threadIdx.x;
for (size_t i = gid; i < N; i += gridDim.x * blockDim.x) {
rhs[i] = 1.0;
x[i] = 0.0;
}
}
__global__ void gpuDotProduct(float *vecA, float *vecB, float *result,
int size) {
cg::thread_block cta = cg::this_thread_block();
int gid = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ double tmp[];
double temp_sum = 0.0;
for (int i = gid; i < size; i += gridDim.x * blockDim.x) {
temp_sum += (double)(vecA[i] * vecB[i]);
}
tmp[cta.thread_rank()] = temp_sum;
cg::sync(cta);
cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta);
double beta = temp_sum;
double temp;
for (int i = tile32.size() / 2; i > 0; i >>= 1) {
if (tile32.thread_rank() < i) {
temp = tmp[cta.thread_rank() + i];
beta += temp;
tmp[cta.thread_rank()] = beta;
}
cg::sync(tile32);
}
cg::sync(cta);
if (cta.thread_rank() == 0) {
beta = 0.0;
for (int i = 0; i < cta.size(); i += tile32.size()) {
beta += tmp[i];
}
atomicAdd(result, (float)beta);
}
}
__global__ void r1_div_x(float *r1, float *r0, float *b) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid == 0) {
b[0] = r1[0] / r0[0];
}
}
__global__ void a_minus(float *a, float *na) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid == 0) {
na[0] = -(a[0]);
}
}
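/* The tiny single-thread kernels above (r1_div_x, a_minus) keep the CG scalars resident on the
   device, so an iteration needs no host round trip and can be captured into a graph via stream
   capture. Each iteration of the solver below computes, in order:
       b  = r1 / r0              (beta)
       p  = r + b * p
       a  = r1 / (p . A p)       (alpha)
       x  = x + a * p
       r  = r - a * A p
       r0 = r1,  r1 = r . r
*/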
int conjugateGradientCudaGraphs(int argc, char **argv, int size, std::string rfile, float eps, std::string wFile) {
int N = 0, nz = 0, *I = NULL, *J = NULL;
float *val = NULL;
const float tol = eps;
const int max_iter = 10000;
float *x;
float *rhs;
float r1;
int *d_col, *d_row;
float *d_val, *d_x;
float *d_r, *d_p, *d_Ax;
int k;
float alpha, beta, alpham1;
hipStream_t stream1, streamForGraph;
// This will pick the best possible CUDA capable device
hipDeviceProp_t deviceProp;
int devID = findCudaDevice(argc, (const char **)argv);
if (devID < 0) {
printf("exiting...\n");
exit(EXIT_SUCCESS);
}
checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID));
// Statistics about the GPU device
printf(
"> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
/* Generate a random tridiagonal symmetric matrix in CSR format */
N = size;
std::ifstream fin(rfile);
fin >> nz;
fin.close();
I = (int *)malloc(sizeof(int) * (N + 1));
J = (int *)malloc(sizeof(int) * nz);
val = (float *)malloc(sizeof(float) * nz);
input(I, J, val, rfile, nz, N);
x = (float *)malloc(sizeof(float) * N);
rhs = (float *)malloc(sizeof(float) * N);
for (int i = 0; i < N; i++) {
rhs[i] = 1.0;
x[i] = 0.0;
}
CpuTimer timer = CpuTimer();
timer.start();
/* Get handle to the CUBLAS context */
hipblasHandle_t cublasHandle = 0;
hipblasStatus_t cublasStatus;
cublasStatus = hipblasCreate(&cublasHandle);
checkCudaErrors(cublasStatus);
/* Get handle to the CUSPARSE context */
hipsparseHandle_t cusparseHandle = 0;
hipsparseStatus_t cusparseStatus;
cusparseStatus = hipsparseCreate(&cusparseHandle);
checkCudaErrors(cusparseStatus);
checkCudaErrors(hipStreamCreate(&stream1));
checkCudaErrors(hipMalloc((void **)&d_col, nz * sizeof(int)));
checkCudaErrors(hipMalloc((void **)&d_row, (N + 1) * sizeof(int)));
checkCudaErrors(hipMalloc((void **)&d_val, nz * sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_x, N * sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_r, N * sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_p, N * sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_Ax, N * sizeof(float)));
float *d_r1, *d_r0, *d_dot, *d_a, *d_na, *d_b;
checkCudaErrors(hipMalloc((void **)&d_r1, sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_r0, sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_dot, sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_a, sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_na, sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_b, sizeof(float)));
hipsparseMatDescr_t descr = 0;
checkCudaErrors(hipsparseCreateMatDescr(&descr));
checkCudaErrors(hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL));
checkCudaErrors(hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO));
int numBlocks = 0, blockSize = 0;
checkCudaErrors(
hipOccupancyMaxPotentialBlockSize(&numBlocks, &blockSize, initVectors));
checkCudaErrors(hipMemcpyAsync(d_col, J, nz * sizeof(int),
hipMemcpyHostToDevice, stream1));
checkCudaErrors(hipMemcpyAsync(d_row, I, (N + 1) * sizeof(int),
hipMemcpyHostToDevice, stream1));
checkCudaErrors(hipMemcpyAsync(d_val, val, nz * sizeof(float),
hipMemcpyHostToDevice, stream1));
hipLaunchKernelGGL(( initVectors), dim3(numBlocks), dim3(blockSize), 0, stream1, d_r, d_x, N);
checkCudaErrors(hipOccupancyMaxPotentialBlockSize(&numBlocks, &blockSize,
gpuDotProduct));
alpha = 1.0;
alpham1 = -1.0;
beta = 0.0;
checkCudaErrors(hipsparseSetStream(cusparseHandle, stream1));
checkCudaErrors(
hipsparseScsrmv(cusparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz,
&alpha, descr, d_val, d_row, d_col, d_x, &beta, d_Ax));
checkCudaErrors(hipblasSetStream(cublasHandle, stream1));
checkCudaErrors(hipblasSaxpy(cublasHandle, N, &alpham1, d_Ax, 1, d_r, 1));
checkCudaErrors(
hipblasSetPointerMode(cublasHandle, HIPBLAS_POINTER_MODE_DEVICE));
checkCudaErrors(hipblasSdot(cublasHandle, N, d_r, 1, d_r, 1, d_r1));
k = 1;
// First Iteration when k=1 starts
checkCudaErrors(hipblasScopy(cublasHandle, N, d_r, 1, d_p, 1));
checkCudaErrors(
hipsparseScsrmv(cusparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz,
&alpha, descr, d_val, d_row, d_col, d_p, &beta, d_Ax));
checkCudaErrors(hipblasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, d_dot));
hipLaunchKernelGGL(( r1_div_x), dim3(1), dim3(1), 0, stream1, d_r1, d_dot, d_a);
checkCudaErrors(hipblasSaxpy(cublasHandle, N, d_a, d_p, 1, d_x, 1));
hipLaunchKernelGGL(( a_minus), dim3(1), dim3(1), 0, stream1, d_a, d_na);
checkCudaErrors(hipblasSaxpy(cublasHandle, N, d_na, d_Ax, 1, d_r, 1));
checkCudaErrors(hipMemcpyAsync(d_r0, d_r1, sizeof(float),
hipMemcpyDeviceToDevice, stream1));
checkCudaErrors(hipblasSdot(cublasHandle, N, d_r, 1, d_r, 1, d_r1));
checkCudaErrors(hipMemcpyAsync(&r1, d_r1, sizeof(float),
hipMemcpyDeviceToHost, stream1));
checkCudaErrors(hipStreamSynchronize(stream1));
//printf("iteration = %3d, residual = %e\n", k, sqrt(r1));
// First Iteration when k=1 ends
k++;
#if WITH_GRAPH
hipGraph_t initGraph;
checkCudaErrors(hipStreamCreate(&streamForGraph));
checkCudaErrors(hipblasSetStream(cublasHandle, stream1));
checkCudaErrors(hipsparseSetStream(cusparseHandle, stream1));
checkCudaErrors(hipStreamBeginCapture(stream1, hipStreamCaptureModeGlobal));
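/* Everything enqueued on stream1 from here until hipStreamEndCapture below is recorded into
   initGraph instead of being executed; the instantiated graph is then replayed with a single
   hipGraphLaunch per CG iteration. */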
hipLaunchKernelGGL(( r1_div_x), dim3(1), dim3(1), 0, stream1, d_r1, d_r0, d_b);
hipblasSetPointerMode(cublasHandle, HIPBLAS_POINTER_MODE_DEVICE);
checkCudaErrors(hipblasSscal(cublasHandle, N, d_b, d_p, 1));
hipblasSetPointerMode(cublasHandle, HIPBLAS_POINTER_MODE_HOST);
checkCudaErrors(hipblasSaxpy(cublasHandle, N, &alpha, d_r, 1, d_p, 1));
hipblasSetPointerMode(cublasHandle, HIPBLAS_POINTER_MODE_DEVICE);
checkCudaErrors(
hipsparseSetPointerMode(cusparseHandle, HIPSPARSE_POINTER_MODE_HOST));
checkCudaErrors(
hipsparseScsrmv(cusparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz,
&alpha, descr, d_val, d_row, d_col, d_p, &beta, d_Ax));
checkCudaErrors(hipMemsetAsync(d_dot, 0, sizeof(float), stream1));
// Use hipblasSdot API when it is cuda graph compliant.
// checkCudaErrors(hipblasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, d_dot));
hipLaunchKernelGGL(( gpuDotProduct), dim3(numBlocks), dim3(blockSize), blockSize * sizeof(double), stream1,
d_p, d_Ax, d_dot, N);
hipLaunchKernelGGL(( r1_div_x), dim3(1), dim3(1), 0, stream1, d_r1, d_dot, d_a);
checkCudaErrors(hipblasSaxpy(cublasHandle, N, d_a, d_p, 1, d_x, 1));
hipLaunchKernelGGL(( a_minus), dim3(1), dim3(1), 0, stream1, d_a, d_na);
checkCudaErrors(hipblasSaxpy(cublasHandle, N, d_na, d_Ax, 1, d_r, 1));
checkCudaErrors(hipMemcpyAsync(d_r0, d_r1, sizeof(float),
hipMemcpyDeviceToDevice, stream1));
checkCudaErrors(hipMemsetAsync(d_r1, 0, sizeof(float), stream1));
// Use hipblasSdot API when it is cuda graph compliant.
// checkCudaErrors(hipblasSdot(cublasHandle, N, d_r, 1, d_r, 1, d_r1));
hipLaunchKernelGGL(( gpuDotProduct), dim3(numBlocks), dim3(blockSize), blockSize * sizeof(double), stream1,
d_r, d_r, d_r1, N);
checkCudaErrors(hipMemcpyAsync((float *)&r1, d_r1, sizeof(float),
hipMemcpyDeviceToHost, stream1));
checkCudaErrors(hipStreamEndCapture(stream1, &initGraph));
hipGraphExec_t graphExec;
checkCudaErrors(hipGraphInstantiate(&graphExec, initGraph, NULL, NULL, 0));
#endif
checkCudaErrors(hipblasSetStream(cublasHandle, stream1));
checkCudaErrors(hipsparseSetStream(cusparseHandle, stream1));
while (r1 > tol * tol && k <= max_iter) {
#if WITH_GRAPH
checkCudaErrors(hipGraphLaunch(graphExec, streamForGraph));
checkCudaErrors(hipStreamSynchronize(streamForGraph));
#else
hipLaunchKernelGGL(( r1_div_x), dim3(1), dim3(1), 0, stream1, d_r1, d_r0, d_b);
hipblasSetPointerMode(cublasHandle, HIPBLAS_POINTER_MODE_DEVICE);
checkCudaErrors(hipblasSscal(cublasHandle, N, d_b, d_p, 1));
hipblasSetPointerMode(cublasHandle, HIPBLAS_POINTER_MODE_HOST);
checkCudaErrors(hipblasSaxpy(cublasHandle, N, &alpha, d_r, 1, d_p, 1));
checkCudaErrors(hipsparseScsrmv(
cusparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha,
descr, d_val, d_row, d_col, d_p, &beta, d_Ax));
hipblasSetPointerMode(cublasHandle, HIPBLAS_POINTER_MODE_DEVICE);
checkCudaErrors(hipblasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, d_dot));
hipLaunchKernelGGL(( r1_div_x), dim3(1), dim3(1), 0, stream1, d_r1, d_dot, d_a);
checkCudaErrors(hipblasSaxpy(cublasHandle, N, d_a, d_p, 1, d_x, 1));
hipLaunchKernelGGL(( a_minus), dim3(1), dim3(1), 0, stream1, d_a, d_na);
checkCudaErrors(hipblasSaxpy(cublasHandle, N, d_na, d_Ax, 1, d_r, 1));
checkCudaErrors(hipMemcpyAsync(d_r0, d_r1, sizeof(float),
hipMemcpyDeviceToDevice, stream1));
checkCudaErrors(hipblasSdot(cublasHandle, N, d_r, 1, d_r, 1, d_r1));
checkCudaErrors(hipMemcpyAsync((float *)&r1, d_r1, sizeof(float),
hipMemcpyDeviceToHost, stream1));
checkCudaErrors(hipStreamSynchronize(stream1));
#endif
//printf("iteration = %3d, residual = %e\n", k, sqrt(r1));
k++;
}
#if WITH_GRAPH
checkCudaErrors(hipMemcpyAsync(x, d_x, N * sizeof(float),
hipMemcpyDeviceToHost, streamForGraph));
checkCudaErrors(hipStreamSynchronize(streamForGraph));
#else
checkCudaErrors(hipMemcpyAsync(x, d_x, N * sizeof(float),
hipMemcpyDeviceToHost, stream1));
checkCudaErrors(hipStreamSynchronize(stream1));
#endif
float costTime = timer.stop();
float rsum, diff, err = 0.0;
for (int i = 0; i < N; i++) {
rsum = 0.0;
for (int j = I[i]; j < I[i + 1]; j++) {
rsum += val[j] * x[J[j]];
}
diff = fabs(rsum - rhs[i]);
if (diff > err) {
err = diff;
}
}
#if WITH_GRAPH
checkCudaErrors(hipGraphExecDestroy(graphExec));
checkCudaErrors(hipGraphDestroy(initGraph));
checkCudaErrors(hipStreamDestroy(streamForGraph));
#endif
output(wFile, x, size);
checkCudaErrors(hipStreamDestroy(stream1));
hipsparseDestroy(cusparseHandle);
hipblasDestroy(cublasHandle);
free(I);
free(J);
free(val);
free(x);
free(rhs);
hipFree(d_col);
hipFree(d_row);
hipFree(d_val);
hipFree(d_x);
hipFree(d_r);
hipFree(d_p);
hipFree(d_Ax);
printf("Iterations %d\n", k);
printf("Time cost %f\n", costTime);
printf("Test Summary: Error amount = %f\n\n", err);
//exit((k <= max_iter) ? 0 : 1);
return (k <= max_iter) ? 0 : 1;
}
| 19b4d1d560d2218f6130da7bde3316d5c87b922b.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample implements a conjugate gradient solver on GPU
* using CUBLAS and CUSPARSE
*
*/
// includes, system
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include<fstream>
#include"Util.h"
#include"SparseSolver.h"
/* Using updated (v2) interfaces to cublas */
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cusparse.h>
#include <cooperative_groups.h>
// Utilities and system includes
#include <helper_cuda.h> // helper function CUDA error checking and initialization
#include <helper_functions.h> // helper for shared functions common to CUDA Samples
namespace cg = cooperative_groups;
const char *sSDKNames = "conjugateGradientCudaGraphs";
#ifndef WITH_GRAPH
#define WITH_GRAPH 1
#endif
/* genTridiag: generate a random tridiagonal symmetric matrix */
//void genTridiag(int *I, int *J, float *val, int N, int nz) {
// I[0] = 0, J[0] = 0, J[1] = 1;
// val[0] = (float)rand() / RAND_MAX + 10.0f;
// val[1] = (float)rand() / RAND_MAX;
// int start;
//
// for (int i = 1; i < N; i++) {
// if (i > 1) {
// I[i] = I[i - 1] + 3;
// } else {
// I[1] = 2;
// }
//
// start = (i - 1) * 3 + 2;
// J[start] = i - 1;
// J[start + 1] = i;
//
// if (i < N - 1) {
// J[start + 2] = i + 1;
// }
//
// val[start] = val[start - 1];
// val[start + 1] = (float)rand() / RAND_MAX + 10.0f;
//
// if (i < N - 1) {
// val[start + 2] = (float)rand() / RAND_MAX;
// }
// }
//
// I[N] = nz;
//}
__global__ void initVectors(float *rhs, float *x, int N) {
size_t gid = blockIdx.x * blockDim.x + threadIdx.x;
for (size_t i = gid; i < N; i += gridDim.x * blockDim.x) {
rhs[i] = 1.0;
x[i] = 0.0;
}
}
__global__ void gpuDotProduct(float *vecA, float *vecB, float *result,
int size) {
cg::thread_block cta = cg::this_thread_block();
int gid = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ double tmp[];
double temp_sum = 0.0;
for (int i = gid; i < size; i += gridDim.x * blockDim.x) {
temp_sum += (double)(vecA[i] * vecB[i]);
}
tmp[cta.thread_rank()] = temp_sum;
cg::sync(cta);
cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta);
double beta = temp_sum;
double temp;
for (int i = tile32.size() / 2; i > 0; i >>= 1) {
if (tile32.thread_rank() < i) {
temp = tmp[cta.thread_rank() + i];
beta += temp;
tmp[cta.thread_rank()] = beta;
}
cg::sync(tile32);
}
cg::sync(cta);
if (cta.thread_rank() == 0) {
beta = 0.0;
for (int i = 0; i < cta.size(); i += tile32.size()) {
beta += tmp[i];
}
atomicAdd(result, (float)beta);
}
}
__global__ void r1_div_x(float *r1, float *r0, float *b) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid == 0) {
b[0] = r1[0] / r0[0];
}
}
__global__ void a_minus(float *a, float *na) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid == 0) {
na[0] = -(a[0]);
}
}
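/* The tiny single-thread kernels above (r1_div_x, a_minus) keep the CG scalars resident on the
   device, so an iteration needs no host round trip and can be captured into a CUDA graph.
   Each iteration of the solver below computes, in order:
       b  = r1 / r0              (beta)
       p  = r + b * p
       a  = r1 / (p . A p)       (alpha)
       x  = x + a * p
       r  = r - a * A p
       r0 = r1,  r1 = r . r
*/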
int conjugateGradientCudaGraphs(int argc, char **argv, int size, std::string rfile, float eps, std::string wFile) {
int N = 0, nz = 0, *I = NULL, *J = NULL;
float *val = NULL;
const float tol = eps;
const int max_iter = 10000;
float *x;
float *rhs;
float r1;
int *d_col, *d_row;
float *d_val, *d_x;
float *d_r, *d_p, *d_Ax;
int k;
float alpha, beta, alpham1;
cudaStream_t stream1, streamForGraph;
// This will pick the best possible CUDA capable device
cudaDeviceProp deviceProp;
int devID = findCudaDevice(argc, (const char **)argv);
if (devID < 0) {
printf("exiting...\n");
exit(EXIT_SUCCESS);
}
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID));
// Statistics about the GPU device
printf(
"> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
/* Generate a random tridiagonal symmetric matrix in CSR format */
N = size;
std::ifstream fin(rfile);
fin >> nz;
fin.close();
I = (int *)malloc(sizeof(int) * (N + 1));
J = (int *)malloc(sizeof(int) * nz);
val = (float *)malloc(sizeof(float) * nz);
input(I, J, val, rfile, nz, N);
x = (float *)malloc(sizeof(float) * N);
rhs = (float *)malloc(sizeof(float) * N);
for (int i = 0; i < N; i++) {
rhs[i] = 1.0;
x[i] = 0.0;
}
CpuTimer timer = CpuTimer();
timer.start();
/* Get handle to the CUBLAS context */
cublasHandle_t cublasHandle = 0;
cublasStatus_t cublasStatus;
cublasStatus = cublasCreate(&cublasHandle);
checkCudaErrors(cublasStatus);
/* Get handle to the CUSPARSE context */
cusparseHandle_t cusparseHandle = 0;
cusparseStatus_t cusparseStatus;
cusparseStatus = cusparseCreate(&cusparseHandle);
checkCudaErrors(cusparseStatus);
checkCudaErrors(cudaStreamCreate(&stream1));
checkCudaErrors(cudaMalloc((void **)&d_col, nz * sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&d_row, (N + 1) * sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&d_val, nz * sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_x, N * sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_r, N * sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_p, N * sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_Ax, N * sizeof(float)));
float *d_r1, *d_r0, *d_dot, *d_a, *d_na, *d_b;
checkCudaErrors(cudaMalloc((void **)&d_r1, sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_r0, sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_dot, sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_a, sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_na, sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_b, sizeof(float)));
cusparseMatDescr_t descr = 0;
checkCudaErrors(cusparseCreateMatDescr(&descr));
checkCudaErrors(cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL));
checkCudaErrors(cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO));
int numBlocks = 0, blockSize = 0;
checkCudaErrors(
cudaOccupancyMaxPotentialBlockSize(&numBlocks, &blockSize, initVectors));
checkCudaErrors(cudaMemcpyAsync(d_col, J, nz * sizeof(int),
cudaMemcpyHostToDevice, stream1));
checkCudaErrors(cudaMemcpyAsync(d_row, I, (N + 1) * sizeof(int),
cudaMemcpyHostToDevice, stream1));
checkCudaErrors(cudaMemcpyAsync(d_val, val, nz * sizeof(float),
cudaMemcpyHostToDevice, stream1));
initVectors<<<numBlocks, blockSize, 0, stream1>>>(d_r, d_x, N);
checkCudaErrors(cudaOccupancyMaxPotentialBlockSize(&numBlocks, &blockSize,
gpuDotProduct));
alpha = 1.0;
alpham1 = -1.0;
beta = 0.0;
checkCudaErrors(cusparseSetStream(cusparseHandle, stream1));
checkCudaErrors(
cusparseScsrmv(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz,
&alpha, descr, d_val, d_row, d_col, d_x, &beta, d_Ax));
checkCudaErrors(cublasSetStream(cublasHandle, stream1));
checkCudaErrors(cublasSaxpy(cublasHandle, N, &alpham1, d_Ax, 1, d_r, 1));
checkCudaErrors(
cublasSetPointerMode(cublasHandle, CUBLAS_POINTER_MODE_DEVICE));
checkCudaErrors(cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, d_r1));
k = 1;
// First Iteration when k=1 starts
checkCudaErrors(cublasScopy(cublasHandle, N, d_r, 1, d_p, 1));
checkCudaErrors(
cusparseScsrmv(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz,
&alpha, descr, d_val, d_row, d_col, d_p, &beta, d_Ax));
checkCudaErrors(cublasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, d_dot));
r1_div_x<<<1, 1, 0, stream1>>>(d_r1, d_dot, d_a);
checkCudaErrors(cublasSaxpy(cublasHandle, N, d_a, d_p, 1, d_x, 1));
a_minus<<<1, 1, 0, stream1>>>(d_a, d_na);
checkCudaErrors(cublasSaxpy(cublasHandle, N, d_na, d_Ax, 1, d_r, 1));
checkCudaErrors(cudaMemcpyAsync(d_r0, d_r1, sizeof(float),
cudaMemcpyDeviceToDevice, stream1));
checkCudaErrors(cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, d_r1));
checkCudaErrors(cudaMemcpyAsync(&r1, d_r1, sizeof(float),
cudaMemcpyDeviceToHost, stream1));
checkCudaErrors(cudaStreamSynchronize(stream1));
//printf("iteration = %3d, residual = %e\n", k, sqrt(r1));
// First Iteration when k=1 ends
k++;
#if WITH_GRAPH
cudaGraph_t initGraph;
checkCudaErrors(cudaStreamCreate(&streamForGraph));
checkCudaErrors(cublasSetStream(cublasHandle, stream1));
checkCudaErrors(cusparseSetStream(cusparseHandle, stream1));
checkCudaErrors(cudaStreamBeginCapture(stream1, cudaStreamCaptureModeGlobal));
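/* Everything enqueued on stream1 from here until cudaStreamEndCapture below is recorded into
   initGraph instead of being executed; the instantiated graph is then replayed with a single
   cudaGraphLaunch per CG iteration. */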
r1_div_x<<<1, 1, 0, stream1>>>(d_r1, d_r0, d_b);
cublasSetPointerMode(cublasHandle, CUBLAS_POINTER_MODE_DEVICE);
checkCudaErrors(cublasSscal(cublasHandle, N, d_b, d_p, 1));
cublasSetPointerMode(cublasHandle, CUBLAS_POINTER_MODE_HOST);
checkCudaErrors(cublasSaxpy(cublasHandle, N, &alpha, d_r, 1, d_p, 1));
cublasSetPointerMode(cublasHandle, CUBLAS_POINTER_MODE_DEVICE);
checkCudaErrors(
cusparseSetPointerMode(cusparseHandle, CUSPARSE_POINTER_MODE_HOST));
checkCudaErrors(
cusparseScsrmv(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz,
&alpha, descr, d_val, d_row, d_col, d_p, &beta, d_Ax));
checkCudaErrors(cudaMemsetAsync(d_dot, 0, sizeof(float), stream1));
// Use cublasSdot API when it is cuda graph compliant.
// checkCudaErrors(cublasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, d_dot));
gpuDotProduct<<<numBlocks, blockSize, blockSize * sizeof(double), stream1>>>(
d_p, d_Ax, d_dot, N);
r1_div_x<<<1, 1, 0, stream1>>>(d_r1, d_dot, d_a);
checkCudaErrors(cublasSaxpy(cublasHandle, N, d_a, d_p, 1, d_x, 1));
a_minus<<<1, 1, 0, stream1>>>(d_a, d_na);
checkCudaErrors(cublasSaxpy(cublasHandle, N, d_na, d_Ax, 1, d_r, 1));
checkCudaErrors(cudaMemcpyAsync(d_r0, d_r1, sizeof(float),
cudaMemcpyDeviceToDevice, stream1));
checkCudaErrors(cudaMemsetAsync(d_r1, 0, sizeof(float), stream1));
// Use cublasSdot API when it is cuda graph compliant.
// checkCudaErrors(cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, d_r1));
gpuDotProduct<<<numBlocks, blockSize, blockSize * sizeof(double), stream1>>>(
d_r, d_r, d_r1, N);
checkCudaErrors(cudaMemcpyAsync((float *)&r1, d_r1, sizeof(float),
cudaMemcpyDeviceToHost, stream1));
checkCudaErrors(cudaStreamEndCapture(stream1, &initGraph));
cudaGraphExec_t graphExec;
checkCudaErrors(cudaGraphInstantiate(&graphExec, initGraph, NULL, NULL, 0));
#endif
checkCudaErrors(cublasSetStream(cublasHandle, stream1));
checkCudaErrors(cusparseSetStream(cusparseHandle, stream1));
while (r1 > tol * tol && k <= max_iter) {
#if WITH_GRAPH
checkCudaErrors(cudaGraphLaunch(graphExec, streamForGraph));
checkCudaErrors(cudaStreamSynchronize(streamForGraph));
#else
r1_div_x<<<1, 1, 0, stream1>>>(d_r1, d_r0, d_b);
cublasSetPointerMode(cublasHandle, CUBLAS_POINTER_MODE_DEVICE);
checkCudaErrors(cublasSscal(cublasHandle, N, d_b, d_p, 1));
cublasSetPointerMode(cublasHandle, CUBLAS_POINTER_MODE_HOST);
checkCudaErrors(cublasSaxpy(cublasHandle, N, &alpha, d_r, 1, d_p, 1));
checkCudaErrors(cusparseScsrmv(
cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha,
descr, d_val, d_row, d_col, d_p, &beta, d_Ax));
cublasSetPointerMode(cublasHandle, CUBLAS_POINTER_MODE_DEVICE);
checkCudaErrors(cublasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, d_dot));
r1_div_x<<<1, 1, 0, stream1>>>(d_r1, d_dot, d_a);
checkCudaErrors(cublasSaxpy(cublasHandle, N, d_a, d_p, 1, d_x, 1));
a_minus<<<1, 1, 0, stream1>>>(d_a, d_na);
checkCudaErrors(cublasSaxpy(cublasHandle, N, d_na, d_Ax, 1, d_r, 1));
checkCudaErrors(cudaMemcpyAsync(d_r0, d_r1, sizeof(float),
cudaMemcpyDeviceToDevice, stream1));
checkCudaErrors(cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, d_r1));
checkCudaErrors(cudaMemcpyAsync((float *)&r1, d_r1, sizeof(float),
cudaMemcpyDeviceToHost, stream1));
checkCudaErrors(cudaStreamSynchronize(stream1));
#endif
//printf("iteration = %3d, residual = %e\n", k, sqrt(r1));
k++;
}
#if WITH_GRAPH
checkCudaErrors(cudaMemcpyAsync(x, d_x, N * sizeof(float),
cudaMemcpyDeviceToHost, streamForGraph));
checkCudaErrors(cudaStreamSynchronize(streamForGraph));
#else
checkCudaErrors(cudaMemcpyAsync(x, d_x, N * sizeof(float),
cudaMemcpyDeviceToHost, stream1));
checkCudaErrors(cudaStreamSynchronize(stream1));
#endif
float costTime = timer.stop();
float rsum, diff, err = 0.0;
for (int i = 0; i < N; i++) {
rsum = 0.0;
for (int j = I[i]; j < I[i + 1]; j++) {
rsum += val[j] * x[J[j]];
}
diff = fabs(rsum - rhs[i]);
if (diff > err) {
err = diff;
}
}
#if WITH_GRAPH
checkCudaErrors(cudaGraphExecDestroy(graphExec));
checkCudaErrors(cudaGraphDestroy(initGraph));
checkCudaErrors(cudaStreamDestroy(streamForGraph));
#endif
output(wFile, x, size);
checkCudaErrors(cudaStreamDestroy(stream1));
cusparseDestroy(cusparseHandle);
cublasDestroy(cublasHandle);
free(I);
free(J);
free(val);
free(x);
free(rhs);
cudaFree(d_col);
cudaFree(d_row);
cudaFree(d_val);
cudaFree(d_x);
cudaFree(d_r);
cudaFree(d_p);
cudaFree(d_Ax);
printf("Iterations %d\n", k);
printf("Time cost %f\n", costTime);
printf("Test Summary: Error amount = %f\n\n", err);
//exit((k <= max_iter) ? 0 : 1);
return (k <= max_iter) ? 0 : 1;
}
|
444029ee01b9ddb3cd2e3820697d78407ee6a913.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include <pcl/gpu/containers/device_array.h>
#include "pcl/gpu/utils/safe_call.hpp"
#include "pcl/gpu/utils/limits.hpp"
#include "pcl/gpu/utils/cutil_math.h"
#include <device_launch_parameters.h>
/** \brief Cross-stream extrinsics: encode the topology describing how the different devices are connected. */
typedef struct rs2_extrinsics
{
float rotation[9]; /**< Column-major 3x3 rotation matrix */
float translation[3]; /**< Three-element translation vector, in meters */
} rs2_extrinsics;
/** \brief Distortion model: defines how pixel coordinates should be mapped to sensor coordinates. */
typedef enum rs2_distortion
{
RS2_DISTORTION_NONE, /**< Rectilinear images. No distortion compensation required. */
RS2_DISTORTION_MODIFIED_BROWN_CONRADY, /**< Equivalent to Brown-Conrady distortion, except that tangential distortion is applied to radially distorted points */
RS2_DISTORTION_INVERSE_BROWN_CONRADY, /**< Equivalent to Brown-Conrady distortion, except undistorts image instead of distorting it */
RS2_DISTORTION_FTHETA, /**< F-Theta fish-eye distortion model */
RS2_DISTORTION_BROWN_CONRADY, /**< Unmodified Brown-Conrady distortion model */
RS2_DISTORTION_COUNT /**< Number of enumeration values. Not a valid input: intended to be used in for-loops. */
} rs2_distortion;
/** \brief Video stream intrinsics */
typedef struct rs2_intrinsics
{
int width; /**< Width of the image in pixels */
int height; /**< Height of the image in pixels */
float ppx; /**< Horizontal coordinate of the principal point of the image, as a pixel offset from the left edge */
float ppy; /**< Vertical coordinate of the principal point of the image, as a pixel offset from the top edge */
float fx; /**< Focal length of the image plane, as a multiple of pixel width */
float fy; /**< Focal length of the image plane, as a multiple of pixel height */
rs2_distortion model; /**< Distortion model of the image */
float coeffs[5]; /**< Distortion coefficients, order: k1, k2, p1, p2, k3 */
} rs2_intrinsics;
#define HALF_WIN_SIZE 5
#define DIST_LIMIT 0.10f
#define SIGMA_SPACE 4.5
#define SIGMA_COLOR 30
#define SIGMA_SPACE2_INV_HALF (0.5/(SIGMA_SPACE*SIGMA_SPACE))
#define SIGMA_COLOR2_INV_HALF (0.5/(SIGMA_COLOR*SIGMA_COLOR))
#define BILATERAL_FILTER_RADIUS 6
#define BILATERAL_FILTER_DIAMETER (2*BILATERAL_FILTER_RADIUS + 1)
__global__ void kernelConvertUshort2Float(pcl::gpu::PtrStepSz<float> output,
const pcl::gpu::PtrStepSz<unsigned short> input,
const float depth_scale)
{
int xid = blockIdx.x*blockDim.x + threadIdx.x;
int yid = blockIdx.y*blockDim.y + threadIdx.y;
if (xid < output.cols && yid < output.rows)
{
output(yid, xid) = (float)input(yid, xid) * depth_scale;
//output(yid, xid) /= 1000.0f; /// some inputs need to multiply 1.25 to get the correct depth
}
}
void convertUshort2Float(pcl::gpu::DeviceArray2D<float> output,
const pcl::gpu::DeviceArray2D<unsigned short> input,
const float depth_scale)
{
dim3 block(16, 16);
dim3 grid(pcl::gpu::divUp(output.cols(), block.x),
pcl::gpu::divUp(output.rows(), block.y));
hipLaunchKernelGGL(( kernelConvertUshort2Float), dim3(grid), dim3(block), 0, 0, output, input, depth_scale);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipStreamSynchronize(0));
#endif
}
__global__ void kernelConvertUchar32Float3(pcl::gpu::PtrStepSz<float4> output,
const pcl::gpu::PtrStepSz<uchar3> input)
{
int xid = blockIdx.x*blockDim.x + threadIdx.x;
int yid = blockIdx.y*blockDim.y + threadIdx.y;
if (xid < output.cols && yid < output.rows)
{
output(yid, xid).x = (float)input(yid, xid).z / 255.0f;
output(yid, xid).y = (float)input(yid, xid).y / 255.0f;
output(yid, xid).z = (float)input(yid, xid).x / 255.0f;
output(yid, xid).w = 1.0f;
}
}
void convertUchar2Float(pcl::gpu::DeviceArray2D<float4> output,
const pcl::gpu::DeviceArray2D<uchar3> input)
{
dim3 block(16, 16);
dim3 grid(pcl::gpu::divUp(output.cols(), block.x),
pcl::gpu::divUp(output.rows(), block.y));
hipLaunchKernelGGL(( kernelConvertUchar32Float3), dim3(grid), dim3(block), 0, 0, output, input);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(hipGetLastError());
cudaSafeCall(hipStreamSynchronize(0));
#endif
}
__global__ void kernelConvertUint2Uchar(pcl::gpu::PtrStepSz<unsigned short> output,
const pcl::gpu::PtrStepSz<unsigned int> input)
{
int xid = blockIdx.x*blockDim.x + threadIdx.x;
int yid = blockIdx.y*blockDim.y + threadIdx.y;
if (xid < output.cols && yid < output.rows)
{
output(yid, xid) = (unsigned short)input(yid, xid);
}
}
void convertUint2Uchar(pcl::gpu::DeviceArray2D<unsigned short> output,
const pcl::gpu::DeviceArray2D<unsigned int> input)
{
dim3 block(16, 16);
dim3 grid(pcl::gpu::divUp(output.cols(), block.x),
pcl::gpu::divUp(output.rows(), block.y));
hipLaunchKernelGGL(( kernelConvertUint2Uchar), dim3(grid), dim3(block), 0, 0, output, input);
}
/* Given a point in 3D space, compute the corresponding pixel coordinates in an image with no distortion or forward distortion coefficients produced by the same camera */
__device__ __forceinline__ void rs2_project_point_to_pixel(float pixel[2], const struct rs2_intrinsics * intrin, const float point[3])
{
//assert(intrin->model != RS2_DISTORTION_INVERSE_BROWN_CONRADY); // Cannot project to an inverse-distorted image
float x = point[0] / point[2], y = point[1] / point[2];
if (intrin->model == RS2_DISTORTION_MODIFIED_BROWN_CONRADY)
{
float r2 = x*x + y*y;
float f = 1 + intrin->coeffs[0] * r2 + intrin->coeffs[1] * r2*r2 + intrin->coeffs[4] * r2*r2*r2;
x *= f;
y *= f;
float dx = x + 2 * intrin->coeffs[2] * x*y + intrin->coeffs[3] * (r2 + 2 * x*x);
float dy = y + 2 * intrin->coeffs[3] * x*y + intrin->coeffs[2] * (r2 + 2 * y*y);
x = dx;
y = dy;
}
if (intrin->model == RS2_DISTORTION_FTHETA)
{
float r = sqrtf(x*x + y*y);
float rd = (float)(1.0f / intrin->coeffs[0] * atan(2 * r* tan(intrin->coeffs[0] / 2.0f)));
x *= rd / r;
y *= rd / r;
}
pixel[0] = x * intrin->fx + intrin->ppx;
pixel[1] = y * intrin->fy + intrin->ppy;
}
__device__ __forceinline__ void rs2_transform_point_to_point(float to_point[3], const struct rs2_extrinsics * extrin, const float from_point[3])
{
to_point[0] = extrin->rotation[0] * from_point[0] + extrin->rotation[3] * from_point[1] + extrin->rotation[6] * from_point[2] + extrin->translation[0];
to_point[1] = extrin->rotation[1] * from_point[0] + extrin->rotation[4] * from_point[1] + extrin->rotation[7] * from_point[2] + extrin->translation[1];
to_point[2] = extrin->rotation[2] * from_point[0] + extrin->rotation[5] * from_point[1] + extrin->rotation[8] * from_point[2] + extrin->translation[2];
}
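/* Usage sketch (hypothetical caller-side variables, not defined in this file): to map a 3D point
   "depth_pt" expressed in the depth camera's frame onto a color-image pixel, one would chain the
   two helpers above, assuming a depth-to-color rs2_extrinsics "depth_to_color" and the color
   camera's rs2_intrinsics "color_intrin":
       float color_pt[3], color_px[2];
       rs2_transform_point_to_point(color_pt, &depth_to_color, depth_pt);
       rs2_project_point_to_pixel(color_px, &color_intrin, color_pt);
*/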
__global__ void kernel_set_to_max(pcl::gpu::PtrStepSz<unsigned int> input)
{
int xid = blockIdx.x*blockDim.x + threadIdx.x;
int yid = blockIdx.y*blockDim.y + threadIdx.y;
if (xid < input.cols && yid < input.rows)
{
input(yid, xid) = 65535U;
}
}
__global__ void kernel_set_max_to_zero(pcl::gpu::PtrStepSz<unsigned int> input)
{
int xid = blockIdx.x*blockDim.x + threadIdx.x;
int yid = blockIdx.y*blockDim.y + threadIdx.y;
if (xid < input.cols && yid < input.rows)
{
if (input(yid, xid) == 65535)
{
input(yid, xid) = 0;
}
}
}
| 444029ee01b9ddb3cd2e3820697d78407ee6a913.cu | #include <cuda_runtime.h>
#include <iostream>
#include <pcl/gpu/containers/device_array.h>
#include "pcl/gpu/utils/safe_call.hpp"
#include "pcl/gpu/utils/limits.hpp"
#include "pcl/gpu/utils/cutil_math.h"
#include <device_launch_parameters.h>
/** \brief Cross-stream extrinsics: encode the topology describing how the different devices are connected. */
typedef struct rs2_extrinsics
{
float rotation[9]; /**< Column-major 3x3 rotation matrix */
float translation[3]; /**< Three-element translation vector, in meters */
} rs2_extrinsics;
/** \brief Distortion model: defines how pixel coordinates should be mapped to sensor coordinates. */
typedef enum rs2_distortion
{
RS2_DISTORTION_NONE, /**< Rectilinear images. No distortion compensation required. */
RS2_DISTORTION_MODIFIED_BROWN_CONRADY, /**< Equivalent to Brown-Conrady distortion, except that tangential distortion is applied to radially distorted points */
RS2_DISTORTION_INVERSE_BROWN_CONRADY, /**< Equivalent to Brown-Conrady distortion, except undistorts image instead of distorting it */
RS2_DISTORTION_FTHETA, /**< F-Theta fish-eye distortion model */
RS2_DISTORTION_BROWN_CONRADY, /**< Unmodified Brown-Conrady distortion model */
RS2_DISTORTION_COUNT /**< Number of enumeration values. Not a valid input: intended to be used in for-loops. */
} rs2_distortion;
/** \brief Video stream intrinsics */
typedef struct rs2_intrinsics
{
int width; /**< Width of the image in pixels */
int height; /**< Height of the image in pixels */
float ppx; /**< Horizontal coordinate of the principal point of the image, as a pixel offset from the left edge */
float ppy; /**< Vertical coordinate of the principal point of the image, as a pixel offset from the top edge */
float fx; /**< Focal length of the image plane, as a multiple of pixel width */
float fy; /**< Focal length of the image plane, as a multiple of pixel height */
rs2_distortion model; /**< Distortion model of the image */
float coeffs[5]; /**< Distortion coefficients, order: k1, k2, p1, p2, k3 */
} rs2_intrinsics;
#define HALF_WIN_SIZE 5
#define DIST_LIMIT 0.10f
#define SIGMA_SPACE 4.5
#define SIGMA_COLOR 30
#define SIGMA_SPACE2_INV_HALF (0.5/(SIGMA_SPACE*SIGMA_SPACE))
#define SIGMA_COLOR2_INV_HALF (0.5/(SIGMA_COLOR*SIGMA_COLOR))
#define BILATERAL_FILTER_RADIUS 6
#define BILATERAL_FILTER_DIAMETER (2*BILATERAL_FILTER_RADIUS + 1)
__global__ void kernelConvertUshort2Float(pcl::gpu::PtrStepSz<float> output,
const pcl::gpu::PtrStepSz<unsigned short> input,
const float depth_scale)
{
int xid = blockIdx.x*blockDim.x + threadIdx.x;
int yid = blockIdx.y*blockDim.y + threadIdx.y;
if (xid < output.cols && yid < output.rows)
{
output(yid, xid) = (float)input(yid, xid) * depth_scale;
//output(yid, xid) /= 1000.0f; /// some inputs need to multiply 1.25 to get the correct depth
}
}
void convertUshort2Float(pcl::gpu::DeviceArray2D<float> output,
const pcl::gpu::DeviceArray2D<unsigned short> input,
const float depth_scale)
{
dim3 block(16, 16);
dim3 grid(pcl::gpu::divUp(output.cols(), block.x),
pcl::gpu::divUp(output.rows(), block.y));
kernelConvertUshort2Float<<<grid, block>>>(output, input, depth_scale);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(cudaGetLastError());
cudaSafeCall(cudaStreamSynchronize(0));
#endif
}
__global__ void kernelConvertUchar32Float3(pcl::gpu::PtrStepSz<float4> output,
const pcl::gpu::PtrStepSz<uchar3> input)
{
int xid = blockIdx.x*blockDim.x + threadIdx.x;
int yid = blockIdx.y*blockDim.y + threadIdx.y;
if (xid < output.cols && yid < output.rows)
{
output(yid, xid).x = (float)input(yid, xid).z / 255.0f;
output(yid, xid).y = (float)input(yid, xid).y / 255.0f;
output(yid, xid).z = (float)input(yid, xid).x / 255.0f;
output(yid, xid).w = 1.0f;
}
}
void convertUchar2Float(pcl::gpu::DeviceArray2D<float4> output,
const pcl::gpu::DeviceArray2D<uchar3> input)
{
dim3 block(16, 16);
dim3 grid(pcl::gpu::divUp(output.cols(), block.x),
pcl::gpu::divUp(output.rows(), block.y));
kernelConvertUchar32Float3<<<grid, block>>>(output, input);
#if CUDA_GET_LAST_ERROR_AND_SYNC==1
// device synchronize
cudaSafeCall(cudaGetLastError());
cudaSafeCall(cudaStreamSynchronize(0));
#endif
}
__global__ void kernelConvertUint2Uchar(pcl::gpu::PtrStepSz<unsigned short> output,
const pcl::gpu::PtrStepSz<unsigned int> input)
{
int xid = blockIdx.x*blockDim.x + threadIdx.x;
int yid = blockIdx.y*blockDim.y + threadIdx.y;
if (xid < output.cols && yid < output.rows)
{
output(yid, xid) = (unsigned short)input(yid, xid);
}
}
void convertUint2Uchar(pcl::gpu::DeviceArray2D<unsigned short> output,
const pcl::gpu::DeviceArray2D<unsigned int> input)
{
dim3 block(16, 16);
dim3 grid(pcl::gpu::divUp(output.cols(), block.x),
pcl::gpu::divUp(output.rows(), block.y));
kernelConvertUint2Uchar<<<grid, block>>>(output, input);
}
/* Given a point in 3D space, compute the corresponding pixel coordinates in an image with no distortion or forward distortion coefficients produced by the same camera */
__device__ __forceinline__ void rs2_project_point_to_pixel(float pixel[2], const struct rs2_intrinsics * intrin, const float point[3])
{
//assert(intrin->model != RS2_DISTORTION_INVERSE_BROWN_CONRADY); // Cannot project to an inverse-distorted image
float x = point[0] / point[2], y = point[1] / point[2];
if (intrin->model == RS2_DISTORTION_MODIFIED_BROWN_CONRADY)
{
float r2 = x*x + y*y;
float f = 1 + intrin->coeffs[0] * r2 + intrin->coeffs[1] * r2*r2 + intrin->coeffs[4] * r2*r2*r2;
x *= f;
y *= f;
float dx = x + 2 * intrin->coeffs[2] * x*y + intrin->coeffs[3] * (r2 + 2 * x*x);
float dy = y + 2 * intrin->coeffs[3] * x*y + intrin->coeffs[2] * (r2 + 2 * y*y);
x = dx;
y = dy;
}
if (intrin->model == RS2_DISTORTION_FTHETA)
{
float r = sqrtf(x*x + y*y);
float rd = (float)(1.0f / intrin->coeffs[0] * atan(2 * r* tan(intrin->coeffs[0] / 2.0f)));
x *= rd / r;
y *= rd / r;
}
pixel[0] = x * intrin->fx + intrin->ppx;
pixel[1] = y * intrin->fy + intrin->ppy;
}
__device__ __forceinline__ void rs2_transform_point_to_point(float to_point[3], const struct rs2_extrinsics * extrin, const float from_point[3])
{
to_point[0] = extrin->rotation[0] * from_point[0] + extrin->rotation[3] * from_point[1] + extrin->rotation[6] * from_point[2] + extrin->translation[0];
to_point[1] = extrin->rotation[1] * from_point[0] + extrin->rotation[4] * from_point[1] + extrin->rotation[7] * from_point[2] + extrin->translation[1];
to_point[2] = extrin->rotation[2] * from_point[0] + extrin->rotation[5] * from_point[1] + extrin->rotation[8] * from_point[2] + extrin->translation[2];
}
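/* Usage sketch (hypothetical caller-side variables, not defined in this file): to map a 3D point
   "depth_pt" expressed in the depth camera's frame onto a color-image pixel, one would chain the
   two helpers above, assuming a depth-to-color rs2_extrinsics "depth_to_color" and the color
   camera's rs2_intrinsics "color_intrin":
       float color_pt[3], color_px[2];
       rs2_transform_point_to_point(color_pt, &depth_to_color, depth_pt);
       rs2_project_point_to_pixel(color_px, &color_intrin, color_pt);
*/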
__global__ void kernel_set_to_max(pcl::gpu::PtrStepSz<unsigned int> input)
{
int xid = blockIdx.x*blockDim.x + threadIdx.x;
int yid = blockIdx.y*blockDim.y + threadIdx.y;
if (xid < input.cols && yid < input.rows)
{
input(yid, xid) = 65535U;
}
}
__global__ void kernel_set_max_to_zero(pcl::gpu::PtrStepSz<unsigned int> input)
{
int xid = blockIdx.x*blockDim.x + threadIdx.x;
int yid = blockIdx.y*blockDim.y + threadIdx.y;
if (xid < input.cols && yid < input.rows)
{
if (input(yid, xid) == 65535)
{
input(yid, xid) = 0;
}
}
}
|
ace76506058e28ff81491f5fa46fc3b6e68024b3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdint.h>
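/* Each thread tests its own global index: the index is printed when the sum of its decimal digits
   multiplied by the product of its decimal digits equals the index itself (a sum-product number). */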
__global__ void kernel() {
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t n = tid;
uint32_t sum = 0;
uint32_t prod = 1;
while(n != 0){
uint32_t digit = n % 10;
n /= 10;
sum += digit;
prod *= digit;
}
if(sum*prod == tid) printf("%u\n", tid);
return;
}
void checkrange(uint32_t range){
// use at most 1024 threads per block (the hardware limit) and enough blocks to cover the whole range
uint32_t threads = range < 1024 ? range : 1024;
uint32_t blocks = (range + threads - 1) / threads;
printf("Checking %u for sum-product numbers\n", range);
hipLaunchKernelGGL(( kernel), dim3(blocks), dim3(threads), 0, 0, );
hipDeviceSynchronize();
}
int main() {
// main iteration
checkrange(1024);
checkrange(16777216);
return 0;
} | ace76506058e28ff81491f5fa46fc3b6e68024b3.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include <stdint.h>
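/* Each thread tests its own global index: the index is printed when the sum of its decimal digits
   multiplied by the product of its decimal digits equals the index itself (a sum-product number). */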
__global__ void kernel() {
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t n = tid;
uint32_t sum = 0;
uint32_t prod = 1;
while(n != 0){
uint32_t digit = n % 10;
n /= 10;
sum += digit;
prod *= digit;
}
if(sum*prod == tid) printf("%u\n", tid);
return;
}
void checkrange(uint32_t range){
// use at most 1024 threads per block (the hardware limit) and enough blocks to cover the whole range
uint32_t threads = range < 1024 ? range : 1024;
uint32_t blocks = (range + threads - 1) / threads;
printf("Checking %u for sum-product numbers\n", range);
kernel<<<blocks, threads, 0>>>();
cudaDeviceSynchronize();
}
int main() {
// main iteration
checkrange(1024);
checkrange(16777216);
return 0;
} |
df2e1201df26234d1cbdd67eaf8e4386d143f750.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
CPP_CONTEST=2017
CPP_PROBLEM=I
CPP_LANG=CUDA
CPP_PROCESSES_PER_NODE=saturno 1
*/
/* RECORD
Francisco Muñoz García
September 20, 2017
in CESGA
time 1520
speed-up 9.80
*/
#include <stdlib.h>
__device__ int count(int ld,int n,char *a,char *b) //Each CUDA thread does this work; it is called from the kernel, so it is declared __device__
{
int i,j;
int value=0;
for(i=0;i < n;i++)
for(j=0;j < n;j++)
if(a[i*ld+j]==b[i*n+j])
value++;
return value;
}
/*
We create one thread for each element of the size x size result matrix. Each thread compares its window of the big matrix against the mask and
stores the match count in the result matrix, so each thread has one associated output element.
*/
__global__ void mask(char* a, char* b, int* temp, int n, int m) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int size = n-m;
if((i<size) && (j<size)) {
temp[i*size+j]=count(n,m,&a[i*n+j],b);
}
}
int sec(int n,char *a,int m,char *b)
{
int i, j;
int maximum=0,value;
int size = n-m;
int nbytes_a = sizeof(char)*n*n;
int nbytes_b = sizeof(char)*m*m;
int nBytes_temp = sizeof(int)*size*size;
int* temp =(int*) malloc(sizeof(int)*size*size);
int* temp_d;
char* a_d;
char* b_d;
int bl_dim1 = 4;
int bl_dim2 = 8;
dim3 block(bl_dim1,bl_dim2);
//we need (n-m) x (n-m) threads, one per element of the result matrix
int gsx = size / bl_dim1;
if(size%bl_dim1) gsx++;
int gsy = size / bl_dim2;
if(size%bl_dim2) gsy++;
dim3 grid(gsx, gsy);
//We reserve memory for GPU
hipMalloc((void **) &temp_d, nBytes_temp);
hipMalloc((void**) &a_d, nbytes_a);
hipMalloc((void**) &b_d, nbytes_b);
//Transfers here
hipMemset(temp_d, 0, nBytes_temp*sizeof(char)); //Clear the result buffer to zero before the kernel writes its per-thread counts.
hipMemcpy(a_d, a, nbytes_a, hipMemcpyHostToDevice);
hipMemcpy(b_d, b, nbytes_b, hipMemcpyHostToDevice);
//call the kernel
hipLaunchKernelGGL(( mask), dim3(grid), dim3(block), 0, 0, a_d, b_d, temp_d, n,m );
//We transfer the results to RAM
hipMemcpy(temp, temp_d, nBytes_temp, hipMemcpyDeviceToHost);
hipFree((void**)temp_d);
hipFree((void**)a_d);
hipFree((void**)b_d);
//Once we have the result of every comparison we only need to find the largest count. We do this sequentially.
maximum = temp[0];
for(int i=1; i<size*size;i++) {
if(temp[i]>maximum)
maximum=temp[i];
}
free(temp);
return maximum;
}
| df2e1201df26234d1cbdd67eaf8e4386d143f750.cu |
/*
CPP_CONTEST=2017
CPP_PROBLEM=I
CPP_LANG=CUDA
CPP_PROCESSES_PER_NODE=saturno 1
*/
/* RECORD
Francisco Muñoz García
September 20, 2017
in CESGA
time 1520
speed-up 9.80
*/
#include <stdlib.h>
__device__ int count(int ld,int n,char *a,char *b) //Each CUDA thread does this work; it is called from the kernel, so it is declared __device__
{
int i,j;
int value=0;
for(i=0;i < n;i++)
for(j=0;j < n;j++)
if(a[i*ld+j]==b[i*n+j])
value++;
return value;
}
/*
We create one thread for each element of the size x size result matrix. Each thread compares its window of the big matrix against the mask and
stores the match count in the result matrix, so each thread has one associated output element.
*/
__global__ void mask(char* a, char* b, int* temp, int n, int m) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int size = n-m;
if((i<size) && (j<size)) {
temp[i*size+j]=count(n,m,&a[i*n+j],b);
}
}
int sec(int n,char *a,int m,char *b)
{
int i, j;
int maximum=0,value;
int size = n-m;
int nbytes_a = sizeof(char)*n*n;
int nbytes_b = sizeof(char)*m*m;
int nBytes_temp = sizeof(int)*size*size;
int* temp =(int*) malloc(sizeof(int)*size*size);
int* temp_d;
char* a_d;
char* b_d;
int bl_dim1 = 4;
int bl_dim2 = 8;
dim3 block(bl_dim1,bl_dim2);
//we need (n-m) x (n-m) threads, one per element of the result matrix
int gsx = size / bl_dim1;
if(size%bl_dim1) gsx++;
int gsy = size / bl_dim2;
if(size%bl_dim2) gsy++;
dim3 grid(gsx, gsy);
//We reserve memory for GPU
cudaMalloc((void **) &temp_d, nBytes_temp);
cudaMalloc((void**) &a_d, nbytes_a);
cudaMalloc((void**) &b_d, nbytes_b);
//Transfers here
cudaMemset(temp_d, 0, nBytes_temp*sizeof(char)); //Clear the result buffer to zero before the kernel writes its per-thread counts.
cudaMemcpy(a_d, a, nbytes_a, cudaMemcpyHostToDevice);
cudaMemcpy(b_d, b, nbytes_b, cudaMemcpyHostToDevice);
//call the kernel
mask<<<grid, block>>>(a_d, b_d, temp_d, n,m );
//We transfer the results to RAM
cudaMemcpy(temp, temp_d, nBytes_temp, cudaMemcpyDeviceToHost);
cudaFree((void**)temp_d);
cudaFree((void**)a_d);
cudaFree((void**)b_d);
//Once we have the result of every comparison we only need to find the largest count. We do this sequentially.
maximum = temp[0];
for(int i=1; i<size*size;i++) {
if(temp[i]>maximum)
maximum=temp[i];
}
free(temp);
return maximum;
}
|
b3aee0986826b1a576685e64b1a28161f3224392.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../GpuDelaunay.h"
#include<iomanip>
#include<iostream>
#include <thrust/gather.h>
#include "KerCommon.h"
#include "KerDivision.h"
#include "KerPredicates.h"
#include "ThrustWrapper.h"
#include "../../Visualizer.h"
////
// GpuDel methods
////
void GpuDel::cleanup()
{
thrust_free_all();
_memPool.free();
_pointVec.free();
_constraintVec.free();
_triVec.free();
_oppVec.free();
_triInfoVec.free();
_orgPointIdx.free();
_vertTriVec.free();
_counters.free();
_actConsVec.free();
_orgFlipNum.clear();
_dPredWrapper.cleanup();
__circleCountVec.free();
__rejFlipVec.free();
_numActiveVec.clear();
_numFlipVec.clear();
_numCircleVec.clear();
_timeCheckVec.clear();
_timeFlipVec.clear();
}
void GpuDel::compute
(
Point2DVec &pointVec,
TriDVec* output
)
{
GDel2DInput input;
input.noSort = true;
input.noReorder = true;
// Set L1 for kernels
hipDeviceSetCacheConfig( hipFuncCachePreferL1 );
_pointVec._ptr = pointVec._ptr;
_pointVec._size = pointVec._size;
_pointVec._capacity = pointVec._capacity;
_pointVec._owned = false;
_input = &input;
_output = new GDel2DOutput();
//_output = output;
initProfiling();
startTiming( ProfDefault );
initForFlip();
splitAndFlip();
outputToHost();
stopTiming( ProfDefault, _output->stats.totalTime );
if ( _input->isProfiling( ProfDetail ) )
{
std::cout << " FlipCompact time: ";
_diagLogCompact.printTime();
std::cout << std::endl;
std::cout << " FlipCollect time: ";
_diagLogCollect.printTime();
std::cout << std::endl;
}
std::cout << "# GPU Triangles: " << _triVec.size() << std::endl;
*(output) = _triVec;
_triVec._owned = false;
cleanup();
return;
}
void GpuDel::startTiming( ProfLevel level )
{
if ( _input->isProfiling( level ) )
_profTimer[ level ].start();
}
void GpuDel::pauseTiming( ProfLevel level )
{
if ( _input->isProfiling( level ) )
_profTimer[ level ].pause();
}
void GpuDel::stopTiming( ProfLevel level, double &accuTime )
{
if ( _input->isProfiling( level ) )
{
_profTimer[ level ].stop();
accuTime += _profTimer[ level ].value();
}
}
void GpuDel::restartTiming( ProfLevel level, double &accuTime )
{
stopTiming( level, accuTime );
startTiming( level );
}
struct CompareX
{
__device__ bool operator()( const Point2 &a, const Point2 &b ) const
{
return a._p[0] < b._p[0];
}
};
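// Get2Ddist returns |(b - a) x (c - a)|, i.e. twice the area of triangle abc, which is proportional
// to the distance from c to the line through a and b; the absolute value is reinterpreted as an int
// (order-preserving for non-negative floats) so thrust::max_element can compare the results as integers.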
struct Get2Ddist
{
Point2 _a;
RealType abx, aby;
Get2Ddist( const Point2 &a, const Point2 &b ) : _a(a)
{
abx = b._p[0] - a._p[0];
aby = b._p[1] - a._p[1];
}
__device__ int operator()( const Point2 &c )
{
RealType acx = c._p[0] - _a._p[0];
RealType acy = c._p[1] - _a._p[1];
RealType dist = abx * acy - aby * acx;
return __float_as_int( fabs((float) dist) );
}
};
RealType orient2dzero( const RealType *pa, const RealType *pb, const RealType *pc );
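// constructInitialTriangles picks the two points with extreme x coordinates, then the point farthest
// from that segment, verifies the three are not collinear, and appends their centroid as the
// "infinity" kernel point before building the first triangles and locating every input point.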
void GpuDel::constructInitialTriangles()
{
// First, choose two extreme points along the X axis
typedef Point2DVec::iterator Point2DIter;
thrust::pair< Point2DIter, Point2DIter > ret = thrust::minmax_element(
_pointVec.begin(), _pointVec.end(), CompareX() );
int v0 = ret.first - _pointVec.begin();
int v1 = ret.second - _pointVec.begin();
const Point2 p0 = _pointVec[v0];
const Point2 p1 = _pointVec[v1];
// Find the furthest point from v0v1
IntDVec distVec = _memPool.allocateAny<int>( _pointNum );
distVec.resize( _pointVec.size() );
thrust::transform( _pointVec.begin(), _pointVec.end(), distVec.begin(), Get2Ddist( p0, p1 ) );
const int v2 = thrust::max_element( distVec.begin(), distVec.end() ) - distVec.begin();
const Point2 p2 = _pointVec[v2];
_memPool.release( distVec );
if ( _input->isProfiling( ProfDebug ) )
{
std::cout << "Leftmost: " << v0 << " --> " << p0._p[0] << " " << p0._p[1] << std::endl;
std::cout << "Rightmost: " << v1 << " --> " << p1._p[0] << " " << p1._p[1] << std::endl;
std::cout << "Furthest 2D: " << v2 << " --> " << p2._p[0] << " " << p2._p[1] << std::endl;
}
// Check to make sure the 3 points are not collinear
RealType ori = orient2dzero( p0._p, p1._p, p2._p );
if ( ori == 0.0 )
{
std::cout << "Input too degenerate!!!\n" << std::endl;
exit(-1);
}
if ( ortToOrient( ori ) == OrientNeg )
std::swap( v0, v1 );
// Compute the centroid of v0v1v2, to be used as the kernel point.
_ptInfty._p[0] = ( p0._p[0] + p1._p[0] + p2._p[0] ) / 3.0;
_ptInfty._p[1] = ( p0._p[1] + p1._p[1] + p2._p[1] ) / 3.0;
// Add the infinity point to the end of the list
_infIdx = _pointNum - 1;
_pointVec.resize( _pointNum );
_pointVec[ _infIdx ] = _ptInfty;
if ( _input->isProfiling( ProfDiag ) )
{
std::cout << "Kernel: " << _ptInfty._p[0] << " " << _ptInfty._p[1] << std::endl;
}
// Initialize the predicate wrapper!!!
_dPredWrapper.init(
toKernelPtr( _pointVec ),
_pointNum,
_input->noSort ? NULL : toKernelPtr( _orgPointIdx ),
_infIdx );
setPredWrapperConstant( _dPredWrapper );
// Create the initial triangulation
Tri firstTri = { v0, v1, v2 };
_triVec.expand( 4 );
_oppVec.expand( 4 );
_triInfoVec.expand( 4 );
// Put the initial triangles at the Inf list
hipLaunchKernelGGL(( kerMakeFirstTri), dim3(1), dim3(1) , 0, 0,
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
toKernelPtr( _triInfoVec ),
firstTri, _infIdx
);
CudaCheckError();
// Locate initial positions of points
_vertTriVec.resize( _pointNum );
IntDVec exactCheckVec = _memPool.allocateAny<int>( _pointNum );
_counters.renew();
hipLaunchKernelGGL(( kerInitPointLocationFast), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelArray( _vertTriVec ),
toKernelPtr( exactCheckVec ),
_counters.ptr(),
firstTri
);
hipLaunchKernelGGL(( kerInitPointLocationExact), dim3(PredBlocksPerGrid), dim3(PredThreadsPerBlock) , 0, 0,
toKernelPtr( _vertTriVec ),
toKernelPtr( exactCheckVec ),
_counters.ptr(),
firstTri
);
CudaCheckError();
_memPool.release( exactCheckVec );
_availPtNum = _pointNum - 4;
//Visualizer::instance()->addFrame( _pointVec, SegmentDVec(), _triVec, _infIdx );
}
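// Allocate device buffers, find the coordinate range, optionally sort the
// points along a Morton space-filling curve, and construct the initial
// triangles before the split-and-flip phase starts.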
void GpuDel::initForFlip()
{
startTiming( ProfDefault );
_pointNum = _pointVec.size();
//_pointNum = _input->pointVec.size() + 1; // Plus the infinity point
_triMax = (int) ( _pointNum * 2 );
// Copy points to GPU
//_pointVec.resize( _pointNum ); // 1 additional slot for the infinity point
//_pointVec.copyFromHost( _input->pointVec );
// Copy constraints to GPU
_constraintVec.copyFromHost( _input->constraintVec );
// Allocate space
_triVec.resize( _triMax );
_oppVec.resize( _triMax );
_triInfoVec.resize( _triMax );
_counters.init( CounterNum );
if ( _constraintVec.size() > 0 )
_actConsVec.resize( _constraintVec.size() );
if ( _input->isProfiling( ProfDiag ) )
{
__circleCountVec.resize( _triMax );
__rejFlipVec.resize( _triMax );
}
// Preallocate some buffers in the pool
_memPool.reserve<FlipItem>( _triMax ); // flipVec
_memPool.reserve<int2>( _triMax ); // triMsgVec
_memPool.reserve<int>( _pointNum ); // vertSphereVec
_memPool.reserve<int>( _triMax ); // actTriVec
_memPool.reserve<int>( _triMax ); // Two more for common use
_memPool.reserve<int>( _triMax ); //
if ( _constraintVec.size() > 0 )
_memPool.reserve<int>( _triMax );
// Find the min and max coordinate value
typedef thrust::device_ptr< RealType > RealPtr;
RealPtr coords( ( RealType* ) toKernelPtr( _pointVec ) );
thrust::pair< RealPtr, RealPtr> ret
= thrust::minmax_element( coords, coords + _pointVec.size() * 2 );
_minVal = *ret.first;
_maxVal = *ret.second;
if ( _input->isProfiling( ProfDebug ) )
{
std::cout << "_minVal = " << _minVal << ", _maxVal == " << _maxVal << std::endl;
}
// Sort points along space curve
if ( !_input->noSort )
{
stopTiming( ProfDefault, _output->stats.initTime );
startTiming( ProfDefault );
IntDVec valueVec = _memPool.allocateAny<int>( _pointNum );
valueVec.resize( _pointVec.size() );
_orgPointIdx.resize( _pointNum );
thrust::sequence( _orgPointIdx.begin(), _orgPointIdx.end(), 0 );
thrust_transform_GetMortonNumber(
_pointVec.begin(), _pointVec.end(), valueVec.begin(),
_minVal, _maxVal );
thrust_sort_by_key( valueVec.begin(), valueVec.end(),
make_zip_iterator( make_tuple(
_orgPointIdx.begin(), _pointVec.begin() ) ) );
_memPool.release( valueVec );
stopTiming( ProfDefault, _output->stats.sortTime );
startTiming( ProfDefault );
}
// Create the initial triangulation
constructInitialTriangles();
stopTiming( ProfDefault, _output->stats.initTime );
return;
}
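// Run doFlipping() repeatedly in the given checking mode until no more flips
// are produced, then relocate the uninserted points.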
void GpuDel::doFlippingLoop( CheckDelaunayMode checkMode )
{
startTiming( ProfDefault );
_flipVec = _memPool.allocateAny<FlipItem>( _triMax );
_triMsgVec = _memPool.allocateAny<int2>( _triMax );
_actTriVec = _memPool.allocateAny<int>( _triMax );
_triMsgVec.assign( _triMax, make_int2( -1, -1 ) );
int flipLoop = 0;
_actTriMode = ActTriMarkCompact;
_diagLog = &_diagLogCompact;
while ( doFlipping( checkMode ) )
++flipLoop;
stopTiming( ProfDefault, _output->stats.flipTime );
relocateAll();
_memPool.release( _triMsgVec );
_memPool.release( _flipVec );
_memPool.release( _actTriVec );
}
void GpuDel::initProfiling()
{
_output->stats.reset();
_diagLogCompact.reset();
_diagLogCollect.reset();
_numActiveVec.clear();
_numFlipVec.clear();
_timeCheckVec.clear();
_timeFlipVec.clear();
}
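// Prepare for constraint insertion: remap constraint endpoints to the sorted
// point order, build the vertex-to-triangle map, and activate all constraints.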
void GpuDel::initForConstraintInsertion()
{
if ( !_input->noSort )
{
// Update vertex indices of constraints
IntDVec mapVec = _memPool.allocateAny<int>( _pointNum );
mapVec.resize( _pointNum );
thrust_scatterSequenceMap( _orgPointIdx, mapVec );
thrust::device_ptr<int> segInt( (int *) toKernelPtr( _constraintVec ) );
thrust::gather( segInt, segInt + _constraintVec.size() * 2, mapVec.begin(), segInt );
_memPool.release( mapVec );
// // Sort the constraints
// const int constraintNum = _constraintVec.size();
// IntDVec keyVec = _memPool.allocateAny<int>( constraintNum );
// keyVec.resize( constraintNum );
// thrust::transform( _constraintVec.begin(), _constraintVec.end(), keyVec.begin(), GetConstraintMinVert() );
// thrust::sort_by_key( keyVec.begin(), keyVec.end(), _constraintVec.begin() );
// _memPool.release( keyVec );
}
// Construct the vertex-to-triangle map
_vertTriVec.resize( _pointNum );
hipLaunchKernelGGL(( kerMapTriToVert), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelArray( _triVec ),
toKernelPtr( _vertTriVec )
);
CudaCheckError();
// Initialize list of active constraints
thrust::sequence( _actConsVec.begin(), _actConsVec.end() );
}
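// Mark triangles intersected by the active constraints (fast pass followed by
// an exact pass). Returns true if any intersection was found.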
bool GpuDel::markIntersections()
{
_counters.renew();
hipLaunchKernelGGL(( kerMarkTriConsIntersectionFast), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelArray( _actConsVec ),
toKernelPtr( _constraintVec ),
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( _vertTriVec ),
toKernelPtr( _triConsVec ),
_counters.ptr()
);
hipLaunchKernelGGL(( kerMarkTriConsIntersectionExact), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelArray( _actConsVec ),
toKernelPtr( _constraintVec ),
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( _vertTriVec ),
toKernelPtr( _triConsVec ),
_counters.ptr()
);
CudaCheckError();
return ( _counters[ CounterFlag ] == 1 );
}
void GpuDel::updatePairStatus()
{
IntDVec exactVec = _memPool.allocateAny<int>( _triMax );
_counters.renew();
hipLaunchKernelGGL(( kerUpdatePairStatusFast), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelArray( _actTriVec ),
toKernelPtr( _triConsVec ),
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( exactVec ),
_counters.ptr()
);
hipLaunchKernelGGL(( kerUpdatePairStatusExact), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelArray( _actTriVec ),
toKernelPtr( _triConsVec ),
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( exactVec ),
_counters.ptr()
);
CudaCheckError();
_memPool.release( exactVec );
}
void GpuDel::checkConsFlipping( IntDVec& triVoteVec )
{
IntDVec exactVec = _memPool.allocateAny<int>( _triMax );
_counters.renew();
hipLaunchKernelGGL(( kerCheckConsFlippingFast), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelArray( _actTriVec ),
toKernelPtr( _triConsVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
toKernelPtr( triVoteVec ),
toKernelPtr( exactVec ),
_counters.ptr()
);
hipLaunchKernelGGL(( kerCheckConsFlippingExact), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelArray( _actTriVec ),
toKernelPtr( _triConsVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
toKernelPtr( triVoteVec ),
toKernelPtr( exactVec ),
_counters.ptr()
);
CudaCheckError();
_memPool.release( exactVec );
}
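// One round of constraint flipping: update pair status, vote for flips, reject
// conflicting ones, then perform the surviving flips and patch the
// opposite-triangle information. Returns false when nothing was flipped.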
bool GpuDel::doConsFlipping( int &flipNum )
{
const int triNum = _triVec.size();
const int actNum = _actTriVec.size();
///////
// Vote for flips
///////
#pragma region Diagnostic
if ( _input->isProfiling( ProfDiag ) )
__rejFlipVec.assign( triNum, 0 );
#pragma endregion
updatePairStatus();
IntDVec triVoteVec = _memPool.allocateAny<int>( _triMax );
triVoteVec.assign( triNum, INT_MAX );
checkConsFlipping( triVoteVec );
////
// Mark rejected flips
////
IntDVec flipToTri = _memPool.allocateAny<int>( _triMax );
flipToTri.resize( actNum );
hipLaunchKernelGGL(( kerMarkRejectedConsFlips), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelArray( _actTriVec ),
toKernelPtr( _triConsVec ),
toKernelPtr( triVoteVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( _oppVec ),
toKernelPtr( flipToTri ),
_input->isProfiling( ProfDiag ) ? toKernelPtr( __rejFlipVec ) : NULL );
CudaCheckError();
_memPool.release( triVoteVec );
////
// Compact flips
////
IntDVec temp = _memPool.allocateAny<int>( _triMax, true );
flipNum = compactIfNegative( flipToTri, temp );
if ( 0 == flipNum )
{
_memPool.release( flipToTri );
return false;
}
////
// Expand flip vector
////
int orgFlipNum = _flipVec.size();
int expFlipNum = orgFlipNum + flipNum;
if ( expFlipNum > _flipVec.capacity() )
{
_flipVec.resize( 0 );
_triMsgVec.assign( _triMax, make_int2( -1, -1 ) );
orgFlipNum = 0;
expFlipNum = flipNum;
}
_flipVec.grow( expFlipNum );
// See doFlipping
_triMsgVec.resize( _triVec.size() );
////
// Flipping
////
#pragma region Diagnostic
if ( _input->isProfiling( ProfDiag ) )
{
const int rejFlipNum = thrust_sum( __rejFlipVec );
std::cout << " ConsFlips: " << flipNum << " ( " << rejFlipNum << " )"
<< std::endl;
}
#pragma endregion
// 32 ThreadsPerBlock is optimal
hipLaunchKernelGGL(( kerFlip), dim3(BlocksPerGrid), dim3(32) , 0, 0,
toKernelArray( flipToTri ),
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
NULL,
toKernelPtr( _triMsgVec ),
NULL,
toKernelPtr( _flipVec ),
toKernelPtr( _triConsVec ),
toKernelPtr( _vertTriVec ),
orgFlipNum, 0
);
CudaCheckError();
////
// Update oppTri
////
hipLaunchKernelGGL(( kerUpdateOpp), dim3(BlocksPerGrid), dim3(32) , 0, 0,
toKernelPtr( _flipVec ) + orgFlipNum,
toKernelPtr( _oppVec ),
toKernelPtr( _triMsgVec ),
toKernelPtr( flipToTri ),
orgFlipNum, flipNum
);
CudaCheckError();
_memPool.release( flipToTri );
/////////////////////////////////////////////////////////////////////
return true;
}
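// Insert the constraint segments: repeatedly mark intersected triangles and
// flip them away until no constraint is crossed by any triangle edge.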
void GpuDel::doInsertConstraints()
{
startTiming( ProfDefault );
initForConstraintInsertion();
const int triNum = _triVec.size();
_triConsVec = _memPool.allocateAny<int>( triNum );
_triConsVec.assign( triNum, -1 );
_flipVec = _memPool.allocateAny<FlipItem>( _triMax );
_triMsgVec = _memPool.allocateAny<int2>( _triMax );
_actTriVec = _memPool.allocateAny<int>( _triMax );
_triMsgVec.assign( _triMax, make_int2( -1, -1 ) );
int outerLoop = 0;
int flipLoop = 0;
int totFlipNum = 0;
int flipNum;
while ( markIntersections() )
{
if ( _input->isProfiling( ProfDiag ) )
std::cout << "Iter " << ( outerLoop+1 ) << std::endl;
// VISUALIZATION
if ( Visualizer::instance()->isEnable() )
{
pauseTiming( ProfNone );
pauseTiming( ProfDefault );
IntHVec triColorVec;
_triConsVec.copyToHost( triColorVec );
for ( int i = 0; i < triColorVec.size(); ++i )
if ( triColorVec[i] != -1 )
triColorVec[i] >>= 4;
Visualizer::instance()->addFrame( _pointVec, _constraintVec, _triVec, triColorVec, _infIdx );
startTiming( ProfDefault );
startTiming( ProfNone );
}
// Collect active triangles
thrust_copyIf_IsNotNegative( _triConsVec, _actTriVec );
int innerLoop = 0;
while ( doConsFlipping( flipNum ) )
{
totFlipNum += flipNum;
// VISUALIZATION
if ( Visualizer::instance()->isEnable() )
{
pauseTiming( ProfNone );
pauseTiming( ProfDefault );
IntHVec triColorVec;
_triConsVec.copyToHost( triColorVec );
for ( int i = 0; i < triColorVec.size(); ++i )
if ( triColorVec[i] != -1 )
triColorVec[i] >>= 4;
Visualizer::instance()->addFrame( _pointVec, _constraintVec, _triVec, triColorVec, _infIdx );
startTiming( ProfDefault );
startTiming( ProfNone );
}
++flipLoop;
++innerLoop;
if ( innerLoop == 5 ) break;
//if ( flipLoop == 1 ) break;
}
++outerLoop;
// Mark all the possibly modified triangles as Alive + Changed (3).
thrust_scatterConstantMap( _actTriVec, _triInfoVec, 3 );
//if ( outerLoop == 5 ) break;
}
//if ( outerLoop >= 20 )
//{
// for ( int i = 0; i < _actTriVec.size(); ++i )
// std::cout << _actTriVec[i] << " ";
// std::cout << std::endl;
//}
if ( _input->isProfiling( ProfDiag ) )
std::cout << "ConsFlip: Outer loop = " << outerLoop
<< ", inner loop = " << flipLoop
<< ", total flip = " << totFlipNum
<< std::endl;
_memPool.release( _triConsVec );
_memPool.release( _triMsgVec );
_memPool.release( _actTriVec );
_memPool.release( _flipVec );
stopTiming( ProfDefault, _output->stats.constraintTime );
}
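// Main insertion loop: split triangles by inserting points, interleaved with
// flipping to restore the Delaunay property, then insert constraints (if any)
// and finish with exact-arithmetic flipping passes.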
void GpuDel::splitAndFlip()
{
int insLoop = 0;
_doFlipping = !_input->insAll;
//////////////////
while ( _availPtNum > 0 )
//////////////////
{
////////////////////////
splitTri();
////////////////////////
if ( _doFlipping )
doFlippingLoop( CircleFastOrientFast );
++insLoop;
}
//////////////////////////////
if ( !_doFlipping )
doFlippingLoop( CircleFastOrientFast );
markSpecialTris();
doFlippingLoop( CircleExactOrientSoS );
//////////////////////////////
// Insert constraints if needed
if ( _constraintVec.size() > 0 )
doInsertConstraints();
doFlippingLoop( CircleFastOrientFast );
markSpecialTris();
doFlippingLoop( CircleExactOrientSoS );
#pragma region Diagnostic
if ( _input->isProfiling( ProfDiag ) )
{
std::cout << "\nInsert loops: " << insLoop << std::endl;
std::cout << "Compact: " << std::endl;
_diagLogCompact.printCount();
std::cout << "Collect: " << std::endl;
_diagLogCollect.printCount();
}
#pragma endregion
return;
}
void GpuDel::markSpecialTris()
{
startTiming( ProfDetail );
hipLaunchKernelGGL(( kerMarkSpecialTris), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelArray( _triInfoVec ),
toKernelPtr( _oppVec )
);
CudaCheckError();
stopTiming( ProfDetail, _diagLog->_t[ 0 ] );
}
void GpuDel::expandTri( int newTriNum )
{
//*** Expand triangles
_triVec.expand( newTriNum );
_oppVec.expand( newTriNum );
_triInfoVec.expand( newTriNum );
}
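// One point-insertion round: sample points to vote for their enclosing
// triangles, pick one winner per triangle, then split those triangles and
// relocate the remaining points into the new triangles.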
void GpuDel::splitTri()
{
const int MaxSamplePerTri = 100;
startTiming( ProfDefault );
////
// Rank points
////
int triNum = _triVec.size();
int noSample = _pointNum;
if ( noSample / triNum > MaxSamplePerTri )
noSample = triNum * MaxSamplePerTri;
IntDVec triCircleVec = _memPool.allocateAny<int>( _triMax );
triCircleVec.assign( triNum, INT_MIN );
IntDVec vertCircleVec = _memPool.allocateAny<int>( _pointNum );
vertCircleVec.resize( noSample );
hipLaunchKernelGGL(( kerVoteForPoint), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelArray( _vertTriVec ),
toKernelPtr( _triVec ),
toKernelPtr( vertCircleVec ),
toKernelPtr( triCircleVec ),
noSample
);
CudaCheckError();
IntDVec triToVert = _memPool.allocateAny<int>( _triMax );
triToVert.assign( triNum, INT_MAX );
hipLaunchKernelGGL(( kerPickWinnerPoint), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelArray( _vertTriVec ),
toKernelPtr( vertCircleVec ),
toKernelPtr( triCircleVec ),
toKernelPtr( triToVert ),
noSample
);
CudaCheckError();
_memPool.release( vertCircleVec );
_memPool.release( triCircleVec );
////
// Collect triangles with insertions
////
IntDVec splitTriVec = _memPool.allocateAny<int>( _pointNum );
_insNum = thrust_copyIf_TriHasVert( triToVert, splitTriVec );
const int extraTriNum = DIM * _insNum;
const int splitTriNum = triNum + extraTriNum;
if ( _input->isProfiling( ProfDiag ) )
{
std::cout << "Insert: " << _insNum
<< " Tri from: " << triNum
<< " to: " << splitTriNum << std::endl;
}
// If only a few points remain, stop flipping between splits
if ( _availPtNum - _insNum < _insNum &&
_insNum < 0.1 * _pointNum )
{
_doFlipping = false;
//std::cout << "Stop flipping!" << std::endl;
}
if ( !_input->noReorder && _doFlipping )
{
stopTiming( ProfDefault, _output->stats.splitTime );
shiftTri( triToVert, splitTriVec );
triNum = -1; // Mark that we have shifted the array
startTiming( ProfDefault );
}
////
// Make map
////
IntDVec insTriMap = _memPool.allocateAny<int>( _triMax );
insTriMap.assign( ( triNum < 0 ) ? splitTriNum : triNum, -1 );
thrust_scatterSequenceMap( splitTriVec, insTriMap );
////
// Expand if space needed
////
expandTri( splitTriNum );
////
// Update the location of the points
////
stopTiming( ProfDefault, _output->stats.splitTime );
startTiming( ProfDefault );
IntDVec exactCheckVec = _memPool.allocateAny<int>( _pointNum );
_counters.renew();
hipLaunchKernelGGL(( kerSplitPointsFast), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelArray( _vertTriVec ),
toKernelPtr( triToVert ),
toKernelPtr( _triVec ),
toKernelPtr( insTriMap ),
toKernelPtr( exactCheckVec ),
_counters.ptr(),
triNum, _insNum
);
hipLaunchKernelGGL(( kerSplitPointsExactSoS), dim3(PredBlocksPerGrid), dim3(PredThreadsPerBlock) , 0, 0,
toKernelPtr( _vertTriVec ),
toKernelPtr( triToVert ),
toKernelPtr( _triVec ),
toKernelPtr( insTriMap ),
toKernelPtr( exactCheckVec ),
_counters.ptr(),
triNum, _insNum
);
CudaCheckError();
_memPool.release( exactCheckVec );
stopTiming( ProfDefault, _output->stats.relocateTime );
startTiming( ProfDefault );
////
// Split old into new triangle and copy them to new array
////
hipLaunchKernelGGL(( kerSplitTri), dim3(BlocksPerGrid), dim3(32) , 0, 0,
toKernelArray( splitTriVec ),
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( insTriMap ),
toKernelPtr( triToVert ),
triNum, _insNum
);
CudaCheckError();
_memPool.release( triToVert );
_memPool.release( insTriMap );
_memPool.release( splitTriVec );
_availPtNum -= _insNum;
stopTiming( ProfDefault, _output->stats.splitTime );
Visualizer::instance()->addFrame( _pointVec, SegmentDVec(), _triVec, _infIdx );
return;
}
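// One round of Delaunay flipping: compact the active triangles, vote for flips
// (fast or exact mode), discard rejected ones, then apply the flips and update
// the opposite-triangle links. Returns false when no work is left.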
bool GpuDel::doFlipping( CheckDelaunayMode checkMode )
{
startTiming( ProfDetail );
++_diagLog->_flipLoop;
const int triNum = _triVec.size();
////
// Compact active triangles
////
switch ( _actTriMode )
{
case ActTriMarkCompact:
thrust_copyIf_IsActiveTri( _triInfoVec, _actTriVec );
break;
case ActTriCollectCompact:
IntDVec temp = _memPool.allocateAny<int>( _triMax, true );
compactIfNegative( _actTriVec, temp );
break;
}
int orgActNum = _actTriVec.size();
#pragma region Diagnostic
if ( _input->isProfiling( ProfDiag ) )
{
_numActiveVec.push_back( orgActNum );
if ( orgActNum == 0 || ( checkMode != CircleExactOrientSoS &&
orgActNum < PredBlocksPerGrid * PredThreadsPerBlock ) )
{
_numFlipVec.push_back( 0 );
_timeCheckVec.push_back( 0.0 );
_timeFlipVec.push_back( 0.0 );
_numCircleVec.push_back( 0 );
}
}
#pragma endregion
restartTiming( ProfDetail, _diagLog->_t[ 0 ] );
/////////////////////////////////////////////////////////////////////
////
// Check actNum, switch mode or quit if necessary
////
// No more work
if ( 0 == orgActNum )
return false;
// Little work, leave it for the Exact iterations
if ( checkMode != CircleExactOrientSoS &&
orgActNum < PredBlocksPerGrid * PredThreadsPerBlock )
return false;
// See if there is little enough work to switch to collect mode.
// Safety check: make sure there's enough space to collect
if ( orgActNum < BlocksPerGrid * ThreadsPerBlock &&
orgActNum * 2 < _actTriVec.capacity() &&
orgActNum * 2 < triNum )
{
_actTriMode = ActTriCollectCompact;
_diagLog = &_diagLogCollect;
}
else
{
_actTriMode = ActTriMarkCompact;
_diagLog = &_diagLogCompact;
}
////
// Vote for flips
////
#pragma region Diagnostic
if ( _input->isProfiling( ProfDiag ) )
{
__circleCountVec.assign( triNum, 0 );
__rejFlipVec.assign( triNum, 0 );
}
#pragma endregion
IntDVec triVoteVec = _memPool.allocateAny<int>( _triMax );
triVoteVec.assign( triNum, INT_MAX );
dispatchCheckDelaunay( checkMode, orgActNum, triVoteVec );
double prevTime = _diagLog->_t[ 1 ];
restartTiming( ProfDetail, _diagLog->_t[ 1 ] );
/////////////////////////////////////////////////////////////////////
////
// Mark rejected flips
////
IntDVec flipToTri = _memPool.allocateAny<int>( _triMax );
flipToTri.resize( orgActNum );
hipLaunchKernelGGL(( kerMarkRejectedFlips), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelPtr( _actTriVec ),
toKernelPtr( _oppVec ),
toKernelPtr( triVoteVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( flipToTri ),
orgActNum,
_input->isProfiling( ProfDiag ) ? toKernelPtr( __rejFlipVec ) : NULL );
CudaCheckError();
_memPool.release( triVoteVec );
restartTiming( ProfDetail, _diagLog->_t[ 2 ] );
/////////////////////////////////////////////////////////////////////
////
// Compact flips
////
IntDVec temp = _memPool.allocateAny<int>( _triMax, true );
const int flipNum = compactIfNegative( flipToTri, temp );
if ( _input->isProfiling( ProfDiag ) )
{
_numFlipVec.push_back( flipNum );
_timeCheckVec.push_back( _diagLog->_t[ 1 ] - prevTime );
}
restartTiming( ProfDetail, _diagLog->_t[ 3 ] );
/////////////////////////////////////////////////////////////////////
////
// Preparation for the actual flipping. Include several steps
////
#pragma region Diagnostic
if ( _input->isProfiling( ProfDiag ) )
{
const int circleNum = thrust_sum( __circleCountVec );
_diagLog->_circleCount += circleNum;
const int rejFlipNum = thrust_sum( __rejFlipVec );
_diagLog->_rejFlipCount += rejFlipNum;
_diagLog->_totFlipNum += flipNum;
std::cout << "Acts: " << orgActNum
<< " Flips: " << flipNum << " ( " << rejFlipNum << " )"
<< " circle: " << circleNum
<< " Exact: "
<< ( checkMode == CircleExactOrientSoS ? _counters[ CounterExact ] : -1 )
<< std::endl;
_numCircleVec.push_back( circleNum );
startTiming( ProfDetail );
}
#pragma endregion
if ( 0 == flipNum )
{
_numCircleVec.push_back( 0 );
_timeFlipVec.push_back( 0 );
_memPool.release( flipToTri );
return false;
}
// Expand flip vector
int orgFlipNum = _flipVec.size();
int expFlipNum = orgFlipNum + flipNum;
if ( expFlipNum > _flipVec.capacity() )
{
stopTiming( ProfDetail, _diagLog->_t[ 4 ] );
stopTiming( ProfDefault, _output->stats.flipTime );
relocateAll();
startTiming( ProfDefault );
startTiming( ProfDetail );
orgFlipNum = 0;
expFlipNum = flipNum;
}
_flipVec.grow( expFlipNum );
// _triMsgVec contains two components.
// - .x is the encoded new neighbor information
// - .y is the flipIdx as in the flipVec (i.e. globIdx)
// As such, we do not need to initialize it to -1 to
// know which tris are not flipped in the current round.
// We can rely on the flipIdx being greater or smaller than orgFlipIdx.
// Note that we have to initialize everything to -1
// when we clear the flipVec and reset the flip indexing.
//
_triMsgVec.resize( _triVec.size() );
////
// Expand active tri vector
////
if ( _actTriMode == ActTriCollectCompact )
_actTriVec.grow( orgActNum + flipNum );
restartTiming( ProfDetail, _diagLog->_t[ 4 ] );
/////////////////////////////////////////////////////////////////////
////
// Flipping
////
// 32 ThreadsPerBlock is optimal
hipLaunchKernelGGL(( kerFlip), dim3(BlocksPerGrid), dim3(32) , 0, 0,
toKernelArray( flipToTri ),
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( _triMsgVec ),
( _actTriMode == ActTriCollectCompact ) ? toKernelPtr( _actTriVec ) : NULL,
toKernelPtr( _flipVec ),
NULL, NULL,
orgFlipNum, orgActNum
);
CudaCheckError();
_orgFlipNum.push_back( orgFlipNum );
////
// Update oppTri
////
hipLaunchKernelGGL(( kerUpdateOpp), dim3(BlocksPerGrid), dim3(32) , 0, 0,
toKernelPtr( _flipVec ) + orgFlipNum,
toKernelPtr( _oppVec ),
toKernelPtr( _triMsgVec ),
toKernelPtr( flipToTri ),
orgFlipNum, flipNum
);
CudaCheckError();
_memPool.release( flipToTri );
prevTime = _diagLog->_t[ 5 ];
stopTiming( ProfDetail, _diagLog->_t[ 5 ] );
if ( _input->isProfiling( ProfDiag ) )
_timeFlipVec.push_back( _diagLog->_t[ 5 ] - prevTime );
/////////////////////////////////////////////////////////////////////
Visualizer::instance()->addFrame( _pointVec, SegmentDVec(), _triVec, _infIdx );
return true;
}
void GpuDel::dispatchCheckDelaunay
(
CheckDelaunayMode checkMode,
int orgActNum,
IntDVec& triVoteVec
)
{
switch ( checkMode )
{
case CircleFastOrientFast:
hipLaunchKernelGGL(( kerCheckDelaunayFast), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelPtr( _actTriVec ),
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( triVoteVec ),
orgActNum,
_input->isProfiling( ProfDiag ) ? toKernelPtr( __circleCountVec ) : NULL
);
CudaCheckError();
break;
case CircleExactOrientSoS:
// Reuse this array to save memory
Int2DVec &exactCheckVi = _triMsgVec;
_counters.renew();
hipLaunchKernelGGL(( kerCheckDelaunayExact_Fast), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelPtr( _actTriVec ),
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( triVoteVec ),
toKernelPtr( exactCheckVi ),
orgActNum,
_counters.ptr(),
_input->isProfiling( ProfDiag ) ? toKernelPtr( __circleCountVec ) : NULL
);
hipLaunchKernelGGL(( kerCheckDelaunayExact_Exact), dim3(PredBlocksPerGrid), dim3(PredThreadsPerBlock) , 0, 0,
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
toKernelPtr( triVoteVec ),
toKernelPtr( exactCheckVi ),
_counters.ptr(),
_input->isProfiling( ProfDiag ) ? toKernelPtr( __circleCountVec ) : NULL
);
CudaCheckError();
break;
}
}
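// Scatter kernel used when growing the triangle arrays: element i of src is
// written to dest[ i + shiftVec[i] ].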
template< typename T >
__global__ void
kerShift
(
KerIntArray shiftVec,
T* src,
T* dest
)
{
for ( int idx = getGlobThreadIdx(); idx < shiftVec._num; idx += getThreadNum() )
{
const int shift = shiftVec._arr[ idx ];
dest[ idx + shift ] = src[ idx ];
}
}
template< typename T >
void GpuDel::shiftExpandVec( IntDVec &shiftVec, DevVector< T > &dataVec, int size )
{
DevVector< T > tempVec = _memPool.allocateAny<T>( size );
tempVec.resize( size );
hipLaunchKernelGGL(( kerShift), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelArray( shiftVec ),
toKernelPtr( dataVec ),
toKernelPtr( tempVec )
);
CudaCheckError();
dataVec.copyFrom( tempVec );
_memPool.release( tempVec );
}
void GpuDel::shiftOppVec( IntDVec &shiftVec, TriOppDVec &dataVec, int size )
{
TriOppDVec tempVec = _memPool.allocateAny< TriOpp >( size );
tempVec.resize( size );
hipLaunchKernelGGL(( kerShiftOpp), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelArray( shiftVec ),
toKernelPtr( dataVec ),
toKernelPtr( tempVec ),
size
);
CudaCheckError();
dataVec.copyFrom( tempVec );
_memPool.release( tempVec );
}
void GpuDel::shiftTri( IntDVec &triToVert, IntDVec &splitTriVec )
{
startTiming( ProfDefault );
const int triNum = _triVec.size() + 2 * splitTriVec.size();
IntDVec shiftVec = _memPool.allocateAny<int>( _triMax );
thrust_scan_TriHasVert( triToVert, shiftVec );
shiftExpandVec( shiftVec, _triVec, triNum );
shiftExpandVec( shiftVec, _triInfoVec, triNum );
shiftExpandVec( shiftVec, triToVert, triNum );
shiftOppVec( shiftVec, _oppVec, triNum );
hipLaunchKernelGGL(( kerShiftTriIdx), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelArray( _vertTriVec ),
toKernelPtr( shiftVec )
);
CudaCheckError();
hipLaunchKernelGGL(( kerShiftTriIdx), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelArray( splitTriVec ),
toKernelPtr( shiftVec )
);
CudaCheckError();
_memPool.release( shiftVec );
stopTiming( ProfDefault, _output->stats.sortTime );
}
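// Walk the recorded flips backwards to rebuild a triangle-to-flip trace and
// relocate every uninserted point to its current enclosing triangle, then
// reset the flip records and the triangle message buffer.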
void GpuDel::relocateAll()
{
if ( _flipVec.size() == 0 )
return ;
startTiming( ProfDefault );
if ( _availPtNum > 0 )
{
const int triNum = _triVec.size();
IntDVec triToFlip = _memPool.allocateAny<int>( _triMax );
triToFlip.assign( triNum, -1 );
// Rebuild the pointers from back to forth
int nextFlipNum = _flipVec.size();
for ( int i = _orgFlipNum.size() - 1; i >= 0; --i )
{
int prevFlipNum = _orgFlipNum[ i ];
int flipNum = nextFlipNum - prevFlipNum;
hipLaunchKernelGGL(( kerUpdateFlipTrace), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelPtr( _flipVec ),
toKernelPtr( triToFlip ),
prevFlipNum,
flipNum
);
nextFlipNum = prevFlipNum;
}
CudaCheckError();
// Relocate points
IntDVec exactCheckVec = _memPool.allocateAny<int>( _pointNum );
_counters.renew();
hipLaunchKernelGGL(( kerRelocatePointsFast), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelArray( _vertTriVec ),
toKernelPtr( triToFlip ),
toKernelPtr( _flipVec ),
toKernelPtr( exactCheckVec ),
_counters.ptr()
);
hipLaunchKernelGGL(( kerRelocatePointsExact), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelPtr( _vertTriVec ),
toKernelPtr( triToFlip ),
toKernelPtr( _flipVec ),
toKernelPtr( exactCheckVec ),
_counters.ptr()
);
CudaCheckError();
_memPool.release( exactCheckVec );
_memPool.release( triToFlip );
}
// Just clean up the flips
_flipVec.resize( 0 );
_orgFlipNum.clear();
// Reset the triMsgVec
_triMsgVec.assign( _triMax, make_int2( -1, -1 ) );
stopTiming( ProfDefault, _output->stats.relocateTime );
}
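// Remove dead triangles: prefix-scan the alive flags, collect free slots,
// build a compaction map, and move the surviving triangles (and their opposite
// info) into a contiguous prefix of the arrays.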
void GpuDel::compactTris()
{
const int triNum = _triVec.size();
IntDVec prefixVec = _memPool.allocateAny<int>( _triMax );
prefixVec.resize( triNum );
thrust_scan_TriAliveStencil( _triInfoVec, prefixVec );
int newTriNum = prefixVec[ triNum - 1 ];
int freeNum = triNum - newTriNum;
IntDVec freeVec = _memPool.allocateAny<int>( _triMax );
freeVec.resize( freeNum );
hipLaunchKernelGGL(( kerCollectFreeSlots), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelPtr( _triInfoVec ),
toKernelPtr( prefixVec ),
toKernelPtr( freeVec ),
newTriNum
);
CudaCheckError();
// Make map
hipLaunchKernelGGL(( kerMakeCompactMap), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelArray( _triInfoVec ),
toKernelPtr( prefixVec ),
toKernelPtr( freeVec ),
newTriNum
);
CudaCheckError();
// Reorder the triangles
hipLaunchKernelGGL(( kerCompactTris), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelArray( _triInfoVec ),
toKernelPtr( prefixVec ),
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
newTriNum
);
CudaCheckError();
_triInfoVec.resize( newTriNum );
_triVec.resize( newTriNum );
_oppVec.resize( newTriNum );
_memPool.release( freeVec );
_memPool.release( prefixVec );
}
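// Final output step: mark triangles containing the infinity point, compact the
// triangle arrays, restore original vertex indices if the points were sorted,
// and record the infinity point in the output.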
void GpuDel::outputToHost()
{
startTiming( ProfDefault );
hipLaunchKernelGGL(( kerMarkInfinityTri), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelArray( _triVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( _oppVec ),
_infIdx
);
CudaCheckError();
compactTris();
if ( !_input->noSort )
{
// Change the indices back to the original order
hipLaunchKernelGGL(( kerUpdateVertIdx), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
toKernelArray( _triVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( _orgPointIdx )
);
CudaCheckError();
}
////
// Copy to host
//_triVec.copyToHost( _output->triVec );
//_oppVec.copyToHost( _output->triOppVec );
// Output Infty point
_output->ptInfty = _ptInfty;
stopTiming( ProfDefault, _output->stats.outTime );
////
return;
}
| b3aee0986826b1a576685e64b1a28161f3224392.cu | #include "../GpuDelaunay.h"
#include<iomanip>
#include<iostream>
#include <thrust/gather.h>
#include "KerCommon.h"
#include "KerDivision.h"
#include "KerPredicates.h"
#include "ThrustWrapper.h"
#include "../../Visualizer.h"
////
// GpuDel methods
////
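// Free all device-side buffers and per-run bookkeeping so the object can be reused.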
void GpuDel::cleanup()
{
thrust_free_all();
_memPool.free();
_pointVec.free();
_constraintVec.free();
_triVec.free();
_oppVec.free();
_triInfoVec.free();
_orgPointIdx.free();
_vertTriVec.free();
_counters.free();
_actConsVec.free();
_orgFlipNum.clear();
_dPredWrapper.cleanup();
__circleCountVec.free();
__rejFlipVec.free();
_numActiveVec.clear();
_numFlipVec.clear();
_numCircleVec.clear();
_timeCheckVec.clear();
_timeFlipVec.clear();
}
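// Entry point: wraps the caller's point vector without taking ownership,
// triangulates it with sorting and reordering disabled, and hands the
// resulting triangle vector back through the output pointer.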
void GpuDel::compute
(
Point2DVec &pointVec,
TriDVec* output
)
{
GDel2DInput input;
input.noSort = true;
input.noReorder = true;
// Set L1 for kernels
cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 );
_pointVec._ptr = pointVec._ptr;
_pointVec._size = pointVec._size;
_pointVec._capacity = pointVec._capacity;
_pointVec._owned = false;
_input = &input;
_output = new GDel2DOutput();
//_output = output;
initProfiling();
startTiming( ProfDefault );
initForFlip();
splitAndFlip();
outputToHost();
stopTiming( ProfDefault, _output->stats.totalTime );
if ( _input->isProfiling( ProfDetail ) )
{
std::cout << " FlipCompact time: ";
_diagLogCompact.printTime();
std::cout << std::endl;
std::cout << " FlipCollect time: ";
_diagLogCollect.printTime();
std::cout << std::endl;
}
std::cout << "# GPU Triangles: " << _triVec.size() << std::endl;
*(output) = _triVec;
_triVec._owned = false;
cleanup();
return;
}
void GpuDel::startTiming( ProfLevel level )
{
if ( _input->isProfiling( level ) )
_profTimer[ level ].start();
}
void GpuDel::pauseTiming( ProfLevel level )
{
if ( _input->isProfiling( level ) )
_profTimer[ level ].pause();
}
void GpuDel::stopTiming( ProfLevel level, double &accuTime )
{
if ( _input->isProfiling( level ) )
{
_profTimer[ level ].stop();
accuTime += _profTimer[ level ].value();
}
}
void GpuDel::restartTiming( ProfLevel level, double &accuTime )
{
stopTiming( level, accuTime );
startTiming( level );
}
struct CompareX
{
__device__ bool operator()( const Point2 &a, const Point2 &b ) const
{
return a._p[0] < b._p[0];
}
};
struct Get2Ddist
{
Point2 _a;
RealType abx, aby;
Get2Ddist( const Point2 &a, const Point2 &b ) : _a(a)
{
abx = b._p[0] - a._p[0];
aby = b._p[1] - a._p[1];
}
__device__ int operator()( const Point2 &c )
{
RealType acx = c._p[0] - _a._p[0];
RealType acy = c._p[1] - _a._p[1];
RealType dist = abx * acy - aby * acx;
return __float_as_int( fabs((float) dist) );
}
};
RealType orient2dzero( const RealType *pa, const RealType *pb, const RealType *pc );
void GpuDel::constructInitialTriangles()
{
// First, choose two extreme points along the X axis
typedef Point2DVec::iterator Point2DIter;
thrust::pair< Point2DIter, Point2DIter > ret = thrust::minmax_element(
_pointVec.begin(), _pointVec.end(), CompareX() );
int v0 = ret.first - _pointVec.begin();
int v1 = ret.second - _pointVec.begin();
const Point2 p0 = _pointVec[v0];
const Point2 p1 = _pointVec[v1];
// Find the furthest point from v0v1
IntDVec distVec = _memPool.allocateAny<int>( _pointNum );
distVec.resize( _pointVec.size() );
thrust::transform( _pointVec.begin(), _pointVec.end(), distVec.begin(), Get2Ddist( p0, p1 ) );
const int v2 = thrust::max_element( distVec.begin(), distVec.end() ) - distVec.begin();
const Point2 p2 = _pointVec[v2];
_memPool.release( distVec );
if ( _input->isProfiling( ProfDebug ) )
{
std::cout << "Leftmost: " << v0 << " --> " << p0._p[0] << " " << p0._p[1] << std::endl;
std::cout << "Rightmost: " << v1 << " --> " << p1._p[0] << " " << p1._p[1] << std::endl;
std::cout << "Furthest 2D: " << v2 << " --> " << p2._p[0] << " " << p2._p[1] << std::endl;
}
// Check to make sure the 3 points are not collinear
RealType ori = orient2dzero( p0._p, p1._p, p2._p );
if ( ori == 0.0 )
{
std::cout << "Input too degenerate!!!\n" << std::endl;
exit(-1);
}
if ( ortToOrient( ori ) == OrientNeg )
std::swap( v0, v1 );
// Compute the centroid of v0v1v2, to be used as the kernel point.
_ptInfty._p[0] = ( p0._p[0] + p1._p[0] + p2._p[0] ) / 3.0;
_ptInfty._p[1] = ( p0._p[1] + p1._p[1] + p2._p[1] ) / 3.0;
// Add the infinity point to the end of the list
_infIdx = _pointNum - 1;
_pointVec.resize( _pointNum );
_pointVec[ _infIdx ] = _ptInfty;
if ( _input->isProfiling( ProfDiag ) )
{
std::cout << "Kernel: " << _ptInfty._p[0] << " " << _ptInfty._p[1] << std::endl;
}
// Initialize the predicate wrapper!!!
_dPredWrapper.init(
toKernelPtr( _pointVec ),
_pointNum,
_input->noSort ? NULL : toKernelPtr( _orgPointIdx ),
_infIdx );
setPredWrapperConstant( _dPredWrapper );
// Create the initial triangulation
Tri firstTri = { v0, v1, v2 };
_triVec.expand( 4 );
_oppVec.expand( 4 );
_triInfoVec.expand( 4 );
// Put the initial triangles at the Inf list
kerMakeFirstTri<<< 1, 1 >>>(
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
toKernelPtr( _triInfoVec ),
firstTri, _infIdx
);
CudaCheckError();
// Locate initial positions of points
_vertTriVec.resize( _pointNum );
IntDVec exactCheckVec = _memPool.allocateAny<int>( _pointNum );
_counters.renew();
kerInitPointLocationFast<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( _vertTriVec ),
toKernelPtr( exactCheckVec ),
_counters.ptr(),
firstTri
);
kerInitPointLocationExact<<< PredBlocksPerGrid, PredThreadsPerBlock >>>(
toKernelPtr( _vertTriVec ),
toKernelPtr( exactCheckVec ),
_counters.ptr(),
firstTri
);
CudaCheckError();
_memPool.release( exactCheckVec );
_availPtNum = _pointNum - 4;
//Visualizer::instance()->addFrame( _pointVec, SegmentDVec(), _triVec, _infIdx );
}
void GpuDel::initForFlip()
{
startTiming( ProfDefault );
_pointNum = _pointVec.size();
//_pointNum = _input->pointVec.size() + 1; // Plus the infinity point
_triMax = (int) ( _pointNum * 2 );
// Copy points to GPU
//_pointVec.resize( _pointNum ); // 1 additional slot for the infinity point
//_pointVec.copyFromHost( _input->pointVec );
// Copy constraints to GPU
_constraintVec.copyFromHost( _input->constraintVec );
// Allocate space
_triVec.resize( _triMax );
_oppVec.resize( _triMax );
_triInfoVec.resize( _triMax );
_counters.init( CounterNum );
if ( _constraintVec.size() > 0 )
_actConsVec.resize( _constraintVec.size() );
if ( _input->isProfiling( ProfDiag ) )
{
__circleCountVec.resize( _triMax );
__rejFlipVec.resize( _triMax );
}
// Preallocate some buffers in the pool
_memPool.reserve<FlipItem>( _triMax ); // flipVec
_memPool.reserve<int2>( _triMax ); // triMsgVec
_memPool.reserve<int>( _pointNum ); // vertSphereVec
_memPool.reserve<int>( _triMax ); // actTriVec
_memPool.reserve<int>( _triMax ); // Two more for common use
_memPool.reserve<int>( _triMax ); //
if ( _constraintVec.size() > 0 )
_memPool.reserve<int>( _triMax );
// Find the min and max coordinate value
typedef thrust::device_ptr< RealType > RealPtr;
RealPtr coords( ( RealType* ) toKernelPtr( _pointVec ) );
thrust::pair< RealPtr, RealPtr> ret
= thrust::minmax_element( coords, coords + _pointVec.size() * 2 );
_minVal = *ret.first;
_maxVal = *ret.second;
if ( _input->isProfiling( ProfDebug ) )
{
std::cout << "_minVal = " << _minVal << ", _maxVal == " << _maxVal << std::endl;
}
// Sort points along space curve
if ( !_input->noSort )
{
stopTiming( ProfDefault, _output->stats.initTime );
startTiming( ProfDefault );
IntDVec valueVec = _memPool.allocateAny<int>( _pointNum );
valueVec.resize( _pointVec.size() );
_orgPointIdx.resize( _pointNum );
thrust::sequence( _orgPointIdx.begin(), _orgPointIdx.end(), 0 );
thrust_transform_GetMortonNumber(
_pointVec.begin(), _pointVec.end(), valueVec.begin(),
_minVal, _maxVal );
thrust_sort_by_key( valueVec.begin(), valueVec.end(),
make_zip_iterator( make_tuple(
_orgPointIdx.begin(), _pointVec.begin() ) ) );
_memPool.release( valueVec );
stopTiming( ProfDefault, _output->stats.sortTime );
startTiming( ProfDefault );
}
// Create the initial triangulation
constructInitialTriangles();
stopTiming( ProfDefault, _output->stats.initTime );
return;
}
void GpuDel::doFlippingLoop( CheckDelaunayMode checkMode )
{
startTiming( ProfDefault );
_flipVec = _memPool.allocateAny<FlipItem>( _triMax );
_triMsgVec = _memPool.allocateAny<int2>( _triMax );
_actTriVec = _memPool.allocateAny<int>( _triMax );
_triMsgVec.assign( _triMax, make_int2( -1, -1 ) );
int flipLoop = 0;
_actTriMode = ActTriMarkCompact;
_diagLog = &_diagLogCompact;
while ( doFlipping( checkMode ) )
++flipLoop;
stopTiming( ProfDefault, _output->stats.flipTime );
relocateAll();
_memPool.release( _triMsgVec );
_memPool.release( _flipVec );
_memPool.release( _actTriVec );
}
void GpuDel::initProfiling()
{
_output->stats.reset();
_diagLogCompact.reset();
_diagLogCollect.reset();
_numActiveVec.clear();
_numFlipVec.clear();
_timeCheckVec.clear();
_timeFlipVec.clear();
}
void GpuDel::initForConstraintInsertion()
{
if ( !_input->noSort )
{
// Update vertex indices of constraints
IntDVec mapVec = _memPool.allocateAny<int>( _pointNum );
mapVec.resize( _pointNum );
thrust_scatterSequenceMap( _orgPointIdx, mapVec );
thrust::device_ptr<int> segInt( (int *) toKernelPtr( _constraintVec ) );
thrust::gather( segInt, segInt + _constraintVec.size() * 2, mapVec.begin(), segInt );
_memPool.release( mapVec );
// // Sort the constraints
// const int constraintNum = _constraintVec.size();
// IntDVec keyVec = _memPool.allocateAny<int>( constraintNum );
// keyVec.resize( constraintNum );
// thrust::transform( _constraintVec.begin(), _constraintVec.end(), keyVec.begin(), GetConstraintMinVert() );
// thrust::sort_by_key( keyVec.begin(), keyVec.end(), _constraintVec.begin() );
// _memPool.release( keyVec );
}
// Construct the vertex-to-triangle map
_vertTriVec.resize( _pointNum );
kerMapTriToVert<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( _triVec ),
toKernelPtr( _vertTriVec )
);
CudaCheckError();
// Initialize list of active constraints
thrust::sequence( _actConsVec.begin(), _actConsVec.end() );
}
bool GpuDel::markIntersections()
{
_counters.renew();
kerMarkTriConsIntersectionFast<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( _actConsVec ),
toKernelPtr( _constraintVec ),
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( _vertTriVec ),
toKernelPtr( _triConsVec ),
_counters.ptr()
);
kerMarkTriConsIntersectionExact<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( _actConsVec ),
toKernelPtr( _constraintVec ),
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( _vertTriVec ),
toKernelPtr( _triConsVec ),
_counters.ptr()
);
CudaCheckError();
return ( _counters[ CounterFlag ] == 1 );
}
void GpuDel::updatePairStatus()
{
IntDVec exactVec = _memPool.allocateAny<int>( _triMax );
_counters.renew();
kerUpdatePairStatusFast<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( _actTriVec ),
toKernelPtr( _triConsVec ),
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( exactVec ),
_counters.ptr()
);
kerUpdatePairStatusExact<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( _actTriVec ),
toKernelPtr( _triConsVec ),
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( exactVec ),
_counters.ptr()
);
CudaCheckError();
_memPool.release( exactVec );
}
void GpuDel::checkConsFlipping( IntDVec& triVoteVec )
{
IntDVec exactVec = _memPool.allocateAny<int>( _triMax );
_counters.renew();
kerCheckConsFlippingFast<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( _actTriVec ),
toKernelPtr( _triConsVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
toKernelPtr( triVoteVec ),
toKernelPtr( exactVec ),
_counters.ptr()
);
kerCheckConsFlippingExact<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( _actTriVec ),
toKernelPtr( _triConsVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
toKernelPtr( triVoteVec ),
toKernelPtr( exactVec ),
_counters.ptr()
);
CudaCheckError();
_memPool.release( exactVec );
}
bool GpuDel::doConsFlipping( int &flipNum )
{
const int triNum = _triVec.size();
const int actNum = _actTriVec.size();
///////
// Vote for flips
///////
#pragma region Diagnostic
if ( _input->isProfiling( ProfDiag ) )
__rejFlipVec.assign( triNum, 0 );
#pragma endregion
updatePairStatus();
IntDVec triVoteVec = _memPool.allocateAny<int>( _triMax );
triVoteVec.assign( triNum, INT_MAX );
checkConsFlipping( triVoteVec );
////
// Mark rejected flips
////
IntDVec flipToTri = _memPool.allocateAny<int>( _triMax );
flipToTri.resize( actNum );
kerMarkRejectedConsFlips<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( _actTriVec ),
toKernelPtr( _triConsVec ),
toKernelPtr( triVoteVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( _oppVec ),
toKernelPtr( flipToTri ),
_input->isProfiling( ProfDiag ) ? toKernelPtr( __rejFlipVec ) : NULL );
CudaCheckError();
_memPool.release( triVoteVec );
////
// Compact flips
////
IntDVec temp = _memPool.allocateAny<int>( _triMax, true );
flipNum = compactIfNegative( flipToTri, temp );
if ( 0 == flipNum )
{
_memPool.release( flipToTri );
return false;
}
////
// Expand flip vector
////
int orgFlipNum = _flipVec.size();
int expFlipNum = orgFlipNum + flipNum;
if ( expFlipNum > _flipVec.capacity() )
{
_flipVec.resize( 0 );
_triMsgVec.assign( _triMax, make_int2( -1, -1 ) );
orgFlipNum = 0;
expFlipNum = flipNum;
}
_flipVec.grow( expFlipNum );
// See doFlipping
_triMsgVec.resize( _triVec.size() );
////
// Flipping
////
#pragma region Diagnostic
if ( _input->isProfiling( ProfDiag ) )
{
const int rejFlipNum = thrust_sum( __rejFlipVec );
std::cout << " ConsFlips: " << flipNum << " ( " << rejFlipNum << " )"
<< std::endl;
}
#pragma endregion
// 32 ThreadsPerBlock is optimal
kerFlip<<< BlocksPerGrid, 32 >>>(
toKernelArray( flipToTri ),
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
NULL,
toKernelPtr( _triMsgVec ),
NULL,
toKernelPtr( _flipVec ),
toKernelPtr( _triConsVec ),
toKernelPtr( _vertTriVec ),
orgFlipNum, 0
);
CudaCheckError();
////
// Update oppTri
////
kerUpdateOpp<<< BlocksPerGrid, 32 >>>(
toKernelPtr( _flipVec ) + orgFlipNum,
toKernelPtr( _oppVec ),
toKernelPtr( _triMsgVec ),
toKernelPtr( flipToTri ),
orgFlipNum, flipNum
);
CudaCheckError();
_memPool.release( flipToTri );
/////////////////////////////////////////////////////////////////////
return true;
}
void GpuDel::doInsertConstraints()
{
startTiming( ProfDefault );
initForConstraintInsertion();
const int triNum = _triVec.size();
_triConsVec = _memPool.allocateAny<int>( triNum );
_triConsVec.assign( triNum, -1 );
_flipVec = _memPool.allocateAny<FlipItem>( _triMax );
_triMsgVec = _memPool.allocateAny<int2>( _triMax );
_actTriVec = _memPool.allocateAny<int>( _triMax );
_triMsgVec.assign( _triMax, make_int2( -1, -1 ) );
int outerLoop = 0;
int flipLoop = 0;
int totFlipNum = 0;
int flipNum;
while ( markIntersections() )
{
if ( _input->isProfiling( ProfDiag ) )
std::cout << "Iter " << ( outerLoop+1 ) << std::endl;
// VISUALIZATION
if ( Visualizer::instance()->isEnable() )
{
pauseTiming( ProfNone );
pauseTiming( ProfDefault );
IntHVec triColorVec;
_triConsVec.copyToHost( triColorVec );
for ( int i = 0; i < triColorVec.size(); ++i )
if ( triColorVec[i] != -1 )
triColorVec[i] >>= 4;
Visualizer::instance()->addFrame( _pointVec, _constraintVec, _triVec, triColorVec, _infIdx );
startTiming( ProfDefault );
startTiming( ProfNone );
}
// Collect active triangles
thrust_copyIf_IsNotNegative( _triConsVec, _actTriVec );
int innerLoop = 0;
while ( doConsFlipping( flipNum ) )
{
totFlipNum += flipNum;
// VISUALIZATION
if ( Visualizer::instance()->isEnable() )
{
pauseTiming( ProfNone );
pauseTiming( ProfDefault );
IntHVec triColorVec;
_triConsVec.copyToHost( triColorVec );
for ( int i = 0; i < triColorVec.size(); ++i )
if ( triColorVec[i] != -1 )
triColorVec[i] >>= 4;
Visualizer::instance()->addFrame( _pointVec, _constraintVec, _triVec, triColorVec, _infIdx );
startTiming( ProfDefault );
startTiming( ProfNone );
}
++flipLoop;
++innerLoop;
if ( innerLoop == 5 ) break;
//if ( flipLoop == 1 ) break;
}
++outerLoop;
// Mark all the possibly modified triangles as Alive + Changed (3).
thrust_scatterConstantMap( _actTriVec, _triInfoVec, 3 );
//if ( outerLoop == 5 ) break;
}
//if ( outerLoop >= 20 )
//{
// for ( int i = 0; i < _actTriVec.size(); ++i )
// std::cout << _actTriVec[i] << " ";
// std::cout << std::endl;
//}
if ( _input->isProfiling( ProfDiag ) )
std::cout << "ConsFlip: Outer loop = " << outerLoop
<< ", inner loop = " << flipLoop
<< ", total flip = " << totFlipNum
<< std::endl;
_memPool.release( _triConsVec );
_memPool.release( _triMsgVec );
_memPool.release( _actTriVec );
_memPool.release( _flipVec );
stopTiming( ProfDefault, _output->stats.constraintTime );
}
void GpuDel::splitAndFlip()
{
int insLoop = 0;
_doFlipping = !_input->insAll;
//////////////////
while ( _availPtNum > 0 )
//////////////////
{
////////////////////////
splitTri();
////////////////////////
if ( _doFlipping )
doFlippingLoop( CircleFastOrientFast );
++insLoop;
}
//////////////////////////////
if ( !_doFlipping )
doFlippingLoop( CircleFastOrientFast );
markSpecialTris();
doFlippingLoop( CircleExactOrientSoS );
//////////////////////////////
// Insert constraints if needed
if ( _constraintVec.size() > 0 )
doInsertConstraints();
doFlippingLoop( CircleFastOrientFast );
markSpecialTris();
doFlippingLoop( CircleExactOrientSoS );
#pragma region Diagnostic
if ( _input->isProfiling( ProfDiag ) )
{
std::cout << "\nInsert loops: " << insLoop << std::endl;
std::cout << "Compact: " << std::endl;
_diagLogCompact.printCount();
std::cout << "Collect: " << std::endl;
_diagLogCollect.printCount();
}
#pragma endregion
return;
}
void GpuDel::markSpecialTris()
{
startTiming( ProfDetail );
kerMarkSpecialTris<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( _triInfoVec ),
toKernelPtr( _oppVec )
);
CudaCheckError();
stopTiming( ProfDetail, _diagLog->_t[ 0 ] );
}
void GpuDel::expandTri( int newTriNum )
{
//*** Expand triangles
_triVec.expand( newTriNum );
_oppVec.expand( newTriNum );
_triInfoVec.expand( newTriNum );
}
void GpuDel::splitTri()
{
const int MaxSamplePerTri = 100;
startTiming( ProfDefault );
////
// Rank points
////
int triNum = _triVec.size();
int noSample = _pointNum;
if ( noSample / triNum > MaxSamplePerTri )
noSample = triNum * MaxSamplePerTri;
IntDVec triCircleVec = _memPool.allocateAny<int>( _triMax );
triCircleVec.assign( triNum, INT_MIN );
IntDVec vertCircleVec = _memPool.allocateAny<int>( _pointNum );
vertCircleVec.resize( noSample );
kerVoteForPoint<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( _vertTriVec ),
toKernelPtr( _triVec ),
toKernelPtr( vertCircleVec ),
toKernelPtr( triCircleVec ),
noSample
);
CudaCheckError();
IntDVec triToVert = _memPool.allocateAny<int>( _triMax );
triToVert.assign( triNum, INT_MAX );
kerPickWinnerPoint<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( _vertTriVec ),
toKernelPtr( vertCircleVec ),
toKernelPtr( triCircleVec ),
toKernelPtr( triToVert ),
noSample
);
CudaCheckError();
_memPool.release( vertCircleVec );
_memPool.release( triCircleVec );
////
// Collect triangles with insertions
////
IntDVec splitTriVec = _memPool.allocateAny<int>( _pointNum );
_insNum = thrust_copyIf_TriHasVert( triToVert, splitTriVec );
const int extraTriNum = DIM * _insNum;
const int splitTriNum = triNum + extraTriNum;
if ( _input->isProfiling( ProfDiag ) )
{
std::cout << "Insert: " << _insNum
<< " Tri from: " << triNum
<< " to: " << splitTriNum << std::endl;
}
// If only a few points remain, stop flipping between splits
if ( _availPtNum - _insNum < _insNum &&
_insNum < 0.1 * _pointNum )
{
_doFlipping = false;
//std::cout << "Stop flipping!" << std::endl;
}
if ( !_input->noReorder && _doFlipping )
{
stopTiming( ProfDefault, _output->stats.splitTime );
shiftTri( triToVert, splitTriVec );
triNum = -1; // Mark that we have shifted the array
startTiming( ProfDefault );
}
////
// Make map
////
IntDVec insTriMap = _memPool.allocateAny<int>( _triMax );
insTriMap.assign( ( triNum < 0 ) ? splitTriNum : triNum, -1 );
thrust_scatterSequenceMap( splitTriVec, insTriMap );
////
// Expand if space needed
////
expandTri( splitTriNum );
////
// Update the location of the points
////
stopTiming( ProfDefault, _output->stats.splitTime );
startTiming( ProfDefault );
IntDVec exactCheckVec = _memPool.allocateAny<int>( _pointNum );
_counters.renew();
kerSplitPointsFast<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( _vertTriVec ),
toKernelPtr( triToVert ),
toKernelPtr( _triVec ),
toKernelPtr( insTriMap ),
toKernelPtr( exactCheckVec ),
_counters.ptr(),
triNum, _insNum
);
kerSplitPointsExactSoS<<< PredBlocksPerGrid, PredThreadsPerBlock >>>(
toKernelPtr( _vertTriVec ),
toKernelPtr( triToVert ),
toKernelPtr( _triVec ),
toKernelPtr( insTriMap ),
toKernelPtr( exactCheckVec ),
_counters.ptr(),
triNum, _insNum
);
CudaCheckError();
_memPool.release( exactCheckVec );
stopTiming( ProfDefault, _output->stats.relocateTime );
startTiming( ProfDefault );
////
// Split old into new triangle and copy them to new array
////
kerSplitTri<<< BlocksPerGrid, 32 >>>(
toKernelArray( splitTriVec ),
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( insTriMap ),
toKernelPtr( triToVert ),
triNum, _insNum
);
CudaCheckError();
_memPool.release( triToVert );
_memPool.release( insTriMap );
_memPool.release( splitTriVec );
_availPtNum -= _insNum;
stopTiming( ProfDefault, _output->stats.splitTime );
Visualizer::instance()->addFrame( _pointVec, SegmentDVec(), _triVec, _infIdx );
return;
}
bool GpuDel::doFlipping( CheckDelaunayMode checkMode )
{
startTiming( ProfDetail );
++_diagLog->_flipLoop;
const int triNum = _triVec.size();
////
// Compact active triangles
////
switch ( _actTriMode )
{
case ActTriMarkCompact:
thrust_copyIf_IsActiveTri( _triInfoVec, _actTriVec );
break;
case ActTriCollectCompact:
IntDVec temp = _memPool.allocateAny<int>( _triMax, true );
compactIfNegative( _actTriVec, temp );
break;
}
int orgActNum = _actTriVec.size();
#pragma region Diagnostic
if ( _input->isProfiling( ProfDiag ) )
{
_numActiveVec.push_back( orgActNum );
if ( orgActNum == 0 || ( checkMode != CircleExactOrientSoS &&
orgActNum < PredBlocksPerGrid * PredThreadsPerBlock ) )
{
_numFlipVec.push_back( 0 );
_timeCheckVec.push_back( 0.0 );
_timeFlipVec.push_back( 0.0 );
_numCircleVec.push_back( 0 );
}
}
#pragma endregion
restartTiming( ProfDetail, _diagLog->_t[ 0 ] );
/////////////////////////////////////////////////////////////////////
////
// Check actNum, switch mode or quit if necessary
////
// No more work
if ( 0 == orgActNum )
return false;
// Little work, leave it for the Exact iterations
if ( checkMode != CircleExactOrientSoS &&
orgActNum < PredBlocksPerGrid * PredThreadsPerBlock )
return false;
// See if there is little enough work to switch to collect mode.
// Safety check: make sure there's enough space to collect
if ( orgActNum < BlocksPerGrid * ThreadsPerBlock &&
orgActNum * 2 < _actTriVec.capacity() &&
orgActNum * 2 < triNum )
{
_actTriMode = ActTriCollectCompact;
_diagLog = &_diagLogCollect;
}
else
{
_actTriMode = ActTriMarkCompact;
_diagLog = &_diagLogCompact;
}
////
// Vote for flips
////
#pragma region Diagnostic
if ( _input->isProfiling( ProfDiag ) )
{
__circleCountVec.assign( triNum, 0 );
__rejFlipVec.assign( triNum, 0 );
}
#pragma endregion
IntDVec triVoteVec = _memPool.allocateAny<int>( _triMax );
triVoteVec.assign( triNum, INT_MAX );
dispatchCheckDelaunay( checkMode, orgActNum, triVoteVec );
double prevTime = _diagLog->_t[ 1 ];
restartTiming( ProfDetail, _diagLog->_t[ 1 ] );
/////////////////////////////////////////////////////////////////////
////
// Mark rejected flips
////
IntDVec flipToTri = _memPool.allocateAny<int>( _triMax );
flipToTri.resize( orgActNum );
kerMarkRejectedFlips<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelPtr( _actTriVec ),
toKernelPtr( _oppVec ),
toKernelPtr( triVoteVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( flipToTri ),
orgActNum,
_input->isProfiling( ProfDiag ) ? toKernelPtr( __rejFlipVec ) : NULL );
CudaCheckError();
_memPool.release( triVoteVec );
restartTiming( ProfDetail, _diagLog->_t[ 2 ] );
/////////////////////////////////////////////////////////////////////
////
// Compact flips
////
IntDVec temp = _memPool.allocateAny<int>( _triMax, true );
const int flipNum = compactIfNegative( flipToTri, temp );
if ( _input->isProfiling( ProfDiag ) )
{
_numFlipVec.push_back( flipNum );
_timeCheckVec.push_back( _diagLog->_t[ 1 ] - prevTime );
}
restartTiming( ProfDetail, _diagLog->_t[ 3 ] );
/////////////////////////////////////////////////////////////////////
////
// Preparation for the actual flipping. Include several steps
////
#pragma region Diagnostic
if ( _input->isProfiling( ProfDiag ) )
{
const int circleNum = thrust_sum( __circleCountVec );
_diagLog->_circleCount += circleNum;
const int rejFlipNum = thrust_sum( __rejFlipVec );
_diagLog->_rejFlipCount += rejFlipNum;
_diagLog->_totFlipNum += flipNum;
std::cout << "Acts: " << orgActNum
<< " Flips: " << flipNum << " ( " << rejFlipNum << " )"
<< " circle: " << circleNum
<< " Exact: "
<< ( checkMode == CircleExactOrientSoS ? _counters[ CounterExact ] : -1 )
<< std::endl;
_numCircleVec.push_back( circleNum );
startTiming( ProfDetail );
}
#pragma endregion
if ( 0 == flipNum )
{
_numCircleVec.push_back( 0 );
_timeFlipVec.push_back( 0 );
_memPool.release( flipToTri );
return false;
}
// Expand flip vector
int orgFlipNum = _flipVec.size();
int expFlipNum = orgFlipNum + flipNum;
if ( expFlipNum > _flipVec.capacity() )
{
stopTiming( ProfDetail, _diagLog->_t[ 4 ] );
stopTiming( ProfDefault, _output->stats.flipTime );
relocateAll();
startTiming( ProfDefault );
startTiming( ProfDetail );
orgFlipNum = 0;
expFlipNum = flipNum;
}
_flipVec.grow( expFlipNum );
// _triMsgVec contains two components.
// - .x is the encoded new neighbor information
// - .y is the flipIdx as in the flipVec (i.e. globIdx)
// As such, we do not need to initialize it to -1 to
// know which tris are not flipped in the current round.
// We can rely on the flipIdx being greater or smaller than orgFlipIdx.
// Note that we have to initialize everything to -1
// when we clear the flipVec and reset the flip indexing.
//
_triMsgVec.resize( _triVec.size() );
////
// Expand active tri vector
////
if ( _actTriMode == ActTriCollectCompact )
_actTriVec.grow( orgActNum + flipNum );
restartTiming( ProfDetail, _diagLog->_t[ 4 ] );
/////////////////////////////////////////////////////////////////////
////
// Flipping
////
// 32 ThreadsPerBlock is optimal
kerFlip<<< BlocksPerGrid, 32 >>>(
toKernelArray( flipToTri ),
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( _triMsgVec ),
( _actTriMode == ActTriCollectCompact ) ? toKernelPtr( _actTriVec ) : NULL,
toKernelPtr( _flipVec ),
NULL, NULL,
orgFlipNum, orgActNum
);
CudaCheckError();
_orgFlipNum.push_back( orgFlipNum );
////
// Update oppTri
////
kerUpdateOpp<<< BlocksPerGrid, 32 >>>(
toKernelPtr( _flipVec ) + orgFlipNum,
toKernelPtr( _oppVec ),
toKernelPtr( _triMsgVec ),
toKernelPtr( flipToTri ),
orgFlipNum, flipNum
);
CudaCheckError();
_memPool.release( flipToTri );
prevTime = _diagLog->_t[ 5 ];
stopTiming( ProfDetail, _diagLog->_t[ 5 ] );
if ( _input->isProfiling( ProfDiag ) )
_timeFlipVec.push_back( _diagLog->_t[ 5 ] - prevTime );
/////////////////////////////////////////////////////////////////////
Visualizer::instance()->addFrame( _pointVec, SegmentDVec(), _triVec, _infIdx );
return true;
}
void GpuDel::dispatchCheckDelaunay
(
CheckDelaunayMode checkMode,
int orgActNum,
IntDVec& triVoteVec
)
{
switch ( checkMode )
{
case CircleFastOrientFast:
kerCheckDelaunayFast<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelPtr( _actTriVec ),
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( triVoteVec ),
orgActNum,
_input->isProfiling( ProfDiag ) ? toKernelPtr( __circleCountVec ) : NULL
);
CudaCheckError();
break;
case CircleExactOrientSoS:
// Reuse this array to save memory
Int2DVec &exactCheckVi = _triMsgVec;
_counters.renew();
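// Note: the exact check is split into two passes. The _Fast kernel below
// resolves the clear-cut in-circle tests and appears to queue the borderline
// ones into exactCheckVi; the _Exact kernel then re-evaluates only those with
// exact arithmetic and SoS tie-breaking.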
kerCheckDelaunayExact_Fast<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelPtr( _actTriVec ),
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( triVoteVec ),
toKernelPtr( exactCheckVi ),
orgActNum,
_counters.ptr(),
_input->isProfiling( ProfDiag ) ? toKernelPtr( __circleCountVec ) : NULL
);
kerCheckDelaunayExact_Exact<<< PredBlocksPerGrid, PredThreadsPerBlock >>>(
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
toKernelPtr( triVoteVec ),
toKernelPtr( exactCheckVi ),
_counters.ptr(),
_input->isProfiling( ProfDiag ) ? toKernelPtr( __circleCountVec ) : NULL
);
CudaCheckError();
break;
}
}
template< typename T >
__global__ void
kerShift
(
KerIntArray shiftVec,
T* src,
T* dest
)
{
for ( int idx = getGlobThreadIdx(); idx < shiftVec._num; idx += getThreadNum() )
{
const int shift = shiftVec._arr[ idx ];
dest[ idx + shift ] = src[ idx ];
}
}
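// Note: kerShift scatters element idx of src into slot idx + shiftVec[ idx ] of
// dest. shiftVec is assumed to hold a prefix sum of per-slot expansion counts
// (e.g. counts {0,2,0,2} give shifts {0,0,2,2}), which is how shiftTri() below
// opens up room for the triangles created by point insertion.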
template< typename T >
void GpuDel::shiftExpandVec( IntDVec &shiftVec, DevVector< T > &dataVec, int size )
{
DevVector< T > tempVec = _memPool.allocateAny<T>( size );
tempVec.resize( size );
kerShift<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( shiftVec ),
toKernelPtr( dataVec ),
toKernelPtr( tempVec )
);
CudaCheckError();
dataVec.copyFrom( tempVec );
_memPool.release( tempVec );
}
void GpuDel::shiftOppVec( IntDVec &shiftVec, TriOppDVec &dataVec, int size )
{
TriOppDVec tempVec = _memPool.allocateAny< TriOpp >( size );
tempVec.resize( size );
kerShiftOpp<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( shiftVec ),
toKernelPtr( dataVec ),
toKernelPtr( tempVec ),
size
);
CudaCheckError();
dataVec.copyFrom( tempVec );
_memPool.release( tempVec );
}
void GpuDel::shiftTri( IntDVec &triToVert, IntDVec &splitTriVec )
{
startTiming( ProfDefault );
const int triNum = _triVec.size() + 2 * splitTriVec.size();
IntDVec shiftVec = _memPool.allocateAny<int>( _triMax );
thrust_scan_TriHasVert( triToVert, shiftVec );
shiftExpandVec( shiftVec, _triVec, triNum );
shiftExpandVec( shiftVec, _triInfoVec, triNum );
shiftExpandVec( shiftVec, triToVert, triNum );
shiftOppVec( shiftVec, _oppVec, triNum );
kerShiftTriIdx<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( _vertTriVec ),
toKernelPtr( shiftVec )
);
CudaCheckError();
kerShiftTriIdx<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( splitTriVec ),
toKernelPtr( shiftVec )
);
CudaCheckError();
_memPool.release( shiftVec );
stopTiming( ProfDefault, _output->stats.sortTime );
}
void GpuDel::relocateAll()
{
if ( _flipVec.size() == 0 )
return ;
startTiming( ProfDefault );
if ( _availPtNum > 0 )
{
const int triNum = _triVec.size();
IntDVec triToFlip = _memPool.allocateAny<int>( _triMax );
triToFlip.assign( triNum, -1 );
// Rebuild the pointers from back to front
int nextFlipNum = _flipVec.size();
for ( int i = _orgFlipNum.size() - 1; i >= 0; --i )
{
int prevFlipNum = _orgFlipNum[ i ];
int flipNum = nextFlipNum - prevFlipNum;
kerUpdateFlipTrace<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelPtr( _flipVec ),
toKernelPtr( triToFlip ),
prevFlipNum,
flipNum
);
nextFlipNum = prevFlipNum;
}
CudaCheckError();
// Relocate points
IntDVec exactCheckVec = _memPool.allocateAny<int>( _pointNum );
_counters.renew();
kerRelocatePointsFast<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( _vertTriVec ),
toKernelPtr( triToFlip ),
toKernelPtr( _flipVec ),
toKernelPtr( exactCheckVec ),
_counters.ptr()
);
kerRelocatePointsExact<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelPtr( _vertTriVec ),
toKernelPtr( triToFlip ),
toKernelPtr( _flipVec ),
toKernelPtr( exactCheckVec ),
_counters.ptr()
);
CudaCheckError();
_memPool.release( exactCheckVec );
_memPool.release( triToFlip );
}
// Just clean up the flips
_flipVec.resize( 0 );
_orgFlipNum.clear();
// Reset the triMsgVec
_triMsgVec.assign( _triMax, make_int2( -1, -1 ) );
stopTiming( ProfDefault, _output->stats.relocateTime );
}
void GpuDel::compactTris()
{
const int triNum = _triVec.size();
IntDVec prefixVec = _memPool.allocateAny<int>( _triMax );
prefixVec.resize( triNum );
thrust_scan_TriAliveStencil( _triInfoVec, prefixVec );
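// Note: prefixVec is assumed to hold an inclusive scan over the "alive" flags,
// so prefixVec[ triNum - 1 ] is the number of surviving triangles and
// prefixVec[ i ] - 1 is the compacted slot of an alive triangle i.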
int newTriNum = prefixVec[ triNum - 1 ];
int freeNum = triNum - newTriNum;
IntDVec freeVec = _memPool.allocateAny<int>( _triMax );
freeVec.resize( freeNum );
kerCollectFreeSlots<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelPtr( _triInfoVec ),
toKernelPtr( prefixVec ),
toKernelPtr( freeVec ),
newTriNum
);
CudaCheckError();
// Make map
kerMakeCompactMap<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( _triInfoVec ),
toKernelPtr( prefixVec ),
toKernelPtr( freeVec ),
newTriNum
);
CudaCheckError();
// Reorder the triangles
kerCompactTris<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( _triInfoVec ),
toKernelPtr( prefixVec ),
toKernelPtr( _triVec ),
toKernelPtr( _oppVec ),
newTriNum
);
CudaCheckError();
_triInfoVec.resize( newTriNum );
_triVec.resize( newTriNum );
_oppVec.resize( newTriNum );
_memPool.release( freeVec );
_memPool.release( prefixVec );
}
void GpuDel::outputToHost()
{
startTiming( ProfDefault );
kerMarkInfinityTri<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( _triVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( _oppVec ),
_infIdx
);
CudaCheckError();
compactTris();
if ( !_input->noSort )
{
// Change the indices back to the original order
kerUpdateVertIdx<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( _triVec ),
toKernelPtr( _triInfoVec ),
toKernelPtr( _orgPointIdx )
);
CudaCheckError();
}
////
// Copy to host
//_triVec.copyToHost( _output->triVec );
//_oppVec.copyToHost( _output->triOppVec );
// Output Infty point
_output->ptInfty = _ptInfty;
stopTiming( ProfDefault, _output->stats.outTime );
////
return;
}
|
0536a4beb9d1b4364d753c8d914e4ea9641f3bb1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "aes_core.h"
#include "crypto_kernel.h"
/*******************************************************************
AES CBC kernel
******************************************************************/
__global__ void
AES_cbc_128_encrypt_kernel_SharedMem(const uint8_t *in_all,
uint8_t *out_all,
const uint32_t *pkt_offset,
const uint8_t *keys,
uint8_t *ivs,
const unsigned int num_flows,
uint8_t *checkbits = 0)
{
__shared__ uint32_t shared_Te0[256];
__shared__ uint32_t shared_Te1[256];
__shared__ uint32_t shared_Te2[256];
__shared__ uint32_t shared_Te3[256];
__shared__ uint32_t shared_Rcon[10];
/* compute the thread id */
int idx = blockDim.x * blockIdx.x + threadIdx.x;
/* initialize T boxes */
for (unsigned i = 0 ; i * blockDim.x < 256 ; i++) {
unsigned index = threadIdx.x + i * blockDim.x;
if (index >= 256)
break;
shared_Te0[index] = Te0_ConstMem[index];
shared_Te1[index] = Te1_ConstMem[index];
shared_Te2[index] = Te2_ConstMem[index];
shared_Te3[index] = Te3_ConstMem[index];
}
for(unsigned i = 0; i * blockDim.x < 10; i++){
int index = threadIdx.x + blockDim.x * i;
if(index < 10){
shared_Rcon[index] = rcon[index];
}
}
if (idx >= num_flows)
return;
/* make sure T boxes have been initialized. */
__syncthreads();
/* Locate data */
const uint8_t *in = pkt_offset[idx] + in_all;
uint8_t *out = pkt_offset[idx] + out_all;
const uint8_t *key = idx * 16 + keys;
uint8_t *ivec = idx * AES_BLOCK_SIZE + ivs;
/* Encrypt using cbc mode */
unsigned long len = pkt_offset[idx + 1] - pkt_offset[idx];
const unsigned char *iv = ivec;
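/* Note: standard CBC chaining. With 16-byte blocks, C_0 = E_K(P_0 ^ IV) and
C_i = E_K(P_i ^ C_{i-1}); `iv` is re-pointed at the block just written, and
the final ciphertext block is stored back into ivec for this flow below. */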
while (len >= AES_BLOCK_SIZE) {
*((uint64_t*)out) = *((uint64_t*)in) ^ *((uint64_t*)iv);
*(((uint64_t*)out) + 1) = *(((uint64_t*)in) + 1) ^ *(((uint64_t*)iv) + 1);
AES_128_encrypt(out, out, key,
shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon);
iv = out;
len -= AES_BLOCK_SIZE;
in += AES_BLOCK_SIZE;
out += AES_BLOCK_SIZE;
}
if (len) {
for(unsigned n = 0; n < len; ++n)
out[n] = in[n] ^ iv[n];
for(unsigned n = len; n < AES_BLOCK_SIZE; ++n)
out[n] = iv[n];
AES_128_encrypt(out, out, key,
shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon);
iv = out;
}
*((uint4*)ivec) = *((uint4*)iv);
__syncthreads();
if (threadIdx.x == 0 && checkbits != 0)
*(checkbits + blockIdx.x) = 1;
}
/**************************************************************************
Exported C++ function wrapper function for CUDA kernel
***************************************************************************/
void AES_cbc_128_encrypt_gpu(const uint8_t *in_d,
uint8_t *out_d,
const uint32_t *pkt_offset_d,
const uint8_t *keys_d,
uint8_t *ivs_d,
const unsigned int num_flows,
uint8_t *checkbits_d,
const unsigned int threads_per_blk,
hipStream_t stream)
{
unsigned int num_cuda_blks = (num_flows+threads_per_blk - 1) / threads_per_blk;
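/* Note: ceiling division, so every flow gets a thread even when num_flows is
not a multiple of threads_per_blk; the kernel's idx >= num_flows guard simply
discards the surplus threads. */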
if (stream == 0) {
hipLaunchKernelGGL(( AES_cbc_128_encrypt_kernel_SharedMem), dim3(num_cuda_blks), dim3(threads_per_blk), 0, 0,
in_d, out_d, pkt_offset_d, keys_d, ivs_d, num_flows, checkbits_d);
} else {
hipLaunchKernelGGL(( AES_cbc_128_encrypt_kernel_SharedMem), dim3(num_cuda_blks), dim3(threads_per_blk), 0, stream,
in_d, out_d, pkt_offset_d, keys_d, ivs_d, num_flows, checkbits_d);
}
}
| 0536a4beb9d1b4364d753c8d914e4ea9641f3bb1.cu | #include "aes_core.h"
#include "crypto_kernel.h"
/*******************************************************************
AES CBC kernel
******************************************************************/
__global__ void
AES_cbc_128_encrypt_kernel_SharedMem(const uint8_t *in_all,
uint8_t *out_all,
const uint32_t *pkt_offset,
const uint8_t *keys,
uint8_t *ivs,
const unsigned int num_flows,
uint8_t *checkbits = 0)
{
__shared__ uint32_t shared_Te0[256];
__shared__ uint32_t shared_Te1[256];
__shared__ uint32_t shared_Te2[256];
__shared__ uint32_t shared_Te3[256];
__shared__ uint32_t shared_Rcon[10];
/* computer the thread id */
int idx = blockDim.x * blockIdx.x + threadIdx.x;
/* initialize T boxes */
for (unsigned i = 0 ; i * blockDim.x < 256 ; i++) {
unsigned index = threadIdx.x + i * blockDim.x;
if (index >= 256)
break;
shared_Te0[index] = Te0_ConstMem[index];
shared_Te1[index] = Te1_ConstMem[index];
shared_Te2[index] = Te2_ConstMem[index];
shared_Te3[index] = Te3_ConstMem[index];
}
for(unsigned i = 0; i * blockDim.x < 10; i++){
int index = threadIdx.x + blockDim.x * i;
if(index < 10){
shared_Rcon[index] = rcon[index];
}
}
if (idx >= num_flows)
return;
/* make sure T boxes have been initialized. */
__syncthreads();
/* Locate data */
const uint8_t *in = pkt_offset[idx] + in_all;
uint8_t *out = pkt_offset[idx] + out_all;
const uint8_t *key = idx * 16 + keys;
uint8_t *ivec = idx * AES_BLOCK_SIZE + ivs;
/* Encrypt using cbc mode */
unsigned long len = pkt_offset[idx + 1] - pkt_offset[idx];
const unsigned char *iv = ivec;
while (len >= AES_BLOCK_SIZE) {
*((uint64_t*)out) = *((uint64_t*)in) ^ *((uint64_t*)iv);
*(((uint64_t*)out) + 1) = *(((uint64_t*)in) + 1) ^ *(((uint64_t*)iv) + 1);
AES_128_encrypt(out, out, key,
shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon);
iv = out;
len -= AES_BLOCK_SIZE;
in += AES_BLOCK_SIZE;
out += AES_BLOCK_SIZE;
}
if (len) {
for(unsigned n = 0; n < len; ++n)
out[n] = in[n] ^ iv[n];
for(unsigned n = len; n < AES_BLOCK_SIZE; ++n)
out[n] = iv[n];
AES_128_encrypt(out, out, key,
shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon);
iv = out;
}
*((uint4*)ivec) = *((uint4*)iv);
__syncthreads();
if (threadIdx.x == 0 && checkbits != 0)
*(checkbits + blockIdx.x) = 1;
}
/**************************************************************************
Exported C++ function wrapper function for CUDA kernel
***************************************************************************/
void AES_cbc_128_encrypt_gpu(const uint8_t *in_d,
uint8_t *out_d,
const uint32_t *pkt_offset_d,
const uint8_t *keys_d,
uint8_t *ivs_d,
const unsigned int num_flows,
uint8_t *checkbits_d,
const unsigned int threads_per_blk,
cudaStream_t stream)
{
unsigned int num_cuda_blks = (num_flows+threads_per_blk - 1) / threads_per_blk;
if (stream == 0) {
AES_cbc_128_encrypt_kernel_SharedMem<<<num_cuda_blks, threads_per_blk>>>(
in_d, out_d, pkt_offset_d, keys_d, ivs_d, num_flows, checkbits_d);
} else {
AES_cbc_128_encrypt_kernel_SharedMem<<<num_cuda_blks, threads_per_blk, 0, stream>>>(
in_d, out_d, pkt_offset_d, keys_d, ivs_d, num_flows, checkbits_d);
}
}
|
a9272de8d466eae2a88271e31776d6155414f315.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMathPointwise.hip"
#else
#include <ATen/MemoryOverlap.h>
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \
struct Tensor_##NAME##_##REAL##_Op { \
__device__ __forceinline__ void operator()(scalar_t* out, scalar_t* in) const { \
*out = CFUNC(*in); \
} \
\
__device__ __forceinline__ void operator()(scalar_t* v) const { \
*v = CFUNC(*v); \
} \
}; \
\
void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); \
at::assert_no_internal_overlap(self_, #NAME); \
if (self_ == src) { \
if (!THC_pointwiseApply1<scalar_t>(state, self_, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} else { \
THCTensor_(resizeAs)(state, self_, src); \
\
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} \
\
THCudaCheck(hipGetLastError()); \
}
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL)
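// Note: each invocation below expands into one elementwise entry point. For
// example, IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log, THCNumerics<scalar_t>::log, Real)
// defines THCTensor_(log)(state, self_, src), which applies log in place when
// self_ == src and otherwise resizes self_ to match src before writing into it.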
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log, THCNumerics<scalar_t>::log, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(lgamma, THCNumerics<scalar_t>::lgamma, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log10, THCNumerics<scalar_t>::log10, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, THCNumerics<scalar_t>::log1p, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log2, THCNumerics<scalar_t>::log2, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( exp, THCNumerics<scalar_t>::exp, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(expm1, THCNumerics<scalar_t>::expm1, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cos, THCNumerics<scalar_t>::cos, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sin, THCNumerics<scalar_t>::sin, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<scalar_t>::sqrt, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(rsqrt, THCNumerics<scalar_t>::rsqrt, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( ceil, THCNumerics<scalar_t>::ceil, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(floor, THCNumerics<scalar_t>::floor, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(trunc, THCNumerics<scalar_t>::trunc, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( acos, THCNumerics<scalar_t>::acos, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics<scalar_t>::cosh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( asin, THCNumerics<scalar_t>::asin, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sinh, THCNumerics<scalar_t>::sinh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tan, THCNumerics<scalar_t>::tan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<scalar_t>::atan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<scalar_t>::tanh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erf, THCNumerics<scalar_t>::erf, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erfc, THCNumerics<scalar_t>::erfc, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(erfinv, THCNumerics<scalar_t>::erfinv,Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( round, THCNumerics<scalar_t>::round, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( frac, THCNumerics<scalar_t>::frac, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cinv, THCNumerics<scalar_t>::cinv, Real)
#endif
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( neg, THCNumerics<scalar_t>::neg, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( abs, THCNumerics<scalar_t>::abs, Real)
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC
void THCTensor_(sign)(THCState* state, THCTensor* self_, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorSignOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorSignOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(clamp)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t min_value,
scalar_t max_value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorClampOp<scalar_t>(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorClampOp<scalar_t>(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(crossKernel)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, x, y));
int64_t sx = THCTensor_(stride)(state, x, dimension);
int64_t sy = THCTensor_(stride)(state, y, dimension);
int64_t so = THCTensor_(stride)(state, self, dimension);
THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1);
THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1);
THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, nself, nx, ny, TensorCrossOp<scalar_t>(sx, sy, so))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCTensor_(free)(state, nx);
THCTensor_(free)(state, ny);
THCTensor_(free)(state, nself);
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(atan2)(THCState *state, THCTensor *self_, THCTensor *tx, THCTensor *ty)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, tx, ty));
THArgCheck(THCTensor_(nElement)(state, tx) ==
THCTensor_(nElement)(state, ty), 3, "sizes do not match");
THCTensor_(resizeAs)(state, self_, tx);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, tx, ty, TensorATan2Op<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(sigmoid)(THCState* state, THCTensor* self_, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorSigmoidOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorSigmoidOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(digamma)(THCState* state, THCTensor* self_, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ != src) {
THCTensor_(resizeAs)(state, self_, src);
}
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorDigammaOp<scalar_t, accreal>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(polygamma)(THCState* state, THCTensor* self_, int64_t n, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ != src) {
THCTensor_(resizeAs)(state, self_, src);
}
switch (n) {
case 0:
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorDigammaOp<scalar_t, accreal>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
break;
case 1:
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorTrigammaOp<scalar_t, accreal>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
break;
default:
THError("polygamma(n,x) is not implemented for n>=2");
}
THCudaCheck(hipGetLastError());
}
#endif
namespace {
c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl> retainTensorImpl(THCTensor* self) {
c10::raw::intrusive_ptr::incref(self);
return c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(self);
}
}
void THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2)
{
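// Note: cadd computes self_ = src1 + value * src2 by dispatching to ATen's
// add_out with value as the alpha scaling factor (wrapped in at::Half for
// half-precision builds).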
auto out = at::Tensor(retainTensorImpl(self_));
#ifdef THC_REAL_IS_HALF
auto alpha = at::Half(value);
#else
auto alpha = value;
#endif
at::add_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha);
}
void THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
#ifdef THC_REAL_IS_HALF
auto alpha = at::Half(value);
#else
auto alpha = value;
#endif
at::sub_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha);
}
void THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
at::mul_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)));
}
void THCTensor_(cpow)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self = pow(self, src2)
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorCPowOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = pow(src1, src2)
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorCPowOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(pow)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t value) {
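// Note: the chain below selects a TensorPowOp specialisation for the common
// integer exponents (1, 2, 3, and -1/-2 for floating-point types) so that no
// generic pow() call is needed per element; the final branch with template
// argument -3 is presumably the catch-all that falls back to pow().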
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(1))) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, 1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(2))) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, 2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(3))) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, 3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(-1))) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, -1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(-2))) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, -2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#endif
} else {
// fallback implementation using pow
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, -3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(1))) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, 1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(2))) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, 2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(3))) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, 3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(-1))) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, -1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(-2))) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, -2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#endif
} else {
// fallback implementation using pow
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, -3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(tpow)(THCState *state, THCTensor *self_, scalar_t value, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorTPowOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorTPowOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
at::div_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)));
}
void THCTensor_(clshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF)
return THError("clshift not supported for torch.CudaHalfTensor");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self <<= src2
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorLShiftOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 << src2
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorLShiftOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
void THCTensor_(crshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF)
return THError("crshift not supported for torch.CudaHalfTensor");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self >>= src2
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorRShiftOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 >> src2
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorRShiftOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
void THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMaxOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMaxOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cmin)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMinOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMinOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cremainder)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCRemainderOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCRemainderOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCFmodOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCFmodOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (self == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMaxValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMaxValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (self == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMinValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMinValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(addcmul)(THCState *state, THCTensor *self_, THCTensor *t, scalar_t value, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, self_, t, src1, src2));
if(self_ != t)
{
THCTensor_(resizeAs)(state, self_, t);
THCTensor_(copy)(state, self_, t);
}
else
{
THArgCheck(THCTensor_(nElement)(state, self_) == THCTensor_(nElement)(state, src1),
1, "sizes do not match");
}
THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2),
3, "sizes do not match");
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorAddCMulOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(addcdiv)(THCState *state, THCTensor *self_, THCTensor *t, scalar_t value, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, self_, t, src1, src2));
if(self_ != t)
{
THCTensor_(resizeAs)(state, self_, t);
THCTensor_(copy)(state, self_, t);
}
else
{
THArgCheck(THCTensor_(nElement)(state, self_) == THCTensor_(nElement)(state, src1),
1, "sizes do not match");
}
THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2),
3, "sizes do not match");
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorAddCDivOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(cbitand)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
return THError("cbitand is only supported for integer type tensors");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self &= src2
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitAndOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 & src2
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitAndOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
void THCTensor_(cbitor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
return THError("cbitor is only supported for integer type tensors");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self |= src2
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitOrOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 | src2
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitOrOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
void THCTensor_(cbitxor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
return THError("cbitor is only supported for integer type tensors");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self ^= src2
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitXorOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 ^ src2
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitXorOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
#endif
| a9272de8d466eae2a88271e31776d6155414f315.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMathPointwise.cu"
#else
#include <ATen/MemoryOverlap.h>
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \
struct Tensor_##NAME##_##REAL##_Op { \
__device__ __forceinline__ void operator()(scalar_t* out, scalar_t* in) const { \
*out = CFUNC(*in); \
} \
\
__device__ __forceinline__ void operator()(scalar_t* v) const { \
*v = CFUNC(*v); \
} \
}; \
\
void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); \
at::assert_no_internal_overlap(self_, #NAME); \
if (self_ == src) { \
if (!THC_pointwiseApply1<scalar_t>(state, self_, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} else { \
THCTensor_(resizeAs)(state, self_, src); \
\
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \
THArgCheck(false, 2, CUTORCH_DIM_WARNING); \
} \
} \
\
THCudaCheck(cudaGetLastError()); \
}
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL)
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log, THCNumerics<scalar_t>::log, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(lgamma, THCNumerics<scalar_t>::lgamma, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log10, THCNumerics<scalar_t>::log10, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, THCNumerics<scalar_t>::log1p, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log2, THCNumerics<scalar_t>::log2, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( exp, THCNumerics<scalar_t>::exp, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(expm1, THCNumerics<scalar_t>::expm1, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cos, THCNumerics<scalar_t>::cos, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sin, THCNumerics<scalar_t>::sin, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<scalar_t>::sqrt, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(rsqrt, THCNumerics<scalar_t>::rsqrt, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( ceil, THCNumerics<scalar_t>::ceil, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(floor, THCNumerics<scalar_t>::floor, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(trunc, THCNumerics<scalar_t>::trunc, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( acos, THCNumerics<scalar_t>::acos, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics<scalar_t>::cosh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( asin, THCNumerics<scalar_t>::asin, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sinh, THCNumerics<scalar_t>::sinh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tan, THCNumerics<scalar_t>::tan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<scalar_t>::atan, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<scalar_t>::tanh, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erf, THCNumerics<scalar_t>::erf, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erfc, THCNumerics<scalar_t>::erfc, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(erfinv, THCNumerics<scalar_t>::erfinv,Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( round, THCNumerics<scalar_t>::round, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( frac, THCNumerics<scalar_t>::frac, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cinv, THCNumerics<scalar_t>::cinv, Real)
#endif
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( neg, THCNumerics<scalar_t>::neg, Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( abs, THCNumerics<scalar_t>::abs, Real)
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC
void THCTensor_(sign)(THCState* state, THCTensor* self_, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorSignOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorSignOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(clamp)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t min_value,
scalar_t max_value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorClampOp<scalar_t>(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorClampOp<scalar_t>(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(crossKernel)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, x, y));
int64_t sx = THCTensor_(stride)(state, x, dimension);
int64_t sy = THCTensor_(stride)(state, y, dimension);
int64_t so = THCTensor_(stride)(state, self, dimension);
THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1);
THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1);
THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, nself, nx, ny, TensorCrossOp<scalar_t>(sx, sy, so))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCTensor_(free)(state, nx);
THCTensor_(free)(state, ny);
THCTensor_(free)(state, nself);
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(atan2)(THCState *state, THCTensor *self_, THCTensor *tx, THCTensor *ty)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, tx, ty));
THArgCheck(THCTensor_(nElement)(state, tx) ==
THCTensor_(nElement)(state, ty), 3, "sizes do not match");
THCTensor_(resizeAs)(state, self_, tx);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, tx, ty, TensorATan2Op<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(sigmoid)(THCState* state, THCTensor* self_, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorSigmoidOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorSigmoidOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(digamma)(THCState* state, THCTensor* self_, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ != src) {
THCTensor_(resizeAs)(state, self_, src);
}
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorDigammaOp<scalar_t, accreal>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(polygamma)(THCState* state, THCTensor* self_, int64_t n, THCTensor* src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ != src) {
THCTensor_(resizeAs)(state, self_, src);
}
switch (n) {
case 0:
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorDigammaOp<scalar_t, accreal>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
break;
case 1:
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorTrigammaOp<scalar_t, accreal>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
break;
default:
THError("polygamma(n,x) is not implemented for n>=2");
}
THCudaCheck(cudaGetLastError());
}
#endif
namespace {
c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl> retainTensorImpl(THCTensor* self) {
c10::raw::intrusive_ptr::incref(self);
return c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(self);
}
}
void THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
#ifdef THC_REAL_IS_HALF
auto alpha = at::Half(value);
#else
auto alpha = value;
#endif
at::add_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha);
}
void THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
#ifdef THC_REAL_IS_HALF
auto alpha = at::Half(value);
#else
auto alpha = value;
#endif
at::sub_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha);
}
void THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
at::mul_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)));
}
void THCTensor_(cpow)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self = pow(self, src2)
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorCPowOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = pow(src1, src2)
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorCPowOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(pow)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t value) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(1))) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, 1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(2))) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, 2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(3))) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, 3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(-1))) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, -1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(-2))) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, -2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#endif
} else {
// fallback implementation using pow
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, -3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(1))) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, 1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(2))) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, 2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(3))) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, 3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(-1))) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, -1>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(-2))) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, -2>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
#endif
} else {
// fallback implementation using pow
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, -3>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(tpow)(THCState *state, THCTensor *self_, scalar_t value, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
if (self_ == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorTPowOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorTPowOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
auto out = at::Tensor(retainTensorImpl(self_));
at::div_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)));
}
void THCTensor_(clshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF)
return THError("clshift not supported for torch.CudaHalfTensor");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self <<= src2
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorLShiftOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 << src2
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorLShiftOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
void THCTensor_(crshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF)
return THError("crshift not supported for torch.CudaHalfTensor");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self >>= src2
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorRShiftOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 >> src2
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorRShiftOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
void THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMaxOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMaxOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cmin)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMinOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMinOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cremainder)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCRemainderOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCRemainderOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 2, "sizes do not match");
if (self == src1) {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCFmodOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src1);
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCFmodOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (self == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMaxValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMaxValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (self == src) {
if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMinValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self, src);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMinValueOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
}
void THCTensor_(addcmul)(THCState *state, THCTensor *self_, THCTensor *t, scalar_t value, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, self_, t, src1, src2));
if(self_ != t)
{
THCTensor_(resizeAs)(state, self_, t);
THCTensor_(copy)(state, self_, t);
}
else
{
THArgCheck(THCTensor_(nElement)(state, self_) == THCTensor_(nElement)(state, src1),
1, "sizes do not match");
}
THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2),
3, "sizes do not match");
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorAddCMulOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(addcdiv)(THCState *state, THCTensor *self_, THCTensor *t, scalar_t value, THCTensor *src1, THCTensor *src2)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, self_, t, src1, src2));
if(self_ != t)
{
THCTensor_(resizeAs)(state, self_, t);
THCTensor_(copy)(state, self_, t);
}
else
{
THArgCheck(THCTensor_(nElement)(state, self_) == THCTensor_(nElement)(state, src1),
1, "sizes do not match");
}
THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2),
3, "sizes do not match");
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorAddCDivOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(cbitand)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
return THError("cbitand is only supported for integer type tensors");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self &= src2
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitAndOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 & src2
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitAndOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
void THCTensor_(cbitor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
return THError("cbitor is only supported for integer type tensors");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
// self |= src2
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitOrOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
// self = src1 | src2
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitOrOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
void THCTensor_(cbitxor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
return THError("cbitor is only supported for integer type tensors");
#else
THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
THArgCheck(THCTensor_(nElement)(state, src1) ==
THCTensor_(nElement)(state, src2), 3, "sizes do not match");
if (self_ == src1) {
    // self ^= src2
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitXorOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src1);
    // self = src1 ^ src2
if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitXorOp<scalar_t>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
#endif
|
28495d4e0662d7e823caedd6c184d05f63b5282a.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void mandelKernel(float lowerX, float lowerY, float stepX, float stepY, int maxIterations, int* result) {
// To avoid error caused by the floating number, use the following pseudo code
//
// float x = lowerX + thisX * stepX;
// float y = lowerY + thisY * stepY;
int thisX = blockIdx.x * blockDim.x + threadIdx.x;
int thisY = blockIdx.y * blockDim.y + threadIdx.y;
float x = lowerX + thisX * stepX;
float y = lowerY + thisY * stepY;
float z_re = x, z_im = y;
int i;
for (i = 0; i < maxIterations; ++i)
{
if (z_re * z_re + z_im * z_im > 4.f)
break;
float new_re = z_re * z_re - z_im * z_im;
float new_im = 2.f * z_re * z_im;
z_re = x + new_re;
z_im = y + new_im;
}
result[thisY * gridDim.x * blockDim.x + thisX] = i;
}
// Host front-end function that allocates the memory and launches the GPU kernel
//raccoon:img = output
//raccoon:resX = width
//raccoon:resY = height
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
float stepX = (upperX - lowerX) / resX;
float stepY = (upperY - lowerY) / resY;
/*------------------raccoon------------------------*/
size_t size = resX * resY * sizeof(int);
int *temp;
    hipHostMalloc(&temp, size, hipHostMallocMapped);  // note: this pinned host buffer is never used or freed below
int *result;
size_t pitch;
    hipMallocPitch(&result, &pitch, resX * sizeof(int), resY);  // height argument is a row count, not a byte count
//hipMemcpy(result, img, size, hipMemcpyHostToDevice);
dim3 dimBlock(8, 8);
dim3 dimGrid(resX / dimBlock.x, resY / dimBlock.y);
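    // note: the grid assumes resX and resY are multiples of the 8x8 block; the kernel also writes
    // `result` as a tightly packed row-major array of width gridDim.x*blockDim.x (= resX under that
    // assumption), so the pitch returned above is effectively unused and the linear hipMemcpy below
    // relies on this packed layout.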
hipLaunchKernelGGL(( mandelKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, lowerX, lowerY, stepX, stepY, maxIterations, result);
hipMemcpy(img, result, size, hipMemcpyDeviceToHost);
hipFree(result);
}
| 28495d4e0662d7e823caedd6c184d05f63b5282a.cu | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void mandelKernel(float lowerX, float lowerY, float stepX, float stepY, int maxIterations, int* result) {
// To avoid error caused by the floating number, use the following pseudo code
//
// float x = lowerX + thisX * stepX;
// float y = lowerY + thisY * stepY;
int thisX = blockIdx.x * blockDim.x + threadIdx.x;
int thisY = blockIdx.y * blockDim.y + threadIdx.y;
float x = lowerX + thisX * stepX;
float y = lowerY + thisY * stepY;
float z_re = x, z_im = y;
int i;
for (i = 0; i < maxIterations; ++i)
{
if (z_re * z_re + z_im * z_im > 4.f)
break;
float new_re = z_re * z_re - z_im * z_im;
float new_im = 2.f * z_re * z_im;
z_re = x + new_re;
z_im = y + new_im;
}
result[thisY * gridDim.x * blockDim.x + thisX] = i;
}
// Host front-end function that allocates the memory and launches the GPU kernel
//raccoon:img = output
//raccoon:resX = width
//raccoon:resY = height
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
float stepX = (upperX - lowerX) / resX;
float stepY = (upperY - lowerY) / resY;
/*------------------raccoon------------------------*/
size_t size = resX * resY * sizeof(int);
int *temp;
    cudaHostAlloc(&temp, size, cudaHostAllocMapped);  // note: this pinned host buffer is never used or freed below
int *result;
size_t pitch;
    cudaMallocPitch(&result, &pitch, resX * sizeof(int), resY);  // height argument is a row count, not a byte count
//cudaMemcpy(result, img, size, cudaMemcpyHostToDevice);
dim3 dimBlock(8, 8);
dim3 dimGrid(resX / dimBlock.x, resY / dimBlock.y);
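    // note: the grid assumes resX and resY are multiples of the 8x8 block; the kernel also writes
    // `result` as a tightly packed row-major array of width gridDim.x*blockDim.x (= resX under that
    // assumption), so the pitch returned above is effectively unused and the linear cudaMemcpy below
    // relies on this packed layout.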
mandelKernel <<<dimGrid, dimBlock>>>(lowerX, lowerY, stepX, stepY, maxIterations, result);
cudaMemcpy(img, result, size, cudaMemcpyDeviceToHost);
cudaFree(result);
}
|
1ebfc3ac82095e19d145fc84b35f6c8c7d1c739a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_multidim_kernel;
int xdim0_multidim_kernel_h = -1;
int ydim0_multidim_kernel_h = -1;
#undef OPS_ACC_MD0
#define OPS_ACC_MD0(d, x, y) ((x)*2 + (d) + (xdim0_multidim_kernel * (y)*2))
// user function
__device__
void
multidim_kernel(double *val, int *idx) {
val[OPS_ACC_MD0(0, 0, 0)] = (double)(idx[0]);
val[OPS_ACC_MD0(1, 0, 0)] = (double)(idx[1]);
}
#undef OPS_ACC_MD0
__global__ void ops_multidim_kernel(double *__restrict arg0, int arg_idx0,
int arg_idx1, int size0, int size1) {
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
int arg_idx[2];
arg_idx[0] = arg_idx0 + idx_x;
arg_idx[1] = arg_idx1 + idx_y;
arg0 += idx_x * 1 * 2 + idx_y * 1 * 2 * xdim0_multidim_kernel;
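  // each grid point stores 2 values (the x and y index written by the user function),
  // hence the factor of 2 in the pointer arithmetic above and in the OPS_ACC_MD0 macro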
if (idx_x < size0 && idx_y < size1) {
multidim_kernel(arg0, arg_idx);
}
}
// host stub function
void ops_par_loop_multidim_kernel(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1) {
// Timing
double t1, t2, c1, c2;
ops_arg args[2] = {arg0, arg1};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 2, range, 0))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(0, "multidim_kernel");
OPS_kernels[0].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[2];
int end[2];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 2; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 2; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int arg_idx[2];
#ifdef OPS_MPI
arg_idx[0] = sb->decomp_disp[0] + start[0];
arg_idx[1] = sb->decomp_disp[1] + start[1];
#else
arg_idx[0] = start[0];
arg_idx[1] = start[1];
#endif
int xdim0 = args[0].dat->size[0];
if (xdim0 != xdim0_multidim_kernel_h) {
hipMemcpyToSymbol(xdim0_multidim_kernel, &xdim0, sizeof(int));
xdim0_multidim_kernel_h = xdim0;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
char *p_a[2];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
p_a[0] = (char *)args[0].data_d + base0;
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args, 2, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[0].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_multidim_kernel), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], arg_idx[0],
arg_idx[1], x_size, y_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[0].time += t1 - t2;
}
ops_set_dirtybit_device(args, 2);
ops_set_halo_dirtybit3(&args[0], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[0].mpi_time += t2 - t1;
OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg0);
}
}
| 1ebfc3ac82095e19d145fc84b35f6c8c7d1c739a.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_multidim_kernel;
int xdim0_multidim_kernel_h = -1;
int ydim0_multidim_kernel_h = -1;
#undef OPS_ACC_MD0
#define OPS_ACC_MD0(d, x, y) ((x)*2 + (d) + (xdim0_multidim_kernel * (y)*2))
// user function
__device__
void
multidim_kernel(double *val, int *idx) {
val[OPS_ACC_MD0(0, 0, 0)] = (double)(idx[0]);
val[OPS_ACC_MD0(1, 0, 0)] = (double)(idx[1]);
}
#undef OPS_ACC_MD0
__global__ void ops_multidim_kernel(double *__restrict arg0, int arg_idx0,
int arg_idx1, int size0, int size1) {
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
int arg_idx[2];
arg_idx[0] = arg_idx0 + idx_x;
arg_idx[1] = arg_idx1 + idx_y;
arg0 += idx_x * 1 * 2 + idx_y * 1 * 2 * xdim0_multidim_kernel;
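  // each grid point stores 2 values (the x and y index written by the user function),
  // hence the factor of 2 in the pointer arithmetic above and in the OPS_ACC_MD0 macro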
if (idx_x < size0 && idx_y < size1) {
multidim_kernel(arg0, arg_idx);
}
}
// host stub function
void ops_par_loop_multidim_kernel(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1) {
// Timing
double t1, t2, c1, c2;
ops_arg args[2] = {arg0, arg1};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 2, range, 0))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(0, "multidim_kernel");
OPS_kernels[0].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[2];
int end[2];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 2; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 2; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int arg_idx[2];
#ifdef OPS_MPI
arg_idx[0] = sb->decomp_disp[0] + start[0];
arg_idx[1] = sb->decomp_disp[1] + start[1];
#else
arg_idx[0] = start[0];
arg_idx[1] = start[1];
#endif
int xdim0 = args[0].dat->size[0];
if (xdim0 != xdim0_multidim_kernel_h) {
cudaMemcpyToSymbol(xdim0_multidim_kernel, &xdim0, sizeof(int));
xdim0_multidim_kernel_h = xdim0;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
char *p_a[2];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
p_a[0] = (char *)args[0].data_d + base0;
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args, 2, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[0].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_multidim_kernel<<<grid, tblock>>>((double *)p_a[0], arg_idx[0],
arg_idx[1], x_size, y_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[0].time += t1 - t2;
}
ops_set_dirtybit_device(args, 2);
ops_set_halo_dirtybit3(&args[0], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[0].mpi_time += t2 - t1;
OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg0);
}
}
|
be2debe85af2423515ec471a4ef0bd09531f9174.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "dpCudaUux3a.hpp"
#include "errorCheck.hpp"
#define cudaErrChk(ans) { cudaAssert((ans), __FILE__, __LINE__); }
#define BEGIN hipEventRecord(begin, 0);
#define END hipEventRecord(end, 0); hipEventSynchronize(end); hipEventElapsedTime(&delTime, begin, end);
#define rSQRT2 0.707106781186
#define ERR fprintf(stderr, "%d\n" ,__LINE__);
__device__
void ixxxx1(float* p, int nHEL, int nSF, cmplx* fi)
{
float SQP0P3 = sqrtf(p[0]+p[3])*(float)(nSF);
int NH = nHEL*nSF;
fi[4] = mkcmplx(p[0]*(float)(nSF), p[3]*(float)(nSF));
fi[5] = mkcmplx(p[1]*(float)(nSF), p[2]*(float)(nSF));
cmplx CHI = mkcmplx(NH*p[1]*(1.0f/SQP0P3), p[2]*(1.0f/SQP0P3));
cmplx CZERO = mkcmplx(0.0f, 0.0f);
cmplx CSQP0P3 = mkcmplx(SQP0P3, 0.0f);
fi[0]=(NH== 1)*CZERO + (NH==-1)*CHI;
fi[1]=(NH== 1)*CZERO + (NH==-1)*CSQP0P3;
fi[2]=(NH== 1)*CSQP0P3 + (NH==-1)*CZERO;
fi[3]=(NH== 1)*CHI + (NH==-1)*CZERO;
return;
}
__device__
void oxxxx2(float* p, int nHEL, int nSF, cmplx* fo)
{
int NH=nHEL*nSF;
fo[4] = mkcmplx(p[0]*(float)(nSF), p[3]*(float)(nSF));
fo[5] = mkcmplx(p[1]*(float)(nSF), p[2]*(float)(nSF));
cmplx CHI = mkcmplx(-nHEL*sqrtf(2.0f*p[0]), 0.0f);
cmplx CZERO = mkcmplx(0.0f,0.0f);
fo[0]=CZERO;
fo[1]=(NH== 1)*CHI + (NH==-1)*CZERO;
fo[2]=(NH== 1)*CZERO + (NH==-1)*CHI;
fo[3]=CZERO;
return;
}
__device__
void vxxxx0(float* p, int nHEL, int nSV, cmplx* vc)
{
vc[4] = mkcmplx(p[0], p[3])*nSV;
vc[5] = mkcmplx(p[1], p[2])*nSV;
float rpt = rsqrtf(p[1]*p[1] + p[2]*p[2]);
vc[0] = mkcmplx(0.0f, 0.0f);
vc[3] = mkcmplx( (float)(nHEL)*(1.0f/(rpt*p[0]))*rSQRT2, 0.0f);
float pzpt = (p[3]*(1.0f/p[0])*rpt)*rSQRT2 *(float)(nHEL);
vc[1] = mkcmplx(-p[1]*pzpt, -nSV*p[2] * rpt * rSQRT2);
vc[2] = mkcmplx(-p[2]*pzpt,
+nSV*p[1] * rpt * rSQRT2);
return;
}
__device__
void fvoxx0(cmplx* fo, cmplx* vc, float* gal, cmplx* fvo)
{
fvo[4] = fo[4]+vc[4];
fvo[5] = fo[5]+vc[5];
float pf[4];
pf[0] = fvo[4].re;
pf[1] = fvo[5].re;
pf[2] = fvo[5].im;
pf[3] = fvo[4].im;
float pf2 = pf[0]*pf[0] - (pf[1]*pf[1] + pf[2]*pf[2] + pf[3]*pf[3]);
cmplx cI = mkcmplx( 0.0f, 1.0f);
float d = -1.0f/pf2;
cmplx sl1 = (vc[0] + vc[3])*fo[2] + (vc[1] + cI*vc[2])*fo[3];
cmplx sl2 = (vc[0] - vc[3])*fo[3] + (vc[1] - cI*vc[2])*fo[2];
cmplx sr1 = (vc[0] - vc[3])*fo[0] - (vc[1] + cI*vc[2])*fo[1];
cmplx sr2 = (vc[0] + vc[3])*fo[1] - (vc[1] - cI*vc[2])*fo[0];
fvo[0] = ( gal[1]*((pf[0]+pf[3])*sr1 + fvo[5] *sr2 ))*d;
fvo[1] = ( gal[1]*((pf[0]-pf[3])*sr2 + conj(fvo[5])*sr1 ))*d;
fvo[2] = ( gal[0]*((pf[0]-pf[3])*sl1 - fvo[5] *sl2 ))*d;
fvo[3] = ( gal[0]*((pf[0]+pf[3])*sl2 - conj(fvo[5])*sl1 ))*d;
return;
}
//note: this was defined as iovxx0() in http://arxiv.org/pdf/0908.4403.pdf pg 12
__device__
void iovxxx(cmplx* fi, cmplx* fo, cmplx* vc, float* gal, cmplx& vertex)
{
vertex =
gal[0]*((fo[2]*fi[0]+fo[3]*fi[1])*vc[0]
+(fo[2]*fi[1]+fo[3]*fi[0])*vc[1]
-((fo[2]*fi[1]-fo[3]*fi[0])*vc[2])
*mkcmplx(0.0f, 1.0f)
+(fo[2]*fi[0]-fo[3]*fi[1])*vc[3])
+gal[1]*((fo[0]*fi[2]+fo[1]*fi[3])*vc[0]
-(fo[0]*fi[3]+fo[1]*fi[2])*vc[1]
+((fo[0]*fi[3]-fo[1]*fi[2])*vc[2])
*mkcmplx(0.0f, 1.0f)
-(fo[0]*fi[2]-fo[1]*fi[3])*vc[3]);
return;
}
__device__
void fvixx0(cmplx* fi, cmplx* vc, float* gal, cmplx* fvi)
{
fvi[4] = fi[4]-vc[4];
fvi[5] = fi[5]-vc[5];
float pf[4];
pf[0] = fvi[4].re;
pf[1] = fvi[5].re;
pf[2] = fvi[5].im;
pf[3] = fvi[4].im;
float pf2 = pf[0]*pf[0] - (pf[1]*pf[1] + pf[2]*pf[2] + pf[3]*pf[3]);
cmplx cI = mkcmplx( 0.0f, 1.0f);
float d = -1.0f/pf2;
cmplx sl1 = (vc[0] + vc[3])*fi[0] + (vc[1] - cI*vc[2])*fi[1];
cmplx sl2 = (vc[0] - vc[3])*fi[1] + (vc[1] + cI*vc[2])*fi[0];
cmplx sr1 = (vc[0] - vc[3])*fi[2] - (vc[1] - cI*vc[2])*fi[3];
cmplx sr2 = (vc[0] + vc[3])*fi[3] - (vc[1] + cI*vc[2])*fi[2];
fvi[0] = ( gal[0]*((pf[0]-pf[3])*sl1 - conj(fvi[5])*sl2))*d;
fvi[1] = ( gal[0]*((pf[0]+pf[3])*sl2 - fvi[5] *sl1))*d;
fvi[2] = ( gal[1]*((pf[0]+pf[3])*sr1 + conj(fvi[5])*sr2))*d;
fvi[3] = ( gal[1]*((pf[0]-pf[3])*sr2 + fvi[5] *sr1))*d;
return;
}
// Each thread corresponds to an event
// Each thread takes 5 float arrays of size 4 each
// Each thread saves 8 complex arrays of size 6 each
__global__ void Uux3a(float *P_d, cmplx *Amp_d, int nEvents){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= nEvents)  // valid thread indices run 0..nEvents-1
return;
	//first term gets us to the correct event in P_d, the second one gets us the corresponding 4-momentum for each particle
float *p1 = &P_d[idx*5*4 + 4*0];
float *p2 = &P_d[idx*5*4 + 4*1];
float *p3 = &P_d[idx*5*4 + 4*2];
float *p4 = &P_d[idx*5*4 + 4*3];
float *p5 = &P_d[idx*5*4 + 4*4];
// coupling constants of FFV vertex, using meaningless fillers
float gau[2];
gau[0] = 5123.51;
gau[1] = 3109.64;
//twice fermion helicity (-1 or 1), using meaningless fillers
int nh1 = -1;
int nh2 = 1;
int nh3 = -1;
int nh4 = -1;
int nh5 = 1;
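	// each wavefunction array holds 6 complex numbers: components [0..3] plus the particle's
	// four-momentum packed (up to a sign convention) into [4] = (E, pz) and [5] = (px, py),
	// as filled in by ixxxx1/oxxxx2/vxxxx0 above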
cmplx w01[6], w02[6], w03[6], w04[6], w05[6];
ixxxx1(p1, nh1, +1, w01);
oxxxx2(p2, nh2, -1, w02);
vxxxx0(p3, nh3, +1, w03);
vxxxx0(p4, nh4, +1, w04);
vxxxx0(p5, nh5, +1, w05);
cmplx w06[6], w07[6], w08[6];
cmplx ampsum = mkcmplx(0.0f, 0.0f);
cmplx amp;
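	// the six blocks below attach the three vector wavefunctions w03..w05 to the fermion line in
	// different orders (fvoxx0/fvixx0 dress the outgoing/incoming fermion line); each iovxxx call
	// closes the line into one partial amplitude, which is accumulated into ampsum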
fvoxx0(w02,w03,gau,w06);
fvoxx0(w06,w04,gau,w07);
iovxxx(w01,w07,w05,gau,amp);
ampsum = ampsum + amp;
fvixx0(w01,w04,gau,w07);
fvoxx0(w02,w05,gau,w08);
iovxxx(w07,w08,w03,gau,amp);
ampsum = ampsum + amp;
fvoxx0(w02,w03,gau,w06);
fvixx0(w01,w04,gau,w07);
iovxxx(w07,w06,w05,gau,amp);
ampsum = ampsum + amp;
fvoxx0(w02,w04,gau,w06);
fvixx0(w01,w05,gau,w07);
iovxxx(w07,w06,w03,gau,amp);
ampsum = ampsum + amp;
fvixx0(w01,w03,gau,w07);
fvixx0(w07,w04,gau,w08);
iovxxx(w08,w02,w05,gau,amp);
ampsum = ampsum + amp;
fvixx0(w01,w03,gau,w07);
fvoxx0(w02,w04,gau,w06);
iovxxx(w07,w06,w05,gau,amp);
ampsum = ampsum + amp;
//for some reason copying ampsum is causing errors
Amp_d[idx] = amp;
//Amp_d[idx] = mkcmplx(P_d[idx*5*4 + 4*0], P_d[idx*5*4 + 4*1]);
}
//notice unused parameters for CUDA kernel:
dpCudaUux3a::dpCudaUux3a(cl_context ctx, cl_command_queue q){
workDimension = ONE_D;
//name is same as cl alternative allowing the analysis script to later figure
//out this measurement was from a cuda kernel by inspecting the platform id from dpClient
name = "Uux3a";
hipEventCreate(&begin);
hipEventCreate(&end);
hipGetDevice(&device);
hipGetDeviceProperties(&props, device);
cudaErrChk(hipPeekAtLastError());
}
void dpCudaUux3a::setup(int dataMB, int xLocal, int yLocal, int zLocal){
localSize[0] = localSize[1] = localSize[2] = 1;
nEvents = 1048576*dataMB/(sizeof(float)*5*4);
MB = ( nEvents * sizeof(float)*5*4) / 1048576;
}
void dpCudaUux3a::init(){
//allocate local memory for original array
inputBytes = 5*4*nEvents*sizeof(float);
outputBytes = nEvents*sizeof(cmplx);
	eventsP = (float*) malloc(inputBytes); //4-momentum for each of the 5 particles in an event, for nEvents events
	Amp = (cmplx*) malloc(outputBytes); //Amp = new cmplx[nEvents]; //one complex amplitude per event
if(!eventsP || !Amp)
fprintf(stderr, "error in malloc\n");
generateArray(eventsP, nEvents);
dataParameters.push_back(nEvents);
dataNames.push_back("nEvents");
}
void dpCudaUux3a::memoryCopyOut(){
BEGIN
cudaErrChk( hipMalloc((void **) &eventsP_d, inputBytes ));
cudaErrChk( hipMalloc((void **) &Amp_d, outputBytes ));
cudaErrChk( hipMemcpy(eventsP_d, eventsP, inputBytes, hipMemcpyHostToDevice) );
END
}
void dpCudaUux3a::plan(){
BEGIN
blockSize = props.maxThreadsPerBlock;
lastBlock = 0;
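	// grids are capped at 65535 blocks (the 1-D grid-size limit on older GPUs); any work beyond
	// that is intended to be covered by the repeated, pointer-offset launches in execute()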
nBlocks = nEvents/blockSize; //nblocks = ceil(nEvents/blockSize)
if (nEvents%blockSize != 0)
nBlocks++;
if (nBlocks > 65535)
nBlocks = 65535;
nKernels = nBlocks / 65535;
if (nKernels == 0){
lastBlock = nBlocks; //run normally
}
else
lastBlock = nBlocks % 65535; //run repeated
END
}
int dpCudaUux3a::execute(){
hipError_t err;
int stride = blockSize*nBlocks;
int lastStride = blockSize * lastBlock;
BEGIN
for (int i = 0; i < nKernels; i++){
hipLaunchKernelGGL(( Uux3a) , dim3(nBlocks), dim3(blockSize) , 0, 0, eventsP_d + (i*stride), Amp_d + (i*stride), nEvents - (i*stride));
}
if (lastBlock != 0){
hipLaunchKernelGGL(( Uux3a) , dim3(lastBlock), dim3(blockSize) , 0, 0, eventsP_d + (nKernels*lastStride), Amp_d + (nKernels*lastStride), nEvents - (nKernels*lastStride) );
}
err = hipPeekAtLastError();
cudaErrChk(err);
cudaErrChk(hipDeviceSynchronize());
END
if(err!=hipSuccess)
return -1;
return 0;
}
void dpCudaUux3a::memoryCopyIn(){
BEGIN
cudaErrChk(hipMemcpy(Amp, Amp_d, outputBytes, hipMemcpyDeviceToHost));
END
}
void dpCudaUux3a::cleanUp(){
hipFree(eventsP_d);
hipFree(Amp_d);
	free(eventsP);  // eventsP was allocated with malloc in init(), so release it with free
free(Amp);
}
//5 particles each described by their four-momentum for each event,
void dpCudaUux3a::generateArray(float *eventsP, int nEvents){
int n,j,k;
srand(time(NULL));
for (n=0; n < nEvents; n++){
for (j=0; j<5; j++){
for (k=0; k<4; k++){
eventsP[n*5*4 + 4*j + k]=rand() / (RAND_MAX/99999.9 + 1);
}
}
}
}
| be2debe85af2423515ec471a4ef0bd09531f9174.cu | #include "dpCudaUux3a.hpp"
#include "errorCheck.hpp"
#define cudaErrChk(ans) { cudaAssert((ans), __FILE__, __LINE__); }
#define BEGIN cudaEventRecord(begin, 0);
#define END cudaEventRecord(end, 0); cudaEventSynchronize(end); cudaEventElapsedTime(&delTime, begin, end);
#define rSQRT2 0.707106781186
#define ERR fprintf(stderr, "%d\n" ,__LINE__);
__device__
void ixxxx1(float* p, int nHEL, int nSF, cmplx* fi)
{
float SQP0P3 = sqrtf(p[0]+p[3])*(float)(nSF);
int NH = nHEL*nSF;
fi[4] = mkcmplx(p[0]*(float)(nSF), p[3]*(float)(nSF));
fi[5] = mkcmplx(p[1]*(float)(nSF), p[2]*(float)(nSF));
cmplx CHI = mkcmplx(NH*p[1]*(1.0f/SQP0P3), p[2]*(1.0f/SQP0P3));
cmplx CZERO = mkcmplx(0.0f, 0.0f);
cmplx CSQP0P3 = mkcmplx(SQP0P3, 0.0f);
fi[0]=(NH== 1)*CZERO + (NH==-1)*CHI;
fi[1]=(NH== 1)*CZERO + (NH==-1)*CSQP0P3;
fi[2]=(NH== 1)*CSQP0P3 + (NH==-1)*CZERO;
fi[3]=(NH== 1)*CHI + (NH==-1)*CZERO;
return;
}
__device__
void oxxxx2(float* p, int nHEL, int nSF, cmplx* fo)
{
int NH=nHEL*nSF;
fo[4] = mkcmplx(p[0]*(float)(nSF), p[3]*(float)(nSF));
fo[5] = mkcmplx(p[1]*(float)(nSF), p[2]*(float)(nSF));
cmplx CHI = mkcmplx(-nHEL*sqrtf(2.0f*p[0]), 0.0f);
cmplx CZERO = mkcmplx(0.0f,0.0f);
fo[0]=CZERO;
fo[1]=(NH== 1)*CHI + (NH==-1)*CZERO;
fo[2]=(NH== 1)*CZERO + (NH==-1)*CHI;
fo[3]=CZERO;
return;
}
__device__
void vxxxx0(float* p, int nHEL, int nSV, cmplx* vc)
{
vc[4] = mkcmplx(p[0], p[3])*nSV;
vc[5] = mkcmplx(p[1], p[2])*nSV;
float rpt = rsqrtf(p[1]*p[1] + p[2]*p[2]);
vc[0] = mkcmplx(0.0f, 0.0f);
vc[3] = mkcmplx( (float)(nHEL)*(1.0f/(rpt*p[0]))*rSQRT2, 0.0f);
float pzpt = (p[3]*(1.0f/p[0])*rpt)*rSQRT2 *(float)(nHEL);
vc[1] = mkcmplx(-p[1]*pzpt, -nSV*p[2] * rpt * rSQRT2);
vc[2] = mkcmplx(-p[2]*pzpt,
+nSV*p[1] * rpt * rSQRT2);
return;
}
__device__
void fvoxx0(cmplx* fo, cmplx* vc, float* gal, cmplx* fvo)
{
fvo[4] = fo[4]+vc[4];
fvo[5] = fo[5]+vc[5];
float pf[4];
pf[0] = fvo[4].re;
pf[1] = fvo[5].re;
pf[2] = fvo[5].im;
pf[3] = fvo[4].im;
float pf2 = pf[0]*pf[0] - (pf[1]*pf[1] + pf[2]*pf[2] + pf[3]*pf[3]);
cmplx cI = mkcmplx( 0.0f, 1.0f);
float d = -1.0f/pf2;
cmplx sl1 = (vc[0] + vc[3])*fo[2] + (vc[1] + cI*vc[2])*fo[3];
cmplx sl2 = (vc[0] - vc[3])*fo[3] + (vc[1] - cI*vc[2])*fo[2];
cmplx sr1 = (vc[0] - vc[3])*fo[0] - (vc[1] + cI*vc[2])*fo[1];
cmplx sr2 = (vc[0] + vc[3])*fo[1] - (vc[1] - cI*vc[2])*fo[0];
fvo[0] = ( gal[1]*((pf[0]+pf[3])*sr1 + fvo[5] *sr2 ))*d;
fvo[1] = ( gal[1]*((pf[0]-pf[3])*sr2 + conj(fvo[5])*sr1 ))*d;
fvo[2] = ( gal[0]*((pf[0]-pf[3])*sl1 - fvo[5] *sl2 ))*d;
fvo[3] = ( gal[0]*((pf[0]+pf[3])*sl2 - conj(fvo[5])*sl1 ))*d;
return;
}
//note: this was defined as iovxx0() in http://arxiv.org/pdf/0908.4403.pdf pg 12
__device__
void iovxxx(cmplx* fi, cmplx* fo, cmplx* vc, float* gal, cmplx& vertex)
{
vertex =
gal[0]*((fo[2]*fi[0]+fo[3]*fi[1])*vc[0]
+(fo[2]*fi[1]+fo[3]*fi[0])*vc[1]
-((fo[2]*fi[1]-fo[3]*fi[0])*vc[2])
*mkcmplx(0.0f, 1.0f)
+(fo[2]*fi[0]-fo[3]*fi[1])*vc[3])
+gal[1]*((fo[0]*fi[2]+fo[1]*fi[3])*vc[0]
-(fo[0]*fi[3]+fo[1]*fi[2])*vc[1]
+((fo[0]*fi[3]-fo[1]*fi[2])*vc[2])
*mkcmplx(0.0f, 1.0f)
-(fo[0]*fi[2]-fo[1]*fi[3])*vc[3]);
return;
}
__device__
void fvixx0(cmplx* fi, cmplx* vc, float* gal, cmplx* fvi)
{
fvi[4] = fi[4]-vc[4];
fvi[5] = fi[5]-vc[5];
float pf[4];
pf[0] = fvi[4].re;
pf[1] = fvi[5].re;
pf[2] = fvi[5].im;
pf[3] = fvi[4].im;
float pf2 = pf[0]*pf[0] - (pf[1]*pf[1] + pf[2]*pf[2] + pf[3]*pf[3]);
cmplx cI = mkcmplx( 0.0f, 1.0f);
float d = -1.0f/pf2;
cmplx sl1 = (vc[0] + vc[3])*fi[0] + (vc[1] - cI*vc[2])*fi[1];
cmplx sl2 = (vc[0] - vc[3])*fi[1] + (vc[1] + cI*vc[2])*fi[0];
cmplx sr1 = (vc[0] - vc[3])*fi[2] - (vc[1] - cI*vc[2])*fi[3];
cmplx sr2 = (vc[0] + vc[3])*fi[3] - (vc[1] + cI*vc[2])*fi[2];
fvi[0] = ( gal[0]*((pf[0]-pf[3])*sl1 - conj(fvi[5])*sl2))*d;
fvi[1] = ( gal[0]*((pf[0]+pf[3])*sl2 - fvi[5] *sl1))*d;
fvi[2] = ( gal[1]*((pf[0]+pf[3])*sr1 + conj(fvi[5])*sr2))*d;
fvi[3] = ( gal[1]*((pf[0]-pf[3])*sr2 + fvi[5] *sr1))*d;
return;
}
// Each thread corresponds to an event
// Each thread takes 5 float arrays of size 4 each
// Each thread saves 8 complex arrays of size 6 each
__global__ void Uux3a(float *P_d, cmplx *Amp_d, int nEvents){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= nEvents)  // valid thread indices run 0..nEvents-1
return;
	//first term gets us to the correct event in P_d, the second one gets us the corresponding 4-momentum for each particle
float *p1 = &P_d[idx*5*4 + 4*0];
float *p2 = &P_d[idx*5*4 + 4*1];
float *p3 = &P_d[idx*5*4 + 4*2];
float *p4 = &P_d[idx*5*4 + 4*3];
float *p5 = &P_d[idx*5*4 + 4*4];
// coupling constants of FFV vertex, using meaningless fillers
float gau[2];
gau[0] = 5123.51;
gau[1] = 3109.64;
//twice fermion helicity (-1 or 1), using meaningless fillers
int nh1 = -1;
int nh2 = 1;
int nh3 = -1;
int nh4 = -1;
int nh5 = 1;
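	// each wavefunction array holds 6 complex numbers: components [0..3] plus the particle's
	// four-momentum packed (up to a sign convention) into [4] = (E, pz) and [5] = (px, py),
	// as filled in by ixxxx1/oxxxx2/vxxxx0 above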
cmplx w01[6], w02[6], w03[6], w04[6], w05[6];
ixxxx1(p1, nh1, +1, w01);
oxxxx2(p2, nh2, -1, w02);
vxxxx0(p3, nh3, +1, w03);
vxxxx0(p4, nh4, +1, w04);
vxxxx0(p5, nh5, +1, w05);
cmplx w06[6], w07[6], w08[6];
cmplx ampsum = mkcmplx(0.0f, 0.0f);
cmplx amp;
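	// the six blocks below attach the three vector wavefunctions w03..w05 to the fermion line in
	// different orders (fvoxx0/fvixx0 dress the outgoing/incoming fermion line); each iovxxx call
	// closes the line into one partial amplitude, which is accumulated into ampsum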
fvoxx0(w02,w03,gau,w06);
fvoxx0(w06,w04,gau,w07);
iovxxx(w01,w07,w05,gau,amp);
ampsum = ampsum + amp;
fvixx0(w01,w04,gau,w07);
fvoxx0(w02,w05,gau,w08);
iovxxx(w07,w08,w03,gau,amp);
ampsum = ampsum + amp;
fvoxx0(w02,w03,gau,w06);
fvixx0(w01,w04,gau,w07);
iovxxx(w07,w06,w05,gau,amp);
ampsum = ampsum + amp;
fvoxx0(w02,w04,gau,w06);
fvixx0(w01,w05,gau,w07);
iovxxx(w07,w06,w03,gau,amp);
ampsum = ampsum + amp;
fvixx0(w01,w03,gau,w07);
fvixx0(w07,w04,gau,w08);
iovxxx(w08,w02,w05,gau,amp);
ampsum = ampsum + amp;
fvixx0(w01,w03,gau,w07);
fvoxx0(w02,w04,gau,w06);
iovxxx(w07,w06,w05,gau,amp);
ampsum = ampsum + amp;
//for some reason copying ampsum is causing errors
Amp_d[idx] = amp;
//Amp_d[idx] = mkcmplx(P_d[idx*5*4 + 4*0], P_d[idx*5*4 + 4*1]);
}
//notice unused parameters for CUDA kernel:
dpCudaUux3a::dpCudaUux3a(cl_context ctx, cl_command_queue q){
workDimension = ONE_D;
//name is same as cl alternative allowing the analysis script to later figure
//out this measurement was from a cuda kernel by inspecting the platform id from dpClient
name = "Uux3a";
cudaEventCreate(&begin);
cudaEventCreate(&end);
cudaGetDevice(&device);
cudaGetDeviceProperties(&props, device);
cudaErrChk(cudaPeekAtLastError());
}
void dpCudaUux3a::setup(int dataMB, int xLocal, int yLocal, int zLocal){
localSize[0] = localSize[1] = localSize[2] = 1;
nEvents = 1048576*dataMB/(sizeof(float)*5*4);
MB = ( nEvents * sizeof(float)*5*4) / 1048576;
}
void dpCudaUux3a::init(){
//allocate local memory for original array
inputBytes = 5*4*nEvents*sizeof(float);
outputBytes = nEvents*sizeof(cmplx);
	eventsP = (float*) malloc(inputBytes); //4-momentum for each of the 5 particles in an event, for nEvents events
	Amp = (cmplx*) malloc(outputBytes); //Amp = new cmplx[nEvents]; //one complex amplitude per event
if(!eventsP || !Amp)
fprintf(stderr, "error in malloc\n");
generateArray(eventsP, nEvents);
dataParameters.push_back(nEvents);
dataNames.push_back("nEvents");
}
void dpCudaUux3a::memoryCopyOut(){
BEGIN
cudaErrChk( cudaMalloc((void **) &eventsP_d, inputBytes ));
cudaErrChk( cudaMalloc((void **) &Amp_d, outputBytes ));
cudaErrChk( cudaMemcpy(eventsP_d, eventsP, inputBytes, cudaMemcpyHostToDevice) );
END
}
void dpCudaUux3a::plan(){
BEGIN
blockSize = props.maxThreadsPerBlock;
lastBlock = 0;
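	// grids are capped at 65535 blocks (the 1-D grid-size limit on older GPUs); any work beyond
	// that is intended to be covered by the repeated, pointer-offset launches in execute()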
nBlocks = nEvents/blockSize; //nblocks = ceil(nEvents/blockSize)
if (nEvents%blockSize != 0)
nBlocks++;
if (nBlocks > 65535)
nBlocks = 65535;
nKernels = nBlocks / 65535;
if (nKernels == 0){
lastBlock = nBlocks; //run normally
}
else
lastBlock = nBlocks % 65535; //run repeated
END
}
int dpCudaUux3a::execute(){
cudaError_t err;
int stride = blockSize*nBlocks;
int lastStride = blockSize * lastBlock;
BEGIN
for (int i = 0; i < nKernels; i++){
Uux3a <<< nBlocks, blockSize >>> (eventsP_d + (i*stride), Amp_d + (i*stride), nEvents - (i*stride));
}
if (lastBlock != 0){
Uux3a <<<lastBlock, blockSize >>> (eventsP_d + (nKernels*lastStride), Amp_d + (nKernels*lastStride), nEvents - (nKernels*lastStride) );
}
err = cudaPeekAtLastError();
cudaErrChk(err);
cudaErrChk(cudaDeviceSynchronize());
END
if(err!=cudaSuccess)
return -1;
return 0;
}
void dpCudaUux3a::memoryCopyIn(){
BEGIN
cudaErrChk(cudaMemcpy(Amp, Amp_d, outputBytes, cudaMemcpyDeviceToHost));
END
}
void dpCudaUux3a::cleanUp(){
cudaFree(eventsP_d);
cudaFree(Amp_d);
	free(eventsP);  // eventsP was allocated with malloc in init(), so release it with free
free(Amp);
}
//5 particles each described by their four-momentum for each event,
void dpCudaUux3a::generateArray(float *eventsP, int nEvents){
int n,j,k;
srand(time(NULL));
for (n=0; n < nEvents; n++){
for (j=0; j<5; j++){
for (k=0; k<4; k++){
eventsP[n*5*4 + 4*j + k]=rand() / (RAND_MAX/99999.9 + 1);
}
}
}
}
|
c4f1cffe3a2bdef0ae944120a04679526951ff15.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 Microsoft
* Licensed under The Apache-2.0 License [see LICENSE for details]
* \file deformable_psroi_pooling.cu
* \brief
* \author Yi Li, Guodong Zhang, Jifeng Dai
*/
#include "./deformable_psroi_pooling-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
#include "../../common/cuda_utils.h"
#include "../mxnet_op.h"
#define DeformablePSROIPOOLING_CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \
} while (0)
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
namespace mshadow {
namespace cuda {
template <typename DType>
__device__ DType bilinear_interp(
const DType* data,
const DType x,
const DType y,
const int width,
const int height) {
int x1 = floor(x);
int x2 = ceil(x);
int y1 = floor(y);
int y2 = ceil(y);
DType dist_x = static_cast<DType>(x - x1);
DType dist_y = static_cast<DType>(y - y1);
DType value11 = data[y1*width + x1];
DType value12 = data[y2*width + x1];
DType value21 = data[y1*width + x2];
DType value22 = data[y2*width + x2];
DType value = (1 - dist_x)*(1 - dist_y)*value11 + (1 - dist_x)*dist_y*value12
+ dist_x*(1 - dist_y)*value21 + dist_x*dist_y*value22;
return value;
}
template <typename DType>
__global__ void DeformablePSROIPoolForwardKernel(
const int count,
const DType* bottom_data,
const DType spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const DType* bottom_rois, const DType* bottom_trans,
const bool no_trans,
const DType trans_std,
const int sample_per_part,
const int output_dim,
const int group_size,
const int part_size,
const int num_classes,
const int channels_each_class,
DType* top_data,
DType* top_count) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const DType* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale - 0.5;
DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
// Force too small ROIs to be 1x1
DType roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
DType roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
DType bin_size_h = roi_height / static_cast<DType>(pooled_height);
DType bin_size_w = roi_width / static_cast<DType>(pooled_width);
DType sub_bin_size_h = bin_size_h / static_cast<DType>(sample_per_part);
DType sub_bin_size_w = bin_size_w / static_cast<DType>(sample_per_part);
int part_h = floor(static_cast<DType>(ph) / pooled_height*part_size);
int part_w = floor(static_cast<DType>(pw) / pooled_width*part_size);
int class_id = ctop / channels_each_class;
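    // deformable part: an (x, y) offset read from bottom_trans and scaled by trans_std shifts this
    // bin's sampling window by that fraction of the RoI width/height before the sub-samples are taken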
DType trans_x = no_trans ? static_cast<DType>(0) :
bottom_trans[(((n * num_classes + class_id) * 2)
* part_size + part_h)
* part_size + part_w] * trans_std;
DType trans_y = no_trans ? static_cast<DType>(0) :
bottom_trans[(((n * num_classes + class_id) * 2 + 1)
* part_size + part_h)
* part_size + part_w] * trans_std;
DType wstart = static_cast<DType>(pw)* bin_size_w
+ roi_start_w;
wstart += trans_x * roi_width;
DType hstart = static_cast<DType>(ph) * bin_size_h
+ roi_start_h;
hstart += trans_y * roi_height;
DType sum = 0;
int count = 0;
int gw = floor(static_cast<DType>(pw) * group_size / pooled_width);
int gh = floor(static_cast<DType>(ph)* group_size / pooled_height);
gw = min(max(gw, 0), group_size - 1);
gh = min(max(gh, 0), group_size - 1);
const DType* offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width;
for (int ih = 0; ih < sample_per_part; ih++) {
for (int iw = 0; iw < sample_per_part; iw++) {
DType w = wstart + iw*sub_bin_size_w;
DType h = hstart + ih*sub_bin_size_h;
// bilinear interpolation
if (w<-0.5 || w>width - 0.5 || h<-0.5 || h>height - 0.5) {
continue;
}
w = min(max(w, 0.), width - 1.);
h = min(max(h, 0.), height - 1.);
int c = (ctop*group_size + gh)*group_size + gw;
DType val = bilinear_interp(offset_bottom_data + c*height*width, w, h, width, height);
sum += val;
count++;
}
}
top_data[index] = count == 0 ? static_cast<DType>(0) : sum / count;
top_count[index] = count;
}
}
template<typename DType>
inline void DeformablePSROIPoolForward(const Tensor<gpu, 4, DType> &out,
const Tensor<gpu, 4, DType> &data,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &trans,
const Tensor<gpu, 4, DType> &top_count,
const bool no_trans,
const float spatial_scale,
const int output_dim,
const int group_size,
const int pooled_size,
const int part_size,
const int sample_per_part,
const float trans_std) {
// LOG(INFO) << "DeformablePSROIPoolForward";
const DType *bottom_data = data.dptr_;
const DType *bottom_rois = bbox.dptr_;
const DType *bottom_trans = no_trans ? NULL : trans.dptr_;
DType *top_data = out.dptr_;
DType *top_count_data = top_count.dptr_;
const int count = out.shape_.Size();
const int channels = data.size(1);
const int height = data.size(2);
const int width = data.size(3);
const int pooled_height = pooled_size;
const int pooled_width = pooled_size;
const int num_classes = no_trans ? 1 : trans.size(1) / 2;
const int channels_each_class = no_trans ? output_dim : output_dim / num_classes;
hipStream_t stream = Stream<gpu>::GetStream(out.stream_);
DeformablePSROIPoolForwardKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count),
kBaseThreadNum, 0, stream >> >(
count, bottom_data, spatial_scale, channels, height, width, pooled_height, pooled_width,
bottom_rois, bottom_trans, no_trans, trans_std, sample_per_part, output_dim,
group_size, part_size, num_classes, channels_each_class, top_data, top_count_data);
DeformablePSROIPOOLING_CUDA_CHECK(hipPeekAtLastError());
}
template <typename DType>
__global__ void DeformablePSROIPoolBackwardAccKernel(
const int count,
const DType* top_diff,
const DType* top_count,
const int num_rois,
const DType spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int output_dim,
DType* bottom_data_diff, DType* bottom_trans_diff,
const DType* bottom_data,
const DType* bottom_rois,
const DType* bottom_trans,
const bool no_trans,
const DType trans_std,
const int sample_per_part,
const int group_size,
const int part_size,
const int num_classes,
const int channels_each_class) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const DType* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale - 0.5;
DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
// Force too small ROIs to be 1x1
DType roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
DType roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
DType bin_size_h = roi_height / static_cast<DType>(pooled_height);
DType bin_size_w = roi_width / static_cast<DType>(pooled_width);
DType sub_bin_size_h = bin_size_h / static_cast<DType>(sample_per_part);
DType sub_bin_size_w = bin_size_w / static_cast<DType>(sample_per_part);
int part_h = floor(static_cast<DType>(ph) / pooled_height*part_size);
int part_w = floor(static_cast<DType>(pw) / pooled_width*part_size);
int class_id = ctop / channels_each_class;
DType trans_x = no_trans ? static_cast<DType>(0) :
bottom_trans[(((n * num_classes + class_id) * 2)
* part_size + part_h)
* part_size + part_w] * trans_std;
DType trans_y = no_trans ? static_cast<DType>(0) :
bottom_trans[(((n * num_classes + class_id) * 2 + 1)
* part_size + part_h)
* part_size + part_w] * trans_std;
DType wstart = static_cast<DType>(pw)* bin_size_w
+ roi_start_w;
wstart += trans_x * roi_width;
DType hstart = static_cast<DType>(ph) * bin_size_h
+ roi_start_h;
hstart += trans_y * roi_height;
if (top_count[index] <= 0) {
continue;
}
DType diff_val = top_diff[index] / top_count[index];
const DType* offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width;
DType* offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width;
int gw = floor(static_cast<DType>(pw)* group_size / pooled_width);
int gh = floor(static_cast<DType>(ph)* group_size / pooled_height);
gw = min(max(gw, 0), group_size - 1);
gh = min(max(gh, 0), group_size - 1);
for (int ih = 0; ih < sample_per_part; ih++) {
for (int iw = 0; iw < sample_per_part; iw++) {
DType w = wstart + iw*sub_bin_size_w;
DType h = hstart + ih*sub_bin_size_h;
// bilinear interpolation
if (w<-0.5 || w>width - 0.5 || h<-0.5 || h>height - 0.5) {
continue;
}
w = min(max(w, 0.), width - 1.);
h = min(max(h, 0.), height - 1.);
int c = (ctop*group_size + gh)*group_size + gw;
// backward on feature
int x0 = floor(w);
int x1 = ceil(w);
int y0 = floor(h);
int y1 = ceil(h);
DType dist_x = w - x0, dist_y = h - y0;
DType q00 = (1 - dist_x)*(1 - dist_y);
DType q01 = (1 - dist_x)*dist_y;
DType q10 = dist_x*(1 - dist_y);
DType q11 = dist_x*dist_y;
int bottom_index_base = c * height *width;
atomicAdd(offset_bottom_data_diff + bottom_index_base + y0*width + x0, q00*diff_val);
atomicAdd(offset_bottom_data_diff + bottom_index_base + y1*width + x0, q01*diff_val);
atomicAdd(offset_bottom_data_diff + bottom_index_base + y0*width + x1, q10*diff_val);
atomicAdd(offset_bottom_data_diff + bottom_index_base + y1*width + x1, q11*diff_val);
if (no_trans) {
continue;
}
DType U00 = offset_bottom_data[bottom_index_base + y0*width + x0];
DType U01 = offset_bottom_data[bottom_index_base + y1*width + x0];
DType U10 = offset_bottom_data[bottom_index_base + y0*width + x1];
DType U11 = offset_bottom_data[bottom_index_base + y1*width + x1];
DType diff_x = (U11*dist_y + U10*(1 - dist_y) - U01*dist_y - U00*(1 - dist_y))
*trans_std*diff_val;
diff_x *= roi_width;
DType diff_y = (U11*dist_x + U01*(1 - dist_x) - U10*dist_x - U00*(1 - dist_x))
*trans_std*diff_val;
diff_y *= roi_height;
atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2)
* part_size + part_h)
* part_size + part_w, diff_x);
atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1)
* part_size + part_h)
* part_size + part_w, diff_y);
}
}
}
}
template<typename DType>
inline void DeformablePSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad,
const Tensor<gpu, 4, DType> &trans_grad,
const Tensor<gpu, 4, DType> &out_grad,
const Tensor<gpu, 4, DType> &data,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &trans,
const Tensor<gpu, 4, DType> &top_count,
const bool no_trans,
const float spatial_scale,
const int output_dim,
const int group_size,
const int pooled_size,
const int part_size,
const int sample_per_part,
const float trans_std) {
// LOG(INFO) << "DeformablePSROIPoolBackward";
const DType *top_diff = out_grad.dptr_;
const DType *bottom_data = data.dptr_;
const DType *bottom_rois = bbox.dptr_;
const DType *bottom_trans = no_trans ? NULL : trans.dptr_;
DType *bottom_data_diff = in_grad.dptr_;
DType *bottom_trans_diff = no_trans ? NULL : trans_grad.dptr_;
const DType *top_count_data = top_count.dptr_;
const int count = out_grad.shape_.Size();
const int num_rois = bbox.size(0);
const int channels = in_grad.size(1);
const int height = in_grad.size(2);
const int width = in_grad.size(3);
const int pooled_height = pooled_size;
const int pooled_width = pooled_size;
const int num_classes = no_trans ? 1 : trans_grad.size(1) / 2;
const int channels_each_class = no_trans ? output_dim : output_dim / num_classes;
hipStream_t stream = Stream<gpu>::GetStream(in_grad.stream_);
DeformablePSROIPoolBackwardAccKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count),
kBaseThreadNum, 0, stream >> >(
count, top_diff, top_count_data, num_rois, spatial_scale, channels, height, width,
pooled_height, pooled_width, output_dim, bottom_data_diff, bottom_trans_diff,
bottom_data, bottom_rois, bottom_trans, no_trans, trans_std, sample_per_part,
group_size, part_size, num_classes, channels_each_class);
DeformablePSROIPOOLING_CUDA_CHECK(hipPeekAtLastError());
}
} // namespace cuda
template<typename DType>
inline void DeformablePSROIPoolForward(const Tensor<gpu, 4, DType> &out,
const Tensor<gpu, 4, DType> &data,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &trans,
const Tensor<gpu, 4, DType> &top_count,
const bool no_trans,
const float spatial_scale,
const int output_dim,
const int group_size,
const int pooled_size,
const int part_size,
const int sample_per_part,
const float trans_std) {
cuda::DeformablePSROIPoolForward(out, data, bbox, trans, top_count, no_trans, spatial_scale,
output_dim, group_size, pooled_size, part_size, sample_per_part, trans_std);
}
template<typename DType>
inline void DeformablePSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad,
const Tensor<gpu, 4, DType> &trans_grad,
const Tensor<gpu, 4, DType> &out_grad,
const Tensor<gpu, 4, DType> &data,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &trans,
const Tensor<gpu, 4, DType> &top_count,
const bool no_trans,
const float spatial_scale,
const int output_dim,
const int group_size,
const int pooled_size,
const int part_size,
const int sample_per_part,
const float trans_std) {
cuda::DeformablePSROIPoolBackwardAcc(in_grad, trans_grad, out_grad, data, bbox, trans,
top_count, no_trans, spatial_scale, output_dim, group_size, pooled_size, part_size,
sample_per_part, trans_std);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(DeformablePSROIPoolingParam param, int dtype) {
Operator* op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new DeformablePSROIPoolingOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet
| c4f1cffe3a2bdef0ae944120a04679526951ff15.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 Microsoft
* Licensed under The Apache-2.0 License [see LICENSE for details]
* \file deformable_psroi_pooling.cu
* \brief
* \author Yi Li, Guodong Zhang, Jifeng Dai
*/
#include "./deformable_psroi_pooling-inl.h"
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
#include "../../common/cuda_utils.h"
#include "../mxnet_op.h"
#define DeformablePSROIPOOLING_CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \
} while (0)
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
namespace mshadow {
namespace cuda {
template <typename DType>
__device__ DType bilinear_interp(
const DType* data,
const DType x,
const DType y,
const int width,
const int height) {
int x1 = floor(x);
int x2 = ceil(x);
int y1 = floor(y);
int y2 = ceil(y);
DType dist_x = static_cast<DType>(x - x1);
DType dist_y = static_cast<DType>(y - y1);
DType value11 = data[y1*width + x1];
DType value12 = data[y2*width + x1];
DType value21 = data[y1*width + x2];
DType value22 = data[y2*width + x2];
DType value = (1 - dist_x)*(1 - dist_y)*value11 + (1 - dist_x)*dist_y*value12
+ dist_x*(1 - dist_y)*value21 + dist_x*dist_y*value22;
return value;
}
template <typename DType>
__global__ void DeformablePSROIPoolForwardKernel(
const int count,
const DType* bottom_data,
const DType spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const DType* bottom_rois, const DType* bottom_trans,
const bool no_trans,
const DType trans_std,
const int sample_per_part,
const int output_dim,
const int group_size,
const int part_size,
const int num_classes,
const int channels_each_class,
DType* top_data,
DType* top_count) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const DType* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale - 0.5;
DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
// Force too small ROIs to be 1x1
DType roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
DType roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
DType bin_size_h = roi_height / static_cast<DType>(pooled_height);
DType bin_size_w = roi_width / static_cast<DType>(pooled_width);
DType sub_bin_size_h = bin_size_h / static_cast<DType>(sample_per_part);
DType sub_bin_size_w = bin_size_w / static_cast<DType>(sample_per_part);
int part_h = floor(static_cast<DType>(ph) / pooled_height*part_size);
int part_w = floor(static_cast<DType>(pw) / pooled_width*part_size);
int class_id = ctop / channels_each_class;
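    // deformable part: an (x, y) offset read from bottom_trans and scaled by trans_std shifts this
    // bin's sampling window by that fraction of the RoI width/height before the sub-samples are taken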
DType trans_x = no_trans ? static_cast<DType>(0) :
bottom_trans[(((n * num_classes + class_id) * 2)
* part_size + part_h)
* part_size + part_w] * trans_std;
DType trans_y = no_trans ? static_cast<DType>(0) :
bottom_trans[(((n * num_classes + class_id) * 2 + 1)
* part_size + part_h)
* part_size + part_w] * trans_std;
DType wstart = static_cast<DType>(pw)* bin_size_w
+ roi_start_w;
wstart += trans_x * roi_width;
DType hstart = static_cast<DType>(ph) * bin_size_h
+ roi_start_h;
hstart += trans_y * roi_height;
DType sum = 0;
int count = 0;
int gw = floor(static_cast<DType>(pw) * group_size / pooled_width);
int gh = floor(static_cast<DType>(ph)* group_size / pooled_height);
gw = min(max(gw, 0), group_size - 1);
gh = min(max(gh, 0), group_size - 1);
const DType* offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width;
for (int ih = 0; ih < sample_per_part; ih++) {
for (int iw = 0; iw < sample_per_part; iw++) {
DType w = wstart + iw*sub_bin_size_w;
DType h = hstart + ih*sub_bin_size_h;
// bilinear interpolation
if (w<-0.5 || w>width - 0.5 || h<-0.5 || h>height - 0.5) {
continue;
}
w = min(max(w, 0.), width - 1.);
h = min(max(h, 0.), height - 1.);
int c = (ctop*group_size + gh)*group_size + gw;
DType val = bilinear_interp(offset_bottom_data + c*height*width, w, h, width, height);
sum += val;
count++;
}
}
top_data[index] = count == 0 ? static_cast<DType>(0) : sum / count;
top_count[index] = count;
}
}
template<typename DType>
inline void DeformablePSROIPoolForward(const Tensor<gpu, 4, DType> &out,
const Tensor<gpu, 4, DType> &data,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &trans,
const Tensor<gpu, 4, DType> &top_count,
const bool no_trans,
const float spatial_scale,
const int output_dim,
const int group_size,
const int pooled_size,
const int part_size,
const int sample_per_part,
const float trans_std) {
// LOG(INFO) << "DeformablePSROIPoolForward";
const DType *bottom_data = data.dptr_;
const DType *bottom_rois = bbox.dptr_;
const DType *bottom_trans = no_trans ? NULL : trans.dptr_;
DType *top_data = out.dptr_;
DType *top_count_data = top_count.dptr_;
const int count = out.shape_.Size();
const int channels = data.size(1);
const int height = data.size(2);
const int width = data.size(3);
const int pooled_height = pooled_size;
const int pooled_width = pooled_size;
const int num_classes = no_trans ? 1 : trans.size(1) / 2;
const int channels_each_class = no_trans ? output_dim : output_dim / num_classes;
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_);
DeformablePSROIPoolForwardKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count),
kBaseThreadNum, 0, stream >> >(
count, bottom_data, spatial_scale, channels, height, width, pooled_height, pooled_width,
bottom_rois, bottom_trans, no_trans, trans_std, sample_per_part, output_dim,
group_size, part_size, num_classes, channels_each_class, top_data, top_count_data);
DeformablePSROIPOOLING_CUDA_CHECK(cudaPeekAtLastError());
}
template <typename DType>
__global__ void DeformablePSROIPoolBackwardAccKernel(
const int count,
const DType* top_diff,
const DType* top_count,
const int num_rois,
const DType spatial_scale,
const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int output_dim,
DType* bottom_data_diff, DType* bottom_trans_diff,
const DType* bottom_data,
const DType* bottom_rois,
const DType* bottom_trans,
const bool no_trans,
const DType trans_std,
const int sample_per_part,
const int group_size,
const int part_size,
const int num_classes,
const int channels_each_class) {
CUDA_KERNEL_LOOP(index, count) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const DType* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale - 0.5;
DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;
// Force too small ROIs to be 1x1
DType roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
DType roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
DType bin_size_h = roi_height / static_cast<DType>(pooled_height);
DType bin_size_w = roi_width / static_cast<DType>(pooled_width);
DType sub_bin_size_h = bin_size_h / static_cast<DType>(sample_per_part);
DType sub_bin_size_w = bin_size_w / static_cast<DType>(sample_per_part);
int part_h = floor(static_cast<DType>(ph) / pooled_height*part_size);
int part_w = floor(static_cast<DType>(pw) / pooled_width*part_size);
int class_id = ctop / channels_each_class;
DType trans_x = no_trans ? static_cast<DType>(0) :
bottom_trans[(((n * num_classes + class_id) * 2)
* part_size + part_h)
* part_size + part_w] * trans_std;
DType trans_y = no_trans ? static_cast<DType>(0) :
bottom_trans[(((n * num_classes + class_id) * 2 + 1)
* part_size + part_h)
* part_size + part_w] * trans_std;
DType wstart = static_cast<DType>(pw)* bin_size_w
+ roi_start_w;
wstart += trans_x * roi_width;
DType hstart = static_cast<DType>(ph) * bin_size_h
+ roi_start_h;
hstart += trans_y * roi_height;
if (top_count[index] <= 0) {
continue;
}
DType diff_val = top_diff[index] / top_count[index];
const DType* offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width;
DType* offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width;
int gw = floor(static_cast<DType>(pw)* group_size / pooled_width);
int gh = floor(static_cast<DType>(ph)* group_size / pooled_height);
gw = min(max(gw, 0), group_size - 1);
gh = min(max(gh, 0), group_size - 1);
for (int ih = 0; ih < sample_per_part; ih++) {
for (int iw = 0; iw < sample_per_part; iw++) {
DType w = wstart + iw*sub_bin_size_w;
DType h = hstart + ih*sub_bin_size_h;
// bilinear interpolation
if (w<-0.5 || w>width - 0.5 || h<-0.5 || h>height - 0.5) {
continue;
}
w = min(max(w, 0.), width - 1.);
h = min(max(h, 0.), height - 1.);
int c = (ctop*group_size + gh)*group_size + gw;
// backward on feature
int x0 = floor(w);
int x1 = ceil(w);
int y0 = floor(h);
int y1 = ceil(h);
DType dist_x = w - x0, dist_y = h - y0;
DType q00 = (1 - dist_x)*(1 - dist_y);
DType q01 = (1 - dist_x)*dist_y;
DType q10 = dist_x*(1 - dist_y);
DType q11 = dist_x*dist_y;
int bottom_index_base = c * height *width;
atomicAdd(offset_bottom_data_diff + bottom_index_base + y0*width + x0, q00*diff_val);
atomicAdd(offset_bottom_data_diff + bottom_index_base + y1*width + x0, q01*diff_val);
atomicAdd(offset_bottom_data_diff + bottom_index_base + y0*width + x1, q10*diff_val);
atomicAdd(offset_bottom_data_diff + bottom_index_base + y1*width + x1, q11*diff_val);
if (no_trans) {
continue;
}
DType U00 = offset_bottom_data[bottom_index_base + y0*width + x0];
DType U01 = offset_bottom_data[bottom_index_base + y1*width + x0];
DType U10 = offset_bottom_data[bottom_index_base + y0*width + x1];
DType U11 = offset_bottom_data[bottom_index_base + y1*width + x1];
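// Gradient of the bilinearly interpolated value w.r.t. the sampling
// location (dV/dw, dV/dh) from the four corner values, scaled by
// trans_std and the ROI extent to match the forward mapping, where the
// offset moves the sample by trans * roi_width / roi_height.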
DType diff_x = (U11*dist_y + U10*(1 - dist_y) - U01*dist_y - U00*(1 - dist_y))
*trans_std*diff_val;
diff_x *= roi_width;
DType diff_y = (U11*dist_x + U01*(1 - dist_x) - U10*dist_x - U00*(1 - dist_x))
*trans_std*diff_val;
diff_y *= roi_height;
atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2)
* part_size + part_h)
* part_size + part_w, diff_x);
atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1)
* part_size + part_h)
* part_size + part_w, diff_y);
}
}
}
}
template<typename DType>
inline void DeformablePSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad,
const Tensor<gpu, 4, DType> &trans_grad,
const Tensor<gpu, 4, DType> &out_grad,
const Tensor<gpu, 4, DType> &data,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &trans,
const Tensor<gpu, 4, DType> &top_count,
const bool no_trans,
const float spatial_scale,
const int output_dim,
const int group_size,
const int pooled_size,
const int part_size,
const int sample_per_part,
const float trans_std) {
// LOG(INFO) << "DeformablePSROIPoolBackward";
const DType *top_diff = out_grad.dptr_;
const DType *bottom_data = data.dptr_;
const DType *bottom_rois = bbox.dptr_;
const DType *bottom_trans = no_trans ? NULL : trans.dptr_;
DType *bottom_data_diff = in_grad.dptr_;
DType *bottom_trans_diff = no_trans ? NULL : trans_grad.dptr_;
const DType *top_count_data = top_count.dptr_;
const int count = out_grad.shape_.Size();
const int num_rois = bbox.size(0);
const int channels = in_grad.size(1);
const int height = in_grad.size(2);
const int width = in_grad.size(3);
const int pooled_height = pooled_size;
const int pooled_width = pooled_size;
const int num_classes = no_trans ? 1 : trans_grad.size(1) / 2;
const int channels_each_class = no_trans ? output_dim : output_dim / num_classes;
cudaStream_t stream = Stream<gpu>::GetStream(in_grad.stream_);
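// One thread per output gradient element: cuda_get_num_blocks(count)
// sizes the grid and CUDA_KERNEL_LOOP strides over any remainder.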
DeformablePSROIPoolBackwardAccKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count),
kBaseThreadNum, 0, stream >> >(
count, top_diff, top_count_data, num_rois, spatial_scale, channels, height, width,
pooled_height, pooled_width, output_dim, bottom_data_diff, bottom_trans_diff,
bottom_data, bottom_rois, bottom_trans, no_trans, trans_std, sample_per_part,
group_size, part_size, num_classes, channels_each_class);
DeformablePSROIPOOLING_CUDA_CHECK(cudaPeekAtLastError());
}
} // namespace cuda
template<typename DType>
inline void DeformablePSROIPoolForward(const Tensor<gpu, 4, DType> &out,
const Tensor<gpu, 4, DType> &data,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &trans,
const Tensor<gpu, 4, DType> &top_count,
const bool no_trans,
const float spatial_scale,
const int output_dim,
const int group_size,
const int pooled_size,
const int part_size,
const int sample_per_part,
const float trans_std) {
cuda::DeformablePSROIPoolForward(out, data, bbox, trans, top_count, no_trans, spatial_scale,
output_dim, group_size, pooled_size, part_size, sample_per_part, trans_std);
}
template<typename DType>
inline void DeformablePSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad,
const Tensor<gpu, 4, DType> &trans_grad,
const Tensor<gpu, 4, DType> &out_grad,
const Tensor<gpu, 4, DType> &data,
const Tensor<gpu, 2, DType> &bbox,
const Tensor<gpu, 4, DType> &trans,
const Tensor<gpu, 4, DType> &top_count,
const bool no_trans,
const float spatial_scale,
const int output_dim,
const int group_size,
const int pooled_size,
const int part_size,
const int sample_per_part,
const float trans_std) {
cuda::DeformablePSROIPoolBackwardAcc(in_grad, trans_grad, out_grad, data, bbox, trans,
top_count, no_trans, spatial_scale, output_dim, group_size, pooled_size, part_size,
sample_per_part, trans_std);
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(DeformablePSROIPoolingParam param, int dtype) {
Operator* op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new DeformablePSROIPoolingOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet
|
ce6f13e0e66b9ba6ccac3c17513355542aab22c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* BSD 3-Clause License
*
* Copyright (c) 2017-2018, plures
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cinttypes>
#include <thrust/complex.h>
#include "contrib/bfloat16.h"
#include "cuda_device_binary.h"
#include "device.hh"
/*****************************************************************************/
/* CUDA device binary kernels */
/*****************************************************************************/
#define CUDA_DEVICE_BINARY(name, func, t0, t1, t2, common) \
static __global__ void \
_1D_C_##name##_##t0##_##t1##_##t2( \
const t0##_t *x0, const t1##_t *x1, t2##_t *x2, \
const int64_t N) \
{ \
int64_t index = threadIdx.x + blockIdx.x * blockDim.x; \
int64_t stride = blockDim.x * gridDim.x; \
\
for (int64_t i = index; i < N; i += stride) { \
x2[i] = func((common##_t)x0[i], (common##_t)x1[i]); \
} \
} \
\
extern "C" void \
gm_cuda_device_fixed_1D_C_##name##_##t0##_##t1##_##t2( \
const char *a0, const char *a1, char *a2, \
const int64_t N) \
{ \
const t0##_t *x0 = (const t0##_t *)a0; \
const t1##_t *x1 = (const t1##_t *)a1; \
t2##_t *x2 = (t2##_t *)a2; \
int blockSize = 256; \
int64_t numBlocks = (N + blockSize - 1) / blockSize; \
\
hipLaunchKernelGGL(( _1D_C_##name##_##t0##_##t1##_##t2), dim3(numBlocks), dim3(blockSize), 0, 0, x0, x1, x2, N); \
} \
\
static __global__ void \
_1D_S_##name##_##t0##_##t1##_##t2( \
const t0##_t *x0, const t1##_t *x1, t2##_t *x2, \
const int64_t s0, const int64_t s1, const int64_t s2, \
const int64_t N) \
{ \
int64_t index = threadIdx.x + blockIdx.x * blockDim.x; \
int64_t stride = blockDim.x * gridDim.x; \
\
for (int64_t i = index; i < N; i += stride) { \
const int64_t i0 = i * s0; \
const int64_t i1 = i * s1; \
const int64_t i2 = i * s2; \
x2[i2] = func((common##_t)x0[i0], (common##_t)x1[i1]); \
} \
} \
\
extern "C" void \
gm_cuda_device_fixed_1D_S_##name##_##t0##_##t1##_##t2( \
const char *a0, const char *a1, char *a2, \
const int64_t s0, const int64_t s1, const int64_t s2, \
const int64_t N) \
{ \
const t0##_t *x0 = (const t0##_t *)a0; \
const t1##_t *x1 = (const t1##_t *)a1; \
t2##_t *x2 = (t2##_t *)a2; \
int blockSize = 256; \
int64_t numBlocks = (N + blockSize - 1) / blockSize; \
\
hipLaunchKernelGGL(( _1D_S_##name##_##t0##_##t1##_##t2), dim3(numBlocks), dim3(blockSize), 0, 0, x0, x1, x2, \
s0, s1, s2, N); \
} \
\
static __global__ void \
_0D_##name##_##t0##_##t1##_##t2(const t0##_t *x0, const t1##_t *x1, t2##_t *x2) \
{ \
*x2 = func((common##_t)*x0, (common##_t)*x1); \
} \
\
extern "C" void \
gm_cuda_device_0D_##name##_##t0##_##t1##_##t2( \
const char *a0, const char *a1, char *a2) \
{ \
const t0##_t *x0 = (const t0##_t *)a0; \
const t1##_t *x1 = (const t1##_t *)a1; \
t2##_t *x2 = (t2##_t *)a2; \
\
hipLaunchKernelGGL(( _0D_##name##_##t0##_##t1##_##t2), dim3(1), dim3(1), 0, 0, x0, x1, x2); \
}
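/*
 * Illustration (not part of the build): for the instantiation
 * CUDA_DEVICE_ALL_BINARY(add, add, __hadd) below, the
 * (uint8, uint8, uint8, uint8) row expands into three entry points:
 *
 *   _1D_C_add_uint8_uint8_uint8 + gm_cuda_device_fixed_1D_C_add_uint8_uint8_uint8
 *       contiguous 1D: x2[i] = (uint8_t)x0[i] + (uint8_t)x1[i]
 *   _1D_S_add_uint8_uint8_uint8 + gm_cuda_device_fixed_1D_S_add_uint8_uint8_uint8
 *       strided 1D: indices scaled by the s0/s1/s2 element strides
 *   _0D_add_uint8_uint8_uint8 + gm_cuda_device_0D_add_uint8_uint8_uint8
 *       scalar case, launched with a single thread
 *
 * The extern "C" gm_cuda_device_* wrappers are presumably the entry points
 * called from the host-side dispatch code.
 */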
#define CUDA_DEVICE_NOIMPL(name, func, t0, t1, t2, common)
#define CUDA_DEVICE_NOKERN(name, func, t0, t1, t2, common)
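/* NOIMPL and NOKERN rows expand to nothing: no kernels or wrappers are
   emitted for those type combinations (complex32, for example, is left
   unimplemented throughout this file). */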
/*****************************************************************************/
/* Arithmetic */
/*****************************************************************************/
#define CUDA_DEVICE_ALL_BINARY(name, func, hfunc) \
CUDA_DEVICE_BINARY(name, func, uint8, uint8, uint8, uint8) \
CUDA_DEVICE_BINARY(name, func, uint8, uint16, uint16, uint16) \
CUDA_DEVICE_BINARY(name, func, uint8, uint32, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint8, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint8, int8, int16, int16) \
CUDA_DEVICE_BINARY(name, func, uint8, int16, int16, int16) \
CUDA_DEVICE_BINARY(name, func, uint8, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, uint8, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint8, bfloat16, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY(name, hfunc, uint8, float16, float16, float16) \
CUDA_DEVICE_BINARY(name, func, uint8, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint8, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, uint8, complex32, complex32, complex32) \
CUDA_DEVICE_BINARY(name, func, uint8, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, uint8, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, uint16, uint8, uint16, uint16) \
CUDA_DEVICE_BINARY(name, func, uint16, uint16, uint16, uint16) \
CUDA_DEVICE_BINARY(name, func, uint16, uint32, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint16, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint16, int8, int32, int32) \
CUDA_DEVICE_BINARY(name, func, uint16, int16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, uint16, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, uint16, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint16, bfloat16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, float16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, uint16, complex32, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, uint16, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, uint16, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, uint32, uint8, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint32, uint16, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint32, uint32, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint32, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint32, int8, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, int16, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, int32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, bfloat16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, float16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, float32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, uint32, complex32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, uint32, complex64, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, uint32, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, uint64, uint8, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint64, uint16, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint64, uint32, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint64, uint64, uint64, uint64) \
\
CUDA_DEVICE_BINARY(name, func, int8, uint8, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int8, uint16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int8, uint32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int8, int8, int8, int8) \
CUDA_DEVICE_BINARY(name, func, int8, int16, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int8, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int8, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int8, bfloat16, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY(name, hfunc, int8, float16, float16, float16) \
CUDA_DEVICE_BINARY(name, func, int8, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int8, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, int8, complex32, complex32, complex32) \
CUDA_DEVICE_BINARY(name, func, int8, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, int8, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, int16, uint8, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int16, uint16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int16, uint32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int16, int8, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int16, int16, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int16, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int16, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int16, bfloat16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int16, float16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int16, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int16, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, int16, complex32, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, int16, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, int16, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, int32, uint8, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, uint16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, uint32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int32, int8, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, int16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int32, bfloat16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, float16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, float32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, int32, complex32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, int32, complex64, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, int32, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, int64, uint8, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, uint16, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, uint32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int8, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int16, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int64, int64, int64) \
\
CUDA_DEVICE_BINARY(name, func, bfloat16, uint8, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY(name, func, bfloat16, uint16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, bfloat16, int8, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY(name, func, bfloat16, int16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, int32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, bfloat16, bfloat16, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY(name, func, bfloat16, float16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, bfloat16, complex32, complex32, complex64) \
CUDA_DEVICE_BINARY(name, func, bfloat16, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, bfloat16, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, hfunc, float16, uint8, float16, float16) \
CUDA_DEVICE_BINARY(name, func, float16, uint16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float16, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, hfunc, float16, int8, float16, float16) \
CUDA_DEVICE_BINARY(name, func, float16, int16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float16, int32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float16, bfloat16, float32, float32) \
CUDA_DEVICE_BINARY(name, hfunc, float16, float16, float16, float16) \
CUDA_DEVICE_BINARY(name, func, float16, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float16, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, float16, complex32, complex32, complex32) \
CUDA_DEVICE_BINARY(name, func, float16, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, float16, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, float32, uint8, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, uint16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float32, int8, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, int16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, int32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float32, bfloat16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, float16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, float32, complex32, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, float32, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, float32, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, float64, uint8, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, uint16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, int8, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, int16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, int32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, bfloat16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, float16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, float32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, float64, complex32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, float64, complex64, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, float64, complex128, complex128, complex128) \
\
CUDA_DEVICE_NOIMPL(name, func, complex32, uint8, complex32, complex32) \
CUDA_DEVICE_NOIMPL(name, func, complex32, uint16, complex64, complex64) \
CUDA_DEVICE_NOIMPL(name, func, complex32, uint32, complex128, complex128) \
CUDA_DEVICE_NOIMPL(name, func, complex32, int8, complex32, complex32) \
CUDA_DEVICE_NOIMPL(name, func, complex32, int16, complex64, complex64) \
CUDA_DEVICE_NOIMPL(name, func, complex32, int32, complex128, complex128) \
CUDA_DEVICE_NOIMPL(name, func, complex32, bfloat16, complex64, complex64) \
CUDA_DEVICE_NOIMPL(name, func, complex32, float16, complex32, complex32) \
CUDA_DEVICE_NOIMPL(name, func, complex32, float32, complex64, complex64) \
CUDA_DEVICE_NOIMPL(name, func, complex32, float64, complex128, complex128) \
CUDA_DEVICE_NOIMPL(name, func, complex32, complex32, complex32, complex32) \
CUDA_DEVICE_NOIMPL(name, func, complex32, complex64, complex64, complex64) \
CUDA_DEVICE_NOIMPL(name, func, complex32, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, complex64, uint8, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, uint16, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, uint32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex64, int8, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, int16, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, int32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex64, bfloat16, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, float16, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, float32, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, float64, complex128, complex128) \
CUDA_DEVICE_NOIMPL(name, func, complex64, complex32, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, complex128, uint8, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, uint16, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, uint32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, int8, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, int16, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, int32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, bfloat16, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, float16, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, float32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, float64, complex128, complex128) \
CUDA_DEVICE_NOIMPL(name, func, complex128, complex32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, complex64, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, complex128, complex128, complex128)
#define CUDA_DEVICE_ALL_BINARY_NO_COMPLEX(name, func, hfunc) \
CUDA_DEVICE_BINARY(name, func, uint8, uint8, uint8, uint8) \
CUDA_DEVICE_BINARY(name, func, uint8, uint16, uint16, uint16) \
CUDA_DEVICE_BINARY(name, func, uint8, uint32, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint8, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint8, int8, int16, int16) \
CUDA_DEVICE_BINARY(name, func, uint8, int16, int16, int16) \
CUDA_DEVICE_BINARY(name, func, uint8, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, uint8, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint8, bfloat16, bfloat16, bfloat16) \
CUDA_DEVICE_NOIMPL(name, hfunc, uint8, float16, float16, float16) \
CUDA_DEVICE_BINARY(name, func, uint8, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint8, float64, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, uint8, complex32, complex32, complex32) \
CUDA_DEVICE_NOKERN(name, func, uint8, complex64, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, uint8, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, uint16, uint8, uint16, uint16) \
CUDA_DEVICE_BINARY(name, func, uint16, uint16, uint16, uint16) \
CUDA_DEVICE_BINARY(name, func, uint16, uint32, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint16, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint16, int8, int32, int32) \
CUDA_DEVICE_BINARY(name, func, uint16, int16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, uint16, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, uint16, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint16, bfloat16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, float16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, float64, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, uint16, complex32, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, uint16, complex64, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, uint16, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, uint32, uint8, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint32, uint16, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint32, uint32, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint32, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint32, int8, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, int16, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, int32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, bfloat16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, float16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, float32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, float64, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, uint32, complex32, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, uint32, complex64, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, uint32, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, uint64, uint8, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint64, uint16, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint64, uint32, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint64, uint64, uint64, uint64) \
\
CUDA_DEVICE_BINARY(name, func, int8, uint8, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int8, uint16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int8, uint32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int8, int8, int8, int8) \
CUDA_DEVICE_BINARY(name, func, int8, int16, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int8, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int8, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int8, bfloat16, bfloat16, bfloat16) \
CUDA_DEVICE_NOIMPL(name, hfunc, int8, float16, float16, float16) \
CUDA_DEVICE_BINARY(name, func, int8, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int8, float64, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, int8, complex32, complex32, complex32) \
CUDA_DEVICE_NOKERN(name, func, int8, complex64, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, int8, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, int16, uint8, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int16, uint16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int16, uint32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int16, int8, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int16, int16, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int16, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int16, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int16, bfloat16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int16, float16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int16, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int16, float64, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, int16, complex32, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, int16, complex64, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, int16, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, int32, uint8, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, uint16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, uint32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int32, int8, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, int16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int32, bfloat16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, float16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, float32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, float64, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, int32, complex32, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, int32, complex64, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, int32, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, int64, uint8, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, uint16, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, uint32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int8, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int16, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int64, int64, int64) \
\
CUDA_DEVICE_BINARY(name, func, bfloat16, uint8, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY(name, func, bfloat16, uint16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, bfloat16, int8, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY(name, func, bfloat16, int16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, int32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, bfloat16, bfloat16, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY(name, func, bfloat16, float16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, float64, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, bfloat16, complex32, complex32, complex32) \
CUDA_DEVICE_NOKERN(name, func, bfloat16, complex64, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, bfloat16, complex128, complex128, complex128) \
\
CUDA_DEVICE_NOIMPL(name, hfunc, float16, uint8, float16, float16) \
CUDA_DEVICE_BINARY(name, func, float16, uint16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float16, uint32, float64, float64) \
CUDA_DEVICE_NOIMPL(name, hfunc, float16, int8, float16, float16) \
CUDA_DEVICE_BINARY(name, func, float16, int16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float16, int32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float16, bfloat16, float32, float32) \
CUDA_DEVICE_NOIMPL(name, hfunc, float16, float16, float16, float16) \
CUDA_DEVICE_BINARY(name, func, float16, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float16, float64, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, float16, complex32, complex32, complex32) \
CUDA_DEVICE_NOKERN(name, func, float16, complex64, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, float16, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, float32, uint8, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, uint16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float32, int8, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, int16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, int32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float32, bfloat16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, float16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, float64, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, float32, complex32, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, float32, complex64, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, float32, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, float64, uint8, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, uint16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, int8, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, int16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, int32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, bfloat16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, float16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, float32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, float64, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, float64, complex32, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, float64, complex64, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, float64, complex128, complex128, complex128) \
\
CUDA_DEVICE_NOKERN(name, func, complex32, uint8, complex32, complex32) \
CUDA_DEVICE_NOKERN(name, func, complex32, uint16, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex32, uint32, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex32, int8, complex32, complex32) \
CUDA_DEVICE_NOKERN(name, func, complex32, int16, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex32, int32, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex32, bfloat16, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex32, float16, complex32, complex32) \
CUDA_DEVICE_NOKERN(name, func, complex32, float32, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex32, float64, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex32, complex32, complex32, complex32) \
CUDA_DEVICE_NOKERN(name, func, complex32, complex64, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex32, complex128, complex128, complex128) \
\
CUDA_DEVICE_NOKERN(name, func, complex64, uint8, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex64, uint16, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex64, uint32, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex64, int8, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex64, int16, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex64, int32, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex64, bfloat16, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex64, float16, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex64, float32, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex64, float64, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex64, complex32, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex64, complex64, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex64, complex128, complex128, complex128) \
\
CUDA_DEVICE_NOKERN(name, func, complex128, uint8, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex128, uint16, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex128, uint32, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex128, int8, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex128, int16, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex128, int32, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex128, bfloat16, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex128, float16, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex128, float32, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex128, float64, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex128, complex32, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex128, complex64, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex128, complex128, complex128, complex128)
#define CUDA_DEVICE_ALL_BINARY_FLOAT_RETURN(name, func, hfunc) \
CUDA_DEVICE_BINARY(name, hfunc, uint8, uint8, float16, float16) \
CUDA_DEVICE_BINARY(name, func, uint8, uint16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint8, uint32, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, uint8, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY(name, hfunc, uint8, int8, float16, float16) \
CUDA_DEVICE_BINARY(name, func, uint8, int16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint8, int32, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, uint8, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint8, bfloat16, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY(name, hfunc, uint8, float16, float16, float16) \
CUDA_DEVICE_BINARY(name, func, uint8, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint8, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, uint8, complex32, complex32, complex32) \
CUDA_DEVICE_BINARY(name, func, uint8, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, uint8, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, uint16, uint8, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, uint16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, uint32, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, uint16, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint16, int8, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, int16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, int32, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, uint16, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint16, bfloat16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, float16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, uint16, complex32, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, uint16, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, uint16, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, uint32, uint8, float64, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, uint16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, uint32, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, uint32, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint32, int8, float64, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, int16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, int32, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, uint32, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, bfloat16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, float16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, float32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, uint32, complex32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, uint32, complex64, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, uint32, complex128, complex128, complex128) \
\
CUDA_DEVICE_NOKERN(name, func, uint64, uint8, uint64, uint64) \
CUDA_DEVICE_NOKERN(name, func, uint64, uint16, uint64, uint64) \
CUDA_DEVICE_NOKERN(name, func, uint64, uint32, uint64, uint64) \
CUDA_DEVICE_NOKERN(name, func, uint64, uint64, uint64, uint64) \
\
CUDA_DEVICE_BINARY(name, hfunc, int8, uint8, float16, float16) \
CUDA_DEVICE_BINARY(name, func, int8, uint16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int8, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, hfunc, int8, int8, float16, float16) \
CUDA_DEVICE_BINARY(name, func, int8, int16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int8, int32, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, int8, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int8, bfloat16, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY(name, hfunc, int8, float16, float16, float16) \
CUDA_DEVICE_BINARY(name, func, int8, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int8, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, int8, complex32, complex32, complex32) \
CUDA_DEVICE_BINARY(name, func, int8, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, int8, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, int16, uint8, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int16, uint16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int16, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int16, int8, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int16, int16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int16, int32, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, int16, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int16, bfloat16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int16, float16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int16, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int16, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, int16, complex32, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, int16, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, int16, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, int32, uint8, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, uint16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, int8, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, int16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, int32, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, int32, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int32, bfloat16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, float16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, float32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, int32, complex32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, int32, complex64, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, int32, complex128, complex128, complex128) \
\
CUDA_DEVICE_NOKERN(name, func, int64, uint8, int64, int64) \
CUDA_DEVICE_NOKERN(name, func, int64, uint16, int64, int64) \
CUDA_DEVICE_NOKERN(name, func, int64, uint32, int64, int64) \
CUDA_DEVICE_NOKERN(name, func, int64, int8, int64, int64) \
CUDA_DEVICE_NOKERN(name, func, int64, int16, int64, int64) \
CUDA_DEVICE_NOKERN(name, func, int64, int32, int64, int64) \
CUDA_DEVICE_NOKERN(name, func, int64, int64, int64, int64) \
\
CUDA_DEVICE_BINARY(name, func, bfloat16, uint8, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY(name, func, bfloat16, uint16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, bfloat16, int8, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY(name, func, bfloat16, int16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, int32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, bfloat16, bfloat16, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY(name, func, bfloat16, float16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, bfloat16, complex32, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, bfloat16, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, bfloat16, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, hfunc, float16, uint8, float16, float16) \
CUDA_DEVICE_BINARY(name, func, float16, uint16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float16, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, hfunc, float16, int8, float16, float16) \
CUDA_DEVICE_BINARY(name, func, float16, int16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float16, int32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float16, bfloat16, float32, float32) \
CUDA_DEVICE_BINARY(name, hfunc, float16, float16, float16, float16) \
CUDA_DEVICE_BINARY(name, func, float16, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float16, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, float16, complex32, complex32, complex32) \
CUDA_DEVICE_BINARY(name, func, float16, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, float16, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, float32, uint8, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, uint16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float32, int8, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, int16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, int32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float32, bfloat16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, float16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, float32, complex32, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, float32, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, float32, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, float64, uint8, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, uint16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, int8, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, int16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, int32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, bfloat16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, float16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, float32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, float64, complex32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, float64, complex64, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, float64, complex128, complex128, complex128) \
\
CUDA_DEVICE_NOIMPL(name, func, complex32, uint8, complex32, complex32) \
CUDA_DEVICE_NOIMPL(name, func, complex32, uint16, complex64, complex64) \
CUDA_DEVICE_NOIMPL(name, func, complex32, uint32, complex128, complex128) \
CUDA_DEVICE_NOIMPL(name, func, complex32, int8, complex32, complex32) \
CUDA_DEVICE_NOIMPL(name, func, complex32, int16, complex64, complex64) \
CUDA_DEVICE_NOIMPL(name, func, complex32, int32, complex128, complex128) \
CUDA_DEVICE_NOIMPL(name, func, complex32, bfloat16, complex64, complex64) \
CUDA_DEVICE_NOIMPL(name, func, complex32, float16, complex32, complex32) \
CUDA_DEVICE_NOIMPL(name, func, complex32, float32, complex64, complex64) \
CUDA_DEVICE_NOIMPL(name, func, complex32, float64, complex128, complex128) \
CUDA_DEVICE_NOIMPL(name, func, complex32, complex32, complex32, complex32) \
CUDA_DEVICE_NOIMPL(name, func, complex32, complex64, complex64, complex64) \
CUDA_DEVICE_NOIMPL(name, func, complex32, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, complex64, uint8, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, uint16, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, uint32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex64, int8, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, int16, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, int32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex64, bfloat16, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, float16, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, float32, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, float64, complex128, complex128) \
CUDA_DEVICE_NOIMPL(name, func, complex64, complex32, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, complex128, uint8, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, uint16, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, uint32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, int8, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, int16, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, int32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, bfloat16, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, float16, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, float32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, float64, complex128, complex128) \
CUDA_DEVICE_NOIMPL(name, func, complex128, complex32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, complex64, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, complex128, complex128, complex128)
#define add(x, y) x + y
CUDA_DEVICE_ALL_BINARY(add, add, __hadd)
#define subtract(x, y) x - y
CUDA_DEVICE_ALL_BINARY(subtract, subtract, __hsub)
#define multiply(x, y) x * y
CUDA_DEVICE_ALL_BINARY(multiply, multiply, __hmul)
#define floor_divide(x, y) x * y
CUDA_DEVICE_ALL_BINARY_NO_COMPLEX(floor_divide, _floor_divide, _floor_divide)
#define remainder(x, y) x % y
CUDA_DEVICE_ALL_BINARY_NO_COMPLEX(remainder, _remainder, _remainder)
#define divide(x, y) x / y
CUDA_DEVICE_ALL_BINARY_FLOAT_RETURN(divide, divide, __hdiv)
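/*
 * Summary of the dispatch tables above:
 *   - CUDA_DEVICE_ALL_BINARY (add, subtract, multiply): full mixed-type
 *     promotion, complex included.
 *   - CUDA_DEVICE_ALL_BINARY_NO_COMPLEX (floor_divide, remainder): complex
 *     rows are NOKERN, as these operations are not defined on complex values.
 *   - CUDA_DEVICE_ALL_BINARY_FLOAT_RETURN (divide): integer pairs promote to
 *     a floating-point result; 64-bit integer operands stay NOKERN, presumably
 *     because no floating-point type represents them exactly.
 */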
/*****************************************************************************/
/* Comparison */
/*****************************************************************************/
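/* Comparison kernels store a bool result; both operands are first converted
   to the common type given in the last macro column, and the cfunc variant
   is used for the complex-valued rows. */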
#define CUDA_DEVICE_ALL_COMPARISON(name, func, hfunc, cfunc) \
CUDA_DEVICE_BINARY(name, func, uint8, uint8, bool, uint8) \
CUDA_DEVICE_BINARY(name, func, uint8, uint16, bool, uint16) \
CUDA_DEVICE_BINARY(name, func, uint8, uint32, bool, uint32) \
CUDA_DEVICE_BINARY(name, func, uint8, uint64, bool, uint64) \
CUDA_DEVICE_BINARY(name, func, uint8, int8, bool, int16) \
CUDA_DEVICE_BINARY(name, func, uint8, int16, bool, int16) \
CUDA_DEVICE_BINARY(name, func, uint8, int32, bool, int32) \
CUDA_DEVICE_BINARY(name, func, uint8, int64, bool, int64) \
CUDA_DEVICE_BINARY(name, func, uint8, bfloat16, bool, bfloat16) \
CUDA_DEVICE_BINARY(name, hfunc, uint8, float16, bool, float16) \
CUDA_DEVICE_BINARY(name, func, uint8, float32, bool, float32) \
CUDA_DEVICE_BINARY(name, func, uint8, float64, bool, float64) \
CUDA_DEVICE_NOIMPL(name, cfunc, uint8, complex32, bool, complex32) \
CUDA_DEVICE_BINARY(name, cfunc, uint8, complex64, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, uint8, complex128, bool, complex128) \
\
CUDA_DEVICE_BINARY(name, func, uint16, uint8, bool, uint16) \
CUDA_DEVICE_BINARY(name, func, uint16, uint16, bool, uint16) \
CUDA_DEVICE_BINARY(name, func, uint16, uint32, bool, uint32) \
CUDA_DEVICE_BINARY(name, func, uint16, uint64, bool, uint64) \
CUDA_DEVICE_BINARY(name, func, uint16, int8, bool, int32) \
CUDA_DEVICE_BINARY(name, func, uint16, int16, bool, int32) \
CUDA_DEVICE_BINARY(name, func, uint16, int32, bool, int32) \
CUDA_DEVICE_BINARY(name, func, uint16, int64, bool, int64) \
CUDA_DEVICE_BINARY(name, func, uint16, bfloat16, bool, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, float16, bool, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, float32, bool, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, float64, bool, float64) \
CUDA_DEVICE_NOIMPL(name, cfunc, uint16, complex32, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, uint16, complex64, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, uint16, complex128, bool, complex128) \
\
CUDA_DEVICE_BINARY(name, func, uint32, uint8, bool, uint32) \
CUDA_DEVICE_BINARY(name, func, uint32, uint16, bool, uint32) \
CUDA_DEVICE_BINARY(name, func, uint32, uint32, bool, uint32) \
CUDA_DEVICE_BINARY(name, func, uint32, uint64, bool, uint64) \
CUDA_DEVICE_BINARY(name, func, uint32, int8, bool, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, int16, bool, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, int32, bool, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, int64, bool, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, bfloat16, bool, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, float16, bool, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, float32, bool, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, float64, bool, float64) \
CUDA_DEVICE_NOIMPL(name, cfunc, uint32, complex32, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, uint32, complex64, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, uint32, complex128, bool, complex128) \
\
CUDA_DEVICE_BINARY(name, func, uint64, uint8, bool, uint64) \
CUDA_DEVICE_BINARY(name, func, uint64, uint16, bool, uint64) \
CUDA_DEVICE_BINARY(name, func, uint64, uint32, bool, uint64) \
CUDA_DEVICE_BINARY(name, func, uint64, uint64, bool, uint64) \
\
CUDA_DEVICE_BINARY(name, func, int8, uint8, bool, int16) \
CUDA_DEVICE_BINARY(name, func, int8, uint16, bool, int32) \
CUDA_DEVICE_BINARY(name, func, int8, uint32, bool, int64) \
CUDA_DEVICE_BINARY(name, func, int8, int8, bool, int8) \
CUDA_DEVICE_BINARY(name, func, int8, int16, bool, int16) \
CUDA_DEVICE_BINARY(name, func, int8, int32, bool, int32) \
CUDA_DEVICE_BINARY(name, func, int8, int64, bool, int64) \
CUDA_DEVICE_BINARY(name, func, int8, bfloat16, bool, bfloat16) \
CUDA_DEVICE_BINARY(name, hfunc, int8, float16, bool, float16) \
CUDA_DEVICE_BINARY(name, func, int8, float32, bool, float32) \
CUDA_DEVICE_BINARY(name, func, int8, float64, bool, float64) \
CUDA_DEVICE_NOIMPL(name, cfunc, int8, complex32, bool, complex32) \
CUDA_DEVICE_BINARY(name, cfunc, int8, complex64, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, int8, complex128, bool, complex128) \
\
CUDA_DEVICE_BINARY(name, func, int16, uint8, bool, int16) \
CUDA_DEVICE_BINARY(name, func, int16, uint16, bool, int32) \
CUDA_DEVICE_BINARY(name, func, int16, uint32, bool, int64) \
CUDA_DEVICE_BINARY(name, func, int16, int8, bool, int16) \
CUDA_DEVICE_BINARY(name, func, int16, int16, bool, int16) \
CUDA_DEVICE_BINARY(name, func, int16, int32, bool, int32) \
CUDA_DEVICE_BINARY(name, func, int16, int64, bool, int64) \
CUDA_DEVICE_BINARY(name, func, int16, bfloat16, bool, float32) \
CUDA_DEVICE_BINARY(name, func, int16, float16, bool, float32) \
CUDA_DEVICE_BINARY(name, func, int16, float32, bool, float32) \
CUDA_DEVICE_BINARY(name, func, int16, float64, bool, float64) \
CUDA_DEVICE_NOIMPL(name, cfunc, int16, complex32, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, int16, complex64, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, int16, complex128, bool, complex128) \
\
CUDA_DEVICE_BINARY(name, func, int32, uint8, bool, int32) \
CUDA_DEVICE_BINARY(name, func, int32, uint16, bool, int32) \
CUDA_DEVICE_BINARY(name, func, int32, uint32, bool, int64) \
CUDA_DEVICE_BINARY(name, func, int32, int8, bool, int32) \
CUDA_DEVICE_BINARY(name, func, int32, int16, bool, int32) \
CUDA_DEVICE_BINARY(name, func, int32, int32, bool, int32) \
CUDA_DEVICE_BINARY(name, func, int32, int64, bool, int64) \
CUDA_DEVICE_BINARY(name, func, int32, bfloat16, bool, float64) \
CUDA_DEVICE_BINARY(name, func, int32, float16, bool, float64) \
CUDA_DEVICE_BINARY(name, func, int32, float32, bool, float64) \
CUDA_DEVICE_BINARY(name, func, int32, float64, bool, float64) \
CUDA_DEVICE_NOIMPL(name, cfunc, int32, complex32, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, int32, complex64, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, int32, complex128, bool, complex128) \
\
CUDA_DEVICE_BINARY(name, func, int64, uint8, bool, int64) \
CUDA_DEVICE_BINARY(name, func, int64, uint16, bool, int64) \
CUDA_DEVICE_BINARY(name, func, int64, uint32, bool, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int8, bool, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int16, bool, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int32, bool, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int64, bool, int64) \
\
CUDA_DEVICE_BINARY(name, func, bfloat16, uint8, bool, bfloat16) \
CUDA_DEVICE_BINARY(name, func, bfloat16, uint16, bool, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, uint32, bool, float64) \
CUDA_DEVICE_BINARY(name, func, bfloat16, int8, bool, bfloat16) \
CUDA_DEVICE_BINARY(name, func, bfloat16, int16, bool, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, int32, bool, float64) \
CUDA_DEVICE_BINARY(name, func, bfloat16, bfloat16, bool, bfloat16) \
CUDA_DEVICE_BINARY(name, func, bfloat16, float16, bool, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, float32, bool, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, float64, bool, float64) \
CUDA_DEVICE_NOIMPL(name, cfunc, bfloat16, complex32, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, bfloat16, complex64, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, bfloat16, complex128, bool, complex128) \
\
CUDA_DEVICE_BINARY(name, hfunc, float16, uint8, bool, float16) \
CUDA_DEVICE_BINARY(name, func, float16, uint16, bool, float32) \
CUDA_DEVICE_BINARY(name, func, float16, uint32, bool, float64) \
CUDA_DEVICE_BINARY(name, hfunc, float16, int8, bool, float16) \
CUDA_DEVICE_BINARY(name, func, float16, int16, bool, float32) \
CUDA_DEVICE_BINARY(name, func, float16, int32, bool, float64) \
CUDA_DEVICE_BINARY(name, func, float16, bfloat16, bool, float32) \
CUDA_DEVICE_BINARY(name, hfunc, float16, float16, bool, float16) \
CUDA_DEVICE_BINARY(name, func, float16, float32, bool, float32) \
CUDA_DEVICE_BINARY(name, func, float16, float64, bool, float64) \
CUDA_DEVICE_NOIMPL(name, cfunc, float16, complex32, bool, complex32) \
CUDA_DEVICE_BINARY(name, cfunc, float16, complex64, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, float16, complex128, bool, complex128) \
\
CUDA_DEVICE_BINARY(name, func, float32, uint8, bool, float32) \
CUDA_DEVICE_BINARY(name, func, float32, uint16, bool, float32) \
CUDA_DEVICE_BINARY(name, func, float32, uint32, bool, float64) \
CUDA_DEVICE_BINARY(name, func, float32, int8, bool, float32) \
CUDA_DEVICE_BINARY(name, func, float32, int16, bool, float32) \
CUDA_DEVICE_BINARY(name, func, float32, int32, bool, float64) \
CUDA_DEVICE_BINARY(name, func, float32, bfloat16, bool, float32) \
CUDA_DEVICE_BINARY(name, func, float32, float16, bool, float32) \
CUDA_DEVICE_BINARY(name, func, float32, float32, bool, float32) \
CUDA_DEVICE_BINARY(name, func, float32, float64, bool, float64) \
CUDA_DEVICE_NOIMPL(name, cfunc, float32, complex32, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, float32, complex64, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, float32, complex128, bool, complex128) \
\
CUDA_DEVICE_BINARY(name, func, float64, uint8, bool, float64) \
CUDA_DEVICE_BINARY(name, func, float64, uint16, bool, float64) \
CUDA_DEVICE_BINARY(name, func, float64, uint32, bool, float64) \
CUDA_DEVICE_BINARY(name, func, float64, int8, bool, float64) \
CUDA_DEVICE_BINARY(name, func, float64, int16, bool, float64) \
CUDA_DEVICE_BINARY(name, func, float64, int32, bool, float64) \
CUDA_DEVICE_BINARY(name, func, float64, bfloat16, bool, float64) \
CUDA_DEVICE_BINARY(name, func, float64, float16, bool, float64) \
CUDA_DEVICE_BINARY(name, func, float64, float32, bool, float64) \
CUDA_DEVICE_BINARY(name, func, float64, float64, bool, float64) \
CUDA_DEVICE_NOIMPL(name, cfunc, float64, complex32, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, float64, complex64, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, float64, complex128, bool, complex128) \
\
CUDA_DEVICE_NOIMPL(name, cfunc, complex32, uint8, bool, complex32) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex32, uint16, bool, complex64) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex32, uint32, bool, complex128) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex32, int8, bool, complex32) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex32, int16, bool, complex64) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex32, int32, bool, complex128) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex32, bfloat16, bool, complex64) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex32, float16, bool, complex32) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex32, float32, bool, complex64) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex32, float64, bool, complex128) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex32, complex32, bool, complex32) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex32, complex64, bool, complex64) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex32, complex128, bool, complex128) \
\
CUDA_DEVICE_BINARY(name, cfunc, complex64, uint8, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, complex64, uint16, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, complex64, uint32, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, complex64, int8, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, complex64, int16, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, complex64, int32, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, complex64, bfloat16, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, complex64, float16, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, complex64, float32, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, complex64, float64, bool, complex128) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex64, complex32, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, complex64, complex64, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, complex64, complex128, bool, complex128) \
\
CUDA_DEVICE_BINARY(name, cfunc, complex128, uint8, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, complex128, uint16, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, complex128, uint32, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, complex128, int8, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, complex128, int16, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, complex128, int32, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, complex128, bfloat16, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, complex128, float16, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, complex128, float32, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, complex128, float64, bool, complex128) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex128, complex32, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, complex128, complex64, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, complex128, complex128, bool, complex128)
#define less(x, y) x < y
CUDA_DEVICE_ALL_COMPARISON(less, less, __hlt, lexorder_lt)
#define less_equal(x, y) x <= y
CUDA_DEVICE_ALL_COMPARISON(less_equal, less_equal, __hle, lexorder_le)
#define greater_equal(x, y) x >= y
CUDA_DEVICE_ALL_COMPARISON(greater_equal, greater_equal, __hge, lexorder_ge)
#define greater(x, y) x > y
CUDA_DEVICE_ALL_COMPARISON(greater, greater, __hgt, lexorder_gt)
#define equal(x, y) x == y
CUDA_DEVICE_ALL_COMPARISON(equal, equal, __heq, equal)
#define not_equal(x, y) x != y
CUDA_DEVICE_ALL_COMPARISON(not_equal, not_equal, half_ne, not_equal)
#define equaln(x, y) (x == y || (x != x && y != y))
CUDA_DEVICE_ALL_COMPARISON(equaln, equaln, half_eqn, lexorder_eqn)
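/* The comparison instantiations above expand, one CUDA_DEVICE_BINARY row at a
   time, into bool-returning kernels plus host-callable wrappers for every type
   pair in the table. Illustrative example (a sketch, assuming the standard
   expansion): the 'less' instantiation yields, among others,
   gm_cuda_device_fixed_1D_C_less_int32_int64_bool(a0, a1, a2, N), which
   promotes both operands to the common type int64 before comparing. */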
/*****************************************************************************/
/* Bitwise */
/*****************************************************************************/
#define CUDA_DEVICE_ALL_BITWISE(name, func) \
CUDA_DEVICE_BINARY(name, func, bool, bool, bool, bool) \
CUDA_DEVICE_BINARY(name, func, bool, uint8, uint8, uint8) \
CUDA_DEVICE_BINARY(name, func, bool, uint16, uint16, uint16) \
CUDA_DEVICE_BINARY(name, func, bool, uint32, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, bool, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, bool, int8, int8, int8) \
CUDA_DEVICE_BINARY(name, func, bool, int16, int16, int16) \
CUDA_DEVICE_BINARY(name, func, bool, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, bool, int64, int64, int64) \
\
CUDA_DEVICE_BINARY(name, func, uint8, bool, uint8, uint8) \
CUDA_DEVICE_BINARY(name, func, uint8, uint8, uint8, uint8) \
CUDA_DEVICE_BINARY(name, func, uint8, uint16, uint16, uint16) \
CUDA_DEVICE_BINARY(name, func, uint8, uint32, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint8, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint8, int8, int16, int16) \
CUDA_DEVICE_BINARY(name, func, uint8, int16, int16, int16) \
CUDA_DEVICE_BINARY(name, func, uint8, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, uint8, int64, int64, int64) \
\
CUDA_DEVICE_BINARY(name, func, uint16, bool, uint16, uint16) \
CUDA_DEVICE_BINARY(name, func, uint16, uint8, uint16, uint16) \
CUDA_DEVICE_BINARY(name, func, uint16, uint16, uint16, uint16) \
CUDA_DEVICE_BINARY(name, func, uint16, uint32, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint16, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint16, int8, int32, int32) \
CUDA_DEVICE_BINARY(name, func, uint16, int16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, uint16, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, uint16, int64, int64, int64) \
\
CUDA_DEVICE_BINARY(name, func, uint32, bool, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint32, uint8, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint32, uint16, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint32, uint32, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint32, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint32, int8, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, int16, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, int32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, int64, int64, int64) \
\
CUDA_DEVICE_BINARY(name, func, uint64, bool, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint64, uint8, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint64, uint16, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint64, uint32, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint64, uint64, uint64, uint64) \
\
CUDA_DEVICE_BINARY(name, func, int8, bool, int8, int8) \
CUDA_DEVICE_BINARY(name, func, int8, uint8, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int8, uint16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int8, uint32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int8, int8, int8, int8) \
CUDA_DEVICE_BINARY(name, func, int8, int16, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int8, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int8, int64, int64, int64) \
\
CUDA_DEVICE_BINARY(name, func, int16, bool, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int16, uint8, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int16, uint16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int16, uint32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int16, int8, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int16, int16, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int16, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int16, int64, int64, int64) \
\
CUDA_DEVICE_BINARY(name, func, int32, bool, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, uint8, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, uint16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, uint32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int32, int8, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, int16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, int64, int64, int64) \
\
CUDA_DEVICE_BINARY(name, func, int64, bool, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, uint8, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, uint16, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, uint32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int8, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int16, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int64, int64, int64)
#define bitwise_and(x, y) x & y
CUDA_DEVICE_ALL_BITWISE(bitwise_and, bitwise_and)
#define bitwise_or(x, y) x | y
CUDA_DEVICE_ALL_BITWISE(bitwise_or, bitwise_or)
#define bitwise_xor(x, y) x ^ y
CUDA_DEVICE_ALL_BITWISE(bitwise_xor, bitwise_xor)
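/* Note on the bitwise table: bool participates as the narrowest integer type,
   and mixed signed/unsigned operands are evaluated and stored in the next
   wider signed type. For example, bitwise_and on (uint8, int8) inputs is
   computed as int16 and produces an int16 result. */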
/*****************************************************************************/
/* Two return values */
/*****************************************************************************/
#define CUDA_DEVICE_BINARY_MV(name, func, t0, t1, t2, t3) \
static __global__ void \
_1D_C_##name##_##t0##_##t1##_##t2##_##t3( \
const t0##_t *x0, const t1##_t *x1, t2##_t *x2, t3##_t *x3, \
int64_t N) \
{ \
int64_t index = threadIdx.x + blockIdx.x * blockDim.x; \
int64_t stride = blockDim.x * gridDim.x; \
\
for (int64_t i = index; i < N; i += stride) { \
func(&x2[i], &x3[i], x0[i], x1[i]); \
} \
} \
\
extern "C" void \
gm_cuda_device_fixed_1D_C_##name##_##t0##_##t1##_##t2##_##t3( \
const char *a0, const char *a1, char *a2, char *a3, \
int64_t N) \
{ \
const t0##_t *x0 = (const t0##_t *)a0; \
const t1##_t *x1 = (const t1##_t *)a1; \
t2##_t *x2 = (t2##_t *)a2; \
t3##_t *x3 = (t3##_t *)a3; \
int blockSize = 256; \
int64_t numBlocks = (N + blockSize - 1) / blockSize; \
\
hipLaunchKernelGGL(( _1D_C_##name##_##t0##_##t1##_##t2##_##t3), dim3(numBlocks), dim3(blockSize), 0, 0, \
x0, x1, x2, x3, N); \
}
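/* Illustrative use of a generated multi-value wrapper (a sketch, assuming the
   divmod instantiation below): quotient and remainder are produced in a single
   pass over the inputs.
 *
 *   gm_cuda_device_fixed_1D_C_divmod_int64_int64_int64_int64(
 *       (const char *)a, (const char *)b, (char *)quot, (char *)rem, N);
 */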
#define CUDA_DEVICE_ALL_BINARY_MV(name, func) \
CUDA_DEVICE_BINARY_MV(name, func, uint8, uint8, uint8, uint8) \
CUDA_DEVICE_BINARY_MV(name, func, uint16, uint16, uint16, uint16) \
CUDA_DEVICE_BINARY_MV(name, func, uint32, uint32, uint32, uint32) \
CUDA_DEVICE_BINARY_MV(name, func, uint64, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY_MV(name, func, int8, int8, int8, int8) \
CUDA_DEVICE_BINARY_MV(name, func, int16, int16, int16, int16) \
CUDA_DEVICE_BINARY_MV(name, func, int32, int32, int32, int32) \
CUDA_DEVICE_BINARY_MV(name, func, int64, int64, int64, int64) \
CUDA_DEVICE_BINARY_MV(name, func, bfloat16, bfloat16, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY_MV(name, func, float32, float32, float32, float32) \
CUDA_DEVICE_BINARY_MV(name, func, float64, float64, float64, float64)
CUDA_DEVICE_ALL_BINARY_MV(divmod, _divmod)
| ce6f13e0e66b9ba6ccac3c17513355542aab22c4.cu | /*
* BSD 3-Clause License
*
* Copyright (c) 2017-2018, plures
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cinttypes>
#include <thrust/complex.h>
#include "contrib/bfloat16.h"
#include "cuda_device_binary.h"
#include "device.hh"
/*****************************************************************************/
/* CUDA device binary kernels */
/*****************************************************************************/
#define CUDA_DEVICE_BINARY(name, func, t0, t1, t2, common) \
static __global__ void \
_1D_C_##name##_##t0##_##t1##_##t2( \
const t0##_t *x0, const t1##_t *x1, t2##_t *x2, \
const int64_t N) \
{ \
int64_t index = threadIdx.x + blockIdx.x * blockDim.x; \
int64_t stride = blockDim.x * gridDim.x; \
\
for (int64_t i = index; i < N; i += stride) { \
x2[i] = func((common##_t)x0[i], (common##_t)x1[i]); \
} \
} \
\
extern "C" void \
gm_cuda_device_fixed_1D_C_##name##_##t0##_##t1##_##t2( \
const char *a0, const char *a1, char *a2, \
const int64_t N) \
{ \
const t0##_t *x0 = (const t0##_t *)a0; \
const t1##_t *x1 = (const t1##_t *)a1; \
t2##_t *x2 = (t2##_t *)a2; \
int blockSize = 256; \
int64_t numBlocks = (N + blockSize - 1) / blockSize; \
\
_1D_C_##name##_##t0##_##t1##_##t2<<<numBlocks, blockSize>>>(x0, x1, x2, N); \
} \
\
static __global__ void \
_1D_S_##name##_##t0##_##t1##_##t2( \
const t0##_t *x0, const t1##_t *x1, t2##_t *x2, \
const int64_t s0, const int64_t s1, const int64_t s2, \
const int64_t N) \
{ \
int64_t index = threadIdx.x + blockIdx.x * blockDim.x; \
int64_t stride = blockDim.x * gridDim.x; \
\
for (int64_t i = index; i < N; i += stride) { \
const int64_t i0 = i * s0; \
const int64_t i1 = i * s1; \
const int64_t i2 = i * s2; \
x2[i2] = func((common##_t)x0[i0], (common##_t)x1[i1]); \
} \
} \
\
extern "C" void \
gm_cuda_device_fixed_1D_S_##name##_##t0##_##t1##_##t2( \
const char *a0, const char *a1, char *a2, \
const int64_t s0, const int64_t s1, const int64_t s2, \
const int64_t N) \
{ \
const t0##_t *x0 = (const t0##_t *)a0; \
const t1##_t *x1 = (const t1##_t *)a1; \
t2##_t *x2 = (t2##_t *)a2; \
int blockSize = 256; \
int64_t numBlocks = (N + blockSize - 1) / blockSize; \
\
_1D_S_##name##_##t0##_##t1##_##t2<<<numBlocks, blockSize>>>(x0, x1, x2, \
s0, s1, s2, N); \
} \
\
static __global__ void \
_0D_##name##_##t0##_##t1##_##t2(const t0##_t *x0, const t1##_t *x1, t2##_t *x2) \
{ \
*x2 = func((common##_t)*x0, (common##_t)*x1); \
} \
\
extern "C" void \
gm_cuda_device_0D_##name##_##t0##_##t1##_##t2( \
const char *a0, const char *a1, char *a2) \
{ \
const t0##_t *x0 = (const t0##_t *)a0; \
const t1##_t *x1 = (const t1##_t *)a1; \
t2##_t *x2 = (t2##_t *)a2; \
\
_0D_##name##_##t0##_##t1##_##t2<<<1, 1>>>(x0, x1, x2); \
}
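/* Illustrative expansion (a sketch): for CUDA_DEVICE_BINARY(add, add, int32,
   int32, int32, int32), which the arithmetic tables below instantiate, the
   macro above emits three entry points that all take untyped byte pointers:
 *
 *   gm_cuda_device_fixed_1D_C_add_int32_int32_int32(a0, a1, a2, N);              contiguous
 *   gm_cuda_device_fixed_1D_S_add_int32_int32_int32(a0, a1, a2, s0, s1, s2, N);  strided
 *   gm_cuda_device_0D_add_int32_int32_int32(a0, a1, a2);                         scalar
 *
 * Each wrapper casts the byte pointers to the concrete element types, computes
 * in the common type, and launches with 256-thread blocks (the 0-D case runs
 * on a single thread). */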
#define CUDA_DEVICE_NOIMPL(name, func, t0, t1, t2, common)
#define CUDA_DEVICE_NOKERN(name, func, t0, t1, t2, common)
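/* CUDA_DEVICE_NOIMPL and CUDA_DEVICE_NOKERN expand to nothing. They keep the
   type tables below exhaustive while marking combinations that are not yet
   implemented (e.g. the complex32 rows) or that deliberately have no kernel
   (e.g. the complex rows of the _NO_COMPLEX tables). */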
/*****************************************************************************/
/* Arithmetic */
/*****************************************************************************/
#define CUDA_DEVICE_ALL_BINARY(name, func, hfunc) \
CUDA_DEVICE_BINARY(name, func, uint8, uint8, uint8, uint8) \
CUDA_DEVICE_BINARY(name, func, uint8, uint16, uint16, uint16) \
CUDA_DEVICE_BINARY(name, func, uint8, uint32, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint8, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint8, int8, int16, int16) \
CUDA_DEVICE_BINARY(name, func, uint8, int16, int16, int16) \
CUDA_DEVICE_BINARY(name, func, uint8, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, uint8, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint8, bfloat16, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY(name, hfunc, uint8, float16, float16, float16) \
CUDA_DEVICE_BINARY(name, func, uint8, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint8, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, uint8, complex32, complex32, complex32) \
CUDA_DEVICE_BINARY(name, func, uint8, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, uint8, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, uint16, uint8, uint16, uint16) \
CUDA_DEVICE_BINARY(name, func, uint16, uint16, uint16, uint16) \
CUDA_DEVICE_BINARY(name, func, uint16, uint32, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint16, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint16, int8, int32, int32) \
CUDA_DEVICE_BINARY(name, func, uint16, int16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, uint16, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, uint16, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint16, bfloat16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, float16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, uint16, complex32, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, uint16, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, uint16, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, uint32, uint8, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint32, uint16, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint32, uint32, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint32, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint32, int8, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, int16, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, int32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, bfloat16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, float16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, float32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, uint32, complex32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, uint32, complex64, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, uint32, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, uint64, uint8, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint64, uint16, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint64, uint32, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint64, uint64, uint64, uint64) \
\
CUDA_DEVICE_BINARY(name, func, int8, uint8, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int8, uint16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int8, uint32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int8, int8, int8, int8) \
CUDA_DEVICE_BINARY(name, func, int8, int16, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int8, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int8, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int8, bfloat16, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY(name, hfunc, int8, float16, float16, float16) \
CUDA_DEVICE_BINARY(name, func, int8, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int8, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, int8, complex32, complex32, complex32) \
CUDA_DEVICE_BINARY(name, func, int8, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, int8, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, int16, uint8, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int16, uint16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int16, uint32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int16, int8, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int16, int16, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int16, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int16, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int16, bfloat16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int16, float16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int16, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int16, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, int16, complex32, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, int16, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, int16, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, int32, uint8, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, uint16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, uint32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int32, int8, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, int16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int32, bfloat16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, float16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, float32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, int32, complex32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, int32, complex64, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, int32, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, int64, uint8, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, uint16, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, uint32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int8, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int16, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int64, int64, int64) \
\
CUDA_DEVICE_BINARY(name, func, bfloat16, uint8, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY(name, func, bfloat16, uint16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, bfloat16, int8, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY(name, func, bfloat16, int16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, int32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, bfloat16, bfloat16, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY(name, func, bfloat16, float16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, bfloat16, complex32, complex32, complex64) \
CUDA_DEVICE_BINARY(name, func, bfloat16, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, bfloat16, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, hfunc, float16, uint8, float16, float16) \
CUDA_DEVICE_BINARY(name, func, float16, uint16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float16, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, hfunc, float16, int8, float16, float16) \
CUDA_DEVICE_BINARY(name, func, float16, int16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float16, int32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float16, bfloat16, float32, float32) \
CUDA_DEVICE_BINARY(name, hfunc, float16, float16, float16, float16) \
CUDA_DEVICE_BINARY(name, func, float16, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float16, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, float16, complex32, complex32, complex32) \
CUDA_DEVICE_BINARY(name, func, float16, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, float16, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, float32, uint8, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, uint16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float32, int8, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, int16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, int32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float32, bfloat16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, float16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, float32, complex32, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, float32, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, float32, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, float64, uint8, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, uint16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, int8, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, int16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, int32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, bfloat16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, float16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, float32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, float64, complex32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, float64, complex64, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, float64, complex128, complex128, complex128) \
\
CUDA_DEVICE_NOIMPL(name, func, complex32, uint8, complex32, complex32) \
CUDA_DEVICE_NOIMPL(name, func, complex32, uint16, complex64, complex64) \
CUDA_DEVICE_NOIMPL(name, func, complex32, uint32, complex128, complex128) \
CUDA_DEVICE_NOIMPL(name, func, complex32, int8, complex32, complex32) \
CUDA_DEVICE_NOIMPL(name, func, complex32, int16, complex64, complex64) \
CUDA_DEVICE_NOIMPL(name, func, complex32, int32, complex128, complex128) \
CUDA_DEVICE_NOIMPL(name, func, complex32, bfloat16, complex64, complex64) \
CUDA_DEVICE_NOIMPL(name, func, complex32, float16, complex32, complex32) \
CUDA_DEVICE_NOIMPL(name, func, complex32, float32, complex64, complex64) \
CUDA_DEVICE_NOIMPL(name, func, complex32, float64, complex128, complex128) \
CUDA_DEVICE_NOIMPL(name, func, complex32, complex32, complex32, complex32) \
CUDA_DEVICE_NOIMPL(name, func, complex32, complex64, complex64, complex64) \
CUDA_DEVICE_NOIMPL(name, func, complex32, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, complex64, uint8, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, uint16, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, uint32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex64, int8, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, int16, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, int32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex64, bfloat16, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, float16, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, float32, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, float64, complex128, complex128) \
CUDA_DEVICE_NOIMPL(name, func, complex64, complex32, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, complex128, uint8, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, uint16, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, uint32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, int8, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, int16, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, int32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, bfloat16, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, float16, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, float32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, float64, complex128, complex128) \
CUDA_DEVICE_NOIMPL(name, func, complex128, complex32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, complex64, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, complex128, complex128, complex128)
#define CUDA_DEVICE_ALL_BINARY_NO_COMPLEX(name, func, hfunc) \
CUDA_DEVICE_BINARY(name, func, uint8, uint8, uint8, uint8) \
CUDA_DEVICE_BINARY(name, func, uint8, uint16, uint16, uint16) \
CUDA_DEVICE_BINARY(name, func, uint8, uint32, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint8, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint8, int8, int16, int16) \
CUDA_DEVICE_BINARY(name, func, uint8, int16, int16, int16) \
CUDA_DEVICE_BINARY(name, func, uint8, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, uint8, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint8, bfloat16, bfloat16, bfloat16) \
CUDA_DEVICE_NOIMPL(name, hfunc, uint8, float16, float16, float16) \
CUDA_DEVICE_BINARY(name, func, uint8, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint8, float64, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, uint8, complex32, complex32, complex32) \
CUDA_DEVICE_NOKERN(name, func, uint8, complex64, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, uint8, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, uint16, uint8, uint16, uint16) \
CUDA_DEVICE_BINARY(name, func, uint16, uint16, uint16, uint16) \
CUDA_DEVICE_BINARY(name, func, uint16, uint32, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint16, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint16, int8, int32, int32) \
CUDA_DEVICE_BINARY(name, func, uint16, int16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, uint16, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, uint16, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint16, bfloat16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, float16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, float64, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, uint16, complex32, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, uint16, complex64, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, uint16, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, uint32, uint8, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint32, uint16, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint32, uint32, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint32, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint32, int8, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, int16, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, int32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, bfloat16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, float16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, float32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, float64, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, uint32, complex32, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, uint32, complex64, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, uint32, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, uint64, uint8, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint64, uint16, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint64, uint32, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint64, uint64, uint64, uint64) \
\
CUDA_DEVICE_BINARY(name, func, int8, uint8, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int8, uint16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int8, uint32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int8, int8, int8, int8) \
CUDA_DEVICE_BINARY(name, func, int8, int16, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int8, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int8, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int8, bfloat16, bfloat16, bfloat16) \
CUDA_DEVICE_NOIMPL(name, hfunc, int8, float16, float16, float16) \
CUDA_DEVICE_BINARY(name, func, int8, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int8, float64, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, int8, complex32, complex32, complex32) \
CUDA_DEVICE_NOKERN(name, func, int8, complex64, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, int8, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, int16, uint8, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int16, uint16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int16, uint32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int16, int8, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int16, int16, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int16, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int16, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int16, bfloat16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int16, float16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int16, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int16, float64, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, int16, complex32, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, int16, complex64, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, int16, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, int32, uint8, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, uint16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, uint32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int32, int8, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, int16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int32, bfloat16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, float16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, float32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, float64, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, int32, complex32, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, int32, complex64, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, int32, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, int64, uint8, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, uint16, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, uint32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int8, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int16, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int64, int64, int64) \
\
CUDA_DEVICE_BINARY(name, func, bfloat16, uint8, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY(name, func, bfloat16, uint16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, bfloat16, int8, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY(name, func, bfloat16, int16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, int32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, bfloat16, bfloat16, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY(name, func, bfloat16, float16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, float64, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, bfloat16, complex32, complex32, complex32) \
CUDA_DEVICE_NOKERN(name, func, bfloat16, complex64, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, bfloat16, complex128, complex128, complex128) \
\
CUDA_DEVICE_NOIMPL(name, hfunc, float16, uint8, float16, float16) \
CUDA_DEVICE_BINARY(name, func, float16, uint16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float16, uint32, float64, float64) \
CUDA_DEVICE_NOIMPL(name, hfunc, float16, int8, float16, float16) \
CUDA_DEVICE_BINARY(name, func, float16, int16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float16, int32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float16, bfloat16, float32, float32) \
CUDA_DEVICE_NOIMPL(name, hfunc, float16, float16, float16, float16) \
CUDA_DEVICE_BINARY(name, func, float16, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float16, float64, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, float16, complex32, complex32, complex32) \
CUDA_DEVICE_NOKERN(name, func, float16, complex64, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, float16, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, float32, uint8, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, uint16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float32, int8, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, int16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, int32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float32, bfloat16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, float16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, float64, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, float32, complex32, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, float32, complex64, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, float32, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, float64, uint8, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, uint16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, int8, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, int16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, int32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, bfloat16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, float16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, float32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, float64, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, float64, complex32, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, float64, complex64, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, float64, complex128, complex128, complex128) \
\
CUDA_DEVICE_NOKERN(name, func, complex32, uint8, complex32, complex32) \
CUDA_DEVICE_NOKERN(name, func, complex32, uint16, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex32, uint32, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex32, int8, complex32, complex32) \
CUDA_DEVICE_NOKERN(name, func, complex32, int16, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex32, int32, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex32, bfloat16, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex32, float16, complex32, complex32) \
CUDA_DEVICE_NOKERN(name, func, complex32, float32, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex32, float64, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex32, complex32, complex32, complex32) \
CUDA_DEVICE_NOKERN(name, func, complex32, complex64, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex32, complex128, complex128, complex128) \
\
CUDA_DEVICE_NOKERN(name, func, complex64, uint8, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex64, uint16, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex64, uint32, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex64, int8, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex64, int16, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex64, int32, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex64, bfloat16, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex64, float16, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex64, float32, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex64, float64, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex64, complex32, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex64, complex64, complex64, complex64) \
CUDA_DEVICE_NOKERN(name, func, complex64, complex128, complex128, complex128) \
\
CUDA_DEVICE_NOKERN(name, func, complex128, uint8, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex128, uint16, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex128, uint32, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex128, int8, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex128, int16, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex128, int32, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex128, bfloat16, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex128, float16, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex128, float32, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex128, float64, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex128, complex32, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex128, complex64, complex128, complex128) \
CUDA_DEVICE_NOKERN(name, func, complex128, complex128, complex128, complex128)
#define CUDA_DEVICE_ALL_BINARY_FLOAT_RETURN(name, func, hfunc) \
CUDA_DEVICE_BINARY(name, hfunc, uint8, uint8, float16, float16) \
CUDA_DEVICE_BINARY(name, func, uint8, uint16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint8, uint32, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, uint8, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY(name, hfunc, uint8, int8, float16, float16) \
CUDA_DEVICE_BINARY(name, func, uint8, int16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint8, int32, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, uint8, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint8, bfloat16, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY(name, hfunc, uint8, float16, float16, float16) \
CUDA_DEVICE_BINARY(name, func, uint8, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint8, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, uint8, complex32, complex32, complex32) \
CUDA_DEVICE_BINARY(name, func, uint8, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, uint8, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, uint16, uint8, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, uint16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, uint32, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, uint16, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint16, int8, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, int16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, int32, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, uint16, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint16, bfloat16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, float16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, uint16, complex32, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, uint16, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, uint16, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, uint32, uint8, float64, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, uint16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, uint32, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, uint32, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint32, int8, float64, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, int16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, int32, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, uint32, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, bfloat16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, float16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, float32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, uint32, complex32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, uint32, complex64, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, uint32, complex128, complex128, complex128) \
\
CUDA_DEVICE_NOKERN(name, func, uint64, uint8, uint64, uint64) \
CUDA_DEVICE_NOKERN(name, func, uint64, uint16, uint64, uint64) \
CUDA_DEVICE_NOKERN(name, func, uint64, uint32, uint64, uint64) \
CUDA_DEVICE_NOKERN(name, func, uint64, uint64, uint64, uint64) \
\
CUDA_DEVICE_BINARY(name, hfunc, int8, uint8, float16, float16) \
CUDA_DEVICE_BINARY(name, func, int8, uint16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int8, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, hfunc, int8, int8, float16, float16) \
CUDA_DEVICE_BINARY(name, func, int8, int16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int8, int32, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, int8, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int8, bfloat16, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY(name, hfunc, int8, float16, float16, float16) \
CUDA_DEVICE_BINARY(name, func, int8, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int8, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, int8, complex32, complex32, complex32) \
CUDA_DEVICE_BINARY(name, func, int8, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, int8, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, int16, uint8, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int16, uint16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int16, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int16, int8, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int16, int16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int16, int32, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, int16, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int16, bfloat16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int16, float16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int16, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, int16, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, int16, complex32, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, int16, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, int16, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, int32, uint8, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, uint16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, int8, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, int16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, int32, float64, float64) \
CUDA_DEVICE_NOKERN(name, func, int32, int64, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int32, bfloat16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, float16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, float32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, int32, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, int32, complex32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, int32, complex64, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, int32, complex128, complex128, complex128) \
\
CUDA_DEVICE_NOKERN(name, func, int64, uint8, int64, int64) \
CUDA_DEVICE_NOKERN(name, func, int64, uint16, int64, int64) \
CUDA_DEVICE_NOKERN(name, func, int64, uint32, int64, int64) \
CUDA_DEVICE_NOKERN(name, func, int64, int8, int64, int64) \
CUDA_DEVICE_NOKERN(name, func, int64, int16, int64, int64) \
CUDA_DEVICE_NOKERN(name, func, int64, int32, int64, int64) \
CUDA_DEVICE_NOKERN(name, func, int64, int64, int64, int64) \
\
CUDA_DEVICE_BINARY(name, func, bfloat16, uint8, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY(name, func, bfloat16, uint16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, bfloat16, int8, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY(name, func, bfloat16, int16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, int32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, bfloat16, bfloat16, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY(name, func, bfloat16, float16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, bfloat16, complex32, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, bfloat16, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, bfloat16, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, hfunc, float16, uint8, float16, float16) \
CUDA_DEVICE_BINARY(name, func, float16, uint16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float16, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, hfunc, float16, int8, float16, float16) \
CUDA_DEVICE_BINARY(name, func, float16, int16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float16, int32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float16, bfloat16, float32, float32) \
CUDA_DEVICE_BINARY(name, hfunc, float16, float16, float16, float16) \
CUDA_DEVICE_BINARY(name, func, float16, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float16, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, float16, complex32, complex32, complex32) \
CUDA_DEVICE_BINARY(name, func, float16, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, float16, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, float32, uint8, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, uint16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float32, int8, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, int16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, int32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float32, bfloat16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, float16, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, float32, float32, float32) \
CUDA_DEVICE_BINARY(name, func, float32, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, float32, complex32, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, float32, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, float32, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, float64, uint8, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, uint16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, uint32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, int8, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, int16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, int32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, bfloat16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, float16, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, float32, float64, float64) \
CUDA_DEVICE_BINARY(name, func, float64, float64, float64, float64) \
CUDA_DEVICE_NOIMPL(name, func, float64, complex32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, float64, complex64, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, float64, complex128, complex128, complex128) \
\
CUDA_DEVICE_NOIMPL(name, func, complex32, uint8, complex32, complex32) \
CUDA_DEVICE_NOIMPL(name, func, complex32, uint16, complex64, complex64) \
CUDA_DEVICE_NOIMPL(name, func, complex32, uint32, complex128, complex128) \
CUDA_DEVICE_NOIMPL(name, func, complex32, int8, complex32, complex32) \
CUDA_DEVICE_NOIMPL(name, func, complex32, int16, complex64, complex64) \
CUDA_DEVICE_NOIMPL(name, func, complex32, int32, complex128, complex128) \
CUDA_DEVICE_NOIMPL(name, func, complex32, bfloat16, complex64, complex64) \
CUDA_DEVICE_NOIMPL(name, func, complex32, float16, complex32, complex32) \
CUDA_DEVICE_NOIMPL(name, func, complex32, float32, complex64, complex64) \
CUDA_DEVICE_NOIMPL(name, func, complex32, float64, complex128, complex128) \
CUDA_DEVICE_NOIMPL(name, func, complex32, complex32, complex32, complex32) \
CUDA_DEVICE_NOIMPL(name, func, complex32, complex64, complex64, complex64) \
CUDA_DEVICE_NOIMPL(name, func, complex32, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, complex64, uint8, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, uint16, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, uint32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex64, int8, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, int16, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, int32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex64, bfloat16, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, float16, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, float32, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, float64, complex128, complex128) \
CUDA_DEVICE_NOIMPL(name, func, complex64, complex32, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, complex64, complex64, complex64) \
CUDA_DEVICE_BINARY(name, func, complex64, complex128, complex128, complex128) \
\
CUDA_DEVICE_BINARY(name, func, complex128, uint8, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, uint16, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, uint32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, int8, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, int16, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, int32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, bfloat16, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, float16, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, float32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, float64, complex128, complex128) \
CUDA_DEVICE_NOIMPL(name, func, complex128, complex32, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, complex64, complex128, complex128) \
CUDA_DEVICE_BINARY(name, func, complex128, complex128, complex128, complex128)
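/* Three instantiation families are used below: CUDA_DEVICE_ALL_BINARY covers
   the full mixed-type matrix including complex results (add, subtract,
   multiply), the _NO_COMPLEX variant replaces complex rows with NOKERN
   placeholders (floor_divide, remainder), and the _FLOAT_RETURN variant
   promotes integer operands to a floating-point result (divide). */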
#define add(x, y) x + y
CUDA_DEVICE_ALL_BINARY(add, add, __hadd)
#define subtract(x, y) x - y
CUDA_DEVICE_ALL_BINARY(subtract, subtract, __hsub)
#define multiply(x, y) x * y
CUDA_DEVICE_ALL_BINARY(multiply, multiply, __hmul)
#define floor_divide(x, y) x / y
CUDA_DEVICE_ALL_BINARY_NO_COMPLEX(floor_divide, _floor_divide, _floor_divide)
#define remainder(x, y) x % y
CUDA_DEVICE_ALL_BINARY_NO_COMPLEX(remainder, _remainder, _remainder)
#define divide(x, y) x / y
CUDA_DEVICE_ALL_BINARY_FLOAT_RETURN(divide, divide, __hdiv)
/*****************************************************************************/
/* Comparison */
/*****************************************************************************/
#define CUDA_DEVICE_ALL_COMPARISON(name, func, hfunc, cfunc) \
CUDA_DEVICE_BINARY(name, func, uint8, uint8, bool, uint8) \
CUDA_DEVICE_BINARY(name, func, uint8, uint16, bool, uint16) \
CUDA_DEVICE_BINARY(name, func, uint8, uint32, bool, uint32) \
CUDA_DEVICE_BINARY(name, func, uint8, uint64, bool, uint64) \
CUDA_DEVICE_BINARY(name, func, uint8, int8, bool, int16) \
CUDA_DEVICE_BINARY(name, func, uint8, int16, bool, int16) \
CUDA_DEVICE_BINARY(name, func, uint8, int32, bool, int32) \
CUDA_DEVICE_BINARY(name, func, uint8, int64, bool, int64) \
CUDA_DEVICE_BINARY(name, func, uint8, bfloat16, bool, bfloat16) \
CUDA_DEVICE_BINARY(name, hfunc, uint8, float16, bool, float16) \
CUDA_DEVICE_BINARY(name, func, uint8, float32, bool, float32) \
CUDA_DEVICE_BINARY(name, func, uint8, float64, bool, float64) \
CUDA_DEVICE_NOIMPL(name, cfunc, uint8, complex32, bool, complex32) \
CUDA_DEVICE_BINARY(name, cfunc, uint8, complex64, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, uint8, complex128, bool, complex128) \
\
CUDA_DEVICE_BINARY(name, func, uint16, uint8, bool, uint16) \
CUDA_DEVICE_BINARY(name, func, uint16, uint16, bool, uint16) \
CUDA_DEVICE_BINARY(name, func, uint16, uint32, bool, uint32) \
CUDA_DEVICE_BINARY(name, func, uint16, uint64, bool, uint64) \
CUDA_DEVICE_BINARY(name, func, uint16, int8, bool, int32) \
CUDA_DEVICE_BINARY(name, func, uint16, int16, bool, int32) \
CUDA_DEVICE_BINARY(name, func, uint16, int32, bool, int32) \
CUDA_DEVICE_BINARY(name, func, uint16, int64, bool, int64) \
CUDA_DEVICE_BINARY(name, func, uint16, bfloat16, bool, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, float16, bool, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, float32, bool, float32) \
CUDA_DEVICE_BINARY(name, func, uint16, float64, bool, float64) \
CUDA_DEVICE_NOIMPL(name, cfunc, uint16, complex32, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, uint16, complex64, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, uint16, complex128, bool, complex128) \
\
CUDA_DEVICE_BINARY(name, func, uint32, uint8, bool, uint32) \
CUDA_DEVICE_BINARY(name, func, uint32, uint16, bool, uint32) \
CUDA_DEVICE_BINARY(name, func, uint32, uint32, bool, uint32) \
CUDA_DEVICE_BINARY(name, func, uint32, uint64, bool, uint64) \
CUDA_DEVICE_BINARY(name, func, uint32, int8, bool, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, int16, bool, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, int32, bool, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, int64, bool, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, bfloat16, bool, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, float16, bool, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, float32, bool, float64) \
CUDA_DEVICE_BINARY(name, func, uint32, float64, bool, float64) \
CUDA_DEVICE_NOIMPL(name, cfunc, uint32, complex32, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, uint32, complex64, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, uint32, complex128, bool, complex128) \
\
CUDA_DEVICE_BINARY(name, func, uint64, uint8, bool, uint64) \
CUDA_DEVICE_BINARY(name, func, uint64, uint16, bool, uint64) \
CUDA_DEVICE_BINARY(name, func, uint64, uint32, bool, uint64) \
CUDA_DEVICE_BINARY(name, func, uint64, uint64, bool, uint64) \
\
CUDA_DEVICE_BINARY(name, func, int8, uint8, bool, int16) \
CUDA_DEVICE_BINARY(name, func, int8, uint16, bool, int32) \
CUDA_DEVICE_BINARY(name, func, int8, uint32, bool, int64) \
CUDA_DEVICE_BINARY(name, func, int8, int8, bool, int8) \
CUDA_DEVICE_BINARY(name, func, int8, int16, bool, int16) \
CUDA_DEVICE_BINARY(name, func, int8, int32, bool, int32) \
CUDA_DEVICE_BINARY(name, func, int8, int64, bool, int64) \
CUDA_DEVICE_BINARY(name, func, int8, bfloat16, bool, bfloat16) \
CUDA_DEVICE_BINARY(name, hfunc, int8, float16, bool, float16) \
CUDA_DEVICE_BINARY(name, func, int8, float32, bool, float32) \
CUDA_DEVICE_BINARY(name, func, int8, float64, bool, float64) \
CUDA_DEVICE_NOIMPL(name, cfunc, int8, complex32, bool, complex32) \
CUDA_DEVICE_BINARY(name, cfunc, int8, complex64, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, int8, complex128, bool, complex128) \
\
CUDA_DEVICE_BINARY(name, func, int16, uint8, bool, int16) \
CUDA_DEVICE_BINARY(name, func, int16, uint16, bool, int32) \
CUDA_DEVICE_BINARY(name, func, int16, uint32, bool, int64) \
CUDA_DEVICE_BINARY(name, func, int16, int8, bool, int16) \
CUDA_DEVICE_BINARY(name, func, int16, int16, bool, int16) \
CUDA_DEVICE_BINARY(name, func, int16, int32, bool, int32) \
CUDA_DEVICE_BINARY(name, func, int16, int64, bool, int64) \
CUDA_DEVICE_BINARY(name, func, int16, bfloat16, bool, float32) \
CUDA_DEVICE_BINARY(name, func, int16, float16, bool, float32) \
CUDA_DEVICE_BINARY(name, func, int16, float32, bool, float32) \
CUDA_DEVICE_BINARY(name, func, int16, float64, bool, float64) \
CUDA_DEVICE_NOIMPL(name, cfunc, int16, complex32, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, int16, complex64, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, int16, complex128, bool, complex128) \
\
CUDA_DEVICE_BINARY(name, func, int32, uint8, bool, int32) \
CUDA_DEVICE_BINARY(name, func, int32, uint16, bool, int32) \
CUDA_DEVICE_BINARY(name, func, int32, uint32, bool, int64) \
CUDA_DEVICE_BINARY(name, func, int32, int8, bool, int32) \
CUDA_DEVICE_BINARY(name, func, int32, int16, bool, int32) \
CUDA_DEVICE_BINARY(name, func, int32, int32, bool, int32) \
CUDA_DEVICE_BINARY(name, func, int32, int64, bool, int64) \
CUDA_DEVICE_BINARY(name, func, int32, bfloat16, bool, float64) \
CUDA_DEVICE_BINARY(name, func, int32, float16, bool, float64) \
CUDA_DEVICE_BINARY(name, func, int32, float32, bool, float64) \
CUDA_DEVICE_BINARY(name, func, int32, float64, bool, float64) \
CUDA_DEVICE_NOIMPL(name, cfunc, int32, complex32, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, int32, complex64, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, int32, complex128, bool, complex128) \
\
CUDA_DEVICE_BINARY(name, func, int64, uint8, bool, int64) \
CUDA_DEVICE_BINARY(name, func, int64, uint16, bool, int64) \
CUDA_DEVICE_BINARY(name, func, int64, uint32, bool, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int8, bool, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int16, bool, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int32, bool, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int64, bool, int64) \
\
CUDA_DEVICE_BINARY(name, func, bfloat16, uint8, bool, bfloat16) \
CUDA_DEVICE_BINARY(name, func, bfloat16, uint16, bool, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, uint32, bool, float64) \
CUDA_DEVICE_BINARY(name, func, bfloat16, int8, bool, bfloat16) \
CUDA_DEVICE_BINARY(name, func, bfloat16, int16, bool, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, int32, bool, float64) \
CUDA_DEVICE_BINARY(name, func, bfloat16, bfloat16, bool, bfloat16) \
CUDA_DEVICE_BINARY(name, func, bfloat16, float16, bool, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, float32, bool, float32) \
CUDA_DEVICE_BINARY(name, func, bfloat16, float64, bool, float64) \
CUDA_DEVICE_NOIMPL(name, cfunc, bfloat16, complex32, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, bfloat16, complex64, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, bfloat16, complex128, bool, complex128) \
\
CUDA_DEVICE_BINARY(name, hfunc, float16, uint8, bool, float16) \
CUDA_DEVICE_BINARY(name, func, float16, uint16, bool, float32) \
CUDA_DEVICE_BINARY(name, func, float16, uint32, bool, float64) \
CUDA_DEVICE_BINARY(name, hfunc, float16, int8, bool, float16) \
CUDA_DEVICE_BINARY(name, func, float16, int16, bool, float32) \
CUDA_DEVICE_BINARY(name, func, float16, int32, bool, float64) \
CUDA_DEVICE_BINARY(name, func, float16, bfloat16, bool, float32) \
CUDA_DEVICE_BINARY(name, hfunc, float16, float16, bool, float16) \
CUDA_DEVICE_BINARY(name, func, float16, float32, bool, float32) \
CUDA_DEVICE_BINARY(name, func, float16, float64, bool, float64) \
CUDA_DEVICE_NOIMPL(name, cfunc, float16, complex32, bool, complex32) \
CUDA_DEVICE_BINARY(name, cfunc, float16, complex64, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, float16, complex128, bool, complex128) \
\
CUDA_DEVICE_BINARY(name, func, float32, uint8, bool, float32) \
CUDA_DEVICE_BINARY(name, func, float32, uint16, bool, float32) \
CUDA_DEVICE_BINARY(name, func, float32, uint32, bool, float64) \
CUDA_DEVICE_BINARY(name, func, float32, int8, bool, float32) \
CUDA_DEVICE_BINARY(name, func, float32, int16, bool, float32) \
CUDA_DEVICE_BINARY(name, func, float32, int32, bool, float64) \
CUDA_DEVICE_BINARY(name, func, float32, bfloat16, bool, float32) \
CUDA_DEVICE_BINARY(name, func, float32, float16, bool, float32) \
CUDA_DEVICE_BINARY(name, func, float32, float32, bool, float32) \
CUDA_DEVICE_BINARY(name, func, float32, float64, bool, float64) \
CUDA_DEVICE_NOIMPL(name, cfunc, float32, complex32, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, float32, complex64, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, float32, complex128, bool, complex128) \
\
CUDA_DEVICE_BINARY(name, func, float64, uint8, bool, float64) \
CUDA_DEVICE_BINARY(name, func, float64, uint16, bool, float64) \
CUDA_DEVICE_BINARY(name, func, float64, uint32, bool, float64) \
CUDA_DEVICE_BINARY(name, func, float64, int8, bool, float64) \
CUDA_DEVICE_BINARY(name, func, float64, int16, bool, float64) \
CUDA_DEVICE_BINARY(name, func, float64, int32, bool, float64) \
CUDA_DEVICE_BINARY(name, func, float64, bfloat16, bool, float64) \
CUDA_DEVICE_BINARY(name, func, float64, float16, bool, float64) \
CUDA_DEVICE_BINARY(name, func, float64, float32, bool, float64) \
CUDA_DEVICE_BINARY(name, func, float64, float64, bool, float64) \
CUDA_DEVICE_NOIMPL(name, cfunc, float64, complex32, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, float64, complex64, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, float64, complex128, bool, complex128) \
\
CUDA_DEVICE_NOIMPL(name, cfunc, complex32, uint8, bool, complex32) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex32, uint16, bool, complex64) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex32, uint32, bool, complex128) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex32, int8, bool, complex32) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex32, int16, bool, complex64) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex32, int32, bool, complex128) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex32, bfloat16, bool, complex64) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex32, float16, bool, complex32) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex32, float32, bool, complex64) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex32, float64, bool, complex128) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex32, complex32, bool, complex32) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex32, complex64, bool, complex64) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex32, complex128, bool, complex128) \
\
CUDA_DEVICE_BINARY(name, cfunc, complex64, uint8, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, complex64, uint16, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, complex64, uint32, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, complex64, int8, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, complex64, int16, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, complex64, int32, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, complex64, bfloat16, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, complex64, float16, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, complex64, float32, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, complex64, float64, bool, complex128) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex64, complex32, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, complex64, complex64, bool, complex64) \
CUDA_DEVICE_BINARY(name, cfunc, complex64, complex128, bool, complex128) \
\
CUDA_DEVICE_BINARY(name, cfunc, complex128, uint8, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, complex128, uint16, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, complex128, uint32, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, complex128, int8, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, complex128, int16, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, complex128, int32, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, complex128, bfloat16, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, complex128, float16, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, complex128, float32, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, complex128, float64, bool, complex128) \
CUDA_DEVICE_NOIMPL(name, cfunc, complex128, complex32, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, complex128, complex64, bool, complex128) \
CUDA_DEVICE_BINARY(name, cfunc, complex128, complex128, bool, complex128)
#define less(x, y) x < y
CUDA_DEVICE_ALL_COMPARISON(less, less, __hlt, lexorder_lt)
#define less_equal(x, y) x <= y
CUDA_DEVICE_ALL_COMPARISON(less_equal, less_equal, __hle, lexorder_le)
#define greater_equal(x, y) x >= y
CUDA_DEVICE_ALL_COMPARISON(greater_equal, greater_equal, __hge, lexorder_ge)
#define greater(x, y) x > y
CUDA_DEVICE_ALL_COMPARISON(greater, greater, __hgt, lexorder_gt)
#define equal(x, y) x == y
CUDA_DEVICE_ALL_COMPARISON(equal, equal, __heq, equal)
#define not_equal(x, y) x != y
CUDA_DEVICE_ALL_COMPARISON(not_equal, not_equal, half_ne, not_equal)
#define equaln(x, y) (x == y || (x != x && y != y))
CUDA_DEVICE_ALL_COMPARISON(equaln, equaln, half_eqn, lexorder_eqn)
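/* Note: equaln() above treats two NaNs as equal: (x != x && y != y) is the
   standard NaN self-inequality test, so the comparison means "equal, or both
   operands are NaN". */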
/*****************************************************************************/
/* Bitwise */
/*****************************************************************************/
#define CUDA_DEVICE_ALL_BITWISE(name, func) \
CUDA_DEVICE_BINARY(name, func, bool, bool, bool, bool) \
CUDA_DEVICE_BINARY(name, func, bool, uint8, uint8, uint8) \
CUDA_DEVICE_BINARY(name, func, bool, uint16, uint16, uint16) \
CUDA_DEVICE_BINARY(name, func, bool, uint32, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, bool, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, bool, int8, int8, int8) \
CUDA_DEVICE_BINARY(name, func, bool, int16, int16, int16) \
CUDA_DEVICE_BINARY(name, func, bool, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, bool, int64, int64, int64) \
\
CUDA_DEVICE_BINARY(name, func, uint8, bool, uint8, uint8) \
CUDA_DEVICE_BINARY(name, func, uint8, uint8, uint8, uint8) \
CUDA_DEVICE_BINARY(name, func, uint8, uint16, uint16, uint16) \
CUDA_DEVICE_BINARY(name, func, uint8, uint32, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint8, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint8, int8, int16, int16) \
CUDA_DEVICE_BINARY(name, func, uint8, int16, int16, int16) \
CUDA_DEVICE_BINARY(name, func, uint8, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, uint8, int64, int64, int64) \
\
CUDA_DEVICE_BINARY(name, func, uint16, bool, uint16, uint16) \
CUDA_DEVICE_BINARY(name, func, uint16, uint8, uint16, uint16) \
CUDA_DEVICE_BINARY(name, func, uint16, uint16, uint16, uint16) \
CUDA_DEVICE_BINARY(name, func, uint16, uint32, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint16, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint16, int8, int32, int32) \
CUDA_DEVICE_BINARY(name, func, uint16, int16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, uint16, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, uint16, int64, int64, int64) \
\
CUDA_DEVICE_BINARY(name, func, uint32, bool, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint32, uint8, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint32, uint16, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint32, uint32, uint32, uint32) \
CUDA_DEVICE_BINARY(name, func, uint32, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint32, int8, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, int16, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, int32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, uint32, int64, int64, int64) \
\
CUDA_DEVICE_BINARY(name, func, uint64, bool, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint64, uint8, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint64, uint16, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint64, uint32, uint64, uint64) \
CUDA_DEVICE_BINARY(name, func, uint64, uint64, uint64, uint64) \
\
CUDA_DEVICE_BINARY(name, func, int8, bool, int8, int8) \
CUDA_DEVICE_BINARY(name, func, int8, uint8, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int8, uint16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int8, uint32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int8, int8, int8, int8) \
CUDA_DEVICE_BINARY(name, func, int8, int16, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int8, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int8, int64, int64, int64) \
\
CUDA_DEVICE_BINARY(name, func, int16, bool, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int16, uint8, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int16, uint16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int16, uint32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int16, int8, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int16, int16, int16, int16) \
CUDA_DEVICE_BINARY(name, func, int16, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int16, int64, int64, int64) \
\
CUDA_DEVICE_BINARY(name, func, int32, bool, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, uint8, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, uint16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, uint32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int32, int8, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, int16, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, int32, int32, int32) \
CUDA_DEVICE_BINARY(name, func, int32, int64, int64, int64) \
\
CUDA_DEVICE_BINARY(name, func, int64, bool, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, uint8, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, uint16, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, uint32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int8, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int16, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int32, int64, int64) \
CUDA_DEVICE_BINARY(name, func, int64, int64, int64, int64)
#define bitwise_and(x, y) x & y
CUDA_DEVICE_ALL_BITWISE(bitwise_and, bitwise_and)
#define bitwise_or(x, y) x | y
CUDA_DEVICE_ALL_BITWISE(bitwise_or, bitwise_or)
#define bitwise_xor(x, y) x ^ y
CUDA_DEVICE_ALL_BITWISE(bitwise_xor, bitwise_xor)
/*****************************************************************************/
/* Two return values */
/*****************************************************************************/
#define CUDA_DEVICE_BINARY_MV(name, func, t0, t1, t2, t3) \
static __global__ void \
_1D_C_##name##_##t0##_##t1##_##t2##_##t3( \
const t0##_t *x0, const t1##_t *x1, t2##_t *x2, t2##_t *x3, \
int64_t N) \
{ \
int64_t index = threadIdx.x + blockIdx.x * blockDim.x; \
int64_t stride = blockDim.x * gridDim.x; \
\
for (int64_t i = index; i < N; i += stride) { \
func(&x2[i], &x3[i], x0[i], x1[i]); \
} \
} \
\
extern "C" void \
gm_cuda_device_fixed_1D_C_##name##_##t0##_##t1##_##t2##_##t3( \
const char *a0, const char *a1, char *a2, char *a3, \
int64_t N) \
{ \
const t0##_t *x0 = (const t0##_t *)a0; \
const t1##_t *x1 = (const t1##_t *)a1; \
t2##_t *x2 = (t2##_t *)a2; \
t3##_t *x3 = (t3##_t *)a3; \
int blockSize = 256; \
int64_t numBlocks = (N + blockSize - 1) / blockSize; \
\
_1D_C_##name##_##t0##_##t1##_##t2##_##t3<<<numBlocks, blockSize>>>( \
x0, x1, x2, x3, N); \
}
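/*
   The `func` passed to CUDA_DEVICE_BINARY_MV writes both results through its
   first two pointer arguments, as the call func(&x2[i], &x3[i], x0[i], x1[i])
   above shows. A minimal sketch of such a helper (illustrative only -- the
   project's real _divmod is defined elsewhere and may differ, e.g. in how it
   rounds negative operands or handles division by zero):

   static inline __device__ void
   _divmod_sketch(int64_t *q, int64_t *r, int64_t a, int64_t b)
   {
       *q = (b == 0) ? 0 : a / b;
       *r = (b == 0) ? 0 : a % b;
   }
*/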
#define CUDA_DEVICE_ALL_BINARY_MV(name, func) \
CUDA_DEVICE_BINARY_MV(name, func, uint8, uint8, uint8, uint8) \
CUDA_DEVICE_BINARY_MV(name, func, uint16, uint16, uint16, uint16) \
CUDA_DEVICE_BINARY_MV(name, func, uint32, uint32, uint32, uint32) \
CUDA_DEVICE_BINARY_MV(name, func, uint64, uint64, uint64, uint64) \
CUDA_DEVICE_BINARY_MV(name, func, int8, int8, int8, int8) \
CUDA_DEVICE_BINARY_MV(name, func, int16, int16, int16, int16) \
CUDA_DEVICE_BINARY_MV(name, func, int32, int32, int32, int32) \
CUDA_DEVICE_BINARY_MV(name, func, int64, int64, int64, int64) \
CUDA_DEVICE_BINARY_MV(name, func, bfloat16, bfloat16, bfloat16, bfloat16) \
CUDA_DEVICE_BINARY_MV(name, func, float32, float32, float32, float32) \
CUDA_DEVICE_BINARY_MV(name, func, float64, float64, float64, float64)
CUDA_DEVICE_ALL_BINARY_MV(divmod, _divmod)
|
715f34f4c3387c8c9810e0f8ca0e580e974e0f5c.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cfloat>
#define BLOCKSIZE 1024
// NOTE: If you use a constant such as 1. or 2., you must write scalar_t(1.) or scalar_t(2.), otherwise the values will be cast to double.
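// For example (illustrative only): writing
//     activations[i] = val / (1. + expf(-val));
// would promote the arithmetic to double because the literal 1. is a double,
// whereas the `const scalar_t one(1.)` pattern used in the kernels below avoids
// that promotion.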
// kernel function for forward and backward
template<typename scalar_t>
__global__ void SwishForward(const int nthreads,
const scalar_t *feat,
scalar_t *activations) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i{tid}; i < nthreads; i+=stride) {
const scalar_t one(1.);
scalar_t val = feat[i];
activations[i] = val / (one + expf(-val));
}
}
template<typename scalar_t>
__global__ void SwishBackward(const int nthreads,
const scalar_t *feat,
const scalar_t *grad,
scalar_t *grad_feat) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i{tid}; i < nthreads; i+=stride) {
const scalar_t one(1.);
scalar_t val = feat[i];
grad_feat[i] = (one + val / (one + expf(val))) / (one + expf(-val));
grad_feat[i] *= grad[i];
}
}
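// Derivation note for the expression above: with swish(x) = x * sigmoid(x),
//   d/dx [x * sigmoid(x)] = sigmoid(x) * (1 + x * (1 - sigmoid(x)))
//                         = (1 + x / (1 + exp(x))) / (1 + exp(-x)),
// which is what SwishBackward computes before scaling by the incoming gradient.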
namespace swish_space {
template<typename scalar_t>
__forceinline__ __device__ scalar_t ReLU6(scalar_t val) {
const scalar_t zero(0.);
const scalar_t six(6.);
scalar_t res = val;
if (res < zero) res = zero;
if (res > six) res = six;
return res;
}
}
template<typename scalar_t>
__global__ void HSwishForward(const int nthreads,
const scalar_t *feat,
scalar_t *activations) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i{tid}; i < nthreads; i+=stride) {
const scalar_t three(3.);
const scalar_t one_six(1. / 6.);
scalar_t val = feat[i];
activations[i] = val * swish_space::ReLU6(val + three) * one_six;
}
}
template<typename scalar_t>
__global__ void HSwishBackward(const int nthreads,
const scalar_t *feat,
const scalar_t *grad,
scalar_t *grad_feat) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i{tid}; i < nthreads; i+=stride) {
const scalar_t zero(0.);
const scalar_t _three(-3.);
const scalar_t three(3.);
const scalar_t one_six(1. / 6.);
scalar_t val = feat[i];
grad_feat[i] = (swish_space::ReLU6(val + three) * one_six + ((val > _three && val < three) ? one_six : zero) * val) * grad[i];
}
}
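// Derivation note for the expression above: with hswish(x) = x * relu6(x + 3) / 6,
//   d/dx hswish(x) = relu6(x + 3) / 6 + (x / 6) * [-3 < x < 3],
// where the bracket is 1 inside the interval and 0 outside, matching the kernel.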
// cuda forward and backward
at::Tensor Swish_forward_cuda(const at::Tensor &feat) {
// CHECK type and shape
AT_ASSERTM(feat.device().type() == c10::kCUDA, "feat should be cuda");
// allocate memory and cuda grid/block
auto activations = at::empty_like(feat);
const int num_samples = feat.numel();
dim3 grid(::min(
THCCeilDiv(num_samples, 2 * BLOCKSIZE), 4096
));
dim3 block(BLOCKSIZE);
if (activations.numel() == 0) {
THCudaCheck(hipGetLastError());
return activations;
}
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(activations.scalar_type(), "swish forward", [&] {
hipLaunchKernelGGL(( SwishForward<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_samples,
feat.contiguous().data_ptr<scalar_t>(),
activations.contiguous().data_ptr<scalar_t>()
);
});
THCudaCheck(hipGetLastError());
return activations;
}
at::Tensor Swish_backward_cuda(const at::Tensor &grad, const at::Tensor &feat) {
// CHECK type and shape
AT_ASSERTM(grad.device().type() == c10::kCUDA, "grad should be cuda");
AT_ASSERTM(feat.device().type() == c10::kCUDA, "feat should be cuda");
// allocate memory and cuda grid/block
auto grad_feat = at::empty_like(feat);
const int num_samples = feat.numel();
dim3 grid(::min(
// THCCeilDiv(num_samples, BLOCKSIZE), 4096
THCCeilDiv(num_samples, 2 * BLOCKSIZE), 4096
));
dim3 block(BLOCKSIZE);
if (grad_feat.numel() == 0) {
THCudaCheck(hipGetLastError());
return grad_feat;
}
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_feat.scalar_type(), "swish backward", [&] {
hipLaunchKernelGGL(( SwishBackward<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_samples,
feat.contiguous().data_ptr<scalar_t>(),
grad.contiguous().data_ptr<scalar_t>(),
grad_feat.contiguous().data_ptr<scalar_t>()
);
});
THCudaCheck(hipGetLastError());
return grad_feat;
}
at::Tensor HSwish_forward_cuda(const at::Tensor &feat) {
// CHECK type and shape
AT_ASSERTM(feat.device().type() == c10::kCUDA, "feat should be cuda");
// allocate memory and cuda grid/block
auto activations = at::empty_like(feat);
const int num_samples = feat.numel();
dim3 grid(::min(
THCCeilDiv(num_samples, 2 * BLOCKSIZE), 4096
));
dim3 block(BLOCKSIZE);
if (activations.numel() == 0) {
THCudaCheck(hipGetLastError());
return activations;
}
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(activations.scalar_type(), "hswish forward", [&] {
hipLaunchKernelGGL(( HSwishForward<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_samples,
feat.contiguous().data_ptr<scalar_t>(),
activations.contiguous().data_ptr<scalar_t>()
);
});
THCudaCheck(hipGetLastError());
return activations;
}
at::Tensor HSwish_backward_cuda(const at::Tensor &grad, const at::Tensor &feat) {
// CHECK type and shape
AT_ASSERTM(grad.device().type() == c10::kCUDA, "grad should be cuda");
AT_ASSERTM(feat.device().type() == c10::kCUDA, "feat should be cuda");
// allocate memory and cuda grid/block
auto grad_feat = at::empty_like(feat);
const int num_samples = feat.numel();
dim3 grid(::min(
THCCeilDiv(num_samples, 2 * BLOCKSIZE), 4096
));
dim3 block(BLOCKSIZE);
if (grad_feat.numel() == 0) {
THCudaCheck(hipGetLastError());
return grad_feat;
}
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_feat.scalar_type(), "hswish backward", [&] {
hipLaunchKernelGGL(( HSwishBackward<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
num_samples,
feat.contiguous().data_ptr<scalar_t>(),
grad.contiguous().data_ptr<scalar_t>(),
grad_feat.contiguous().data_ptr<scalar_t>()
);
});
THCudaCheck(hipGetLastError());
return grad_feat;
}
// python interface
at::Tensor Swish_forward(const at::Tensor &feat) {
if (feat.device().type() != c10::kCUDA) {
AT_ERROR("this swish function only supports gpu mode\n");
}
at::DeviceGuard guard(feat.device());
return Swish_forward_cuda(feat);
}
at::Tensor Swish_backward(const at::Tensor &grad, const at::Tensor &feat) {
// TODO: try AT_ASSERTM
if (feat.device().type() != c10::kCUDA) {
AT_ERROR("this swish function only supports gpu mode\n");
}
at::DeviceGuard guard(feat.device());
return Swish_backward_cuda(grad, feat);
}
at::Tensor HSwish_forward(const at::Tensor &feat) {
if (feat.device().type() != c10::kCUDA) {
AT_ERROR("this swish function only supports gpu mode\n");
}
at::DeviceGuard guard(feat.device());
return HSwish_forward_cuda(feat);
}
at::Tensor HSwish_backward(const at::Tensor &grad, const at::Tensor &feat) {
// TODO: try AT_ASSERTM
if (feat.device().type() != c10::kCUDA) {
AT_ERROR("this swish function only supports gpu mode\n");
}
at::DeviceGuard guard(feat.device());
return HSwish_backward_cuda(grad, feat);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("swish_forward", &Swish_forward, "swish forward");
m.def("swish_backward", &Swish_backward, "swish backward");
m.def("hswish_forward", &HSwish_forward, "hswish forward");
m.def("hswish_backward", &HSwish_backward, "hswish backward");
}
| 715f34f4c3387c8c9810e0f8ca0e580e974e0f5c.cu |
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cfloat>
#define BLOCKSIZE 1024
// NOTE: If you use a constant such as 1. or 2., you must write scalar_t(1.) or scalar_t(2.), otherwise the values will be cast to double.
// kernel function for forward and backward
template<typename scalar_t>
__global__ void SwishForward(const int nthreads,
const scalar_t *feat,
scalar_t *activations) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i{tid}; i < nthreads; i+=stride) {
const scalar_t one(1.);
scalar_t val = feat[i];
activations[i] = val / (one + expf(-val));
}
}
template<typename scalar_t>
__global__ void SwishBackward(const int nthreads,
const scalar_t *feat,
const scalar_t *grad,
scalar_t *grad_feat) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i{tid}; i < nthreads; i+=stride) {
const scalar_t one(1.);
scalar_t val = feat[i];
grad_feat[i] = (one + val / (one + expf(val))) / (one + expf(-val));
grad_feat[i] *= grad[i];
}
}
namespace swish_space {
template<typename scalar_t>
__forceinline__ __device__ scalar_t ReLU6(scalar_t val) {
const scalar_t zero(0.);
const scalar_t six(6.);
scalar_t res = val;
if (res < zero) res = zero;
if (res > six) res = six;
return res;
}
}
template<typename scalar_t>
__global__ void HSwishForward(const int nthreads,
const scalar_t *feat,
scalar_t *activations) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i{tid}; i < nthreads; i+=stride) {
const scalar_t three(3.);
const scalar_t one_six(1. / 6.);
scalar_t val = feat[i];
activations[i] = val * swish_space::ReLU6(val + three) * one_six;
}
}
template<typename scalar_t>
__global__ void HSwishBackward(const int nthreads,
const scalar_t *feat,
const scalar_t *grad,
scalar_t *grad_feat) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i{tid}; i < nthreads; i+=stride) {
const scalar_t zero(0.);
const scalar_t _three(-3.);
const scalar_t three(3.);
const scalar_t one_six(1. / 6.);
scalar_t val = feat[i];
grad_feat[i] = (swish_space::ReLU6(val + three) * one_six + ((val > _three && val < three) ? one_six : zero) * val) * grad[i];
}
}
// cuda forward and backward
at::Tensor Swish_forward_cuda(const at::Tensor &feat) {
// CHECK type and shape
AT_ASSERTM(feat.device().type() == c10::kCUDA, "feat should be cuda");
// allocate memory and cuda grid/block
auto activations = at::empty_like(feat);
const int num_samples = feat.numel();
dim3 grid(std::min(
THCCeilDiv(num_samples, 2 * BLOCKSIZE), 4096
));
dim3 block(BLOCKSIZE);
if (activations.numel() == 0) {
THCudaCheck(cudaGetLastError());
return activations;
}
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(activations.scalar_type(), "swish forward", [&] {
SwishForward<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
num_samples,
feat.contiguous().data_ptr<scalar_t>(),
activations.contiguous().data_ptr<scalar_t>()
);
});
THCudaCheck(cudaGetLastError());
return activations;
}
at::Tensor Swish_backward_cuda(const at::Tensor &grad, const at::Tensor &feat) {
// CHECK type and shape
AT_ASSERTM(grad.device().type() == c10::kCUDA, "grad should be cuda");
AT_ASSERTM(feat.device().type() == c10::kCUDA, "feat should be cuda");
// allocate memory and cuda grid/block
auto grad_feat = at::empty_like(feat);
const int num_samples = feat.numel();
dim3 grid(std::min(
// THCCeilDiv(num_samples, BLOCKSIZE), 4096
THCCeilDiv(num_samples, 2 * BLOCKSIZE), 4096
));
dim3 block(BLOCKSIZE);
if (grad_feat.numel() == 0) {
THCudaCheck(cudaGetLastError());
return grad_feat;
}
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_feat.scalar_type(), "swish backward", [&] {
SwishBackward<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
num_samples,
feat.contiguous().data_ptr<scalar_t>(),
grad.contiguous().data_ptr<scalar_t>(),
grad_feat.contiguous().data_ptr<scalar_t>()
);
});
THCudaCheck(cudaGetLastError());
return grad_feat;
}
at::Tensor HSwish_forward_cuda(const at::Tensor &feat) {
// CHECK type and shape
AT_ASSERTM(feat.device().type() == c10::kCUDA, "feat should be cuda");
// allocate memory and cuda grid/block
auto activations = at::empty_like(feat);
const int num_samples = feat.numel();
dim3 grid(std::min(
THCCeilDiv(num_samples, 2 * BLOCKSIZE), 4096
));
dim3 block(BLOCKSIZE);
if (activations.numel() == 0) {
THCudaCheck(cudaGetLastError());
return activations;
}
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(activations.scalar_type(), "hswish forward", [&] {
HSwishForward<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
num_samples,
feat.contiguous().data_ptr<scalar_t>(),
activations.contiguous().data_ptr<scalar_t>()
);
});
THCudaCheck(cudaGetLastError());
return activations;
}
at::Tensor HSwish_backward_cuda(const at::Tensor &grad, const at::Tensor &feat) {
// CHECK type and shape
AT_ASSERTM(grad.device().type() == c10::kCUDA, "grad should be cuda");
AT_ASSERTM(feat.device().type() == c10::kCUDA, "feat should be cuda");
// allocate memory and cuda grid/block
auto grad_feat = at::empty_like(feat);
const int num_samples = feat.numel();
dim3 grid(std::min(
THCCeilDiv(num_samples, 2 * BLOCKSIZE), 4096
));
dim3 block(BLOCKSIZE);
if (grad_feat.numel() == 0) {
THCudaCheck(cudaGetLastError());
return grad_feat;
}
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_feat.scalar_type(), "hswish backward", [&] {
HSwishBackward<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
num_samples,
feat.contiguous().data_ptr<scalar_t>(),
grad.contiguous().data_ptr<scalar_t>(),
grad_feat.contiguous().data_ptr<scalar_t>()
);
});
THCudaCheck(cudaGetLastError());
return grad_feat;
}
// python interface
at::Tensor Swish_forward(const at::Tensor &feat) {
if (feat.device().type() != c10::kCUDA) {
AT_ERROR("this swish function only supports gpu mode\n");
}
at::DeviceGuard guard(feat.device());
return Swish_forward_cuda(feat);
}
at::Tensor Swish_backward(const at::Tensor &grad, const at::Tensor &feat) {
// TODO: try AT_ASSERTM
if (feat.device().type() != c10::kCUDA) {
AT_ERROR("this swish function only supports gpu mode\n");
}
at::DeviceGuard guard(feat.device());
return Swish_backward_cuda(grad, feat);
}
at::Tensor HSwish_forward(const at::Tensor &feat) {
if (feat.device().type() != c10::kCUDA) {
AT_ERROR("this swish function only supports gpu mode\n");
}
at::DeviceGuard guard(feat.device());
return HSwish_forward_cuda(feat);
}
at::Tensor HSwish_backward(const at::Tensor &grad, const at::Tensor &feat) {
// TODO: try AT_ASSERTM
if (feat.device().type() != c10::kCUDA) {
AT_ERROR("this swish function only supports gpu mode\n");
}
at::DeviceGuard guard(feat.device());
return HSwish_backward_cuda(grad, feat);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("swish_forward", &Swish_forward, "swish forward");
m.def("swish_backward", &Swish_backward, "swish backward");
m.def("hswish_forward", &HSwish_forward, "hswish forward");
m.def("hswish_backward", &HSwish_backward, "hswish backward");
}
|
89da2177bec7ed59bbb904dd5748f2e409900d06.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <cstdio>
#include <omp.h>
#include <algorithm>
#include "constants.h"
__global__ void dirichlet(double *const d_a, double *const d_a_new, int chunk_size) {
unsigned int bx = blockIdx.x;
unsigned int by = blockIdx.y;
unsigned int tx = threadIdx.x;
unsigned int ty = threadIdx.y;
unsigned int col = bx * blockDim.x + tx;
unsigned int row = by * blockDim.y + ty;
// Exit out of the thread if the row is greater than the chunk size
// Or if the column is greater than 0, because we only need to fill two columns
// And we use the same thread to write both
if (row > chunk_size + 1 || col > 0)
return;
const double y0 = 1;
d_a[row * WIDTH] = y0;
d_a[row * WIDTH + (WIDTH - 1)] = y0;
d_a_new[row * WIDTH] = y0;
d_a_new[row * WIDTH + (WIDTH - 1)] = y0;
}
__global__ void
jacobiKernel(double *d_a_new, const double *d_a, const int iy_end) {
unsigned int bx = blockIdx.x;
unsigned int by = blockIdx.y;
unsigned int tx = threadIdx.x;
unsigned int ty = threadIdx.y;
unsigned int row = by * blockDim.y + ty + 1;
unsigned int col = bx * blockDim.x + tx;
if (row < iy_end) {
if (col >= 1 && col < (WIDTH - 1)) {
const double new_val = 0.25 * (d_a[row * WIDTH + col + 1] + d_a[row * WIDTH + col - 1] +
d_a[(row + 1) * WIDTH + col] + d_a[(row - 1) * WIDTH + col]);
d_a_new[row * WIDTH + col] = new_val;
}
}
}
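// Each interior point is relaxed with the standard 5-point Jacobi update:
//   a_new[row][col] = 0.25 * (a[row][col+1] + a[row][col-1] + a[row+1][col] + a[row-1][col]).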
__host__ __inline__ void printMatrix(double *h_a) {
for (int y = 0; y < HEIGHT; y++) {
for (int x = 0; x < WIDTH; x++) {
printf("%f ", h_a[y * WIDTH + x]);
}
printf("\n");
}
}
__host__ __inline__ void
mergeMatrices(double *h_a, double *d_a, int chunk_size, int offset, int dev_id, int device_count) {
if (dev_id == 0) {
// Copy the first row
hipMemcpy(h_a, d_a,
WIDTH * sizeof(double),
hipMemcpyDeviceToHost);
}
// Copy each chunk based on the device id
hipMemcpy(h_a + offset, d_a + WIDTH,
::min((WIDTH * HEIGHT) - offset, WIDTH * chunk_size) * sizeof(double),
hipMemcpyDeviceToHost);
if (dev_id == device_count - 1) {
// Copy the last row
int lastRow = chunk_size * WIDTH + WIDTH;
offset = WIDTH * (HEIGHT - 1);
hipMemcpy(h_a + offset, d_a + lastRow,
WIDTH * sizeof(double),
hipMemcpyDeviceToHost);
}
}
double *jacobi(int device_count) {
double *h_a;
double *d_a_new[MAX_DEVICE];
int iy_end[MAX_DEVICE];
hipEvent_t start, stop;
float milliseconds = 0;
if (device_count == 0) {
hipGetDeviceCount(&device_count);
}
printf("Running with %d GPU(s)\n", device_count);
h_a = (double *) malloc(WIDTH * HEIGHT * sizeof(double));
#pragma omp parallel num_threads(device_count) shared(h_a)
{
// Each thread has its own d_a variable
double *d_a;
// As the number of threads is equal to the number of CUDA devices, each thread id
// can be seen as the device id
int dev_id = omp_get_thread_num();
// Set the device for each thread
hipSetDevice(dev_id);
int iy_start;
int chunk_size;
int chunk_size_low = (HEIGHT - 2) / device_count;
int chunk_size_high = chunk_size_low + 1;
// The number of ranks with a smaller chunk_size
// Example: HEIGHT = 8192, 2 devices
// num_ranks_low = 2 * 4095 + 2 - 8190 = 2
// Devices 0 and 1 with the same chunk size (4095)
// Example: HEIGHT = 8193, 2 devices
// num_ranks_low = 2 * 4095 + 2 - 8191 = 1
// Device 0 with chunk size 4095
// Device 1 with chunk size 4096
int num_ranks_low = device_count * chunk_size_low + device_count - (HEIGHT - 2);
if (dev_id < num_ranks_low)
chunk_size = chunk_size_low;
else
chunk_size = chunk_size_high;
// Each thread allocates its own d_a and d_a_new[dev_id] with its chunk size
// and two more rows: top and bottom
hipMalloc(&d_a, WIDTH * (chunk_size + 2) * sizeof(double));
hipMalloc(d_a_new + dev_id, WIDTH * (chunk_size + 2) * sizeof(double));
hipMemset(d_a, 0, WIDTH * (chunk_size + 2) * sizeof(double));
hipMemset(d_a_new[dev_id], 0, WIDTH * (chunk_size + 2) * sizeof(double));
// Calculate local domain boundaries
int iy_start_global; // My start index in the global array
if (dev_id < num_ranks_low) {
iy_start_global = dev_id * chunk_size_low + 1;
} else {
iy_start_global =
num_ranks_low * chunk_size_low + (dev_id - num_ranks_low) * chunk_size_high + 1;
}
iy_start = 1;
iy_end[dev_id] = iy_start + chunk_size;
dim3 dimBlock(TILE_SIZE_X, TILE_SIZE_Y, 1);
dim3 dimGridDirichlet(1, ::ceil((chunk_size + dimBlock.y - 1) / float(dimBlock.y)), 1);
// Set dirichlet boundary conditions on left and right border
hipLaunchKernelGGL(( dirichlet), dim3(dimGridDirichlet), dim3(dimBlock), 0, 0, d_a, d_a_new[dev_id], chunk_size);
hipGetLastError();
const int top = dev_id > 0 ? dev_id - 1 : (device_count - 1);
const int bottom = (dev_id + 1) % device_count;
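// Initial halo exchange: each device seeds its neighbours' halo rows with its
// first and last interior rows so that the first Jacobi sweep reads consistent
// values (the top/bottom indices wrap around; inside the iteration loop below
// the wrap-around copies are skipped for the edge devices).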
hipMemcpy(d_a_new[top] + (iy_end[dev_id] * WIDTH),
d_a_new[dev_id] + iy_start * WIDTH, WIDTH * sizeof(double),
hipMemcpyDeviceToDevice);
hipMemcpy(d_a_new[bottom], d_a_new[dev_id] + (iy_end[dev_id] - 1) * WIDTH,
WIDTH * sizeof(double), hipMemcpyDeviceToDevice);
#pragma omp barrier
#if defined DEBUG && DEBUG == 1
mergeMatrices(h_a, d_a, chunk_size, iy_start_global * WIDTH, dev_id, device_count);
#pragma omp barrier
#pragma omp master
{
printf("Initialization\n");
printMatrix(h_a);
}
#endif
dim3 dimGrid((WIDTH + dimBlock.x - 1) / dimBlock.x,
(chunk_size + dimBlock.y - 1) / dimBlock.y, 1);
#pragma omp master
{
// Prepare the timer
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
}
for (int i = 0; i < NB_ITERS; i++) {
hipLaunchKernelGGL(( jacobiKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a_new[dev_id], d_a, iy_end[dev_id]);
hipGetLastError();
// Send the first row of the current device to the bottom row of the "top" device
// Only if the current device isn't already the top one
if (dev_id > 0) {
hipMemcpyAsync(d_a_new[top] + iy_end[top] * WIDTH,
d_a_new[dev_id] + (iy_start - 1) * WIDTH,
WIDTH * sizeof(double),
hipMemcpyDeviceToDevice);
}
// Send the last row of the current device to the top row of the "bottom" device
// Only if the current device isn't already the bottom one
if (dev_id < device_count - 1) {
hipMemcpyAsync(d_a_new[bottom],
d_a_new[dev_id] + (iy_end[dev_id] - 1) * WIDTH,
WIDTH * sizeof(double),
hipMemcpyDeviceToDevice);
}
#pragma omp barrier
std::swap(d_a_new[dev_id], d_a);
}
#pragma omp barrier
#pragma omp master
{
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
printf("%d Jacobi iterations done in %lf seconds in a mesh : %dx%d\n", NB_ITERS, milliseconds / 1000, WIDTH,
HEIGHT);
}
mergeMatrices(h_a, d_a, chunk_size, iy_start_global * WIDTH, dev_id, device_count);
#if defined DEBUG && DEBUG == 1
#pragma omp barrier
#pragma omp master
{
printf("Final matrix\n");
printMatrix(h_a);
}
#endif
hipFree(d_a);
hipFree(d_a_new[dev_id]);
}
return h_a;
} | 89da2177bec7ed59bbb904dd5748f2e409900d06.cu | #include <cmath>
#include <cstdio>
#include <omp.h>
#include <algorithm>
#include "constants.h"
__global__ void dirichlet(double *const d_a, double *const d_a_new, int chunk_size) {
unsigned int bx = blockIdx.x;
unsigned int by = blockIdx.y;
unsigned int tx = threadIdx.x;
unsigned int ty = threadIdx.y;
unsigned int col = bx * blockDim.x + tx;
unsigned int row = by * blockDim.y + ty;
// Exit out of the thread if the row is greater than the chunk size
// Or if the column is greater than 0, because we only need to fill two columns
// And we use the same thread to write both
if (row > chunk_size + 1 || col > 0)
return;
const double y0 = 1;
d_a[row * WIDTH] = y0;
d_a[row * WIDTH + (WIDTH - 1)] = y0;
d_a_new[row * WIDTH] = y0;
d_a_new[row * WIDTH + (WIDTH - 1)] = y0;
}
__global__ void
jacobiKernel(double *d_a_new, const double *d_a, const int iy_end) {
unsigned int bx = blockIdx.x;
unsigned int by = blockIdx.y;
unsigned int tx = threadIdx.x;
unsigned int ty = threadIdx.y;
unsigned int row = by * blockDim.y + ty + 1;
unsigned int col = bx * blockDim.x + tx;
if (row < iy_end) {
if (col >= 1 && col < (WIDTH - 1)) {
const double new_val = 0.25 * (d_a[row * WIDTH + col + 1] + d_a[row * WIDTH + col - 1] +
d_a[(row + 1) * WIDTH + col] + d_a[(row - 1) * WIDTH + col]);
d_a_new[row * WIDTH + col] = new_val;
}
}
}
__host__ __inline__ void printMatrix(double *h_a) {
for (int y = 0; y < HEIGHT; y++) {
for (int x = 0; x < WIDTH; x++) {
printf("%f ", h_a[y * WIDTH + x]);
}
printf("\n");
}
}
__host__ __inline__ void
mergeMatrices(double *h_a, double *d_a, int chunk_size, int offset, int dev_id, int device_count) {
if (dev_id == 0) {
// Copy the first row
cudaMemcpy(h_a, d_a,
WIDTH * sizeof(double),
cudaMemcpyDeviceToHost);
}
// Copy each chunk based on the device id
cudaMemcpy(h_a + offset, d_a + WIDTH,
std::min((WIDTH * HEIGHT) - offset, WIDTH * chunk_size) * sizeof(double),
cudaMemcpyDeviceToHost);
if (dev_id == device_count - 1) {
// Copy the last row
int lastRow = chunk_size * WIDTH + WIDTH;
offset = WIDTH * (HEIGHT - 1);
cudaMemcpy(h_a + offset, d_a + lastRow,
WIDTH * sizeof(double),
cudaMemcpyDeviceToHost);
}
}
double *jacobi(int device_count) {
double *h_a;
double *d_a_new[MAX_DEVICE];
int iy_end[MAX_DEVICE];
cudaEvent_t start, stop;
float milliseconds = 0;
if (device_count == 0) {
cudaGetDeviceCount(&device_count);
}
printf("Running with %d GPU(s)\n", device_count);
h_a = (double *) malloc(WIDTH * HEIGHT * sizeof(double));
#pragma omp parallel num_threads(device_count) shared(h_a)
{
// Each thread has its own d_a variable
double *d_a;
// As the number of threads is equal to the number of CUDA devices, each thread id
// can be seen as the device id
int dev_id = omp_get_thread_num();
// Set the device for each thread
cudaSetDevice(dev_id);
int iy_start;
int chunk_size;
int chunk_size_low = (HEIGHT - 2) / device_count;
int chunk_size_high = chunk_size_low + 1;
// The number of ranks with a smaller chunk_size
// Example: HEIGHT = 8192, 2 devices
// num_ranks_low = 2 * 4095 + 2 - 8190 = 2
// Devices 0 and 1 with the same chunk size (4095)
// Example: HEIGHT = 8193, 2 devices
// num_ranks_low = 2 * 4095 + 2 - 8191 = 1
// Device 0 with chunk size 4095
// Device 1 with chunk size 4096
int num_ranks_low = device_count * chunk_size_low + device_count - (HEIGHT - 2);
if (dev_id < num_ranks_low)
chunk_size = chunk_size_low;
else
chunk_size = chunk_size_high;
// Each thread allocates its own d_a and d_a_new[dev_id] with its chunk size
// and two more rows: top and bottom
cudaMalloc(&d_a, WIDTH * (chunk_size + 2) * sizeof(double));
cudaMalloc(d_a_new + dev_id, WIDTH * (chunk_size + 2) * sizeof(double));
cudaMemset(d_a, 0, WIDTH * (chunk_size + 2) * sizeof(double));
cudaMemset(d_a_new[dev_id], 0, WIDTH * (chunk_size + 2) * sizeof(double));
// Calculate local domain boundaries
int iy_start_global; // My start index in the global array
if (dev_id < num_ranks_low) {
iy_start_global = dev_id * chunk_size_low + 1;
} else {
iy_start_global =
num_ranks_low * chunk_size_low + (dev_id - num_ranks_low) * chunk_size_high + 1;
}
iy_start = 1;
iy_end[dev_id] = iy_start + chunk_size;
dim3 dimBlock(TILE_SIZE_X, TILE_SIZE_Y, 1);
dim3 dimGridDirichlet(1, std::ceil((chunk_size + dimBlock.y - 1) / float(dimBlock.y)), 1);
// Set dirichlet boundary conditions on left and right border
dirichlet<<<dimGridDirichlet, dimBlock>>>(d_a, d_a_new[dev_id], chunk_size);
cudaGetLastError();
const int top = dev_id > 0 ? dev_id - 1 : (device_count - 1);
const int bottom = (dev_id + 1) % device_count;
cudaMemcpy(d_a_new[top] + (iy_end[dev_id] * WIDTH),
d_a_new[dev_id] + iy_start * WIDTH, WIDTH * sizeof(double),
cudaMemcpyDeviceToDevice);
cudaMemcpy(d_a_new[bottom], d_a_new[dev_id] + (iy_end[dev_id] - 1) * WIDTH,
WIDTH * sizeof(double), cudaMemcpyDeviceToDevice);
#pragma omp barrier
#if defined DEBUG && DEBUG == 1
mergeMatrices(h_a, d_a, chunk_size, iy_start_global * WIDTH, dev_id, device_count);
#pragma omp barrier
#pragma omp master
{
printf("Initialization\n");
printMatrix(h_a);
}
#endif
dim3 dimGrid((WIDTH + dimBlock.x - 1) / dimBlock.x,
(chunk_size + dimBlock.y - 1) / dimBlock.y, 1);
#pragma omp master
{
// Prepare the timer
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
}
for (int i = 0; i < NB_ITERS; i++) {
jacobiKernel<<<dimGrid, dimBlock>>>(d_a_new[dev_id], d_a, iy_end[dev_id]);
cudaGetLastError();
// Send the first row of the current device to the bottom row of the "top" device
// Only if the current device isn't already the top one
if (dev_id > 0) {
cudaMemcpyAsync(d_a_new[top] + iy_end[top] * WIDTH,
d_a_new[dev_id] + (iy_start - 1) * WIDTH,
WIDTH * sizeof(double),
cudaMemcpyDeviceToDevice);
}
// Send the last row of the current device to the top row of the "bottom" device
// Only if the current device isn't already the bottom one
if (dev_id < device_count - 1) {
cudaMemcpyAsync(d_a_new[bottom],
d_a_new[dev_id] + (iy_end[dev_id] - 1) * WIDTH,
WIDTH * sizeof(double),
cudaMemcpyDeviceToDevice);
}
#pragma omp barrier
std::swap(d_a_new[dev_id], d_a);
}
#pragma omp barrier
#pragma omp master
{
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("%d Jacobi iterations done in %lf seconds in a mesh : %dx%d\n", NB_ITERS, milliseconds / 1000, WIDTH,
HEIGHT);
}
mergeMatrices(h_a, d_a, chunk_size, iy_start_global * WIDTH, dev_id, device_count);
#if defined DEBUG && DEBUG == 1
#pragma omp barrier
#pragma omp master
{
printf("Final matrix\n");
printMatrix(h_a);
}
#endif
cudaFree(d_a);
cudaFree(d_a_new[dev_id]);
}
return h_a;
} |
728ebf3119f065a54a5ad80031907ca178cf403e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
/*
Instructions
COMPILE --> nvcc 2DstencilGPUSharedMemoryBlankBorderTimeSpaceSharingOpencvKarma.cu -o go `pkg-config --cflags --libs opencv` -w
RUN --> ./go DOMAIN_DIMS STENCIL_ORDER SPACE_TIME_BLOCK_TIMES BLOCK_DIM_X BLOCK_DIM_Y
*/
//////////////////////////////////////////////////////////////////////////
static void HandleError( hipError_t err,
const char *file,
int line ) {
if (err != hipSuccess) {
printf( "%s in %s at line %d\n", hipGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//////////////////////////////////////////////////////////////////////////
#include <iostream>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <stdio.h>
#include <opencv2/imgcodecs.hpp>
#include <math.h>
#include <string>
using namespace std;
//===> Karma model constants <===//
#ifndef MODEL_WIDTH
#define MODEL_WIDTH 0
#endif
#define Eh 3.0f
#define En 1.0f
#define Re 0.6f
#define tauE 5.0f
#define tauN 250.0f
#define gam 0.001f
#define East 1.5415f
#define DT 0.05f
#define DX (12.0f / MODEL_WIDTH)
/*
Device-only (GPU) function that receives the parameters for computing one stencil point
d_e - input data
d_r - output data
d_v - field that must be updated
c_coeff - variable used to store the stencil coefficient values (used only in the earlier plain-stencil version)
X - Y - dimensions of the input structures
k - stencil order
x - y - position of the stencil centre in the input structure
GX - horizontal dimension of the output structure
Gx - Gy - position of the stencil centre in the output structure
*/
__device__ void destinyTest(float *d_r,int GX, int Gx, int Gy,float val)
{
d_r[Gx + ((Gy) * (GX))] = val;
}
__device__ void _2Dstencil_(float *d_e, float *d_r, float *d_v, int X, int x, int y, int GX, int Gx, int Gy)
{
int h_e_i = x + (y * (X));
float temp = d_e[h_e_i];
float rv = d_v[h_e_i];
float Rn = (1.0f / (1.0f - expf(-Re))) - rv;
float p = (temp > En) * 1.0f;
float dv = (Rn * p - (1.0f - p) * rv) / tauN;
float Dn = rv * rv;
float hE = (1.0f - tanh(temp - Eh)) * temp * temp / 2.0f;
float du = (((East - Dn) * hE) - temp) / tauE;
float xlapr = d_e[(x + 1) + ((y) * (X))] - temp;
float xlapl = temp - d_e[(x - 1) + ((y) * (X))];
float xlapf = d_e[(x) + ((y + 1) * (X))] - temp;
float xlapb = temp - d_e[(x) + ((y - 1) * (X))];
float lap = xlapr - xlapl + xlapf - xlapb;
temp = (temp + (du * DT) + (lap * DT * gam / (DX * DX)));
d_v[h_e_i] = rv + dv * DT;
//d_v[h_e_i] = rv+rv;
h_e_i = Gx + ((Gy) * (GX));
d_r[h_e_i] = temp;//d_v[h_e_i];// d_e[h_e_i]+1;// = temp;
// d_r[h_e_i] = rv;
}
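/*
Restating the update implemented above in equation form (forward Euler; derived
from the code itself, not from an external reference):
  p = 1 if E > En else 0
  Rn = 1 / (1 - exp(-Re)) - n
  dn/dt = (Rn * p - (1 - p) * n) / tauN
  h(E) = (1 - tanh(E - Eh)) * E^2 / 2
  dE/dt = ((East - n^2) * h(E) - E) / tauE + gam * lap(E) / DX^2
where lap(E) is the 5-point Laplacian built from the four neighbour differences.
*/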
/*
Kernel launched by the host that controls the copies and the order of the stencil computations, as well as the workload of each thread
*/
__global__ void _2Dstencil_global(float *d_e, float *d_r, float *d_v, int X, int Y, int times,bool eraseMiddle)
{
int x, y; //,h_e_i,h_r_i,Xs,Ys,Dx,Dy;
x = threadIdx.x + (blockIdx.x * blockDim.x);
y = threadIdx.y + (blockIdx.y * blockDim.y);
extern __shared__ float sharedOrig[];
int blockThreadIndex = threadIdx.x + threadIdx.y * blockDim.x;
// Xs = threadIdx.x;
// Ys = threadIdx.y;
int Dx = blockDim.x + (2 * times);
int Dy = blockDim.y + (2 * times);
int sharedTam = Dx * Dy;
float * shared = sharedOrig;
float * sharedRes = shared + sharedTam;
float * sharedV = sharedRes + sharedTam;
//float * sharedRes = &shared[sharedTam];
//float *sharedV = &sharedRes[sharedTam];
/*
Copies into shared memory the tile required for the desired temporal-blocking configuration.
A strided loop is used because the number of elements to copy is always larger than the number of threads.
Out-of-range border indices are clamped to the domain edge, replicating the boundary values.
*/
for (int stride = blockThreadIndex; stride < sharedTam; stride += (blockDim.x * blockDim.y))
{
int sharedIdxX = stride % Dx;
int sharedIdxY = int(stride / Dx);
int globalIdxX =(blockIdx.x * blockDim.x) + sharedIdxX - times;
int globalIdxY =(blockIdx.y * blockDim.y) + sharedIdxY - times;
//int globalIdx = globalIdxX + (globalIdxX < 0) - (globalIdxX >= X) + (globalIdxY + (globalIdxY < 0) - (globalIdxY >= Y)) * X;
int globalIdx = globalIdxX + (-1*globalIdxX)*(globalIdxX < 0) - (globalIdxX-X+1)*(globalIdxX >= X) + (globalIdxY + (-1*globalIdxY)*(globalIdxY < 0) - (globalIdxY-Y+1)*(globalIdxY >= Y)) * X;
shared[stride] = d_e[globalIdx];
sharedV[stride] = d_v[globalIdx];
}
__syncthreads();
/*
Computes all elements for every time step except the last; at inner step t the active tile shrinks because part of the halo has already been consumed.
*/
for (int t = 1; t < times; t++)
{
//_2Dstencil_(shared,sharedRes,c_coeff,Dx,Dy,k,threadIdx.x+k2,threadIdx.y+k2,Dx,threadIdx.x+k2,threadIdx.y+k2);
int tDx = blockDim.x + ((times - t) * 2);
int tDy = blockDim.y + ((times - t) * 2);
int tk2 = (t);
// int tDx = blockDim.x+(1*k);
// int tDy = blockDim.y+(1*k);
// int tk2 = (1)*k/2;
int tSharedTam = tDx * tDy;
for (int stride = blockThreadIndex; stride < tSharedTam; stride += (blockDim.x * blockDim.y))
{
//int globalIdx = (stride % tDx) + tk2 + Dx*(int(stride / Dx)) + tk2;
//destinyTest(shared, Dx, (stride % tDx) + tk2, int(stride / Dx) + tk2,t+1);
_2Dstencil_(shared, sharedRes, sharedV, Dx, (stride % tDx) + tk2, (int(stride / tDx)) + tk2, Dx, (stride % tDx) + tk2, (int(stride / tDx)) + tk2);
}
// __syncthreads();
// for (int stride = blockThreadIndex; stride < sharedTam; stride += (blockDim.x * blockDim.y))
// {
// shared[stride] = sharedRes[stride];
// }
float * temp = shared;
shared = sharedRes;
sharedRes = temp;
__syncthreads();
}
/*
Computes all elements of the last time step, writing the result directly to global memory.
*/
_2Dstencil_(shared, d_r, sharedV, Dx, ((x%(blockDim.x))+times), ((y%(blockDim.y))+times), X, x, y);
__syncthreads();
int globalIdx = x + y * X;
int sharedIdx = ((x%(blockDim.x))+times) + ((y%(blockDim.y))+times)*Dx;
d_v[globalIdx] = sharedV[sharedIdx];
if(eraseMiddle && x > X/2)
{
d_r[globalIdx] = 0.0f;
//d_v[globalIdx] = 0.5f;
}
// for(int stride=blockThreadIndex;stride<sharedTam;stride+=(blockDim.x*blockDim.y))
// {
// int globalIdx = (blockIdx.x*blockDim.x)-k2+stride%Dx + ((blockIdx.y*blockDim.y)-k2+stride/Dx)*X;
// if(globalIdx > 0 && (blockIdx.x*blockDim.x)-k2+stride%Dx < X && ((blockIdx.y*blockDim.y)-k2+stride/Dx)<Y)
// d_r[globalIdx] = sharedRes[stride];
// }
//destinyTest(d_r,X, x, y,1.0f);
// __syncthreads();
// for (int stride = blockThreadIndex; stride < sharedTam; stride += (blockDim.x * blockDim.y))
// {
// int globalIdxX = (blockIdx.x * blockDim.x) - k2 + stride % Dx;
// int globalIdxY = ((blockIdx.y * blockDim.y) - k2 + int(stride / Dx));
// int globalIdx = globalIdxX + (globalIdxX==-1) - (globalIdxX==X) + (globalIdxY + (globalIdxY==-1) - (globalIdxY==Y)) * X;
// if(blockIdx.x == 1 && blockIdx.y == 1)
// d_r[globalIdx] = shared[stride];
// }
// __syncthreads();
//int sharedIdx = ((x%(blockDim.x))+times) + ((y%(blockDim.y))+times)*Dx;
// int sharedIdxX = (blockIdx.x * blockDim.x) + times;
// int sharedIdxY = (blockIdx.y * blockDim.y) + times;
// int sharedIdx = sharedIdxX + sharedIdxY*Dx;
//int sharedIdx = ((x%(blockDim.x))+times) + ((y%(blockDim.y))+times)*Dx;
// int globalIdx = x + y * X;
//if(blockIdx.x == 0 && blockIdx.y ==1)
//d_v[globalIdx] = sharedV[sharedIdx];
}
int main(int argc, char *argv[])
{
/*
Declarations and default values
*/
float *h_e, *h_r, *h_v;
float *d_e, *d_r, *d_v;
int size, sharedSize;
int X = 32;
int Y = 32;
int times = 1,globalTimes = 1;
int BX = 32;
int BY = 32;
int GX = 1;
int GY = 1;
/*
Reading the input parameters
*/
if (argc > 1)
{
X = atoi(argv[1]);
Y = X;
}
if (argc > 2)
{
times = atoi(argv[2]);
}
if (argc > 3)
{
globalTimes = atoi(argv[3]);
}
if (X > 32)
{
GX = ceil((float)X / (float)32);
BX = 32;
}
if (Y > 32)
{
GY = ceil((float)Y / (float)32);
BY = 32;
}
/*
Memory allocations and block/grid configuration
*/
dim3 block_dim(BX, BY, 1);
dim3 grid_dim(GX, GY, 1);
//sharedSize = ((block_dim.x+k)*(block_dim.y+k))*sizeof(int);
sharedSize = ((block_dim.x + (2 * times)) * (block_dim.y + (2 * times))) * sizeof(float) * 3;
//sharedTam = ((block_dim.x+(k*2))*(block_dim.y+(k*2)));
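// Shared memory per block: three tiles (input, result and the v field), each of
// (blockDim.x + 2*times) x (blockDim.y + 2*times) floats; the halo is `times`
// cells wide on every side so that `times` local time steps can be taken before
// global memory is touched again.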
size = X * Y * sizeof(float);
//tam = X * Y;
h_e = (float *)malloc(size);
h_r = (float *)malloc(size);
h_v = (float *)malloc(size);
HANDLE_ERROR( hipMalloc(&d_e, size) );
HANDLE_ERROR( hipMalloc(&d_r, size) );
HANDLE_ERROR( hipMalloc(&d_v, size) );
//Copy the field data to the GPU and initialize the input domain
FILE *arq;
arq = fopen("entrada.txt", "rt");
for (int i = 0; i < X; i++)
for (int j = 0; j < Y; j++)
{
h_v[i + j * X] =0.5f;
int temp;
fscanf(arq," %d",&temp);
h_e[i + j * X] = temp;
}
fclose(arq);
HANDLE_ERROR( hipMemcpy(d_v, h_v, size, hipMemcpyHostToDevice) );
/*
Copy vectors from host memory to device memory
(send the input data to the GPU)
*/
HANDLE_ERROR( hipMemcpy(d_e, h_e, size, hipMemcpyHostToDevice) );
/*
Start the timer
*/
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
/******************
*** Kernel Call ***
*******************/
//_3Dstencil_global<<<blks,th_p_blk>>>(d_e,d_r,X,Y,Z);
/*
Run the kernel
*/
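// Each launch advances the solution by 'times' steps entirely in shared memory, so the host
// loop runs globalTimes/times launches; once i*times passes 8000 the kernel is launched once
// with eraseMiddle=true, which zeroes the right half of the result.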
bool reseted = false;
for(int i=0; i<globalTimes/times; i ++)
{
if(i*times > 8000 && !reseted)
{
hipLaunchKernelGGL(( _2Dstencil_global), dim3(grid_dim), dim3(block_dim), sharedSize, 0, d_e, d_r, d_v, X, Y, times,true);
reseted = true;
}else
{
hipLaunchKernelGGL(( _2Dstencil_global), dim3(grid_dim), dim3(block_dim), sharedSize, 0, d_e, d_r, d_v, X, Y, times,false);
}
hipError_t err = hipSuccess;
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch _2Dstencil_global kernel (error code %s)!\n", hipGetErrorString(err));
}
float * temp = d_e;
d_e = d_r;
d_r = temp;
}
/*
Check for launch errors
*/
hipError_t err = hipSuccess;
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch _2Dstencil_global kernel (error code %s)!\n", hipGetErrorString(err));
}
/******************
*** Kernel Call ***
*******************/
hipDeviceSynchronize();
/*
Stop the timer
*/
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
//printf("X %d || Y %d \nBX %d || BY %d \n",X,Y,BX,BY);
//printf ("[%d,%.5f],\n", tam,elapsedTime);
/*
Copy the result back to the CPU
*/
HANDLE_ERROR( hipMemcpy(h_r, d_e, size, hipMemcpyDeviceToHost) );
/*
Write the result out for visualization
*/
arq = fopen("resultado.txt", "wt");
for (int i = 0; i < X; i++)
{
for (int j = 0; j < Y; j++)
{
fprintf(arq," %6.4f",h_r[i+j*X]);
}
fprintf(arq,"\n");
}
fclose(arq);
hipFree(d_e);
hipFree(d_r);
hipFree(d_v);
std::free(h_e);
std::free(h_r);
std::free(h_v);
return 0;
} /* main */
| 728ebf3119f065a54a5ad80031907ca178cf403e.cu | #include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
/*
Instructions
BUILD --> nvcc 2DstencilGPUSharedMemoryBlankBorderTimeSpaceSharingOpencvKarma.cu -o go `pkg-config --cflags --libs opencv` -w
RUN   --> ./go DOMAIN_DIMS STENCIL_ORDER SPACE_TIME_BLOCK_TIMES BLOCK_DIM_X BLOCK_DIM_Y
*/
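/*
Note: main() below actually reads three positional arguments (X, times, globalTimes); for
example "./go 1024 4 8000" runs a 1024x1024 domain with 4 fused time steps per kernel launch
and 8000 time steps in total.
*/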
//////////////////////////////////////////////////////////////////////////
static void HandleError( cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//////////////////////////////////////////////////////////////////////////
#include <iostream>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <stdio.h>
#include <opencv2/imgcodecs.hpp>
#include <math.h>
#include <string>
using namespace std;
//===> CONSTANTES karma model <===//
#ifndef MODEL_WIDTH
#define MODEL_WIDTH 0
#endif
#define Eh 3.0f
#define En 1.0f
#define Re 0.6f
#define tauE 5.0f
#define tauN 250.0f
#define gam 0.001f
#define East 1.5415f
#define DT 0.05f
#define DX (12.0f / MODEL_WIDTH)
/*
Device-only function that receives the parameters to compute one stencil point
d_e - input data
d_r - output data
d_v - field that must be updated
c_coeff - variable used to hold the stencil coefficients (only used in the earlier simple-stencil version)
X - Y - dimensions of the input structures
k - stencil order
x - y - position of the stencil center in the input structure
GX - horizontal dimension of the output structure
Gx - Gy - position of the stencil center in the output structure
*/
__device__ void destinyTest(float *d_r,int GX, int Gx, int Gy,float val)
{
d_r[Gx + ((Gy) * (GX))] = val;
}
__device__ void _2Dstencil_(float *d_e, float *d_r, float *d_v, int X, int x, int y, int GX, int Gx, int Gy)
{
int h_e_i = x + (y * (X));
float temp = d_e[h_e_i];
float rv = d_v[h_e_i];
float Rn = (1.0f / (1.0f - expf(-Re))) - rv;
float p = (temp > En) * 1.0f;
float dv = (Rn * p - (1.0f - p) * rv) / tauN;
float Dn = rv * rv;
float hE = (1.0f - tanh(temp - Eh)) * temp * temp / 2.0f;
float du = (((East - Dn) * hE) - temp) / tauE;
float xlapr = d_e[(x + 1) + ((y) * (X))] - temp;
float xlapl = temp - d_e[(x - 1) + ((y) * (X))];
float xlapf = d_e[(x) + ((y + 1) * (X))] - temp;
float xlapb = temp - d_e[(x) + ((y - 1) * (X))];
float lap = xlapr - xlapl + xlapf - xlapb;
temp = (temp + (du * DT) + (lap * DT * gam / (DX * DX)));
d_v[h_e_i] = rv + dv * DT;
//d_v[h_e_i] = rv+rv;
h_e_i = Gx + ((Gy) * (GX));
d_r[h_e_i] = temp;//d_v[h_e_i];// d_e[h_e_i]+1;// = temp;
// d_r[h_e_i] = rv;
}
/*
Kernel launched by the host; it controls the copies, the order of the stencil computations, and the workload of each thread
*/
__global__ void _2Dstencil_global(float *d_e, float *d_r, float *d_v, int X, int Y, int times,bool eraseMiddle)
{
int x, y; //,h_e_i,h_r_i,Xs,Ys,Dx,Dy;
x = threadIdx.x + (blockIdx.x * blockDim.x);
y = threadIdx.y + (blockIdx.y * blockDim.y);
extern __shared__ float sharedOrig[];
int blockThreadIndex = threadIdx.x + threadIdx.y * blockDim.x;
// Xs = threadIdx.x;
// Ys = threadIdx.y;
int Dx = blockDim.x + (2 * times);
int Dy = blockDim.y + (2 * times);
int sharedTam = Dx * Dy;
float * shared = sharedOrig;
float * sharedRes = shared + sharedTam;
float * sharedV = sharedRes + sharedTam;
//float * sharedRes = &shared[sharedTam];
//float *sharedV = &sharedRes[sharedTam];
/*
Copy the shared-memory tile required by the requested time configuration.
A strided loop is used because there are always more elements to copy than threads.
Border indices are clamped to the edge of the domain.
*/
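// For example, with a 32x32 block and times = 2 the tile is 36x36 = 1296 elements,
// so each of the 1024 threads copies one or two tile elements.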
for (int stride = blockThreadIndex; stride < sharedTam; stride += (blockDim.x * blockDim.y))
{
int sharedIdxX = stride % Dx;
int sharedIdxY = int(stride / Dx);
int globalIdxX =(blockIdx.x * blockDim.x) + sharedIdxX - times;
int globalIdxY =(blockIdx.y * blockDim.y) + sharedIdxY - times;
//int globalIdx = globalIdxX + (globalIdxX < 0) - (globalIdxX >= X) + (globalIdxY + (globalIdxY < 0) - (globalIdxY >= Y)) * X;
int globalIdx = globalIdxX + (-1*globalIdxX)*(globalIdxX < 0) - (globalIdxX-X+1)*(globalIdxX >= X) + (globalIdxY + (-1*globalIdxY)*(globalIdxY < 0) - (globalIdxY-Y+1)*(globalIdxY >= Y)) * X;
shared[stride] = d_e[globalIdx];
sharedV[stride] = d_v[globalIdx];
}
__syncthreads();
/*
Compute every time step except the last one (intermediate results stay in shared memory)
*/
for (int t = 1; t < times; t++)
{
//_2Dstencil_(shared,sharedRes,c_coeff,Dx,Dy,k,threadIdx.x+k2,threadIdx.y+k2,Dx,threadIdx.x+k2,threadIdx.y+k2);
int tDx = blockDim.x + ((times - t) * 2);
int tDy = blockDim.y + ((times - t) * 2);
int tk2 = (t);
// int tDx = blockDim.x+(1*k);
// int tDy = blockDim.y+(1*k);
// int tk2 = (1)*k/2;
int tSharedTam = tDx * tDy;
for (int stride = blockThreadIndex; stride < tSharedTam; stride += (blockDim.x * blockDim.y))
{
//int globalIdx = (stride % tDx) + tk2 + Dx*(int(stride / Dx)) + tk2;
//destinyTest(shared, Dx, (stride % tDx) + tk2, int(stride / Dx) + tk2,t+1);
_2Dstencil_(shared, sharedRes, sharedV, Dx, (stride % tDx) + tk2, (int(stride / tDx)) + tk2, Dx, (stride % tDx) + tk2, (int(stride / tDx)) + tk2);
}
// __syncthreads();
// for (int stride = blockThreadIndex; stride < sharedTam; stride += (blockDim.x * blockDim.y))
// {
// shared[stride] = sharedRes[stride];
// }
float * temp = shared;
shared = sharedRes;
sharedRes = temp;
__syncthreads();
}
/*
Compute all elements of the final time step (written straight to global memory d_r)
*/
_2Dstencil_(shared, d_r, sharedV, Dx, ((x%(blockDim.x))+times), ((y%(blockDim.y))+times), X, x, y);
__syncthreads();
int globalIdx = x + y * X;
int sharedIdx = ((x%(blockDim.x))+times) + ((y%(blockDim.y))+times)*Dx;
d_v[globalIdx] = sharedV[sharedIdx];
if(eraseMiddle && x > X/2)
{
d_r[globalIdx] = 0.0f;
//d_v[globalIdx] = 0.5f;
}
// for(int stride=blockThreadIndex;stride<sharedTam;stride+=(blockDim.x*blockDim.y))
// {
// int globalIdx = (blockIdx.x*blockDim.x)-k2+stride%Dx + ((blockIdx.y*blockDim.y)-k2+stride/Dx)*X;
// if(globalIdx > 0 && (blockIdx.x*blockDim.x)-k2+stride%Dx < X && ((blockIdx.y*blockDim.y)-k2+stride/Dx)<Y)
// d_r[globalIdx] = sharedRes[stride];
// }
//destinyTest(d_r,X, x, y,1.0f);
// __syncthreads();
// for (int stride = blockThreadIndex; stride < sharedTam; stride += (blockDim.x * blockDim.y))
// {
// int globalIdxX = (blockIdx.x * blockDim.x) - k2 + stride % Dx;
// int globalIdxY = ((blockIdx.y * blockDim.y) - k2 + int(stride / Dx));
// int globalIdx = globalIdxX + (globalIdxX==-1) - (globalIdxX==X) + (globalIdxY + (globalIdxY==-1) - (globalIdxY==Y)) * X;
// if(blockIdx.x == 1 && blockIdx.y == 1)
// d_r[globalIdx] = shared[stride];
// }
// __syncthreads();
//int sharedIdx = ((x%(blockDim.x))+times) + ((y%(blockDim.y))+times)*Dx;
// int sharedIdxX = (blockIdx.x * blockDim.x) + times;
// int sharedIdxY = (blockIdx.y * blockDim.y) + times;
// int sharedIdx = sharedIdxX + sharedIdxY*Dx;
//int sharedIdx = ((x%(blockDim.x))+times) + ((y%(blockDim.y))+times)*Dx;
// int globalIdx = x + y * X;
//if(blockIdx.x == 0 && blockIdx.y ==1)
//d_v[globalIdx] = sharedV[sharedIdx];
}
int main(int argc, char *argv[])
{
/*
Declarations and default values
*/
float *h_e, *h_r, *h_v;
float *d_e, *d_r, *d_v;
int size, sharedSize;
int X = 32;
int Y = 32;
int times = 1,globalTimes = 1;
int BX = 32;
int BY = 32;
int GX = 1;
int GY = 1;
/*
Read the input parameters
*/
if (argc > 1)
{
X = atoi(argv[1]);
Y = X;
}
if (argc > 2)
{
times = atoi(argv[2]);
}
if (argc > 3)
{
globalTimes = atoi(argv[3]);
}
if (X > 32)
{
GX = ceil((float)X / (float)32);
BX = 32;
}
if (Y > 32)
{
GY = ceil((float)Y / (float)32);
BY = 32;
}
/*
Memory allocation and block/grid configuration
*/
dim3 block_dim(BX, BY, 1);
dim3 grid_dim(GX, GY, 1);
//sharedSize = ((block_dim.x+k)*(block_dim.y+k))*sizeof(int);
sharedSize = ((block_dim.x + (2 * times)) * (block_dim.y + (2 * times))) * sizeof(float) * 3;
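// x3 because the kernel splits this dynamic buffer into three equally sized tiles:
// the input tile, the per-step result tile, and the v-field tile.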
//sharedTam = ((block_dim.x+(k*2))*(block_dim.y+(k*2)));
size = X * Y * sizeof(float);
//tam = X * Y;
h_e = (float *)malloc(size);
h_r = (float *)malloc(size);
h_v = (float *)malloc(size);
HANDLE_ERROR( cudaMalloc(&d_e, size) );
HANDLE_ERROR( cudaMalloc(&d_r, size) );
HANDLE_ERROR( cudaMalloc(&d_v, size) );
//Copy the field data to the GPU and initialize the input domain
FILE *arq;
arq = fopen("entrada.txt", "rt");
for (int i = 0; i < X; i++)
for (int j = 0; j < Y; j++)
{
h_v[i + j * X] =0.5f;
int temp;
fscanf(arq," %d",&temp);
h_e[i + j * X] = temp;
}
fclose(arq);
HANDLE_ERROR( cudaMemcpy(d_v, h_v, size, cudaMemcpyHostToDevice) );
/*
Copy vectors from host memory to device memory
(send the input data to the GPU)
*/
HANDLE_ERROR( cudaMemcpy(d_e, h_e, size, cudaMemcpyHostToDevice) );
/*
Start the timer
*/
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
/******************
*** Kernel Call ***
*******************/
//_3Dstencil_global<<<blks,th_p_blk>>>(d_e,d_r,X,Y,Z);
/*
Run the kernel
*/
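// Each launch advances the solution by 'times' steps entirely in shared memory, so the host
// loop runs globalTimes/times launches; once i*times passes 8000 the kernel is launched once
// with eraseMiddle=true, which zeroes the right half of the result.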
bool reseted = false;
for(int i=0; i<globalTimes/times; i ++)
{
if(i*times > 8000 && !reseted)
{
_2Dstencil_global<<<grid_dim, block_dim, sharedSize>>>(d_e, d_r, d_v, X, Y, times,true);
reseted = true;
}else
{
_2Dstencil_global<<<grid_dim, block_dim, sharedSize>>>(d_e, d_r, d_v, X, Y, times,false);
}
cudaError_t err = cudaSuccess;
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch _2Dstencil_global kernel (error code %s)!\n", cudaGetErrorString(err));
}
float * temp = d_e;
d_e = d_r;
d_r = temp;
}
/*
Check for launch errors
*/
cudaError_t err = cudaSuccess;
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch _2Dstencil_global kernel (error code %s)!\n", cudaGetErrorString(err));
}
/******************
*** Kernel Call ***
*******************/
cudaDeviceSynchronize();
/*
Stop the timer
*/
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
//printf("X %d || Y %d \nBX %d || BY %d \n",X,Y,BX,BY);
//printf ("[%d,%.5f],\n", tam,elapsedTime);
/*
Copy the result back to the CPU
*/
HANDLE_ERROR( cudaMemcpy(h_r, d_e, size, cudaMemcpyDeviceToHost) );
/*
Write the result out for visualization
*/
arq = fopen("resultado.txt", "wt");
for (int i = 0; i < X; i++)
{
for (int j = 0; j < Y; j++)
{
fprintf(arq," %6.4f",h_r[i+j*X]);
}
fprintf(arq,"\n");
}
fclose(arq);
cudaFree(d_e);
cudaFree(d_r);
cudaFree(d_v);
std::free(h_e);
std::free(h_r);
std::free(h_v);
return 0;
} /* main */
|
7dc612cd3dbdaa65cdd9c795fbcfb3da01db1b55.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//This code is taken from https://www.olcf.ornl.gov/tutorials/cuda-vector-addition/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
using namespace std;
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
c[id] = a[id] + b[id];
}
int main( int argc, char* argv[] )
{
cout<<"ESTA ES UNA PRUEBA"<<endl;
// Size of vectors
int n = 100000;
// Host input vectors
double *h_a;
double *h_b;
//Host output vector
double *h_c;
// Device input vectors
double *d_a;
double *d_b;
//Device output vector
double *d_c;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
// Allocate memory for each vector on GPU
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
int i;
// Initialize vectors on host
for( i = 0; i < n; i++ ) {
h_a[i] = sin(i)*sin(i);
h_b[i] = cos(i)*cos(i);
}
// Copy host vectors to device
hipMemcpy( d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy( d_b, h_b, bytes, hipMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
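// e.g. n = 100000 with blockSize = 1024 gives gridSize = 98, so the last block is only
// partially used; hence the "if (id < n)" bounds check in the kernel.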
// Execute the kernel
hipLaunchKernelGGL(( vecAdd), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c, n);
// Copy array back to host
hipMemcpy( h_c, d_c, bytes, hipMemcpyDeviceToHost );
// Sum up vector c and print result divided by n, this should equal 1 within error
double sum = 0;
for(i=0; i<n; i++)
sum += h_c[i];
printf("final result: %f\n", sum/n);
// Release device memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
}
| 7dc612cd3dbdaa65cdd9c795fbcfb3da01db1b55.cu | //This code is taken from https://www.olcf.ornl.gov/tutorials/cuda-vector-addition/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
using namespace std;
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
c[id] = a[id] + b[id];
}
int main( int argc, char* argv[] )
{
cout<<"ESTA ES UNA PRUEBA"<<endl;
// Size of vectors
int n = 100000;
// Host input vectors
double *h_a;
double *h_b;
//Host output vector
double *h_c;
// Device input vectors
double *d_a;
double *d_b;
//Device output vector
double *d_c;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
// Allocate memory for each vector on GPU
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
int i;
// Initialize vectors on host
for( i = 0; i < n; i++ ) {
h_a[i] = sin(i)*sin(i);
h_b[i] = cos(i)*cos(i);
}
// Copy host vectors to device
cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
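// e.g. n = 100000 with blockSize = 1024 gives gridSize = 98, so the last block is only
// partially used; hence the "if (id < n)" bounds check in the kernel.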
// Execute the kernel
vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
// Copy array back to host
cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost );
// Sum up vector c and print result divided by n, this should equal 1 within error
double sum = 0;
for(i=0; i<n; i++)
sum += h_c[i];
printf("final result: %f\n", sum/n);
// Release device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
}
|
f4371b2e8236cbe581846de9ef96f166dd193bcc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "counting.h"
#include <cstdio>
#include <cassert>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#define CHECK {\
auto e = hipDeviceSynchronize();\
if (e != hipSuccess) {\
printf("At " __FILE__ ":%d, %s\n", __LINE__, hipGetErrorString(e));\
abort();\
}\
}
__device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; }
__device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; }
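// yuan: for each character, scan left to the previous whitespace/control character and store
// the 1-based position of that character inside its word (0 for characters <= ' ').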
__global__ void yuan(const char *text, int *pos, int text_size) {
int textP = blockIdx.x * blockDim.x + threadIdx.x;
if (textP >= text_size) return;
const char *start = text + textP;
while (start >= text && *start > ' ') {
start--;
}
pos[textP] = text + textP - start;
}
struct to_key
{
__host__ __device__ int operator()(char c)
{
return c <= ' '? 0 : 1;
}
};
void CountPosition1(const char *text, int *pos, int text_size)
{
thrust :: transform (thrust :: device, text, text + text_size, pos, to_key());
thrust :: inclusive_scan_by_key (thrust :: device, pos, pos + text_size, pos, pos);
}
void CountPosition2(const char *text, int *pos, int text_size)
{
hipLaunchKernelGGL(( yuan), dim3((text_size/1024 + 1)) , dim3((1<<10)) , 0, 0, 0, text, pos, text_size);
}
| f4371b2e8236cbe581846de9ef96f166dd193bcc.cu | #include "counting.h"
#include <cstdio>
#include <cassert>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#define CHECK {\
auto e = cudaDeviceSynchronize();\
if (e != cudaSuccess) {\
printf("At " __FILE__ ":%d, %s\n", __LINE__, cudaGetErrorString(e));\
abort();\
}\
}
__device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; }
__device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; }
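// yuan: for each character, scan left to the previous whitespace/control character and store
// the 1-based position of that character inside its word (0 for characters <= ' ').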
__global__ void yuan(const char *text, int *pos, int text_size) {
int textP = blockIdx.x * blockDim.x + threadIdx.x;
if (textP >= text_size) return;
const char *start = text + textP;
while (start >= text && *start > ' ') {
start--;
}
pos[textP] = text + textP - start;
}
struct to_key
{
__host__ __device__ int operator()(char c)
{
return c <= ' '? 0 : 1;
}
};
void CountPosition1(const char *text, int *pos, int text_size)
{
thrust :: transform (thrust :: device, text, text + text_size, pos, to_key());
thrust :: inclusive_scan_by_key (thrust :: device, pos, pos + text_size, pos, pos);
}
void CountPosition2(const char *text, int *pos, int text_size)
{
yuan<<< (text_size/1024 + 1) , (1<<10) >>>(text, pos, text_size);
}
|
12a4768bf106b241554406a557d7c5e4921bb4e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <ctime>
#include <chrono>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <iostream>
using namespace std;
__device__ unsigned int reduce_sum(unsigned int in)
{
extern __shared__ unsigned int sdata[];
// Perform first level of reduction:
// - Write to shared memory
unsigned int ltid = threadIdx.x;
sdata[ltid] = in;
__syncthreads();
// Do reduction in shared mem
for (unsigned int s = blockDim.x / 2 ; s > 0 ; s >>= 1)
{
if (ltid < s)
{
sdata[ltid] += sdata[ltid + s];
}
__syncthreads();
}
return sdata[0];
}
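// Note: this tree reduction assumes blockDim.x is a power of two (main() uses 1024) and that the
// launch supplies blockDim.x * sizeof(unsigned int) bytes of dynamic shared memory for sdata.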
__global__ void mykernel(int vectorsize, int *count, double *rands)
{
int id = blockIdx.x *blockDim.x + threadIdx.x;
int step = gridDim.x * blockDim.x;
const double *rand1 = rands + id;
const double *rand2 = rand1 + vectorsize;
int tempcount = 0;
for (int i = id; i < vectorsize; i += step, rand1 +=step, rand2 += step) // grid-stride: start at this thread's index so reads stay in bounds and surplus threads skip the body
{
double x = *rand1;
double y = *rand2;
if(((x*x)+(y*y)) < 1 )
tempcount++;
}
tempcount = reduce_sum(tempcount);
if (threadIdx.x == 0)
{
count[blockIdx.x] = tempcount;
}
}
double * createrands(double vectorsize)
{
hiprandGenerator_t prng;
hiprandCreateGenerator(&prng, HIPRAND_RNG_PSEUDO_MTGP32);
double *rands = 0;
hipMalloc((void **)&rands, 2* vectorsize * sizeof(double));
hiprandSetPseudoRandomGeneratorSeed(prng, 1337);
hiprandGenerateUniformDouble(prng, (double *)rands, 2 * vectorsize);
hiprandDestroyGenerator(prng);
return rands;
}
int main(void)
{
auto t_start = std::chrono::high_resolution_clock::now();
//int numgpus = 2;
double vectorsize = 33553920;
// cin >> vectorsize;
int blocksize = 1024;
int gridsize = ceil(vectorsize/blocksize);
size_t sharedmemsize = blocksize * sizeof(int);
int *count1, *count2, *cuda_count1, *cuda_count2;
count1 = (int *)malloc (gridsize * sizeof(int));
count2 = (int *)malloc (gridsize * sizeof(int));
//1st gpu
hipSetDevice(0);
double *rands1 = createrands(vectorsize);
hipMalloc((void **)&cuda_count1, gridsize *sizeof(int));
hipLaunchKernelGGL(( mykernel) , dim3(gridsize), dim3(blocksize), sharedmemsize, 0, vectorsize, cuda_count1, rands1);
//2nd gpu
hipSetDevice(1);
double *rands2 = createrands(vectorsize);
hipMalloc((void **)&cuda_count2, gridsize *sizeof(int));
hipLaunchKernelGGL(( mykernel) , dim3(gridsize), dim3(blocksize), sharedmemsize, 0, vectorsize, cuda_count2, rands2);
//1st gpu
hipSetDevice(0);
if (hipMemcpy (count1, cuda_count1, gridsize *sizeof(int), hipMemcpyDeviceToHost) != hipSuccess)
printf("failed to cpy back 1\n");
hipFree(cuda_count1);
//2nd gpu
hipSetDevice(1);
if (hipMemcpy (count2, cuda_count2, gridsize *sizeof(int), hipMemcpyDeviceToHost) != hipSuccess)
printf("failed to cpy back 2\n");
hipFree(cuda_count2);
int totalcount = 0;
for (int i = 0; i < gridsize; i ++)
{
totalcount += count1[i];
}
for (int i = 0; i < gridsize; i ++)
{
totalcount += count2[i];
}
printf("count = %d\n", totalcount);
float ratio = totalcount / (2*vectorsize);
printf("pi = %.15f \n", (ratio * 4));
auto t_end = std::chrono::high_resolution_clock::now();
printf("duration: %f\n", (std::chrono::duration<double, std::milli>(t_end-t_start).count()/1000));
return 0;
}
| 12a4768bf106b241554406a557d7c5e4921bb4e1.cu | #include <stdio.h>
#include <stdlib.h>
#include <ctime>
#include <chrono>
#include <curand.h>
#include <curand_kernel.h>
#include <iostream>
using namespace std;
__device__ unsigned int reduce_sum(unsigned int in)
{
extern __shared__ unsigned int sdata[];
// Perform first level of reduction:
// - Write to shared memory
unsigned int ltid = threadIdx.x;
sdata[ltid] = in;
__syncthreads();
// Do reduction in shared mem
for (unsigned int s = blockDim.x / 2 ; s > 0 ; s >>= 1)
{
if (ltid < s)
{
sdata[ltid] += sdata[ltid + s];
}
__syncthreads();
}
return sdata[0];
}
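// Note: this tree reduction assumes blockDim.x is a power of two (main() uses 1024) and that the
// launch supplies blockDim.x * sizeof(unsigned int) bytes of dynamic shared memory for sdata.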
__global__ void mykernel(int vectorsize, int *count, double *rands)
{
int id = blockIdx.x *blockDim.x + threadIdx.x;
int step = gridDim.x * blockDim.x;
const double *rand1 = rands + id;
const double *rand2 = rand1 + vectorsize;
int tempcount = 0;
for (int i = id; i < vectorsize; i += step, rand1 +=step, rand2 += step) // grid-stride: start at this thread's index so reads stay in bounds and surplus threads skip the body
{
double x = *rand1;
double y = *rand2;
if(((x*x)+(y*y)) < 1 )
tempcount++;
}
tempcount = reduce_sum(tempcount);
if (threadIdx.x == 0)
{
count[blockIdx.x] = tempcount;
}
}
double * createrands(double vectorsize)
{
curandGenerator_t prng;
curandCreateGenerator(&prng, CURAND_RNG_PSEUDO_MTGP32);
double *rands = 0;
cudaMalloc((void **)&rands, 2* vectorsize * sizeof(double));
curandSetPseudoRandomGeneratorSeed(prng, 1337);
curandGenerateUniformDouble(prng, (double *)rands, 2 * vectorsize);
curandDestroyGenerator(prng);
return rands;
}
int main(void)
{
auto t_start = std::chrono::high_resolution_clock::now();
//int numgpus = 2;
double vectorsize = 33553920;
// cin >> vectorsize;
int blocksize = 1024;
int gridsize = ceil(vectorsize/blocksize);
size_t sharedmemsize = blocksize * sizeof(int);
int *count1, *count2, *cuda_count1, *cuda_count2;
count1 = (int *)malloc (gridsize * sizeof(int));
count2 = (int *)malloc (gridsize * sizeof(int));
//1st gpu
cudaSetDevice(0);
double *rands1 = createrands(vectorsize);
cudaMalloc((void **)&cuda_count1, gridsize *sizeof(int));
mykernel <<<gridsize, blocksize, sharedmemsize>>>(vectorsize, cuda_count1, rands1);
//2nd gpu
cudaSetDevice(1);
double *rands2 = createrands(vectorsize);
cudaMalloc((void **)&cuda_count2, gridsize *sizeof(int));
mykernel <<<gridsize, blocksize, sharedmemsize>>>(vectorsize, cuda_count2, rands2);
//1st gpu
cudaSetDevice(0);
if (cudaMemcpy (count1, cuda_count1, gridsize *sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess)
printf("failed to cpy back 1\n");
cudaFree(cuda_count1);
//2nd gpu
cudaSetDevice(1);
if (cudaMemcpy (count2, cuda_count2, gridsize *sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess)
printf("failed to cpy back 2\n");
cudaFree(cuda_count2);
int totalcount = 0;
for (int i = 0; i < gridsize; i ++)
{
totalcount += count1[i];
}
for (int i = 0; i < gridsize; i ++)
{
totalcount += count2[i];
}
printf("count = %d\n", totalcount);
float ratio = totalcount / (2*vectorsize);
printf("pi = %.15f \n", (ratio * 4));
auto t_end = std::chrono::high_resolution_clock::now();
printf("duration: %f\n", (std::chrono::duration<double, std::milli>(t_end-t_start).count()/1000));
return 0;
}
|
9ce077a2c8606e8f4bd0924c7798e600b7197b43.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include "device_launch_parameters.h"
#include "GpuTimer.h"
#define NUM_TREADS 1024
using namespace cv;
using namespace std;
// cpu implementation
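// (The 0.21 / 0.71 / 0.07 weights below roughly match the Rec. 709 luma coefficients;
// OpenCV loads images in BGR order, so colorOffset + 2 is the red channel.)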
void rgb2grayCPU(unsigned char* color, unsigned char* gray, int numRows, int numCols, int numChannels) {
int grayOffset, colorOffset;
for (int i = 0; i < numRows; i++) {
for (int j = 0; j < numCols; j++) {
// linearize pixel coordinate tuple (i, j)
grayOffset = i * numCols + j;
colorOffset = grayOffset * numChannels;
// convert to gray
gray[grayOffset] = (0.21 * color[colorOffset + 2]) +
(0.71 * color[colorOffset + 1]) +
(0.07 * color[colorOffset]);
}
}
}
// gpu implementation
__global__ void rgb2grayGPU(unsigned char* Pout, unsigned char* Pin, int width, int height, int numChannels) {
// compute global thread coordinates
int row = threadIdx.y + blockIdx.y*blockDim.y;
int col = threadIdx.x + blockIdx.x*blockDim.x;
// linearize coordinates for data access
int grayOffset = row * width + col;
int colorOffset = grayOffset * numChannels;
if ((col < width) && (row < height)) {
Pout[grayOffset] = (0.21 * Pin[colorOffset + 2]) +
(0.71 * Pin[colorOffset + 1]) +
(0.07 * Pin[colorOffset]);
}
}
__global__
void colorToGrayscaleConversion(unsigned char* Pout, unsigned char* Pin, int width, int height, int numChannels){
int col = threadIdx.x + blockIdx.x*blockDim.x;
int row = threadIdx.y + blockIdx.y*blockDim.y;
if(col < width && row < height) {
int grayOffset = row*width + col;
int rgbOffset = grayOffset* numChannels;
unsigned char r = Pin [rgbOffset ];
unsigned char g = Pin [rgbOffset+1];
unsigned char b = Pin [rgbOffset+2];
Pout[grayOffset] = 0.21f*r +0.71f*g +0.07f*b;
}
}
int main(int argc, char *argv[]) {
if (argc == 1) {
printf("[!] Filename expected.\n");
return 0;
}
// read image
Mat image;
image = imread(argv[1], CV_LOAD_IMAGE_COLOR);
if (image.empty()) {
printf("Cannot read image file %s", argv[1]);
exit(1);
}
// define img params and timers
int imageChannels = 3;
int imageWidth = image.cols;
int imageHeight = image.rows;
size_t size_rgb = sizeof(unsigned char)*imageWidth*imageHeight*imageChannels;
size_t size_gray = sizeof(unsigned char)*imageWidth*imageHeight;
GpuTimer timer;
// allocate mem for host image vectors
unsigned char* h_grayImage = (unsigned char*)malloc(size_rgb);
unsigned char* h_grayImage_CPU = (unsigned char*)malloc(size_rgb);
// grab pointer to host rgb image
unsigned char* h_rgbImage = image.data;
// allocate mem for device rgb and gray
unsigned char* d_rgbImage;
unsigned char* d_grayImage;
hipMalloc((void**)&d_rgbImage, size_rgb);
hipMalloc((void**)&d_grayImage, size_gray);
// copy the rgb image from the host to the device and record the needed time
hipMemcpy(d_rgbImage, h_rgbImage, size_rgb, hipMemcpyHostToDevice);
// execution configuration parameters + kernel launch
dim3 dimBlock(16, 16, 1);
dim3 dimGrid(ceil(imageWidth/16.0), ceil(imageHeight/16.0), 1);
//dim3 dimBlock(NUM_THREADS, NUM_THREADS, 1);
//dim3 dimGrid(ceil(imageWidth/NUM_THREADS), ceil(imageHeight/NUM_THREADS), 1);
timer.Start();
//rgb2grayGPU<<<dimGrid, dimBlock>>>(d_grayImage, d_rgbImage, imageWidth, imageHeight, imageChannels);
hipLaunchKernelGGL(( colorToGrayscaleConversion), dim3(dimGrid), dim3(dimBlock), 0, 0, d_grayImage, d_rgbImage, imageWidth, imageHeight, imageChannels); // device buffers, output first, then (width, height) to match the kernel signature
timer.Stop();
float d_t2 = timer.Elapsed();
printf("Implemented CUDA code ran in: %f msecs.\n", d_t2);
// copy gray image from device to host
hipMemcpy(h_grayImage, d_grayImage, size_gray, hipMemcpyDeviceToHost);
// do the processing on the CPU
clock_t begin = clock();
rgb2grayCPU(h_rgbImage, h_grayImage_CPU, imageHeight, imageWidth, imageChannels);
clock_t end = clock();
// display images
Mat Image1(imageHeight, imageWidth, CV_8UC1, h_grayImage);
Mat Image2(imageHeight, imageWidth, CV_8UC1, h_grayImage_CPU);
namedWindow("CPUImage", WINDOW_NORMAL);
namedWindow("GPUImage", WINDOW_NORMAL);
imshow("GPUImage",Image1);
imshow("CPUImage",Image2);
waitKey(0);
// free host and device memory
image.release();
Image1.release();
Image2.release();
free(h_grayImage);
free(h_grayImage_CPU);
hipFree(d_rgbImage); hipFree(d_grayImage);
return 0;
}
| 9ce077a2c8606e8f4bd0924c7798e600b7197b43.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <time.h>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include "device_launch_parameters.h"
#include "GpuTimer.h"
#define NUM_TREADS 1024
using namespace cv;
using namespace std;
// cpu implementation
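// (The 0.21 / 0.71 / 0.07 weights below roughly match the Rec. 709 luma coefficients;
// OpenCV loads images in BGR order, so colorOffset + 2 is the red channel.)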
void rgb2grayCPU(unsigned char* color, unsigned char* gray, int numRows, int numCols, int numChannels) {
int grayOffset, colorOffset;
for (int i = 0; i < numRows; i++) {
for (int j = 0; j < numCols; j++) {
// linearize pixel coordinate tuple (i, j)
grayOffset = i * numCols + j;
colorOffset = grayOffset * numChannels;
// convert to gray
gray[grayOffset] = (0.21 * color[colorOffset + 2]) +
(0.71 * color[colorOffset + 1]) +
(0.07 * color[colorOffset]);
}
}
}
// gpu implementation
__global__ void rgb2grayGPU(unsigned char* Pout, unsigned char* Pin, int width, int height, int numChannels) {
// compute global thread coordinates
int row = threadIdx.y + blockIdx.y*blockDim.y;
int col = threadIdx.x + blockIdx.x*blockDim.x;
// linearize coordinates for data access
int grayOffset = row * width + col;
int colorOffset = grayOffset * numChannels;
if ((col < width) && (row < height)) {
Pout[grayOffset] = (0.21 * Pin[colorOffset + 2]) +
(0.71 * Pin[colorOffset + 1]) +
(0.07 * Pin[colorOffset]);
}
}
__global__
void colorToGrayscaleConversion(unsigned char* Pout, unsigned char* Pin, int width, int height, int numChannels){
int col = threadIdx.x + blockIdx.x*blockDim.x;
int row = threadIdx.y + blockIdx.y*blockDim.y;
if(col < width && row < height) {
int grayOffset = row*width + col;
int rgbOffset = grayOffset* numChannels;
unsigned char r = Pin [rgbOffset ];
unsigned char g = Pin [rgbOffset+1];
unsigned char b = Pin [rgbOffset+2];
Pout[grayOffset] = 0.21f*r +0.71f*g +0.07f*b;
}
}
int main(int argc, char *argv[]) {
if (argc == 1) {
printf("[!] Filename expected.\n");
return 0;
}
// read image
Mat image;
image = imread(argv[1], CV_LOAD_IMAGE_COLOR);
if (image.empty()) {
printf("Cannot read image file %s", argv[1]);
exit(1);
}
// define img params and timers
int imageChannels = 3;
int imageWidth = image.cols;
int imageHeight = image.rows;
size_t size_rgb = sizeof(unsigned char)*imageWidth*imageHeight*imageChannels;
size_t size_gray = sizeof(unsigned char)*imageWidth*imageHeight;
GpuTimer timer;
// allocate mem for host image vectors
unsigned char* h_grayImage = (unsigned char*)malloc(size_rgb);
unsigned char* h_grayImage_CPU = (unsigned char*)malloc(size_rgb);
// grab pointer to host rgb image
unsigned char* h_rgbImage = image.data;
// allocate mem for device rgb and gray
unsigned char* d_rgbImage;
unsigned char* d_grayImage;
cudaMalloc((void**)&d_rgbImage, size_rgb);
cudaMalloc((void**)&d_grayImage, size_gray);
// copy the rgb image from the host to the device and record the needed time
cudaMemcpy(d_rgbImage, h_rgbImage, size_rgb, cudaMemcpyHostToDevice);
// execution configuration parameters + kernel launch
dim3 dimBlock(16, 16, 1);
dim3 dimGrid(ceil(imageWidth/16.0), ceil(imageHeight/16.0), 1);
//dim3 dimBlock(NUM_THREADS, NUM_THREADS, 1);
//dim3 dimGrid(ceil(imageWidth/NUM_THREADS), ceil(imageHeight/NUM_THREADS), 1);
timer.Start();
//rgb2grayGPU<<<dimGrid, dimBlock>>>(d_grayImage, d_rgbImage, imageWidth, imageHeight, imageChannels);
colorToGrayscaleConversion<<<dimGrid, dimBlock>>>(d_grayImage, d_rgbImage, imageWidth, imageHeight, imageChannels); // device buffers, output first, then (width, height) to match the kernel signature
timer.Stop();
float d_t2 = timer.Elapsed();
printf("Implemented CUDA code ran in: %f msecs.\n", d_t2);
// copy gray image from device to host
cudaMemcpy(h_grayImage, d_grayImage, size_gray, cudaMemcpyDeviceToHost);
// do the processing on the CPU
clock_t begin = clock();
rgb2grayCPU(h_rgbImage, h_grayImage_CPU, imageHeight, imageWidth, imageChannels);
clock_t end = clock();
// display images
Mat Image1(imageHeight, imageWidth, CV_8UC1, h_grayImage);
Mat Image2(imageHeight, imageWidth, CV_8UC1, h_grayImage_CPU);
namedWindow("CPUImage", WINDOW_NORMAL);
namedWindow("GPUImage", WINDOW_NORMAL);
imshow("GPUImage",Image1);
imshow("CPUImage",Image2);
waitKey(0);
// free host and device memory
image.release();
Image1.release();
Image2.release();
free(h_grayImage);
free(h_grayImage_CPU);
cudaFree(d_rgbImage); cudaFree(d_grayImage);
return 0;
}
|
3d21354b76d3e88fa0f6229a0e83152c669f11f1.hip | // !!! This is a file automatically generated by hipify!!!
/*
* DESCRIPTION These functions are part of the submission to exercises of
* the "GPU Computing" lecture of the University of Heidelberg.
*
* Exercise 3 - Implementation for the global memory access
* measurements.
*
* AUTHORS Klaus Naumann
* Alexander Schapp
* Günther Schindler
*
* LAST CHANGE 11. Nov 2014
*
********************************************************************************/
#include <stdio.h>
#include "chTimer.h"
int main( int argc, char *argv[])
{
int i;
double dBandwidth;
char *dmem_a, *dmem_b; // device data (variable scope annotation)
char *hmem;
long iIteration[13] = {1e3, 2e3, 1e4, 2e4, 1e5, 2e5, 1e6, 2e6, 1e7, 2e7, 1e8, 2e8, 5e9};
chTimerTimestamp tsStart, tsStop;
/* Run over 13 measurement points */
for(i = 0; i < 13; i++)
{
/* Allocate device memory */
hipMalloc(&dmem_a, iIteration[i]*sizeof(char));
hipMalloc(&dmem_b, iIteration[i]*sizeof(char));
hmem = (char*)malloc(iIteration[i]*sizeof(char));
/* Fill elements with '1' */
for(int j=0; j<iIteration[i]; j++)
hmem[j] = 'a';
hipMemcpy(dmem_b, hmem, iIteration[i]*sizeof(char), hipMemcpyHostToDevice);
/* Start timer */
chTimerGetTime(&tsStart);
/* Start memory copy */
hipMemcpy(dmem_a ,dmem_b , iIteration[i]*sizeof(char), hipMemcpyDeviceToDevice);
/* Stop timer */
chTimerGetTime(&tsStop);
/* Get bandwidth in Byte/sec and print it */
dBandwidth=chTimerBandwidth(&tsStart, &tsStop, (double) iIteration[i]);
printf("%li %.2e\n",iIteration[i],dBandwidth/1e9);
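/* Note: a device-to-device copy both reads and writes every byte, so the DRAM traffic is roughly
   twice the figure printed here, which is computed from the copy size alone. */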
/* Free allocated device memory */
hipFree(dmem_a);
hipFree(dmem_b);
}
return 0;
}
| 3d21354b76d3e88fa0f6229a0e83152c669f11f1.cu | /*
* DESCRIPTION These functions are part of the submission to exercises of
* the "GPU Computing" lecture of the University of Heidelberg.
*
* Exercise 3 - Implementation for the global memory access
* measurements.
*
* AUTHORS Klaus Naumann
* Alexander Schapp
* Günther Schindler
*
* LAST CHANGE 11. Nov 2014
*
********************************************************************************/
#include <stdio.h>
#include "chTimer.h"
int main( int argc, char *argv[])
{
int i;
double dBandwidth;
char *dmem_a, *dmem_b; // device data (variable scope annotation)
char *hmem;
long iIteration[13] = {1e3, 2e3, 1e4, 2e4, 1e5, 2e5, 1e6, 2e6, 1e7, 2e7, 1e8, 2e8, 5e9};
chTimerTimestamp tsStart, tsStop;
/* Run over 13 measurement points */
for(i = 0; i < 13; i++)
{
/* Allocate device memory */
cudaMalloc(&dmem_a, iIteration[i]*sizeof(char));
cudaMalloc(&dmem_b, iIteration[i]*sizeof(char));
hmem = (char*)malloc(iIteration[i]*sizeof(char));
/* Fill elements with '1' */
for(int j=0; j<iIteration[i]; j++)
hmem[j] = 'a';
cudaMemcpy(dmem_b, hmem, iIteration[i]*sizeof(char), cudaMemcpyHostToDevice);
/* Start timer */
chTimerGetTime(&tsStart);
/* Start memory copy */
cudaMemcpy(dmem_a ,dmem_b , iIteration[i]*sizeof(char), cudaMemcpyDeviceToDevice);
/* Stop timer */
chTimerGetTime(&tsStop);
/* Get bandwidth in Byte/sec and print it */
dBandwidth=chTimerBandwidth(&tsStart, &tsStop, (double) iIteration[i]);
printf("%li %.2e\n",iIteration[i],dBandwidth/1e9);
/* Free allocated device memory */
cudaFree(dmem_a);
cudaFree(dmem_b);
}
return 0;
}
|
8c8251d9b2c84d6eac10f585ec5ebd726ccf0df9.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// Copyright 2004-present Facebook. All Rights Reserved.
#include <hip/hip_runtime.h>
#include <algorithm>
#include <cassert>
#include "fbgemm_gpu/batched_unary_embeddings.cuh"
#include "fbgemm_gpu/cuda_utils.cuh"
#include "fbgemm_gpu/embedding_wrappers.cuh"
void fbgemm_gpu_test::batched_unary_embeddings_forward(
const int32_t N,
const int32_t B,
const int32_t T,
const float* __restrict__ weight,
const long* __restrict__ table_offsets,
const long* __restrict__ offsets,
const long* __restrict__ indices,
float* __restrict__ output) {
int32_t threads = std::min<int32_t>(B, 512);
dim3 blocks((B + threads - 1) / threads, T, N);
assert(T <= 65535);
assert(N <= 65535);
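// grid.y and grid.z are capped at 65535, hence the asserts above (B maps to grid.x, T to grid.y, N to grid.z)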
hipLaunchKernelGGL(( batched_unary_embeddings_forward_kernel<float>), dim3(blocks), dim3(threads), 0, 0,
N, B, T, weight, table_offsets, offsets, indices, output);
CUDA_CHECK(hipGetLastError());
}
void fbgemm_gpu_test::batched_unary_embeddings_backward(
const int32_t N,
const int32_t B,
const int32_t T,
const float* __restrict__ grad_output,
const long* __restrict__ table_offsets,
const long* __restrict__ offsets,
const long* __restrict__ indices,
float* __restrict__ grad_weight) {
int threads = std::min<int32_t>(N * T, 512);
dim3 blocks((N * T + threads - 1) / threads);
hipLaunchKernelGGL(( batched_unary_embeddings_backward_kernel<float>), dim3(blocks), dim3(threads), 0, 0,
N, B, T, grad_output, table_offsets, offsets, indices, grad_weight);
CUDA_CHECK(hipGetLastError());
}
| 8c8251d9b2c84d6eac10f585ec5ebd726ccf0df9.cu | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// Copyright 2004-present Facebook. All Rights Reserved.
#include <cuda.h>
#include <algorithm>
#include <cassert>
#include "fbgemm_gpu/batched_unary_embeddings.cuh"
#include "fbgemm_gpu/cuda_utils.cuh"
#include "fbgemm_gpu/embedding_wrappers.cuh"
void fbgemm_gpu_test::batched_unary_embeddings_forward(
const int32_t N,
const int32_t B,
const int32_t T,
const float* __restrict__ weight,
const long* __restrict__ table_offsets,
const long* __restrict__ offsets,
const long* __restrict__ indices,
float* __restrict__ output) {
int32_t threads = std::min<int32_t>(B, 512);
dim3 blocks((B + threads - 1) / threads, T, N);
assert(T <= 65535);
assert(N <= 65535);
batched_unary_embeddings_forward_kernel<float><<<blocks, threads>>>(
N, B, T, weight, table_offsets, offsets, indices, output);
CUDA_CHECK(cudaGetLastError());
}
void fbgemm_gpu_test::batched_unary_embeddings_backward(
const int32_t N,
const int32_t B,
const int32_t T,
const float* __restrict__ grad_output,
const long* __restrict__ table_offsets,
const long* __restrict__ offsets,
const long* __restrict__ indices,
float* __restrict__ grad_weight) {
int threads = std::min<int32_t>(N * T, 512);
dim3 blocks((N * T + threads - 1) / threads);
batched_unary_embeddings_backward_kernel<float><<<blocks, threads>>>(
N, B, T, grad_output, table_offsets, offsets, indices, grad_weight);
CUDA_CHECK(cudaGetLastError());
}
|
dc05cb24148b3e83c598469620be0460909eed1d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "norm.h"
#include "based.h"
/**
* normalization of tensor t:
* t = v * a;
* ||v|| = 1, i.e. <v,v> = e.
* INPUT: t is m×1×n
*
* OUTPUT: v*a = t.
*/
void streamedtnorm(float* t, const int m, const int n, float* v, float* a){
int ht = n/2+1;
int bat = m;
float* d_t;
hipfftComplex* d_fftData;
hipMalloc((void**)&d_t,sizeof(float)*bat*n);
hipMalloc((void**)&d_fftData,sizeof(hipfftComplex)*m*ht);
hipMemcpy(d_t,t,sizeof(float)*bat*n,hipMemcpyHostToDevice);
//tfft
hipfftHandle plan;
int n_f[1] = {n};
int in[1] = {n};
int ou[1] = {ht};
int stride_in = bat,dist_in = 1;
int stride_ou = bat,dist_ou = 1;
if(hipfftPlanMany(&plan,1,n_f,in,stride_in,dist_in,ou,stride_ou,dist_ou,
HIPFFT_R2C,bat) != HIPFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Plan creation failed!",__FUNCTION__,__LINE__);
return;
}
if(hipfftExecR2C(plan,d_t,(hipfftComplex*)d_fftData)!=HIPFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Exec failed!",__FUNCTION__,__LINE__);
return;
}
if(hipDeviceSynchronize() != hipSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
hipFree(d_t);
if(hipfftDestroy(plan)!=HIPFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d]cufftDestory faile!",__FUNCTION__,__LINE__);
return;
}
//solve normlize
int threads =512;
int blocks = 1;
//set stream
hipStream_t* stream = (hipStream_t*)malloc(sizeof(hipStream_t)*PLAN1D_SIZE);
#pragma unroll
for(int i=0;i<PLAN1D_SIZE;i++){
hipStreamCreate(&stream[i]);
}
hipComplex *d_hv,*d_ha;
hipMalloc((void**)&d_hv,sizeof(hipComplex)*m*ht);
hipMalloc((void**)&d_ha,sizeof(hipComplex)*ht);
int tube_num = ht/PLAN1D_SIZE;
int tube_s = ht%PLAN1D_SIZE;
if(tube_num > 0){
for(int j=0;j< tube_num;j++){
for(int i=0;i<PLAN1D_SIZE;i++){
hipLaunchKernelGGL(( d_normlize), dim3(blocks),dim3(threads),0,stream[i], d_fftData+i*m+j*m*PLAN1D_SIZE,m,d_hv+i*m+j*m*PLAN1D_SIZE,d_ha+i+j*PLAN1D_SIZE);
}
}
for(int i=0;i<tube_s;i++){
hipLaunchKernelGGL(( d_normlize), dim3(blocks),dim3(threads),0,stream[i], d_fftData+i*m+tube_num*m*PLAN1D_SIZE,m,d_hv+i*m+tube_num*m*PLAN1D_SIZE,d_ha+i+tube_num*PLAN1D_SIZE);
}
}else{
for(int i=0;i<tube_s;i++){
hipLaunchKernelGGL(( d_normlize), dim3(blocks),dim3(threads),0,stream[i], d_fftData+i*m,m,d_hv+i*m,d_ha+i);
}
}
#pragma unroll
for(int i=0;i<PLAN1D_SIZE;i++){
hipStreamSynchronize(stream[i]);
}
#pragma unroll
for(int i=0;i<PLAN1D_SIZE;i++){
hipStreamDestroy(stream[i]);
}
if(hipDeviceSynchronize() != hipSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
hipFree(d_fftData);
//d_hv and d_ha take ifft
// int threads = 0;
// int blocks = 0;
int num = 0;
float *d_v,*d_a;
hipMalloc((void**)&d_v,sizeof(float)*m*n);
hipMalloc((void**)&d_a,sizeof(float)*n);
hipfftHandle iplan;
in[0] = ht;
ou[0] = n;
if(hipfftPlanMany(&iplan,1,n_f,in,stride_in,dist_in,ou,stride_ou,dist_ou,
HIPFFT_C2R,bat) != HIPFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Plan creation failed!",__FUNCTION__,__LINE__);
return;
}
if(hipfftExecC2R(iplan,d_hv,d_v)!=HIPFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Exec failed!",__FUNCTION__,__LINE__);
return;
}
if(hipDeviceSynchronize() != hipSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
num = m*n;
if(num < 512){
threads = num;
blocks = 1;
}else{
threads = 512;
blocks = ((num%512) == 0)?num/512:num/512+1;
}
hipLaunchKernelGGL(( fftResultProcess), dim3(blocks),dim3(threads), 0, 0, d_v,num,n);
if(hipDeviceSynchronize() != hipSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
hipMemcpy(v,d_v,sizeof(float)*m*n,hipMemcpyDeviceToHost);
hipFree(d_hv);
hipFree(d_v);
stride_in = 1;
stride_ou = 1;
dist_in = 1;
dist_ou = 1;
bat = 1;
if(hipfftPlanMany(&iplan,1,n_f,in,stride_in,dist_in,ou,stride_ou,dist_ou,
HIPFFT_C2R,bat) != HIPFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Plan creation failed!",__FUNCTION__,__LINE__);
return;
}
if(hipfftExecC2R(iplan,d_ha,d_a)!=HIPFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Exec failed!",__FUNCTION__,__LINE__);
return;
}
if(hipDeviceSynchronize() != hipSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
if(hipfftDestroy(iplan)!=HIPFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d]cufftDestory faile!",__FUNCTION__,__LINE__);
return;
}
num = n;
if(n < 512){
threads = num;
blocks = 1;
}else{
threads = 512;
blocks = ((num%512) == 0)?num/512:num/512+1;
}
hipLaunchKernelGGL(( fftResultProcess), dim3(blocks),dim3(threads), 0, 0, d_a,num,n);
if(hipDeviceSynchronize() != hipSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
hipMemcpy(a,d_a,sizeof(float)*n,hipMemcpyDeviceToHost);
hipFree(d_ha);
hipFree(d_a);
}
| dc05cb24148b3e83c598469620be0460909eed1d.cu | #include "norm.h"
#include "based.h"
/**
* normalization of tensor t:
* t = v * a;
* ||v|| = 1, i.e. <v,v> = e.
* INPUT: t is m×1×n
*
* OUTPUT: v*a = t.
*/
void streamedtnorm(float* t, const int m, const int n, float* v, float* a){
int ht = n/2+1;
int bat = m;
float* d_t;
cufftComplex* d_fftData;
cudaMalloc((void**)&d_t,sizeof(float)*bat*n);
cudaMalloc((void**)&d_fftData,sizeof(cufftComplex)*m*ht);
cudaMemcpy(d_t,t,sizeof(float)*bat*n,cudaMemcpyHostToDevice);
//tfft
cufftHandle plan;
int n_f[1] = {n};
int in[1] = {n};
int ou[1] = {ht};
int stride_in = bat,dist_in = 1;
int stride_ou = bat,dist_ou = 1;
if(cufftPlanMany(&plan,1,n_f,in,stride_in,dist_in,ou,stride_ou,dist_ou,
CUFFT_R2C,bat) != CUFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Plan creation failed!",__FUNCTION__,__LINE__);
return;
}
if(cufftExecR2C(plan,d_t,(cufftComplex*)d_fftData)!=CUFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Exec failed!",__FUNCTION__,__LINE__);
return;
}
if(cudaDeviceSynchronize() != cudaSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
cudaFree(d_t);
if(cufftDestroy(plan)!=CUFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d]cufftDestory faile!",__FUNCTION__,__LINE__);
return;
}
//solve normlize
int threads =512;
int blocks = 1;
//set stream
cudaStream_t* stream = (cudaStream_t*)malloc(sizeof(cudaStream_t)*PLAN1D_SIZE);
#pragma unroll
for(int i=0;i<PLAN1D_SIZE;i++){
cudaStreamCreate(&stream[i]);
}
cuComplex *d_hv,*d_ha;
cudaMalloc((void**)&d_hv,sizeof(cuComplex)*m*ht);
cudaMalloc((void**)&d_ha,sizeof(cuComplex)*ht);
int tube_num = ht/PLAN1D_SIZE;
int tube_s = ht%PLAN1D_SIZE;
if(tube_num > 0){
for(int j=0;j< tube_num;j++){
for(int i=0;i<PLAN1D_SIZE;i++){
d_normlize<<<blocks,threads,0,stream[i]>>>(d_fftData+i*m+j*m*PLAN1D_SIZE,m,d_hv+i*m+j*m*PLAN1D_SIZE,d_ha+i+j*PLAN1D_SIZE);
}
}
for(int i=0;i<tube_s;i++){
d_normlize<<<blocks,threads,0,stream[i]>>>(d_fftData+i*m+tube_num*m*PLAN1D_SIZE,m,d_hv+i*m+tube_num*m*PLAN1D_SIZE,d_ha+i+tube_num*PLAN1D_SIZE);
}
}else{
for(int i=0;i<tube_s;i++){
d_normlize<<<blocks,threads,0,stream[i]>>>(d_fftData+i*m,m,d_hv+i*m,d_ha+i);
}
}
#pragma unroll
for(int i=0;i<PLAN1D_SIZE;i++){
cudaStreamSynchronize(stream[i]);
}
#pragma unroll
for(int i=0;i<PLAN1D_SIZE;i++){
cudaStreamDestroy(stream[i]);
}
if(cudaDeviceSynchronize() != cudaSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
cudaFree(d_fftData);
//d_hv and d_ha take ifft
// int threads = 0;
// int blocks = 0;
int num = 0;
float *d_v,*d_a;
cudaMalloc((void**)&d_v,sizeof(float)*m*n);
cudaMalloc((void**)&d_a,sizeof(float)*n);
cufftHandle iplan;
in[0] = ht;
ou[0] = n;
if(cufftPlanMany(&iplan,1,n_f,in,stride_in,dist_in,ou,stride_ou,dist_ou,
CUFFT_C2R,bat) != CUFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Plan creation failed!",__FUNCTION__,__LINE__);
return;
}
if(cufftExecC2R(iplan,d_hv,d_v)!=CUFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Exec failed!",__FUNCTION__,__LINE__);
return;
}
if(cudaDeviceSynchronize() != cudaSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
num = m*n;
if(num < 512){
threads = num;
blocks = 1;
}else{
threads = 512;
blocks = ((num%512) == 0)?num/512:num/512+1;
}
fftResultProcess<<<blocks,threads>>>(d_v,num,n);
if(cudaDeviceSynchronize() != cudaSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
cudaMemcpy(v,d_v,sizeof(float)*m*n,cudaMemcpyDeviceToHost);
cudaFree(d_hv);
cudaFree(d_v);
stride_in = 1;
stride_ou = 1;
dist_in = 1;
dist_ou = 1;
bat = 1;
if(cufftPlanMany(&iplan,1,n_f,in,stride_in,dist_in,ou,stride_ou,dist_ou,
CUFFT_C2R,bat) != CUFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Plan creation failed!",__FUNCTION__,__LINE__);
return;
}
if(cufftExecC2R(iplan,d_ha,d_a)!=CUFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d] CUFFT ERROR: Exec failed!",__FUNCTION__,__LINE__);
return;
}
if(cudaDeviceSynchronize() != cudaSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
if(cufftDestroy(iplan)!=CUFFT_SUCCESS){
fprintf(stdout,"[%s]:[%d]cufftDestory faile!",__FUNCTION__,__LINE__);
return;
}
num = n;
if(n < 512){
threads = num;
blocks = 1;
}else{
threads = 512;
blocks = ((num%512) == 0)?num/512:num/512+1;
}
fftResultProcess<<<blocks,threads>>>(d_a,num,n);
if(cudaDeviceSynchronize() != cudaSuccess){
fprintf(stdout,"[%s]:[%d] cuda synchronize err!",__FUNCTION__,__LINE__);
return;
}
cudaMemcpy(a,d_a,sizeof(float)*n,cudaMemcpyDeviceToHost);
cudaFree(d_ha);
cudaFree(d_a);
}
|
c1d2887f6270d7fae6d9c3da761f314e37f6fb70.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// CUDA implementation of RGB to grayscale.
// Roughly 5x to 30x faster than OpenCV's implementation
//
// Converts an RGB color image to grayscale.
#include "CUDARGB2Y.h"
// Set your weights here.
constexpr double B_WEIGHT = 0.114;
constexpr double G_WEIGHT = 0.587;
constexpr double R_WEIGHT = 0.299;
// Internal; do NOT modify
constexpr int B_WT = static_cast<int>(64.0 * B_WEIGHT + 0.5);
constexpr int G_WT = static_cast<int>(64.0 * G_WEIGHT + 0.5);
constexpr int R_WT = static_cast<int>(64.0 * R_WEIGHT + 0.5);
template<bool weight>
__global__ void CUDARGB2Y_kernel(const hipTextureObject_t tex_img, const int pixels, uint8_t* const __restrict d_newimg) {
const unsigned int x = (blockIdx.x << 8) + threadIdx.x;
const uint8_t res = weight ? min(255, (B_WT*tex1Dfetch<int>(tex_img, 3 * x) + G_WT*tex1Dfetch<int>(tex_img, 3 * x + 1) + R_WT*tex1Dfetch<int>(tex_img, 3 * x + 2)) >> 6)
: (tex1Dfetch<int>(tex_img, 3 * x) + tex1Dfetch<int>(tex_img, 3 * x + 1) + tex1Dfetch<int>(tex_img, 3 * x + 2)) / 3;
if (x < pixels) d_newimg[x] = res;
}
void CUDARGB2Y(bool weight, const hipTextureObject_t tex_img, const int pixels, uint8_t* const __restrict d_newimg) {
(weight ? CUDARGB2Y_kernel<true> : CUDARGB2Y_kernel<falsehipLaunchKernelGGL((>)), dim3(((pixels - 1) >> 8) + 1), dim3(256), 0, 0, tex_img, pixels, d_newimg);
hipDeviceSynchronize();
}
| c1d2887f6270d7fae6d9c3da761f314e37f6fb70.cu | //
// CUDA implementation of RGB to grayscale.
// Roughly 5x to 30x faster than OpenCV's implementation
//
// Converts an RGB color image to grayscale.
#include "CUDARGB2Y.h"
// Set your weights here.
constexpr double B_WEIGHT = 0.114;
constexpr double G_WEIGHT = 0.587;
constexpr double R_WEIGHT = 0.299;
// Internal; do NOT modify
constexpr int B_WT = static_cast<int>(64.0 * B_WEIGHT + 0.5);
constexpr int G_WT = static_cast<int>(64.0 * G_WEIGHT + 0.5);
constexpr int R_WT = static_cast<int>(64.0 * R_WEIGHT + 0.5);
template<bool weight>
__global__ void CUDARGB2Y_kernel(const cudaTextureObject_t tex_img, const int pixels, uint8_t* const __restrict d_newimg) {
const unsigned int x = (blockIdx.x << 8) + threadIdx.x;
const uint8_t res = weight ? min(255, (B_WT*tex1Dfetch<int>(tex_img, 3 * x) + G_WT*tex1Dfetch<int>(tex_img, 3 * x + 1) + R_WT*tex1Dfetch<int>(tex_img, 3 * x + 2)) >> 6)
: (tex1Dfetch<int>(tex_img, 3 * x) + tex1Dfetch<int>(tex_img, 3 * x + 1) + tex1Dfetch<int>(tex_img, 3 * x + 2)) / 3;
if (x < pixels) d_newimg[x] = res;
}
void CUDARGB2Y(bool weight, const cudaTextureObject_t tex_img, const int pixels, uint8_t* const __restrict d_newimg) {
(weight ? CUDARGB2Y_kernel<true> : CUDARGB2Y_kernel<false>)<<<((pixels - 1) >> 8) + 1, 256>>>(tex_img, pixels, d_newimg);
cudaDeviceSynchronize();
}
|
25d4aabe208e7ae059b41e34d69e719177d80971.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2020, Vijay Thakkar ([email protected]).
**************************************************************************************************/
//////////////////////////////////////////////////////////////////////
// THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY //
//////////////////////////////////////////////////////////////////////
#include "benchmark/benchmark.h"
#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include "cuasr/functional.h"
#include "harness.h"
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
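// nominal GEMM operation count (one "multiply" and one "add" per inner-product term), used only
// to scale the Flop/s throughput counter below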
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_8x32x8_8x32x1_2x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x32x8_16x32x1_4x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x64x8_16x64x1_4x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x32x8_32x32x1_8x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x64x8_32x64x1_8x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x64x8_32x64x1_8x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x32x8_64x32x1_8x8_8x4_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x32x8_64x32x1_8x8_8x4_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_8x32x8_8x16x1_2x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_8x64x8_8x32x1_2x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x32x8_16x16x1_4x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x64x8_16x32x1_4x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x128x8_16x64x1_4x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x32x8_32x16x1_4x4_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x64x8_32x32x1_8x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x128x8_32x64x1_8x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x128x8_32x64x1_8x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x64x8_64x32x1_8x8_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x64x8_64x32x1_8x8_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x32x8_16x32x1_4x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x32x8_32x32x1_8x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x64x8_32x64x1_8x8_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x64x8_32x64x1_8x8_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 1
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x32x8_64x32x1_8x8_8x4_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x32x8_64x32x1_8x8_8x4_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x32x8_8x16x1_2x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x64x8_8x32x1_2x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x32x8_16x16x1_4x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x64x8_16x32x1_4x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x128x8_16x64x1_4x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x32x8_32x16x1_4x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x64x8_32x32x1_8x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x128x8_32x64x1_8x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x128x8_32x64x1_8x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x32x8_64x16x1_8x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x64x8_64x32x1_8x8_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x64x8_64x32x1_8x8_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x64x16_8x16x1_2x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x128x16_8x32x1_2x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x32x8_16x8x1_2x2_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x64x8_16x16x1_4x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x128x8_16x32x1_4x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x256x8_16x64x1_4x8_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x256x8_16x64x1_4x8_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x64x8_32x16x1_4x4_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x128x8_32x32x1_8x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x128x8_32x32x1_8x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 64 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x256x8_32x64x1_8x8_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x256x8_32x64x1_8x8_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x128x8_64x32x1_8x8_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x128x8_64x32x1_8x8_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x32x8_8x16x1_2x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x32x8_16x16x1_4x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x64x8_16x32x1_4x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x32x8_32x16x1_4x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x64x8_32x32x1_8x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x64x8_32x32x1_8x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x128x8_32x64x1_8x8_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x128x8_32x64x1_8x8_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 256 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_256x32x8_64x16x1_8x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_256x32x8_64x16x1_8x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 256 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_256x64x8_64x32x1_8x8_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_256x64x8_64x32x1_8x8_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x64x16_8x16x1_2x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x128x16_8x32x1_2x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 64 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x32x16_16x8x1_2x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x64x8_16x16x1_4x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x128x8_16x32x1_4x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x128x8_16x32x1_4x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x256x8_16x64x1_4x8_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x256x8_16x64x1_4x8_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x32x16_32x8x1_4x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x64x8_32x16x1_4x4_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x64x8_32x16x1_4x4_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x128x8_32x32x1_8x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x128x8_32x32x1_8x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 256 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_256x64x8_64x16x1_8x4_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_256x64x8_64x16x1_8x4_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
| 25d4aabe208e7ae059b41e34d69e719177d80971.cu | /***************************************************************************************************
* Copyright (c) 2020, Vijay Thakkar ([email protected]).
**************************************************************************************************/
//////////////////////////////////////////////////////////////////////
// THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY //
//////////////////////////////////////////////////////////////////////
#include "benchmark/benchmark.h"
#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include "cuasr/functional.h"
#include "harness.h"
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_8x32x8_8x32x1_2x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
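// Each registered benchmark sweeps N over 256, 512, ..., 4096 (RangeMultiplier(2)) and reports
// a "Flop/s" counter. With benchmark::Counter::kIsIterationInvariantRate, Google Benchmark
// multiplies the per-iteration value (2*N^3 semiring operations in this file) by the iteration
// count and divides by elapsed wall time, yielding an effective op/s rate. A hedged sketch of
// the same bookkeeping; `effective_ops_per_second` is illustrative, not a library API.
namespace {
constexpr double effective_ops_per_second(double n, double iterations, double elapsed_seconds) {
  return (2.0 * n * n * n) * iterations / elapsed_seconds;
}
} // namespace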
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x32x8_16x32x1_4x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x64x8_16x64x1_4x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x32x8_32x32x1_8x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x64x8_32x64x1_8x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x64x8_32x64x1_8x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x32x8_64x32x1_8x8_8x4_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x32x8_64x32x1_8x8_8x4_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_8x32x8_8x16x1_2x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_8x64x8_8x32x1_2x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x32x8_16x16x1_4x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x64x8_16x32x1_4x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x128x8_16x64x1_4x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x32x8_32x16x1_4x4_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x64x8_32x32x1_8x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x128x8_32x64x1_8x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x128x8_32x64x1_8x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x64x8_64x32x1_8x8_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x64x8_64x32x1_8x8_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
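// Note: the 64x64x8 configuration above is guarded by CUASR_BENCH_LEVEL >= 0,
// so it is compiled whenever benchmarking is enabled at all, unlike the
// surrounding level 1 and level 2 variants; it presumably serves as the
// baseline tiling for this operator on SM50.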
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x32x8_16x32x1_4x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x32x8_32x32x1_8x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x64x8_32x64x1_8x8_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x64x8_32x64x1_8x8_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 1
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x32x8_64x32x1_8x8_8x4_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x32x8_64x32x1_8x8_8x4_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x32x8_8x16x1_2x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x64x8_8x32x1_2x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x32x8_16x16x1_4x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x64x8_16x32x1_4x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x128x8_16x64x1_4x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x32x8_32x16x1_4x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x64x8_32x32x1_8x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x128x8_32x64x1_8x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x128x8_32x64x1_8x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x32x8_64x16x1_8x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x64x8_64x32x1_8x8_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x64x8_64x32x1_8x8_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x64x16_8x16x1_2x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_16x128x16_8x32x1_2x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x32x8_16x8x1_2x2_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x64x8_16x16x1_4x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x128x8_16x32x1_4x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x256x8_16x64x1_4x8_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x256x8_16x64x1_4x8_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x64x8_32x16x1_4x4_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x128x8_32x32x1_8x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x128x8_32x32x1_8x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 64 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x256x8_32x64x1_8x8_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x256x8_32x64x1_8x8_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x128x8_64x32x1_8x8_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x128x8_64x32x1_8x8_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x32x8_8x16x1_2x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x32x8_16x16x1_4x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x64x8_16x32x1_4x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x32x8_32x16x1_4x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x64x8_32x32x1_8x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x64x8_32x32x1_8x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x128x8_32x64x1_8x8_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x128x8_32x64x1_8x8_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 256 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_256x32x8_64x16x1_8x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_256x32x8_64x16x1_8x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 256 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_256x64x8_64x32x1_8x8_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_256x64x8_64x32x1_8x8_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x64x16_8x16x1_2x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_32x128x16_8x32x1_2x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 64 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x32x16_16x8x1_2x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x64x8_16x16x1_4x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x128x8_16x32x1_4x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x128x8_16x32x1_4x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x256x8_16x64x1_4x8_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_64x256x8_16x64x1_4x8_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x32x16_32x8x1_4x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x64x8_32x16x1_4x4_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x64x8_32x16x1_4x4_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x128x8_32x32x1_8x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_128x128x8_32x32x1_8x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 256 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_256x64x8_64x16x1_8x4_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::binary_or<precision>, cuasr::binary_and<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::RowMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_binary_or_binary_and_ssrgemm_nt_t_256x64x8_64x16x1_8x4_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
|
3407cbcf9fe2324c75f7402503cf5596c561b10e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <time.h>
#define N 289
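// Naive dense matrix multiply C = A * B for N x N single-precision matrices, one thread per
// element of C. setElement fills A and B with 1.0f, so every entry of the result should equal
// N (289), which makes the printed output easy to sanity-check.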
__global__ void MatMul(float d_A[N][N], float d_B[N][N], float d_C[N][N])
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < N && j < N)
{
for (int l = 0; l < N; l++)
{
//d_C[i][j] = d_C[i][j] + d_A[j][l] * d_B[l][i];
d_C[i][j] = d_C[i][j] + d_A[i][l] * d_B[l][j];
}
}
}
__global__ void setElement(float d_A[N][N], float d_B[N][N], float d_C[N][N])
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < N && j < N){
/* d_A[i][j] = i * (float)3.2 + j * (float)2.21;
d_B[i][j] = i * (float)1.3 + j * (float)3.1;
*/
d_A[i][j] = 1.0;
d_B[i][j] = 1.0;
d_C[i][j] = (float)0;
}
}
int main()
{
hipError_t res = hipSuccess;
int m,n,k;
m = n = k = N;
int i,j;
int ARRAY_SIZE = N * N;
int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
float h_A[N][N], h_B[N][N], h_C[N][N];
float (*d_A)[N], (*d_B)[N], (*d_C)[N];
res=hipMalloc((void**) &d_A, ARRAY_BYTES);
if(res!=hipSuccess ){
printf("\nCuda error!");
return -1;
}
res=hipMalloc((void**) &d_B, ARRAY_BYTES);
if( res!=hipSuccess ){
printf("\nCuda error!");
return -1;
}
res=hipMalloc((void**) &d_C, ARRAY_BYTES);
if( res!=hipSuccess ){
printf("\nCuda error!");
return -1;
}
// Kernel invocation with enough blocks to cover every element of the N x N matrices
int xThreadsPerBlock=32;
int yThreadsPerBlock=32;
int xBlocks = (N+(xThreadsPerBlock-1))/xThreadsPerBlock;
int yBlocks = (N+(yThreadsPerBlock-1))/yThreadsPerBlock;
dim3 threadsPerBlock(xThreadsPerBlock,yThreadsPerBlock);
dim3 numBlocks( xBlocks,yBlocks );
hipLaunchKernelGGL(( setElement), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C);
hipDeviceSynchronize();
res=hipMemcpy(h_A, d_A, ARRAY_BYTES, hipMemcpyDeviceToHost);
if( res!=hipSuccess){
printf("\nCuda error!");
return -1;
}
res=hipMemcpy(h_B, d_B, ARRAY_BYTES, hipMemcpyDeviceToHost);
if( res!=hipSuccess){
printf("\nCuda error!");
return -1;
}
res=hipMemcpy(h_C, d_C, ARRAY_BYTES, hipMemcpyDeviceToHost);
if( res!=hipSuccess){
printf("\nCuda error!");
return -1;
}
fprintf(stdout, "Here is the matrix A:\n\n");
for(i=0;i<m;i++) {
for(j=0;j<k;j++) {
fprintf(stdout, "%10.2f",h_A[i][j]);
}
fprintf(stdout, "\n");
}
fprintf(stdout, "Here is the matrix B:\n\n");
for(i=0;i<k;i++) {
for(j=0;j<n;j++) {
fprintf(stdout, "%10.2f",h_B[i][j]);
}
fprintf(stdout, "\n");
}
hipLaunchKernelGGL(( MatMul), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C);
hipDeviceSynchronize();
res=hipMemcpy(h_C, d_C, ARRAY_BYTES, hipMemcpyDeviceToHost);
if( res!=hipSuccess){
printf("\nCuda error!");
return -1;
}
fprintf(stdout, "Here is the matrix C:\n\n");
for(i=0;i<m;i++) {
for(j=0;j<n;j++) {
fprintf(stdout, "%10.2f",h_C[i][j]);
}
fprintf(stdout, "\n");
}
// Clean up memory (h_A, h_B and h_C are stack arrays, so there is nothing to free on the host;
// hipHostFree is only valid for buffers allocated with hipHostMalloc)
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
| 3407cbcf9fe2324c75f7402503cf5596c561b10e.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <time.h>
#define N 289
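// Naive dense matrix multiply C = A * B for N x N single-precision matrices, one thread per
// element of C. setElement fills A and B with 1.0f, so every entry of the result should equal
// N (289), which makes the printed output easy to sanity-check.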
__global__ void MatMul(float d_A[N][N], float d_B[N][N], float d_C[N][N])
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < N && j < N)
{
for (int l = 0; l < N; l++)
{
//d_C[i][j] = d_C[i][j] + d_A[j][l] * d_B[l][i];
d_C[i][j] = d_C[i][j] + d_A[i][l] * d_B[l][j];
}
}
}
__global__ void setElement(float d_A[N][N], float d_B[N][N], float d_C[N][N])
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < N && j < N){
/* d_A[i][j] = i * (float)3.2 + j * (float)2.21;
d_B[i][j] = i * (float)1.3 + j * (float)3.1;
*/
d_A[i][j] = 1.0;
d_B[i][j] = 1.0;
d_C[i][j] = (float)0;
}
}
int main()
{
cudaError_t res = cudaSuccess;
int m,n,k;
m = n = k = N;
int i,j;
int ARRAY_SIZE = N * N;
int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
float h_A[N][N], h_B[N][N], h_C[N][N];
float (*d_A)[N], (*d_B)[N], (*d_C)[N];
res=cudaMalloc((void**) &d_A, ARRAY_BYTES);
if(res!=cudaSuccess ){
printf("\nCuda error!");
return -1;
}
res=cudaMalloc((void**) &d_B, ARRAY_BYTES);
if( res!=cudaSuccess ){
printf("\nCuda error!");
return -1;
}
res=cudaMalloc((void**) &d_C, ARRAY_BYTES);
if( res!=cudaSuccess ){
printf("\nCuda error!");
return -1;
}
// Kernel invocation with enough blocks to cover every element of the N x N matrices
int xThreadsPerBlock=32;
int yThreadsPerBlock=32;
int xBlocks = (N+(xThreadsPerBlock-1))/xThreadsPerBlock;
int yBlocks = (N+(yThreadsPerBlock-1))/yThreadsPerBlock;
dim3 threadsPerBlock(xThreadsPerBlock,yThreadsPerBlock);
dim3 numBlocks( xBlocks,yBlocks );
setElement<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C);
cudaDeviceSynchronize();
res=cudaMemcpy(h_A, d_A, ARRAY_BYTES, cudaMemcpyDeviceToHost);
if( res!=cudaSuccess){
printf("\nCuda error!");
return -1;
}
res=cudaMemcpy(h_B, d_B, ARRAY_BYTES, cudaMemcpyDeviceToHost);
if( res!=cudaSuccess){
printf("\nCuda error!");
return -1;
}
res=cudaMemcpy(h_C, d_C, ARRAY_BYTES, cudaMemcpyDeviceToHost);
if( res!=cudaSuccess){
printf("\nCuda error!");
return -1;
}
fprintf(stdout, "Here is the matrix A:\n\n");
for(i=0;i<m;i++) {
for(j=0;j<k;j++) {
fprintf(stdout, "%10.2f",h_A[i][j]);
}
fprintf(stdout, "\n");
}
fprintf(stdout, "Here is the matrix B:\n\n");
for(i=0;i<k;i++) {
for(j=0;j<n;j++) {
fprintf(stdout, "%10.2f",h_B[i][j]);
}
fprintf(stdout, "\n");
}
MatMul<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C);
cudaDeviceSynchronize();
res=cudaMemcpy(h_C, d_C, ARRAY_BYTES, cudaMemcpyDeviceToHost);
if( res!=cudaSuccess){
printf("\nCuda error!");
return -1;
}
fprintf(stdout, "Here is the matrix C:\n\n");
for(i=0;i<m;i++) {
for(j=0;j<n;j++) {
fprintf(stdout, "%10.2f",h_C[i][j]);
}
fprintf(stdout, "\n");
}
// Clean up memory (h_A, h_B and h_C are stack arrays, so there is nothing to free on the host;
// cudaFreeHost is only valid for buffers allocated with cudaMallocHost)
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
|
4b85eb6f5b6271766b01fe883ac824eb9bf5b73e.hip | // !!! This is a file automatically generated by hipify!!!
#include "CrossSectionUtilities.hh"
#include <cmath>
namespace MonteRay {
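// Thins a linearised grid in place: any interior point whose cross section can be reproduced by
// linear interpolation between its neighbours to within half of max_error (in percent) is removed,
// and the scan repeats until no further point can be dropped.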
void
thinGrid(const totalXSFunct_t& xsFunc, linearGrid_t& linearGrid, double max_error) {
// thin grid
bool done;
do {
done = true;
unsigned i = 0;
for( auto previous_itr = linearGrid.begin(); previous_itr != linearGrid.end(); ++previous_itr) {
auto itr = previous_itr; ++itr;
if( itr == linearGrid.end() ) break;
auto next_itr = itr; ++next_itr;
if( next_itr == linearGrid.end() ) break;
// check log mid-point
double energy1 = previous_itr->first;
double energy2 = next_itr->first;
double energy = itr->first;
// calculated interpolatedXS
double lower = previous_itr->second;
double upper = next_itr->second;
double deltaE = energy2 - energy1;
double interpolatedXS = lower + (upper-lower) * (energy - energy1)/deltaE;
// check difference with real xs
double totalXS = xsFunc( energy );
double percentDiff = std::abs(totalXS - interpolatedXS ) * 100.0 / totalXS;
// printf( "Debug: i=%d E=%f, interp=%f, real=%f diff=%f \n", i, energy, interpolatedXS, totalXS, percentDiff);
if( percentDiff < max_error * 0.5 ) {
linearGrid.erase(itr);
done = false;
break;
}
++i;
}
} while( !done );
}
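// Refines the grid in place: the log-midpoint energy of every interval is tested, and if linear
// interpolation there differs from the true cross section by more than max_error percent the
// point is inserted. Intervals narrower than 1e-6 in energy are never split further.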
void
addPointsToGrid(const totalXSFunct_t& xsFunc, linearGrid_t& linearGrid, double max_error ) {
bool done;
// linearize
do {
done = true;
for( auto previous_itr = linearGrid.begin(); previous_itr != linearGrid.end(); ++previous_itr) {
auto itr = previous_itr; ++itr;
if( itr == linearGrid.end() ) break;
// check log mid-point
double energy1 = previous_itr->first;
double energy2 = itr->first;
double deltaE = energy2 - energy1;
if( deltaE > 1e-6 ) {
// don't add points finer than 1e-6
double energy = ::exp(( ::log(energy2) - ::log(energy1) )*0.5 + ::log(energy1));
// calculated interpolatedXS
double lower = previous_itr->second;
double upper = itr->second;
double interpolatedXS = lower + (upper-lower) * (energy - energy1)/deltaE;
// check difference with real xs
double totalXS = xsFunc( energy );
double percentDiff = std::abs(totalXS - interpolatedXS ) * 100.0 / totalXS;
if( percentDiff > max_error ) {
linearGrid.insert(itr, std::make_pair(energy, totalXS));
done = false;
}
}
}
} while ( !done );
}
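// Verifies the linearisation by sampling up to nIntermediateBins equally spaced energies inside
// every interval; any sample where the interpolated value misses the true cross section by more
// than max_error percent is added to the grid and the pass is repeated. Returns true once all
// intervals pass.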
bool
checkGrid(const totalXSFunct_t& xsFunc, linearGrid_t& linearGrid, double max_error, unsigned nIntermediateBins){
const bool debug = false;
if( debug ) printf( "Debug: createLinearGrid - checking linearization\n");
// check linearization
bool done = true;
do {
done = true;
auto start_itr = linearGrid.begin();
for( auto previous_itr = start_itr; previous_itr != linearGrid.end(); ++previous_itr) {
auto itr = previous_itr; ++itr;
if( itr == linearGrid.end() ) break;
// check log mid-point
double energy1 = previous_itr->first;
double energy2 = itr->first;
double deltaE = energy2 - energy1;
double lower = previous_itr->second;
double upper = itr->second;
// no need to go below 1-eV for photon data
if( std::abs( deltaE ) > 1e-6 ) {
nIntermediateBins = ::min( unsigned( deltaE / 1e-6 ), nIntermediateBins ) ;
} else {
nIntermediateBins = 0;
}
for( auto j=0; j<nIntermediateBins; ++j) {
double energy = energy1 + (deltaE*j)/nIntermediateBins;
// calculated interpolatedXS
double interpolatedXS = lower + (upper-lower) * (energy - energy1)/deltaE;
double totalXS = xsFunc( energy );
double percentDiff = std::abs(totalXS - interpolatedXS ) * 100.0 / totalXS;
if( percentDiff > max_error ) {
if( debug ) {
printf( "Debug: createLinearGrid - linearization failed for E=%.10f, real XS=%f, interpolated XS=%f, percent diff=%f\n",
energy, totalXS, interpolatedXS, percentDiff );
}
start_itr = linearGrid.insert(itr, std::make_pair(energy, totalXS));
done = false;
break;
}
if( debug ) {
printf( "Debug: createLinearGrid - linearization passed for E=%.10f, real XS=%f, interpolated XS=%f, percent diff=%f\n",
energy, totalXS, interpolatedXS, percentDiff );
}
}
}
} while ( !done );
return true;
}
}
| 4b85eb6f5b6271766b01fe883ac824eb9bf5b73e.cu | #include "CrossSectionUtilities.hh"
#include <cmath>
namespace MonteRay {
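// Thins a linearised grid in place: any interior point whose cross section can be reproduced by
// linear interpolation between its neighbours to within half of max_error (in percent) is removed,
// and the scan repeats until no further point can be dropped.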
void
thinGrid(const totalXSFunct_t& xsFunc, linearGrid_t& linearGrid, double max_error) {
// thin grid
bool done;
do {
done = true;
unsigned i = 0;
for( auto previous_itr = linearGrid.begin(); previous_itr != linearGrid.end(); ++previous_itr) {
auto itr = previous_itr; ++itr;
if( itr == linearGrid.end() ) break;
auto next_itr = itr; ++next_itr;
if( next_itr == linearGrid.end() ) break;
// check log mid-point
double energy1 = previous_itr->first;
double energy2 = next_itr->first;
double energy = itr->first;
// calculated interpolatedXS
double lower = previous_itr->second;
double upper = next_itr->second;
double deltaE = energy2 - energy1;
double interpolatedXS = lower + (upper-lower) * (energy - energy1)/deltaE;
// check difference with real xs
double totalXS = xsFunc( energy );
double percentDiff = std::abs(totalXS - interpolatedXS ) * 100.0 / totalXS;
// printf( "Debug: i=%d E=%f, interp=%f, real=%f diff=%f \n", i, energy, interpolatedXS, totalXS, percentDiff);
if( percentDiff < max_error * 0.5 ) {
linearGrid.erase(itr);
done = false;
break;
}
++i;
}
} while( !done );
}
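// Refines the grid in place: the log-midpoint energy of every interval is tested, and if linear
// interpolation there differs from the true cross section by more than max_error percent the
// point is inserted. Intervals narrower than 1e-6 in energy are never split further.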
void
addPointsToGrid(const totalXSFunct_t& xsFunc, linearGrid_t& linearGrid, double max_error ) {
bool done;
// linearize
do {
done = true;
for( auto previous_itr = linearGrid.begin(); previous_itr != linearGrid.end(); ++previous_itr) {
auto itr = previous_itr; ++itr;
if( itr == linearGrid.end() ) break;
// check log mid-point
double energy1 = previous_itr->first;
double energy2 = itr->first;
double deltaE = energy2 - energy1;
if( deltaE > 1e-6 ) {
// don't add points finer than 1e-6
double energy = std::exp(( std::log(energy2) - std::log(energy1) )*0.5 + std::log(energy1));
// calculated interpolatedXS
double lower = previous_itr->second;
double upper = itr->second;
double interpolatedXS = lower + (upper-lower) * (energy - energy1)/deltaE;
// check difference with real xs
double totalXS = xsFunc( energy );
double percentDiff = std::abs(totalXS - interpolatedXS ) * 100.0 / totalXS;
if( percentDiff > max_error ) {
linearGrid.insert(itr, std::make_pair(energy, totalXS));
done = false;
}
}
}
} while ( !done );
}
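// Verifies the linearisation by sampling up to nIntermediateBins equally spaced energies inside
// every interval; any sample where the interpolated value misses the true cross section by more
// than max_error percent is added to the grid and the pass is repeated. Returns true once all
// intervals pass.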
bool
checkGrid(const totalXSFunct_t& xsFunc, linearGrid_t& linearGrid, double max_error, unsigned nIntermediateBins){
const bool debug = false;
if( debug ) printf( "Debug: createLinearGrid - checking linearization\n");
// check linearization
bool done = true;
do {
done = true;
auto start_itr = linearGrid.begin();
for( auto previous_itr = start_itr; previous_itr != linearGrid.end(); ++previous_itr) {
auto itr = previous_itr; ++itr;
if( itr == linearGrid.end() ) break;
// check log mid-point
double energy1 = previous_itr->first;
double energy2 = itr->first;
double deltaE = energy2 - energy1;
double lower = previous_itr->second;
double upper = itr->second;
// no need to go below 1-eV for photon data
if( std::abs( deltaE ) > 1e-6 ) {
nIntermediateBins = std::min( unsigned( deltaE / 1e-6 ), nIntermediateBins ) ;
} else {
nIntermediateBins = 0;
}
for( auto j=0; j<nIntermediateBins; ++j) {
double energy = energy1 + (deltaE*j)/nIntermediateBins;
// calculated interpolatedXS
double interpolatedXS = lower + (upper-lower) * (energy - energy1)/deltaE;
double totalXS = xsFunc( energy );
double percentDiff = std::abs(totalXS - interpolatedXS ) * 100.0 / totalXS;
if( percentDiff > max_error ) {
if( debug ) {
printf( "Debug: createLinearGrid - linearization failed for E=%.10f, real XS=%f, interpolated XS=%f, percent diff=%f\n",
energy, totalXS, interpolatedXS, percentDiff );
}
start_itr = linearGrid.insert(itr, std::make_pair(energy, totalXS));
done = false;
break;
}
if( debug ) {
printf( "Debug: createLinearGrid - linearization passed for E=%.10f, real XS=%f, interpolated XS=%f, percent diff=%f\n",
energy, totalXS, interpolatedXS, percentDiff );
}
}
}
} while ( !done );
return true;
}
}
|
1ae729e1f97f83b54a03d855e41090e21b7c7e2b.hip | // !!! This is a file automatically generated by hipify!!!
/***********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in HiPC'07
paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
Copyright (c) 2008 International Institute of Information Technology - Hyderabad.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
Created by Pawan Harish.
************************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#define MAX_THREADS_PER_BLOCK 512
int no_of_nodes;
int edge_list_size;
FILE *fp;
//Structure to hold a node's information: offset of its first edge and its edge count
struct Node
{
int starting;
int no_of_edges;
};
#include "kernel.hip"
#include "kernel2.cu"
void BFSGraph(int argc, char** argv);
////////////////////////////////////////////////////////////////////////////////
// Main Program
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
no_of_nodes=0;
edge_list_size=0;
BFSGraph( argc, argv);
}
void Usage(int argc, char**argv){
fprintf(stderr,"Usage: %s <input_file>\n", argv[0]);
}
////////////////////////////////////////////////////////////////////////////////
//Apply BFS on a Graph using CUDA
////////////////////////////////////////////////////////////////////////////////
void BFSGraph( int argc, char** argv)
{
char *input_f;
if(argc!=2){
Usage(argc, argv);
exit(0);
}
input_f = argv[1];
printf("Reading File\n");
//Read in Graph from a file
fp = fopen(input_f,"r");
if(!fp)
{
printf("Error Reading graph file\n");
return;
}
int source = 0;
fscanf(fp,"%d",&no_of_nodes);
int num_of_blocks = 1;
int num_of_threads_per_block = no_of_nodes;
//Make execution Parameters according to the number of nodes
//Distribute threads across multiple Blocks if necessary
if(no_of_nodes>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(no_of_nodes/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
// allocate host memory
Node* h_graph_nodes = (Node*) malloc(sizeof(Node)*no_of_nodes);
//bool *h_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes);
//bool *h_updating_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes);
bool *h_graph_visited = (bool*) malloc(sizeof(bool)*no_of_nodes);
int start, edgeno;
// initialize the memory
for( unsigned int i = 0; i < no_of_nodes; i++)
{
fscanf(fp,"%d %d",&start,&edgeno);
h_graph_nodes[i].starting = start;
h_graph_nodes[i].no_of_edges = edgeno;
//h_graph_mask[i]=false;
//h_updating_graph_mask[i]=false;
//h_graph_visited[i]=false;
}
//read the source node from the file
fscanf(fp,"%d",&source);
source=0;
//set the source node as true in the mask
//h_graph_mask[source]=true;
//h_graph_visited[source]=true;
fscanf(fp,"%d",&edge_list_size);
int id,cost;
int* h_graph_edges = (int*) malloc(sizeof(int)*edge_list_size);
for(int i=0; i < edge_list_size ; i++)
{
fscanf(fp,"%d",&id);
fscanf(fp,"%d",&cost);
h_graph_edges[i] = id;
}
if(fp)
fclose(fp);
printf("Read File\n");
//Copy the Node list to device memory
Node* d_graph_nodes;
hipMalloc( (void**) &d_graph_nodes, sizeof(Node)*no_of_nodes) ;
hipMemcpy( d_graph_nodes, h_graph_nodes, sizeof(Node)*no_of_nodes, hipMemcpyHostToDevice) ;
//Copy the Edge List to device Memory
int* d_graph_edges;
hipMalloc( (void**) &d_graph_edges, sizeof(int)*edge_list_size) ;
hipMemcpy( d_graph_edges, h_graph_edges, sizeof(int)*edge_list_size, hipMemcpyHostToDevice) ;
//Copy the Mask to device memory
bool* d_graph_mask;
hipMalloc( (void**) &d_graph_mask, sizeof(bool)*no_of_nodes) ;
if(hipMemset(d_graph_mask, false, no_of_nodes*sizeof(bool)) != hipSuccess){
printf("error in hipMemset(d_graph_mask");
exit(1);
}
if(hipMemset(d_graph_mask, true, sizeof(bool)) != hipSuccess){
printf("error in hipMemset(d_graph_mask");
exit(1);
}
bool* d_updating_graph_mask;
hipMalloc( (void**) &d_updating_graph_mask, sizeof(bool)*no_of_nodes) ;
if (hipMemset(d_updating_graph_mask, false, no_of_nodes*sizeof(bool))!= hipSuccess){
printf("error in hipMemset(d_graph_mask");
exit(1);
}
//Copy the Visited nodes array to device memory
bool* d_graph_visited;
hipMalloc( (void**) &d_graph_visited, sizeof(bool)*no_of_nodes) ;
if (hipMemset(d_graph_visited, false, no_of_nodes*sizeof(bool))!= hipSuccess){
printf("error in hipMemset(d_graph_mask");
exit(1);
}
if (hipMemset(d_graph_visited, true, sizeof(bool))!= hipSuccess){
printf("error in hipMemset(d_graph_mask");
exit(1);
}
// allocate mem for the result on host side
int8_t* h_cost = (int8_t*) malloc( sizeof(int8_t)*no_of_nodes);
for(int i=0;i<no_of_nodes;i++)
h_cost[i]=-1;
h_cost[source]=0;
// allocate device memory for result
int8_t* d_cost;
hipMalloc( (void**) &d_cost, sizeof(int8_t)*no_of_nodes);
hipMemcpy( d_cost, h_cost, sizeof(int8_t)*no_of_nodes, hipMemcpyHostToDevice) ;
//make a bool to check if the execution is over
bool *d_over;
hipMalloc( (void**) &d_over, sizeof(bool));
printf("Copied Everything to GPU memory\n");
// setup execution parameters
dim3 grid( num_of_blocks, 1, 1);
dim3 threads( num_of_threads_per_block, 1, 1);
int k=0;
printf("Start traversing the tree\n");
bool stop;
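//Each pass of the loop below processes one BFS level: Kernel expands the current
//frontier (d_graph_mask) into d_updating_graph_mask, and Kernel2 folds the updating
//mask back into the frontier and sets *d_over when any new node was reached
//(the kernel bodies live in kernel.hip / kernel2.cu).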
//Call the Kernel repeatedly until every element of the Frontier mask is false
do
{
//if no thread changes this value then the loop stops
stop=false;
hipMemcpy( d_over, &stop, sizeof(bool), hipMemcpyHostToDevice) ;
hipLaunchKernelGGL(( Kernel), dim3(grid), dim3(threads), 0 , 0, d_graph_nodes, d_graph_edges, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_cost, no_of_nodes);
// check if kernel execution generated an error
hipLaunchKernelGGL(( Kernel2), dim3(grid), dim3(threads), 0 , 0, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes);
// check if kernel execution generated an error
hipMemcpy( &stop, d_over, sizeof(bool), hipMemcpyDeviceToHost) ;
k++;
}
while(stop);
printf("Kernel Executed %d times\n",k);
// copy result from device to host
hipMemcpy( h_cost, d_cost, sizeof(int8_t)*no_of_nodes, hipMemcpyDeviceToHost) ;
//Store the result into a file
FILE *fpo = fopen("result.txt","w");
for(int i=0;i<no_of_nodes;i++)
fprintf(fpo,"%d) cost:%d\n",i,h_cost[i]);
fclose(fpo);
printf("Result stored in result.txt\n");
// cleanup memory
free( h_graph_nodes);
free( h_graph_edges);
//free( h_graph_mask);
//free( h_updating_graph_mask);
free( h_graph_visited);
free( h_cost);
hipFree(d_graph_nodes);
hipFree(d_graph_edges);
hipFree(d_graph_mask);
hipFree(d_updating_graph_mask);
hipFree(d_graph_visited);
hipFree(d_cost);
}
| 1ae729e1f97f83b54a03d855e41090e21b7c7e2b.cu | /***********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in HiPC'07
paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
Copyright (c) 2008 International Institute of Information Technology - Hyderabad.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
Created by Pawan Harish.
************************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#define MAX_THREADS_PER_BLOCK 512
int no_of_nodes;
int edge_list_size;
FILE *fp;
//Structure to hold a node information
struct Node
{
int starting;
int no_of_edges;
};
#include "kernel.cu"
#include "kernel2.cu"
void BFSGraph(int argc, char** argv);
////////////////////////////////////////////////////////////////////////////////
// Main Program
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
no_of_nodes=0;
edge_list_size=0;
BFSGraph( argc, argv);
}
void Usage(int argc, char**argv){
fprintf(stderr,"Usage: %s <input_file>\n", argv[0]);
}
////////////////////////////////////////////////////////////////////////////////
//Apply BFS on a Graph using CUDA
////////////////////////////////////////////////////////////////////////////////
void BFSGraph( int argc, char** argv)
{
char *input_f;
if(argc!=2){
Usage(argc, argv);
exit(0);
}
input_f = argv[1];
printf("Reading File\n");
//Read in Graph from a file
fp = fopen(input_f,"r");
if(!fp)
{
printf("Error Reading graph file\n");
return;
}
int source = 0;
fscanf(fp,"%d",&no_of_nodes);
int num_of_blocks = 1;
int num_of_threads_per_block = no_of_nodes;
//Make execution Parameters according to the number of nodes
//Distribute threads across multiple Blocks if necessary
if(no_of_nodes>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(no_of_nodes/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
// allocate host memory
Node* h_graph_nodes = (Node*) malloc(sizeof(Node)*no_of_nodes);
//bool *h_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes);
//bool *h_updating_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes);
bool *h_graph_visited = (bool*) malloc(sizeof(bool)*no_of_nodes);
int start, edgeno;
// initialize the memory
for( unsigned int i = 0; i < no_of_nodes; i++)
{
fscanf(fp,"%d %d",&start,&edgeno);
h_graph_nodes[i].starting = start;
h_graph_nodes[i].no_of_edges = edgeno;
//h_graph_mask[i]=false;
//h_updating_graph_mask[i]=false;
//h_graph_visited[i]=false;
}
//read the source node from the file
fscanf(fp,"%d",&source);
source=0;
//set the source node as true in the mask
//h_graph_mask[source]=true;
//h_graph_visited[source]=true;
fscanf(fp,"%d",&edge_list_size);
int id,cost;
int* h_graph_edges = (int*) malloc(sizeof(int)*edge_list_size);
for(int i=0; i < edge_list_size ; i++)
{
fscanf(fp,"%d",&id);
fscanf(fp,"%d",&cost);
h_graph_edges[i] = id;
}
if(fp)
fclose(fp);
printf("Read File\n");
//Copy the Node list to device memory
Node* d_graph_nodes;
cudaMalloc( (void**) &d_graph_nodes, sizeof(Node)*no_of_nodes) ;
cudaMemcpy( d_graph_nodes, h_graph_nodes, sizeof(Node)*no_of_nodes, cudaMemcpyHostToDevice) ;
//Copy the Edge List to device Memory
int* d_graph_edges;
cudaMalloc( (void**) &d_graph_edges, sizeof(int)*edge_list_size) ;
cudaMemcpy( d_graph_edges, h_graph_edges, sizeof(int)*edge_list_size, cudaMemcpyHostToDevice) ;
//Copy the Mask to device memory
bool* d_graph_mask;
cudaMalloc( (void**) &d_graph_mask, sizeof(bool)*no_of_nodes) ;
if(cudaMemset(d_graph_mask, false, no_of_nodes*sizeof(bool)) != cudaSuccess){
printf("error in cudaMemset(d_graph_mask");
exit(1);
}
if(cudaMemset(d_graph_mask, true, sizeof(bool)) != cudaSuccess){
printf("error in cudaMemset(d_graph_mask");
exit(1);
}
bool* d_updating_graph_mask;
cudaMalloc( (void**) &d_updating_graph_mask, sizeof(bool)*no_of_nodes) ;
if (cudaMemset(d_updating_graph_mask, false, no_of_nodes*sizeof(bool))!= cudaSuccess){
printf("error in cudaMemset(d_graph_mask");
exit(1);
}
//Copy the Visited nodes array to device memory
bool* d_graph_visited;
cudaMalloc( (void**) &d_graph_visited, sizeof(bool)*no_of_nodes) ;
if (cudaMemset(d_graph_visited, false, no_of_nodes*sizeof(bool))!= cudaSuccess){
printf("error in cudaMemset(d_graph_mask");
exit(1);
}
if (cudaMemset(d_graph_visited, true, sizeof(bool))!= cudaSuccess){
printf("error in cudaMemset(d_graph_mask");
exit(1);
}
// allocate mem for the result on host side
int8_t* h_cost = (int8_t*) malloc( sizeof(int8_t)*no_of_nodes);
for(int i=0;i<no_of_nodes;i++)
h_cost[i]=-1;
h_cost[source]=0;
// allocate device memory for result
int8_t* d_cost;
cudaMalloc( (void**) &d_cost, sizeof(int8_t)*no_of_nodes);
cudaMemcpy( d_cost, h_cost, sizeof(int8_t)*no_of_nodes, cudaMemcpyHostToDevice) ;
//make a bool to check if the execution is over
bool *d_over;
cudaMalloc( (void**) &d_over, sizeof(bool));
printf("Copied Everything to GPU memory\n");
// setup execution parameters
dim3 grid( num_of_blocks, 1, 1);
dim3 threads( num_of_threads_per_block, 1, 1);
int k=0;
printf("Start traversing the tree\n");
bool stop;
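//Each pass of the loop below processes one BFS level: Kernel expands the current
//frontier (d_graph_mask) into d_updating_graph_mask, and Kernel2 folds the updating
//mask back into the frontier and sets *d_over when any new node was reached
//(the kernel bodies live in kernel.cu / kernel2.cu).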
//Call the Kernel repeatedly until every element of the Frontier mask is false
do
{
//if no thread changes this value then the loop stops
stop=false;
cudaMemcpy( d_over, &stop, sizeof(bool), cudaMemcpyHostToDevice) ;
Kernel<<< grid, threads, 0 >>>( d_graph_nodes, d_graph_edges, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_cost, no_of_nodes);
// check if kernel execution generated an error
Kernel2<<< grid, threads, 0 >>>( d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes);
// check if kernel execution generated an error
cudaMemcpy( &stop, d_over, sizeof(bool), cudaMemcpyDeviceToHost) ;
k++;
}
while(stop);
printf("Kernel Executed %d times\n",k);
// copy result from device to host
cudaMemcpy( h_cost, d_cost, sizeof(int8_t)*no_of_nodes, cudaMemcpyDeviceToHost) ;
//Store the result into a file
FILE *fpo = fopen("result.txt","w");
for(int i=0;i<no_of_nodes;i++)
fprintf(fpo,"%d) cost:%d\n",i,h_cost[i]);
fclose(fpo);
printf("Result stored in result.txt\n");
// cleanup memory
free( h_graph_nodes);
free( h_graph_edges);
//free( h_graph_mask);
//free( h_updating_graph_mask);
free( h_graph_visited);
free( h_cost);
cudaFree(d_graph_nodes);
cudaFree(d_graph_edges);
cudaFree(d_graph_mask);
cudaFree(d_updating_graph_mask);
cudaFree(d_graph_visited);
cudaFree(d_cost);
}
|
fc5ae009eec5b065d23407290df0136a911b672a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
} | fc5ae009eec5b065d23407290df0136a911b672a.cu | #include "includes.h"
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
} |
6c4493ba8e9f659659595de6da3ec3e3cd4158ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <unistd.h>
#include <sys/time.h>
#include <stdint.h>
#define MAX 10
#define MIN 1
void lu_decomp(float *a, float *u,int dimension);
__global__ void DUKernel(float *D_a, float *D_u,unsigned int size);
uint64_t getTime();
int main(int argc, char **argv){
float *a, *u, *l;
int dimension;
dimension = atoi(argv[1]);
a= (float*)malloc(sizeof(float) * (dimension*dimension));
l= (float*)malloc(sizeof(float) * (dimension*dimension));
u= (float*)malloc(sizeof(float) * (dimension*dimension));
for(int i = 0; i<dimension; i++)
{
for(int j = 0; j<dimension; j++)
{
a[(i*dimension)+j] = rand() % (MAX - MIN) + MIN;
u[(i*dimension)+j] = a[(i*dimension)+j];
if(i == j)
{
l[(i*dimension)+j] = 1;
}
else
{
l[(i*dimension)+j] = 0;
}
}
}
for(int k = 0; k < dimension-1; k++)
{
for(int j=k+1; j < dimension; j++ )
{
l[(j*dimension)+k] = a[(j*dimension)+k]/a[(k*dimension)+k];
u[(j*dimension)+k]=0;
}
}
/*printf("U before\n");
for(int i = 0; i<dimension; i++)
{
for(int j = 0; j<dimension; j++)
{
printf("%15f",u[(i*dimension)+j]);
}
printf("\n");
}*/
lu_decomp(a,u,dimension);
/*
remove this comment for verification part
float temp =0;
float x=0;
float diff_allowed=10;
for(int i =0; i < dimension; i++)
{
for(int j=0; j < dimension; j++)
{
temp =0;
for(int k=0; k < dimension; k++)
{
temp = temp + l[(i*dimension)+k]* u[(k*dimension)+j];
temp=a[(i*dimension)+j];
}
//printf("%15f",temp);
if((abs(temp-a[(i*dimension)+j])>diff_allowed))
{
x=abs(temp-a[(i*dimension)+j]);
printf("problem");
printf("diff: %5f\n",x);
}
}
//printf("\n");
}
remove this comment for verification
*/
//printf("\n");
//printf("U Matrix:\n");
/*
for(int i = 0; i<dimension; i++)
{
for(int j = 0; j<dimension; j++)
{
printf("%15f",u[(i*dimension)+j]);
}
printf("\n");
}
for(int i = 0; i<dimension; i++)
{
for(int j = 0; j<dimension; j++)
{
printf("%15f",l[(i*dimension)+j]);
}
printf("\n");
}
printf("\n");
printf("Original Matrix:\n");
for(int i = 0; i<dimension; i++)
{
for(int j = 0; j<dimension; j++)
{
printf("%15f",a[(i*dimension)+j]);
}
printf("\n");
}*/
return 0;
}
void lu_decomp(float *a,float *u, int dimension)
{
float *d_a ,*d_u;
uint64_t astart, aend;
astart = getTime();
hipMalloc(&d_a, (dimension*dimension)*sizeof(float));
hipMalloc(&d_u, (dimension*dimension)*sizeof(float));
//Copying data to device from host
hipMemcpy(d_a, a, sizeof(float)*dimension*(dimension),hipMemcpyHostToDevice);
hipMemcpy(d_u, u, sizeof(float)*dimension*(dimension),hipMemcpyHostToDevice);
//Kernel call
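//Launch dimension blocks of dimension threads (one thread per matrix element) when
//dimension<=1000, otherwise 1000-thread blocks; the third launch argument requests
//4*dimension*dimension bytes of dynamic shared memory for the kernel's temp[] buffer,
//which implicitly assumes the whole matrix fits in one block's shared-memory allotment.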
if(dimension<1001)
hipLaunchKernelGGL(( DUKernel), dim3(dimension) ,dim3(dimension),4*dimension*dimension, 0, d_a, d_u ,dimension);
else
hipLaunchKernelGGL(( DUKernel), dim3((dimension*dimension/1000)),dim3(1000),4*dimension*dimension, 0, d_a, d_u ,dimension);
//DUKernel<<<1024 ,100,4*dimension*dimension>>>(d_a,d_u, dimension);
//Copying data to host from device
hipMemcpy(a,d_a,sizeof(float)*dimension*(dimension),hipMemcpyDeviceToHost);
//hipMemcpy(l,d_l,sizeof(float)*dimension*(dimension),hipMemcpyDeviceToHost);
hipMemcpy(u,d_u,sizeof(float)*dimension*(dimension),hipMemcpyDeviceToHost);
//Deallocating memory on the device
hipFree(d_a);
hipFree(d_u);
aend = getTime();
printf("%d ,%f \n",dimension,(aend-astart)/1000000.0);
}
__global__ void DUKernel(float *D_a,float *D_u, unsigned int dimension)
{
// 10x10 size matrix is for experiment, so argv[1]=10
extern __shared__ float temp[];
int k=threadIdx.x;
int j=blockIdx.x;
int p= threadIdx.x+(blockIdx.x*blockDim.x);
temp[p]=D_u[p];
__syncthreads();
int i=0;
int s=0;
while(i<threadIdx.x && s< blockIdx.x)
{
temp[p]=temp[p]-(temp[(s*dimension)+(k*(j/1000))+k] * ((temp[(j*dimension)+(i*(j/1000))+i])/temp[(j*dimension)+(j*(j/1000))+j]));
i++;
s++;
}
/* printf("outside1 Temp:%10f k:%d j:%d\n",temp[(k*dimension)+j],k,j);
float p=temp[(j*dimension)+k]/temp[(k*dimension)+k];
for(int i=(k+1);i<dimension;i++)
{
//printf("inside loop%d\n",i);
//printf("before Temp:%10f,j:%d i:%d\n",temp[(j*dimension)+i]);
temp[(j*dimension)+i]=temp[(j*dimension)+i]-(temp[(k*dimension)+i]*p);
//printf("after:Temp:%10f\n",temp[j*dimension+i]);
//printf("after j:%d i:%d",j,i);
}*/
__syncthreads();
D_u[p]=temp[p];
}
uint64_t getTime(){
struct timeval t;
gettimeofday(&t, NULL);
return (uint64_t)(t.tv_sec)*1000000 + (uint64_t)(t.tv_usec);
} | 6c4493ba8e9f659659595de6da3ec3e3cd4158ae.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <unistd.h>
#include <sys/time.h>
#include <stdint.h>
#define MAX 10
#define MIN 1
void lu_decomp(float *a, float *u,int dimension);
__global__ void DUKernel(float *D_a, float *D_u,unsigned int size);
uint64_t getTime();
int main(int argc, char **argv){
float *a, *u, *l;
int dimension;
dimension = atoi(argv[1]);
a= (float*)malloc(sizeof(float) * (dimension*dimension));
l= (float*)malloc(sizeof(float) * (dimension*dimension));
u= (float*)malloc(sizeof(float) * (dimension*dimension));
for(int i = 0; i<dimension; i++)
{
for(int j = 0; j<dimension; j++)
{
a[(i*dimension)+j] = rand() % (MAX - MIN) + MIN;
u[(i*dimension)+j] = a[(i*dimension)+j];
if(i == j)
{
l[(i*dimension)+j] = 1;
}
else
{
l[(i*dimension)+j] = 0;
}
}
}
for(int k = 0; k < dimension-1; k++)
{
for(int j=k+1; j < dimension; j++ )
{
l[(j*dimension)+k] = a[(j*dimension)+k]/a[(k*dimension)+k];
u[(j*dimension)+k]=0;
}
}
/*printf("U before\n");
for(int i = 0; i<dimension; i++)
{
for(int j = 0; j<dimension; j++)
{
printf("%15f",u[(i*dimension)+j]);
}
printf("\n");
}*/
lu_decomp(a,u,dimension);
/*
remove this comment for verification part
float temp =0;
float x=0;
float diff_allowed=10;
for(int i =0; i < dimension; i++)
{
for(int j=0; j < dimension; j++)
{
temp =0;
for(int k=0; k < dimension; k++)
{
temp = temp + l[(i*dimension)+k]* u[(k*dimension)+j];
temp=a[(i*dimension)+j];
}
//printf("%15f",temp);
if((abs(temp-a[(i*dimension)+j])>diff_allowed))
{
x=abs(temp-a[(i*dimension)+j]);
printf("problem");
printf("diff: %5f\n",x);
}
}
//printf("\n");
}
remove this comment for verification
*/
//printf("\n");
//printf("U Matrix:\n");
/*
for(int i = 0; i<dimension; i++)
{
for(int j = 0; j<dimension; j++)
{
printf("%15f",u[(i*dimension)+j]);
}
printf("\n");
}
for(int i = 0; i<dimension; i++)
{
for(int j = 0; j<dimension; j++)
{
printf("%15f",l[(i*dimension)+j]);
}
printf("\n");
}
printf("\n");
printf("Original Matrix:\n");
for(int i = 0; i<dimension; i++)
{
for(int j = 0; j<dimension; j++)
{
printf("%15f",a[(i*dimension)+j]);
}
printf("\n");
}*/
return 0;
}
void lu_decomp(float *a,float *u, int dimension)
{
float *d_a ,*d_u;
uint64_t astart, aend;
astart = getTime();
cudaMalloc(&d_a, (dimension*dimension)*sizeof(float));
cudaMalloc(&d_u, (dimension*dimension)*sizeof(float));
//Copying data to device from host
cudaMemcpy(d_a, a, sizeof(float)*dimension*(dimension),cudaMemcpyHostToDevice);
cudaMemcpy(d_u, u, sizeof(float)*dimension*(dimension),cudaMemcpyHostToDevice);
//Kernel call
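//Launch dimension blocks of dimension threads (one thread per matrix element) when
//dimension<=1000, otherwise 1000-thread blocks; the third launch argument requests
//4*dimension*dimension bytes of dynamic shared memory for the kernel's temp[] buffer,
//which implicitly assumes the whole matrix fits in one block's shared-memory allotment.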
if(dimension<1001)
DUKernel<<<dimension ,dimension,4*dimension*dimension>>>(d_a, d_u ,dimension);
else
DUKernel<<<(dimension*dimension/1000),1000,4*dimension*dimension>>>(d_a, d_u ,dimension);
//DUKernel<<<1024 ,100,4*dimension*dimension>>>(d_a,d_u, dimension);
//Copying data to host from device
cudaMemcpy(a,d_a,sizeof(float)*dimension*(dimension),cudaMemcpyDeviceToHost);
//cudaMemcpy(l,d_l,sizeof(float)*dimension*(dimension),cudaMemcpyDeviceToHost);
cudaMemcpy(u,d_u,sizeof(float)*dimension*(dimension),cudaMemcpyDeviceToHost);
//Deallocating memory on the device
cudaFree(d_a);
cudaFree(d_u);
aend = getTime();
printf("%d ,%f \n",dimension,(aend-astart)/1000000.0);
}
__global__ void DUKernel(float *D_a,float *D_u, unsigned int dimension)
{
// 10x10 size matrix is for experiment, so argv[1]=10
extern __shared__ float temp[];
int k=threadIdx.x;
int j=blockIdx.x;
int p= threadIdx.x+(blockIdx.x*blockDim.x);
temp[p]=D_u[p];
__syncthreads();
int i=0;
int s=0;
while(i<threadIdx.x && s< blockIdx.x)
{
temp[p]=temp[p]-(temp[(s*dimension)+(k*(j/1000))+k] * ((temp[(j*dimension)+(i*(j/1000))+i])/temp[(j*dimension)+(j*(j/1000))+j]));
i++;
s++;
}
/* printf("outside1 Temp:%10f k:%d j:%d\n",temp[(k*dimension)+j],k,j);
float p=temp[(j*dimension)+k]/temp[(k*dimension)+k];
for(int i=(k+1);i<dimension;i++)
{
//printf("inside loop%d\n",i);
//printf("before Temp:%10f,j:%d i:%d\n",temp[(j*dimension)+i]);
temp[(j*dimension)+i]=temp[(j*dimension)+i]-(temp[(k*dimension)+i]*p);
//printf("after:Temp:%10f\n",temp[j*dimension+i]);
//printf("after j:%d i:%d",j,i);
}*/
__syncthreads();
D_u[p]=temp[p];
}
uint64_t getTime(){
struct timeval t;
gettimeofday(&t, NULL);
return (uint64_t)(t.tv_sec)*1000000 + (uint64_t)(t.tv_usec);
} |
b63c45256dce5239987ed90248f0acca481098ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: rows of output matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
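/*
 * The body of the row-parallel sparse-slice kernel is not included in this file; the
 * sketch below only illustrates the behaviour documented above. The name, the
 * double/int types and the 0-based inclusive bounds are assumptions, not the
 * original SystemML implementation; ret is assumed to be zero-initialised.
 */
__global__ void slice_sparse_dense_row_sketch(const double *inVal, const int *inRowPtr,
    const int *colInd, double *ret, int rl, int ru, int cl, int cu, int retClen) {
  int rowInd = rl + blockIdx.x * blockDim.x + threadIdx.x;  // one thread per output row
  if (rowInd > ru) return;
  // scan this CSR row and keep only the entries inside the requested column window
  for (int i = inRowPtr[rowInd]; i < inRowPtr[rowInd + 1]; i++) {
    int col = colInd[i];
    if (col >= cl && col <= cu)
      ret[(rowInd - rl) * retClen + (col - cl)] = inVal[i];
  }
}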
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: subset of number of non-zeroes of input matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is dense and the output matrix is dense.
*
* @params in dense input pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param inClen number of columns of input matrix
* @param retRlen number of rows of output matrix
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Does a copy of upper to lower triangle of the given matrix
* @param ret the input and output array allocated on the GPU
* @param dim the number of rows of the square matrix ret
* @param N total number of elements of the matrix
*/
extern "C"
extern "C"
__global__ void matrix_atan(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = atan(A[index]);
}
} | b63c45256dce5239987ed90248f0acca481098ce.cu | #include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: rows of output matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
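/*
 * The body of the row-parallel sparse-slice kernel is not included in this file; the
 * sketch below only illustrates the behaviour documented above. The name, the
 * double/int types and the 0-based inclusive bounds are assumptions, not the
 * original SystemML implementation; ret is assumed to be zero-initialised.
 */
__global__ void slice_sparse_dense_row_sketch(const double *inVal, const int *inRowPtr,
    const int *colInd, double *ret, int rl, int ru, int cl, int cu, int retClen) {
  int rowInd = rl + blockIdx.x * blockDim.x + threadIdx.x;  // one thread per output row
  if (rowInd > ru) return;
  // scan this CSR row and keep only the entries inside the requested column window
  for (int i = inRowPtr[rowInd]; i < inRowPtr[rowInd + 1]; i++) {
    int col = colInd[i];
    if (col >= cl && col <= cu)
      ret[(rowInd - rl) * retClen + (col - cl)] = inVal[i];
  }
}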
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: subset of number of non-zeroes of input matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is dense and the output matrix is dense.
*
* @params in dense input pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param inClen number of columns of input matrix
* @param retRlen number of rows of output matrix
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Does a copy of upper to lower triangle of the given matrix
* @param ret the input and output array allocated on the GPU
* @param dim the number of rows of the square matrix ret
* @param N total number of elements of the matrix
*/
extern "C"
extern "C"
__global__ void matrix_atan(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = atan(A[index]);
}
} |
7e5cb9f96a7c5cacec4d8de730116cfb6a1a719a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <rocblas.h>
#include "gpu_pc_v2_func.h"
void tree_gen(int** tree, int field, int rule){
for(int i = 0; i < field; i++){
tree[i][0] = rand() % 100;
int temp[rule];
temp[0] = tree[i][0];
for (int j = 1; j < rule; j++){
temp[j] = temp[j-1] + rand() % 20 + 1;
}
int temp_index = rule-1, tree_index = rule -1, level = log(rule+1) / log(2);
int step_index = level;
while (step_index >= 1){
int step = pow(2, (level - step_index + 1));
while (temp_index >= 0){
tree[i][tree_index] = temp[temp_index];
temp_index -= step;
tree_index--;
}
step_index--;
temp_index = rule - 1 - (pow(2, level - step_index) - 1);
}
}
cout<<"... Tree Gen ..."<<endl;
}
void header_gen(int** headers, int** tree, int field, int packet_num){
for (int i = 0; i < field; i++){
for(int j = 0; j < packet_num; j++){
headers[i][j] = rand() % 6000;
}
}
cout<<"... Header Gen ..."<<endl;
}
void bv_gen(long int** bv, long int* bv_final, int packet_num){
for (int i = 0; i < int_count; i++){
for (int j = 0; j < FIELD*(RULE+1); j++){
bv[j][i] = rand() % 1000000;
}
}
for(int i = 0; i < packet_num; i++){
bv_final[i] = -1;
}
cout<<"... BV Gen ..."<<endl;
}
void bv_gen_short(int* bv, int* bv_final, int packet_num){
for (int i = 0; i < FIELD*(RULE + 1)*int_count; i++){
bv[i] = rand() % 5;
}
for(int i = 0; i < packet_num; i++){
bv_final[i] = 1;
}
cout<<"... BV_Short Gen ..."<<endl;
}
void data_test(int** tree, int** headers, long int** bv, int* bv_final, int packet_num, int type){
if (type > 15 | type == 0){
return;
}
if (type % 2 == 1){
cout<<"Tree: "<<endl;
for(int i = 0; i < RULE; i++){
cout<<"Line: "<<i<<": ";
for(int j = 0; j < FIELD; j++){
cout<<tree[j][i]<<" ";
}
cout<<endl;
}
}
if (type % 4 == 2 | type % 4 == 3){
cout<<endl<<"Headers: "<<endl;
for(int i = 0; i < packet_num; i++){
cout<<"Header "<<i<<": ";
for(int j = 0; j < FIELD; j++){
cout<<headers[j][i]<<" ";
}
cout<<endl;
}
}
if (type % 8 == 4 | type % 8 == 5 | type % 8 == 6 | type % 8 == 7){
cout<<endl<<"bv: "<<endl;
for(int i = 0; i < ALLRULE; i++){
cout<<"Line "<<i<<": ";
for (int j = 0; j < FIELD*(RULE+1); j++){
cout<<bv[j][i]<<" ";
}
cout<<endl;
}
}
if (type > 7){
cout<<endl<<"bv_final: "<<endl;
for(int i = 0; i < packet_num; i++){
cout<<bv_final[i]<<" ";
}
cout<<endl;
}
cout<<"============== End of Print =============="<<endl;
}
__global__ void packet_classify(int* gpu_tree, int* gpu_headers, int* gpu_match_result, int packet_num){
__shared__ int gpu_tree_shared[FIELD*RULE];
int level = 0;
while(level * block_dim + threadIdx.x < FIELD * RULE){
gpu_tree_shared[level * block_dim + threadIdx.x] = gpu_tree[level * block_dim + threadIdx.x];
level++;
}
__syncthreads();
int i = 0;
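// Branch-free descent of the implicit binary range tree cached in shared memory:
// move to child 2*i+1 when the header value is <= the node key, otherwise to child
// 2*i+2; once i passes the internal nodes, i - RULE is the matched rule bucket.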
while (i < RULE){
i = 2 * i + (gpu_headers[blockDim.x * blockIdx.x + threadIdx.x] <= gpu_tree_shared[(blockDim.x * blockIdx.x + threadIdx.x) / packet_num * RULE+i]) * 1 + (gpu_headers[blockDim.x * blockIdx.x + threadIdx.x] > gpu_tree_shared[(blockDim.x * blockIdx.x + threadIdx.x) / packet_num * RULE+i]) * 2;
}
gpu_match_result[blockDim.x * blockIdx.x + threadIdx.x] = i - RULE;
}
__global__ void pc_short(int* gpu_tree, int* gpu_headers, int* gpu_bv, int* gpu_bv_final, int packet_num){
__shared__ int gpu_tree_shared[FIELD*RULE];
__shared__ int gpu_bv_shared[FIELD*(RULE+1)*int_count];
if (threadIdx.x < FIELD * RULE){
gpu_tree_shared[threadIdx.x] = gpu_tree[threadIdx.x];
}
if (threadIdx.x >= FIELD * RULE && threadIdx.x <= FIELD * (RULE + 1) * int_count){
gpu_bv_shared[threadIdx.x - FIELD * RULE] = gpu_bv[threadIdx.x - FIELD * RULE];
}
__syncthreads();
int index = blockDim.x*blockIdx.x + threadIdx.x;
int partial_result = 0xffffffff;
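// partial_result accumulates, for this packet, the AND of the bit-vector word of the
// rule matched in each of the FIELD search trees; it starts as all ones so every
// field can only clear bits.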
for (int j = 0; j < FIELD; j++){
int i = 0;
while (i < RULE){
i = 2 * i + (gpu_headers[index * FIELD + j] <= gpu_tree_shared[index % FIELD * RULE + i]) * 1 + (gpu_headers[index * FIELD + j] > gpu_tree_shared[index % FIELD * RULE + i]) * 2;
}
partial_result &= gpu_bv_shared[i - RULE];
}
gpu_bv_final[ index ] = partial_result;
}
__global__ void packet_merge(long int* gpu_bv, int* gpu_match_result, long int* gpu_merge_result, long int*gpu_bv_final, int packet_num){
int index = blockDim.x * blockIdx.x + threadIdx.x;
int packetIdx = index/int_count;
gpu_merge_result[index] = gpu_bv[gpu_match_result[packetIdx*15]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+1]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+2]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+3]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+4]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+5]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+6]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+7]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+8]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+9]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+10]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+11]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+12]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+13]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+14]*int_count + index%int_count];
__syncthreads();
if (blockDim.x * blockIdx.x + threadIdx.x < packet_num){
gpu_bv_final[blockDim.x*blockIdx.x+threadIdx.x] = gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)*int_count] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)*int_count+1] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+2] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+3] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+4] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+5] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+6] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+7] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+8] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)*int_count+9] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+10] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+11] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+12] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+13] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+14] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+15] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+16] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)*int_count+17] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+18] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+19] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+20] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+21] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+22] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+23] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+24] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)*int_count+25] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+26] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+27] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+28] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+29] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+30] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+31];
}
}
void merge(void* foo){
pthread_param_C* param = (pthread_param_C*) foo;
for (int i = 0; i < param->BATCH; i++){
//cout<<"[ Merge ] Thread: "<<param->thread_id<<", header # "<<i<<endl;
for (int j = 0; j < int_count; j++){
/*long int merge_partial = 0xffffffffffffffff;
for (int k = 0; k < FIELD; k++){
merge_partial &= param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + k]][j];
if (merge_partial == 0){
break;
}
}
if (merge_partial != 0){
param->merge_result[(param->thread_id * param->BATCH + i) * int_count + j] = merge_partial;
break;
}*/
param->merge_result[(param->thread_id * param->BATCH + i) * int_count + j] = param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 0]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 1]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 2]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 3]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 4]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 5]][j];/*
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 6]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 7]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 8]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 9]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 10]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 11]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 12]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 13]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 14]][j];*/
}
}
//cout<<"Thread "<<param->thread_id<<" finish!"<<endl;
}
void partial_merge(void* foo){
pthread_param_P* param = (pthread_param_P*) foo;
}
void final_merge(void* foo){
pthread_param_F* param = (pthread_param_F*) foo;
}
| 7e5cb9f96a7c5cacec4d8de730116cfb6a1a719a.cu | #include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <cublas.h>
#include "gpu_pc_v2_func.h"
void tree_gen(int** tree, int field, int rule){
for(int i = 0; i < field; i++){
tree[i][0] = rand() % 100;
int temp[rule];
temp[0] = tree[i][0];
for (int j = 1; j < rule; j++){
temp[j] = temp[j-1] + rand() % 20 + 1;
}
int temp_index = rule-1, tree_index = rule -1, level = log(rule+1) / log(2);
int step_index = level;
while (step_index >= 1){
int step = pow(2, (level - step_index + 1));
while (temp_index >= 0){
tree[i][tree_index] = temp[temp_index];
temp_index -= step;
tree_index--;
}
step_index--;
temp_index = rule - 1 - (pow(2, level - step_index) - 1);
}
}
cout<<"... Tree Gen ..."<<endl;
}
void header_gen(int** headers, int** tree, int field, int packet_num){
for (int i = 0; i < field; i++){
for(int j = 0; j < packet_num; j++){
headers[i][j] = rand() % 6000;
}
}
cout<<"... Header Gen ..."<<endl;
}
void bv_gen(long int** bv, long int* bv_final, int packet_num){
for (int i = 0; i < int_count; i++){
for (int j = 0; j < FIELD*(RULE+1); j++){
bv[j][i] = rand() % 1000000;
}
}
for(int i = 0; i < packet_num; i++){
bv_final[i] = -1;
}
cout<<"... BV Gen ..."<<endl;
}
void bv_gen_short(int* bv, int* bv_final, int packet_num){
for (int i = 0; i < FIELD*(RULE + 1)*int_count; i++){
bv[i] = rand() % 5;
}
for(int i = 0; i < packet_num; i++){
bv_final[i] = 1;
}
cout<<"... BV_Short Gen ..."<<endl;
}
void data_test(int** tree, int** headers, long int** bv, int* bv_final, int packet_num, int type){
if (type > 15 | type == 0){
return;
}
if (type % 2 == 1){
cout<<"Tree: "<<endl;
for(int i = 0; i < RULE; i++){
cout<<"Line: "<<i<<": ";
for(int j = 0; j < FIELD; j++){
cout<<tree[j][i]<<" ";
}
cout<<endl;
}
}
if (type % 4 == 2 | type % 4 == 3){
cout<<endl<<"Headers: "<<endl;
for(int i = 0; i < packet_num; i++){
cout<<"Header "<<i<<": ";
for(int j = 0; j < FIELD; j++){
cout<<headers[j][i]<<" ";
}
cout<<endl;
}
}
if (type % 8 == 4 | type % 8 == 5 | type % 8 == 6 | type % 8 == 7){
cout<<endl<<"bv: "<<endl;
for(int i = 0; i < ALLRULE; i++){
cout<<"Line "<<i<<": ";
for (int j = 0; j < FIELD*(RULE+1); j++){
cout<<bv[j][i]<<" ";
}
cout<<endl;
}
}
if (type > 7){
cout<<endl<<"bv_final: "<<endl;
for(int i = 0; i < packet_num; i++){
cout<<bv_final[i]<<" ";
}
cout<<endl;
}
cout<<"============== End of Print =============="<<endl;
}
__global__ void packet_classify(int* gpu_tree, int* gpu_headers, int* gpu_match_result, int packet_num){
__shared__ int gpu_tree_shared[FIELD*RULE];
int level = 0;
while(level * block_dim + threadIdx.x < FIELD * RULE){
gpu_tree_shared[level * block_dim + threadIdx.x] = gpu_tree[level * block_dim + threadIdx.x];
level++;
}
__syncthreads();
int i = 0;
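// Branch-free descent of the implicit binary range tree cached in shared memory:
// move to child 2*i+1 when the header value is <= the node key, otherwise to child
// 2*i+2; once i passes the internal nodes, i - RULE is the matched rule bucket.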
while (i < RULE){
i = 2 * i + (gpu_headers[blockDim.x * blockIdx.x + threadIdx.x] <= gpu_tree_shared[(blockDim.x * blockIdx.x + threadIdx.x) / packet_num * RULE+i]) * 1 + (gpu_headers[blockDim.x * blockIdx.x + threadIdx.x] > gpu_tree_shared[(blockDim.x * blockIdx.x + threadIdx.x) / packet_num * RULE+i]) * 2;
}
gpu_match_result[blockDim.x * blockIdx.x + threadIdx.x] = i - RULE;
}
__global__ void pc_short(int* gpu_tree, int* gpu_headers, int* gpu_bv, int* gpu_bv_final, int packet_num){
__shared__ int gpu_tree_shared[FIELD*RULE];
__shared__ int gpu_bv_shared[FIELD*(RULE+1)*int_count];
if (threadIdx.x < FIELD * RULE){
gpu_tree_shared[threadIdx.x] = gpu_tree[threadIdx.x];
}
if (threadIdx.x >= FIELD * RULE && threadIdx.x <= FIELD * (RULE + 1) * int_count){
gpu_bv_shared[threadIdx.x - FIELD * RULE] = gpu_bv[threadIdx.x - FIELD * RULE];
}
__syncthreads();
int index = blockDim.x*blockIdx.x + threadIdx.x;
int partial_result = 0xffffffff;
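// partial_result accumulates, for this packet, the AND of the bit-vector word of the
// rule matched in each of the FIELD search trees; it starts as all ones so every
// field can only clear bits.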
for (int j = 0; j < FIELD; j++){
int i = 0;
while (i < RULE){
i = 2 * i + (gpu_headers[index * FIELD + j] <= gpu_tree_shared[index % FIELD * RULE + i]) * 1 + (gpu_headers[index * FIELD + j] > gpu_tree_shared[index % FIELD * RULE + i]) * 2;
}
partial_result &= gpu_bv_shared[i - RULE];
}
gpu_bv_final[ index ] = partial_result;
}
__global__ void packet_merge(long int* gpu_bv, int* gpu_match_result, long int* gpu_merge_result, long int*gpu_bv_final, int packet_num){
int index = blockDim.x * blockIdx.x + threadIdx.x;
int packetIdx = index/int_count;
gpu_merge_result[index] = gpu_bv[gpu_match_result[packetIdx*15]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+1]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+2]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+3]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+4]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+5]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+6]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+7]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+8]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+9]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+10]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+11]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+12]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+13]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+14]*int_count + index%int_count];
__syncthreads();
if (blockDim.x * blockIdx.x + threadIdx.x < packet_num){
gpu_bv_final[blockDim.x*blockIdx.x+threadIdx.x] = gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)*int_count] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)*int_count+1] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+2] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+3] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+4] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+5] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+6] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+7] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+8] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)*int_count+9] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+10] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+11] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+12] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+13] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+14] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+15] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+16] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)*int_count+17] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+18] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+19] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+20] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+21] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+22] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+23] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+24] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)*int_count+25] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+26] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+27] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+28] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+29] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+30] &
gpu_merge_result[(blockDim.x*blockIdx.x+threadIdx.x)%int_count+31];
}
}
void merge(void* foo){
pthread_param_C* param = (pthread_param_C*) foo;
for (int i = 0; i < param->BATCH; i++){
//cout<<"[ Merge ] Thread: "<<param->thread_id<<", header # "<<i<<endl;
for (int j = 0; j < int_count; j++){
/*long int merge_partial = 0xffffffffffffffff;
for (int k = 0; k < FIELD; k++){
merge_partial &= param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + k]][j];
if (merge_partial == 0){
break;
}
}
if (merge_partial != 0){
param->merge_result[(param->thread_id * param->BATCH + i) * int_count + j] = merge_partial;
break;
}*/
param->merge_result[(param->thread_id * param->BATCH + i) * int_count + j] = param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 0]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 1]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 2]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 3]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 4]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 5]][j];/*
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 6]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 7]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 8]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 9]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 10]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 11]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 12]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 13]][j] &
param->merge_source[param->match_result[(param->thread_id * param->BATCH + i) * FIELD + 14]][j];*/
}
}
//cout<<"Thread "<<param->thread_id<<" finish!"<<endl;
}
void partial_merge(void* foo){
pthread_param_P* param = (pthread_param_P*) foo;
}
void final_merge(void* foo){
pthread_param_F* param = (pthread_param_F*) foo;
}
|
b8cfc8aaf9d6243a1a7a20269625cab50f99c2d8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#define DEBUGTITLE false
#define DEBUGVALUE false
#define EXPORT false
#define MAX 1000
#define MIN 1
#define MAXADJUST 12
#define MINADJUST 0
#define LIFEADJUST 1000
#define AMOUNTINTERACTION 100
#define AMOUNTTESTS 100
#define POW 20
typedef struct {
int id, generation,
life, actualLife,
strength,
speed, actualSpeed,
cDamage,
rate;
} Fighter;
static Fighter mainFighter;
void printFighter(Fighter data){
//printf("\n__%d__", i);
printf("\nID %d", data.id);
printf("\ngeneration %d", data.generation);
printf("\nlife %d/%d", data.actualLife, data.life);
printf("\nstrength %d", data.strength);
printf("\nspeed %d/%d", data.actualSpeed, data.speed);
printf("\ncDamage %d", data.cDamage);
printf("\nrate %d", data.rate);
printf("\n_____");
}
void printFighterExport(Fighter data){
//printf("\n__%d__", i);
printf("\n%d", data.id);
printf(";%d", data.generation);
printf(";%d", data.life);
printf(";%d", data.strength);
printf(";%d", data.speed);
printf(";%d", data.cDamage);
printf(";%d", data.rate);
}
int GetRandom(int min, int max){
return (int)(((float)rand()/RAND_MAX) * (max - min) + min);
}
int GetRandomNeg(){
int multi = 1;
if(GetRandom(0, 10) > 5){
multi = -1;
}
return GetRandom(MINADJUST, MAXADJUST) * multi;
}
int MaxMin(int value, int adjust){
if(MAX + adjust< value){
return MAX + adjust;
}
else if (MIN + adjust> value){
return MIN + adjust;
}
return value;
}
int GetSpeed(int life){
return MaxMin(MAX - life, 0);
}
void SetupMainFighter(){
mainFighter.id = -1;
mainFighter.generation = 0;
mainFighter.life = GetRandom(MIN, MAX) + LIFEADJUST;
mainFighter.strength = GetRandom(MIN, MAX);
mainFighter.speed = GetRandom(MIN, MAX);
mainFighter.cDamage = GetRandom(MIN, MAX);
mainFighter.actualLife = mainFighter.life;
mainFighter.actualSpeed = mainFighter.speed;
}
void CreateFighters(Fighter *data, int n) {
for (int i = 0; i < n; i++) {
data[i].id = i;
data[i].generation = 0;
data[i].life = GetRandom(MIN, MAX) + LIFEADJUST;
data[i].actualLife = data[i].life;
data[i].strength = GetRandom(MIN, MAX);
data[i].speed = GetRandom(MIN, MAX);
data[i].actualSpeed = data[i].speed;
data[i].cDamage = GetRandom(MIN, MAX);
}
}
void showFighters(Fighter *data, int n) {
printf("\nshowing fighters");
for (int i = 0; i < n; i++) {
printFighter(data[i]);
}
}
__device__
__host__
int get_damage(Fighter atk, Fighter target){
int str = atk.strength;
int atkSpeed = max(atk.actualSpeed, 1);
int targetSpeed = max(target.actualSpeed, 1);
int damage = (int)(str * ((float)atkSpeed / targetSpeed));
return damage;
}
__device__
__host__
int get_corruption(Fighter atk, Fighter target){
int cDam = atk.cDamage * 0.01f;
int atkLife = max(atk.actualLife, 1);
int targetLife = max(target.actualLife, 1);
int damage = (int)(cDam * ((float)atkLife / targetLife));
return damage;
}
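// Tournament round on the GPU: each thread takes fighters via a grid-stride loop,
// plays up to AMOUNTINTERACTION attack exchanges against its own by-value copy of
// mainFighter, and stores the absolute remaining-life gap in f[i].rate
// (a smaller rate means a closer fight).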
__global__
void fight(Fighter *f, int n, Fighter mainFighter) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int firstDamage;
int secondDamage;
int k = 0;
for(int i = index; i < n; i += stride)
{
while(k < AMOUNTINTERACTION && mainFighter.actualLife > 0 && f[i].actualLife > 0){
k++;
firstDamage = get_corruption(mainFighter, f[i]);
secondDamage = get_corruption(f[i], mainFighter);
f[i].actualLife -= get_damage(mainFighter, f[i]);
mainFighter.actualLife -= get_damage(f[i], mainFighter);
f[i].actualSpeed -= firstDamage;
mainFighter.actualSpeed -= secondDamage;
}
k = 0;
f[i].rate = abs(mainFighter.actualLife - f[i].actualLife);
}
}
int chooseWinner(Fighter *data, int index){
int first = abs(data[index].rate) ;
int second = abs(data[index + 1].rate) ;
#if DEBUGVALUE
printf("\nfirst: %d <> second %d: ", first, second);
#endif
if(first < second){
#if DEBUGVALUE
printf("\nchosen: %d id = %d", first, data[index].id);
#endif
return index;
}
else if(first > second){
#if DEBUGVALUE
printf("\nchosen: %d id = %d", second, data[index + 1].id);
#endif
return index + 1;
}
else{
int aux = 0;
if(GetRandom(0,2) > 0){
aux = 1;
}
return index + aux;
}
}
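// selectFighters keeps, from each adjacent pair, the fighter whose post-fight life gap
// to mainFighter was smaller, restores its life and speed, and compacts the winners to
// the front of the array, halving the bracket; Reproduce later refills the population
// with mutated copies of the surviving champion.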
void selectFighters(Fighter *data, int n) {
n /= 2;
#if DEBUGTITLE
printf("\nSelecting");
#endif
int aux = 0;
int index;
int start = 0;
if(n % 2 == 1){
start = 2;
data[aux] = data[0];
data[aux].actualLife = data[aux].life;
data[aux].actualSpeed = data[aux].speed;
aux++;
data[aux] = data[1];
data[aux].actualLife = data[aux].life;
data[aux].actualSpeed = data[aux].speed;
aux++;
}
for (int i = start; i < n; i++) {
index = i * 2;
data[aux] = data[chooseWinner(data, index)];
#if DEBUGVALUE
printf("\nindex = %d id = %d", aux, data[aux].id);
#endif
data[aux].actualLife = data[aux].life;
data[aux].actualSpeed = data[aux].speed;
#if DEBUGVALUE > 1
printFighter(data[aux]);
#endif
aux++;
}
}
Fighter copyFighter(Fighter father){
Fighter son;
son.id = father.id;
son.generation = father.generation;
son.life = father.life * 1;
son.actualLife = father.life * 1;
son.strength = father.strength * 1;
son.speed = father.speed * 1;
son.actualSpeed = father.speed * 1;
son.cDamage = father.cDamage * 1;
return son;
}
void Reproduce(Fighter *data, Fighter father, int n) {
#if DEBUGTITLE
printf("\nMultiplying father.rate: %d", father.rate);
#endif
data[0] = copyFighter(father);
for (int i = 1; i < n; i++) {
data[i].generation = father.generation + 1;
data[i].life = MaxMin(father.life + GetRandomNeg(), 0);
data[i].actualLife = data[i].life;
data[i].strength = MaxMin(father.strength + GetRandomNeg(), 0);
data[i].speed = MaxMin(father.speed + GetRandomNeg(), 0);
data[i].actualSpeed = data[i].speed;
data[i].cDamage = MaxMin(father.cDamage + GetRandomNeg(), 0);
#if DEBUGVALUE
printFighter(data[i]);
#endif
}
}
int main() {
int nMaxBodies = 2<<POW;
int nBodies = nMaxBodies;
int deviceId;
int numberOfSMs;
hipGetDevice(&deviceId);
hipDeviceGetAttribute(&numberOfSMs, hipDeviceAttributeMultiprocessorCount, deviceId);
size_t threadsPerBlock = 256;
size_t numberOfBlocks = 32 * numberOfSMs;
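// 256-thread blocks at 32 blocks per SM is a common occupancy-oriented sizing for
// grid-stride kernels; fight() strides over the whole population however many
// threads are actually launched.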
int bytes = nBodies * sizeof(Fighter);
Fighter *buf;
hipMallocManaged(&buf, bytes);
//hipMemPrefetchAsync(buf, bytes, deviceId);
CreateFighters(buf, nBodies);
Fighter champ;
SetupMainFighter();
printf("\nMIN;MAX;MIN;MINADJUST;MAXADJUST;AMOUNTINTERACTION;AMOUNTTESTS;MaxBodies");
printf("\n%d;%d;%d;%d;%d;%d;%d;%d",
MIN,MAX,MIN,MINADJUST,MAXADJUST,AMOUNTINTERACTION,AMOUNTTESTS,nMaxBodies);
#if EXPORT
printf("\nid;generation;life;strength;speed;cDamage;rate");
#endif
for (int t = 0; t < AMOUNTTESTS; t++){
while(nBodies > 2){
#if DEBUGTITLE
printf("\n###nBodies: %d###", nBodies);
#endif
hipLaunchKernelGGL(( fight), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, buf, nBodies, mainFighter);
hipError_t err = hipGetLastError();
if(err != hipSuccess) printf("Error: %s\n", hipGetErrorString(err));
hipError_t asyncErr = hipDeviceSynchronize();
if(asyncErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(asyncErr));
selectFighters(buf, nBodies);
nBodies = (int)(nBodies / 2) ;
nBodies += (nBodies % 2);
#if DEBUGTITLE
printf("\n#####");
#endif
}
champ = buf[chooseWinner(buf, 0)];
#if EXPORT
printFighterExport(champ);
//printFighter(champ);
#endif
if(champ.rate == 0){
break;
}
nBodies = nMaxBodies;
Reproduce(buf, champ, nBodies);
}
hipMemPrefetchAsync(buf, bytes, hipCpuDeviceId);
champ.actualLife = champ.life;
champ.actualSpeed = champ.speed;
//printFighter(champ);
//printFighter(mainFighter);
#if DEBUGVALUE || true
int firstDamage;
int secondDamage;
int k = 0;
printf("\nMain.life: %d <> Champ.life: %d", mainFighter.actualLife, champ.actualLife);
while(k < AMOUNTINTERACTION && mainFighter.actualLife > 0 && champ.actualLife > 0){
k++;
firstDamage = get_corruption(mainFighter, champ);
secondDamage = get_corruption(champ, mainFighter);
champ.actualLife -= get_damage(mainFighter, champ);
mainFighter.actualLife -= get_damage(champ, mainFighter);
champ.actualSpeed -= firstDamage;
mainFighter.actualSpeed -= secondDamage;
printf("\nMain.life: %d <> Champ.life: %d", mainFighter.actualLife, champ.actualLife);
}
#endif
hipFree(buf);
}
| b8cfc8aaf9d6243a1a7a20269625cab50f99c2d8.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#define DEBUGTITLE false
#define DEBUGVALUE false
#define EXPORT false
#define MAX 1000
#define MIN 1
#define MAXADJUST 12
#define MINADJUST 0
#define LIFEADJUST 1000
#define AMOUNTINTERACTION 100
#define AMOUNTTESTS 100
#define POW 20
typedef struct {
int id, generation,
life, actualLife,
strength,
speed, actualSpeed,
cDamage,
rate;
} Fighter;
static Fighter mainFighter;
void printFighter(Fighter data){
//printf("\n__%d__", i);
printf("\nID %d", data.id);
printf("\ngeneration %d", data.generation);
printf("\nlife %d/%d", data.actualLife, data.life);
printf("\nstrength %d", data.strength);
printf("\nspeed %d/%d", data.actualSpeed, data.speed);
printf("\ncDamage %d", data.cDamage);
printf("\nrate %d", data.rate);
printf("\n_____");
}
void printFighterExport(Fighter data){
//printf("\n__%d__", i);
printf("\n%d", data.id);
printf(";%d", data.generation);
printf(";%d", data.life);
printf(";%d", data.strength);
printf(";%d", data.speed);
printf(";%d", data.cDamage);
printf(";%d", data.rate);
}
int GetRandom(int min, int max){
return (int)(((float)rand()/RAND_MAX) * (max - min) + min);
}
int GetRandomNeg(){
int multi = 1;
if(GetRandom(0, 10) > 5){
multi = -1;
}
return GetRandom(MINADJUST, MAXADJUST) * multi;
}
int MaxMin(int value, int adjust){
if(MAX + adjust< value){
return MAX + adjust;
}
else if (MIN + adjust> value){
return MIN + adjust;
}
return value;
}
int GetSpeed(int life){
return MaxMin(MAX - life, 0);
}
void SetupMainFighter(){
mainFighter.id = -1;
mainFighter.generation = 0;
mainFighter.life = GetRandom(MIN, MAX) + LIFEADJUST;
mainFighter.strength = GetRandom(MIN, MAX);
mainFighter.speed = GetRandom(MIN, MAX);
mainFighter.cDamage = GetRandom(MIN, MAX);
mainFighter.actualLife = mainFighter.life;
mainFighter.actualSpeed = mainFighter.speed;
}
void CreateFighters(Fighter *data, int n) {
for (int i = 0; i < n; i++) {
data[i].id = i;
data[i].generation = 0;
data[i].life = GetRandom(MIN, MAX) + LIFEADJUST;
data[i].actualLife = data[i].life;
data[i].strength = GetRandom(MIN, MAX);
data[i].speed = GetRandom(MIN, MAX);
data[i].actualSpeed = data[i].speed;
data[i].cDamage = GetRandom(MIN, MAX);
}
}
void showFighters(Fighter *data, int n) {
printf("\nshowing fighters");
for (int i = 0; i < n; i++) {
printFighter(data[i]);
}
}
__device__
__host__
int get_damage(Fighter atk, Fighter target){
int str = atk.strength;
int atkSpeed = max(atk.actualSpeed, 1);
int targetSpeed = max(target.actualSpeed, 1);
int damage = (int)(str * ((float)atkSpeed / targetSpeed));
return damage;
}
__device__
__host__
int get_corruption(Fighter atk, Fighter target){
int cDam = atk.cDamage * 0.01f;
int atkLife = max(atk.actualLife, 1);
int targetLife = max(target.actualLife, 1);
int damage = (int)(cDam * ((float)atkLife / targetLife));
return damage;
}
__global__
void fight(Fighter *f, int n, Fighter mainFighter) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int firstDamage;
int secondDamage;
int k = 0;
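// Grid-stride loop: each thread steps through the population with stride blockDim*gridDim,
// so any launch configuration covers all n fighters.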
for(int i = index; i < n; i += stride)
{
while(k < AMOUNTINTERACTION && mainFighter.actualLife > 0 && f[i].actualLife > 0){
k++;
firstDamage = get_corruption(mainFighter, f[i]);
secondDamage = get_corruption(f[i], mainFighter);
f[i].actualLife -= get_damage(mainFighter, f[i]);
mainFighter.actualLife -= get_damage(f[i], mainFighter);
f[i].actualSpeed -= firstDamage;
mainFighter.actualSpeed -= secondDamage;
}
k = 0;
f[i].rate = abs(mainFighter.actualLife - f[i].actualLife);
}
}
int chooseWinner(Fighter *data, int index){
int first = abs(data[index].rate) ;
int second = abs(data[index + 1].rate) ;
#if DEBUGVALUE
printf("\nfirst: %d <> second %d: ", first, second);
#endif
if(first < second){
#if DEBUGVALUE
printf("\nchosen: %d id = %d", first, data[index].id);
#endif
return index;
}
else if(first > second){
#if DEBUGVALUE
printf("\nchosen: %d id = %d", second, data[index + 1].id);
#endif
return index + 1;
}
else{
int aux = 0;
if(GetRandom(0,2) > 0){
aux = 1;
}
return index + aux;
}
}
void selectFighters(Fighter *data, int n) {
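// Tournament selection: adjacent fighters (2*i, 2*i+1) are compared and the one whose duel
// ended closest to a draw (smallest |rate|) is compacted into the front half of the array.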
n /= 2;
#if DEBUGTITLE
printf("\nSelecting");
#endif
int aux = 0;
int index;
int start = 0;
if(n % 2 == 1){
start = 2;
data[aux] = data[0];
data[aux].actualLife = data[aux].life;
data[aux].actualSpeed = data[aux].speed;
aux++;
data[aux] = data[1];
data[aux].actualLife = data[aux].life;
data[aux].actualSpeed = data[aux].speed;
aux++;
}
for (int i = start; i < n; i++) {
index = i * 2;
data[aux] = data[chooseWinner(data, index)];
#if DEBUGVALUE
printf("\nindex = %d id = %d", aux, data[aux].id);
#endif
data[aux].actualLife = data[aux].life;
data[aux].actualSpeed = data[aux].speed;
#if DEBUGVALUE > 1
printFighter(data[aux]);
#endif
aux++;
}
}
Fighter copyFighter(Fighter father){
Fighter son;
son.id = father.id;
son.generation = father.generation;
son.life = father.life * 1;
son.actualLife = father.life * 1;
son.strength = father.strength * 1;
son.speed = father.speed * 1;
son.actualSpeed = father.speed * 1;
son.cDamage = father.cDamage * 1;
return son;
}
void Reproduce(Fighter *data, Fighter father, int n) {
#if DEBUGTITLE
printf("\nMultipling father.rate: %d", father.rate);
#endif
data[0] = copyFighter(father);
for (int i = 1; i < n; i++) {
data[i].generation = father.generation + 1;
data[i].life = MaxMin(father.life + GetRandomNeg(), 0);
data[i].actualLife = data[i].life;
data[i].strength = MaxMin(father.strength + GetRandomNeg(), 0);
data[i].speed = MaxMin(father.speed + GetRandomNeg(), 0);
data[i].actualSpeed = data[i].speed;
data[i].cDamage = MaxMin(father.cDamage + GetRandomNeg(), 0);
#if DEBUGVALUE
printFighter(data[i]);
#endif
}
}
int main() {
int nMaxBodies = 2<<POW;
int nBodies = nMaxBodies;
int deviceId;
int numberOfSMs;
cudaGetDevice(&deviceId);
cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);
size_t threadsPerBlock = 256;
size_t numberOfBlocks = 32 * numberOfSMs;
int bytes = nBodies * sizeof(Fighter);
Fighter *buf;
cudaMallocManaged(&buf, bytes);
//cudaMemPrefetchAsync(buf, bytes, deviceId);
CreateFighters(buf, nBodies);
Fighter champ;
SetupMainFighter();
printf("\nMIN;MAX;MIN;MINADJUST;MAXADJUST;AMOUNTINTERACTION;AMOUNTTESTS;MaxBodies");
printf("\n%d;%d;%d;%d;%d;%d;%d;%d",
MIN,MAX,MIN,MINADJUST,MAXADJUST,AMOUNTINTERACTION,AMOUNTTESTS,nMaxBodies);
#if EXPORT
printf("\nid;generation;life;strength;speed;cDamage;rate");
#endif
for (int t = 0; t < AMOUNTTESTS; t++){
while(nBodies > 2){
#if DEBUGTITLE
printf("\n###nBodies: %d###", nBodies);
#endif
fight<<<numberOfBlocks, threadsPerBlock>>>(buf, nBodies, mainFighter);
cudaError_t err = cudaGetLastError();
if(err != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(err));
cudaError_t asyncErr = cudaDeviceSynchronize();
if(asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr));
selectFighters(buf, nBodies);
nBodies = (int)(nBodies / 2) ;
nBodies += (nBodies % 2);
#if DEBUGTITLE
printf("\n#####");
#endif
}
champ = buf[chooseWinner(buf, 0)];
#if EXPORT
printFighterExport(champ);
//printFighter(champ);
#endif
if(champ.rate == 0){
break;
}
nBodies = nMaxBodies;
Reproduce(buf, champ, nBodies);
}
cudaMemPrefetchAsync(buf, bytes, cudaCpuDeviceId);
champ.actualLife = champ.life;
champ.actualSpeed = champ.speed;
//printFighter(champ);
//printFighter(mainFighter);
#if DEBUGVALUE || true
int firstDamage;
int secondDamage;
int k = 0;
printf("\nMain.life: %d <> Champ.life: %d", mainFighter.actualLife, champ.actualLife);
while(k < AMOUNTINTERACTION && mainFighter.actualLife > 0 && champ.actualLife > 0){
k++;
firstDamage = get_corruption(mainFighter, champ);
secondDamage = get_corruption(champ, mainFighter);
champ.actualLife -= get_damage(mainFighter, champ);
mainFighter.actualLife -= get_damage(champ, mainFighter);
champ.actualSpeed -= firstDamage;
mainFighter.actualSpeed -= secondDamage;
printf("\nMain.life: %d <> Champ.life: %d", mainFighter.actualLife, champ.actualLife);
}
#endif
cudaFree(buf);
}
|
d2ab261f028caab1f9903e4ddf9910b5e610a0bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Tencent is pleased to support the open source community by making TNN available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include <cub/block/block_store.cuh>
#include <hipcub/hipcub.hpp>
#include <cub/block/block_radix_sort.cuh>
#include "tnn/device/cuda/acc/cuda_layer_acc.h"
#include "tnn/utils/dims_vector_utils.h"
namespace TNN_NS {
DECLARE_CUDA_ACC(InstanceNorm, LAYER_INST_BATCH_NORM);
template<int THREAD_PER_BLOCK, typename T>
__global__ void instance_norm_kernel(const T * input, T* output, const float * gamma,
const float * beta, const int size, const int batch_size, const int C, const float eps) {
__shared__ double ssum1[THREAD_PER_BLOCK/32];
__shared__ double ssum2[THREAD_PER_BLOCK/32];
__shared__ float k;
__shared__ float b;
// const int batch_offset = blockIdx.y * size;
const int block_offset = blockIdx.x * size;
const T * ptr = input + block_offset;
T * dst = output + block_offset;
const int cid = blockIdx.x % C;
double thread_sum1 = 0.f;
double thread_sum2 = 0.f;
for (int i = threadIdx.x; i < size; i+=THREAD_PER_BLOCK) {
float value = get_float_value<T>(ptr[i]);
thread_sum1 += value;
thread_sum2 += value * value;
}
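// Intra-warp tree reduction: each __shfl_down_sync halves the number of active lanes,
// leaving every warp's partial sums in its lane 0.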
thread_sum1 += __shfl_down_sync(0xffffffff, thread_sum1, 16, 32);
thread_sum1 += __shfl_down_sync(0x0000ffff, thread_sum1, 8, 16);
thread_sum1 += __shfl_down_sync(0x000000ff, thread_sum1, 4, 8);
thread_sum1 += __shfl_down_sync(0x0000000f, thread_sum1, 2, 4);
thread_sum1 += __shfl_down_sync(0x00000003, thread_sum1, 1, 2);
thread_sum2 += __shfl_down_sync(0xffffffff, thread_sum2, 16, 32);
thread_sum2 += __shfl_down_sync(0x0000ffff, thread_sum2, 8, 16);
thread_sum2 += __shfl_down_sync(0x000000ff, thread_sum2, 4, 8);
thread_sum2 += __shfl_down_sync(0x0000000f, thread_sum2, 2, 4);
thread_sum2 += __shfl_down_sync(0x00000003, thread_sum2, 1, 2);
if (threadIdx.x % 32 == 0) {
ssum1[threadIdx.x / 32] = thread_sum1;
ssum2[threadIdx.x / 32] = thread_sum2;
}
__syncthreads();
if (threadIdx.x < blockDim.x / 32) {
thread_sum1 = ssum1[threadIdx.x];
thread_sum2 = ssum2[threadIdx.x];
} else {
thread_sum1 = 0;
thread_sum2 = 0;
}
thread_sum1 += __shfl_down_sync(0x0000000f, thread_sum1, 2, 4);
thread_sum1 += __shfl_down_sync(0x00000003, thread_sum1, 1, 2);
thread_sum2 += __shfl_down_sync(0x0000000f, thread_sum2, 2, 4);
thread_sum2 += __shfl_down_sync(0x00000003, thread_sum2, 1, 2);
if (threadIdx.x == 0) {
double mean = thread_sum1 / size;
double var = thread_sum2 / size - mean * mean;
k = gamma[cid] / sqrt(var + eps);
b = - mean * k + beta[cid];
}
__syncthreads();
#pragma unroll(4)
for (int i = threadIdx.x; i < size; i += THREAD_PER_BLOCK) {
dst[i] = convert_float_value<T>((get_float_value<T>(ptr[i]) * k + b));
}
}
Status CudaInstanceNormLayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource,
const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
Status ret = CudaLayerAcc::Init(context, param, resource, inputs, outputs);
if (ret != TNN_OK) {
return ret;
}
auto res = dynamic_cast<InstanceNormLayerResource *>(resource);
if (!res) {
LOGE("Error: InstanceNormLayerResource is nil\n");
return Status(TNNERR_MODEL_ERR, "Error: InstanceNormLayerResource is nil");
}
float *k_data = res->scale_handle.force_to<float *>();
int k_size = res->scale_handle.GetBytesSize();
float *b_data = res->bias_handle.force_to<float *>();
int b_size = res->bias_handle.GetBytesSize();
CreateTempBuf(k_size);
CreateTempBuf(b_size);
hipMemcpyAsync(tempbufs_[0].ptr, k_data, k_size, hipMemcpyHostToDevice, context_->GetStream());
hipMemcpyAsync(tempbufs_[1].ptr, b_data, b_size, hipMemcpyHostToDevice, context_->GetStream());
return TNN_OK;
}
Status CudaInstanceNormLayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
return TNN_OK;
}
Status CudaInstanceNormLayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
Blob *input_blob = inputs[0];
Blob *output_blob = outputs[0];
auto dims = input_blob->GetBlobDesc().dims;
int num = dims[0];
int channels = dims[1];
int height = dims[2];
int width = dims[3];
int count = DimsVectorUtils::Count(dims);
int hw = height * width;
void* input_data = input_blob->GetHandle().base;
void* output_data = output_blob->GetHandle().base;
const int THREAD_PER_BLOCK = 128;
dim3 griddim;
griddim.x = channels * num;
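// griddim.x = N*C: one thread block per (batch, channel) plane; each block reduces its own H*W elements.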
if (input_blob->GetBlobDesc().data_type == DATA_TYPE_FLOAT) {
hipLaunchKernelGGL(( instance_norm_kernel<THREAD_PER_BLOCK, float>), dim3(griddim), dim3(THREAD_PER_BLOCK), 0, context_->GetStream(), (float*)input_data,
(float*)output_data, (const float *)tempbufs_[0].ptr, (const float *)tempbufs_[1].ptr, hw, channels * num, channels, 1e-5);
} else if (input_blob->GetBlobDesc().data_type == DATA_TYPE_HALF) {
hipLaunchKernelGGL(( instance_norm_kernel<THREAD_PER_BLOCK, __half>), dim3(griddim), dim3(THREAD_PER_BLOCK), 0, context_->GetStream(), (__half*)input_data,
(__half*)output_data, (const float *)tempbufs_[0].ptr, (const float *)tempbufs_[1].ptr, hw, channels * num, channels, 1e-5);
} else {
LOGE("Error: layer acc dont support datatype: %d\n", input_blob->GetBlobDesc().data_type);
return Status(TNNERR_MODEL_ERR, "Error: layer acc don't support datatype");
}
return TNN_OK;
}
REGISTER_CUDA_ACC(InstanceNorm, LAYER_INST_BATCH_NORM);
} // namespace TNN_NS
| d2ab261f028caab1f9903e4ddf9910b5e610a0bf.cu | // Tencent is pleased to support the open source community by making TNN available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include <cub/cub.cuh>
#include <cub/block/block_load.cuh>
#include <cub/block/block_store.cuh>
#include <cub/block/block_reduce.cuh>
#include <cub/block/block_radix_sort.cuh>
#include "tnn/device/cuda/acc/cuda_layer_acc.h"
#include "tnn/utils/dims_vector_utils.h"
namespace TNN_NS {
DECLARE_CUDA_ACC(InstanceNorm, LAYER_INST_BATCH_NORM);
template<int THREAD_PER_BLOCK, typename T>
__global__ void instance_norm_kernel(const T * input, T* output, const float * gamma,
const float * beta, const int size, const int batch_size, const int C, const float eps) {
__shared__ double ssum1[THREAD_PER_BLOCK/32];
__shared__ double ssum2[THREAD_PER_BLOCK/32];
__shared__ float k;
__shared__ float b;
// const int batch_offset = blockIdx.y * size;
const int block_offset = blockIdx.x * size;
const T * ptr = input + block_offset;
T * dst = output + block_offset;
const int cid = blockIdx.x % C;
double thread_sum1 = 0.f;
double thread_sum2 = 0.f;
for (int i = threadIdx.x; i < size; i+=THREAD_PER_BLOCK) {
float value = get_float_value<T>(ptr[i]);
thread_sum1 += value;
thread_sum2 += value * value;
}
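// Intra-warp tree reduction: each __shfl_down_sync halves the number of active lanes,
// leaving every warp's partial sums in its lane 0.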
thread_sum1 += __shfl_down_sync(0xffffffff, thread_sum1, 16, 32);
thread_sum1 += __shfl_down_sync(0x0000ffff, thread_sum1, 8, 16);
thread_sum1 += __shfl_down_sync(0x000000ff, thread_sum1, 4, 8);
thread_sum1 += __shfl_down_sync(0x0000000f, thread_sum1, 2, 4);
thread_sum1 += __shfl_down_sync(0x00000003, thread_sum1, 1, 2);
thread_sum2 += __shfl_down_sync(0xffffffff, thread_sum2, 16, 32);
thread_sum2 += __shfl_down_sync(0x0000ffff, thread_sum2, 8, 16);
thread_sum2 += __shfl_down_sync(0x000000ff, thread_sum2, 4, 8);
thread_sum2 += __shfl_down_sync(0x0000000f, thread_sum2, 2, 4);
thread_sum2 += __shfl_down_sync(0x00000003, thread_sum2, 1, 2);
if (threadIdx.x % 32 == 0) {
ssum1[threadIdx.x / 32] = thread_sum1;
ssum2[threadIdx.x / 32] = thread_sum2;
}
__syncthreads();
if (threadIdx.x < blockDim.x / 32) {
thread_sum1 = ssum1[threadIdx.x];
thread_sum2 = ssum2[threadIdx.x];
} else {
thread_sum1 = 0;
thread_sum2 = 0;
}
thread_sum1 += __shfl_down_sync(0x0000000f, thread_sum1, 2, 4);
thread_sum1 += __shfl_down_sync(0x00000003, thread_sum1, 1, 2);
thread_sum2 += __shfl_down_sync(0x0000000f, thread_sum2, 2, 4);
thread_sum2 += __shfl_down_sync(0x00000003, thread_sum2, 1, 2);
if (threadIdx.x == 0) {
double mean = thread_sum1 / size;
double var = thread_sum2 / size - mean * mean;
k = gamma[cid] / sqrt(var + eps);
b = - mean * k + beta[cid];
}
__syncthreads();
#pragma unroll(4)
for (int i = threadIdx.x; i < size; i += THREAD_PER_BLOCK) {
dst[i] = convert_float_value<T>((get_float_value<T>(ptr[i]) * k + b));
}
}
Status CudaInstanceNormLayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource,
const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
Status ret = CudaLayerAcc::Init(context, param, resource, inputs, outputs);
if (ret != TNN_OK) {
return ret;
}
auto res = dynamic_cast<InstanceNormLayerResource *>(resource);
if (!res) {
LOGE("Error: InstanceNormLayerResource is nil\n");
return Status(TNNERR_MODEL_ERR, "Error: InstanceNormLayerResource is nil");
}
float *k_data = res->scale_handle.force_to<float *>();
int k_size = res->scale_handle.GetBytesSize();
float *b_data = res->bias_handle.force_to<float *>();
int b_size = res->bias_handle.GetBytesSize();
CreateTempBuf(k_size);
CreateTempBuf(b_size);
cudaMemcpyAsync(tempbufs_[0].ptr, k_data, k_size, cudaMemcpyHostToDevice, context_->GetStream());
cudaMemcpyAsync(tempbufs_[1].ptr, b_data, b_size, cudaMemcpyHostToDevice, context_->GetStream());
return TNN_OK;
}
Status CudaInstanceNormLayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
return TNN_OK;
}
Status CudaInstanceNormLayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
Blob *input_blob = inputs[0];
Blob *output_blob = outputs[0];
auto dims = input_blob->GetBlobDesc().dims;
int num = dims[0];
int channels = dims[1];
int height = dims[2];
int width = dims[3];
int count = DimsVectorUtils::Count(dims);
int hw = height * width;
void* input_data = input_blob->GetHandle().base;
void* output_data = output_blob->GetHandle().base;
const int THREAD_PER_BLOCK = 128;
dim3 griddim;
griddim.x = channels * num;
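// griddim.x = N*C: one thread block per (batch, channel) plane; each block reduces its own H*W elements.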
if (input_blob->GetBlobDesc().data_type == DATA_TYPE_FLOAT) {
instance_norm_kernel<THREAD_PER_BLOCK, float><<<griddim, THREAD_PER_BLOCK, 0, context_->GetStream()>>>((float*)input_data,
(float*)output_data, (const float *)tempbufs_[0].ptr, (const float *)tempbufs_[1].ptr, hw, channels * num, channels, 1e-5);
} else if (input_blob->GetBlobDesc().data_type == DATA_TYPE_HALF) {
instance_norm_kernel<THREAD_PER_BLOCK, __half><<<griddim, THREAD_PER_BLOCK, 0, context_->GetStream()>>>((__half*)input_data,
(__half*)output_data, (const float *)tempbufs_[0].ptr, (const float *)tempbufs_[1].ptr, hw, channels * num, channels, 1e-5);
} else {
LOGE("Error: layer acc dont support datatype: %d\n", input_blob->GetBlobDesc().data_type);
return Status(TNNERR_MODEL_ERR, "Error: layer acc don't support datatype");
}
return TNN_OK;
}
REGISTER_CUDA_ACC(InstanceNorm, LAYER_INST_BATCH_NORM);
} // namespace TNN_NS
|
5f9f8ada43e6c93ae6c8a01dd64613963c1fe622.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <helper_cuda.h>
__global__ void iota(float *a)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
a[i] = i;
}
int main(int argc, char *argv[])
{
int numElements = 1e+8;
// Allocate vector a in device memory.
float *d_a;
checkCudaErrors(hipMalloc((void **)&d_a, sizeof(float) * numElements));
// Determine the number of threads per block and the number of blocks per grid.
int numThreadsPerBlock = 256;
int numBlocksPerGrid = (numElements + numThreadsPerBlock - 1) / numThreadsPerBlock;
// Invoke the kernel on device asynchronously.
hipLaunchKernelGGL(( iota), dim3(numBlocksPerGrid), dim3(numThreadsPerBlock), 0, 0, d_a);
// Cleanup.
checkCudaErrors(hipFree(d_a));
checkCudaErrors(hipDeviceReset());
}
| 5f9f8ada43e6c93ae6c8a01dd64613963c1fe622.cu | #include <helper_cuda.h>
__global__ void iota(float *a)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
a[i] = i;
}
int main(int argc, char *argv[])
{
int numElements = 1e+8;
// Allocate vector a in device memory.
float *d_a;
checkCudaErrors(cudaMalloc((void **)&d_a, sizeof(float) * numElements));
// Determine the number of threads per block and the number of blocks per grid.
int numThreadsPerBlock = 256;
int numBlocksPerGrid = (numElements + numThreadsPerBlock - 1) / numThreadsPerBlock;
// Invoke the kernel on device asynchronously.
iota<<<numBlocksPerGrid, numThreadsPerBlock>>>(d_a);
// Cleanup.
checkCudaErrors(cudaFree(d_a));
checkCudaErrors(cudaDeviceReset());
}
|
d04845a831023b8bf4f17b9f73896812d396d515.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zherk_fermi_batched_k32.cu normal z -> d, Sat Nov 15 19:53:59 2014
@author Jakub Kurzak
@author Stan Tomov
@author Mark Gates
@author Azzam Haidar
[zcds]gemm_fermi.cu defines the CPU driver.
[zcds]gemm_fermi_kernels.h defines the block sizes for each precision.
gemm_stencil_defs.h defines types and functions for precision-independent code.
These files are included multiple times, once for each transpose version.
herk_stencil.cuh defines the GPU kernel (device function).
herk_kernel_batched.cuh defines the GPU kernel (global function).
The batched version uses herk_kernel_batched.cuh instead of herk_kernel.cuh.
*/
#include "common_magma.h"
#include "commonblas_d.h"
#define PRECISION_d
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "dgemm_fermi_kernels_batched_k32.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
DSYRK performs one of the symmetric rank k operations
C := alpha*A*A**H + beta*C,
or
C := alpha*A**H*A + beta*C,
where alpha and beta are real scalars, C is an n by n symmetric
matrix and A is an n by k matrix in the first case and a k by n
matrix in the second case.
Parameters
----------
@param[in]
uplo CHARACTER*1.
On entry, uplo specifies whether the upper or lower
triangular part of the array C is to be referenced as
follows:
uplo = 'U' or 'u' Only the upper triangular part of C
is to be referenced.
uplo = 'L' or 'l' Only the lower triangular part of C
is to be referenced.
@param[in]
trans CHARACTER*1.
On entry, trans specifies the operation to be performed as
follows:
trans = 'N' or 'n' C := alpha*A*A**H + beta*C.
trans = 'C' or 'c' C := alpha*A**H*A + beta*C.
@param[in]
n INTEGER.
On entry, specifies the order of the matrix C. N must be
at least zero.
@param[in]
k INTEGER.
On entry with trans = 'N' or 'n', k specifies the number
of columns of the matrix A, and on entry with
trans = 'C' or 'c', k specifies the number of rows of the
matrix A. K must be at least zero.
@param[in]
alpha DOUBLE PRECISION
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA DOUBLE_PRECISION array of DIMENSION ( ldda, ka ), where ka is
k when trans = MagmaNoTrans, and is n otherwise.
Before entry with trans = MagmaNoTrans, the leading m by k
part of the array dA must contain the matrix dA, otherwise
the leading k by m part of the array dA must contain the
matrix dA.
@param[in]
ldda INTEGER.
On entry, ldda specifies the first dimension of A as declared
in the calling (sub) program. When trans = MagmaNoTrans then
ldda must be at least max( 1, n ), otherwise ldda must be at
least max( 1, k ).
@param[in]
beta DOUBLE PRECISION.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then dC need not be set on input.
@param[in,out]
dC DOUBLE_PRECISION array of DIMENSION ( lddc, n ).
Before entry with uplo = 'U' or 'u', the leading n by n
upper triangular part of the array C must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of C is not referenced. On exit, the
upper triangular part of the array C is overwritten by the
upper triangular part of the updated matrix.
Before entry with uplo = 'L' or 'l', the leading n by n
lower triangular part of the array C must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of C is not referenced. On exit, the
lower triangular part of the array C is overwritten by the
lower triangular part of the updated matrix.
Note that the imaginary parts of the diagonal elements need
not be set, they are assumed to be zero, and on exit they
are set to zero.
@param[in]
lddc INTEGER.
On entry, lddc specifies the first dimension of dC as declared
in the calling (sub) program. lddc must be at least
max( 1, m ).
@ingroup magma_dblas3
********************************************************************/
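/*
    Illustrative host-side call sketch (dA_array/dC_array are hypothetical device arrays of
    batchCount pointers, not symbols defined in this file):
        magmablas_dsyrk_batched_k32( MagmaLower, MagmaNoTrans, n, k,
                                     alpha, dA_array, ldda,
                                     beta,  dC_array, lddc, batchCount );
*/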
extern "C" void
magmablas_dsyrk_batched_k32(
magma_uplo_t uplo, magma_trans_t trans, magma_int_t n, magma_int_t k,
double alpha,
double const * const * dA_array, magma_int_t ldda,
double beta,
double **dC_array, magma_int_t lddc, magma_int_t batchCount )
{
double cbeta = MAGMA_D_MAKE( beta, 0. );
double calpha = MAGMA_D_MAKE( alpha, 0. );
magma_int_t info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower )
info = -1;
else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -2;
else if ( n < 0 )
info = -3;
else if ( k < 0 )
info = -4;
else if ( trans == MagmaNoTrans ? ldda < n : ldda < k )
info = -7;
else if ( lddc < n )
info = -10;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
printf("not supported \n"); // TODO call cublas
return;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
if ( n <= 0 || k <= 0 )
return;
size_t offsetA = 0;
int TransA = 0, TransB = 0, uploA = 0;
if ( uplo == MagmaLower )
uploA = 1;
else if ( uplo == MagmaUpper )
uploA = 2;
if ( trans == MagmaNoTrans )
#if defined(PRECISION_z) || defined(PRECISION_c)
TransB = 2;
#else
TransB = 1;
#endif
else if ( trans == MagmaTrans || trans == MagmaConjTrans)
#if defined(PRECISION_z) || defined(PRECISION_c)
TransA = 2;
#else
TransA = 1;
#endif
#ifdef TEXTURE_1D
size_t sizeA = (size_t) ldda * (size_t) (!TransA ? k : n);
size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512);
if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE )
{
printf("not supported \n"); // TODO call cublas
return;
}
// Set textures parameters
tex_ref_A.normalized = false;
tex_ref_A.filterMode = hipFilterModePoint;
tex_ref_A.addressMode[0] = hipAddressModeClamp;
// Bind A and B to texture references
hipError_t err;
err = hipBindTexture(&offsetA, tex_ref_A, dA_array[0], sizeA*sizeof(double));
if ( err != hipSuccess ) {
fprintf( stderr, "cannot bind A to texture: %s (%d)\n", hipGetErrorString(err), err );
return;
}
#endif
// Set up grids
dim3 dimBlock(DIM_X, DIM_Y);
offsetA = offsetA/sizeof(double);
if ( TransA == 0 && TransB == 1 ) {
dim3 dimGrid( (n - 1)/BLK_M_nt + 1,
(n - 1)/BLK_N_nt + 1 ,
batchCount );
hipLaunchKernelGGL(( magmablas_d_herk_kernel_fermi_nt_batched), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta,
(int)offsetA, (int)offsetA );
}
else if ( TransA == 0 && TransB == 2 ) {
dim3 dimGrid( (n - 1)/BLK_M_nc + 1,
(n - 1)/BLK_N_nc + 1 ,
batchCount );
hipLaunchKernelGGL(( magmablas_d_herk_kernel_fermi_nc_batched), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta,
(int)offsetA, (int)offsetA );
}
else if ( TransA == 1 && TransB == 0 ) {
dim3 dimGrid( (n - 1)/BLK_M_tn + 1,
(n - 1)/BLK_N_tn + 1 ,
batchCount );
hipLaunchKernelGGL(( magmablas_d_herk_kernel_fermi_tn_batched), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta,
(int)offsetA, (int)offsetA );
}
else if ( TransA == 2 && TransB == 0 ) {
dim3 dimGrid( (n - 1)/BLK_M_cn + 1,
(n - 1)/BLK_N_cn + 1 ,
batchCount );
hipLaunchKernelGGL(( magmablas_d_herk_kernel_fermi_cn_batched), dim3(dimGrid), dim3(dimBlock), 0, magma_stream ,
uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta,
(int)offsetA, (int)offsetA );
}
#ifdef TEXTURE_1D
hipUnbindTexture( tex_ref_A );
#endif
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| d04845a831023b8bf4f17b9f73896812d396d515.cu | /*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zherk_fermi_batched_k32.cu normal z -> d, Sat Nov 15 19:53:59 2014
@author Jakub Kurzak
@author Stan Tomov
@author Mark Gates
@author Azzam Haidar
[zcds]gemm_fermi.cu defines the CPU driver.
[zcds]gemm_fermi_kernels.h defines the block sizes for each precision.
gemm_stencil_defs.h defines types and functions for precision-independent code.
These files are included multiple times, once for each transpose version.
herk_stencil.cuh defines the GPU kernel (device function).
herk_kernel_batched.cuh defines the GPU kernel (global function).
The batched version uses herk_kernel_batched.cuh instead of herk_kernel.cuh.
*/
#include "common_magma.h"
#include "commonblas_d.h"
#define PRECISION_d
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "dgemm_fermi_kernels_batched_k32.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
DSYRK performs one of the symmetric rank k operations
C := alpha*A*A**H + beta*C,
or
C := alpha*A**H*A + beta*C,
where alpha and beta are real scalars, C is an n by n symmetric
matrix and A is an n by k matrix in the first case and a k by n
matrix in the second case.
Parameters
----------
@param[in]
uplo CHARACTER*1.
On entry, uplo specifies whether the upper or lower
triangular part of the array C is to be referenced as
follows:
uplo = 'U' or 'u' Only the upper triangular part of C
is to be referenced.
uplo = 'L' or 'l' Only the lower triangular part of C
is to be referenced.
@param[in]
trans CHARACTER*1.
On entry, trans specifies the operation to be performed as
follows:
trans = 'N' or 'n' C := alpha*A*A**H + beta*C.
trans = 'C' or 'c' C := alpha*A**H*A + beta*C.
@param[in]
n INTEGER.
On entry, specifies the order of the matrix C. N must be
at least zero.
@param[in]
k INTEGER.
On entry with trans = 'N' or 'n', k specifies the number
of columns of the matrix A, and on entry with
trans = 'C' or 'c', k specifies the number of rows of the
matrix A. K must be at least zero.
@param[in]
alpha DOUBLE PRECISION
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA DOUBLE_PRECISION array of DIMENSION ( ldda, ka ), where ka is
k when trans = MagmaNoTrans, and is n otherwise.
Before entry with trans = MagmaNoTrans, the leading m by k
part of the array dA must contain the matrix dA, otherwise
the leading k by m part of the array dA must contain the
matrix dA.
@param[in]
ldda INTEGER.
On entry, ldda specifies the first dimension of A as declared
in the calling (sub) program. When trans = MagmaNoTrans then
ldda must be at least max( 1, n ), otherwise ldda must be at
least max( 1, k ).
@param[in]
beta DOUBLE PRECISION.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then dC need not be set on input.
@param[in,out]
dC DOUBLE_PRECISION array of DIMENSION ( lddc, n ).
Before entry with uplo = 'U' or 'u', the leading n by n
upper triangular part of the array C must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of C is not referenced. On exit, the
upper triangular part of the array C is overwritten by the
upper triangular part of the updated matrix.
Before entry with uplo = 'L' or 'l', the leading n by n
lower triangular part of the array C must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of C is not referenced. On exit, the
lower triangular part of the array C is overwritten by the
lower triangular part of the updated matrix.
Note that the imaginary parts of the diagonal elements need
not be set, they are assumed to be zero, and on exit they
are set to zero.
@param[in]
lddc INTEGER.
On entry, lddc specifies the first dimension of dC as declared
in the calling (sub) program. lddc must be at least
max( 1, m ).
@ingroup magma_dblas3
********************************************************************/
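/*
    Illustrative host-side call sketch (dA_array/dC_array are hypothetical device arrays of
    batchCount pointers, not symbols defined in this file):
        magmablas_dsyrk_batched_k32( MagmaLower, MagmaNoTrans, n, k,
                                     alpha, dA_array, ldda,
                                     beta,  dC_array, lddc, batchCount );
*/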
extern "C" void
magmablas_dsyrk_batched_k32(
magma_uplo_t uplo, magma_trans_t trans, magma_int_t n, magma_int_t k,
double alpha,
double const * const * dA_array, magma_int_t ldda,
double beta,
double **dC_array, magma_int_t lddc, magma_int_t batchCount )
{
double cbeta = MAGMA_D_MAKE( beta, 0. );
double calpha = MAGMA_D_MAKE( alpha, 0. );
magma_int_t info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower )
info = -1;
else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -2;
else if ( n < 0 )
info = -3;
else if ( k < 0 )
info = -4;
else if ( trans == MagmaNoTrans ? ldda < n : ldda < k )
info = -7;
else if ( lddc < n )
info = -10;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
printf("not supported \n"); // TODO call cublas
return;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
if ( n <= 0 || k <= 0 )
return;
size_t offsetA = 0;
int TransA = 0, TransB = 0, uploA = 0;
if ( uplo == MagmaLower )
uploA = 1;
else if ( uplo == MagmaUpper )
uploA = 2;
if ( trans == MagmaNoTrans )
#if defined(PRECISION_z) || defined(PRECISION_c)
TransB = 2;
#else
TransB = 1;
#endif
else if ( trans == MagmaTrans || trans == MagmaConjTrans)
#if defined(PRECISION_z) || defined(PRECISION_c)
TransA = 2;
#else
TransA = 1;
#endif
#ifdef TEXTURE_1D
size_t sizeA = (size_t) ldda * (size_t) (!TransA ? k : n);
size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512);
if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE )
{
printf("not supported \n"); // TODO call cublas
return;
}
// Set textures parameters
tex_ref_A.normalized = false;
tex_ref_A.filterMode = cudaFilterModePoint;
tex_ref_A.addressMode[0] = cudaAddressModeClamp;
// Bind A and B to texture references
cudaError_t err;
err = cudaBindTexture(&offsetA, tex_ref_A, dA_array[0], sizeA*sizeof(double));
if ( err != cudaSuccess ) {
fprintf( stderr, "cannot bind A to texture: %s (%d)\n", cudaGetErrorString(err), err );
return;
}
#endif
// Set up grids
dim3 dimBlock(DIM_X, DIM_Y);
offsetA = offsetA/sizeof(double);
if ( TransA == 0 && TransB == 1 ) {
dim3 dimGrid( (n - 1)/BLK_M_nt + 1,
(n - 1)/BLK_N_nt + 1 ,
batchCount );
magmablas_d_herk_kernel_fermi_nt_batched<<< dimGrid, dimBlock, 0, magma_stream >>>(
uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta,
(int)offsetA, (int)offsetA );
}
else if ( TransA == 0 && TransB == 2 ) {
dim3 dimGrid( (n - 1)/BLK_M_nc + 1,
(n - 1)/BLK_N_nc + 1 ,
batchCount );
magmablas_d_herk_kernel_fermi_nc_batched<<< dimGrid, dimBlock, 0, magma_stream >>>(
uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta,
(int)offsetA, (int)offsetA );
}
else if ( TransA == 1 && TransB == 0 ) {
dim3 dimGrid( (n - 1)/BLK_M_tn + 1,
(n - 1)/BLK_N_tn + 1 ,
batchCount );
magmablas_d_herk_kernel_fermi_tn_batched<<< dimGrid, dimBlock, 0, magma_stream >>>(
uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta,
(int)offsetA, (int)offsetA );
}
else if ( TransA == 2 && TransB == 0 ) {
dim3 dimGrid( (n - 1)/BLK_M_cn + 1,
(n - 1)/BLK_N_cn + 1 ,
batchCount );
magmablas_d_herk_kernel_fermi_cn_batched<<< dimGrid, dimBlock, 0, magma_stream >>>(
uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta,
(int)offsetA, (int)offsetA );
}
#ifdef TEXTURE_1D
cudaUnbindTexture( tex_ref_A );
#endif
}
///////////////////////////////////////////////////////////////////////////////////////////////////
|
73e3d4fd956588135a4856d7c31a889c1d2c3aad.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file main.cpp
*
* @author btran
*
* @date 2020-05-03
*
* Copyright (c) organization
*
*/
#include <hip/hip_runtime.h>
#include <iostream>
__global__ void addMat(float *matA, float *matB, float *matC,
const uint64_t row, const uint64_t col)
{
uint64_t i = threadIdx.x + blockIdx.x * blockDim.x;
uint64_t j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < row && j < col) {
uint64_t index = i * col + j;
matC[index] = matA[index] + matB[index];
}
}
int main(int argc, char *argv[])
{
const uint64_t row = 3000, col = 3000;
hipEvent_t hostStart, hostStop, deviceStart, deviceStop;
hipEventCreate(&hostStart);
hipEventCreate(&hostStop);
hipEventCreate(&deviceStart);
hipEventCreate(&deviceStop);
float timeDifferenceOnHost, timeDifferenceOnDevice;
float *a = new float[row * col];
float *b = new float[row * col];
float *c = new float[row * col];
for (uint64_t i = 0; i < row; ++i) {
for (uint64_t j = 0; j < col; ++j) {
a[i * col + j] = i + j;
b[i * col + j] = i + j;
}
}
printf("Adding matrices on CPU...\n");
hipEventRecord(hostStart, 0);
for (uint64_t i = 0; i < row * col; ++i) {
c[i] = a[i] + b[i];
}
hipEventRecord(hostStop, 0);
hipEventElapsedTime(&timeDifferenceOnHost, hostStart, hostStop);
printf("Matrix addition over. Time taken on CPU: %5.5f\n",
timeDifferenceOnHost);
float *matA, *matB, *matC;
printf("Adding matrices on GPU...\n");
hipEventRecord(deviceStart, 0);
hipMalloc((void **)&matA, row * col * sizeof(float));
hipMalloc((void **)&matB, row * col * sizeof(float));
hipMalloc((void **)&matC, row * col * sizeof(float));
hipMemcpy(matA, a, row * col * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(matB, b, row * col * sizeof(float), hipMemcpyHostToDevice);
dim3 threadsPerBlock(32, 32);
dim3 numBlocks((row + 31) / 32, (col + 31) / 32);
hipLaunchKernelGGL(( addMat), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, matA, matB, matC, row, col);
hipDeviceSynchronize();
hipMemcpy(c, matC, row * col * sizeof(float), hipMemcpyDeviceToHost);
hipEventRecord(deviceStop, 0);
hipEventElapsedTime(&timeDifferenceOnDevice, deviceStart, deviceStop);
printf("Matrix addition over. Time taken on GPU: %5.5f\n",
timeDifferenceOnDevice);
hipFree(matA);
hipFree(matB);
hipFree(matC);
hipEventDestroy(deviceStart);
hipEventDestroy(deviceStop);
hipEventDestroy(hostStart);
hipEventDestroy(hostStop);
delete[] a;
delete[] b;
delete[] c;
return 0;
}
| 73e3d4fd956588135a4856d7c31a889c1d2c3aad.cu | /**
* @file main.cpp
*
* @author btran
*
* @date 2020-05-03
*
* Copyright (c) organization
*
*/
#include <cuda_runtime.h>
#include <iostream>
__global__ void addMat(float *matA, float *matB, float *matC,
const uint64_t row, const uint64_t col)
{
uint64_t i = threadIdx.x + blockIdx.x * blockDim.x;
uint64_t j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < row && j < col) {
uint64_t index = i * col + j;
matC[index] = matA[index] + matB[index];
}
}
int main(int argc, char *argv[])
{
const uint64_t row = 3000, col = 3000;
cudaEvent_t hostStart, hostStop, deviceStart, deviceStop;
cudaEventCreate(&hostStart);
cudaEventCreate(&hostStop);
cudaEventCreate(&deviceStart);
cudaEventCreate(&deviceStop);
float timeDifferenceOnHost, timeDifferenceOnDevice;
float *a = new float[row * col];
float *b = new float[row * col];
float *c = new float[row * col];
for (uint64_t i = 0; i < row; ++i) {
for (uint64_t j = 0; j < col; ++j) {
a[i * col + j] = i + j;
b[i * col + j] = i + j;
}
}
printf("Adding matrices on CPU...\n");
cudaEventRecord(hostStart, 0);
for (uint64_t i = 0; i < row * col; ++i) {
c[i] = a[i] + b[i];
}
cudaEventRecord(hostStop, 0);
cudaEventElapsedTime(&timeDifferenceOnHost, hostStart, hostStop);
printf("Matrix addition over. Time taken on CPU: %5.5f\n",
timeDifferenceOnHost);
float *matA, *matB, *matC;
printf("Adding matrices on GPU...\n");
cudaEventRecord(deviceStart, 0);
cudaMalloc((void **)&matA, row * col * sizeof(float));
cudaMalloc((void **)&matB, row * col * sizeof(float));
cudaMalloc((void **)&matC, row * col * sizeof(float));
cudaMemcpy(matA, a, row * col * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(matB, b, row * col * sizeof(float), cudaMemcpyHostToDevice);
dim3 threadsPerBlock(32, 32);
dim3 numBlocks((row + 31) / 32, (col + 31) / 32);
addMat<<<numBlocks, threadsPerBlock>>>(matA, matB, matC, row, col);
cudaDeviceSynchronize();
cudaMemcpy(c, matC, row * col * sizeof(float), cudaMemcpyDeviceToHost);
cudaEventRecord(deviceStop, 0);
cudaEventElapsedTime(&timeDifferenceOnDevice, deviceStart, deviceStop);
printf("Matrix addition over. Time taken on GPU: %5.5f\n",
timeDifferenceOnDevice);
cudaFree(matA);
cudaFree(matB);
cudaFree(matC);
cudaEventDestroy(deviceStart);
cudaEventDestroy(deviceStop);
cudaEventDestroy(hostStart);
cudaEventDestroy(hostStop);
delete[] a;
delete[] b;
delete[] c;
return 0;
}
|
589898afeb13173154eb902e7d8d5065651b75e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "Hornet.hpp"
#include "Core/GPUHornet/BatchUpdate.cuh"
#include "Util/BatchFunctions.hpp"
#include <Host/FileUtil.hpp> //xlib::extract_filepath_noextension
#include <Device/CudaUtil.cuh> //xlib::deviceInfo
#include <algorithm> //std:.generate
#include <chrono> //std::chrono
#include <random> //std::mt19937_64
#include <hip/hip_runtime_api.h>
//nvprof --profile-from-start off --log-file log.txt --print-gpu-trace
using namespace hornets_nest;
using namespace timer;
using namespace std::string_literals;
using HornetGPU = hornets_nest::gpu::Hornet<EMPTY, EMPTY>;
void exec(int argc, char* argv[]);
/**
* @brief Example tester for Hornet
*/
int main(int argc, char* argv[]) {
exec(argc, argv);
hipDeviceReset();
}
void exec(int argc, char* argv[]) {
using namespace graph::structure_prop;
using namespace graph::parsing_prop;
xlib::device_info();
graph::GraphStd<vid_t, eoff_t> graph;
graph.read(argv[1]);
//graph.print();
//if (param.binary)
// graph.writeBinary(xlib::extract_filepath_noextension(argv[1]) + ".bin");
// graph.writeDimacs10th(xlib::extract_filepath_noextension(argv[1]) + ".graph");
//graph.writeMarket(xlib::extract_filepath_noextension(argv[1]) + ".mtx");
//--------------------------------------------------------------------------
auto weights = new int[graph.nE()];
std::iota(weights, weights + graph.nE(), 0);
//--------------------------------------------------------------------------
HornetInit hornet_init(graph.nV(), graph.nE(), graph.out_offsets_ptr(),
graph.out_edges_ptr());
//hornet_init.insertEdgeData(weights);
HornetGPU hornet_gpu(hornet_init);
//hornet_gpu.mem_manager_info();
//hornet_gpu.print();
//return;
//hornet_gpu.check_sorted_adjs();
std::cout << "------------------------------------------------" <<std::endl;
//--------------------------------------------------------------------------
using namespace batch_gen_property;
if (argc == 3) {
int batch_size = std::stoi(argv[2]);
vid_t* batch_src, *batch_dst;
cuMallocHost(batch_src, batch_size);
cuMallocHost(batch_dst, batch_size);
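// Generate a batch of unique random (src, dst) edge pairs to be inserted into the graph.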
generateBatch(graph, batch_size, batch_src, batch_dst,
BatchGenType::INSERT, UNIQUE); //| PRINT
//vid_t batch_src[] = { 0, 0, 2 };
//vid_t batch_dst[] = { 2, 6, 7 };
gpu::BatchUpdate batch_update(batch_src, batch_dst, batch_size);
batch_update.print();
std::cout << "------------------------------------------------" <<std::endl;
using namespace gpu::batch_property;
hornet_gpu.allocateEdgeInsertion(batch_size,
IN_PLACE | REMOVE_CROSS_DUPLICATE);
//hornet_gpu.allocateEdgeDeletion(batch_size,
// IN_PLACE | REMOVE_CROSS_DUPLICATE);
hipProfilerStart();
Timer<DEVICE> TM(3);
TM.start();
hornet_gpu.insertEdgeBatch(batch_update);
//hornet_gpu.deleteEdgeBatch(batch_update);
TM.stop();
TM.print("Insertion "s + std::to_string(batch_size) + ": ");
hipProfilerStop();
//hornet_gpu.check_sorted_adjs();
//delete[] batch_src;
//delete[] batch_dst;
cuFreeHost(batch_src);
cuFreeHost(batch_dst);
hornet_gpu.print();
}
delete[] weights;
}
| 589898afeb13173154eb902e7d8d5065651b75e7.cu | #include "Hornet.hpp"
#include "Core/GPUHornet/BatchUpdate.cuh"
#include "Util/BatchFunctions.hpp"
#include <Host/FileUtil.hpp> //xlib::extract_filepath_noextension
#include <Device/CudaUtil.cuh> //xlib::deviceInfo
#include <algorithm> //std:.generate
#include <chrono> //std::chrono
#include <random> //std::mt19937_64
#include <cuda_profiler_api.h>
//nvprof --profile-from-start off --log-file log.txt --print-gpu-trace
using namespace hornets_nest;
using namespace timer;
using namespace std::string_literals;
using HornetGPU = hornets_nest::gpu::Hornet<EMPTY, EMPTY>;
void exec(int argc, char* argv[]);
/**
* @brief Example tester for Hornet
*/
int main(int argc, char* argv[]) {
exec(argc, argv);
cudaDeviceReset();
}
void exec(int argc, char* argv[]) {
using namespace graph::structure_prop;
using namespace graph::parsing_prop;
xlib::device_info();
graph::GraphStd<vid_t, eoff_t> graph;
graph.read(argv[1]);
//graph.print();
//if (param.binary)
// graph.writeBinary(xlib::extract_filepath_noextension(argv[1]) + ".bin");
// graph.writeDimacs10th(xlib::extract_filepath_noextension(argv[1]) + ".graph");
//graph.writeMarket(xlib::extract_filepath_noextension(argv[1]) + ".mtx");
//--------------------------------------------------------------------------
auto weights = new int[graph.nE()];
std::iota(weights, weights + graph.nE(), 0);
//--------------------------------------------------------------------------
HornetInit hornet_init(graph.nV(), graph.nE(), graph.out_offsets_ptr(),
graph.out_edges_ptr());
//hornet_init.insertEdgeData(weights);
HornetGPU hornet_gpu(hornet_init);
//hornet_gpu.mem_manager_info();
//hornet_gpu.print();
//return;
//hornet_gpu.check_sorted_adjs();
std::cout << "------------------------------------------------" <<std::endl;
//--------------------------------------------------------------------------
using namespace batch_gen_property;
if (argc == 3) {
int batch_size = std::stoi(argv[2]);
vid_t* batch_src, *batch_dst;
cuMallocHost(batch_src, batch_size);
cuMallocHost(batch_dst, batch_size);
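// Generate a batch of unique random (src, dst) edge pairs to be inserted into the graph.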
generateBatch(graph, batch_size, batch_src, batch_dst,
BatchGenType::INSERT, UNIQUE); //| PRINT
//vid_t batch_src[] = { 0, 0, 2 };
//vid_t batch_dst[] = { 2, 6, 7 };
gpu::BatchUpdate batch_update(batch_src, batch_dst, batch_size);
batch_update.print();
std::cout << "------------------------------------------------" <<std::endl;
using namespace gpu::batch_property;
hornet_gpu.allocateEdgeInsertion(batch_size,
IN_PLACE | REMOVE_CROSS_DUPLICATE);
//hornet_gpu.allocateEdgeDeletion(batch_size,
// IN_PLACE | REMOVE_CROSS_DUPLICATE);
cudaProfilerStart();
Timer<DEVICE> TM(3);
TM.start();
hornet_gpu.insertEdgeBatch(batch_update);
//hornet_gpu.deleteEdgeBatch(batch_update);
TM.stop();
TM.print("Insertion "s + std::to_string(batch_size) + ": ");
cudaProfilerStop();
//hornet_gpu.check_sorted_adjs();
//delete[] batch_src;
//delete[] batch_dst;
cuFreeHost(batch_src);
cuFreeHost(batch_dst);
hornet_gpu.print();
}
delete[] weights;
}
|
b7430784e33cac1f2d4c0f1955a678368b6748a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2008-2011 Ames Laboratory
Iowa State University and The Regents of the University of Michigan All rights
reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: dnlebard
#include "HarmonicDihedralForceGPU.cuh"
#include "DihedralData.cuh" // SERIOUSLY, DO I NEED THIS HERE??
#ifdef WIN32
#include <cassert>
#else
#include <assert.h>
#endif
// SMALL a relatively small number
#define SMALL 0.001f
/*! \file HarmonicDihedralForceGPU.cu
\brief Defines GPU kernel code for calculating the harmonic dihedral forces. Used by HarmonicDihedralForceComputeGPU.
*/
//! Texture for reading particle positions
texture<float4, 1, hipReadModeElementType> pdata_pos_tex;
//! Texture for reading dihedral parameters
texture<float4, 1, hipReadModeElementType> dihedral_params_tex;
//! Kernel for calculating harmonic dihedral forces on the GPU
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param pdata Particle data arrays to calculate forces on
\param box Box dimensions for periodic boundary condition handling
\param tlist Dihedral data to use in calculating the forces
*/
extern "C" __global__
void gpu_compute_harmonic_dihedral_forces_kernel(float4* d_force,
float* d_virial,
gpu_pdata_arrays pdata,
gpu_boxsize box,
gpu_dihedraltable_array tlist)
{
// start by identifying which particle we are to handle
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= pdata.N)
return;
// load in the length of the list for this thread (MEM TRANSFER: 4 bytes)
int n_dihedrals = tlist.n_dihedrals[idx];
// read in the position of our particle in the a-b-c-d quartet. (MEM TRANSFER: 16 bytes)
float4 idx_pos = tex1Dfetch(pdata_pos_tex, idx); // we can be either a, b, c, or d in the a-b-c-d quartet
float4 a_pos,b_pos,c_pos, d_pos; // allocate space for the a, b, c, and d atoms in the a-b-c-d quartet
// initialize the force to 0
float4 force_idx = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
// initialize the virial to 0
float virial_idx = 0.0f;
// loop over all dihedrals
for (int dihedral_idx = 0; dihedral_idx < n_dihedrals; dihedral_idx++)
{
// the volatile fails to compile in device emulation mode (MEM TRANSFER: 8 bytes)
#ifdef _DEVICEEMU
uint4 cur_dihedral = tlist.dihedrals[tlist.pitch*dihedral_idx + idx];
uint1 cur_ABCD = tlist.dihedralABCD[tlist.pitch*dihedral_idx + idx];
#else
// the volatile is needed to force the compiler to load the uint2 coalesced
volatile uint4 cur_dihedral = tlist.dihedrals[tlist.pitch*dihedral_idx + idx];
volatile uint1 cur_ABCD = tlist.dihedralABCD[tlist.pitch*dihedral_idx + idx];
#endif
int cur_dihedral_x_idx = cur_dihedral.x;
int cur_dihedral_y_idx = cur_dihedral.y;
int cur_dihedral_z_idx = cur_dihedral.z;
int cur_dihedral_type = cur_dihedral.w;
int cur_dihedral_abcd = cur_ABCD.x;
// get the a-particle's position (MEM TRANSFER: 16 bytes)
float4 x_pos = tex1Dfetch(pdata_pos_tex, cur_dihedral_x_idx);
// get the c-particle's position (MEM TRANSFER: 16 bytes)
float4 y_pos = tex1Dfetch(pdata_pos_tex, cur_dihedral_y_idx);
// get the c-particle's position (MEM TRANSFER: 16 bytes)
float4 z_pos = tex1Dfetch(pdata_pos_tex, cur_dihedral_z_idx);
if (cur_dihedral_abcd == 0)
{
a_pos = idx_pos;
b_pos = x_pos;
c_pos = y_pos;
d_pos = z_pos;
}
if (cur_dihedral_abcd == 1)
{
b_pos = idx_pos;
a_pos = x_pos;
c_pos = y_pos;
d_pos = z_pos;
}
if (cur_dihedral_abcd == 2)
{
c_pos = idx_pos;
a_pos = x_pos;
b_pos = y_pos;
d_pos = z_pos;
}
if (cur_dihedral_abcd == 3)
{
d_pos = idx_pos;
a_pos = x_pos;
b_pos = y_pos;
c_pos = z_pos;
}
// calculate dr for a-b,c-b,and a-c(FLOPS: 9)
float dxab = a_pos.x - b_pos.x;
float dyab = a_pos.y - b_pos.y;
float dzab = a_pos.z - b_pos.z;
float dxcb = c_pos.x - b_pos.x;
float dycb = c_pos.y - b_pos.y;
float dzcb = c_pos.z - b_pos.z;
float dxdc = d_pos.x - c_pos.x;
float dydc = d_pos.y - c_pos.y;
float dzdc = d_pos.z - c_pos.z;
dxab -= box.Lx * rintf(dxab * box.Lxinv);
dxcb -= box.Lx * rintf(dxcb * box.Lxinv);
dxdc -= box.Lx * rintf(dxdc * box.Lxinv);
dyab -= box.Ly * rintf(dyab * box.Lyinv);
dycb -= box.Ly * rintf(dycb * box.Lyinv);
dydc -= box.Ly * rintf(dydc * box.Lyinv);
dzab -= box.Lz * rintf(dzab * box.Lzinv);
dzcb -= box.Lz * rintf(dzcb * box.Lzinv);
dzdc -= box.Lz * rintf(dzdc * box.Lzinv);
float dxcbm = -dxcb;
float dycbm = -dycb;
float dzcbm = -dzcb;
dxcbm -= box.Lx * rintf(dxcbm * box.Lxinv);
dycbm -= box.Ly * rintf(dycbm * box.Lyinv);
dzcbm -= box.Lz * rintf(dzcbm * box.Lzinv);
// get the dihedral parameters (MEM TRANSFER: 12 bytes)
float4 params = tex1Dfetch(dihedral_params_tex, cur_dihedral_type);
float K = params.x;
float sign = params.y;
float multi = params.z;
// printf("IN CUDA CODE: k = %f sign = %f multi = %f \n",K,sign,multi);
float aax = dyab*dzcbm - dzab*dycbm;
float aay = dzab*dxcbm - dxab*dzcbm;
float aaz = dxab*dycbm - dyab*dxcbm;
float bbx = dydc*dzcbm - dzdc*dycbm;
float bby = dzdc*dxcbm - dxdc*dzcbm;
float bbz = dxdc*dycbm - dydc*dxcbm;
float raasq = aax*aax + aay*aay + aaz*aaz;
float rbbsq = bbx*bbx + bby*bby + bbz*bbz;
float rgsq = dxcbm*dxcbm + dycbm*dycbm + dzcbm*dzcbm;
float rg = sqrtf(rgsq);
float rginv, raa2inv, rbb2inv;
rginv = raa2inv = rbb2inv = 0.0f;
if (rg > 0.0f) rginv = 1.0f/rg;
if (raasq > 0.0f) raa2inv = 1.0f/raasq;
if (rbbsq > 0.0f) rbb2inv = 1.0f/rbbsq;
float rabinv = sqrtf(raa2inv*rbb2inv);
float c_abcd = (aax*bbx + aay*bby + aaz*bbz)*rabinv;
float s_abcd = rg*rabinv*(aax*dxdc + aay*dydc + aaz*dzdc);
if (c_abcd > 1.0f) c_abcd = 1.0f;
if (c_abcd < -1.0f) c_abcd = -1.0f;
float p = 1.0f;
float ddfab;
float dfab = 0.0f;
int m = __float2int_rn(multi);
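// Recurrence via the angle-addition formulas: starting from (p, dfab) = (cos 0, sin 0),
// after m iterations p = cos(m*phi) and dfab = sin(m*phi), with cos(phi) = c_abcd and sin(phi) = s_abcd.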
for (int jj = 0; jj < m; jj++)
{
ddfab = p*c_abcd - dfab*s_abcd;
dfab = p*s_abcd + dfab*c_abcd;
p = ddfab;
}
/////////////////////////
// FROM LAMMPS: sin_shift is always 0... so dropping all sin_shift terms!!!!
/////////////////////////
p *= sign;
dfab *= sign;
dfab *= -multi;
p += 1.0f;
if (multi < 1.0f)
{
p = 1.0f + sign;
dfab = 0.0f;
}
float fg = dxab*dxcbm + dyab*dycbm + dzab*dzcbm;
float hg = dxdc*dxcbm + dydc*dycbm + dzdc*dzcbm;
float fga = fg*raa2inv*rginv;
float hgb = hg*rbb2inv*rginv;
float gaa = -raa2inv*rg;
float gbb = rbb2inv*rg;
float dtfx = gaa*aax;
float dtfy = gaa*aay;
float dtfz = gaa*aaz;
float dtgx = fga*aax - hgb*bbx;
float dtgy = fga*aay - hgb*bby;
float dtgz = fga*aaz - hgb*bbz;
float dthx = gbb*bbx;
float dthy = gbb*bby;
float dthz = gbb*bbz;
//float df = -K * dfab;
float df = -K * dfab * float(0.500); // the 0.5 term is for 1/2K in the forces
float sx2 = df*dtgx;
float sy2 = df*dtgy;
float sz2 = df*dtgz;
float ffax = df*dtfx;
float ffay = df*dtfy;
float ffaz = df*dtfz;
float ffbx = sx2 - ffax;
float ffby = sy2 - ffay;
float ffbz = sz2 - ffaz;
float ffdx = df*dthx;
float ffdy = df*dthy;
float ffdz = df*dthz;
float ffcx = -sx2 - ffdx;
float ffcy = -sy2 - ffdy;
float ffcz = -sz2 - ffdz;
// Now, apply the force to each individual atom a,b,c,d
// and accumlate the energy/virial
// compute 1/4 of the energy, 1/4 for each atom in the dihedral
//float dihedral_eng = p*K*float(1.0/4.0);
float dihedral_eng = p*K*float(1.0/8.0); // the 1/8th term is (1/2)K * 1/4
float vx = (dxab*ffax) + (dxcb*ffcx) + (dxdc+dxcb)*ffdx;
float vy = (dyab*ffay) + (dycb*ffcy) + (dydc+dycb)*ffdy;
float vz = (dzab*ffaz) + (dzcb*ffcz) + (dzdc+dzcb)*ffdz;
// compute 1/4 of the virial, 1/4 for each atom in the dihedral
float dihedral_virial = float(1.0/12.0)*(vx + vy + vz);
if (cur_dihedral_abcd == 0)
{
force_idx.x += ffax;
force_idx.y += ffay;
force_idx.z += ffaz;
}
if (cur_dihedral_abcd == 1)
{
force_idx.x += ffbx;
force_idx.y += ffby;
force_idx.z += ffbz;
}
if (cur_dihedral_abcd == 2)
{
force_idx.x += ffcx;
force_idx.y += ffcy;
force_idx.z += ffcz;
}
if (cur_dihedral_abcd == 3)
{
force_idx.x += ffdx;
force_idx.y += ffdy;
force_idx.z += ffdz;
}
force_idx.w += dihedral_eng;
virial_idx += dihedral_virial;
}
// now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes)
d_force[idx] = force_idx;
d_virial[idx] = virial_idx;
}
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param pdata Particle data on the GPU to perform the calculation on
\param box Box dimensions (in GPU format) to use for periodic boundary conditions
\param ttable List of dihedrals stored on the GPU
\param d_params K, sign,multiplicity params packed as padded float4 variables
\param n_dihedral_types Number of dihedral types in d_params
\param block_size Block size to use when performing calculations
\returns Any error code resulting from the kernel launch
\note Always returns hipSuccess in release builds to avoid the hipDeviceSynchronize()
\a d_params should include one float4 element per dihedral type. The x component contains K, the spring
constant; the y component contains the sign; and the z component contains the multiplicity.
*/
hipError_t gpu_compute_harmonic_dihedral_forces(float4* d_force,
float* d_virial,
const gpu_pdata_arrays &pdata,
const gpu_boxsize &box,
const gpu_dihedraltable_array &ttable,
float4 *d_params,
unsigned int n_dihedral_types,
int block_size)
{
assert(d_params);
// setup the grid to run the kernel
dim3 grid( (int)ceil((double)pdata.N / (double)block_size), 1, 1);
dim3 threads(block_size, 1, 1);
// bind the textures
hipError_t error = hipBindTexture(0, pdata_pos_tex, pdata.pos, sizeof(float4) * pdata.N);
if (error != hipSuccess)
return error;
error = hipBindTexture(0, dihedral_params_tex, d_params, sizeof(float4) * n_dihedral_types);
if (error != hipSuccess)
return error;
// run the kernel
hipLaunchKernelGGL(( gpu_compute_harmonic_dihedral_forces_kernel), dim3(grid), dim3(threads), 0, 0, d_force, d_virial, pdata, box, ttable);
return hipSuccess;
}
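// Illustrative host-side helper (not part of the original HOOMD-blue API; the
// helper name and the zero used to pad the w component are assumptions). It packs
// one dihedral type into the d_params layout documented above, ready to be copied
// to the device with hipMemcpy.
static inline void pack_harmonic_dihedral_params(float4* h_params, unsigned int type, float K, float sign, float multiplicity)
{
h_params[type] = make_float4(K, sign, multiplicity, 0.0f);
}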
| b7430784e33cac1f2d4c0f1955a678368b6748a0.cu | /*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2008-2011 Ames Laboratory
Iowa State University and The Regents of the University of Michigan All rights
reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: dnlebard
#include "HarmonicDihedralForceGPU.cuh"
#include "DihedralData.cuh" // SERIOUSLY, DO I NEED THIS HERE??
#ifdef WIN32
#include <cassert>
#else
#include <assert.h>
#endif
// SMALL a relatively small number
#define SMALL 0.001f
/*! \file HarmonicDihedralForceGPU.cu
\brief Defines GPU kernel code for calculating the harmonic dihedral forces. Used by HarmonicDihedralForceComputeGPU.
*/
//! Texture for reading particle positions
texture<float4, 1, cudaReadModeElementType> pdata_pos_tex;
//! Texture for reading dihedral parameters
texture<float4, 1, cudaReadModeElementType> dihedral_params_tex;
//! Kernel for caculating harmonic dihedral forces on the GPU
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param pdata Particle data arrays to calculate forces on
\param box Box dimensions for periodic boundary condition handling
\param tlist Dihedral data to use in calculating the forces
*/
extern "C" __global__
void gpu_compute_harmonic_dihedral_forces_kernel(float4* d_force,
float* d_virial,
gpu_pdata_arrays pdata,
gpu_boxsize box,
gpu_dihedraltable_array tlist)
{
// start by identifying which particle we are to handle
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= pdata.N)
return;
// load in the length of the list for this thread (MEM TRANSFER: 4 bytes)
int n_dihedrals = tlist.n_dihedrals[idx];
// read in the position of our b-particle from the a-b-c triplet. (MEM TRANSFER: 16 bytes)
float4 idx_pos = tex1Dfetch(pdata_pos_tex, idx); // we can be either a, b, or c in the a-b-c-d quartet
float4 a_pos,b_pos,c_pos, d_pos; // allocate space for the a,b, and c atoms in the a-b-c-d quartet
// initialize the force to 0
float4 force_idx = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
// initialize the virial to 0
float virial_idx = 0.0f;
// loop over all dihedrals
for (int dihedral_idx = 0; dihedral_idx < n_dihedrals; dihedral_idx++)
{
// the volatile fails to compile in device emulation mode (MEM TRANSFER: 8 bytes)
#ifdef _DEVICEEMU
uint4 cur_dihedral = tlist.dihedrals[tlist.pitch*dihedral_idx + idx];
uint1 cur_ABCD = tlist.dihedralABCD[tlist.pitch*dihedral_idx + idx];
#else
// the volatile is needed to force the compiler to load the uint2 coalesced
volatile uint4 cur_dihedral = tlist.dihedrals[tlist.pitch*dihedral_idx + idx];
volatile uint1 cur_ABCD = tlist.dihedralABCD[tlist.pitch*dihedral_idx + idx];
#endif
int cur_dihedral_x_idx = cur_dihedral.x;
int cur_dihedral_y_idx = cur_dihedral.y;
int cur_dihedral_z_idx = cur_dihedral.z;
int cur_dihedral_type = cur_dihedral.w;
int cur_dihedral_abcd = cur_ABCD.x;
// get the a-particle's position (MEM TRANSFER: 16 bytes)
float4 x_pos = tex1Dfetch(pdata_pos_tex, cur_dihedral_x_idx);
// get the c-particle's position (MEM TRANSFER: 16 bytes)
float4 y_pos = tex1Dfetch(pdata_pos_tex, cur_dihedral_y_idx);
// get the c-particle's position (MEM TRANSFER: 16 bytes)
float4 z_pos = tex1Dfetch(pdata_pos_tex, cur_dihedral_z_idx);
if (cur_dihedral_abcd == 0)
{
a_pos = idx_pos;
b_pos = x_pos;
c_pos = y_pos;
d_pos = z_pos;
}
if (cur_dihedral_abcd == 1)
{
b_pos = idx_pos;
a_pos = x_pos;
c_pos = y_pos;
d_pos = z_pos;
}
if (cur_dihedral_abcd == 2)
{
c_pos = idx_pos;
a_pos = x_pos;
b_pos = y_pos;
d_pos = z_pos;
}
if (cur_dihedral_abcd == 3)
{
d_pos = idx_pos;
a_pos = x_pos;
b_pos = y_pos;
c_pos = z_pos;
}
// calculate dr for a-b,c-b,and a-c(FLOPS: 9)
float dxab = a_pos.x - b_pos.x;
float dyab = a_pos.y - b_pos.y;
float dzab = a_pos.z - b_pos.z;
float dxcb = c_pos.x - b_pos.x;
float dycb = c_pos.y - b_pos.y;
float dzcb = c_pos.z - b_pos.z;
float dxdc = d_pos.x - c_pos.x;
float dydc = d_pos.y - c_pos.y;
float dzdc = d_pos.z - c_pos.z;
dxab -= box.Lx * rintf(dxab * box.Lxinv);
dxcb -= box.Lx * rintf(dxcb * box.Lxinv);
dxdc -= box.Lx * rintf(dxdc * box.Lxinv);
dyab -= box.Ly * rintf(dyab * box.Lyinv);
dycb -= box.Ly * rintf(dycb * box.Lyinv);
dydc -= box.Ly * rintf(dydc * box.Lyinv);
dzab -= box.Lz * rintf(dzab * box.Lzinv);
dzcb -= box.Lz * rintf(dzcb * box.Lzinv);
dzdc -= box.Lz * rintf(dzdc * box.Lzinv);
float dxcbm = -dxcb;
float dycbm = -dycb;
float dzcbm = -dzcb;
dxcbm -= box.Lx * rintf(dxcbm * box.Lxinv);
dycbm -= box.Ly * rintf(dycbm * box.Lyinv);
dzcbm -= box.Lz * rintf(dzcbm * box.Lzinv);
// get the dihedral parameters (MEM TRANSFER: 12 bytes)
float4 params = tex1Dfetch(dihedral_params_tex, cur_dihedral_type);
float K = params.x;
float sign = params.y;
float multi = params.z;
// printf("IN CUDA CODE: k = %f sign = %f multi = %f \n",K,sign,multi);
float aax = dyab*dzcbm - dzab*dycbm;
float aay = dzab*dxcbm - dxab*dzcbm;
float aaz = dxab*dycbm - dyab*dxcbm;
float bbx = dydc*dzcbm - dzdc*dycbm;
float bby = dzdc*dxcbm - dxdc*dzcbm;
float bbz = dxdc*dycbm - dydc*dxcbm;
float raasq = aax*aax + aay*aay + aaz*aaz;
float rbbsq = bbx*bbx + bby*bby + bbz*bbz;
float rgsq = dxcbm*dxcbm + dycbm*dycbm + dzcbm*dzcbm;
float rg = sqrtf(rgsq);
float rginv, raa2inv, rbb2inv;
rginv = raa2inv = rbb2inv = 0.0f;
if (rg > 0.0f) rginv = 1.0f/rg;
if (raasq > 0.0f) raa2inv = 1.0f/raasq;
if (rbbsq > 0.0f) rbb2inv = 1.0f/rbbsq;
float rabinv = sqrtf(raa2inv*rbb2inv);
float c_abcd = (aax*bbx + aay*bby + aaz*bbz)*rabinv;
float s_abcd = rg*rabinv*(aax*dxdc + aay*dydc + aaz*dzdc);
if (c_abcd > 1.0f) c_abcd = 1.0f;
if (c_abcd < -1.0f) c_abcd = -1.0f;
float p = 1.0f;
float ddfab;
float dfab = 0.0f;
int m = __float2int_rn(multi);
for (int jj = 0; jj < m; jj++)
{
ddfab = p*c_abcd - dfab*s_abcd;
dfab = p*s_abcd + dfab*c_abcd;
p = ddfab;
}
/////////////////////////
// FROM LAMMPS: sin_shift is always 0... so dropping all sin_shift terms!!!!
/////////////////////////
p *= sign;
dfab *= sign;
dfab *= -multi;
p += 1.0f;
if (multi < 1.0f)
{
p = 1.0f + sign;
dfab = 0.0f;
}
float fg = dxab*dxcbm + dyab*dycbm + dzab*dzcbm;
float hg = dxdc*dxcbm + dydc*dycbm + dzdc*dzcbm;
float fga = fg*raa2inv*rginv;
float hgb = hg*rbb2inv*rginv;
float gaa = -raa2inv*rg;
float gbb = rbb2inv*rg;
float dtfx = gaa*aax;
float dtfy = gaa*aay;
float dtfz = gaa*aaz;
float dtgx = fga*aax - hgb*bbx;
float dtgy = fga*aay - hgb*bby;
float dtgz = fga*aaz - hgb*bbz;
float dthx = gbb*bbx;
float dthy = gbb*bby;
float dthz = gbb*bbz;
//float df = -K * dfab;
float df = -K * dfab * float(0.500); // the 0.5 term is for 1/2K in the forces
float sx2 = df*dtgx;
float sy2 = df*dtgy;
float sz2 = df*dtgz;
float ffax = df*dtfx;
float ffay = df*dtfy;
float ffaz = df*dtfz;
float ffbx = sx2 - ffax;
float ffby = sy2 - ffay;
float ffbz = sz2 - ffaz;
float ffdx = df*dthx;
float ffdy = df*dthy;
float ffdz = df*dthz;
float ffcx = -sx2 - ffdx;
float ffcy = -sy2 - ffdy;
float ffcz = -sz2 - ffdz;
// Now, apply the force to each individual atom a,b,c,d
// and accumulate the energy/virial
// compute 1/4 of the energy, 1/4 for each atom in the dihedral
//float dihedral_eng = p*K*float(1.0/4.0);
float dihedral_eng = p*K*float(1.0/8.0); // the 1/8th term is (1/2)K * 1/4
float vx = (dxab*ffax) + (dxcb*ffcx) + (dxdc+dxcb)*ffdx;
float vy = (dyab*ffay) + (dycb*ffcy) + (dydc+dycb)*ffdy;
float vz = (dzab*ffaz) + (dzcb*ffcz) + (dzdc+dzcb)*ffdz;
// compute 1/4 of the virial, 1/4 for each atom in the dihedral
float dihedral_virial = float(1.0/12.0)*(vx + vy + vz);
if (cur_dihedral_abcd == 0)
{
force_idx.x += ffax;
force_idx.y += ffay;
force_idx.z += ffaz;
}
if (cur_dihedral_abcd == 1)
{
force_idx.x += ffbx;
force_idx.y += ffby;
force_idx.z += ffbz;
}
if (cur_dihedral_abcd == 2)
{
force_idx.x += ffcx;
force_idx.y += ffcy;
force_idx.z += ffcz;
}
if (cur_dihedral_abcd == 3)
{
force_idx.x += ffdx;
force_idx.y += ffdy;
force_idx.z += ffdz;
}
force_idx.w += dihedral_eng;
virial_idx += dihedral_virial;
}
// now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes)
d_force[idx] = force_idx;
d_virial[idx] = virial_idx;
}
/*! \param d_force Device memory to write computed forces
\param d_virial Device memory to write computed virials
\param pdata Particle data on the GPU to perform the calculation on
\param box Box dimensions (in GPU format) to use for periodic boundary conditions
\param ttable List of dihedrals stored on the GPU
\param d_params K, sign,multiplicity params packed as padded float4 variables
\param n_dihedral_types Number of dihedral types in d_params
\param block_size Block size to use when performing calculations
\returns Any error code resulting from the kernel launch
\note Always returns cudaSuccess in release builds to avoid the cudaThreadSynchronize()
\a d_params should include one float4 element per dihedral type. The x component contains K the spring constant
and the y component contains sign, and the z component the multiplicity.
*/
cudaError_t gpu_compute_harmonic_dihedral_forces(float4* d_force,
float* d_virial,
const gpu_pdata_arrays &pdata,
const gpu_boxsize &box,
const gpu_dihedraltable_array &ttable,
float4 *d_params,
unsigned int n_dihedral_types,
int block_size)
{
assert(d_params);
// setup the grid to run the kernel
dim3 grid( (int)ceil((double)pdata.N / (double)block_size), 1, 1);
dim3 threads(block_size, 1, 1);
// bind the textures
cudaError_t error = cudaBindTexture(0, pdata_pos_tex, pdata.pos, sizeof(float4) * pdata.N);
if (error != cudaSuccess)
return error;
error = cudaBindTexture(0, dihedral_params_tex, d_params, sizeof(float4) * n_dihedral_types);
if (error != cudaSuccess)
return error;
// run the kernel
gpu_compute_harmonic_dihedral_forces_kernel<<< grid, threads>>>(d_force, d_virial, pdata, box, ttable);
return cudaSuccess;
}
|
d3239181c3c0ac06d6c74d27319d5afd1b411f46.hip | // !!! This is a file automatically generated by hipify!!!
#include "object/structure/bvh.hpp"
using namespace px;
BaseBVH::BaseBVH(Point const &vertex_min, Point const &vertex_max)
: _vertex_min(vertex_min), _vertex_max(vertex_max)
{}
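// Ray/AABB test using the standard slab method: the first block of comparisons
// accepts rays whose origin already lies inside the (epsilon-padded) box, then the
// ray is clipped against the x, y and z slab pairs in turn, and the box counts as
// hit when the entry (tmin) or exit (tmax) parameter falls strictly inside
// (t_start, t_end).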
PX_CUDA_CALLABLE
bool BaseBVH::hitBox(Point const &vertex_min,
Point const &vertex_max,
Ray const &ray,
PREC const &t_start,
PREC const &t_end)
{
if (ray.original.x > vertex_min.x-DOUBLE_EPSILON && ray.original.x < vertex_max.x+DOUBLE_EPSILON &&
ray.original.y > vertex_min.y-DOUBLE_EPSILON && ray.original.y < vertex_max.y+DOUBLE_EPSILON &&
ray.original.z > vertex_min.z-DOUBLE_EPSILON && ray.original.z < vertex_max.z+DOUBLE_EPSILON)
return true;
auto tmin = ((ray.direction.x < 0 ? vertex_max.x : vertex_min.x) - ray.original.x) / ray.direction.x;
auto tmax = ((ray.direction.x < 0 ? vertex_min.x : vertex_max.x) - ray.original.x) / ray.direction.x;
auto tymin = ((ray.direction.y < 0 ? vertex_max.y : vertex_min.y) - ray.original.y) / ray.direction.y;
auto tymax = ((ray.direction.y < 0 ? vertex_min.y : vertex_max.y) - ray.original.y) / ray.direction.y;
if (tmin > tymax || tymin > tmax)
return false;
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
tymin = ((ray.direction.z < 0 ? vertex_max.z : vertex_min.z) - ray.original.z) / ray.direction.z;
tymax = ((ray.direction.z < 0 ? vertex_min.z : vertex_max.z) - ray.original.z) / ray.direction.z;
if (tmin > tymax || tymin > tmax)
return false;
if (tymin > tmin)
tmin = tymin;
if (tmin > t_start && tmin < t_end)
return true;
if (tymax < tmax)
tmax = tymax;
if (tmax > t_start && tmax < t_end)
return true;
return false;
}
PX_CUDA_CALLABLE
GeometryObj * BaseBVH::hit(Ray const &ray,
PREC const &t_start,
PREC const &t_end,
Point &intersect) const
{
if (BaseBVH::hitBox(_vertex_min, _vertex_max, ray, t_start, t_end))
{
GeometryObj *obj = nullptr;
PREC end_range = t_end, hit_at;
for (auto i = 0; i < _n; ++i)
{
auto tmp = _geos[i]->hit(ray, t_start, end_range, hit_at);
if (tmp != nullptr)
{
end_range = hit_at;
obj = tmp;
}
}
if (obj)
{
intersect = ray.direction;
intersect *= end_range;
intersect += ray.original;
return obj;
}
}
return nullptr;
}
const BaseGeometry *BVH::hit(Ray const &ray,
PREC const &t_start,
PREC const &t_end,
Point &intersect) const
{
if (BaseBVH::hitBox(_vertex_min, _vertex_max, ray, t_start, t_end))
{
const BaseGeometry *obj = nullptr, *tmp;
PREC end_range = t_end, hit_at;
for (const auto &g : _geos)
{
tmp = g->hit(ray, t_start, end_range, hit_at);
if (tmp != nullptr)
{
end_range = hit_at;
obj = tmp;
}
}
if (obj)
{
intersect = ray.direction;
intersect *= end_range;
intersect += ray.original;
return obj;
}
}
return nullptr;
}
PX_CUDA_CALLABLE
bool BaseBVH::hit(Ray const &ray,
PREC const &t_start,
PREC const &t_end) const
{
if (BaseBVH::hitBox(_vertex_min, _vertex_max, ray, t_start, t_end))
{
PREC hit_at;
for (auto i = 0; i < _n; ++i)
{
if (_geos[i]->hit(ray, t_start, t_end, hit_at))
return true;
}
}
return false;
}
bool BVH::hit(Ray const &ray,
PREC const &t_start,
PREC const &t_end) const
{
if (BaseBVH::hitBox(_vertex_min, _vertex_max, ray, t_start, t_end))
{
PREC t;
for (const auto &g : _geos)
{
if (g->hit(ray, t_start, t_end, t))
return true;
}
}
return false;
}
BVH::BVH()
: _gpu_obj(nullptr), _gpu_geos(nullptr), _need_upload(true)
{}
BVH::~BVH()
{
#ifdef USE_ROCM
clearGpuData();
#endif
}
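// Lazily mirrors the BVH on the device: every child geometry is uploaded first,
// their device pointers are gathered into _gpu_geos, and a BaseBVH value that
// points at that array is finally copied into _gpu_obj. The _need_upload flag
// keeps repeated calls cheap until the geometry set changes again (see addObj).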
void BVH::up2Gpu()
{
#ifdef USE_ROCM
if (_need_upload)
{
if (_gpu_obj == nullptr)
{
PX_CUDA_CHECK(hipMalloc(&_gpu_obj, sizeof(BaseBVH)));
}
auto count = 0;
for (const auto &g : _geos)
{
if (g == nullptr)
continue;
g->up2Gpu();
++count;
}
BaseBVH bb(_vertex_min, _vertex_max);
auto ptr = new GeometryObj*[count];
bb._n = count;
for (const auto &g : _geos)
{
if (g == nullptr)
continue;
ptr[--count] = g->devPtr();
if (count == 0)
break;
}
if (_gpu_geos != nullptr)
{
PX_CUDA_CHECK(hipFree(_gpu_geos));
_gpu_geos = nullptr;
}
PX_CUDA_CHECK(hipMalloc(&_gpu_geos, sizeof(GeometryObj*)*bb._n));
PX_CUDA_CHECK(hipMemcpy(_gpu_geos, ptr, sizeof(GeometryObj*)*bb._n, hipMemcpyHostToDevice));
bb._geos = _gpu_geos;
PX_CUDA_CHECK(hipMemcpy(_gpu_obj, &bb, sizeof(BaseBVH), hipMemcpyHostToDevice));
delete[] ptr;
_need_upload = false;
}
#endif
}
void BVH::clearGpuData()
{
#ifdef USE_ROCM
if (_gpu_obj != nullptr)
{
for (const auto &g : _geos)
{
if (g.use_count() == 1)
g->clearGpuData();
}
PX_CUDA_CHECK(hipFree(_gpu_geos));
PX_CUDA_CHECK(hipFree(_gpu_obj));
_gpu_geos = nullptr;
_gpu_obj = nullptr;
}
_need_upload = true;
#endif
}
void BVH::addObj(std::shared_ptr<BaseGeometry> const &obj)
{
_geos.emplace(obj);
int n_vert;
auto vert = obj->rawVertices(n_vert);
if (_geos.size() == 1)
{
_vertex_min.x = vert[0].x;
_vertex_max.x = vert[0].x;
_vertex_min.y = vert[0].y;
_vertex_max.y = vert[0].y;
_vertex_min.z = vert[0].z;
_vertex_max.z = vert[0].z;
}
#define SET_VERT(v) \
if (v.x < _vertex_min.x) _vertex_min.x = v.x; \
if (v.x > _vertex_max.x) _vertex_max.x = v.x; \
if (v.y < _vertex_min.y) _vertex_min.y = v.y; \
if (v.y > _vertex_max.y) _vertex_max.y = v.y; \
if (v.z < _vertex_min.z) _vertex_min.z = v.z; \
if (v.z > _vertex_max.z) _vertex_max.z = v.z;
if (obj->transform() == nullptr)
{
for (auto i = 0; i < n_vert; ++i)
{
SET_VERT(vert[i])
}
}
else
{
for (auto i = 0; i < n_vert; ++i)
{
auto v = obj->transform()->pointFromObjCoord(vert[i]);
SET_VERT(v)
}
}
#ifdef USE_ROCM
_need_upload = true;
#endif
} | d3239181c3c0ac06d6c74d27319d5afd1b411f46.cu | #include "object/structure/bvh.hpp"
using namespace px;
BaseBVH::BaseBVH(Point const &vertex_min, Point const &vertex_max)
: _vertex_min(vertex_min), _vertex_max(vertex_max)
{}
PX_CUDA_CALLABLE
bool BaseBVH::hitBox(Point const &vertex_min,
Point const &vertex_max,
Ray const &ray,
PREC const &t_start,
PREC const &t_end)
{
if (ray.original.x > vertex_min.x-DOUBLE_EPSILON && ray.original.x < vertex_max.x+DOUBLE_EPSILON &&
ray.original.y > vertex_min.y-DOUBLE_EPSILON && ray.original.y < vertex_max.y+DOUBLE_EPSILON &&
ray.original.z > vertex_min.z-DOUBLE_EPSILON && ray.original.z < vertex_max.z+DOUBLE_EPSILON)
return true;
auto tmin = ((ray.direction.x < 0 ? vertex_max.x : vertex_min.x) - ray.original.x) / ray.direction.x;
auto tmax = ((ray.direction.x < 0 ? vertex_min.x : vertex_max.x) - ray.original.x) / ray.direction.x;
auto tymin = ((ray.direction.y < 0 ? vertex_max.y : vertex_min.y) - ray.original.y) / ray.direction.y;
auto tymax = ((ray.direction.y < 0 ? vertex_min.y : vertex_max.y) - ray.original.y) / ray.direction.y;
if (tmin > tymax || tymin > tmax)
return false;
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
tymin = ((ray.direction.z < 0 ? vertex_max.z : vertex_min.z) - ray.original.z) / ray.direction.z;
tymax = ((ray.direction.z < 0 ? vertex_min.z : vertex_max.z) - ray.original.z) / ray.direction.z;
if (tmin > tymax || tymin > tmax)
return false;
if (tymin > tmin)
tmin = tymin;
if (tmin > t_start && tmin < t_end)
return true;
if (tymax < tmax)
tmax = tymax;
if (tmax > t_start && tmax < t_end)
return true;
return false;
}
PX_CUDA_CALLABLE
GeometryObj * BaseBVH::hit(Ray const &ray,
PREC const &t_start,
PREC const &t_end,
Point &intersect) const
{
if (BaseBVH::hitBox(_vertex_min, _vertex_max, ray, t_start, t_end))
{
GeometryObj *obj = nullptr;
PREC end_range = t_end, hit_at;
for (auto i = 0; i < _n; ++i)
{
auto tmp = _geos[i]->hit(ray, t_start, end_range, hit_at);
if (tmp != nullptr)
{
end_range = hit_at;
obj = tmp;
}
}
if (obj)
{
intersect = ray.direction;
intersect *= end_range;
intersect += ray.original;
return obj;
}
}
return nullptr;
}
const BaseGeometry *BVH::hit(Ray const &ray,
PREC const &t_start,
PREC const &t_end,
Point &intersect) const
{
if (BaseBVH::hitBox(_vertex_min, _vertex_max, ray, t_start, t_end))
{
const BaseGeometry *obj = nullptr, *tmp;
PREC end_range = t_end, hit_at;
for (const auto &g : _geos)
{
tmp = g->hit(ray, t_start, end_range, hit_at);
if (tmp != nullptr)
{
end_range = hit_at;
obj = tmp;
}
}
if (obj)
{
intersect = ray.direction;
intersect *= end_range;
intersect += ray.original;
return obj;
}
}
return nullptr;
}
PX_CUDA_CALLABLE
bool BaseBVH::hit(Ray const &ray,
PREC const &t_start,
PREC const &t_end) const
{
if (BaseBVH::hitBox(_vertex_min, _vertex_max, ray, t_start, t_end))
{
PREC hit_at;
for (auto i = 0; i < _n; ++i)
{
if (_geos[i]->hit(ray, t_start, t_end, hit_at))
return true;
}
}
return false;
}
bool BVH::hit(Ray const &ray,
PREC const &t_start,
PREC const &t_end) const
{
if (BaseBVH::hitBox(_vertex_min, _vertex_max, ray, t_start, t_end))
{
PREC t;
for (const auto &g : _geos)
{
if (g->hit(ray, t_start, t_end, t))
return true;
}
}
return false;
}
BVH::BVH()
: _gpu_obj(nullptr), _gpu_geos(nullptr), _need_upload(true)
{}
BVH::~BVH()
{
#ifdef USE_CUDA
clearGpuData();
#endif
}
void BVH::up2Gpu()
{
#ifdef USE_CUDA
if (_need_upload)
{
if (_gpu_obj == nullptr)
{
PX_CUDA_CHECK(cudaMalloc(&_gpu_obj, sizeof(BaseBVH)));
}
auto count = 0;
for (const auto &g : _geos)
{
if (g == nullptr)
continue;
g->up2Gpu();
++count;
}
BaseBVH bb(_vertex_min, _vertex_max);
auto ptr = new GeometryObj*[count];
bb._n = count;
for (const auto &g : _geos)
{
if (g == nullptr)
continue;
ptr[--count] = g->devPtr();
if (count == 0)
break;
}
if (_gpu_geos != nullptr)
{
PX_CUDA_CHECK(cudaFree(_gpu_geos));
_gpu_geos = nullptr;
}
PX_CUDA_CHECK(cudaMalloc(&_gpu_geos, sizeof(GeometryObj*)*bb._n));
PX_CUDA_CHECK(cudaMemcpy(_gpu_geos, ptr, sizeof(GeometryObj*)*bb._n, cudaMemcpyHostToDevice));
bb._geos = _gpu_geos;
PX_CUDA_CHECK(cudaMemcpy(_gpu_obj, &bb, sizeof(BaseBVH), cudaMemcpyHostToDevice));
delete[] ptr;
_need_upload = false;
}
#endif
}
void BVH::clearGpuData()
{
#ifdef USE_CUDA
if (_gpu_obj != nullptr)
{
for (const auto &g : _geos)
{
if (g.use_count() == 1)
g->clearGpuData();
}
PX_CUDA_CHECK(cudaFree(_gpu_geos));
PX_CUDA_CHECK(cudaFree(_gpu_obj));
_gpu_geos = nullptr;
_gpu_obj = nullptr;
}
_need_upload = true;
#endif
}
void BVH::addObj(std::shared_ptr<BaseGeometry> const &obj)
{
_geos.emplace(obj);
int n_vert;
auto vert = obj->rawVertices(n_vert);
if (_geos.size() == 1)
{
_vertex_min.x = vert[0].x;
_vertex_max.x = vert[0].x;
_vertex_min.y = vert[0].y;
_vertex_max.y = vert[0].y;
_vertex_min.z = vert[0].z;
_vertex_max.z = vert[0].z;
}
#define SET_VERT(v) \
if (v.x < _vertex_min.x) _vertex_min.x = v.x; \
if (v.x > _vertex_max.x) _vertex_max.x = v.x; \
if (v.y < _vertex_min.y) _vertex_min.y = v.y; \
if (v.y > _vertex_max.y) _vertex_max.y = v.y; \
if (v.z < _vertex_min.z) _vertex_min.z = v.z; \
if (v.z > _vertex_max.z) _vertex_max.z = v.z;
if (obj->transform() == nullptr)
{
for (auto i = 0; i < n_vert; ++i)
{
SET_VERT(vert[i])
}
}
else
{
for (auto i = 0; i < n_vert; ++i)
{
auto v = obj->transform()->pointFromObjCoord(vert[i]);
SET_VERT(v)
}
}
#ifdef USE_CUDA
_need_upload = true;
#endif
} |
92280b15ca05f87d62a865298503bc974c5214b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <test_utils.h>
#include <linalg/batched/gemv.cuh>
#include <random/rng.cuh>
#include "../test_utils.h"
namespace MLCommon {
namespace LinAlg {
namespace Batched {
template <typename T>
struct BatchGemvInputs {
T tolerance;
int m, n, batchSize;
unsigned long long int seed;
};
template <typename T, typename IdxType = int>
::std::ostream &operator<<(::std::ostream &os, const BatchGemvInputs<T> &dims) {
return os;
}
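// Reference implementation used as ground truth by the test below: one block per
// (row, batch) pair and one thread per column, each thread atomically accumulating
// A[batch][row][col] * x[batch][col] into y[batch][row], i.e. a plain batched
// y = A * x.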
template <typename Type>
__global__ void naiveBatchGemvKernel(Type *y, const Type *A, const Type *x,
int m, int n) {
int batch = blockIdx.y;
int row = blockIdx.x;
int col = threadIdx.x;
if (row < m && col < n) {
auto prod = A[batch * m * n + row * n + col] * x[batch * n + col];
raft::myAtomicAdd(y + batch * m + row, prod);
}
}
template <typename Type>
void naiveBatchGemv(Type *y, const Type *A, const Type *x, int m, int n,
int batchSize, hipStream_t stream) {
static int TPB = raft::ceildiv(n, raft::WarpSize) * raft::WarpSize;
dim3 nblks(m, batchSize);
hipLaunchKernelGGL(( naiveBatchGemvKernel<Type>), dim3(nblks), dim3(TPB), 0, stream, y, A, x, m, n);
CUDA_CHECK(hipPeekAtLastError());
}
template <typename T>
class BatchGemvTest : public ::testing::TestWithParam<BatchGemvInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<BatchGemvInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
int len = params.batchSize * params.m * params.n;
int vecleny = params.batchSize * params.m;
int veclenx = params.batchSize * params.n;
CUDA_CHECK(hipStreamCreate(&stream));
raft::allocate(A, len);
raft::allocate(x, veclenx);
raft::allocate(out_ref, vecleny);
raft::allocate(out, vecleny);
r.uniform(A, len, T(-1.0), T(1.0), stream);
r.uniform(x, veclenx, T(-1.0), T(1.0), stream);
CUDA_CHECK(hipMemsetAsync(out_ref, 0, sizeof(T) * vecleny, stream));
naiveBatchGemv(out_ref, A, x, params.m, params.n, params.batchSize, stream);
gemv<T, int>(out, A, x, nullptr, T(1.0), T(0.0), params.m, params.n,
params.batchSize, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
}
void TearDown() override {
CUDA_CHECK(hipFree(A));
CUDA_CHECK(hipFree(x));
CUDA_CHECK(hipFree(out_ref));
CUDA_CHECK(hipFree(out));
CUDA_CHECK(hipStreamSynchronize(stream));
CUDA_CHECK(hipStreamDestroy(stream));
}
protected:
hipStream_t stream;
BatchGemvInputs<T> params;
T *A, *x, *out_ref, *out;
};
const std::vector<BatchGemvInputs<float>> inputsf = {
{0.005f, 128, 128, 32, 1234ULL}, {0.005f, 128, 126, 32, 1234ULL},
{0.005f, 128, 125, 32, 1234ULL}, {0.005f, 126, 128, 32, 1234ULL},
{0.005f, 126, 126, 32, 1234ULL}, {0.005f, 126, 125, 32, 1234ULL},
{0.005f, 125, 128, 32, 1234ULL}, {0.005f, 125, 126, 32, 1234ULL},
{0.005f, 125, 125, 32, 1234ULL},
};
typedef BatchGemvTest<float> BatchGemvTestF;
TEST_P(BatchGemvTestF, Result) {
int vecleny = params.batchSize * params.m;
ASSERT_TRUE(devArrMatch(out_ref, out, vecleny,
raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(BatchGemvTests, BatchGemvTestF,
::testing::ValuesIn(inputsf));
typedef BatchGemvTest<double> BatchGemvTestD;
const std::vector<BatchGemvInputs<double>> inputsd = {
{0.0000001, 128, 128, 32, 1234ULL}, {0.0000001, 128, 126, 32, 1234ULL},
{0.0000001, 128, 125, 32, 1234ULL}, {0.0000001, 126, 128, 32, 1234ULL},
{0.0000001, 126, 126, 32, 1234ULL}, {0.0000001, 126, 125, 32, 1234ULL},
{0.0000001, 125, 128, 32, 1234ULL}, {0.0000001, 125, 126, 32, 1234ULL},
{0.0000001, 125, 125, 32, 1234ULL},
};
TEST_P(BatchGemvTestD, Result) {
int vecleny = params.batchSize * params.m;
ASSERT_TRUE(devArrMatch(out_ref, out, vecleny,
raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(BatchGemvTests, BatchGemvTestD,
::testing::ValuesIn(inputsd));
} // end namespace Batched
} // end namespace LinAlg
} // end namespace MLCommon
| 92280b15ca05f87d62a865298503bc974c5214b2.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <test_utils.h>
#include <linalg/batched/gemv.cuh>
#include <random/rng.cuh>
#include "../test_utils.h"
namespace MLCommon {
namespace LinAlg {
namespace Batched {
template <typename T>
struct BatchGemvInputs {
T tolerance;
int m, n, batchSize;
unsigned long long int seed;
};
template <typename T, typename IdxType = int>
::std::ostream &operator<<(::std::ostream &os, const BatchGemvInputs<T> &dims) {
return os;
}
template <typename Type>
__global__ void naiveBatchGemvKernel(Type *y, const Type *A, const Type *x,
int m, int n) {
int batch = blockIdx.y;
int row = blockIdx.x;
int col = threadIdx.x;
if (row < m && col < n) {
auto prod = A[batch * m * n + row * n + col] * x[batch * n + col];
raft::myAtomicAdd(y + batch * m + row, prod);
}
}
template <typename Type>
void naiveBatchGemv(Type *y, const Type *A, const Type *x, int m, int n,
int batchSize, cudaStream_t stream) {
static int TPB = raft::ceildiv(n, raft::WarpSize) * raft::WarpSize;
dim3 nblks(m, batchSize);
naiveBatchGemvKernel<Type><<<nblks, TPB, 0, stream>>>(y, A, x, m, n);
CUDA_CHECK(cudaPeekAtLastError());
}
template <typename T>
class BatchGemvTest : public ::testing::TestWithParam<BatchGemvInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<BatchGemvInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
int len = params.batchSize * params.m * params.n;
int vecleny = params.batchSize * params.m;
int veclenx = params.batchSize * params.n;
CUDA_CHECK(cudaStreamCreate(&stream));
raft::allocate(A, len);
raft::allocate(x, veclenx);
raft::allocate(out_ref, vecleny);
raft::allocate(out, vecleny);
r.uniform(A, len, T(-1.0), T(1.0), stream);
r.uniform(x, veclenx, T(-1.0), T(1.0), stream);
CUDA_CHECK(cudaMemsetAsync(out_ref, 0, sizeof(T) * vecleny, stream));
naiveBatchGemv(out_ref, A, x, params.m, params.n, params.batchSize, stream);
gemv<T, int>(out, A, x, nullptr, T(1.0), T(0.0), params.m, params.n,
params.batchSize, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
}
void TearDown() override {
CUDA_CHECK(cudaFree(A));
CUDA_CHECK(cudaFree(x));
CUDA_CHECK(cudaFree(out_ref));
CUDA_CHECK(cudaFree(out));
CUDA_CHECK(cudaStreamSynchronize(stream));
CUDA_CHECK(cudaStreamDestroy(stream));
}
protected:
cudaStream_t stream;
BatchGemvInputs<T> params;
T *A, *x, *out_ref, *out;
};
const std::vector<BatchGemvInputs<float>> inputsf = {
{0.005f, 128, 128, 32, 1234ULL}, {0.005f, 128, 126, 32, 1234ULL},
{0.005f, 128, 125, 32, 1234ULL}, {0.005f, 126, 128, 32, 1234ULL},
{0.005f, 126, 126, 32, 1234ULL}, {0.005f, 126, 125, 32, 1234ULL},
{0.005f, 125, 128, 32, 1234ULL}, {0.005f, 125, 126, 32, 1234ULL},
{0.005f, 125, 125, 32, 1234ULL},
};
typedef BatchGemvTest<float> BatchGemvTestF;
TEST_P(BatchGemvTestF, Result) {
int vecleny = params.batchSize * params.m;
ASSERT_TRUE(devArrMatch(out_ref, out, vecleny,
raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(BatchGemvTests, BatchGemvTestF,
::testing::ValuesIn(inputsf));
typedef BatchGemvTest<double> BatchGemvTestD;
const std::vector<BatchGemvInputs<double>> inputsd = {
{0.0000001, 128, 128, 32, 1234ULL}, {0.0000001, 128, 126, 32, 1234ULL},
{0.0000001, 128, 125, 32, 1234ULL}, {0.0000001, 126, 128, 32, 1234ULL},
{0.0000001, 126, 126, 32, 1234ULL}, {0.0000001, 126, 125, 32, 1234ULL},
{0.0000001, 125, 128, 32, 1234ULL}, {0.0000001, 125, 126, 32, 1234ULL},
{0.0000001, 125, 125, 32, 1234ULL},
};
TEST_P(BatchGemvTestD, Result) {
int vecleny = params.batchSize * params.m;
ASSERT_TRUE(devArrMatch(out_ref, out, vecleny,
raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(BatchGemvTests, BatchGemvTestD,
::testing::ValuesIn(inputsd));
} // end namespace Batched
} // end namespace LinAlg
} // end namespace MLCommon
|
5cae5b7caf4b9c867a53f02b6b387c61eee74cf1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file dnn/src/cuda/group_local/forward/kern.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*/
#include "src/cuda/group_local/forward/kern.cuh"
#include "src/cuda/utils.cuh"
using namespace megdnn;
using namespace cuda;
namespace {
constexpr size_t NB = 4, ICB = 4;
// src layout is (N, G, IC, IH, IW)
// filter layout is (G, OH, OW, IC, FH, FW, OC)
// dst layout is (N, G, OC, OH, OW)
// NR_THREADS is 256
// gridDim.z is G
// gridDim.y is OC*OH*OW/NR_THREADS
// gridDim.x is N/NB
// blockDim.x is NR_THREADS
// INs and ONs are the stride on the src/dst batch size dim
// IC and OC are nr. channels per group
// Each thread tackles with NB (actually NB_cur if non-multiple-of-NB N is
// considered). Let oid = blockIdx.y*NR_THREADS + threadIdx.x (global thread ID
// along block axis y), and we flatten (OC, OH, OW) into one dimension, then
// each thread calculates the answer at dst position (n, blockIdx.z, oid), where
// n ranges from blockDim.x*NB + 0 to blockDim.x*NB + (NB-1). IC is processed at
// stride of ICB. On entrance of each iteration of the loop, NB * ICB spatial
// src planes are loaded into shared memory (presumably src spatial size is
// small).
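// Worked example (illustrative numbers only): with NR_THREADS = 256, G = 8,
// OC*OH*OW = 1000 and N = 6, the exec() below launches grid = (2, 4, 8);
// blockIdx.x = 1 runs with NB_cur = min(6 - 4, 4) = 2, and a thread with
// blockIdx.y = 3, threadIdx.x = 240 gets oid = 1008 >= 1000, so it still helps
// stage src into shared memory but skips the accumulation and the final store.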
template <uint32_t NB, uint32_t ICB, bool is_xcorr>
__global__ void forward_kernel(
const float* __restrict__ src, const float* __restrict__ filter,
float* __restrict__ dst, uint32_t N, uint32_t IC, uint32_t IH, uint32_t IW,
uint32_t OC, uint32_t OH, uint32_t OW, uint32_t FH, uint32_t FW, uint32_t INs,
uint32_t ONs, uint32_t PH, uint32_t PW, uint32_t SH, uint32_t SW) {
// NB * ICB * sizeof(float) * IH * IW
extern __shared__ float shared_mem[];
float* src_cache = shared_mem;
uint32_t tid = threadIdx.x;
uint32_t tstride = blockDim.x;
uint32_t oid = tid + blockIdx.y * tstride;
src += blockIdx.x * NB * INs + blockIdx.z * IC * IH * IW;
dst += blockIdx.x * NB * ONs + blockIdx.z * OC * OH * OW;
filter += blockIdx.z * OH * OW * IC * FH * FW * OC;
uint32_t op = oid / OC;
uint32_t oc = oid % OC;
uint32_t oh = op / OW;
uint32_t ow = op % OW;
float dst_reg[NB];
for (uint32_t nb = 0; nb < NB; ++nb)
dst_reg[nb] = 0.0f;
uint32_t NB_cur = min(N - blockIdx.x * NB, NB);
for (uint32_t ic = 0; ic < IC; ic += ICB) {
// read ICB-channel src
// (NB, ICB, IHs, IWs)
uint32_t ICB_cur = min(ICB, IC - ic);
for (uint32_t i = tid; i < NB_cur * ICB * IH * IW; i += tstride) {
uint32_t ip = i % (IH * IW);
uint32_t icb = i / (IH * IW) % ICB;
uint32_t nb = i / (IH * IW) / ICB;
src_cache[i] = (icb < ICB_cur) *
src[nb * INs + min(IC - 1, (ic + icb)) * IH * IW + ip];
}
__syncthreads();
if (oid < OC * OH * OW)
for (uint32_t fh = 0; fh < FH; ++fh) {
uint32_t ih;
if (is_xcorr)
ih = oh * SH + fh - PH;
else
ih = oh * SH + (FH - fh - 1) - PH;
if (ih < IH)
for (uint32_t fw = 0; fw < FW; ++fw) {
uint32_t iw;
if (is_xcorr)
iw = ow * SW + fw - PW;
else
iw = ow * SW + (FW - fw - 1) - PW;
if (iw < IW)
for (uint32_t icb = 0; icb < ICB_cur; ++icb) {
uint32_t fid = op * IC * FH * FW * OC +
(ic + icb) * FH * FW * OC +
fh * FW * OC + fw * OC + oc;
float fval = filter[fid];
float src_reg[NB];
#pragma unroll
for (uint32_t nb = 0; nb < NB; ++nb) {
src_reg[nb] = src_cache
[nb * ICB * IH * IW + icb * IH * IW +
ih * IW + iw];
}
#pragma unroll
for (uint32_t nb = 0; nb < NB; ++nb) {
dst_reg[nb] += src_reg[nb] * fval;
}
}
}
}
__syncthreads();
}
if (oid < OC * OH * OW) {
for (uint32_t nb = 0; nb < NB_cur; ++nb) {
dst[nb * ONs + oc * OH * OW + op] = dst_reg[nb];
}
}
}
} // namespace
void group_local::exec(
const float* src, const float* filter, float* dst, float* wptr, uint32_t N,
uint32_t IC, uint32_t IH, uint32_t IW, uint32_t OC, uint32_t OH, uint32_t OW,
uint32_t FH, uint32_t FW, uint32_t G, uint32_t PH, uint32_t PW, uint32_t SH,
uint32_t SW, hipStream_t stream) {
MEGDNN_MARK_USED_VAR(wptr);
size_t threads = 256;
dim3 blocks = dim3(DIVUP(N, NB), DIVUP(OC * OH * OW, threads), G);
uint32_t INs = G * IC * IH * IW, ONs = G * OC * OH * OW;
hipLaunchKernelGGL(( forward_kernel<NB, ICB, true>)
, dim3(blocks), dim3(threads), NB * ICB * sizeof(float) * IH * IW, stream,
src, filter, dst, N, IC, IH, IW, OC, OH, OW, FH, FW, INs, ONs, PH,
PW, SH, SW);
after_kernel_launch();
}
size_t group_local::get_share_mem_in_bytes(uint32_t IH, uint32_t IW) {
return NB * ICB * sizeof(float) * IH * IW;
}
| 5cae5b7caf4b9c867a53f02b6b387c61eee74cf1.cu | /**
* \file dnn/src/cuda/group_local/forward/kern.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*/
#include "src/cuda/group_local/forward/kern.cuh"
#include "src/cuda/utils.cuh"
using namespace megdnn;
using namespace cuda;
namespace {
constexpr size_t NB = 4, ICB = 4;
// src layout is (N, G, IC, IH, IW)
// filter layout is (G, OH, OW, IC, FH, FW, OC)
// dst layout is (N, G, OC, OH, OW)
// NR_THREADS is 256
// gridDim.z is G
// gridDim.y is OC*OH*OW/NR_THREADS
// gridDim.x is N/NB
// blockDim.x is NR_THREADS
// INs and ONs are the stride on the src/dst batch size dim
// IC and OC are nr. channels per group
// Each thread tackles with NB (actually NB_cur if non-multiple-of-NB N is
// considered). Let oid = blockIdx.y*NR_THREADS + threadIdx.x (global thread ID
// along block axis y), and we flatten (OC, OH, OW) into one dimension, then
// each thread calculates the answer at dst position (n, blockIdx.z, oid), where
// n ranges from blockDim.x*NB + 0 to blockDim.x*NB + (NB-1). IC is processed at
// stride of ICB. On entrance of each iteration of the loop, NB * ICB spatial
// src planes are loaded into shared memory (presumably src spatial size is
// small).
template <uint32_t NB, uint32_t ICB, bool is_xcorr>
__global__ void forward_kernel(
const float* __restrict__ src, const float* __restrict__ filter,
float* __restrict__ dst, uint32_t N, uint32_t IC, uint32_t IH, uint32_t IW,
uint32_t OC, uint32_t OH, uint32_t OW, uint32_t FH, uint32_t FW, uint32_t INs,
uint32_t ONs, uint32_t PH, uint32_t PW, uint32_t SH, uint32_t SW) {
// NB * ICB * sizeof(float) * IH * IW
extern __shared__ float shared_mem[];
float* src_cache = shared_mem;
uint32_t tid = threadIdx.x;
uint32_t tstride = blockDim.x;
uint32_t oid = tid + blockIdx.y * tstride;
src += blockIdx.x * NB * INs + blockIdx.z * IC * IH * IW;
dst += blockIdx.x * NB * ONs + blockIdx.z * OC * OH * OW;
filter += blockIdx.z * OH * OW * IC * FH * FW * OC;
uint32_t op = oid / OC;
uint32_t oc = oid % OC;
uint32_t oh = op / OW;
uint32_t ow = op % OW;
float dst_reg[NB];
for (uint32_t nb = 0; nb < NB; ++nb)
dst_reg[nb] = 0.0f;
uint32_t NB_cur = min(N - blockIdx.x * NB, NB);
for (uint32_t ic = 0; ic < IC; ic += ICB) {
// read ICB-channel src
// (NB, ICB, IHs, IWs)
uint32_t ICB_cur = min(ICB, IC - ic);
for (uint32_t i = tid; i < NB_cur * ICB * IH * IW; i += tstride) {
uint32_t ip = i % (IH * IW);
uint32_t icb = i / (IH * IW) % ICB;
uint32_t nb = i / (IH * IW) / ICB;
src_cache[i] = (icb < ICB_cur) *
src[nb * INs + min(IC - 1, (ic + icb)) * IH * IW + ip];
}
__syncthreads();
if (oid < OC * OH * OW)
for (uint32_t fh = 0; fh < FH; ++fh) {
uint32_t ih;
if (is_xcorr)
ih = oh * SH + fh - PH;
else
ih = oh * SH + (FH - fh - 1) - PH;
if (ih < IH)
for (uint32_t fw = 0; fw < FW; ++fw) {
uint32_t iw;
if (is_xcorr)
iw = ow * SW + fw - PW;
else
iw = ow * SW + (FW - fw - 1) - PW;
if (iw < IW)
for (uint32_t icb = 0; icb < ICB_cur; ++icb) {
uint32_t fid = op * IC * FH * FW * OC +
(ic + icb) * FH * FW * OC +
fh * FW * OC + fw * OC + oc;
float fval = filter[fid];
float src_reg[NB];
#pragma unroll
for (uint32_t nb = 0; nb < NB; ++nb) {
src_reg[nb] = src_cache
[nb * ICB * IH * IW + icb * IH * IW +
ih * IW + iw];
}
#pragma unroll
for (uint32_t nb = 0; nb < NB; ++nb) {
dst_reg[nb] += src_reg[nb] * fval;
}
}
}
}
__syncthreads();
}
if (oid < OC * OH * OW) {
for (uint32_t nb = 0; nb < NB_cur; ++nb) {
dst[nb * ONs + oc * OH * OW + op] = dst_reg[nb];
}
}
}
} // namespace
void group_local::exec(
const float* src, const float* filter, float* dst, float* wptr, uint32_t N,
uint32_t IC, uint32_t IH, uint32_t IW, uint32_t OC, uint32_t OH, uint32_t OW,
uint32_t FH, uint32_t FW, uint32_t G, uint32_t PH, uint32_t PW, uint32_t SH,
uint32_t SW, cudaStream_t stream) {
MEGDNN_MARK_USED_VAR(wptr);
size_t threads = 256;
dim3 blocks = dim3(DIVUP(N, NB), DIVUP(OC * OH * OW, threads), G);
uint32_t INs = G * IC * IH * IW, ONs = G * OC * OH * OW;
forward_kernel<NB, ICB, true>
<<<blocks, threads, NB * ICB * sizeof(float) * IH * IW, stream>>>(
src, filter, dst, N, IC, IH, IW, OC, OH, OW, FH, FW, INs, ONs, PH,
PW, SH, SW);
after_kernel_launch();
}
size_t group_local::get_share_mem_in_bytes(uint32_t IH, uint32_t IW) {
return NB * ICB * sizeof(float) * IH * IW;
}
|
5f7473ec95cfc22ad4bb265ab36d5c17c720684f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
__global__ void vec_fdim (int n, double *result, double *x, double *y)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int id = idy * gridDim.x * blockDim.x + idx;
if (id < n)
{
result[id] = fdim(x[id], y[id]);
}
} | 5f7473ec95cfc22ad4bb265ab36d5c17c720684f.cu | #include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
__global__ void vec_fdim (int n, double *result, double *x, double *y)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int id = idy * gridDim.x * blockDim.x + idx;
if (id < n)
{
result[id] = fdim(x[id], y[id]);
}
} |
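//Minimal host-side launch sketch (illustrative only: the 16x16 block shape and the
//flat one-dimensional grid are assumptions, not part of the generated file):
void launch_vec_fdim (int n, double *d_result, double *d_x, double *d_y)
{
dim3 block(16, 16);
int nblocks = (n + 255) / 256; // 256 = block.x * block.y threads per block
dim3 grid(nblocks, 1);
// id = idy * gridDim.x * blockDim.x + idx then covers [0, 256 * nblocks) exactly once
vec_fdim<<<grid, block>>>(n, d_result, d_x, d_y);
}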
6292a9cd442bcf4fcbb0302ef4b025b9e4b3bc1a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "CHECK.h"
#include "defs.h"
/*
Uses the GPU to perform a find Waldo on the input map.
Fills the locationType object with the location of the
waldo that it found.
@param map - one dimensional array to implement the 2D N by N array
containing the waldos
@param N - size of 1 dimension
@param gpufound - struct that should be filled with the locations
of the waldos
gpu->indices - filled with row and col of each waldo
waldo positions are added to the array in the
order of row then column
gpu->size - size of indices array
gpu->count - number of elements in the array
2 * number of waldos at end
In this case, the number of waldos in the map will be exactly one,
thus the gpuFindWaldo function will set gpufound->count to 2,
gpufound->indices[0] to the row position of the waldo, and
gpufound->indices[1] to the col position of the waldo
@return amount of time it takes to find waldo in millisecond
*/
float gpuFindWaldo(unsigned char * map, int N, locationType * gpufound)
{
unsigned char * dMap;
//create input array for GPU
CHECK(hipMalloc((void **)&dMap, sizeof(unsigned char) * N * N));
CHECK(hipMemcpy(dMap, map, sizeof(unsigned char) * N * N,
hipMemcpyHostToDevice));
//You may hipMalloc some more space here that you will need
//before the timing begins.
float gpuMsecTime = -1;
hipEvent_t start_cpu, stop_cpu;
//start the timing
CHECK(hipEventCreate(&start_cpu));
CHECK(hipEventCreate(&stop_cpu));
CHECK(hipEventRecord(start_cpu));
//Write the findWaldo function.
//Before exiting gpuFindWaldo, your code
//will need to have filled the gpufound struct.
//You can either do that here or in your findWaldo
//function.
//findWaldo(....);
//stop the timing
CHECK(hipEventRecord(stop_cpu));
CHECK(hipEventSynchronize(stop_cpu));
CHECK(hipEventElapsedTime(&gpuMsecTime, start_cpu, stop_cpu));
CHECK(hipFree(dMap));
//free any other spaces you allocated
return gpuMsecTime;
}
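//Illustrative device kernel sketch (not part of the assignment skeleton above; it
//assumes a waldo is marked by a nonzero byte -- see defs.h for the real encoding --
//and that the map contains exactly one waldo, as stated in the comment block):
__global__ void findWaldoSketch(unsigned char * map, int N, int * dRowCol)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N * N && map[idx] != 0)
{
dRowCol[0] = idx / N; //row of the single waldo
dRowCol[1] = idx % N; //col of the single waldo
}
}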
| 6292a9cd442bcf4fcbb0302ef4b025b9e4b3bc1a.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include "CHECK.h"
#include "defs.h"
/*
Uses the GPU to perform a find Waldo on the input map.
Fills the locationType object with the location of the
waldo that it found.
@param map - one dimensional array to implement the 2D N by N array
containing the waldos
@param N - size of 1 dimension
@param gpufound - struct that should be filled with the locations
of the waldos
gpu->indices - filled with row and col of each waldo
waldo positions are added to the array in the
order of row then column
gpu->size - size of indices array
gpu->count - number of elements in the array
2 * number of waldos at end
In this case, the number of waldos in the map will be exactly one,
thus the gpuFindWaldo function will set gpufound->count to 2,
gpufound->indices[0] to the row position of the waldo, and
gpufound->indices[1] to the col position of the waldo
@return amount of time it takes to find waldo in millisecond
*/
float gpuFindWaldo(unsigned char * map, int N, locationType * gpufound)
{
unsigned char * dMap;
//create input array for GPU
CHECK(cudaMalloc((void **)&dMap, sizeof(unsigned char) * N * N));
CHECK(cudaMemcpy(dMap, map, sizeof(unsigned char) * N * N,
cudaMemcpyHostToDevice));
//You may cudaMalloc some more space here that you will need
//before the timing begins.
float gpuMsecTime = -1;
cudaEvent_t start_cpu, stop_cpu;
//start the timing
CHECK(cudaEventCreate(&start_cpu));
CHECK(cudaEventCreate(&stop_cpu));
CHECK(cudaEventRecord(start_cpu));
//Write the findWaldo function.
//Before exiting gpuFindWaldo, your code
//will need to have filled the gpufound struct.
//You can either do that here or in your findWaldo
//function.
//findWaldo(....);
//stop the timing
CHECK(cudaEventRecord(stop_cpu));
CHECK(cudaEventSynchronize(stop_cpu));
CHECK(cudaEventElapsedTime(&gpuMsecTime, start_cpu, stop_cpu));
CHECK(cudaFree(dMap));
//free any other spaces you allocated
return gpuMsecTime;
}
|
b133380684f739aa1a34031a1466dde12c7d3099.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
/*** Forward ***/
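// Samples one HxW channel plane of bottom_data at the fractional location (y, x):
// points outside [-1, height] x [-1, width] contribute 0, coordinates are clamped
// to the plane, and the result is the area-weighted average of the four
// neighbouring pixels (the weights w1..w4 below sum to 1).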
__device__ float bilinear_interpolate(const float* bottom_data, const int height, const int width,
float y, float x, const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (float)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (float)x_low;
} else {
x_high = x_low + 1;
}
float ly = y - y_low;
float lx = x - x_low;
float hy = 1. -ly, hx = 1. - lx;
// do bilinear interpolation
float v1 = bottom_data[y_low * width + x_low];
float v2 = bottom_data[y_low * width + x_high];
float v3 = bottom_data[y_high * width + x_low];
float v4 = bottom_data[y_high * width + x_high];
float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
__global__ void ROIAlignForward(const int nthreads, const float* bottom_data, const float spatial_scale,
const int channels, const int height, const int width,
const int aligned_height, const int aligned_width, const int sampling_ratio,
const float* bottom_rois, float* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the aligned output
int pw = index % aligned_width;
int ph = (index / aligned_width) % aligned_height;
int c = (index / aligned_width / aligned_height) % channels;
int n = index / aligned_width / aligned_height / channels;
const float* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
float roi_start_w = offset_bottom_rois[1] * spatial_scale;
float roi_start_h = offset_bottom_rois[2] * spatial_scale;
float roi_end_w = offset_bottom_rois[3] * spatial_scale;
float roi_end_h = offset_bottom_rois[4] * spatial_scale;
// Force malformed ROIs to be 1x1
float roi_width = fmaxf(roi_end_w - roi_start_w, 1.f);
float roi_height = fmaxf(roi_end_h - roi_start_h, 1.f);
float bin_size_h = roi_height / aligned_height;
float bin_size_w = roi_width / aligned_width;
const float* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / aligned_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / aligned_width);
// We do average (integral) pooling inside a bin
const float count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
float output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const float y = roi_start_h + ph * bin_size_h +
(iy + .5f) * bin_size_h / roi_bin_grid_h; // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const float x = roi_start_w + pw * bin_size_w +
(ix + .5f) * bin_size_w / roi_bin_grid_w;
float val = bilinear_interpolate(
offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
/*** Backward ***/
inline __device__ float gpu_atomic_add(const float val, float* address);
inline __device__ float gpu_atomic_add(const float val, float* address) {
return atomicAdd(address, val);
}
__device__ void bilinear_interpolate_gradient(const int height, const int width, float y, float x,
float& w1, float& w2, float& w3, float& w4,
int& x_low, int& x_high, int& y_low, int& y_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
y_low = (int)y;
x_low = (int)x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (float)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (float)x_low;
} else {
x_high = x_low + 1;
}
float ly = y - y_low;
float lx = x - x_low;
float hy = 1. - ly, hx = 1. - lx;
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
__global__ void ROIAlignBackward(const int nthreads, const float* top_diff, const float spatial_scale,
const int channels, const int height, const int width,
const int aligned_height, const int aligned_width, const int sampling_ratio,
float* bottom_diff, const float* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the aligned output
int pw = index % aligned_width;
int ph = (index / aligned_width) % aligned_height;
int c = (index / aligned_width / aligned_height) % channels;
int n = index / aligned_width / aligned_height / channels;
const float* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
float roi_start_w = offset_bottom_rois[1] * spatial_scale;
float roi_start_h = offset_bottom_rois[2] * spatial_scale;
float roi_end_w = offset_bottom_rois[3] * spatial_scale;
float roi_end_h = offset_bottom_rois[4] * spatial_scale;
// Force malformed ROIs to be 1x1
float roi_width = fmaxf(roi_end_w - roi_start_w, 1.f);
float roi_height = fmaxf(roi_end_h - roi_start_h, 1.f);
float bin_size_h = roi_height / aligned_height;
float bin_size_w = roi_width / aligned_width;
float* offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * aligned_height * aligned_width;
const float* offset_top_diff = top_diff + top_offset;
const float top_diff_this_bin = offset_top_diff[ph * aligned_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / aligned_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / aligned_width);
// We do average (integral) pooling inside a bin
const float count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const float y = roi_start_h + ph * bin_size_h +
(iy + .5f) * bin_size_h / roi_bin_grid_h; // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const float x = roi_start_w + pw * bin_size_w +
(ix + .5f) * bin_size_w / roi_bin_grid_w;
float w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height, width, y, x, w1, w2, w3, w4,
x_low, x_high, y_low, y_high, index);
float g1 = top_diff_this_bin * w1 / count;
float g2 = top_diff_this_bin * w2 / count;
float g3 = top_diff_this_bin * w3 / count;
float g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
gpu_atomic_add(g1, offset_bottom_diff + y_low * width + x_low);
gpu_atomic_add(g2, offset_bottom_diff + y_low * width + x_high);
gpu_atomic_add(g3, offset_bottom_diff + y_high * width + x_low);
gpu_atomic_add(g4, offset_bottom_diff + y_high * width + x_high);
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
at::Tensor roi_align_forward_gpu(const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio) {
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
auto output = at::empty({num_rois, channels, pooled_height, pooled_width}, input.options());
auto output_size = num_rois * pooled_height * pooled_width * channels;
dim3 grid(::min(THCCeilDiv(output_size, 512L), 4096L));
dim3 block(512);
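// 512 threads per block; the grid is capped at 4096 blocks and the kernel's grid-stride loop covers any remaining output elements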
if (output.numel() == 0) {
THCudaCheck(hipGetLastError());
return output;
}
hipLaunchKernelGGL(( ROIAlignForward), dim3(grid), dim3(block), 0, 0,
output_size,
input.contiguous().data<float>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.contiguous().data<float>(),
output.data<float>());
THCudaCheck(hipGetLastError());
return output;
}
at::Tensor roi_align_backward_gpu(const at::Tensor& grad,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width,
const int sampling_ratio) {
AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options());
dim3 grid(::min(THCCeilDiv(grad.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
THCudaCheck(hipGetLastError());
return grad_input;
}
hipLaunchKernelGGL(( ROIAlignBackward), dim3(grid), dim3(block), 0, 0,
grad.numel(),
grad.contiguous().data<float>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
grad_input.data<float>(),
rois.contiguous().data<float>());
THCudaCheck(hipGetLastError());
return grad_input;
}
| b133380684f739aa1a34031a1466dde12c7d3099.cu | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
/*** Forward ***/
__device__ float bilinear_interpolate(const float* bottom_data, const int height, const int width,
float y, float x, const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (float)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (float)x_low;
} else {
x_high = x_low + 1;
}
float ly = y - y_low;
float lx = x - x_low;
float hy = 1. -ly, hx = 1. - lx;
// do bilinear interpolation
float v1 = bottom_data[y_low * width + x_low];
float v2 = bottom_data[y_low * width + x_high];
float v3 = bottom_data[y_high * width + x_low];
float v4 = bottom_data[y_high * width + x_high];
float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
__global__ void ROIAlignForward(const int nthreads, const float* bottom_data, const float spatial_scale,
const int channels, const int height, const int width,
const int aligned_height, const int aligned_width, const int sampling_ratio,
const float* bottom_rois, float* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the aligned output
int pw = index % aligned_width;
int ph = (index / aligned_width) % aligned_height;
int c = (index / aligned_width / aligned_height) % channels;
int n = index / aligned_width / aligned_height / channels;
const float* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
float roi_start_w = offset_bottom_rois[1] * spatial_scale;
float roi_start_h = offset_bottom_rois[2] * spatial_scale;
float roi_end_w = offset_bottom_rois[3] * spatial_scale;
float roi_end_h = offset_bottom_rois[4] * spatial_scale;
// Force malformed ROIs to be 1x1
float roi_width = fmaxf(roi_end_w - roi_start_w, 1.f);
float roi_height = fmaxf(roi_end_h - roi_start_h, 1.f);
float bin_size_h = roi_height / aligned_height;
float bin_size_w = roi_width / aligned_width;
const float* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / aligned_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / aligned_width);
// We do average (integral) pooling inside a bin
const float count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
float output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const float y = roi_start_h + ph * bin_size_h +
(iy + .5f) * bin_size_h / roi_bin_grid_h; // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const float x = roi_start_w + pw * bin_size_w +
(ix + .5f) * bin_size_w / roi_bin_grid_w;
float val = bilinear_interpolate(
offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
/*** Backward ***/
inline __device__ float gpu_atomic_add(const float val, float* address);
inline __device__ float gpu_atomic_add(const float val, float* address) {
return atomicAdd(address, val);
}
__device__ void bilinear_interpolate_gradient(const int height, const int width, float y, float x,
float& w1, float& w2, float& w3, float& w4,
int& x_low, int& x_high, int& y_low, int& y_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
y_low = (int)y;
x_low = (int)x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (float)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (float)x_low;
} else {
x_high = x_low + 1;
}
float ly = y - y_low;
float lx = x - x_low;
float hy = 1. - ly, hx = 1. - lx;
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
__global__ void ROIAlignBackward(const int nthreads, const float* top_diff, const float spatial_scale,
const int channels, const int height, const int width,
const int aligned_height, const int aligned_width, const int sampling_ratio,
float* bottom_diff, const float* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the aligned output
int pw = index % aligned_width;
int ph = (index / aligned_width) % aligned_height;
int c = (index / aligned_width / aligned_height) % channels;
int n = index / aligned_width / aligned_height / channels;
const float* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
float roi_start_w = offset_bottom_rois[1] * spatial_scale;
float roi_start_h = offset_bottom_rois[2] * spatial_scale;
float roi_end_w = offset_bottom_rois[3] * spatial_scale;
float roi_end_h = offset_bottom_rois[4] * spatial_scale;
// Force malformed ROIs to be 1x1
float roi_width = fmaxf(roi_end_w - roi_start_w, 1.f);
float roi_height = fmaxf(roi_end_h - roi_start_h, 1.f);
float bin_size_h = roi_height / aligned_height;
float bin_size_w = roi_width / aligned_width;
float* offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * aligned_height * aligned_width;
const float* offset_top_diff = top_diff + top_offset;
const float top_diff_this_bin = offset_top_diff[ph * aligned_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / aligned_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / aligned_width);
// We do average (integral) pooling inside a bin
const float count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const float y = roi_start_h + ph * bin_size_h +
(iy + .5f) * bin_size_h / roi_bin_grid_h; // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const float x = roi_start_w + pw * bin_size_w +
(ix + .5f) * bin_size_w / roi_bin_grid_w;
float w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height, width, y, x, w1, w2, w3, w4,
x_low, x_high, y_low, y_high, index);
float g1 = top_diff_this_bin * w1 / count;
float g2 = top_diff_this_bin * w2 / count;
float g3 = top_diff_this_bin * w3 / count;
float g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
gpu_atomic_add(g1, offset_bottom_diff + y_low * width + x_low);
gpu_atomic_add(g2, offset_bottom_diff + y_low * width + x_high);
gpu_atomic_add(g3, offset_bottom_diff + y_high * width + x_low);
gpu_atomic_add(g4, offset_bottom_diff + y_high * width + x_high);
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
at::Tensor roi_align_forward_gpu(const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio) {
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
auto output = at::empty({num_rois, channels, pooled_height, pooled_width}, input.options());
auto output_size = num_rois * pooled_height * pooled_width * channels;
dim3 grid(std::min(THCCeilDiv(output_size, 512L), 4096L));
dim3 block(512);
if (output.numel() == 0) {
THCudaCheck(cudaGetLastError());
return output;
}
ROIAlignForward<<<grid, block, 0>>>(
output_size,
input.contiguous().data<float>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.contiguous().data<float>(),
output.data<float>());
THCudaCheck(cudaGetLastError());
return output;
}
at::Tensor roi_align_backward_gpu(const at::Tensor& grad,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width,
const int sampling_ratio) {
AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options());
dim3 grid(std::min(THCCeilDiv(grad.numel(), 512L), 4096L));
dim3 block(512);
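// one thread per gradO element (grid-stride); scattered writes into grad_input are made safe by atomicAdd inside the kernel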
// handle possibly empty gradients
if (grad.numel() == 0) {
THCudaCheck(cudaGetLastError());
return grad_input;
}
ROIAlignBackward<<<grid, block, 0>>>(
grad.numel(),
grad.contiguous().data<float>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
grad_input.data<float>(),
rois.contiguous().data<float>());
THCudaCheck(cudaGetLastError());
return grad_input;
}
|
1da46fffc058b984aa9f3178e7a2de1c8be51d0d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* ******************************************************************************
* *
* *
* * This program and the accompanying materials are made available under the
* * terms of the Apache License, Version 2.0 which is available at
* * https://www.apache.org/licenses/LICENSE-2.0.
* *
* * See the NOTICE file distributed with this work for additional
* * information regarding copyright ownership.
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* * License for the specific language governing permissions and limitations
* * under the License.
* *
* * SPDX-License-Identifier: Apache-2.0
* *****************************************************************************
*/
//
// @author Yurii Shyrma ([email protected]), created on 20.04.2018
//
#include<ops/declarable/helpers/transforms.h>
#include <array/ResultSet.h>
#include <helpers/ShapeUtils.h>
#include <numeric>
#include <array/NDArrayFactory.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void invertPermutationCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo) {
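// each thread reads permutation entries x[i] in a grid-stride loop and writes z[x[i]] = i, producing the inverse permutation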
const T* x = reinterpret_cast<const T*>(vx);
T* z = reinterpret_cast<T*>(vz);
__shared__ Nd4jLong len, totalThreads;
if (threadIdx.x == 0) {
len = shape::length(xShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < len; i += totalThreads) {
const auto xOffset = shape::getIndexOffset(i, xShapeInfo);
const Nd4jLong index = x[xOffset];
const auto zOffset = shape::getIndexOffset(index, zShapeInfo);
z[zOffset] = i;
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
__host__ static void invertPermutationCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo) {
hipLaunchKernelGGL(( invertPermutationCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 1024, *stream, vx, xShapeInfo, vz, zShapeInfo);
}
////////////////////////////////////////////////////////////////////////
void invertPermutation(sd::LaunchContext* context, const NDArray& input, NDArray& output) {
const int threadsPerBlock = MAX_NUM_THREADS;
const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
PointersManager manager(context, "invertPermutation");
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), invertPermutationCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo()), LIBND4J_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void traceCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint diagLen) {
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ T sharedMem[CUDA_BLOCK_SIZE];
__shared__ int xRank, zRank; // xRank = zRank + 2
__shared__ Nd4jLong xLen, zLen;
if (threadIdx.x == 0) {
xRank = shape::rank(xShapeInfo);
zRank = shape::rank(zShapeInfo);
xLen = shape::length(xShapeInfo);
zLen = shape::length(zShapeInfo); // corresponds to number of matrices
}
__syncthreads();
Nd4jLong coords[MAX_RANK];
for (uint m = blockIdx.x; m < zLen; m += gridDim.x) { // one block per each element of z, that is per each matrix
shape::index2coords(m, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
sharedMem[threadIdx.x] = 0;
for (uint i = threadIdx.x; i < diagLen; i += blockDim.x) {
coords[zRank] = coords[zRank + 1] = i;
const auto xOffset = shape::getOffset(xShapeInfo, coords);
sharedMem[threadIdx.x] += x[xOffset];
}
__syncthreads();
// aggregate sum
for (Nd4jLong activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) {
if (threadIdx.x < activeThreads)
sharedMem[threadIdx.x] += sharedMem[threadIdx.x + activeThreads];
__syncthreads();
}
if (threadIdx.x == 0)
z[zOffset] = *sharedMem;
__syncthreads();
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
static void traceCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void *vx, const Nd4jLong *xShapeInfo,
void *vz, const Nd4jLong *zShapeInfo,
const uint diagLen) {
hipLaunchKernelGGL(( traceCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, diagLen);
}
///////////////////////////////////////////////////////////////////
void trace(sd::LaunchContext* context, const NDArray& input, NDArray& output) {
PointersManager manager(context, "trace");
const uint diagLen = input.sizeAt(-1) < input.sizeAt(-2) ? input.sizeAt(-1) : input.sizeAt(-2);
const int threadsPerBlock = CUDA_BLOCK_SIZE;
const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = 1024;
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), traceCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), diagLen), LIBND4J_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
manager.synchronize();
}
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void triuBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag) {
// x and z have same shapes
const auto x = reinterpret_cast<const T*>(vx); // gradO
auto z = reinterpret_cast<T*>(vz); // gradI
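// triu keeps elements on/above the shifted diagonal, so its gradient passes gradO through there and is zero strictly below it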
__shared__ int rank, areSameOffsets;
__shared__ Nd4jLong len, totalThreads; // xLen = zLen
if (threadIdx.x == 0) {
areSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
rank = shape::rank(xShapeInfo);
len = shape::length(zShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
Nd4jLong coords[MAX_RANK];
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < len; i += totalThreads) {
shape::index2coords(i, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
if((coords[rank - 2] + diag > coords[rank - 1])) // row + diag > col
z[zOffset] = 0;
else
z[zOffset] = x[areSameOffsets ? zOffset : shape::getOffset(xShapeInfo, coords)];
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
static void triuBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag) {
hipLaunchKernelGGL(( triuBPCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, diag);
}
///////////////////////////////////////////////////////////////////
void triuBP(sd::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int diagonal) {
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (gradO.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(int) * gradO.rankOf() + 128;
PointersManager manager(context, "triuBP");
NDArray::prepareSpecialUse({&gradI}, {&gradO});
BUILD_SINGLE_SELECTOR(gradI.dataType(), triuBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), gradO.specialBuffer(), gradO.specialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), diagonal), LIBND4J_TYPES);
NDArray::registerSpecialUse({&gradI}, {&gradO});
manager.synchronize();
}
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void tileBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem) {
// x and z have same shapes
const auto x = reinterpret_cast<const T*>(vx); // gradO
auto z = reinterpret_cast<T*>(vz); // gradI
__shared__ int xRank, zRank; // xRank >= zRank
__shared__ Nd4jLong numOfXOffsets, zLen, totalThreads; // xLen >= zLen
if (threadIdx.x == 0) {
xRank = shape::rank(zShapeInfo);
zLen = shape::length(zShapeInfo);
numOfXOffsets = shape::length(xShapeInfo) / zLen;
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int memBuff[MAX_RANK * 2];
auto xOffsets = globMem + tid * numOfXOffsets;
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
const auto zOffset = shape::getIndexOffset(i, zShapeInfo);
shape::outerArrayOffsets(xOffsets, i, xShapeInfo, zShapeInfo, memBuff);
z[zOffset] = x[xOffsets[0]]; // first offset
for (Nd4jLong j = 1; j < numOfXOffsets; ++j) // rest offsets
z[zOffset] += x[xOffsets[j]];
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
static void tileBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem) {
hipLaunchKernelGGL(( tileBPCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, globMem);
}
//////////////////////////////////////////////////////////////////////////
void tileBP(sd::LaunchContext * context, const NDArray& gradO /*input*/, NDArray& gradI /*output*/, const std::vector<Nd4jLong> reps) {
NDArray memBuff('c', gradO.getShapeAsVector(), sd::DataType::INT64, context); // empty auxiliary array for storing device memory which will be used in kernel calculations
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (gradI.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(int) * 2 * gradO.rankOf() + 128;
PointersManager manager(context, "tileBP");
NDArray::prepareSpecialUse({&gradI}, {&gradO, &memBuff});
BUILD_SINGLE_SELECTOR(gradI.dataType(), tileBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), gradO.specialBuffer(), gradO.specialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), reinterpret_cast<Nd4jLong*>(memBuff.specialBuffer())), FLOAT_TYPES);
NDArray::registerSpecialUse({&gradI}, {&gradO, &memBuff});
manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
void eye(sd::LaunchContext * context, NDArray& output) {
output.setIdentity();
}
}
}
}
| 1da46fffc058b984aa9f3178e7a2de1c8be51d0d.cu | /*
* ******************************************************************************
* *
* *
* * This program and the accompanying materials are made available under the
* * terms of the Apache License, Version 2.0 which is available at
* * https://www.apache.org/licenses/LICENSE-2.0.
* *
* * See the NOTICE file distributed with this work for additional
* * information regarding copyright ownership.
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* * License for the specific language governing permissions and limitations
* * under the License.
* *
* * SPDX-License-Identifier: Apache-2.0
* *****************************************************************************
*/
//
// @author Yurii Shyrma ([email protected]), created on 20.04.2018
//
#include<ops/declarable/helpers/transforms.h>
#include <array/ResultSet.h>
#include <helpers/ShapeUtils.h>
#include <numeric>
#include <array/NDArrayFactory.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void invertPermutationCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo) {
const T* x = reinterpret_cast<const T*>(vx);
T* z = reinterpret_cast<T*>(vz);
__shared__ Nd4jLong len, totalThreads;
if (threadIdx.x == 0) {
len = shape::length(xShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < len; i += totalThreads) {
const auto xOffset = shape::getIndexOffset(i, xShapeInfo);
const Nd4jLong index = x[xOffset];
const auto zOffset = shape::getIndexOffset(index, zShapeInfo);
z[zOffset] = i;
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
__host__ static void invertPermutationCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo) {
invertPermutationCuda<T><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(vx, xShapeInfo, vz, zShapeInfo);
}
////////////////////////////////////////////////////////////////////////
void invertPermutation(sd::LaunchContext* context, const NDArray& input, NDArray& output) {
const int threadsPerBlock = MAX_NUM_THREADS;
const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
PointersManager manager(context, "invertPermutation");
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), invertPermutationCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo()), LIBND4J_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void traceCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint diagLen) {
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ T sharedMem[CUDA_BLOCK_SIZE];
__shared__ int xRank, zRank; // xRank = zRank + 2
__shared__ Nd4jLong xLen, zLen;
if (threadIdx.x == 0) {
xRank = shape::rank(xShapeInfo);
zRank = shape::rank(zShapeInfo);
xLen = shape::length(xShapeInfo);
zLen = shape::length(zShapeInfo); // corresponds to number of matrices
}
__syncthreads();
Nd4jLong coords[MAX_RANK];
for (uint m = blockIdx.x; m < zLen; m += gridDim.x) { // one block per each element of z, that is per each matrix
shape::index2coords(m, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
sharedMem[threadIdx.x] = 0;
for (uint i = threadIdx.x; i < diagLen; i += blockDim.x) {
coords[zRank] = coords[zRank + 1] = i;
const auto xOffset = shape::getOffset(xShapeInfo, coords);
sharedMem[threadIdx.x] += x[xOffset];
}
__syncthreads();
// aggregate sum
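// pairwise tree reduction in shared memory; assumes blockDim.x is a power of two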
for (Nd4jLong activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) {
if (threadIdx.x < activeThreads)
sharedMem[threadIdx.x] += sharedMem[threadIdx.x + activeThreads];
__syncthreads();
}
if (threadIdx.x == 0)
z[zOffset] = *sharedMem;
__syncthreads();
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
static void traceCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
const void *vx, const Nd4jLong *xShapeInfo,
void *vz, const Nd4jLong *zShapeInfo,
const uint diagLen) {
traceCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, diagLen);
}
///////////////////////////////////////////////////////////////////
void trace(sd::LaunchContext* context, const NDArray& input, NDArray& output) {
PointersManager manager(context, "trace");
const uint diagLen = input.sizeAt(-1) < input.sizeAt(-2) ? input.sizeAt(-1) : input.sizeAt(-2);
const int threadsPerBlock = CUDA_BLOCK_SIZE;
const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = 1024;
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), traceCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), diagLen), LIBND4J_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
manager.synchronize();
}
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void triuBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag) {
// x and z have same shapes
const auto x = reinterpret_cast<const T*>(vx); // gradO
auto z = reinterpret_cast<T*>(vz); // gradI
__shared__ int rank, areSameOffsets;
__shared__ Nd4jLong len, totalThreads; // xLen = zLen
if (threadIdx.x == 0) {
areSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
rank = shape::rank(xShapeInfo);
len = shape::length(zShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
Nd4jLong coords[MAX_RANK];
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < len; i += totalThreads) {
shape::index2coords(i, zShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
if((coords[rank - 2] + diag > coords[rank - 1])) // row + diag > col
z[zOffset] = 0;
else
z[zOffset] = x[areSameOffsets ? zOffset : shape::getOffset(xShapeInfo, coords)];
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
static void triuBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag) {
triuBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, diag);
}
///////////////////////////////////////////////////////////////////
void triuBP(sd::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int diagonal) {
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (gradO.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(int) * gradO.rankOf() + 128;
PointersManager manager(context, "triuBP");
NDArray::prepareSpecialUse({&gradI}, {&gradO});
BUILD_SINGLE_SELECTOR(gradI.dataType(), triuBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), gradO.specialBuffer(), gradO.specialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), diagonal), LIBND4J_TYPES);
NDArray::registerSpecialUse({&gradI}, {&gradO});
manager.synchronize();
}
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void tileBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem) {
// x and z have same shapes
const auto x = reinterpret_cast<const T*>(vx); // gradO
auto z = reinterpret_cast<T*>(vz); // gradI
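// tile's backward pass: every gradI element accumulates the gradO values of all of its tiled copies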
__shared__ int xRank, zRank; // xRank >= zRank
__shared__ Nd4jLong numOfXOffsets, zLen, totalThreads; // xLen >= zLen
if (threadIdx.x == 0) {
xRank = shape::rank(zShapeInfo);
zLen = shape::length(zShapeInfo);
numOfXOffsets = shape::length(xShapeInfo) / zLen;
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int memBuff[MAX_RANK * 2];
auto xOffsets = globMem + tid * numOfXOffsets;
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
const auto zOffset = shape::getIndexOffset(i, zShapeInfo);
shape::outerArrayOffsets(xOffsets, i, xShapeInfo, zShapeInfo, memBuff);
z[zOffset] = x[xOffsets[0]]; // first offset
for (Nd4jLong j = 1; j < numOfXOffsets; ++j) // rest offsets
z[zOffset] += x[xOffsets[j]];
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
static void tileBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem) {
tileBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, globMem);
}
//////////////////////////////////////////////////////////////////////////
void tileBP(sd::LaunchContext * context, const NDArray& gradO /*input*/, NDArray& gradI /*output*/, const std::vector<Nd4jLong> reps) {
NDArray memBuff('c', gradO.getShapeAsVector(), sd::DataType::INT64, context); // empty auxiliary array for storing device memory which will be used in kernel calculations
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (gradI.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(int) * 2 * gradO.rankOf() + 128;
PointersManager manager(context, "tileBP");
NDArray::prepareSpecialUse({&gradI}, {&gradO, &memBuff});
BUILD_SINGLE_SELECTOR(gradI.dataType(), tileBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), gradO.specialBuffer(), gradO.specialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), reinterpret_cast<Nd4jLong*>(memBuff.specialBuffer())), FLOAT_TYPES);
NDArray::registerSpecialUse({&gradI}, {&gradO, &memBuff});
manager.synchronize();
}
//////////////////////////////////////////////////////////////////////////
void eye(sd::LaunchContext * context, NDArray& output) {
output.setIdentity();
}
}
}
}
|
1c804627546265893b05ce39cda4a2854321d24a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
///@file nabla_operator.cu
///@brief Operator that computes the forward differences along all dimensions
///@author Erich Kobler <[email protected]>
///@date 09.07.2018
#include "utils.h"
#include "tensor/d_tensor.h"
#include "nabla_operator.h"
template<typename T>
__global__ void forward_differences(
typename optox::DTensor<T, 3>::Ref y,
const typename optox::DTensor<T, 2>::ConstRef x,
T hx, T hy, T hz)
{
int ix = blockDim.x * blockIdx.x + threadIdx.x;
int iy = blockDim.y * blockIdx.y + threadIdx.y;
if (ix < x.size_[1] && iy < x.size_[0])
{
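// xp/yp are the forward neighbours, clamped at the last column/row so the difference there is zero (Neumann-style boundary)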
const int xp = ix + (ix < x.size_[1] - 1);
const int yp = iy + (iy < x.size_[0] - 1);
y(0, iy, ix) = (x(iy, xp) - x(iy, ix)) / hx;
y(1, iy, ix) = (x(yp, ix) - x(iy, ix)) / hy;
}
}
template<typename T>
__global__ void forward_differences(
typename optox::DTensor<T, 4>::Ref y,
const typename optox::DTensor<T, 3>::ConstRef x,
T hx, T hy, T hz)
{
int ix = blockDim.x * blockIdx.x + threadIdx.x;
int iy = blockDim.y * blockIdx.y + threadIdx.y;
int iz = blockDim.z * blockIdx.z + threadIdx.z;
if (ix < x.size_[2] && iy < x.size_[1] && iz < x.size_[0])
{
const int xp = ix + (ix < x.size_[2] - 1);
const int yp = iy + (iy < x.size_[1] - 1);
const int zp = iz + (iz < x.size_[0] - 1);
y(0, iz, iy , ix) = (x(iz, iy, xp) - x(iz, iy, ix)) / hx;
y(1, iz, iy , ix) = (x(iz, yp, ix) - x(iz, iy, ix)) / hy;
y(2, iz, iy , ix) = (x(zp, iy, ix) - x(iz, iy, ix)) / hz;
}
}
template<typename T, unsigned int N>
void optox::NablaOperator<T, N>::computeForward(optox::OperatorOutputVector &&outputs,
const optox::OperatorInputVector &inputs)
{
auto x = this->template getInput<T, N>(0, inputs);
auto y = this->template getOutput<T, N+1>(0, outputs);
if (y->size()[0] != N)
THROW_OPTOXEXCEPTION("NablaOperator: unsupported size");
dim3 dim_block;
dim3 dim_grid;
if (N == 2)
{
dim_block = dim3(32, 32);
dim_grid = dim3(divUp(x->size()[1], dim_block.x),
divUp(x->size()[0], dim_block.y));
}
else if (N == 3)
{
dim_block = dim3(16, 16, 3);
dim_grid = dim3(divUp(x->size()[2], dim_block.x),
divUp(x->size()[1], dim_block.y),
divUp(x->size()[0], dim_block.z));
}
else
THROW_OPTOXEXCEPTION("NablaOperator: unsupported dimension");
if (N == 2)
hipLaunchKernelGGL(( forward_differences<T>) , dim3(dim_grid), dim3(dim_block), 0, this->stream_, *y, *x, hx_, hy_, hz_);
else if (N == 3)
hipLaunchKernelGGL(( forward_differences<T>) , dim3(dim_grid), dim3(dim_block), 0, this->stream_, *y, *x, hx_, hy_, hz_);
else
THROW_OPTOXEXCEPTION("NablaOperator: unsupported dimension");
OPTOX_CUDA_CHECK;
}
template<typename T>
__global__ void backward_differences(
typename optox::DTensor<T, 2>::Ref x,
const typename optox::DTensor<T, 3>::ConstRef y,
T hx, T hy, T hz)
{
int ix = blockDim.x * blockIdx.x + threadIdx.x;
int iy = blockDim.y * blockIdx.y + threadIdx.y;
if (ix < x.size_[1] && iy < x.size_[0])
{
T div = T(0.0);
// x
T div_x = (ix > 0) ?
(ix < x.size_[1] - 1) ?
-y(0, iy, ix) + y(0, iy, ix - 1)
:
y(0, iy, ix - 1)
:
-y(0, iy, ix);
div += div_x / hx;
// y
T div_y = (iy > 0) ?
(iy < x.size_[0] - 1) ?
-y(1, iy, ix) + y(1, iy - 1, ix)
:
y(1, iy - 1, ix)
:
-y(1, iy, ix);
div += div_y / hy;
x(iy, ix) = div;
}
}
template<typename T>
__global__ void backward_differences(
typename optox::DTensor<T, 3>::Ref x,
const typename optox::DTensor<T, 4>::ConstRef y,
T hx, T hy, T hz)
{
int ix = blockDim.x * blockIdx.x + threadIdx.x;
int iy = blockDim.y * blockIdx.y + threadIdx.y;
int iz = blockDim.z * blockIdx.z + threadIdx.z;
if (ix < x.size_[2] && iy < x.size_[1] && iz < x.size_[0])
{
T div = T(0.0);
// x
T div_x = (ix > 0) ?
(ix < x.size_[2] - 1) ?
-y(0, iz, iy, ix) + y(0, iz, iy, ix - 1)
:
y(0, iz, iy, ix - 1)
:
-y(0, iz, iy, ix);
div += div_x / hx;
// y
T div_y = (iy > 0) ?
(iy < x.size_[1] - 1) ?
-y(1, iz, iy, ix) + y(1, iz, iy - 1, ix)
:
y(1, iz, iy - 1, ix)
:
-y(1, iz, iy, ix);
div += div_y / hy;
// z
T div_z = (iz > 0) ?
(iz < x.size_[0] - 1) ?
-y(2, iz, iy, ix) + y(2, iz - 1, iy, ix)
:
y(2, iz - 1, iy, ix)
:
-y(2, iz, iy, ix);
div += div_z / hz;
x(iz, iy, ix) = div;
}
}
template<typename T, unsigned int N>
void optox::NablaOperator<T, N>::computeAdjoint(optox::OperatorOutputVector &&outputs,
const optox::OperatorInputVector &inputs)
{
auto y = this->template getInput<T, N+1>(0, inputs);
auto x = this->template getOutput<T, N>(0, outputs);
if (y->size()[0] != N)
THROW_OPTOXEXCEPTION("NablaOperator: unsupported size");
dim3 dim_block;
dim3 dim_grid;
if (N == 2)
{
dim_block = dim3(32, 32);
dim_grid = dim3(divUp(x->size()[1], dim_block.x),
divUp(x->size()[0], dim_block.y));
}
else if (N == 3)
{
dim_block = dim3(16, 16, 3);
dim_grid = dim3(divUp(x->size()[2], dim_block.x),
divUp(x->size()[1], dim_block.y),
divUp(x->size()[0], dim_block.z));
}
else
THROW_OPTOXEXCEPTION("NablaOperator: unsupported dimension");
if (N == 2)
hipLaunchKernelGGL(( backward_differences<T>) , dim3(dim_grid), dim3(dim_block), 0, this->stream_, *x, *y, hx_, hy_, hz_);
else if (N == 3)
hipLaunchKernelGGL(( backward_differences<T>) , dim3(dim_grid), dim3(dim_block), 0, this->stream_, *x, *y, hx_, hy_, hz_);
else
THROW_OPTOXEXCEPTION("NablaOperator: unsupported dimension");
OPTOX_CUDA_CHECK;
}
#define REGISTER_OP_T(T, N) \
template class optox::NablaOperator<T, N>;;
#define REGISTER_OP(T) \
REGISTER_OP_T(T, 2) \
REGISTER_OP_T(T, 3)
OPTOX_CALL_REAL_NUMBER_TYPES(REGISTER_OP);
#undef REGISTER_OP
#undef REGISTER_OP_T
| 1c804627546265893b05ce39cda4a2854321d24a.cu | ///@file nabla_operator.cu
///@brief Operator that computes the forward differences along all dimensions
///@author Erich Kobler <[email protected]>
///@date 09.07.2018
#include "utils.h"
#include "tensor/d_tensor.h"
#include "nabla_operator.h"
template<typename T>
__global__ void forward_differences(
typename optox::DTensor<T, 3>::Ref y,
const typename optox::DTensor<T, 2>::ConstRef x,
T hx, T hy, T hz)
{
int ix = blockDim.x * blockIdx.x + threadIdx.x;
int iy = blockDim.y * blockIdx.y + threadIdx.y;
if (ix < x.size_[1] && iy < x.size_[0])
{
const int xp = ix + (ix < x.size_[1] - 1);
const int yp = iy + (iy < x.size_[0] - 1);
y(0, iy, ix) = (x(iy, xp) - x(iy, ix)) / hx;
y(1, iy, ix) = (x(yp, ix) - x(iy, ix)) / hy;
}
}
template<typename T>
__global__ void forward_differences(
typename optox::DTensor<T, 4>::Ref y,
const typename optox::DTensor<T, 3>::ConstRef x,
T hx, T hy, T hz)
{
int ix = blockDim.x * blockIdx.x + threadIdx.x;
int iy = blockDim.y * blockIdx.y + threadIdx.y;
int iz = blockDim.z * blockIdx.z + threadIdx.z;
if (ix < x.size_[2] && iy < x.size_[1] && iz < x.size_[0])
{
const int xp = ix + (ix < x.size_[2] - 1);
const int yp = iy + (iy < x.size_[1] - 1);
const int zp = iz + (iz < x.size_[0] - 1);
y(0, iz, iy , ix) = (x(iz, iy, xp) - x(iz, iy, ix)) / hx;
y(1, iz, iy , ix) = (x(iz, yp, ix) - x(iz, iy, ix)) / hy;
y(2, iz, iy , ix) = (x(zp, iy, ix) - x(iz, iy, ix)) / hz;
}
}
template<typename T, unsigned int N>
void optox::NablaOperator<T, N>::computeForward(optox::OperatorOutputVector &&outputs,
const optox::OperatorInputVector &inputs)
{
auto x = this->template getInput<T, N>(0, inputs);
auto y = this->template getOutput<T, N+1>(0, outputs);
if (y->size()[0] != N)
THROW_OPTOXEXCEPTION("NablaOperator: unsupported size");
dim3 dim_block;
dim3 dim_grid;
if (N == 2)
{
dim_block = dim3(32, 32);
dim_grid = dim3(divUp(x->size()[1], dim_block.x),
divUp(x->size()[0], dim_block.y));
}
else if (N == 3)
{
dim_block = dim3(16, 16, 3);
dim_grid = dim3(divUp(x->size()[2], dim_block.x),
divUp(x->size()[1], dim_block.y),
divUp(x->size()[0], dim_block.z));
}
else
THROW_OPTOXEXCEPTION("NablaOperator: unsupported dimension");
if (N == 2)
forward_differences<T> <<<dim_grid, dim_block, 0, this->stream_>>>(*y, *x, hx_, hy_, hz_);
else if (N == 3)
forward_differences<T> <<<dim_grid, dim_block, 0, this->stream_>>>(*y, *x, hx_, hy_, hz_);
else
THROW_OPTOXEXCEPTION("NablaOperator: unsupported dimension");
OPTOX_CUDA_CHECK;
}
template<typename T>
__global__ void backward_differences(
typename optox::DTensor<T, 2>::Ref x,
const typename optox::DTensor<T, 3>::ConstRef y,
T hx, T hy, T hz)
{
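// exact adjoint of the forward differences (minus the backward-difference divergence), with one-sided terms at the borders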
int ix = blockDim.x * blockIdx.x + threadIdx.x;
int iy = blockDim.y * blockIdx.y + threadIdx.y;
if (ix < x.size_[1] && iy < x.size_[0])
{
T div = T(0.0);
// x
T div_x = (ix > 0) ?
(ix < x.size_[1] - 1) ?
-y(0, iy, ix) + y(0, iy, ix - 1)
:
y(0, iy, ix - 1)
:
-y(0, iy, ix);
div += div_x / hx;
// y
T div_y = (iy > 0) ?
(iy < x.size_[0] - 1) ?
-y(1, iy, ix) + y(1, iy - 1, ix)
:
y(1, iy - 1, ix)
:
-y(1, iy, ix);
div += div_y / hy;
x(iy, ix) = div;
}
}
template<typename T>
__global__ void backward_differences(
typename optox::DTensor<T, 3>::Ref x,
const typename optox::DTensor<T, 4>::ConstRef y,
T hx, T hy, T hz)
{
int ix = blockDim.x * blockIdx.x + threadIdx.x;
int iy = blockDim.y * blockIdx.y + threadIdx.y;
int iz = blockDim.z * blockIdx.z + threadIdx.z;
if (ix < x.size_[2] && iy < x.size_[1] && iz < x.size_[0])
{
T div = T(0.0);
// x
T div_x = (ix > 0) ?
(ix < x.size_[2] - 1) ?
-y(0, iz, iy, ix) + y(0, iz, iy, ix - 1)
:
y(0, iz, iy, ix - 1)
:
-y(0, iz, iy, ix);
div += div_x / hx;
// y
T div_y = (iy > 0) ?
(iy < x.size_[1] - 1) ?
-y(1, iz, iy, ix) + y(1, iz, iy - 1, ix)
:
y(1, iz, iy - 1, ix)
:
-y(1, iz, iy, ix);
div += div_y / hy;
// z
T div_z = (iz > 0) ?
(iz < x.size_[0] - 1) ?
-y(2, iz, iy, ix) + y(2, iz - 1, iy, ix)
:
y(2, iz - 1, iy, ix)
:
-y(2, iz, iy, ix);
div += div_z / hz;
x(iz, iy, ix) = div;
}
}
template<typename T, unsigned int N>
void optox::NablaOperator<T, N>::computeAdjoint(optox::OperatorOutputVector &&outputs,
const optox::OperatorInputVector &inputs)
{
auto y = this->template getInput<T, N+1>(0, inputs);
auto x = this->template getOutput<T, N>(0, outputs);
if (y->size()[0] != N)
THROW_OPTOXEXCEPTION("NablaOperator: unsupported size");
dim3 dim_block;
dim3 dim_grid;
if (N == 2)
{
dim_block = dim3(32, 32);
dim_grid = dim3(divUp(x->size()[1], dim_block.x),
divUp(x->size()[0], dim_block.y));
}
else if (N == 3)
{
dim_block = dim3(16, 16, 3);
dim_grid = dim3(divUp(x->size()[2], dim_block.x),
divUp(x->size()[1], dim_block.y),
divUp(x->size()[0], dim_block.z));
}
else
THROW_OPTOXEXCEPTION("NablaOperator: unsupported dimension");
if (N == 2)
backward_differences<T> <<<dim_grid, dim_block, 0, this->stream_>>>(*x, *y, hx_, hy_, hz_);
else if (N == 3)
backward_differences<T> <<<dim_grid, dim_block, 0, this->stream_>>>(*x, *y, hx_, hy_, hz_);
else
THROW_OPTOXEXCEPTION("NablaOperator: unsupported dimension");
OPTOX_CUDA_CHECK;
}
#define REGISTER_OP_T(T, N) \
template class optox::NablaOperator<T, N>;;
#define REGISTER_OP(T) \
REGISTER_OP_T(T, 2) \
REGISTER_OP_T(T, 3)
OPTOX_CALL_REAL_NUMBER_TYPES(REGISTER_OP);
#undef REGISTER_OP
#undef REGISTER_OP_T
|
ee8b22cf0cb72820d29f01aef31fa257c32771d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// TanH neuron activation function layer.
// Adapted from ReLU layer code written by Yangqing Jia
#include <vector>
#include "caffe/layers/tanh_layer.hpp"
namespace caffe {
#ifdef USE_ROCM
template<typename Dtype>
__global__ void TanHForward(const int_tp n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = tanh(in[index]);
}
}
#endif // USE_ROCM
template<typename Dtype>
void TanHLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int_tp count = bottom[0]->count();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
// NOLINT_NEXT_LINE(whitespace/operators)
TanHForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->template program<Dtype>();
viennacl::ocl::kernel &oclk_tanh = program.get_kernel(
CL_KERNEL_SELECT("tanh_forward"));
viennacl::ocl::enqueue(
oclk_tanh(count, WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) top_data, &ctx)),
ctx.get_queue());
#endif // USE_GREENTEA
}
}
#ifdef USE_ROCM
template<typename Dtype>
__global__ void TanHBackward(const int_tp n, const Dtype* in_diff,
const Dtype* out_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
Dtype tanhx = out_data[index];
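// d tanh(x)/dx = 1 - tanh(x)^2, and out_data already holds tanh(x) from the forward pass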
out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);
}
}
#endif // USE_ROCM
template<typename Dtype>
void TanHLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int_tp count = bottom[0]->count();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
// NOLINT_NEXT_LINE(whitespace/operators)
TanHBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, top_diff, top_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->template program<Dtype>();
viennacl::ocl::kernel &oclk_tanh = program.get_kernel(
CL_KERNEL_SELECT("tanh_backward"));
viennacl::ocl::enqueue(
oclk_tanh(count, WrapHandle((cl_mem) top_diff, &ctx),
WrapHandle((cl_mem) top_data, &ctx),
WrapHandle((cl_mem) bottom_diff, &ctx)),
ctx.get_queue());
#endif // USE_GREENTEA
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(TanHLayer);
} // namespace caffe
| ee8b22cf0cb72820d29f01aef31fa257c32771d1.cu | // TanH neuron activation function layer.
// Adapted from ReLU layer code written by Yangqing Jia
#include <vector>
#include "caffe/layers/tanh_layer.hpp"
namespace caffe {
#ifdef USE_CUDA
template<typename Dtype>
__global__ void TanHForward(const int_tp n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = tanh(in[index]);
}
}
#endif // USE_CUDA
template<typename Dtype>
void TanHLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int_tp count = bottom[0]->count();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
// NOLINT_NEXT_LINE(whitespace/operators)
TanHForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->template program<Dtype>();
viennacl::ocl::kernel &oclk_tanh = program.get_kernel(
CL_KERNEL_SELECT("tanh_forward"));
viennacl::ocl::enqueue(
oclk_tanh(count, WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) top_data, &ctx)),
ctx.get_queue());
#endif // USE_GREENTEA
}
}
#ifdef USE_CUDA
template<typename Dtype>
__global__ void TanHBackward(const int_tp n, const Dtype* in_diff,
const Dtype* out_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
Dtype tanhx = out_data[index];
out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);
}
}
#endif // USE_CUDA
template<typename Dtype>
void TanHLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int_tp count = bottom[0]->count();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
// NOLINT_NEXT_LINE(whitespace/operators)
TanHBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, top_diff, top_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->template program<Dtype>();
viennacl::ocl::kernel &oclk_tanh = program.get_kernel(
CL_KERNEL_SELECT("tanh_backward"));
viennacl::ocl::enqueue(
oclk_tanh(count, WrapHandle((cl_mem) top_diff, &ctx),
WrapHandle((cl_mem) top_data, &ctx),
WrapHandle((cl_mem) bottom_diff, &ctx)),
ctx.get_queue());
#endif // USE_GREENTEA
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(TanHLayer);
} // namespace caffe
|
1816cf5ec8b61ccca30a2d0c24abec7ae8a6119d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//********************************************************//
// CUDA SIFT extractor by Marten Björkman aka Celebrandil //
// celle @ nada.kth.se //
//********************************************************//
#include <cstdio>
#include "cudautils.h"
#include "cudaImage.h"
#define SINGULAR_BATCH 32
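// Householder reflector for entries row..n-1 of a: fills v (normalised so that
// v[row] == 1, zeros above row) and returns beta such that (I - beta*v*v^T)*a
// zeroes the components of a below index 'row'.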
template<int n>
__device__ float Householder(float *a, float *v, int row)
{
float sigma = 0.0f;
float beta = 0.0f;
for (int i=row+1;i<n;i++)
sigma += a[i] * a[i];
for (int i=0;i<row;i++)
v[i] = 0.0;
v[row] = 1.0;
for (int i=row+1;i<n;i++)
v[i] = a[i];
if (sigma!=0.0) {
float x1 = a[row];
float v1 = v[row];
float eig = sqrt(x1*x1 + sigma);
if (x1<=0.0)
v1 = x1 - eig;
else
v1 = -sigma / (x1 + eig);
beta = 2*v1*v1 / (sigma + v1*v1);
for (int i=row+1;i<n;i++)
v[i] /= v1;
}
return beta;
}
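// Singular values of the n x n row-major matrix A, computed in place: Householder
// bidiagonalization followed by repeated Golub-Kahan SVD steps until the
// off-diagonal terms fall below the tolerance. The non-negative, unsorted values
// are returned in a[0..n-1]; A is overwritten. (The 5.4.2 / 8.6.2 tags below look
// like textbook section references, presumably Golub & Van Loan.)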
template <int n>
__device__ void SingularValues(float *A, float *a)
{
#define eps 1e-4f
// Householder bidiagonalization A = U*B*V^T 5.4.2
float vA[n];
float v[n];
for (int j=0;j<n;j++) {
for (int k=j;k<n;k++)
a[k] = A[k*n+j];
float betaU = Householder<n>(a, v, j);
for (int k=j;k<n;k++) {
float sum = 0.0f;
for (int l=j;l<n;l++)
sum += v[l] * A[l*n+k];
vA[k] = sum;
}
for (int k=j;k<n;k++)
for (int l=j;l<n;l++)
A[l*n+k] -= betaU*v[l]*vA[k];
if (j<n-1) {
for (int k=j+1;k<n;k++)
a[k] = A[j*n+k];
float betaV = Householder<n>(a, v, j+1);
for (int k=j;k<n;k++) {
float sum = 0.0f;
for (int l=j+1;l<n;l++)
sum += A[k*n+l] * v[l];
vA[k] = sum;
}
for (int k=j;k<n;k++)
for (int l=j+1;l<n;l++)
A[k*n+l] -= betaV*vA[k]*v[l];
}
}
// Golub-Kahan SVD Step B = U*D*V^T 8.6.2
for (int i=0;i<n-1;i++) {
a[i] = A[i*n+i];
v[i] = A[i*n+i+1];
}
a[n-1] = A[n*n-1];
v[n-1] = 0.0;
int q = n-1;
int cnt = 0;
while (q>0 && cnt<10000) {
for (int i=0;i<n-1;i++)
if (fabs(v[i])<eps*(fabs(a[i]) + fabs(a[i+1])))
v[i] = 0.0f;
q = n - 1;
while (q>0 && fabs(v[q-1])<eps)
q--;
if (q>0) {
int p = q;
while (p>0 && fabs(v[p-1])>eps)
p--;
bool dogivens = true;
for (int i=p;i<q;i++)
if (a[i]*a[i]<eps*eps) {
v[i] = 0.0f;
dogivens = false;
}
if (dogivens) {
float oldc = 1.0f;
float olds = 0.0f;
float y = a[p];
float z = v[p];
for (int k=p;k<q;k++) {
float sz = sqrt(y*y + z*z);
float c = y / sz;
float s = -z / sz;
if (k>p)
v[k-1] = olds*sz;
y = oldc*sz;
z = a[k+1]*s;
float h = a[k+1]*c;
sz = sqrt(y*y + z*z);
c = y / sz;
s = -z / sz;
a[k] = sz;
y = h;
if (k<q-1)
z = v[k+1];
oldc = c;
olds = s;
}
v[q-1] = y*olds;
a[q] = y*oldc;
}
}
cnt ++;
}
for (int i=0;i<n;i++)
a[i] = (a[i]<0.0f ? -a[i] : a[i]);
}
// 362
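// One thread per n x n image patch (neighbouring threads are offset by k columns):
// each thread loads its patch through shared memory, computes the singular values
// and stores a compressibility measure:
// 1 - (sum of the 5n/8 largest singular values) / (sum of all singular values).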
template <int n, int k>
__global__ void ComputeSingular(float *imgData, float *svdData, int svdWid)
{
#define SINGULAR_WIDTH ((SINGULAR_BATCH-1)*k + n)
__shared__ float buffer[SINGULAR_WIDTH];
float A[n*n];
float a[n];
const int tx = threadIdx.x;
const int bx = blockIdx.x;
const int by = blockIdx.y;
int imgWid = k*svdWid;
int readPos = __mul24(by, imgWid) + __mul24(bx*k, SINGULAR_BATCH);
for (int yp=0;yp<n;yp++) {
float *imgd = &imgData[readPos+yp*imgWid];
for (int xp=tx;xp<SINGULAR_WIDTH;xp+=SINGULAR_BATCH)
buffer[xp] = imgd[xp];
__syncthreads();
for (int xp=0;xp<n;xp++)
A[yp*n+xp] = buffer[tx*k+xp];
__syncthreads();
}
SingularValues<n>(A, a);
__syncthreads();
int writePos = __mul24(by, svdWid) + __mul24(bx, SINGULAR_BATCH);
for (int i=0;i<n-1;i++) {
for (int j=i+1;j<n;j++) {
if (a[i]<a[j]) {
float t = a[i];
a[i] = a[j];
a[j] = t;
}
}
}
float sum = 1e-10f;
for (int i=0;i<5*n/8;i++)
sum += a[i];
float tot = sum;
for (int i=5*n/8;i<n;i++)
tot += a[i];
svdData[writePos+tx] = 1.0f - sum/tot;
}
double ComputeSingular(CudaImage *img, CudaImage *svd)
{
int sw = svd->width;
int sh = svd->height;
TimerGPU timer(0);
if (img->d_data==NULL || svd->d_data==NULL) {
printf("ComputeSingular: missing data\n");
return 0.0;
}
dim3 blocks(iDivUp(sw, SINGULAR_BATCH), sh);
dim3 threads(SINGULAR_BATCH);
hipLaunchKernelGGL(( ComputeSingular<8,1>), dim3(blocks),dim3(threads), 0, 0, img->d_data, svd->d_data, sw);
checkMsg("ComputeSingular() execution failed\n");
safeCall(hipDeviceSynchronize());
double gpuTime = timer.read();
//#ifdef VERBOSE
printf("ComputeSingular time = %.2f ms\n", gpuTime);
//#endif
return gpuTime;
}
| 1816cf5ec8b61ccca30a2d0c24abec7ae8a6119d.cu | //********************************************************//
// CUDA SIFT extractor by Marten Björkman aka Celebrandil //
// celle @ nada.kth.se //
//********************************************************//
#include <cstdio>
#include "cudautils.h"
#include "cudaImage.h"
#define SINGULAR_BATCH 32
template<int n>
__device__ float Householder(float *a, float *v, int row)
{
float sigma = 0.0f;
float beta = 0.0f;
for (int i=row+1;i<n;i++)
sigma += a[i] * a[i];
for (int i=0;i<row;i++)
v[i] = 0.0;
v[row] = 1.0;
for (int i=row+1;i<n;i++)
v[i] = a[i];
if (sigma!=0.0) {
float x1 = a[row];
float v1 = v[row];
float eig = sqrt(x1*x1 + sigma);
if (x1<=0.0)
v1 = x1 - eig;
else
v1 = -sigma / (x1 + eig);
beta = 2*v1*v1 / (sigma + v1*v1);
for (int i=row+1;i<n;i++)
v[i] /= v1;
}
return beta;
}
template <int n>
__device__ void SingularValues(float *A, float *a)
{
#define eps 1e-4f
// Householder bidiagonalization A = U*B*V^T 5.4.2
float vA[n];
float v[n];
for (int j=0;j<n;j++) {
for (int k=j;k<n;k++)
a[k] = A[k*n+j];
float betaU = Householder<n>(a, v, j);
for (int k=j;k<n;k++) {
float sum = 0.0f;
for (int l=j;l<n;l++)
sum += v[l] * A[l*n+k];
vA[k] = sum;
}
for (int k=j;k<n;k++)
for (int l=j;l<n;l++)
A[l*n+k] -= betaU*v[l]*vA[k];
if (j<n-1) {
for (int k=j+1;k<n;k++)
a[k] = A[j*n+k];
float betaV = Householder<n>(a, v, j+1);
for (int k=j;k<n;k++) {
float sum = 0.0f;
for (int l=j+1;l<n;l++)
sum += A[k*n+l] * v[l];
vA[k] = sum;
}
for (int k=j;k<n;k++)
for (int l=j+1;l<n;l++)
A[k*n+l] -= betaV*vA[k]*v[l];
}
}
// Golub-Kahan SVD Step B = U*D*V^T 8.6.2
for (int i=0;i<n-1;i++) {
a[i] = A[i*n+i];
v[i] = A[i*n+i+1];
}
a[n-1] = A[n*n-1];
v[n-1] = 0.0;
int q = n-1;
int cnt = 0;
while (q>0 && cnt<10000) {
for (int i=0;i<n-1;i++)
if (fabs(v[i])<eps*(fabs(a[i]) + fabs(a[i+1])))
v[i] = 0.0f;
q = n - 1;
while (q>0 && fabs(v[q-1])<eps)
q--;
if (q>0) {
int p = q;
while (p>0 && fabs(v[p-1])>eps)
p--;
bool dogivens = true;
for (int i=p;i<q;i++)
if (a[i]*a[i]<eps*eps) {
v[i] = 0.0f;
dogivens = false;
}
if (dogivens) {
float oldc = 1.0f;
float olds = 0.0f;
float y = a[p];
float z = v[p];
for (int k=p;k<q;k++) {
float sz = sqrt(y*y + z*z);
float c = y / sz;
float s = -z / sz;
if (k>p)
v[k-1] = olds*sz;
y = oldc*sz;
z = a[k+1]*s;
float h = a[k+1]*c;
sz = sqrt(y*y + z*z);
c = y / sz;
s = -z / sz;
a[k] = sz;
y = h;
if (k<q-1)
z = v[k+1];
oldc = c;
olds = s;
}
v[q-1] = y*olds;
a[q] = y*oldc;
}
}
cnt ++;
}
for (int i=0;i<n;i++)
a[i] = (a[i]<0.0f ? -a[i] : a[i]);
}
// 362
template <int n, int k>
__global__ void ComputeSingular(float *imgData, float *svdData, int svdWid)
{
#define SINGULAR_WIDTH ((SINGULAR_BATCH-1)*k + n)
__shared__ float buffer[SINGULAR_WIDTH];
float A[n*n];
float a[n];
const int tx = threadIdx.x;
const int bx = blockIdx.x;
const int by = blockIdx.y;
int imgWid = k*svdWid;
int readPos = __mul24(by, imgWid) + __mul24(bx*k, SINGULAR_BATCH);
for (int yp=0;yp<n;yp++) {
float *imgd = &imgData[readPos+yp*imgWid];
for (int xp=tx;xp<SINGULAR_WIDTH;xp+=SINGULAR_BATCH)
buffer[xp] = imgd[xp];
__syncthreads();
for (int xp=0;xp<n;xp++)
A[yp*n+xp] = buffer[tx*k+xp];
__syncthreads();
}
SingularValues<n>(A, a);
__syncthreads();
int writePos = __mul24(by, svdWid) + __mul24(bx, SINGULAR_BATCH);
for (int i=0;i<n-1;i++) {
for (int j=i+1;j<n;j++) {
if (a[i]<a[j]) {
float t = a[i];
a[i] = a[j];
a[j] = t;
}
}
}
float sum = 1e-10f;
for (int i=0;i<5*n/8;i++)
sum += a[i];
float tot = sum;
for (int i=5*n/8;i<n;i++)
tot += a[i];
svdData[writePos+tx] = 1.0f - sum/tot;
}
double ComputeSingular(CudaImage *img, CudaImage *svd)
{
int sw = svd->width;
int sh = svd->height;
TimerGPU timer(0);
if (img->d_data==NULL || svd->d_data==NULL) {
printf("ComputeSingular: missing data\n");
return 0.0;
}
dim3 blocks(iDivUp(sw, SINGULAR_BATCH), sh);
dim3 threads(SINGULAR_BATCH);
ComputeSingular<8,1><<<blocks,threads>>>(img->d_data, svd->d_data, sw);
checkMsg("ComputeSingular() execution failed\n");
safeCall(cudaThreadSynchronize());
double gpuTime = timer.read();
//#ifdef VERBOSE
printf("ComputeSingular time = %.2f ms\n", gpuTime);
//#endif
return gpuTime;
}
|
respaldo.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "rocblas.h"
#include <hip/hip_runtime.h>
#include <float.h>
#include <math.h>
#include <sys/time.h>
#include <cblas.h>
#include <f77blas.h>
#include <pthread.h>
#include <assert.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <time.h>
#include <arrayfire.h>
#include <af/cuda.h>
#include <fitsio.h>
#include <cublasXt.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_vector.h>
#include <gsl/gsl_filter.h>
#include <gsl/gsl_min.h>
// [email protected]
//nvcc otroconfloat.cu -lcudart -lcublas -lcuda -lblasx -I/opt/arrayfire/include/ -L/opt/arrayfire/lib64/ -lafcuda -lcfitsio -o otroconfloat
/* sudo scp /home/yoyisaurio/Desktop/juguetes\ de\ CUDA/otroconfloat.cu [email protected]:/home/rarmijo/Desktop/ */
// nvcc calCompreInfo.cu -lcudart -lcublas -lcuda -lblasx -I/opt/arrayfire/include/ -L/opt/arrayfire/lib64/ -lafcuda -lcfitsio -lgsl -lgslcblas -lm -o calCompreInfo
// sudo scp /home/yoyisaurio/Desktop/proyecto/calCompreInfo.cu [email protected]:/home/rarmijo
// nvcc calCompreInfo.cu -lcudart -lcublas -lcuda -lblasx -I/opt/arrayfire/include/ -L/opt/arrayfire/lib64/ -lafcuda -lcfitsio -lgsl -lgslcblas -lm -o calCompreInfo
// ./calCompreInfo
// sudo scp /home/yoyisaurio/Desktop/proyecto/nuevo.cu [email protected]:/home/rarmijo/Desktop/proyecto
// sudo scp [email protected]:/home/rarmijo/float_calCompresion_baseNormal_cota99/ite0/reconsImg.fit /home/yoyisaurio/Desktop/ds9/reconsImg.fit
// sudo scp [email protected]:/home/rarmijo/Desktop/proyecto/float_calCompresion_baseNormal_cota99/ite0/reconsImg.fit /home/yoyisaurio/Desktop/ds9/nuevito.fit
// nvcc nuevo.cu -lcudart -lcublas -lcuda -lblasx -I/opt/arrayfire/include/ -L/opt/arrayfire/lib64/ -lafcuda -lcfitsio -o nuevo
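// Parameter bundles for the two interpolation bases built in this file (smoothed
// rectangular/boxcar and Gaussian "normal"): u, v and w are per-visibility arrays,
// delta_u/delta_v the grid cell sizes, N the grid size and estrechezDeBorde the
// edge steepness of the rectangular basis.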
struct parametros_BaseRect
{
float* u;
float* v;
float* w;
float delta_u;
float delta_v;
float* matrizDeUnos;
long cantVisi;
long N;
float estrechezDeBorde;
};
struct parametros_BaseNormal
{
float* u;
float* v;
float* w;
float delta_u;
float delta_v;
long cantVisi;
long N;
};
static int Stopping_Rule(float x0, float x1, float tolerance);
#define sqrt5 2.236067977499789696
char* numAString(int* numero)
{
  // Enough space for all decimal digits, an optional sign and the terminating '\0'.
  int cantCarac = snprintf(NULL, 0, "%d", *numero) + 1;
  char* numComoString = (char*) malloc(sizeof(char)*cantCarac);
  return numComoString;
}
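// Least-squares slope of y against x over largoDeX samples, using the usual closed
// form (n*Sxy - Sx*Sy) / (n*Sxx - Sx^2).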
float calPendiente(float* x, int largoDeX, float* y)
{
float sumadeYs = 0.0;
float sumadeXs = 0.0;
float sumaDeLosCuadradosdeXs = 0.0;
float sumaDeMultdeXsconYs = 0.0;
for(int i=0; i<largoDeX; i++)
{
float xActual = x[i];
float yActual = y[i];
sumadeYs += yActual;
sumadeXs += xActual;
sumaDeMultdeXsconYs += xActual * yActual;
sumaDeLosCuadradosdeXs += xActual * xActual;
}
float cuadradoDeLaSumadeXs = sumadeXs * sumadeXs;
float numerador = largoDeX * sumaDeMultdeXsconYs - sumadeXs * sumadeYs;
float denominador = largoDeX * sumaDeLosCuadradosdeXs - cuadradoDeLaSumadeXs;
return numerador/denominador;
}
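// Returns n evenly spaced values from a to b (inclusive) in CUDA managed memory;
// the caller is responsible for freeing the result with hipFree.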
float* linspace(float a, float b, long n)
{
float c;
int i;
float* u;
hipMallocManaged(&u, n*sizeof(float));
c = (b - a)/(n - 1);
for(i = 0; i < n - 1; ++i)
u[i] = a + i*c;
u[n - 1] = b;
return u;
}
void imprimirVector(float* lista, int tamanoLista)
{
int i;
for(i=0;i<tamanoLista;i++)
{
printf("%f\n",lista[i]);
}
printf("\n");
}
void imprimirMatrizColumna(float* vector, long cantFilas, long cantColumnas)
{
long i,j;
for(i=0;i<cantFilas;i++)
{
for(j=0;j<cantColumnas;j++)
{
printf("%.12e ", vector[(((j)*(cantFilas))+(i))]);
}
printf("\n");
}
printf("\n");
}
void imprimirMatrizPura(float* matriz, int cantFilas, int cantColumnas)
{
for(int i=0; i<cantFilas; i++)
{
for(int j=0; j<cantColumnas; j++)
{
printf("%f ", matriz[i*cantColumnas+j]);
}
printf("\n");
}
printf("\n");
}
void escribirCoefs(float* coefs, char* nombreArchivo, long cantFilas, long cantColumnas)
{
FILE* archivo = fopen(nombreArchivo, "w");
for(long i=0;i<cantFilas;i++)
{
for(long j=0;j<cantColumnas;j++)
{
fprintf(archivo, "%.12e ", coefs[(((j)*(cantFilas))+(i))]);
}
fprintf(archivo, "\n");
}
fclose(archivo);
}
float** crearMatrizDouble(int cantFilas, int cantColumnas)
{
float** matriz = (float**) calloc(cantFilas, sizeof(float*));
int i;
for(i=0;i<cantFilas;i++)
{
matriz[i] = (float*) calloc(cantColumnas, sizeof(float));
}
return matriz;
}
void inicializarMatriz(float** matriz, int cantFilas, int cantColumnas)
{
int i;
int j;
int contador = 0;
for(i=0;i<cantFilas;i++)
{
for(j=0;j<cantColumnas;j++)
{
matriz[i][j] = contador;
contador++;
}
}
}
float* transformarMatrizAMatrizColumna(float** matriz, int cantFilas, int cantColumnas)
{
float* nuevoVector = (float*) calloc(cantFilas*cantColumnas,sizeof(float));
int i,j;
for(j=0;j<cantColumnas;j++)
{
for(i=0;i<cantFilas;i ++)
{
nuevoVector[(((j)*(cantFilas))+(i))]= matriz[i][j];
}
}
return nuevoVector;
}
float** transformarMatrizColumnaAMatriz(float* matrizColumna, int cantFilas, int cantColumnas)
{
float** matriz = crearMatrizDouble(cantFilas,cantColumnas);
int i,j;
for(j=0;j<cantColumnas;j++)
{
for(i=0;i<cantFilas;i ++)
{
matriz[i][j] = matrizColumna[(((j)*(cantFilas))+(i))];
}
}
return matriz;
}
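// C = A * B (column-major, A: m x k, B: k x n, C: m x n) via cublasXt SGEMM on
// device 0, followed by a NaN check on the result.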
void multMatrices(float* a, long m, long k, float* b, long n, float* c)
{
hipError_t cudaStat;
hipblasStatus_t stat;
cublasXtHandle_t handle;
stat = cublasXtCreate(&handle);
int devices[1] = { 0 };
if(cublasXtDeviceSelect(handle, 1, devices) != HIPBLAS_STATUS_SUCCESS)
{
printf("set devices fail\n");
}
float al = 1.0;
float bet = 0.0;
stat = cublasXtSgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N,m,n,k,&al,a,m,b,k,&bet,c,m);
hipDeviceSynchronize();
for(long i=0; i<m*n;i++)
{
if(isnan(c[i]))
{
printf("Valor nan encontrado en multMatrices.\n");
break;
}
}
cublasXtDestroy(handle);
}
// void multMatrices(float* a, long m, long k, float* b, long n, float* c)
// {
// hipError_t cudaStat;
// hipblasStatus_t stat;
// hipblasHandle_t handle;
// stat = hipblasCreate(&handle);
// float al = 1.0;
// float bet = 0.0;
// stat = hipblasSgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N,m,n,k,&al,a,m,b,k,&bet,c,m);
// hipDeviceSynchronize();
// for(long i=0; i<m*n;i++)
// {
// if(isnan(c[i]))
// {
// printf("Valor nan encontrado en multMatrices.\n");
// break;
// }
// }
// hipblasDestroy(handle);
// }
// void combinacionLinealMatrices(float al, float* a, long m, long k, float bet, float* c)
// {
// long n = k;
// hipError_t cudaStat;
// hipblasStatus_t stat;
// cublasXtHandle_t handle;
// float* b;
// hipMallocManaged(&b, k*n*sizeof(float));
// hipMemset(b, 0, k*n*sizeof(float));
// for(int i=0; i<n; i++)
// {
// b[(i*n+i)] = 1.0;
// }
// stat = cublasXtCreate(&handle);
// int devices[1] = { 0 };
// if(cublasXtDeviceSelect(handle, 1, devices) != HIPBLAS_STATUS_SUCCESS)
// {
// printf("set devices fail\n");
// }
// stat = cublasXtSgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N,m,n,k,&al,a,m,b,k,&bet,c,m);
// hipDeviceSynchronize();
// for(long i=0; i<m*n;i++)
// {
// if(isnan(c[i]))
// {
// printf("Valor nan encontrado en combLinealMatrices.\n");
// break;
// }
// }
// hipFree(b);
// cublasXtDestroy(handle);
// }
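// Elementwise GPU helpers: each kernel below maps one thread to one matrix entry and
// the host wrappers launch ceil(total/1024) blocks of 1024 threads.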
__global__ void multMatrizPorConstante_kernel(float* matrizA, long cantFilas, long cantColumnas, float constante)
{
long miId = threadIdx.x + blockDim.x * blockIdx.x;
if(miId < cantFilas*cantColumnas)
{
matrizA[miId] = constante * matrizA[miId];
}
}
void multMatrizPorConstante(float* matrizA, long cantFilasMatrizA, long cantColumnasMatrizA, float constante)
{
long cantBloques = ceil((float) cantFilasMatrizA*cantColumnasMatrizA/1024);
hipLaunchKernelGGL(( multMatrizPorConstante_kernel), dim3(cantBloques),dim3(1024), 0, 0, matrizA, cantFilasMatrizA, cantColumnasMatrizA, constante);
hipDeviceSynchronize();
}
__global__ void combinacionLinealMatrices_kernel(float al, float* matrizA, long cantFilas, long cantColumnas, float bet, float* matrizB)
{
long miId = threadIdx.x + blockDim.x * blockIdx.x;
if(miId < cantFilas*cantColumnas)
{
matrizB[miId] = al * matrizA[miId] + bet * matrizB[miId];
}
}
void combinacionLinealMatrices(float al, float* matrizA, long cantFilas, long cantColumnas, float bet, float* matrizB)
{
long cantBloques = ceil((float) cantFilas*cantColumnas/1024);
hipLaunchKernelGGL(( combinacionLinealMatrices_kernel), dim3(cantBloques),dim3(1024), 0, 0, al, matrizA, cantFilas, cantColumnas, bet, matrizB);
hipDeviceSynchronize();
}
// void combinacionLinealMatrices(float al, float* a, long m, long k, float bet, float* c)
// {
// long n = k;
// hipError_t cudaStat;
// hipblasStatus_t stat;
// hipblasHandle_t handle;
// float* b;
// hipMallocManaged(&b, k*n*sizeof(float));
// hipMemset(b, 0, k*n*sizeof(float));
// for(int i=0; i<n; i++)
// {
// b[(i*n+i)] = 1.0;
// }
// stat = hipblasCreate(&handle);
// stat = hipblasSgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N,m,n,k,&al,a,m,b,k,&bet,c,m);
// hipDeviceSynchronize();
// for(long i=0; i<m*n;i++)
// {
// if(isnan(c[i]))
// {
// printf("Valor nan encontrado en combLinealMatrices.\n");
// break;
// }
// }
// hipFree(b);
// hipblasDestroy(handle);
// }
// void transponerMatriz(float* matriz, int cantFilas, int cantColumnas, float* matrizTranspuesta)
// {
// for(int i=0;i<cantFilas;i++)
// {
// for(int j=0;j<cantColumnas;j++)
// {
// matrizTranspuesta[(((i)*(cantColumnas))+(j))] = matriz[(((j)*(cantFilas))+(i))];
// }
// }
// }
// __global__ void transponerMatriz_kernel(float* matrizA, float* matrizA_T, long cantFilas, long cantColumnas)
// {
// long miId = threadIdx.x + blockDim.x * blockIdx.x * blockDim.x * blockDim.y + blockIdx.y * gridDim.x * blockDim.x * blockDim.y;
// if(miId < cantFilas*cantColumnas)
// {
// long i = miId%cantFilas;
// long j = miId/cantFilas;
// matrizA_T[(i*cantColumnas+j)] = matrizA[(j*cantFilas+i)];
// }
// }
__global__ void transponerMatriz_kernel(float* matrizA, float* matrizA_T, long cantFilas, long cantColumnas)
{
long miId = threadIdx.x + blockDim.x * blockIdx.x;
if(miId < cantFilas*cantColumnas)
{
long i = miId%cantFilas;
long j = miId/cantFilas;
matrizA_T[(i*cantColumnas+j)] = matrizA[(j*cantFilas+i)];
}
}
void transponerMatriz(float* matrizA, long cantFilasMatrizA, long cantColumnasMatrizA, float* resultado)
{
long cantBloques = ceil((float) cantFilasMatrizA*cantColumnasMatrizA/1024);
hipLaunchKernelGGL(( transponerMatriz_kernel), dim3(cantBloques),dim3(1024), 0, 0, matrizA, resultado, cantFilasMatrizA, cantColumnasMatrizA);
hipDeviceSynchronize();
}
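// Outer difference: resultado is a largoVectorA x largoVectorB column-major matrix
// with resultado[i][j] = vectorA[i] - vectorB[j]; used to build the coordinate
// distance matrices for the basis functions.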
__global__ void restaVectorColumnaConVector_kernel(float* vectorA, long largoVectorA, float* vectorB, long largoVectorB, float* resultado)
{
long miId = threadIdx.x + blockDim.x * blockIdx.x;
if(miId < largoVectorA*largoVectorB)
{
long i = miId%largoVectorA;
long j = miId/largoVectorA;
resultado[miId] = vectorA[i] - vectorB[j];
}
}
float* restaVectorColumnaConVector(float* vectorA, long largoVectorA, float* vectorB, long largoVectorB)
{
float* resultado;
hipMallocManaged(&resultado,largoVectorA*largoVectorB*sizeof(float));
long cantBloques = ceil((float) largoVectorA*largoVectorB/1024);
hipLaunchKernelGGL(( restaVectorColumnaConVector_kernel), dim3(cantBloques),dim3(1024), 0, 0, vectorA, largoVectorA, vectorB, largoVectorB, resultado);
hipDeviceSynchronize();
return resultado;
}
void vectorColumnaAMatriz(float* vectorA, long cantFilas, long cantColumnas, float* nuevaMatriz)
{
float* vectorDeUnos;
hipMallocManaged(&vectorDeUnos,cantColumnas*sizeof(float));
for(long i=0; i<cantColumnas; i++)
{
vectorDeUnos[i] = 1.0;
}
multMatrices(vectorA, cantFilas, 1, vectorDeUnos, cantColumnas, nuevaMatriz);
hipFree(vectorDeUnos);
}
__global__ void hadamardProduct_kernel(float* matrizA, float* matrizB, float* resultado, long cantFilas, long cantColumnas)
{
long miId = threadIdx.x + blockDim.x * blockIdx.x;
if(miId < cantFilas*cantColumnas)
{
resultado[miId] = matrizA[miId]*matrizB[miId];
}
}
void hadamardProduct(float* matrizA, long cantFilasMatrizA, long cantColumnasMatrizA, float* matrizB, float* resultado)
{
long cantBloques = ceil((float) cantFilasMatrizA*cantColumnasMatrizA/1024);
hipLaunchKernelGGL(( hadamardProduct_kernel), dim3(cantBloques),dim3(1024), 0, 0, matrizA, matrizB, resultado, cantFilasMatrizA, cantColumnasMatrizA);
hipDeviceSynchronize();
}
float dotProduct(float* x, long n, float* y)
{
hipError_t cudaStat;
hipblasStatus_t stat;
hipblasHandle_t handle;
stat = hipblasCreate(&handle);
float result;
stat = hipblasSdot(handle,n,x,1,y,1,&result);
hipblasDestroy(handle);
return result;
}
__global__ void calcularExp_kernel(float* a, float* c, long cantFilas, long cantColumnas)
{
long miId = threadIdx.x + blockDim.x * blockIdx.x;
if(miId < cantFilas*cantColumnas)
{
c[miId] = exp(a[miId]);
}
}
void calcularExp(float* matrizA, long cantFilasMatrizA, long cantColumnasMatrizA)
{
long cantBloques = ceil((float) cantFilasMatrizA*cantColumnasMatrizA/1024);
hipLaunchKernelGGL(( calcularExp_kernel), dim3(cantBloques),dim3(1024), 0, 0, matrizA, matrizA, cantFilasMatrizA, cantColumnasMatrizA);
hipDeviceSynchronize();
}
__global__ void calcularInvFrac_kernel(float* a, float* c, long cantFilas, long cantColumnas)
{
long miId = threadIdx.x + blockDim.x * blockIdx.x;
if(miId < cantFilas*cantColumnas)
{
c[miId] = 1.0/a[miId];
}
}
void calcularInvFrac(float* matrizA, long cantFilasMatrizA, long cantColumnasMatrizA)
{
long cantBloques = ceil((float) cantFilasMatrizA*cantColumnasMatrizA/1024);
hipLaunchKernelGGL(( calcularInvFrac_kernel), dim3(cantBloques),dim3(1024), 0, 0, matrizA, matrizA, cantFilasMatrizA, cantColumnasMatrizA);
hipDeviceSynchronize();
}
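// Model visibilities for coefficients MC: visModelo_paso3[k] = MV[k,:] * MC * MU[k,:]^T
// for every visibility k, computed as the row sums of MV .* (MC*MU^T)^T so that the
// full cantVisi x cantVisi product is never formed.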
void calVisModelo(float* MV, long cantFilasMV, long cantColumnasMV, float* MC, long cantColumnasMU, float* MU, float* matrizDeUnosTamN, float* visModelo_paso3)
{
float* MU_T;
hipMallocManaged(&MU_T, cantFilasMV*cantColumnasMU*sizeof(float));
transponerMatriz(MU, cantFilasMV, cantColumnasMU, MU_T);
float* visModelo_paso1;
hipMallocManaged(&visModelo_paso1, cantColumnasMV*cantFilasMV*sizeof(float));
hipMemset(visModelo_paso1, 0, cantColumnasMV*cantFilasMV*sizeof(float));
multMatrices(MC, cantColumnasMV, cantColumnasMU, MU_T, cantFilasMV, visModelo_paso1);
hipFree(MU_T);
float* transpuesta;
hipMallocManaged(&transpuesta, cantColumnasMV*cantFilasMV*sizeof(float));
transponerMatriz(visModelo_paso1, cantColumnasMV, cantFilasMV, transpuesta);
hipFree(visModelo_paso1);
float* visModelo_paso2;
hipMallocManaged(&visModelo_paso2, cantFilasMV*cantColumnasMV*sizeof(float));
hadamardProduct(MV, cantFilasMV, cantColumnasMV, transpuesta, visModelo_paso2);
hipFree(transpuesta);
multMatrices(visModelo_paso2, cantFilasMV, cantColumnasMV, matrizDeUnosTamN, 1, visModelo_paso3);
hipFree(visModelo_paso2);
}
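// Residual vector: model visibilities minus observed visibilities (managed memory,
// caller frees).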
float* calResidual(float* visObs, float* MV, long cantFilasMV, long cantColumnasMV, float* MC, long cantColumnasMU, float* MU, float* matrizDeUnosTamN)
{
float* visModelo;
hipMallocManaged(&visModelo, cantFilasMV*sizeof(float));
hipMemset(visModelo, 0, cantFilasMV*sizeof(float));
calVisModelo(MV, cantFilasMV, cantColumnasMV, MC, cantColumnasMU, MU, matrizDeUnosTamN, visModelo);
combinacionLinealMatrices(-1.0, visObs, cantFilasMV, 1, 1.0, visModelo);
return visModelo;
}
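// Weighted least-squares cost: residual^T * diag(w) * residual.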
float calCosto(float* residual, long cantVisi, float* w)
{
float* resultado;
hipMallocManaged(&resultado, cantVisi*sizeof(float));
hadamardProduct(residual, cantVisi, 1, w, resultado);
float total = dotProduct(resultado, cantVisi, residual);
hipFree(resultado);
return total;
}
__global__ void MultPorDifer_kernel(float* matrizA, float* matrizB, float* resultado, long cantFilas, long cantColumnas)
{
long miId = threadIdx.x + blockDim.x * blockIdx.x;
if(miId < cantFilas*cantColumnas)
{
long posicionEnB = miId%cantFilas;
resultado[miId] = matrizA[miId]*matrizB[posicionEnB];
}
}
void MultPorDifer(float* matrizA, long cantFilas, long cantColumnas, float* diferencias, float* resultado)
{
long cantBloques = ceil((float) cantFilas*cantColumnas/1024);
hipLaunchKernelGGL(( MultPorDifer_kernel), dim3(cantBloques),dim3(1024), 0, 0, matrizA, diferencias, resultado, cantFilas, cantColumnas);
hipDeviceSynchronize();
}
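// Gradient of the weighted least-squares cost with respect to MC (up to a constant
// factor): total_paso2 = MV^T * diag(w .* residual) * MU.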
void calGradiente(float* residual, float* MV, long cantFilasMV, long cantColumnasMV, float* MU, long cantColumnasMU, float* w, float* total_paso2)
{
float* diferencia;
hipMallocManaged(&diferencia, cantFilasMV*sizeof(float));
hadamardProduct(residual, cantFilasMV, 1, w, diferencia);
float* total_paso1;
hipMallocManaged(&total_paso1, cantColumnasMV*cantFilasMV*sizeof(float));
MultPorDifer(MV, cantFilasMV, cantColumnasMV, diferencia, total_paso1);
hipFree(diferencia);
float* total_paso1_5;
hipMallocManaged(&total_paso1_5, cantColumnasMV*cantFilasMV*sizeof(float));
transponerMatriz(total_paso1, cantFilasMV, cantColumnasMV, total_paso1_5);
hipFree(total_paso1);
multMatrices(total_paso1_5, cantColumnasMV, cantFilasMV, MU, cantColumnasMU, total_paso2);
hipFree(total_paso1_5);
}
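// Exact line-search step along pActual for the quadratic cost:
// alpha = -(gradiente . pActual) / (pActual . H pActual), where H pActual is obtained by
// pushing pActual through the model (calVisModelo) and gradient (calGradiente) operators.
// Sets flag_NOESPOSIBLEMINIMIZAR when the curvature term is zero.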
float calAlpha(float* gradiente, long cantFilasMC, long cantColumnasMC, float* pActual, float* MV, long cantFilasMV, long cantColumnasMV, float* MU, long cantColumnasMU, float* w, float* matrizDeUnosTamN, int* flag_NOESPOSIBLEMINIMIZAR)
{
float* gradienteNegativo;
hipMallocManaged(&gradienteNegativo, cantFilasMC*cantColumnasMC*sizeof(float));
hipMemset(gradienteNegativo, 0, cantFilasMC*cantColumnasMC*sizeof(float));
combinacionLinealMatrices(-1.0, gradiente, cantFilasMC, cantColumnasMC, 0.0, gradienteNegativo);
float numerador = dotProduct(gradienteNegativo, cantFilasMC*cantColumnasMC, pActual);
hipFree(gradienteNegativo);
float* visModeloP;
hipMallocManaged(&visModeloP, cantFilasMV*sizeof(float));
hipMemset(visModeloP, 0, cantFilasMV*sizeof(float));
calVisModelo(MV, cantFilasMV, cantColumnasMV, pActual, cantColumnasMU, MU, matrizDeUnosTamN, visModeloP);
float* gradP;
hipMallocManaged(&gradP, cantFilasMC * cantColumnasMC*sizeof(float));
hipMemset(gradP, 0, cantFilasMC * cantColumnasMC*sizeof(float));
calGradiente(visModeloP, MV, cantFilasMV, cantColumnasMV, MU, cantColumnasMU, w, gradP);
hipFree(visModeloP);
float denominador = dotProduct(pActual, cantFilasMC * cantColumnasMC, gradP);
hipFree(gradP);
if(denominador == 0.0)
{
*flag_NOESPOSIBLEMINIMIZAR = 1;
}
return numerador/denominador;
}
float calBeta_Fletcher_Reeves(float* gradienteActual, long tamanoGradiente, float* gradienteAnterior)
{
float numerador = dotProduct(gradienteActual, tamanoGradiente, gradienteActual);
float denominador = dotProduct(gradienteAnterior, tamanoGradiente, gradienteAnterior);
float resultado = numerador/denominador;
return resultado;
}
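// Diagonal of the Fisher information matrix of the basis:
// I_jj = sum_k w[k] * MV[k,j]^2 * MU[k,j]^2. Returns a 2-element host array with
// [0] the sum of the diagonal (trace) and [1] its largest entry.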
float* calInfoFisherDiag(float* MV, long cantFilasMV, long cantColumnasMV, float* MU, float* w)
{
float* MV_T;
hipMallocManaged(&MV_T, cantFilasMV*cantColumnasMV*sizeof(float));
transponerMatriz(MV, cantFilasMV, cantColumnasMV, MV_T);
float* primeraMatriz_fase1;
hipMallocManaged(&primeraMatriz_fase1, cantColumnasMV*cantFilasMV*sizeof(float));
hadamardProduct(MV_T, cantColumnasMV, cantFilasMV, MV_T, primeraMatriz_fase1);
hipFree(MV_T);
float* wMatriz;
hipMallocManaged(&wMatriz, cantFilasMV*cantColumnasMV*sizeof(float));
hipMemset(wMatriz, 0, cantFilasMV*cantColumnasMV*sizeof(float));
vectorColumnaAMatriz(w, cantFilasMV, cantColumnasMV, wMatriz);
float* wmatriz_T;
hipMallocManaged(&wmatriz_T, cantFilasMV*cantColumnasMV*sizeof(float));
transponerMatriz(wMatriz, cantFilasMV, cantColumnasMV, wmatriz_T);
hipFree(wMatriz);
float* primeraMatriz_fase2;
hipMallocManaged(&primeraMatriz_fase2, cantColumnasMV*cantFilasMV*sizeof(float));
hadamardProduct(primeraMatriz_fase1, cantColumnasMV, cantFilasMV, wmatriz_T, primeraMatriz_fase2);
hipFree(primeraMatriz_fase1);
hipFree(wmatriz_T);
float* MU_T;
hipMallocManaged(&MU_T, cantFilasMV*cantColumnasMV*sizeof(float));
transponerMatriz(MU, cantFilasMV, cantColumnasMV, MU_T);
float* segundaMatriz;
hipMallocManaged(&segundaMatriz, cantFilasMV*cantColumnasMV*sizeof(float));
hadamardProduct(MU_T, cantFilasMV, cantColumnasMV, MU_T, segundaMatriz);
hipFree(MU_T);
float* resultado_fase1;
hipMallocManaged(&resultado_fase1, cantColumnasMV*cantFilasMV*sizeof(float));
hadamardProduct(primeraMatriz_fase2, cantColumnasMV, cantFilasMV, segundaMatriz, resultado_fase1);
hipFree(primeraMatriz_fase2);
hipFree(segundaMatriz);
float* vectorDeUnos;
hipMallocManaged(&vectorDeUnos, cantFilasMV*sizeof(float));
float* resultado_fase2;
hipMallocManaged(&resultado_fase2, cantColumnasMV*sizeof(float));
hipMemset(resultado_fase2, 0, cantColumnasMV*sizeof(float));
for(long i=0; i<cantFilasMV; i++)
{
vectorDeUnos[i] = 1;
}
multMatrices(resultado_fase1, cantColumnasMV, cantFilasMV, vectorDeUnos, 1, resultado_fase2);
hipFree(resultado_fase1);
float medidaInfoMaximoDiagonal = 0.0;
for (long i=0; i<cantColumnasMV; i++)
{
if(resultado_fase2[i] > medidaInfoMaximoDiagonal)
medidaInfoMaximoDiagonal = resultado_fase2[i];
}
float medidaInfoSumaDiagonal = dotProduct(resultado_fase2, cantColumnasMV, vectorDeUnos);
hipFree(vectorDeUnos);
hipFree(resultado_fase2);
float* medidasDeInfo = (float*) malloc(sizeof(float)*2);
medidasDeInfo[0] = medidaInfoSumaDiagonal;
medidasDeInfo[1] = medidaInfoMaximoDiagonal;
return medidasDeInfo;
}
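// Evaluates the model on the N x N grid: returns MV * MC * MU^T, i.e. the
// reconstructed Fourier plane for one component (used separately for the real and
// imaginary parts).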
float* estimacionDePlanoDeFourier(float* MV, long cantFilasMV, long cantColumnasMV, float* MC, long cantFilasMC, long cantColumnasMC, float* MU)
{
float* MU_T;
hipMallocManaged(&MU_T, cantFilasMV*cantColumnasMV*sizeof(float));
transponerMatriz(MU, cantFilasMV, cantColumnasMV, MU_T);
float* resultado_paso1;
hipMallocManaged(&resultado_paso1, cantFilasMC*cantFilasMV*sizeof(float));
hipMemset(resultado_paso1, 0, cantFilasMC*cantFilasMV*sizeof(float));
multMatrices(MC, cantFilasMC, cantColumnasMC, MU_T, cantFilasMV, resultado_paso1);
hipFree(MU_T);
float* resultado_paso2;
hipMallocManaged(&resultado_paso2, cantFilasMV*cantFilasMV*sizeof(float));
hipMemset(resultado_paso2, 0, cantFilasMV*cantFilasMV*sizeof(float));
multMatrices(MV, cantFilasMV, cantColumnasMV, resultado_paso1, cantFilasMV, resultado_paso2);
hipFree(resultado_paso1);
return resultado_paso2;
}
void printerror_cfitsio( int status)
{
if (status)
{
fits_report_error(stderr, status);
exit( status );
}
return;
}
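// Combines the real and imaginary grid estimates into a complex plane, applies a
// centred inverse 2-D FFT with ArrayFire and writes the real part of the result to
// a FITS file.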
void escribirTransformadaInversaFourier2D(float* estimacionFourier_ParteImag, float* estimacionFourier_ParteReal, long N, char* nombreArchivo)
{
af::array estimacionFourier_ParteImag_GPU(N, N, estimacionFourier_ParteImag);
af::array estimacionFourier_ParteReal_GPU(N, N, estimacionFourier_ParteReal);
af::array mapaFourierRecons = af::complex(estimacionFourier_ParteReal_GPU, estimacionFourier_ParteImag_GPU);
estimacionFourier_ParteImag_GPU.unlock();
estimacionFourier_ParteReal_GPU.unlock();
mapaFourierRecons = af::shift(mapaFourierRecons, (mapaFourierRecons.dims(0)+1)/2, (mapaFourierRecons.dims(1)+1)/2);
mapaFourierRecons = af::ifft2(mapaFourierRecons, N, N);
mapaFourierRecons = af::shift(mapaFourierRecons, (mapaFourierRecons.dims(0)+1)/2, (mapaFourierRecons.dims(1)+1)/2);
mapaFourierRecons = af::real(mapaFourierRecons);
mapaFourierRecons = af::flip(mapaFourierRecons, 0);
mapaFourierRecons = af::transpose(mapaFourierRecons);
float* auxiliar_mapaFourierRecons = mapaFourierRecons.device<float>();
float* inver_visi = (float*) calloc(N*N, sizeof(float));
hipMemcpy(inver_visi, auxiliar_mapaFourierRecons, N*N*sizeof(float), hipMemcpyDeviceToHost);
mapaFourierRecons.unlock();
fitsfile *fptr;
int status;
long fpixel, nelements;
int bitpix = FLOAT_IMG;
long naxis = 2;
long naxes[2] = {N, N};
remove(nombreArchivo);
status = 0;
if (fits_create_file(&fptr, nombreArchivo, &status))
printerror_cfitsio(status);
if (fits_create_img(fptr, bitpix, naxis, naxes, &status))
printerror_cfitsio(status);
fpixel = 1;
nelements = naxes[0] * naxes[1];
if (fits_write_img(fptr, TFLOAT, fpixel, nelements, inver_visi, &status))
printerror_cfitsio(status);
if (fits_close_file(fptr, &status))
printerror_cfitsio(status);
free(inver_visi);
}
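// Smoothed rectangular (boxcar) basis: MV is cantVisi x N (column-major, managed
// memory) with MV[k][j] = (sigmoid(s*(v[k]-c_j)) - sigmoid(s*(v[k]-c_j-ancho)))/ancho,
// where c_j = (-N/2 + j)*delta_v are the cell edges, s = estrechezDeBorde controls the
// edge steepness and ancho is the cell width.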
float* calcularMV_Rect(float* v, float delta_v, long cantVisi, long N, float estrechezDeBorde, float ancho, float* matrizDeUnos)
{
float* desplazamientoEnV = linspace((-N/2.0) * delta_v, ((N/2.0) - 1.0) * delta_v, N);
float* primeraFraccionV;
hipMallocManaged(&primeraFraccionV, cantVisi * N * sizeof(float));
hipMemset(primeraFraccionV, 0, cantVisi * N * sizeof(float));
float* segundaFraccionV;
hipMallocManaged(&segundaFraccionV, cantVisi * N * sizeof(float));
for(long i=0; i<(cantVisi*N); i++)
{
segundaFraccionV[i] = 1.0;
}
float* matrizDiferenciaV = restaVectorColumnaConVector(v, cantVisi, desplazamientoEnV, N);
hipFree(desplazamientoEnV);
combinacionLinealMatrices(-1.0 * estrechezDeBorde, matrizDiferenciaV, cantVisi, N, 0.0, primeraFraccionV);
combinacionLinealMatrices(estrechezDeBorde, matrizDiferenciaV, cantVisi, N, -1 * estrechezDeBorde * ancho, segundaFraccionV);
hipFree(matrizDiferenciaV);
calcularExp(primeraFraccionV, cantVisi, N);
calcularExp(segundaFraccionV, cantVisi, N);
combinacionLinealMatrices(1.0, matrizDeUnos, cantVisi, N, 1.0, primeraFraccionV);
combinacionLinealMatrices(1.0, matrizDeUnos, cantVisi, N, 1.0, segundaFraccionV);
calcularInvFrac(primeraFraccionV, cantVisi, N);
calcularInvFrac(segundaFraccionV, cantVisi, N);
float* MV;
hipMallocManaged(&MV, cantVisi * N * sizeof(float));
for(long i=0; i<(cantVisi*N); i++)
{
MV[i] = 1.0/ancho;
}
combinacionLinealMatrices(1.0, primeraFraccionV, cantVisi, N, 1.0, segundaFraccionV);
hipFree(primeraFraccionV);
combinacionLinealMatrices(1.0/ancho, segundaFraccionV, cantVisi, N, -1.0, MV);
hipFree(segundaFraccionV);
return MV;
}
float* calcularMV_Rect_estFourier(float ancho, long N, float delta_v, float* matrizDeUnos, float estrechezDeBorde, float* matrizDeUnosEstFourier)
{
float* coordenadasVCentrosCeldas = linspace((-N/2.0) * delta_v, ((N/2.0) - 1.0) * delta_v, N);
combinacionLinealMatrices(0.5 * delta_v, matrizDeUnosEstFourier, N, 1, 1.0, coordenadasVCentrosCeldas);
float* MV_AF = calcularMV_Rect(coordenadasVCentrosCeldas, delta_v, N, N, estrechezDeBorde, ancho, matrizDeUnos);
hipFree(coordenadasVCentrosCeldas);
return MV_AF;
}
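// Gaussian ("normal") basis: MV[k][j] is a normalised Gaussian of standard deviation
// anchoV centred on the centre of grid cell j, evaluated at v[k]
// (cantVisi x N, column-major, managed memory).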
float* calcularMV_Normal(float* v, float delta_v, long cantVisi, long N, float anchoV)
{
float* CV;
hipMallocManaged(&CV, N * sizeof(float));
for(long i=0;i<N;i++)
{
CV[i] = 0.5 * delta_v;
}
float* CV_sinescalar = linspace((-N/2.0) * delta_v, ((N/2.0) - 1.0) * delta_v, N);
combinacionLinealMatrices(1.0, CV_sinescalar, N, 1, 1.0, CV);
hipFree(CV_sinescalar);
float* MV = restaVectorColumnaConVector(v, cantVisi, CV, N);
hipFree(CV);
multMatrizPorConstante(MV, cantVisi, N, 1.0/anchoV);
hadamardProduct(MV, cantVisi, N, MV, MV);
multMatrizPorConstante(MV, cantVisi, N, -0.5);
calcularExp(MV, cantVisi, N);
multMatrizPorConstante(MV, cantVisi, N, 1.0/sqrt(2.0 * M_PI * anchoV * anchoV));
return MV;
}
// float* calcularMV_Normal(float* v, float delta_v, int cantVisi, int N, float anchoV)
// {
// float* CV = (float*) calloc(N, sizeof(float));
// float* matrizDeCeros = (float*) calloc(cantVisi * N, sizeof(float));
// for(int i=0;i<N;i++)
// {
// CV[i] = 0.5 * delta_v;
// }
// float* CV_sinescalar = linspace((-N/2.0) * delta_v, ((N/2.0) - 1) * delta_v, N);
// combinacionLinealMatrices(1.0, CV_sinescalar, N, 1, 1.0, CV);
// free(CV_sinescalar);
// float* MV = restaVectorColumnaConVector(v, cantVisi, CV, N);
// free(CV);
// combinacionLinealMatrices(0.0, matrizDeCeros, cantVisi, N, 1.0/anchoV, MV);
// hadamardProduct(MV, cantVisi, N, MV, MV);
// combinacionLinealMatrices(0.0, matrizDeCeros, cantVisi, N, -0.5, MV);
// calcularExp(MV, cantVisi, N);
// combinacionLinealMatrices(0.0, matrizDeCeros, cantVisi, N, 1.0/sqrt(2.0 * M_PI * anchoV * anchoV), MV);
// free(matrizDeCeros);
// return MV;
// }
float* calcularMV_Normal_estFourier(float anchoV, long N, float delta_v, float* matrizDeUnosEstFourier)
{
float* coordenadasVCentrosCeldas = linspace((-N/2.0) * delta_v, ((N/2.0) - 1.0) * delta_v, N);
combinacionLinealMatrices(0.5 * delta_v, matrizDeUnosEstFourier, N, 1, 1.0, coordenadasVCentrosCeldas);
float* MV_AF = calcularMV_Normal(coordenadasVCentrosCeldas, delta_v, N, N, anchoV);
hipFree(coordenadasVCentrosCeldas);
return MV_AF;
}
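// Coefficient compression by energy: keeps the largest-modulus coefficients
// (|MC|^2 = MC_real^2 + MC_imag^2) until the fraction 'cotaEnergia' of the total
// energy is reached, zeroes the rest, writes the compressed real/imaginary matrices
// to the two output files and returns the number of coefficients kept.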
int calCompresionSegunCota(char* nombreArCoef_comp_imag, char* nombreArCoef_comp_real, float* MC_imag, float* MC_imag_comp, float* MC_real, float* MC_real_comp, long cantFilas, long cantColumnas, float cotaEnergia)
{
long largo = cantFilas * cantColumnas;
float* MC_img_cuadrado;
hipMallocManaged(&MC_img_cuadrado, cantFilas*cantColumnas*sizeof(float));
float* MC_modulo;
hipMallocManaged(&MC_modulo, cantFilas*cantColumnas*sizeof(float));
hadamardProduct(MC_imag, cantFilas, cantColumnas, MC_imag, MC_img_cuadrado);
hadamardProduct(MC_real, cantFilas, cantColumnas, MC_real, MC_modulo);
combinacionLinealMatrices(1.0, MC_img_cuadrado, cantFilas, cantColumnas, 1.0, MC_modulo);
hipFree(MC_img_cuadrado);
af::array MC_modulo_GPU(cantFilas*cantColumnas, MC_modulo);
af::array MC_modulo_indicesOrde_GPU(cantFilas*cantColumnas);
af::array MC_modulo_Orde_GPU(cantFilas*cantColumnas);
af::sort(MC_modulo_Orde_GPU, MC_modulo_indicesOrde_GPU, MC_modulo_GPU, 0, false);
float total = af::sum<float>(MC_modulo_GPU);
MC_modulo_Orde_GPU = MC_modulo_Orde_GPU/total;
af::eval(MC_modulo_Orde_GPU);
af::sync();
float* auxiliar_MC_modulo_Orde_GPU = MC_modulo_Orde_GPU.device<float>();
float* coefsNormalizados = (float*) calloc(largo, sizeof(float));
hipMemcpy(coefsNormalizados, auxiliar_MC_modulo_Orde_GPU, cantFilas*cantColumnas*sizeof(float), hipMemcpyDeviceToHost);
MC_modulo_Orde_GPU.unlock();
long cantCoefsParaCota = 0;
float sumador = 0.0;
for(long i=0; i<largo; i++)
{
sumador += coefsNormalizados[i];
cantCoefsParaCota++;
if(sumador >= cotaEnergia)
{
break;
}
}
hipFree(MC_modulo);
free(coefsNormalizados);
MC_modulo_GPU = MC_modulo_indicesOrde_GPU(af::seq(0,(cantCoefsParaCota-1)));
af::array indRepComp = af::constant(0, largo);
indRepComp(MC_modulo_GPU) = 1;
MC_modulo_GPU.unlock();
MC_modulo_indicesOrde_GPU.unlock();
af::array MC_imag_GPU(cantFilas*cantColumnas, MC_imag);
af::array MC_real_GPU(cantFilas*cantColumnas, MC_real);
MC_imag_GPU = MC_imag_GPU * indRepComp;
MC_real_GPU = MC_real_GPU * indRepComp;
af::eval(MC_imag_GPU);
af::eval(MC_real_GPU);
af::sync();
indRepComp.unlock();
float* auxiliar_MC_imag_GPU = MC_imag_GPU.device<float>();
float* auxiliar_MC_real_GPU = MC_real_GPU.device<float>();
hipMemcpy(MC_imag_comp, auxiliar_MC_imag_GPU, cantFilas*cantColumnas*sizeof(float), hipMemcpyDeviceToHost);
MC_imag_GPU.unlock();
hipMemcpy(MC_real_comp, auxiliar_MC_real_GPU, cantFilas*cantColumnas*sizeof(float), hipMemcpyDeviceToHost);
MC_real_GPU.unlock();
escribirCoefs(MC_imag_comp, nombreArCoef_comp_imag, cantFilas, cantColumnas);
escribirCoefs(MC_real_comp, nombreArCoef_comp_real, cantFilas, cantColumnas);
return cantCoefsParaCota;
}
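// Fletcher-Reeves nonlinear conjugate gradient on the weighted least-squares cost,
// with exact line search (calAlpha). Stops after maxIter iterations or when
// 2*|costo_k - costo_{k-1}| <= tol*(costo_k + costo_{k-1} + eps); each iteration is
// appended to nombreArchivoMin and the final coefficients are written to
// nombreArchivoCoefs before being returned.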
float* minGradConjugado_MinCuadra_escritura(char* nombreArchivoMin, char* nombreArchivoCoefs, float* MV, float* MU, float* visibilidades, float* w, long cantVisi, long N, float* matrizDeUnosTamN, int maxIter, float tol)
{
int flag_NOESPOSIBLEMINIMIZAR = 0;
float* MC;
hipMallocManaged(&MC, N*N*sizeof(float));
hipMemset(MC, 0, N*N*sizeof(float));
float* residualInit = calResidual(visibilidades, MV, cantVisi, N, MC, N, MU, matrizDeUnosTamN);
float* gradienteActual;
hipMallocManaged(&gradienteActual,N*N*sizeof(float));
hipMemset(gradienteActual, 0, N*N*sizeof(float));
float* gradienteAnterior;
hipMallocManaged(&gradienteAnterior,N*N*sizeof(float));
hipMemset(gradienteAnterior, 0, N*N*sizeof(float));
float* pActual;
hipMallocManaged(&pActual,N*N*sizeof(float));
hipMemset(pActual, 0, N*N*sizeof(float));
float costoInicial = calCosto(residualInit, cantVisi, w);
float costoAnterior = costoInicial;
float costoActual = costoInicial;
calGradiente(residualInit, MV, cantVisi, N, MU, N, w, gradienteAnterior);
hipFree(residualInit);
// for(int i=0; i<N*N; i++)
// {
// if(gradienteAnterior[i] != 0.0)
// {
// printf("En la linea %d es %f\n", i, gradienteAnterior[i]);
// }
// }
// exit(-1);
combinacionLinealMatrices(-1.0, gradienteAnterior, N, N, 0.0, pActual);
float diferenciaDeCosto = 1.0;
int i = 0;
float alpha = 0.0;
float epsilon = 1e-10;
float normalizacion = costoAnterior + costoActual + epsilon;
FILE* archivoMin = fopen(nombreArchivoMin, "w");
if(archivoMin == NULL)
{
printf("Error al crear o abrir el archivo para almacenar la minimizacion.\n");
exit(0);
}
while(maxIter > i && 2.0 * diferenciaDeCosto > tol * normalizacion)
{
alpha = calAlpha(gradienteAnterior, N, N, pActual, MV, cantVisi, N, MU, N, w, matrizDeUnosTamN, &flag_NOESPOSIBLEMINIMIZAR);
if(flag_NOESPOSIBLEMINIMIZAR == 1)
{
break;
}
combinacionLinealMatrices(alpha, pActual, N, N, 1.0, MC);
float* residual = calResidual(visibilidades, MV, cantVisi, N, MC, N, MU, matrizDeUnosTamN);
costoActual = calCosto(residual, cantVisi, w);
hipMallocManaged(&gradienteActual,N*N*sizeof(float));
hipMemset(gradienteActual, 0, N*N*sizeof(float));
calGradiente(residual, MV, cantVisi, N, MU, N, w, gradienteActual);
hipFree(residual);
float beta = calBeta_Fletcher_Reeves(gradienteActual, N*N, gradienteAnterior);
combinacionLinealMatrices(-1.0, gradienteActual, N, N, beta, pActual);
diferenciaDeCosto = abs(costoAnterior - costoActual);
normalizacion = costoAnterior + costoActual + epsilon;
float otro = costoActual - costoAnterior;
costoAnterior = costoActual;
float* auxiliar = gradienteAnterior;
gradienteAnterior = gradienteActual;
hipFree(auxiliar);
i++;
printf( "En la iteracion %d el valor de la funcion de costos es %f con un z de %.12e la diferencia con respecto al anterior costo es %.12e.\n", i, costoActual, alpha, otro);
fprintf(archivoMin, "En la iteracion %d el valor de la funcion de costos es %f con un z de %.12e la diferencia con respecto al anterior costo es %.12e.\n", i, costoActual, alpha, otro);
}
fclose(archivoMin);
hipFree(gradienteAnterior);
hipFree(pActual);
escribirCoefs(MC, nombreArchivoCoefs, N, N);
return MC;
}
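// Same conjugate-gradient minimisation as above, without writing the iteration log
// or the coefficient file.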
float* minGradConjugado_MinCuadra(float* MV, float* MU, float* visibilidades, float* w, long cantVisi, long N, float* matrizDeUnosTamN, int maxIter, float tol)
{
int flag_NOESPOSIBLEMINIMIZAR = 0;
float* MC;
hipMallocManaged(&MC, N*N*sizeof(float));
hipMemset(MC, 0, N*N*sizeof(float));
float* residualInit = calResidual(visibilidades, MV, cantVisi, N, MC, N, MU, matrizDeUnosTamN);
float* gradienteActual;
hipMallocManaged(&gradienteActual,N*N*sizeof(float));
hipMemset(gradienteActual, 0, N*N*sizeof(float));
float* gradienteAnterior;
hipMallocManaged(&gradienteAnterior,N*N*sizeof(float));
hipMemset(gradienteAnterior, 0, N*N*sizeof(float));
float* pActual;
hipMallocManaged(&pActual,N*N*sizeof(float));
hipMemset(pActual, 0, N*N*sizeof(float));
float costoInicial = calCosto(residualInit, cantVisi, w);
float costoAnterior = costoInicial;
float costoActual = costoInicial;
calGradiente(residualInit, MV, cantVisi, N, MU, N, w, gradienteAnterior);
hipFree(residualInit);
combinacionLinealMatrices(-1.0, gradienteAnterior, N, N, 0.0, pActual);
float diferenciaDeCosto = 1.0;
int i = 0;
float alpha = 0.0;
float epsilon = 1e-10;
float normalizacion = costoAnterior + costoActual + epsilon;
while(maxIter > i && 2.0 * diferenciaDeCosto > tol * normalizacion)
{
alpha = calAlpha(gradienteAnterior, N, N, pActual, MV, cantVisi, N, MU, N, w, matrizDeUnosTamN, &flag_NOESPOSIBLEMINIMIZAR);
if(flag_NOESPOSIBLEMINIMIZAR == 1)
{
break;
}
combinacionLinealMatrices(alpha, pActual, N, N, 1.0, MC);
float* residual = calResidual(visibilidades, MV, cantVisi, N, MC, N, MU, matrizDeUnosTamN);
costoActual = calCosto(residual, cantVisi, w);
hipMallocManaged(&gradienteActual,N*N*sizeof(float));
hipMemset(gradienteActual, 0, N*N*sizeof(float));
calGradiente(residual, MV, cantVisi, N, MU, N, w, gradienteActual);
hipFree(residual);
float beta = calBeta_Fletcher_Reeves(gradienteActual, N*N, gradienteAnterior);
combinacionLinealMatrices(-1.0, gradienteActual, N, N, beta, pActual);
diferenciaDeCosto = abs(costoAnterior - costoActual);
normalizacion = costoAnterior + costoActual + epsilon;
float otro = costoActual - costoAnterior;
costoAnterior = costoActual;
float* auxiliar = gradienteAnterior;
gradienteAnterior = gradienteActual;
hipFree(auxiliar);
i++;
printf( "En la iteracion %d el valor de la funcion de costos es %f con un z de %.12e la diferencia con respecto al anterior costo es %.12e.\n", i, costoActual, alpha, otro);
}
hipFree(gradienteAnterior);
hipFree(pActual);
return MC;
}
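// Standard deviation of 'data' around the supplied mean, over cantElementos samples.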
float calculateSD(float* data, float mean, long cantElementos)
{
float SD = 0.0;
for (long i = 0; i < cantElementos; i++)
SD += pow(data[i] - mean, 2);
return sqrt(SD / cantElementos);
}
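// Reconstructs the image from the real/imaginary Fourier-plane estimates (centred
// inverse FFT via ArrayFire), then returns a peak-signal-to-noise figure: the
// maximum pixel value inside a fixed window (indices 150 to 450 and 100 to 400 of
// the N x N image) divided by the standard deviation of the pixels outside it.
// The reconstruction is also written to a FITS file and the time spent in the
// inverse transform is reported through tiempoTransInver_MejorCompresion.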
float calculoDePSNRDeRecorte(float* estimacionFourier_ParteImag, float* estimacionFourier_ParteReal, long N, char* nombreArchivo, clock_t* tiempoTransInver_MejorCompresion)
{
int columnaDeInicio = 150;
int columnaDeTermino = 450;
int filaDeInicio = 100;
int filaDeTermino = 400;
*tiempoTransInver_MejorCompresion = clock();
af::array estimacionFourier_ParteImag_GPU(N, N, estimacionFourier_ParteImag);
af::array estimacionFourier_ParteReal_GPU(N, N, estimacionFourier_ParteReal);
af::array mapaFourierRecons = af::complex(estimacionFourier_ParteReal_GPU, estimacionFourier_ParteImag_GPU);
estimacionFourier_ParteImag_GPU.unlock();
estimacionFourier_ParteReal_GPU.unlock();
mapaFourierRecons = af::shift(mapaFourierRecons, (mapaFourierRecons.dims(0)+1)/2, (mapaFourierRecons.dims(1)+1)/2);
mapaFourierRecons = af::ifft2(mapaFourierRecons, N, N);
mapaFourierRecons = af::shift(mapaFourierRecons, (mapaFourierRecons.dims(0)+1)/2, (mapaFourierRecons.dims(1)+1)/2);
mapaFourierRecons = af::real(mapaFourierRecons);
*tiempoTransInver_MejorCompresion = clock() - *tiempoTransInver_MejorCompresion;
mapaFourierRecons = af::flip(mapaFourierRecons, 0);
mapaFourierRecons = af::transpose(mapaFourierRecons);
float* auxiliar_mapaFourierRecons = mapaFourierRecons.device<float>();
float* inver_visi = (float*) calloc(N*N, sizeof(float));
hipMemcpy(inver_visi, auxiliar_mapaFourierRecons, N*N*sizeof(float), hipMemcpyDeviceToHost);
mapaFourierRecons.unlock();
int cantFilasARecorrer = columnaDeTermino - columnaDeInicio + 1;
int cantColumnasARecorrer = filaDeTermino - filaDeInicio + 1;
int contador = 0;
int contadorEleExternos = 0;
float sumaDeValoresExternos = 0.0;
float maximoValorInterno = 0;
float* nuevaImagen = (float*) calloc(cantFilasARecorrer*cantColumnasARecorrer, sizeof(float));
float* elementosExternos = (float*) calloc(N*N, sizeof(float));
for(int j=0; j<N; j++)
{
for(int i=0; i<N; i++)
{
if(columnaDeInicio <= i && i <= columnaDeTermino && filaDeInicio <= j && j <= filaDeTermino)
{
nuevaImagen[contador] = inver_visi[i+j*N];
if(maximoValorInterno < inver_visi[i+j*N])
{
maximoValorInterno = inver_visi[i+j*N];
}
contador++;
}
else
{
elementosExternos[contadorEleExternos] = inver_visi[i+j*N];
sumaDeValoresExternos += elementosExternos[contadorEleExternos];
contadorEleExternos++;
}
}
}
float mediaExterna = sumaDeValoresExternos/contadorEleExternos;
float desvEstandar = calculateSD(elementosExternos, mediaExterna, contadorEleExternos);
free(elementosExternos);
float PSNR = maximoValorInterno/desvEstandar;
// printf("El contador es %d\n", contador);
// printf("La wea total es %d\n", cantFilasARecorrer*cantColumnasARecorrer);
// printf("La cantidad de elementos externos es %d\n", contadorEleExternos);
fitsfile *fptr;
int status;
long fpixel, nelements;
int bitpix = FLOAT_IMG;
long naxis = 2;
// long naxes[2] = {cantFilasARecorrer, cantColumnasARecorrer};
long naxes[2] = {N, N};
remove(nombreArchivo);
status = 0;
if (fits_create_file(&fptr, nombreArchivo, &status))
printerror_cfitsio(status);
if (fits_create_img(fptr, bitpix, naxis, naxes, &status))
printerror_cfitsio(status);
fpixel = 1;
nelements = naxes[0] * naxes[1];
// if (fits_write_img(fptr, TFLOAT, fpixel, nelements, nuevaImagen, &status))
if (fits_write_img(fptr, TFLOAT, fpixel, nelements, inver_visi, &status))
printerror_cfitsio(status);
if (fits_close_file(fptr, &status))
printerror_cfitsio(status);
free(inver_visi);
free(nuevaImagen);
return PSNR;
}
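// Sweeps cantParamEvaInfo compression levels between inicioIntervalo and finIntervalo
// (percentage of coefficients kept), reconstructs and scores each level with the PSNR
// routine above, smooths the PSNR curve with a GSL Gaussian filter and then, inside the
// window allowed by cotaMinCompresion and cotaMinPSNR, selects the sweep point
// immediately before the largest drop of the smoothed curve. Per-level data, the
// smoothed curve and the chosen trade-off are written to text files under rutaADirecSec,
// and the compressed reconstructions to rutaADirecTer.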
float calPSNRDeDistintasCompresiones(float inicioIntervalo, float finIntervalo, int cantParamEvaInfo, char rutaADirecSec[], char rutaADirecTer[], char nombreArReconsCompreImg[], float* MC_imag, float* MC_real, float* MV_AF, float* MU_AF, long N, clock_t* tiempoReconsParteImag_MejorCompresion, clock_t* tiempoReconsParteReal_MejorCompresion, clock_t* tiempoTransInver_MejorCompresion)
{
float cotaMinPSNR = 0.75;
float cotaMinCompresion = 0.2 * finIntervalo;
float* datosDelMin = (float*) malloc(sizeof(float)*4);
long cantCoefsMejorCompre = 0;
char nombreArchivoTXTCompre[] = "compresiones.txt";
char nombreArchivoDatosMinPSNR[] = "mejorTradeOffPSNRCompre.txt";
char nombreArchivoCompreImg[] = "compreImg";
char nombreDatosDeIte[] = "datosDeIte.txt";
char nombreDatosDeIteLegible[] = "datosDeIteLegible.txt";
char nombreCurvaPSNRSuavizada[] = "curvaPSNRSuavizada.txt";
float* paramEvaInfo = linspace(inicioIntervalo/100.0, finIntervalo/100.0, cantParamEvaInfo);
float* MC_comp_imag;
hipMallocManaged(&MC_comp_imag,N*N*sizeof(float));
hipMemset(MC_comp_imag, 0, N*N*sizeof(float));
float* MC_comp_real;
hipMallocManaged(&MC_comp_real,N*N*sizeof(float));
hipMemset(MC_comp_real, 0, N*N*sizeof(float));
long largo = N * N;
float* MC_img_cuadrado;
hipMallocManaged(&MC_img_cuadrado, N*N*sizeof(float));
float* MC_modulo;
hipMallocManaged(&MC_modulo, N*N*sizeof(float));
hadamardProduct(MC_imag, N, N, MC_imag, MC_img_cuadrado);
hadamardProduct(MC_real, N, N, MC_real, MC_modulo);
combinacionLinealMatrices(1.0, MC_img_cuadrado, N, N, 1.0, MC_modulo);
hipFree(MC_img_cuadrado);
af::array MC_modulo_GPU(N*N, MC_modulo);
hipFree(MC_modulo);
af::array MC_modulo_indicesOrde_GPU(N*N);
af::array MC_modulo_Orde_GPU(N*N);
af::sort(MC_modulo_Orde_GPU, MC_modulo_indicesOrde_GPU, MC_modulo_GPU, 0, false);
float total = af::sum<float>(MC_modulo_GPU);
MC_modulo_Orde_GPU = MC_modulo_Orde_GPU/total;
af::eval(MC_modulo_Orde_GPU);
af::eval(MC_modulo_indicesOrde_GPU);
af::sync();
float* auxiliar_MC_modulo_Orde_GPU = MC_modulo_Orde_GPU.device<float>();
float* auxiliar_MC_modulo_indicesOrde_GPU = MC_modulo_indicesOrde_GPU.device<float>();
float* coefsNormalizados = (float*) malloc(largo*sizeof(float));
hipMemcpy(coefsNormalizados, auxiliar_MC_modulo_Orde_GPU, N*N*sizeof(float), hipMemcpyDeviceToHost);
int* MC_modulo_indicesOrde_CPU = (int*) malloc(largo*sizeof(int));
hipMemcpy(MC_modulo_indicesOrde_CPU, auxiliar_MC_modulo_indicesOrde_GPU, N*N*sizeof(int), hipMemcpyDeviceToHost);
MC_modulo_Orde_GPU.unlock();
MC_modulo_GPU.unlock();
MC_modulo_indicesOrde_GPU.unlock();
long cantCoefsParaCota = 0;
float sumador = 0.0;
long iExterno = 0;
float* cantidadPorcentualDeCoefs = linspace(0.0, largo, largo+1);
combinacionLinealMatrices(0.0, cantidadPorcentualDeCoefs, largo+1, 1, 1.0/largo, cantidadPorcentualDeCoefs);
char* nombreArchivoCompresiones = (char*) malloc(sizeof(char)*strlen(rutaADirecSec)*strlen(nombreArchivoTXTCompre)+sizeof(char)*4);
strcpy(nombreArchivoCompresiones, rutaADirecSec);
strcat(nombreArchivoCompresiones, "/");
strcat(nombreArchivoCompresiones, nombreArchivoTXTCompre);
char* nombreArchivoDatosDeIte = (char*) malloc(sizeof(char)*strlen(rutaADirecSec)*strlen(nombreDatosDeIte)+sizeof(char)*4);
strcpy(nombreArchivoDatosDeIte, rutaADirecSec);
strcat(nombreArchivoDatosDeIte, "/");
strcat(nombreArchivoDatosDeIte, nombreDatosDeIte);
char* nombreArchivoDatosDeIteLegible = (char*) malloc(sizeof(char)*strlen(rutaADirecSec)*strlen(nombreDatosDeIteLegible)+sizeof(char)*4);
strcpy(nombreArchivoDatosDeIteLegible, rutaADirecSec);
strcat(nombreArchivoDatosDeIteLegible, "/");
strcat(nombreArchivoDatosDeIteLegible, nombreDatosDeIteLegible);
float* vectorDePSNR = (float*) calloc(cantParamEvaInfo, sizeof(float));
float* porcenReal = (float*) calloc(cantParamEvaInfo, sizeof(float));
float* porcenIdeal = (float*) calloc(cantParamEvaInfo, sizeof(float));
long* cantCoefsUsadas = (long*) calloc(cantParamEvaInfo, sizeof(long));
float* vectorDePorcenEnergia = (float*) calloc(cantParamEvaInfo, sizeof(float));
float* vectorDeDifePSNREntrePtosAdya = (float*) calloc(cantParamEvaInfo, sizeof(float));
int flag_inicioDeVentana = 1;
int cantPtsVentana = 0;
int inicioDeVentana = 0;
clock_t tiempoCualquiera;
for(long j=0; j<cantParamEvaInfo; j++)
{
sumador = 0.0;
cantCoefsParaCota = 0;
iExterno = 0;
for(long i=0; i<largo+1; i++)
{
if(cantidadPorcentualDeCoefs[i] < paramEvaInfo[cantParamEvaInfo-1-j])
{
sumador += coefsNormalizados[i];
cantCoefsParaCota++;
}
else
{
iExterno = i;
FILE* archivoDatosDeIte = fopen(nombreArchivoDatosDeIte, "a");
fprintf(archivoDatosDeIte, "%f %f %ld %f\n", paramEvaInfo[cantParamEvaInfo-1-j], cantidadPorcentualDeCoefs[i], cantCoefsParaCota, sumador);
fclose(archivoDatosDeIte);
FILE* archivoDatosDeIteLegible = fopen(nombreArchivoDatosDeIteLegible, "a");
fprintf(archivoDatosDeIteLegible, "Del %f%% solicitado, el mas cercano correspondiente al %f%% de coefs, lo que corresponde a %ld coeficientes los cuales poseen el %f%% de la energia.\n", paramEvaInfo[cantParamEvaInfo-1-j] * 100, cantidadPorcentualDeCoefs[i] * 100, cantCoefsParaCota, sumador * 100);
fclose(archivoDatosDeIteLegible);
printf("Del %f%% solicitado, el mas cercano correspondiente al %f%% de coefs, lo que corresponde a %ld coeficientes los cuales poseen el %f%% de la energia.\n", paramEvaInfo[cantParamEvaInfo-1-j] * 100, cantidadPorcentualDeCoefs[i] * 100, cantCoefsParaCota, sumador * 100);
break;
}
}
if(cantCoefsParaCota != 0)
{
int* indicesATomar_CPU = (int*) calloc(cantCoefsParaCota, sizeof(int));
for(int k=0; k<cantCoefsParaCota; k++)
{
indicesATomar_CPU[k] = MC_modulo_indicesOrde_CPU[k];
}
af::array indicesATomar_GPU(cantCoefsParaCota, indicesATomar_CPU);
free(indicesATomar_CPU);
af::array indRepComp = af::constant(0, largo);
indRepComp(indicesATomar_GPU) = 1;
indicesATomar_GPU.unlock();
af::array MC_imag_GPU(N*N, MC_imag);
af::array MC_real_GPU(N*N, MC_real);
MC_imag_GPU = MC_imag_GPU * indRepComp;
MC_real_GPU = MC_real_GPU * indRepComp;
af::eval(MC_imag_GPU);
af::eval(MC_real_GPU);
af::sync();
indRepComp.unlock();
float* auxiliar_MC_imag_GPU = MC_imag_GPU.device<float>();
float* auxiliar_MC_real_GPU = MC_real_GPU.device<float>();
hipMemcpy(MC_comp_imag, auxiliar_MC_imag_GPU, N*N*sizeof(float), hipMemcpyDeviceToHost);
MC_imag_GPU.unlock();
hipMemcpy(MC_comp_real, auxiliar_MC_real_GPU, N*N*sizeof(float), hipMemcpyDeviceToHost);
MC_real_GPU.unlock();
float* estimacionFourier_compre_ParteImag = estimacionDePlanoDeFourier(MV_AF, N, N, MC_comp_imag, N, N, MU_AF);
float* estimacionFourier_compre_ParteReal = estimacionDePlanoDeFourier(MV_AF, N, N, MC_comp_real, N, N, MU_AF);
int numero = j+1;
char* numComoString = numAString(&numero);
sprintf(numComoString, "%d", numero);
char* nombreArchivoReconsImgComp = (char*) malloc(sizeof(char)*strlen(rutaADirecTer)*strlen(numComoString)*strlen(nombreArchivoCompreImg)+sizeof(char)*7);
strcpy(nombreArchivoReconsImgComp, rutaADirecTer);
strcat(nombreArchivoReconsImgComp, "/");
strcat(nombreArchivoReconsImgComp, nombreArchivoCompreImg);
strcat(nombreArchivoReconsImgComp, "_");
strcat(nombreArchivoReconsImgComp, numComoString);
strcat(nombreArchivoReconsImgComp, ".fit");
float PSNRActual = calculoDePSNRDeRecorte(estimacionFourier_compre_ParteImag, estimacionFourier_compre_ParteReal, N, nombreArchivoReconsImgComp, &tiempoCualquiera);
porcenIdeal[j] = 1-paramEvaInfo[cantParamEvaInfo-1-j];
vectorDePSNR[j] = PSNRActual;
porcenReal[j] = 1-cantidadPorcentualDeCoefs[iExterno];
cantCoefsUsadas[j] = cantCoefsParaCota;
vectorDePorcenEnergia[j] = sumador;
FILE* archivoPSNR = fopen(nombreArchivoCompresiones, "a");
fprintf(archivoPSNR, "%f %f %f\n", 1-cantidadPorcentualDeCoefs[iExterno], 1-paramEvaInfo[cantParamEvaInfo-1-j], PSNRActual);
fclose(archivoPSNR);
hipFree(estimacionFourier_compre_ParteImag);
hipFree(estimacionFourier_compre_ParteReal);
free(numComoString);
free(nombreArchivoReconsImgComp);
}
}
float* vectorDePSNRFiltrado = (float*) calloc(cantParamEvaInfo, sizeof(float));
gsl_vector* vectorDePSNREnGSL = gsl_vector_alloc(cantParamEvaInfo);
gsl_vector* vectorDePSNREnGSLFiltrado = gsl_vector_alloc(cantParamEvaInfo);
for(int i=0; i<cantParamEvaInfo; i++)
{
gsl_vector_set(vectorDePSNREnGSL, i, vectorDePSNR[i]);
}
gsl_filter_gaussian_workspace* gauss_p = gsl_filter_gaussian_alloc(5);
gsl_filter_gaussian(GSL_FILTER_END_PADVALUE, 1.0, 0, vectorDePSNREnGSL, vectorDePSNREnGSLFiltrado, gauss_p);
for(int i=0; i<cantParamEvaInfo; i++)
{
vectorDePSNRFiltrado[i] = gsl_vector_get(vectorDePSNREnGSLFiltrado, i);
}
gsl_vector_free(vectorDePSNREnGSL);
gsl_vector_free(vectorDePSNREnGSLFiltrado);
gsl_filter_gaussian_free(gauss_p);
char* nombreArchivoCurvaPSNRSuavizada = (char*) malloc(sizeof(char)*strlen(rutaADirecSec)*strlen(nombreCurvaPSNRSuavizada)+sizeof(char)*4);
strcpy(nombreArchivoCurvaPSNRSuavizada, rutaADirecSec);
strcat(nombreArchivoCurvaPSNRSuavizada, "/");
strcat(nombreArchivoCurvaPSNRSuavizada, nombreCurvaPSNRSuavizada);
FILE* archivoCurvaPSNRSuavizada = fopen(nombreArchivoCurvaPSNRSuavizada, "a");
for(int i=0; i<cantParamEvaInfo; i++)
{
fprintf(archivoCurvaPSNRSuavizada, "%f\n", vectorDePSNRFiltrado[i]);
}
fclose(archivoCurvaPSNRSuavizada);
free(nombreArchivoCurvaPSNRSuavizada);
for(int j=0; j<cantParamEvaInfo; j++)
{
float porcenActual = porcenReal[j];
float porcenDifActual = vectorDePSNRFiltrado[j]/vectorDePSNRFiltrado[0];
if(j >= 1)
{
if(porcenActual >= cotaMinCompresion && porcenDifActual >= cotaMinPSNR)
{
if(flag_inicioDeVentana)
{
inicioDeVentana = j;
flag_inicioDeVentana = 0;
}
vectorDeDifePSNREntrePtosAdya[cantPtsVentana] = vectorDePSNRFiltrado[j] - vectorDePSNRFiltrado[j-1];
printf("%.12e\n", vectorDeDifePSNREntrePtosAdya[cantPtsVentana]);
cantPtsVentana++;
}
}
}
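// Within the window of points that satisfy both the minimum-compression and minimum-PSNR
// thresholds, the PSNR gain of each point over its predecessor is collected; the differences
// are sorted in ascending order on the GPU and the smallest gain (i.e. where the smoothed
// curve flattens out) selects the trade-off index.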
af::array vectorDeDifePSNREntrePtosAdya_GPU(cantPtsVentana, vectorDeDifePSNREntrePtosAdya);
free(vectorDeDifePSNREntrePtosAdya);
af::array vectorDeDifePSNREntrePtosAdya_indicesOrde_GPU(cantPtsVentana);
af::array vectorDeDifePSNREntrePtosAdya_Orde_GPU(cantPtsVentana);
af::sort(vectorDeDifePSNREntrePtosAdya_Orde_GPU, vectorDeDifePSNREntrePtosAdya_indicesOrde_GPU, vectorDeDifePSNREntrePtosAdya_GPU, 0, true);
vectorDeDifePSNREntrePtosAdya_GPU.unlock();
vectorDeDifePSNREntrePtosAdya_Orde_GPU.unlock();
int* auxiliar_vectorDeDifePSNREntrePtosAdya_indicesOrde_GPU = vectorDeDifePSNREntrePtosAdya_indicesOrde_GPU.device<int>();
int* vectorDeDifePSNREntrePtosAdya_indicesOrde_CPU = (int*) malloc(sizeof(int)*cantPtsVentana);
hipMemcpy(vectorDeDifePSNREntrePtosAdya_indicesOrde_CPU, auxiliar_vectorDeDifePSNREntrePtosAdya_indicesOrde_GPU, cantPtsVentana*sizeof(int), hipMemcpyDeviceToHost);
vectorDeDifePSNREntrePtosAdya_indicesOrde_GPU.unlock();
int indiceElegido = vectorDeDifePSNREntrePtosAdya_indicesOrde_CPU[0] + inicioDeVentana - 1;
// printf("El indice elegido es %d\n", indiceElegido);
free(vectorDeDifePSNREntrePtosAdya_indicesOrde_CPU);
datosDelMin[0] = porcenIdeal[indiceElegido];
datosDelMin[1] = porcenReal[indiceElegido];
cantCoefsMejorCompre = cantCoefsUsadas[indiceElegido];
datosDelMin[2] = vectorDePorcenEnergia[indiceElegido];
datosDelMin[3] = vectorDePSNR[indiceElegido];
free(vectorDePSNRFiltrado);
free(porcenIdeal);
free(porcenReal);
free(cantCoefsUsadas);
free(vectorDePorcenEnergia);
free(vectorDePSNR);
char* nombreArchivoMejorCompre = (char*) malloc(sizeof(char)*strlen(rutaADirecSec)*strlen(nombreArchivoDatosMinPSNR)+sizeof(char)*4);
strcpy(nombreArchivoMejorCompre, rutaADirecSec);
strcat(nombreArchivoMejorCompre, "/");
strcat(nombreArchivoMejorCompre, nombreArchivoDatosMinPSNR);
FILE* archivoMejorCompre = fopen(nombreArchivoMejorCompre, "w");
fprintf(archivoMejorCompre, "El tradeoff seleccionado con indice %d corresponde al %f%% de coefs, el mas cercano correspondiente al %f%% de coefs, lo que corresponde a %ld coeficientes los cuales poseen el %f%% de la energia y un PSNR de %f.\n", indiceElegido, (1-datosDelMin[0]) * 100, (1-datosDelMin[1]) * 100, cantCoefsMejorCompre, datosDelMin[2] * 100, datosDelMin[3]);
free(nombreArchivoMejorCompre);
free(datosDelMin);
fclose(archivoMejorCompre);
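// Rebuild the coefficient mask for the selected trade-off: keep only the cantCoefsMejorCompre
// highest-energy coefficients, zero the rest, and time the reconstruction of both Fourier
// components and the final inverse transform.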
float* indicesATomar_CPU = (float*) malloc(cantCoefsMejorCompre*sizeof(float));
for(int k=0; k<cantCoefsMejorCompre; k++)
{
indicesATomar_CPU[k] = MC_modulo_indicesOrde_CPU[k];
}
af::array indicesATomar_GPU(cantCoefsMejorCompre, indicesATomar_CPU);
free(indicesATomar_CPU);
af::array indRepComp = af::constant(0, largo);
indRepComp(indicesATomar_GPU) = 1;
indicesATomar_GPU.unlock();
af::array MC_imag_GPU(N*N, MC_imag);
af::array MC_real_GPU(N*N, MC_real);
MC_imag_GPU = MC_imag_GPU * indRepComp;
MC_real_GPU = MC_real_GPU * indRepComp;
af::eval(MC_imag_GPU);
af::eval(MC_real_GPU);
af::sync();
indRepComp.unlock();
float* auxiliar_MC_imag_GPU = MC_imag_GPU.device<float>();
float* auxiliar_MC_real_GPU = MC_real_GPU.device<float>();
hipMemcpy(MC_comp_imag, auxiliar_MC_imag_GPU, N*N*sizeof(float), hipMemcpyDeviceToHost);
MC_imag_GPU.unlock();
hipMemcpy(MC_comp_real, auxiliar_MC_real_GPU, N*N*sizeof(float), hipMemcpyDeviceToHost);
MC_real_GPU.unlock();
*tiempoReconsParteImag_MejorCompresion = clock();
float* estimacionFourier_compre_ParteImag = estimacionDePlanoDeFourier(MV_AF, N, N, MC_comp_imag, N, N, MU_AF);
*tiempoReconsParteImag_MejorCompresion = clock() - *tiempoReconsParteImag_MejorCompresion;
*tiempoReconsParteReal_MejorCompresion = clock();
float* estimacionFourier_compre_ParteReal = estimacionDePlanoDeFourier(MV_AF, N, N, MC_comp_real, N, N, MU_AF);
*tiempoReconsParteReal_MejorCompresion = clock() - *tiempoReconsParteReal_MejorCompresion;
char* nombreArchivoReconsImgComp = (char*) malloc(sizeof(char)*strlen(rutaADirecSec)*strlen(nombreArReconsCompreImg)+sizeof(char)*4);
strcpy(nombreArchivoReconsImgComp, rutaADirecSec);
strcat(nombreArchivoReconsImgComp, "/");
strcat(nombreArchivoReconsImgComp, nombreArReconsCompreImg);
float PSNRActual = calculoDePSNRDeRecorte(estimacionFourier_compre_ParteImag, estimacionFourier_compre_ParteReal, N, nombreArchivoReconsImgComp, tiempoTransInver_MejorCompresion);
hipFree(estimacionFourier_compre_ParteImag);
hipFree(estimacionFourier_compre_ParteReal);
hipFree(MC_comp_imag);
hipFree(MC_comp_real);
hipFree(cantidadPorcentualDeCoefs);
hipFree(paramEvaInfo);
hipFree(MU_AF);
hipFree(MV_AF);
free(coefsNormalizados);
free(MC_modulo_indicesOrde_CPU);
free(nombreArchivoCompresiones);
free(nombreArchivoDatosDeIte);
free(nombreArchivoDatosDeIteLegible);
return cantCoefsMejorCompre;
}
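// For a single width "ancho" of the normal (Gaussian) basis this routine: computes MV and MU,
// minimizes the imaginary and real coefficient sets with the conjugate-gradient least-squares
// solver, evaluates the diagonal Fisher-information metric, reconstructs and writes the gridded
// image, sweeps the compression levels with calPSNRDeDistintasCompresiones, and appends the
// per-stage execution times to infoTiemposEjecu.txt.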
void calCompSegunAncho_Normal_escritura(char nombreDirPrin[], char* nombreDirSec, char nombreDirTer[], float ancho, float cotaEnergia, int iterActual, int maxIter, float tol, float* u, float* v, float* w, float* visi_parteImaginaria, float* visi_parteReal, float delta_u, float delta_v, long cantVisi, long N, float* matrizDeUnosTamN)
{
float inicioPorcenCompre = 0.0;
float terminoPorcenCompre = 0.2;
int cantPorcen = 101;
// int cantPorcen = 2;
// ############### OUTPUT FILE NAMES CONFIG. ##############
char nombreArReconsImg[] = "reconsImg.fit";
char nombreArReconsCompreImg[] = "reconsCompreImg.fit";
char nombreArMin_imag[] = "minCoefs_imag.txt";
char nombreArCoef_imag[] = "coefs_imag.txt";
char nombreArCoef_comp_imag[] = "coefs_comp_imag.txt";
char nombreArMin_real[] = "minCoefs_real.txt";
char nombreArCoef_real[] = "coefs_real.txt";
char nombreArCoef_comp_real[] = "coefs_comp_real.txt";
char nombreArInfoCompresion[] = "infoCompre.txt";
char nombreArInfoTiemposEjecu[] = "infoTiemposEjecu.txt";
// ############### COMPUTATION OF MU AND MV - CREATION OF SECONDARY DIRECTORY ##############
printf("...Comenzando calculo de MV...\n");
clock_t tiempoCalculoMV;
tiempoCalculoMV = clock();
float* MV = calcularMV_Normal(v, delta_v, cantVisi, N, ancho);
tiempoCalculoMV = clock() - tiempoCalculoMV;
float tiempoTotalCalculoMV = ((float)tiempoCalculoMV)/CLOCKS_PER_SEC;
printf("Calculo de MV completado.\n");
printf("...Comenzando calculo de MU...\n");
clock_t tiempoCalculoMU;
tiempoCalculoMU = clock();
float* MU = calcularMV_Normal(u, delta_u, cantVisi, N, ancho);
tiempoCalculoMU = clock() - tiempoCalculoMU;
float tiempoTotalCalculoMU = ((float)tiempoCalculoMU)/CLOCKS_PER_SEC;
printf("Calculo de MU completado.\n");
char* rutaADirecSec = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreDirSec)*sizeof(char)+sizeof(char)*3);
strcpy(rutaADirecSec, nombreDirPrin);
strcat(rutaADirecSec, "/");
strcat(rutaADirecSec, nombreDirSec);
if(mkdir(rutaADirecSec, 0777) == -1)
{
printf("ERROR: No se pudo crear subdirectorio.");
printf("PROGRAMA ABORTADO.\n");
exit(0);
}
strcat(rutaADirecSec, "/");
// ############### COEFFICIENT MINIMIZATION, IMAGINARY PART ##############
char* nombreArchivoMin_imag = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreDirSec)*strlen(nombreArMin_imag)*sizeof(char)+sizeof(char)*3);
strcpy(nombreArchivoMin_imag, rutaADirecSec);
strcat(nombreArchivoMin_imag, nombreArMin_imag);
char* nombreArchivoCoefs_imag = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreDirSec)*strlen(nombreArCoef_imag)*sizeof(char)+sizeof(char)*3);
strcpy(nombreArchivoCoefs_imag, rutaADirecSec);
strcat(nombreArchivoCoefs_imag, nombreArCoef_imag);
printf("...Comenzando minimizacion de coeficientes parte imaginaria...\n");
clock_t tiempoMinPartImag;
tiempoMinPartImag = clock();
float* MC_imag = minGradConjugado_MinCuadra_escritura(nombreArchivoMin_imag, nombreArchivoCoefs_imag, MV, MU, visi_parteImaginaria, w, cantVisi, N, matrizDeUnosTamN, maxIter, tol);
tiempoMinPartImag = clock() - tiempoMinPartImag;
float tiempoTotalMinPartImag = ((float)tiempoMinPartImag)/CLOCKS_PER_SEC;
printf("Proceso de minimizacion de coeficientes parte imaginaria terminado.\n");
free(nombreArchivoMin_imag);
free(nombreArchivoCoefs_imag);
// ############### COEFFICIENT MINIMIZATION, REAL PART ##############
char* nombreArchivoMin_real = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreDirSec)*strlen(nombreArMin_real)*sizeof(char)+sizeof(char)*3);
strcpy(nombreArchivoMin_real, rutaADirecSec);
strcat(nombreArchivoMin_real, nombreArMin_real);
char* nombreArchivoCoefs_real = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreDirSec)*strlen(nombreArCoef_real)*sizeof(char)+sizeof(char)*3);
strcpy(nombreArchivoCoefs_real, rutaADirecSec);
strcat(nombreArchivoCoefs_real, nombreArCoef_real);
printf("...Comenzando minimizacion de coeficientes parte real...\n");
clock_t tiempoMinPartReal;
tiempoMinPartReal = clock();
float* MC_real = minGradConjugado_MinCuadra_escritura(nombreArchivoMin_real, nombreArchivoCoefs_real, MV, MU, visi_parteReal, w, cantVisi, N, matrizDeUnosTamN, maxIter, tol);
tiempoMinPartReal = clock() - tiempoMinPartReal;
float tiempoTotalMinPartReal = ((float)tiempoMinPartReal)/CLOCKS_PER_SEC;
printf("Proceso de minimizacion de coeficientes parte real terminado.\n");
free(nombreArchivoMin_real);
free(nombreArchivoCoefs_real);
// ############### COMPUTATION OF INFORMATION LEVEL ##############
clock_t tiempoInfo;
tiempoInfo = clock();
float* medidasDeInfo = calInfoFisherDiag(MV, cantVisi, N, MU, w);
tiempoInfo = clock() - tiempoInfo;
float tiempoTotalInfo = ((float)tiempoInfo)/CLOCKS_PER_SEC;
hipFree(MU);
hipFree(MV);
// ############### RECONSTRUCTION OF THE GRIDDED PLANE AND STORAGE OF THE IMAGE RECONSTRUCTION ##############
char* nombreArchivoReconsImg = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreDirSec)*strlen(nombreArReconsImg)*sizeof(char)+sizeof(char)*3);
strcpy(nombreArchivoReconsImg, rutaADirecSec);
strcat(nombreArchivoReconsImg, nombreArReconsImg);
clock_t tiempoCalculoMV_AF;
tiempoCalculoMV_AF = clock();
float* MV_AF = calcularMV_Normal_estFourier(ancho, N, delta_v, matrizDeUnosTamN);
tiempoCalculoMV_AF = clock() - tiempoCalculoMV_AF;
float tiempoTotalCalculoMV_AF = ((float)tiempoCalculoMV_AF)/CLOCKS_PER_SEC;
clock_t tiempoCalculoMU_AF;
tiempoCalculoMU_AF = clock();
float* MU_AF = calcularMV_Normal_estFourier(ancho, N, delta_u, matrizDeUnosTamN);
tiempoCalculoMU_AF = clock() - tiempoCalculoMU_AF;
float tiempoTotalCalculoMU_AF = ((float)tiempoCalculoMU_AF)/CLOCKS_PER_SEC;
clock_t tiempoReconsFourierPartImag;
tiempoReconsFourierPartImag = clock();
float* estimacionFourier_ParteImag = estimacionDePlanoDeFourier(MV_AF, N, N, MC_imag, N, N, MU_AF);
tiempoReconsFourierPartImag = clock() - tiempoReconsFourierPartImag;
float tiempoTotalReconsFourierPartImag = ((float)tiempoReconsFourierPartImag)/CLOCKS_PER_SEC;
clock_t tiempoReconsFourierPartReal;
tiempoReconsFourierPartReal = clock();
float* estimacionFourier_ParteReal = estimacionDePlanoDeFourier(MV_AF, N, N, MC_real, N, N, MU_AF);
tiempoReconsFourierPartReal = clock() - tiempoReconsFourierPartReal;
float tiempoTotalReconsFourierPartReal = ((float)tiempoReconsFourierPartReal)/CLOCKS_PER_SEC;
clock_t tiempoReconsTransInver;
tiempoReconsTransInver = clock();
escribirTransformadaInversaFourier2D(estimacionFourier_ParteImag, estimacionFourier_ParteReal, N, nombreArchivoReconsImg);
tiempoReconsTransInver = clock() - tiempoReconsTransInver;
float tiempoTotalReconsTransInver = ((float)tiempoReconsTransInver)/CLOCKS_PER_SEC;
hipFree(estimacionFourier_ParteImag);
hipFree(estimacionFourier_ParteReal);
free(nombreArchivoReconsImg);
// ############### COMPUTATION OF COMPRESSION LEVEL ##############
char* rutaADirecTer = (char*) malloc(strlen(rutaADirecSec)*strlen(nombreDirTer)*sizeof(char)+sizeof(char)*3);
strcpy(rutaADirecTer, rutaADirecSec);
strcat(rutaADirecTer, "/");
strcat(rutaADirecTer, nombreDirTer);
if(mkdir(rutaADirecTer, 0777) == -1)
{
printf("ERROR: No se pudo crear subdirectorio.\n");
printf("PROGRAMA ABORTADO.\n");
exit(0);
}
strcat(rutaADirecTer, "/");
clock_t tiempoReconsFourierPartImagComp;
clock_t tiempoReconsFourierPartRealComp;
clock_t tiempoReconsTransInverComp;
printf("...Comenzando calculo de compresiones...\n");
clock_t tiempoCompresion;
tiempoCompresion = clock();
int cantCoefs = calPSNRDeDistintasCompresiones(inicioPorcenCompre, terminoPorcenCompre, cantPorcen, rutaADirecSec, rutaADirecTer, nombreArReconsCompreImg, MC_imag, MC_real, MV_AF, MU_AF, N, &tiempoReconsFourierPartImagComp, &tiempoReconsFourierPartRealComp, &tiempoReconsTransInverComp);
tiempoCompresion = clock() - tiempoCompresion;
float tiempoTotalCompresion = ((float)tiempoCompresion)/CLOCKS_PER_SEC;
printf("Proceso de calculo de compresiones terminado.\n");
free(rutaADirecTer);
char* nombreArchivoInfoComp = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreArInfoCompresion)*sizeof(char)+sizeof(char)*2);
strcpy(nombreArchivoInfoComp, nombreDirPrin);
strcat(nombreArchivoInfoComp, "/");
strcat(nombreArchivoInfoComp, nombreArInfoCompresion);
FILE* archivo = fopen(nombreArchivoInfoComp, "a");
float nivelDeCompresion = 1.0 - cantCoefs * 1.0 / (N*N); // parentheses needed: "/ N*N" divides by N and then multiplies by N again
fprintf(archivo, "%d %f %.12f %.12e %.12e %.12f %.12d\n", iterActual, ancho/delta_u, ancho, medidasDeInfo[0], medidasDeInfo[1], nivelDeCompresion, cantCoefs);
fclose(archivo);
free(nombreArchivoInfoComp);
free(medidasDeInfo);
hipFree(MC_real);
hipFree(MC_imag);
hipFree(MU_AF);
hipFree(MV_AF);
float tiempoTotalReconsFourierPartImagComp = ((float)tiempoReconsFourierPartImagComp)/CLOCKS_PER_SEC;
float tiempoTotalReconsFourierPartRealComp = ((float)tiempoReconsFourierPartRealComp)/CLOCKS_PER_SEC;
float tiempoTotalReconsTransInverComp = ((float)tiempoReconsTransInverComp)/CLOCKS_PER_SEC;
// ############### WRITING OF THE EXECUTION-TIMES FILE ##############
char* nombreArchivoInfoTiemposEjecu = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreArInfoTiemposEjecu)*sizeof(char)+sizeof(char)*2);
strcpy(nombreArchivoInfoTiemposEjecu, nombreDirPrin);
strcat(nombreArchivoInfoTiemposEjecu, "/");
strcat(nombreArchivoInfoTiemposEjecu, nombreArInfoTiemposEjecu);
FILE* archivoInfoTiemposEjecu = fopen(nombreArchivoInfoTiemposEjecu, "a");
fprintf(archivoInfoTiemposEjecu, "%d %.12f %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e\n", iterActual, ancho, tiempoTotalCalculoMV, tiempoTotalCalculoMU, tiempoTotalMinPartImag, tiempoTotalMinPartReal, tiempoTotalInfo, tiempoTotalCompresion, tiempoTotalCalculoMV_AF, tiempoTotalCalculoMU_AF, tiempoTotalReconsFourierPartImag, tiempoTotalReconsFourierPartReal, tiempoTotalReconsTransInver, tiempoTotalReconsFourierPartImagComp, tiempoTotalReconsFourierPartRealComp, tiempoTotalReconsTransInverComp);
fclose(archivoInfoTiemposEjecu);
free(rutaADirecSec);
}
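// Same pipeline as calCompSegunAncho_Normal_escritura, but using the rectangular
// (smoothed-box) basis of width "ancho" with edge steepness "estrechezDeBorde".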
void calCompSegunAncho_Rect_escritura(char nombreDirPrin[], char* nombreDirSec, char nombreDirTer[], float ancho, float cotaEnergia, int iterActual, int maxIter, float tol, float* u, float* v, float* w, float* visi_parteImaginaria, float* visi_parteReal, float delta_u, float delta_v, float* matrizDeUnos, long cantVisi, long N, float* matrizDeUnosTamN, float estrechezDeBorde)
{
float inicioPorcenCompre = 0.0;
float terminoPorcenCompre = 0.2;
int cantPorcen = 101;
// int cantPorcen = 2;
// ############### OUTPUT FILE NAMES CONFIG. ##############
char nombreArReconsImg[] = "reconsImg.fit";
char nombreArReconsCompreImg[] = "reconsCompreImg.fit";
char nombreArMin_imag[] = "minCoefs_imag.txt";
char nombreArCoef_imag[] = "coefs_imag.txt";
char nombreArCoef_comp_imag[] = "coefs_comp_imag.txt";
char nombreArMin_real[] = "minCoefs_real.txt";
char nombreArCoef_real[] = "coefs_real.txt";
char nombreArCoef_comp_real[] = "coefs_comp_real.txt";
char nombreArInfoCompresion[] = "infoCompre.txt";
char nombreArInfoTiemposEjecu[] = "infoTiemposEjecu.txt";
// ############### COMPUTATION OF MU AND MV - CREATION OF SECONDARY DIRECTORY ##############
printf("...Comenzando calculo de MV...\n");
clock_t tiempoCalculoMV;
tiempoCalculoMV = clock();
float* MV = calcularMV_Rect(v, delta_v, cantVisi, N, estrechezDeBorde, ancho, matrizDeUnos);
tiempoCalculoMV = clock() - tiempoCalculoMV;
float tiempoTotalCalculoMV = ((float)tiempoCalculoMV)/CLOCKS_PER_SEC;
printf("Calculo de MV completado.\n");
printf("...Comenzando calculo de MU...\n");
clock_t tiempoCalculoMU;
tiempoCalculoMU = clock();
float* MU = calcularMV_Rect(u, delta_u, cantVisi, N, estrechezDeBorde, ancho, matrizDeUnos);
tiempoCalculoMU = clock() - tiempoCalculoMU;
float tiempoTotalCalculoMU = ((float)tiempoCalculoMU)/CLOCKS_PER_SEC;
printf("Calculo de MU completado.\n");
char* rutaADirecSec = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreDirSec)*sizeof(char)+sizeof(char)*3);
strcpy(rutaADirecSec, nombreDirPrin);
strcat(rutaADirecSec, "/");
strcat(rutaADirecSec, nombreDirSec);
if(mkdir(rutaADirecSec, 0777) == -1)
{
printf("ERROR: No se pudo crear subdirectorio.");
printf("PROGRAMA ABORTADO.\n");
exit(0);
}
strcat(rutaADirecSec, "/");
// ############### COEFFICIENT MINIMIZATION, IMAGINARY PART ##############
char* nombreArchivoMin_imag = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreDirSec)*strlen(nombreArMin_imag)*sizeof(char)+sizeof(char)*3);
strcpy(nombreArchivoMin_imag, rutaADirecSec);
strcat(nombreArchivoMin_imag, nombreArMin_imag);
char* nombreArchivoCoefs_imag = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreDirSec)*strlen(nombreArCoef_imag)*sizeof(char)+sizeof(char)*3);
strcpy(nombreArchivoCoefs_imag, rutaADirecSec);
strcat(nombreArchivoCoefs_imag, nombreArCoef_imag);
printf("...Comenzando minimizacion de coeficientes parte imaginaria...\n");
clock_t tiempoMinPartImag;
tiempoMinPartImag = clock();
float* MC_imag = minGradConjugado_MinCuadra_escritura(nombreArchivoMin_imag, nombreArchivoCoefs_imag, MV, MU, visi_parteImaginaria, w, cantVisi, N, matrizDeUnosTamN, maxIter, tol);
tiempoMinPartImag = clock() - tiempoMinPartImag;
float tiempoTotalMinPartImag = ((float)tiempoMinPartImag)/CLOCKS_PER_SEC;
printf("Proceso de minimizacion de coeficientes parte imaginaria terminado.\n");
free(nombreArchivoMin_imag);
free(nombreArchivoCoefs_imag);
// ############### COEFFICIENT MINIMIZATION, REAL PART ##############
char* nombreArchivoMin_real = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreDirSec)*strlen(nombreArMin_real)*sizeof(char)+sizeof(char)*3);
strcpy(nombreArchivoMin_real, rutaADirecSec);
strcat(nombreArchivoMin_real, nombreArMin_real);
char* nombreArchivoCoefs_real = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreDirSec)*strlen(nombreArCoef_real)*sizeof(char)+sizeof(char)*3);
strcpy(nombreArchivoCoefs_real, rutaADirecSec);
strcat(nombreArchivoCoefs_real, nombreArCoef_real);
printf("...Comenzando minimizacion de coeficientes parte real...\n");
clock_t tiempoMinPartReal;
tiempoMinPartReal = clock();
float* MC_real = minGradConjugado_MinCuadra_escritura(nombreArchivoMin_real, nombreArchivoCoefs_real, MV, MU, visi_parteReal, w, cantVisi, N, matrizDeUnosTamN, maxIter, tol);
tiempoMinPartReal = clock() - tiempoMinPartReal;
float tiempoTotalMinPartReal = ((float)tiempoMinPartReal)/CLOCKS_PER_SEC;
printf("Proceso de minimizacion de coeficientes parte real terminado.\n");
free(nombreArchivoMin_real);
free(nombreArchivoCoefs_real);
// ############### COMPUTATION OF INFORMATION LEVEL ##############
clock_t tiempoInfo;
tiempoInfo = clock();
float* medidasDeInfo = calInfoFisherDiag(MV, cantVisi, N, MU, w);
tiempoInfo = clock() - tiempoInfo;
float tiempoTotalInfo = ((float)tiempoInfo)/CLOCKS_PER_SEC;
hipFree(MU);
hipFree(MV);
// ############### RECONSTRUCTION OF THE GRIDDED PLANE AND STORAGE OF THE IMAGE RECONSTRUCTION ##############
char* nombreArchivoReconsImg = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreDirSec)*strlen(nombreArReconsImg)*sizeof(char)+sizeof(char)*3);
strcpy(nombreArchivoReconsImg, rutaADirecSec);
strcat(nombreArchivoReconsImg, nombreArReconsImg);
clock_t tiempoCalculoMV_AF;
tiempoCalculoMV_AF = clock();
float* MV_AF = calcularMV_Rect_estFourier(ancho, N, delta_v, matrizDeUnos, estrechezDeBorde, matrizDeUnosTamN);
tiempoCalculoMV_AF = clock() - tiempoCalculoMV_AF;
float tiempoTotalCalculoMV_AF = ((float)tiempoCalculoMV_AF)/CLOCKS_PER_SEC;
clock_t tiempoCalculoMU_AF;
tiempoCalculoMU_AF = clock();
float* MU_AF = calcularMV_Rect_estFourier(ancho, N, delta_u, matrizDeUnos, estrechezDeBorde, matrizDeUnosTamN);
tiempoCalculoMU_AF = clock() - tiempoCalculoMU_AF;
float tiempoTotalCalculoMU_AF = ((float)tiempoCalculoMU_AF)/CLOCKS_PER_SEC;
clock_t tiempoReconsFourierPartImag;
tiempoReconsFourierPartImag = clock();
float* estimacionFourier_ParteImag = estimacionDePlanoDeFourier(MV_AF, N, N, MC_imag, N, N, MU_AF);
tiempoReconsFourierPartImag = clock() - tiempoReconsFourierPartImag;
float tiempoTotalReconsFourierPartImag = ((float)tiempoReconsFourierPartImag)/CLOCKS_PER_SEC;
clock_t tiempoReconsFourierPartReal;
tiempoReconsFourierPartReal = clock();
float* estimacionFourier_ParteReal = estimacionDePlanoDeFourier(MV_AF, N, N, MC_real, N, N, MU_AF);
tiempoReconsFourierPartReal = clock() - tiempoReconsFourierPartReal;
float tiempoTotalReconsFourierPartReal = ((float)tiempoReconsFourierPartReal)/CLOCKS_PER_SEC;
clock_t tiempoReconsTransInver;
tiempoReconsTransInver = clock();
escribirTransformadaInversaFourier2D(estimacionFourier_ParteImag, estimacionFourier_ParteReal, N, nombreArchivoReconsImg);
tiempoReconsTransInver = clock() - tiempoReconsTransInver;
float tiempoTotalReconsTransInver = ((float)tiempoReconsTransInver)/CLOCKS_PER_SEC;
hipFree(estimacionFourier_ParteImag);
hipFree(estimacionFourier_ParteReal);
free(nombreArchivoReconsImg);
// ############### COMPUTATION OF COMPRESSION LEVEL ##############
char* rutaADirecTer = (char*) malloc(strlen(rutaADirecSec)*strlen(nombreDirTer)*sizeof(char)+sizeof(char)*3);
strcpy(rutaADirecTer, rutaADirecSec);
strcat(rutaADirecTer, "/");
strcat(rutaADirecTer, nombreDirTer);
if(mkdir(rutaADirecTer, 0777) == -1)
{
printf("ERROR: No se pudo crear subdirectorio.\n");
printf("PROGRAMA ABORTADO.\n");
exit(0);
}
strcat(rutaADirecTer, "/");
clock_t tiempoReconsFourierPartImagComp;
clock_t tiempoReconsFourierPartRealComp;
clock_t tiempoReconsTransInverComp;
printf("...Comenzando calculo de compresiones...\n");
clock_t tiempoCompresion;
tiempoCompresion = clock();
int cantCoefs = calPSNRDeDistintasCompresiones(inicioPorcenCompre, terminoPorcenCompre, cantPorcen, rutaADirecSec, rutaADirecTer, nombreArReconsCompreImg, MC_imag, MC_real, MV_AF, MU_AF, N, &tiempoReconsFourierPartImagComp, &tiempoReconsFourierPartRealComp, &tiempoReconsTransInverComp);
tiempoCompresion = clock() - tiempoCompresion;
float tiempoTotalCompresion = ((float)tiempoCompresion)/CLOCKS_PER_SEC;
printf("Proceso de calculo de compresiones terminado.\n");
free(rutaADirecTer);
char* nombreArchivoInfoComp = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreArInfoCompresion)*sizeof(char)+sizeof(char)*2);
strcpy(nombreArchivoInfoComp, nombreDirPrin);
strcat(nombreArchivoInfoComp, "/");
strcat(nombreArchivoInfoComp, nombreArInfoCompresion);
FILE* archivo = fopen(nombreArchivoInfoComp, "a");
float nivelDeCompresion = 1.0 - cantCoefs * 1.0 / (N*N); // parentheses needed: "/ N*N" divides by N and then multiplies by N again
fprintf(archivo, "%d %f %.12f %.12e %.12e %.12f %.12d\n", iterActual, ancho/delta_u, ancho, medidasDeInfo[0], medidasDeInfo[1], nivelDeCompresion, cantCoefs);
fclose(archivo);
free(nombreArchivoInfoComp);
free(medidasDeInfo);
hipFree(MC_real);
hipFree(MC_imag);
hipFree(MU_AF);
hipFree(MV_AF);
float tiempoTotalReconsFourierPartImagComp = ((float)tiempoReconsFourierPartImagComp)/CLOCKS_PER_SEC;
float tiempoTotalReconsFourierPartRealComp = ((float)tiempoReconsFourierPartRealComp)/CLOCKS_PER_SEC;
float tiempoTotalReconsTransInverComp = ((float)tiempoReconsTransInverComp)/CLOCKS_PER_SEC;
// ############### WRITING OF THE EXECUTION-TIMES FILE ##############
char* nombreArchivoInfoTiemposEjecu = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreArInfoTiemposEjecu)*sizeof(char)+sizeof(char)*2);
strcpy(nombreArchivoInfoTiemposEjecu, nombreDirPrin);
strcat(nombreArchivoInfoTiemposEjecu, "/");
strcat(nombreArchivoInfoTiemposEjecu, nombreArInfoTiemposEjecu);
FILE* archivoInfoTiemposEjecu = fopen(nombreArchivoInfoTiemposEjecu, "a");
fprintf(archivoInfoTiemposEjecu, "%d %.12f %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e\n", iterActual, ancho, tiempoTotalCalculoMV, tiempoTotalCalculoMU, tiempoTotalMinPartImag, tiempoTotalMinPartReal, tiempoTotalInfo, tiempoTotalCompresion, tiempoTotalCalculoMV_AF, tiempoTotalCalculoMU_AF, tiempoTotalReconsFourierPartImag, tiempoTotalReconsFourierPartReal, tiempoTotalReconsTransInver, tiempoTotalReconsFourierPartImagComp, tiempoTotalReconsFourierPartRealComp, tiempoTotalReconsTransInverComp);
fclose(archivoInfoTiemposEjecu);
free(rutaADirecSec);
}
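// NOTE: throughout this file path strings are sized with products of string lengths,
// e.g. malloc(strlen(a)*strlen(b)*sizeof(char) + 3); this over-allocates in most cases and
// under-allocates when one of the strings is a single character (see the fixes in
// calculoDeInfoCompre_*). A minimal sketch of a safer join helper is shown below; it is
// NOT used by the existing code and the name "unirRutas" is only illustrative.
static char* unirRutas(const char* base, const char* resto)
{
  // sum of lengths plus separator and null terminator
  char* salida = (char*) malloc(strlen(base) + strlen(resto) + 2);
  strcpy(salida, base);
  strcat(salida, "/");
  strcat(salida, resto);
  return salida;
}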
double funcOptiInfo_Traza_Rect(double ancho, void* params)
{
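// GSL minimizers only minimize, so this objective returns the negative of the
// Fisher-information trace; minimizing it therefore maximizes the information metric.
// funcOptiInfo_Traza_Normal below follows the same convention.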
struct parametros_BaseRect* ps = (struct parametros_BaseRect*) params;
float* MV = calcularMV_Rect(ps->v, ps->delta_v, ps->cantVisi, ps->N, ps->estrechezDeBorde, ancho, ps->matrizDeUnos);
float* MU = calcularMV_Rect(ps->u, ps->delta_u, ps->cantVisi, ps->N, ps->estrechezDeBorde, ancho, ps->matrizDeUnos);
float* medidasDeInfo = calInfoFisherDiag(MV, ps->cantVisi, ps->N, MU, ps->w);
float medidaSumaDeLaDiagonal = medidasDeInfo[0];
free(medidasDeInfo);
hipFree(MV);
hipFree(MU);
return -1 * medidaSumaDeLaDiagonal;
}
double funcOptiInfo_Traza_Normal(double ancho, void* params)
{
struct parametros_BaseNormal* ps = (struct parametros_BaseNormal*) params;
float* MV = calcularMV_Normal(ps->v, ps->delta_v, ps->cantVisi, ps->N, ancho);
float* MU = calcularMV_Normal(ps->u, ps->delta_u, ps->cantVisi, ps->N, ancho);
float* medidasDeInfo = calInfoFisherDiag(MV, ps->cantVisi, ps->N, MU, ps->w);
float medidaSumaDeLaDiagonal = medidasDeInfo[0];
free(medidasDeInfo);
hipFree(MV);
hipFree(MU);
return -1 * medidaSumaDeLaDiagonal;
}
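// Golden-section/Brent search (gsl_min_fminimizer_quad_golden) over the rectangular-basis
// width, bracketed between 1.0*delta_u and 5.0*delta_u, maximizing the Fisher-information
// trace (by minimizing its negative). Returns the selected width in the same units as delta_u.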
double goldenMin_BaseRect(float* u, float* v, float* w, float delta_u, float delta_v, float* matrizDeUnos, long cantVisi, long N, float estrechezDeBorde)
{
int status;
int iter = 0, max_iter = 100;
const gsl_min_fminimizer_type *T;
gsl_min_fminimizer *s;
gsl_function F;
parametros_BaseRect actual;
actual.u = u;
actual.v = v;
actual.w = w;
actual.delta_u = delta_u;
actual.delta_v = delta_v;
actual.matrizDeUnos = matrizDeUnos;
actual.cantVisi = cantVisi;
actual.N = N;
actual.estrechezDeBorde = estrechezDeBorde;
double m;
double a = 1.0 * actual.delta_u, b = 5.0 * actual.delta_u;
F.function = &funcOptiInfo_Traza_Rect;
void* punteroVoidAActual = &actual;
F.params = punteroVoidAActual;
T = gsl_min_fminimizer_quad_golden;
s = gsl_min_fminimizer_alloc (T);
gsl_set_error_handler_off();
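// The GSL error handler is disabled so that gsl_min_fminimizer_set reports failure through
// its return value instead of aborting; m starts at the lower bound and is nudged upward
// until f(m) lies below f(a) and f(b), which gives a valid bracketing triplet.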
m = 1.0 * actual.delta_u;
int status_interval = gsl_min_fminimizer_set (s, &F, m, a, b);
while(status_interval)
{
m += 0.001 * actual.delta_u;
printf("m ahora es %f\n", m/actual.delta_u);
status_interval = gsl_min_fminimizer_set (s, &F, m, a, b);
}
printf ("using %s method\n",
gsl_min_fminimizer_name (s));
printf ("%5s [%9s, %9s] %9s\n",
"iter", "lower", "upper", "min");
printf ("%5d [%.7f, %.7f] %.7f\n",
iter, a, b, m);
do
{
iter++;
status = gsl_min_fminimizer_iterate (s);
m = gsl_min_fminimizer_x_minimum (s);
a = gsl_min_fminimizer_x_lower (s);
b = gsl_min_fminimizer_x_upper (s);
status = gsl_min_test_interval (a, b, 0.01, 0.01);
if (status == GSL_SUCCESS)
printf ("Converged:\n");
printf ("%5d [%.7f, %.7f] "
"%.7f\n",
iter, a/delta_u, b/delta_u, m/delta_u);
}
while (status == GSL_CONTINUE && iter < max_iter);
gsl_min_fminimizer_free (s);
return m;
}
double goldenMin_BaseNormal(float* u, float* v, float* w, float delta_u, float delta_v, long cantVisi, long N)
{
int status;
int iter = 0, max_iter = 100;
const gsl_min_fminimizer_type *T;
gsl_min_fminimizer *s;
gsl_function F;
parametros_BaseNormal actual;
actual.u = u;
actual.v = v;
actual.w = w;
actual.delta_u = delta_u;
actual.delta_v = delta_v;
actual.cantVisi = cantVisi;
actual.N = N;
double m = 1.5 * actual.delta_u;
double a = 1.0 * actual.delta_u, b = 5.0 * actual.delta_u;
F.function = &funcOptiInfo_Traza_Normal;
void* punteroVoidAActual = &actual;
F.params = punteroVoidAActual;
T = gsl_min_fminimizer_quad_golden;
s = gsl_min_fminimizer_alloc (T);
gsl_set_error_handler_off();
m = 1.0 * actual.delta_u;
int status_interval = gsl_min_fminimizer_set (s, &F, m, a, b);
while(status_interval)
{
m += 0.001 * actual.delta_u;
printf("m ahora es %f\n", m/actual.delta_u);
status_interval = gsl_min_fminimizer_set (s, &F, m, a, b);
}
printf ("using %s method\n",
gsl_min_fminimizer_name (s));
printf ("%5s [%9s, %9s] %9s\n",
"iter", "lower", "upper", "min");
printf ("%5d [%.7f, %.7f] %.7f\n",
iter, a, b, m);
do
{
iter++;
status = gsl_min_fminimizer_iterate (s);
m = gsl_min_fminimizer_x_minimum (s);
a = gsl_min_fminimizer_x_lower (s);
b = gsl_min_fminimizer_x_upper (s);
status = gsl_min_test_interval (a, b, 0.001, 0.0);
if (status == GSL_SUCCESS)
printf ("Converged:\n");
printf ("%5d [%.7f, %.7f] "
"%.7f\n",
iter, a/delta_u, b/delta_u,m/delta_u);
}
while (status == GSL_CONTINUE && iter < max_iter);
gsl_min_fminimizer_free (s);
return m;
}
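// Reads visibilities from a plain-text file with one sample per line in the order:
// frequency, real part, imaginary part, u, v, w; u and v are converted from metres
// to wavelengths by multiplying by frequency/c.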
void lecturaDeTXT(char nombreArchivo[], float* frecuencia, float* u, float* v, float* w, float* visi_parteImaginaria, float* visi_parteReal, long cantVisi)
{
long contador = 0;
FILE *fp;
size_t len = 0;
char *line = NULL;
ssize_t read;
float c_constant = 2.99792458E8;
fp = fopen(nombreArchivo, "r");
if (fp == NULL)
{
printf("No se pudo abrir el archivo %s",nombreArchivo);
exit(0);
}
while ((read = getline(&line, &len, fp)) != -1)
{
*frecuencia = atof(strtok(line, " "));
visi_parteReal[contador] = atof(strtok(NULL, " "));
visi_parteImaginaria[contador] = atof(strtok(NULL, " "));
u[contador] = atof(strtok(NULL, " ")) * (*frecuencia)/c_constant;
v[contador] = atof(strtok(NULL, " ")) * (*frecuencia)/c_constant;
w[contador] = atof(strtok(NULL, " "));
contador++;
if(contador == cantVisi)
break;
}
free(line);
fclose(fp);
}
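// Reads the number of visibilities from "<nombreArchivo>cantvisi.txt", a one-line file
// presumably produced by the CASA export script (deMSaTXT.py) referenced in main().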
void lectCantVisi(char nombreArchivo[], long* cantVisi)
{
long contador = 0;
FILE *fp;
size_t len = 0;
char *line = NULL;
ssize_t read;
char* nombreNuevoTXT = (char*) malloc(strlen(nombreArchivo)*sizeof(char)+sizeof(char)*20);
strcpy(nombreNuevoTXT, nombreArchivo);
strcat(nombreNuevoTXT, "cantvisi.txt");
fp = fopen(nombreNuevoTXT, "r");
if (fp == NULL)
{
printf("No se pudo abrir el archivo %s",nombreArchivo);
exit(0);
}
read = getline(&line, &len, fp);
printf("Se han leido %s visibilidades.\n", line);
*cantVisi = atoi(line);
free(line);
free(nombreNuevoTXT);
fclose(fp);
}
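// Reads "<nombreArchivo>.txt" with one visibility per line in the order:
// real part, imaginary part, u, v, w. Unlike lecturaDeTXT, u and v are used
// as stored (no frequency scaling is applied here).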
void lectDeTXTcreadoDesdeMS(char nombreArchivo[], float* u, float* v, float* w, float* visi_parteImaginaria, float* visi_parteReal)
{
long contador = 0;
FILE *fp;
size_t len = 0;
char *line = NULL;
ssize_t read;
char* nombreNuevoTXT = (char*) malloc(strlen(nombreArchivo)*sizeof(char)+sizeof(char)*5);
strcpy(nombreNuevoTXT, nombreArchivo);
strcat(nombreNuevoTXT, ".txt");
fp = fopen(nombreNuevoTXT, "r");
if (fp == NULL)
{
printf("No se pudo abrir el archivo %s",nombreArchivo);
exit(0);
}
while ((read = getline(&line, &len, fp)) != -1)
{
visi_parteReal[contador] = atof(strtok(line, " "));
visi_parteImaginaria[contador] = atof(strtok(NULL, " "));
u[contador] = atof(strtok(NULL, " "));
v[contador] = atof(strtok(NULL, " "));
w[contador] = atof(strtok(NULL, " "));
contador++;
}
printf("El contador es %ld\n", contador);
free(line);
free(nombreNuevoTXT);
fclose(fp);
}
void lectDeTXTcreadoDesdeMSConLimite(char nombreArchivo[], float* u, float* v, float* w, float* visi_parteImaginaria, float* visi_parteReal, long inicio, long fin, long cantVisi)
{
long contador = 0;
long contadorIte = 0;
FILE *fp;
size_t len = 0;
char *line = NULL;
ssize_t read;
char* nombreNuevoTXT = (char*) malloc(strlen(nombreArchivo)*sizeof(char)+sizeof(char)*5);
strcpy(nombreNuevoTXT, nombreArchivo);
strcat(nombreNuevoTXT, ".txt");
fp = fopen(nombreNuevoTXT, "r");
printf("Nombre nuevo es %s\n", nombreNuevoTXT);
if (fp == NULL)
{
printf("No se pudo abrir el archivo %s",nombreArchivo);
exit(0);
}
while ((read = getline(&line, &len, fp)) != -1)
{
if (contadorIte >= inicio)
{
visi_parteReal[contador] = atof(strtok(line, " "));
visi_parteImaginaria[contador] = atof(strtok(NULL, " "));
u[contador] = atof(strtok(NULL, " "));
v[contador] = atof(strtok(NULL, " "));
w[contador] = atof(strtok(NULL, " "));
contador++;
}
contadorIte++;
if(contadorIte >= fin)
break;
}
printf("El contador es %ld\n", contador);
free(line);
free(nombreNuevoTXT);
fclose(fp);
}
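// Both escrituraDeArchivoConParametros_* variants write a human-readable header with the
// start date and the main run parameters (visibility count, N x N coefficients, maximum
// iterations and gradient tolerance); the Rect variant additionally logs the edge steepness.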
void escrituraDeArchivoConParametros_Normal(char nombreArchivoPara[], char nombreArchivo[], char nombreDirPrin[], int cantVisi, int N, int maxIter, float tolGrad)
{
time_t t = time(NULL);
struct tm tm = *localtime(&t);
FILE* archivoDePara = fopen(nombreArchivoPara, "w");
fprintf(archivoDePara, "Programa inicio su ejecucion con fecha: %d-%d-%d %d:%d:%d\n", tm.tm_year + 1900, tm.tm_mon + 1,tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
fprintf(archivoDePara, "Compresion con base normal utilizando informacion del archivo %s cuyos parametros de ejecucion fueron:\n", nombreArchivo);
fprintf(archivoDePara, "Cantidad de visibilidades(cantVisi): %d\n", cantVisi);
fprintf(archivoDePara, "Cantidad de Coefs(N x N): %d x %d = %d\n", N, N, N*N);
fprintf(archivoDePara, "Maximo de iteraciones impuesto para la minimizacion de coeficientes(maxIter): %d\n", maxIter);
fprintf(archivoDePara, "Grado de tolerancia a la minimizacion de los coefs(tolGrad): %.12e\n", tolGrad);
fclose(archivoDePara);
}
void escrituraDeArchivoConParametros_Rect(char nombreArchivoPara[], char nombreArchivo[], char nombreDirPrin[], long cantVisi, long N, int maxIter, float tolGrad, float estrechezDeBorde)
{
time_t t = time(NULL);
struct tm tm = *localtime(&t);
FILE* archivoDePara = fopen(nombreArchivoPara, "w");
fprintf(archivoDePara, "Programa inicio su ejecucion con fecha: %d-%d-%d %d:%d:%d\n", tm.tm_year + 1900, tm.tm_mon + 1,tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
fprintf(archivoDePara, "Compresion con base rectangular utilizando informacion del archivo %s cuyos parametros de ejecucion fueron:\n", nombreArchivo);
fprintf(archivoDePara, "Estrechez de borde: %f\n", estrechezDeBorde);
fprintf(archivoDePara, "Cantidad de visibilidades(cantVisi): %ld\n", cantVisi);
fprintf(archivoDePara, "Cantidad de Coefs(N x N): %ld x %ld = %ld\n", N, N, N*N);
fprintf(archivoDePara, "Maximo de iteraciones impuesto para la minimizacion de coeficientes(maxIter): %d\n", maxIter);
fprintf(archivoDePara, "Grado de tolerancia a la minimizacion de los coefs(tolGrad): %.12e\n", tolGrad);
fclose(archivoDePara);
}
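// Sweeps cantParamEvaInfo basis widths, linearly spaced between inicioIntervalo*delta_u and
// finIntervalo*delta_u, creating one "<nombreDirSec><i>" subdirectory per width and running the
// full minimization/compression pipeline (calCompSegunAncho_Normal_escritura) for each of them.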
void calculoDeInfoCompre_BaseNormal(char nombreArchivo[], int maxIter, float tolGrad, float tolGolden, float* u, float* v, float* w, float* visi_parteImaginaria, float* visi_parteReal, float delta_u, float delta_v, long cantVisi, long N, float cotaEnergia, char nombreDirPrin[], char nombreDirSec[], char nombreDirTer[], int cantParamEvaInfo, float inicioIntervalo, float finIntervalo, float* matrizDeUnosEstFourier, float estrechezDeBorde)
{
float inicioIntervaloEscalado = inicioIntervalo * delta_u;
float finIntervaloEscalado = finIntervalo * delta_u;
char nombreArPara[] = "parametrosEjecucion.txt";
if(cotaEnergia > 1.0)
{
printf("ERROR: La cota de energia debe estar expresado en decimales, no en porcentajes.\n");
printf("PROGRAMA ABORTADO.\n");
exit(0);
}
if(mkdir(nombreDirPrin, 0777) == -1)
{
printf("ERROR: El directorio EXISTE, PELIGRO DE SOBREESCRITURA, por favor eliga otro nombre de directorio.\n");
printf("PROGRAMA ABORTADO.\n");
exit(0);
}
else
printf("Directorio creado.\n");
char* nombreArchivoPara = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreArPara)*sizeof(char)+sizeof(char)*3);
strcpy(nombreArchivoPara, nombreDirPrin);
strcat(nombreArchivoPara, "/");
strcat(nombreArchivoPara, nombreArPara);
escrituraDeArchivoConParametros_Normal(nombreArchivoPara, nombreArchivo, nombreDirPrin, cantVisi, N, maxIter, tolGrad);
free(nombreArchivoPara);
// float optimo = goldenMin_BaseNormal(u, v, w, delta_u, delta_v, cantVisi, N);
// printf("El optimo esta en %.12f\n", optimo);
float* paramEvaInfo = linspace(inicioIntervaloEscalado, finIntervaloEscalado, cantParamEvaInfo);
// int i = 0;
for(int i=320; i<cantParamEvaInfo; i++) // starts at 320 to resume an interrupted sweep (the output directory in main() is named "..._parte2"); use i=0 for a full run
{
char* numComoString = numAString(&i);
sprintf(numComoString, "%d", i);
char* nombreDirSecCopia = (char*) malloc(sizeof(char)*(strlen(nombreDirSec)+strlen(numComoString)+1)); // sum of lengths plus null terminator; the original product under-allocates for single-digit indices
strcpy(nombreDirSecCopia, nombreDirSec);
strcat(nombreDirSecCopia, numComoString);
calCompSegunAncho_Normal_escritura(nombreDirPrin, nombreDirSecCopia, nombreDirTer, paramEvaInfo[i], cotaEnergia, i, maxIter, tolGrad, u, v, w, visi_parteImaginaria, visi_parteReal, delta_u, delta_v, cantVisi, N, matrizDeUnosEstFourier);
free(numComoString);
free(nombreDirSecCopia);
}
}
void calculoDeInfoCompre_BaseRect(char nombreArchivo[], int maxIter, float tolGrad, float tolGolden, float* u, float* v, float* w, float* visi_parteImaginaria, float* visi_parteReal, float delta_u, float delta_v, float* matrizDeUnos, long cantVisi, long N, float cotaEnergia, char nombreDirPrin[], char nombreDirSec[], char nombreDirTer[], int cantParamEvaInfo, float inicioIntervalo, float finIntervalo, float* matrizDeUnosEstFourier, float estrechezDeBorde)
{
float inicioIntervaloEscalado = inicioIntervalo * delta_u;
float finIntervaloEscalado = finIntervalo * delta_u;
char nombreArPara[] = "parametrosEjecucion.txt";
if(cotaEnergia > 1.0)
{
printf("ERROR: La cota de energia debe estar expresado en decimales, no en porcentajes.\n");
printf("PROGRAMA ABORTADO.\n");
exit(0);
}
if(mkdir(nombreDirPrin, 0777) == -1)
{
printf("ERROR: El directorio EXISTE, PELIGRO DE SOBREESCRITURA, por favor eliga otro nombre de directorio.\n");
printf("PROGRAMA ABORTADO.\n");
exit(0);
}
else
printf("Directorio creado.\n");
char* nombreArchivoPara = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreArPara)*sizeof(char)+sizeof(char)*3);
strcpy(nombreArchivoPara, nombreDirPrin);
strcat(nombreArchivoPara, "/");
strcat(nombreArchivoPara, nombreArPara);
escrituraDeArchivoConParametros_Rect(nombreArchivoPara, nombreArchivo, nombreDirPrin, cantVisi, N, maxIter, tolGrad, estrechezDeBorde);
free(nombreArchivoPara);
// float optimo = goldenMin_BaseRect(u, v, w, delta_u, delta_v, matrizDeUnos, cantVisi, N, estrechezDeBorde);
// printf("El optimo esta en %.12f\n", optimo);
float* paramEvaInfo = linspace(inicioIntervaloEscalado, finIntervaloEscalado, cantParamEvaInfo);
// int i = 0;
for(int i=0; i<cantParamEvaInfo; i++)
{
char* numComoString = numAString(&i);
sprintf(numComoString, "%d", i);
char* nombreDirSecCopia = (char*) malloc(sizeof(char)*(strlen(nombreDirSec)+strlen(numComoString)+1)); // sum of lengths plus null terminator; the original product under-allocates for single-digit indices
strcpy(nombreDirSecCopia, nombreDirSec);
strcat(nombreDirSecCopia, numComoString);
calCompSegunAncho_Rect_escritura(nombreDirPrin, nombreDirSecCopia, nombreDirTer, paramEvaInfo[i], cotaEnergia, i, maxIter, tolGrad, u, v, w, visi_parteImaginaria, visi_parteReal, delta_u, delta_v, matrizDeUnos, cantVisi, N, matrizDeUnosEstFourier, estrechezDeBorde);
free(numComoString);
free(nombreDirSecCopia);
}
}
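// Minimizes the coefficients once for a fixed rectangular-basis width and then writes one
// reconstructed image per requested compression percentage, keeping only the corresponding
// top fraction of coefficients ranked by energy.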
void calImagenesADistintasCompresiones_Rect(float inicioIntervalo, float finIntervalo, int cantParamEvaInfo, char nombreDirPrin[], float ancho, int maxIter, float tol, float* u, float* v, float* w, float* visi_parteImaginaria, float* visi_parteReal, float delta_u, float delta_v, float* matrizDeUnos, long cantVisi, long N, float* matrizDeUnosTamN, float estrechezDeBorde)
{
if(mkdir(nombreDirPrin, 0777) == -1)
{
printf("ERROR: El directorio EXISTE, PELIGRO DE SOBREESCRITURA, por favor eliga otro nombre de directorio.\n");
printf("PROGRAMA ABORTADO.\n");
exit(0);
}
else
printf("Directorio creado.\n");
char nombreArReconsCompreImg[] = "reconsCompreImg";
float* paramEvaInfo = linspace(inicioIntervalo/100.0, finIntervalo/100.0, cantParamEvaInfo);
// ############### COMPUTATION OF MU AND MV - CREATION OF SECONDARY DIRECTORY ##############
printf("...Comenzando calculo de MV...\n");
float* MV = calcularMV_Rect(v, delta_v, cantVisi, N, estrechezDeBorde, ancho, matrizDeUnos);
printf("Calculo de MV completado.\n");
printf("...Comenzando calculo de MU...\n");
float* MU = calcularMV_Rect(u, delta_u, cantVisi, N, estrechezDeBorde, ancho, matrizDeUnos);
printf("Calculo de MU completado.\n");
// ############### COEFFICIENT MINIMIZATION, IMAGINARY PART ##############
printf("...Comenzando minimizacion de coeficientes parte imaginaria...\n");
float* MC_imag = minGradConjugado_MinCuadra(MV, MU, visi_parteImaginaria, w, cantVisi, N, matrizDeUnosTamN, maxIter, tol);
printf("Proceso de minimizacion de coeficientes parte imaginaria terminado.\n");
// ############### COEFFICIENT MINIMIZATION, REAL PART ##############
printf("...Comenzando minimizacion de coeficientes parte real...\n");
float* MC_real = minGradConjugado_MinCuadra(MV, MU, visi_parteReal, w, cantVisi, N, matrizDeUnosTamN, maxIter, tol);
printf("Proceso de minimizacion de coeficientes parte real terminado.\n");
float* MV_AF = calcularMV_Rect_estFourier(ancho, N, delta_v, matrizDeUnos, estrechezDeBorde, matrizDeUnosTamN);
float* MU_AF = calcularMV_Rect_estFourier(ancho, N, delta_u, matrizDeUnos, estrechezDeBorde, matrizDeUnosTamN);
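// Compression machinery: the per-coefficient energies |MC_real|^2 + |MC_imag|^2 are sorted in
// descending order on the GPU and normalized by their total; for each requested percentage the
// highest-energy coefficients are kept and the rest are zeroed through the indRepComp mask.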
float* MC_comp_imag;
hipMallocManaged(&MC_comp_imag,N*N*sizeof(float));
hipMemset(MC_comp_imag, 0, N*N*sizeof(float));
float* MC_comp_real;
hipMallocManaged(&MC_comp_real,N*N*sizeof(float));
hipMemset(MC_comp_real, 0, N*N*sizeof(float));
long largo = N * N;
float* MC_img_cuadrado;
hipMallocManaged(&MC_img_cuadrado, N*N*sizeof(float));
float* MC_modulo;
hipMallocManaged(&MC_modulo, N*N*sizeof(float));
hadamardProduct(MC_imag, N, N, MC_imag, MC_img_cuadrado);
hadamardProduct(MC_real, N, N, MC_real, MC_modulo);
combinacionLinealMatrices(1.0, MC_img_cuadrado, N, N, 1.0, MC_modulo);
hipFree(MC_img_cuadrado);
af::array MC_modulo_GPU(N*N, MC_modulo);
hipFree(MC_modulo);
af::array MC_modulo_indicesOrde_GPU(N*N);
af::array MC_modulo_Orde_GPU(N*N);
af::sort(MC_modulo_Orde_GPU, MC_modulo_indicesOrde_GPU, MC_modulo_GPU, 0, false);
float total = af::sum<float>(MC_modulo_GPU);
MC_modulo_Orde_GPU = MC_modulo_Orde_GPU/total;
af::eval(MC_modulo_Orde_GPU);
af::eval(MC_modulo_indicesOrde_GPU);
af::sync();
float* auxiliar_MC_modulo_Orde_GPU = MC_modulo_Orde_GPU.device<float>();
int* auxiliar_MC_modulo_indicesOrde_GPU = MC_modulo_indicesOrde_GPU.device<int>(); // the sort-index array holds integers; fetching it as float and copying into an int buffer would reinterpret the bits
float* coefsNormalizados = (float*) malloc(largo*sizeof(float));
hipMemcpy(coefsNormalizados, auxiliar_MC_modulo_Orde_GPU, N*N*sizeof(float), hipMemcpyDeviceToHost);
int* MC_modulo_indicesOrde_CPU = (int*) malloc(largo*sizeof(int));
hipMemcpy(MC_modulo_indicesOrde_CPU, auxiliar_MC_modulo_indicesOrde_GPU, N*N*sizeof(int), hipMemcpyDeviceToHost);
MC_modulo_Orde_GPU.unlock();
MC_modulo_GPU.unlock();
MC_modulo_indicesOrde_GPU.unlock();
long cantCoefsParaCota = 0;
float sumador = 0.0;
float* cantCoefsPorParametro = (float*) malloc(sizeof(float)*cantParamEvaInfo);
float* cantidadPorcentualDeCoefs = linspace(1.0, largo, largo);
combinacionLinealMatrices(0.0, cantidadPorcentualDeCoefs, largo, 1, 1.0/N, cantidadPorcentualDeCoefs);
for(long j=0; j<cantParamEvaInfo; j++)
{
sumador = 0.0;
cantCoefsParaCota = 0;
for(long i=0; i<largo; i++)
{
sumador += coefsNormalizados[i];
cantCoefsParaCota++;
if(cantidadPorcentualDeCoefs[i] >= paramEvaInfo[j])
{
printf("Del %f%% solicitado, se ha tomado el mas cercano correspondiente al %f%% de coefs, lo que corresponde a un total de %ld coeficientes los cuales poseen el %f%% de la energia.\n", paramEvaInfo[j], cantidadPorcentualDeCoefs[i], cantCoefsParaCota, sumador);
break;
}
}
float* indicesATomar_CPU = (float*) malloc(cantCoefsParaCota*sizeof(float));
for(int k=0; k<cantCoefsParaCota; k++)
{
indicesATomar_CPU[k] = MC_modulo_indicesOrde_CPU[k];
}
af::array indicesATomar_GPU(cantCoefsParaCota, indicesATomar_CPU);
free(indicesATomar_CPU);
af::array indRepComp = af::constant(0, largo);
indRepComp(indicesATomar_GPU) = 1;
indicesATomar_GPU.unlock();
af::array MC_imag_GPU(N*N, MC_imag);
af::array MC_real_GPU(N*N, MC_real);
MC_imag_GPU = MC_imag_GPU * indRepComp;
MC_real_GPU = MC_real_GPU * indRepComp;
af::eval(MC_imag_GPU);
af::eval(MC_real_GPU);
af::sync();
indRepComp.unlock();
float* auxiliar_MC_imag_GPU = MC_imag_GPU.device<float>();
float* auxiliar_MC_real_GPU = MC_real_GPU.device<float>();
hipMemcpy(MC_comp_imag, auxiliar_MC_imag_GPU, N*N*sizeof(float), hipMemcpyDeviceToHost);
MC_imag_GPU.unlock();
hipMemcpy(MC_comp_real, auxiliar_MC_real_GPU, N*N*sizeof(float), hipMemcpyDeviceToHost);
MC_real_GPU.unlock();
float* estimacionFourier_compre_ParteImag = estimacionDePlanoDeFourier(MV_AF, N, N, MC_comp_imag, N, N, MU_AF);
float* estimacionFourier_compre_ParteReal = estimacionDePlanoDeFourier(MV_AF, N, N, MC_comp_real, N, N, MU_AF);
int numero = j+1;
char* numComoString = numAString(&numero);
sprintf(numComoString, "%d", numero);
char* nombreArchivoReconsImgComp = (char*) malloc(sizeof(char)*strlen(nombreDirPrin)*strlen(numComoString)*strlen(nombreArReconsCompreImg)+sizeof(char)*7);
strcpy(nombreArchivoReconsImgComp, nombreDirPrin);
strcat(nombreArchivoReconsImgComp, "/");
strcat(nombreArchivoReconsImgComp, nombreArReconsCompreImg);
strcat(nombreArchivoReconsImgComp, "_");
strcat(nombreArchivoReconsImgComp, numComoString);
strcat(nombreArchivoReconsImgComp, ".fit");
printf("%s\n", nombreArchivoReconsImgComp);
escribirTransformadaInversaFourier2D(estimacionFourier_compre_ParteImag, estimacionFourier_compre_ParteReal, N, nombreArchivoReconsImgComp);
hipFree(estimacionFourier_compre_ParteImag);
hipFree(estimacionFourier_compre_ParteReal);
free(numComoString);
free(nombreArchivoReconsImgComp);
}
hipFree(MU_AF);
hipFree(MV_AF);
free(coefsNormalizados);
free(MC_modulo_indicesOrde_CPU);
}
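// Stand-alone analysis helper: loads a PSNR-vs-compression curve from a hard-coded path,
// smooths it with a GSL Gaussian filter, and for every split point compares the slopes of
// the two fitted line segments (calPendiente) as a knee metric. It is not called from the
// main() defined below.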
void filtroGaussiano()
{
int largoVector = 100;
float* porcenReal = (float*) malloc(sizeof(float)*largoVector);
float* vector = (float*) malloc(sizeof(float)*largoVector);
long contador = 0;
FILE *fp;
size_t len = 0;
char *line = NULL;
ssize_t read;
fp = fopen("/home/rarmijo/psnr_hd142_rect.txt", "r");
if (fp == NULL)
{
printf("No se pudo abrir el archivo");
exit(0);
}
while ((read = getline(&line, &len, fp)) != -1)
{
porcenReal[largoVector-1-contador] = atof(strtok(line, " "));
strtok(NULL, " ");
vector[contador] = atof(strtok(NULL, " "));
contador++;
}
printf("El contador es %ld\n", contador);
free(line);
fclose(fp);
// for(int i=0; i<largoVector; i++)
// {
// printf("%f\n", porcenReal[i]);
// }
// exit(-1);
float* vectorFiltrado = (float*) calloc(largoVector, sizeof(float));
gsl_vector* copiaVectorEnGSL = gsl_vector_alloc(largoVector);
gsl_vector* vectorEnGSLFiltrado = gsl_vector_alloc(largoVector);
for(int i=0; i<largoVector; i++)
{
gsl_vector_set(copiaVectorEnGSL, i, vector[largoVector-1-i]);
}
gsl_filter_gaussian_workspace* gauss_p = gsl_filter_gaussian_alloc(largoVector);
gsl_filter_gaussian(GSL_FILTER_END_TRUNCATE, 1.0, 0, copiaVectorEnGSL, vectorEnGSLFiltrado, gauss_p);
for(int i=0; i<largoVector; i++)
{
vectorFiltrado[i] = gsl_vector_get(vectorEnGSLFiltrado, i); // read the filtered output, not the unfiltered copy
}
gsl_vector_free(copiaVectorEnGSL);
gsl_vector_free(vectorEnGSLFiltrado);
gsl_filter_gaussian_free(gauss_p);
float* listaDeMetricas = (float*) malloc(sizeof(float)*largoVector);
float* primeraRecta_subListaDeX = (float*) calloc(largoVector, sizeof(float));
float* primeraRecta_subListaDeY = (float*) calloc(largoVector, sizeof(float));
float* segundaRecta_subListaDeX = (float*) calloc(largoVector, sizeof(float));
float* segundaRecta_subListaDeY = (float*) calloc(largoVector, sizeof(float));
memcpy(segundaRecta_subListaDeX, porcenReal, sizeof(float)*largoVector);
memcpy(segundaRecta_subListaDeY, vectorFiltrado, sizeof(float)*largoVector);
primeraRecta_subListaDeX[0] = porcenReal[0];
primeraRecta_subListaDeY[0] = vectorFiltrado[0];
for(int i=1; i<largoVector-1; i++)
{
primeraRecta_subListaDeX[i] = porcenReal[i];
primeraRecta_subListaDeY[i] = vectorFiltrado[i];
float pendienteDePrimeraRecta = calPendiente(primeraRecta_subListaDeX, i+1, primeraRecta_subListaDeY);
// printf("En la iteracion %d la pendienteDePrimeraRecta es %f\n", i, pendienteDePrimeraRecta);
segundaRecta_subListaDeX[i-1] = 0.0;
segundaRecta_subListaDeY[i-1] = 0.0;
float pendienteDeSegundaRecta = calPendiente(&(segundaRecta_subListaDeX[i]), largoVector-i, &(segundaRecta_subListaDeY[i]));
// printf("En la iteracion %d la pendienteDeSegundaRecta es %f\n", i, pendienteDeSegundaRecta);
listaDeMetricas[i] = -1 * pendienteDeSegundaRecta/pendienteDePrimeraRecta;
printf("%f\n", listaDeMetricas[i]);
}
free(primeraRecta_subListaDeX);
free(primeraRecta_subListaDeY);
free(segundaRecta_subListaDeX);
free(segundaRecta_subListaDeY);
}
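// main(): reads the visibility count and the visibilities exported from the measurement set,
// fills the auxiliary matrices of ones, and runs the basis-width sweep for the normal basis
// (the rectangular-basis call is left commented out), writing the total execution time at the end.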
int main()
{
// GENERAL PARAMETERS
long cantVisi = 15034;
long inicio = 0;
long fin = 15034;
// long cantVisi = 30000;
// long inicio = 0;
// long fin = 30000;
int N = 512;
// long N = 1600; //HLTau_B6cont.calavg.tav300s
int maxIter = 100;
float tolGrad = 1E-12;
float delta_x = 0.02;
// float delta_x = 0.005; //HLTau_B6cont.calavg.tav300s
// float delta_x = 0.03; //co65
float delta_x_rad = (delta_x * M_PI)/648000.0;
float delta_u = 1.0/(N*delta_x_rad);
float delta_v = 1.0/(N*delta_x_rad);
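// delta_x is the pixel size in arcseconds; delta_x_rad converts it to radians, and
// delta_u = delta_v = 1/(N*delta_x_rad) is the resulting cell size of the gridded
// Fourier plane, expressed in wavelengths.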
// PARAMETERS SPECIFIC TO THE RECT BASIS
float estrechezDeBorde = 1000.0;
// float frecuencia;
// float *u, *v, *w, *visi_parteImaginaria, *visi_parteReal;
// hipMallocManaged(&u, cantVisi*sizeof(float));
// hipMallocManaged(&v, cantVisi*sizeof(float));
// hipMallocManaged(&w, cantVisi*sizeof(float));
// hipMallocManaged(&visi_parteImaginaria, cantVisi*sizeof(float));
// hipMallocManaged(&visi_parteReal, cantVisi*sizeof(float));
// char nombreArchivo[] = "hd142_b9cont_self_tav.0.0.txt";
// lecturaDeTXT(nombreArchivo, &frecuencia, u, v, w, visi_parteImaginaria, visi_parteReal, cantVisi);
// // ########### NOTEBOOK ##############
// char nombreArchivo[] = "/home/yoyisaurio/Desktop/HLTau_B6cont.calavg.tav300s";
// char comandoCasaconScript[] = "/home/yoyisaurio/casa-pipeline-release-5.6.2-2.el7/bin/casa -c /home/yoyisaurio/Desktop/proyecto/deMSaTXT.py";
// // ########### PC-LAB ##############
// char nombreArchivo[] = "/home/rarmijo/Desktop/proyecto/HLTau_B6cont.calavg.tav300s";
// char comandoCasaconScript[] = "/home/rarmijo/casa-pipeline-release-5.6.2-2.el7/bin/casa -c ./deMSaTXT.py";
// // ########### PC-LAB ##############
// char nombreArchivo[] = "./co65.ms";
// char comandoCasaconScript[] = "/home/rarmijo/casa-pipeline-release-5.6.2-2.el7/bin/casa -c ./deMSaTXT.py";
// // ########### BEAM ##############
// char nombreArchivo[] = "./HLTau_B6cont.calavg.tav300s";
// char comandoCasaconScript[] = "casa -c ./deMSaTXT.py";
// // ########### BEAM ##############
// char nombreArchivo[] = "./FREQ78.ms";
// char comandoCasaconScript[] = "casa -c ./deMSaTXT.py";
// // // ########### BEAM ##############
// char nombreArchivo[] = "./co65.ms";
// char comandoCasaconScript[] = "casa -c ./deMSaTXT.py";
// ########### BEAM ##############
char nombreArchivo[] = "./hd142_b9cont_self_tav.ms";
char comandoCasaconScript[] = "casa -c ./deMSaTXT.py";
// // ########### BEAM ##############
// char nombreArchivo[] = "/home/rarmijo/HLTau_Band6_CalibratedData/HLTau_B6cont.calavg";
// char comandoCasaconScript[] = "casa -c ./deMSaTXT.py";
// char* comandoScriptMSaTXT = (char*) malloc(strlen(comandoCasaconScript)*strlen(nombreArchivo)*sizeof(char)+sizeof(char)*3);
// strcpy(comandoScriptMSaTXT, comandoCasaconScript);
// strcat(comandoScriptMSaTXT, " ");
// strcat(comandoScriptMSaTXT, nombreArchivo);
// system(comandoScriptMSaTXT);
// free(comandoScriptMSaTXT);
lectCantVisi(nombreArchivo, &cantVisi);
float *u, *v, *w, *visi_parteImaginaria, *visi_parteReal;
hipMallocManaged(&u, cantVisi*sizeof(float));
hipMallocManaged(&v, cantVisi*sizeof(float));
hipMallocManaged(&w, cantVisi*sizeof(float));
hipMallocManaged(&visi_parteImaginaria, cantVisi*sizeof(float));
hipMallocManaged(&visi_parteReal, cantVisi*sizeof(float));
lectDeTXTcreadoDesdeMS(nombreArchivo, u, v, w, visi_parteImaginaria, visi_parteReal);
// lectDeTXTcreadoDesdeMSConLimite(nombreArchivo, u, v, w, visi_parteImaginaria, visi_parteReal, inicio, fin, cantVisi);
float* matrizDeUnos, *matrizDeUnosEstFourier;
hipMallocManaged(&matrizDeUnos, cantVisi*N*sizeof(float));
for(long i=0; i<(cantVisi*N); i++)
{
matrizDeUnos[i] = 1.0;
}
hipMallocManaged(&matrizDeUnosEstFourier, N*sizeof(float));
for(long i=0; i<N; i++)
{
matrizDeUnosEstFourier[i] = 1.0;
}
float cotaEnergia = 0.99;
// char nombreDirPrin[] = "float_calCompresion_baseNormal_cota";
char nombreDirPrin[] = "experi_hd142_Normal_visi800_parte2";
char nombreDirSec[] = "ite";
char nombreDirTer[] = "compresiones";
char nombreArchivoTiempo[] = "tiempo.txt";
int cantParamEvaInfo = 800;
// float inicioIntervalo = 0.8;
float inicioIntervalo = 1.0;
float finIntervalo = 3.0;
float tolGolden = 1E-12;
int iterActual = 0;
clock_t t;
t = clock();
calculoDeInfoCompre_BaseNormal(nombreArchivo, maxIter, tolGrad, tolGolden, u, v, w, visi_parteImaginaria, visi_parteReal, delta_u, delta_v, cantVisi, N, cotaEnergia, nombreDirPrin, nombreDirSec, nombreDirTer, cantParamEvaInfo, inicioIntervalo, finIntervalo, matrizDeUnosEstFourier, estrechezDeBorde);
// calculoDeInfoCompre_BaseRect(nombreArchivo, maxIter, tolGrad, tolGolden, u, v, w, visi_parteImaginaria, visi_parteReal, delta_u, delta_v, matrizDeUnos, cantVisi, N, cotaEnergia, nombreDirPrin, nombreDirSec, nombreDirTer, cantParamEvaInfo, inicioIntervalo, finIntervalo, matrizDeUnosEstFourier, estrechezDeBorde);
t = clock() - t;
float time_taken = ((float)t)/CLOCKS_PER_SEC;
char* nombreCompletoArchivoTiempo = (char*) malloc(sizeof(char)*strlen(nombreArchivoTiempo)*strlen(nombreDirPrin)+sizeof(char)*3);
strcpy(nombreCompletoArchivoTiempo, nombreDirPrin);
strcat(nombreCompletoArchivoTiempo, "/");
strcat(nombreCompletoArchivoTiempo, nombreArchivoTiempo);
FILE* archivoTiempo = fopen(nombreCompletoArchivoTiempo, "w");
float minutitos = time_taken/60;
float horas = minutitos/60;
printf("El tiempo de ejecucion fue %.12f segundos o %.12f minutos o %.12f horas.\n", time_taken, minutitos, horas);
fprintf(archivoTiempo, "El tiempo de ejecucion fue %.12f segundos o %.12f minutos o %.12f horas.\n", time_taken, minutitos, horas);
fclose(archivoTiempo);
// // char nombreDirPrin[] = "calCompresiones_Normal";
// // char nombreArchivoTiempo[] = "tiempo.txt";
// // int cantParamEvaInfo = 100;
// // float inicioIntervalo = 1.0;
// // float finIntervalo = 100.0;
// // float tolGolden = 1E-12;
// // float nuevoAncho = 1.0 * delta_u;
// // clock_t t;
// // t = clock();
// // calPSNRDeDistintasCompresiones_Normal(inicioIntervalo, finIntervalo, cantParamEvaInfo, nombreDirPrin, nuevoAncho, maxIter, tolGrad, u, v, w, visi_parteImaginaria, visi_parteReal, delta_u, delta_v, cantVisi, N, matrizDeUnosEstFourier, estrechezDeBorde);
// // // calPSNRDeDistintasCompresiones_Rect(inicioIntervalo, finIntervalo, cantParamEvaInfo, nombreDirPrin, nuevoAncho, maxIter, tolGrad, u, v, w, visi_parteImaginaria, visi_parteReal, delta_u, delta_v, matrizDeUnos, cantVisi, N, matrizDeUnosEstFourier, estrechezDeBorde);
// // // calImagenesADistintasCompresiones_Rect(inicioIntervalo, finIntervalo, cantParamEvaInfo, nombreDirPrin, nuevoAncho, maxIter, tolGrad, u, v, w, visi_parteImaginaria, visi_parteReal, delta_u, delta_v, matrizDeUnos, cantVisi, N, matrizDeUnosEstFourier, estrechezDeBorde);
// // t = clock() - t;
// // float time_taken = ((float)t)/CLOCKS_PER_SEC;
// // char* nombreCompletoArchivoTiempo = (char*) malloc(sizeof(char)*strlen(nombreArchivoTiempo)*strlen(nombreDirPrin)+sizeof(char)*3);
// // strcpy(nombreCompletoArchivoTiempo, nombreDirPrin);
// // strcat(nombreCompletoArchivoTiempo, "/");
// // strcat(nombreCompletoArchivoTiempo, nombreArchivoTiempo);
// // FILE* archivoTiempo = fopen(nombreCompletoArchivoTiempo, "w");
// // float minutitos = time_taken/60;
// // float horas = minutitos/60;
// // printf("El tiempo de ejecucion fue %.12f segundos o %.12f minutos o %.12f horas.\n", time_taken, minutitos, horas);
// // fprintf(archivoTiempo, "El tiempo de ejecucion fue %.12f segundos o %.12f minutos o %.12f horas.\n", time_taken, minutitos, horas);
// // fclose(archivoTiempo);
//
//
// // printf("...Comenzando calculo de MV...\n");
// // clock_t tiempoCalculoMV;
// // tiempoCalculoMV = clock();
// // float* MV = calcularMV_Rect(v, delta_v, cantVisi, N, estrechezDeBorde, delta_v, matrizDeUnos);
// // tiempoCalculoMV = clock() - tiempoCalculoMV;
// // float tiempoTotalCalculoMV = ((float)tiempoCalculoMV)/CLOCKS_PER_SEC;
// // printf("Calculo de MV completado.\n");
// //
// // printf("...Comenzando calculo de MU...\n");
// // clock_t tiempoCalculoMU;
// // tiempoCalculoMU = clock();
// // float* MU = calcularMV_Rect(u, delta_u, cantVisi, N, estrechezDeBorde, delta_u, matrizDeUnos);
// // tiempoCalculoMU = clock() - tiempoCalculoMU;
// // float tiempoTotalCalculoMU = ((float)tiempoCalculoMU)/CLOCKS_PER_SEC;
// // printf("Calculo de MU completado.\n");
// //
// // int blockSize; // The launch configurator returned block size
// // int minGridSize; // The minimum grid size needed to achieve the
// // // maximum occupancy for a full device launch
// // int gridSize; // The actual grid size needed, based on input size
// //
// // hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, transponerMatriz_kernel, 0, 0);
// // // Round up according to array size
// // gridSize = (cantVisi*N + blockSize - 1) / blockSize;
// //
// // // long cantBloques = ceil((float) cantFilas*N/1024);
// // // hadamardProduct_kernel<<<gridSize,blockSize>>>(MU, MV, matrizDeUnos, cantVisi, N);
// // // combinacionLinealMatrices_kernel<<<gridSize,blockSize>>>(5.0, MU, cantVisi, N, 5.0, MV);
// // transponerMatriz_kernel<<<gridSize,blockSize>>>(MU, matrizDeUnos, cantVisi, N);
// // hipDeviceSynchronize();
// //
// // // calculate theoretical occupancy
// // int maxActiveBlocks;
// // hipOccupancyMaxActiveBlocksPerMultiprocessor(&maxActiveBlocks, transponerMatriz_kernel, blockSize, 0);
// //
// // int device;
// // hipDeviceProp_t props;
// // hipGetDevice(&device);
// // hipGetDeviceProperties(&props, device);
// //
// // float occupancy = (maxActiveBlocks * blockSize / props.warpSize) /
// // (float)(props.maxThreadsPerMultiProcessor /
// // props.warpSize);
// //
// // printf("Launched blocks of size %d. Theoretical occupancy: %f\n",
// // blockSize, occupancy);
//
// hipFree(u);
// hipFree(v);
// hipFree(w);
// hipFree(visi_parteImaginaria);
// hipFree(visi_parteReal);
// hipFree(matrizDeUnos);
// hipFree(matrizDeUnosEstFourier);
}
| respaldo.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "cublas_v2.h"
#include <cuda_runtime.h>
#include <float.h>
#include <math.h>
#include <sys/time.h>
#include <cblas.h>
#include <f77blas.h>
#include <pthread.h>
#include <assert.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <time.h>
#include <arrayfire.h>
#include <af/cuda.h>
#include <fitsio.h>
#include <cublasXt.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_vector.h>
#include <gsl/gsl_filter.h>
#include <gsl/gsl_min.h>
// [email protected]
//nvcc otroconfloat.cu -lcudart -lcublas -lcuda -lblasx -I/opt/arrayfire/include/ -L/opt/arrayfire/lib64/ -lafcuda -lcfitsio -o otroconfloat
/* sudo scp /home/yoyisaurio/Desktop/juguetes\ de\ CUDA/otroconfloat.cu [email protected]:/home/rarmijo/Desktop/ */
// nvcc calCompreInfo.cu -lcudart -lcublas -lcuda -lblasx -I/opt/arrayfire/include/ -L/opt/arrayfire/lib64/ -lafcuda -lcfitsio -lgsl -lgslcblas -lm -o calCompreInfo
// sudo scp /home/yoyisaurio/Desktop/proyecto/calCompreInfo.cu [email protected]:/home/rarmijo
// nvcc calCompreInfo.cu -lcudart -lcublas -lcuda -lblasx -I/opt/arrayfire/include/ -L/opt/arrayfire/lib64/ -lafcuda -lcfitsio -lgsl -lgslcblas -lm -o calCompreInfo
// ./calCompreInfo
// sudo scp /home/yoyisaurio/Desktop/proyecto/nuevo.cu [email protected]:/home/rarmijo/Desktop/proyecto
// sudo scp [email protected]:/home/rarmijo/float_calCompresion_baseNormal_cota99/ite0/reconsImg.fit /home/yoyisaurio/Desktop/ds9/reconsImg.fit
// sudo scp [email protected]:/home/rarmijo/Desktop/proyecto/float_calCompresion_baseNormal_cota99/ite0/reconsImg.fit /home/yoyisaurio/Desktop/ds9/nuevito.fit
// nvcc nuevo.cu -lcudart -lcublas -lcuda -lblasx -I/opt/arrayfire/include/ -L/opt/arrayfire/lib64/ -lafcuda -lcfitsio -o nuevo
struct parametros_BaseRect
{
float* u;
float* v;
float* w;
float delta_u;
float delta_v;
float* matrizDeUnos;
long cantVisi;
long N;
float estrechezDeBorde;
};
struct parametros_BaseNormal
{
float* u;
float* v;
float* w;
float delta_u;
float delta_v;
long cantVisi;
long N;
};
static int Stopping_Rule(float x0, float x1, float tolerance);
#define sqrt5 2.236067977499789696
char* numAString(int* numero)
{
  // Digits needed for the value plus one byte for the null terminator;
  // the previous (*numero)/10 + 1 under-allocated for single-digit numbers.
  int cantCarac = snprintf(NULL, 0, "%d", *numero) + 1;
  char* numComoString = (char*) malloc(sizeof(char)*cantCarac);
return numComoString;
}
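// Least-squares slope of y as a function of x (simple linear regression over largoDeX points).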
float calPendiente(float* x, int largoDeX, float* y)
{
float sumadeYs = 0.0;
float sumadeXs = 0.0;
float sumaDeLosCuadradosdeXs = 0.0;
float sumaDeMultdeXsconYs = 0.0;
for(int i=0; i<largoDeX; i++)
{
float xActual = x[i];
float yActual = y[i];
sumadeYs += yActual;
sumadeXs += xActual;
sumaDeMultdeXsconYs += xActual * yActual;
sumaDeLosCuadradosdeXs += xActual * xActual;
}
float cuadradoDeLaSumadeXs = sumadeXs * sumadeXs;
float numerador = largoDeX * sumaDeMultdeXsconYs - sumadeXs * sumadeYs;
float denominador = largoDeX * sumaDeLosCuadradosdeXs - cuadradoDeLaSumadeXs;
return numerador/denominador;
}
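// Returns n evenly spaced values in [a, b], allocated with cudaMallocManaged; the caller must cudaFree the result.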
float* linspace(float a, float b, long n)
{
float c;
int i;
float* u;
cudaMallocManaged(&u, n*sizeof(float));
c = (b - a)/(n - 1);
for(i = 0; i < n - 1; ++i)
u[i] = a + i*c;
u[n - 1] = b;
return u;
}
void imprimirVector(float* lista, int tamanoLista)
{
int i;
for(i=0;i<tamanoLista;i++)
{
printf("%f\n",lista[i]);
}
printf("\n");
}
void imprimirMatrizColumna(float* vector, long cantFilas, long cantColumnas)
{
long i,j;
for(i=0;i<cantFilas;i++)
{
for(j=0;j<cantColumnas;j++)
{
printf("%.12e ", vector[(((j)*(cantFilas))+(i))]);
}
printf("\n");
}
printf("\n");
}
void imprimirMatrizPura(float* matriz, int cantFilas, int cantColumnas)
{
for(int i=0; i<cantFilas; i++)
{
for(int j=0; j<cantColumnas; j++)
{
printf("%f ", matriz[i*cantColumnas+j]);
}
printf("\n");
}
printf("\n");
}
void escribirCoefs(float* coefs, char* nombreArchivo, long cantFilas, long cantColumnas)
{
FILE* archivo = fopen(nombreArchivo, "w");
for(long i=0;i<cantFilas;i++)
{
for(long j=0;j<cantColumnas;j++)
{
fprintf(archivo, "%.12e ", coefs[(((j)*(cantFilas))+(i))]);
}
fprintf(archivo, "\n");
}
fclose(archivo);
}
float** crearMatrizDouble(int cantFilas, int cantColumnas)
{
float** matriz = (float**) calloc(cantFilas, sizeof(float*));
int i;
for(i=0;i<cantFilas;i++)
{
matriz[i] = (float*) calloc(cantColumnas, sizeof(float));
}
return matriz;
}
void inicializarMatriz(float** matriz, int cantFilas, int cantColumnas)
{
int i;
int j;
int contador = 0;
for(i=0;i<cantFilas;i++)
{
for(j=0;j<cantColumnas;j++)
{
matriz[i][j] = contador;
contador++;
}
}
}
float* transformarMatrizAMatrizColumna(float** matriz, int cantFilas, int cantColumnas)
{
float* nuevoVector = (float*) calloc(cantFilas*cantColumnas,sizeof(float));
int i,j;
for(j=0;j<cantColumnas;j++)
{
for(i=0;i<cantFilas;i ++)
{
nuevoVector[(((j)*(cantFilas))+(i))]= matriz[i][j];
}
}
return nuevoVector;
}
float** transformarMatrizColumnaAMatriz(float* matrizColumna, int cantFilas, int cantColumnas)
{
float** matriz = crearMatrizDouble(cantFilas,cantColumnas);
int i,j;
for(j=0;j<cantColumnas;j++)
{
for(i=0;i<cantFilas;i ++)
{
matriz[i][j] = matrizColumna[(((j)*(cantFilas))+(i))];
}
}
return matriz;
}
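// Column-major GEMM c = a * b (a: m x k, b: k x n) using cublasXt on device 0, followed by a scan of the result for NaN values.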
void multMatrices(float* a, long m, long k, float* b, long n, float* c)
{
cudaError_t cudaStat;
cublasStatus_t stat;
cublasXtHandle_t handle;
stat = cublasXtCreate(&handle);
int devices[1] = { 0 };
if(cublasXtDeviceSelect(handle, 1, devices) != CUBLAS_STATUS_SUCCESS)
{
printf("set devices fail\n");
}
float al = 1.0;
float bet = 0.0;
stat = cublasXtSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N,m,n,k,&al,a,m,b,k,&bet,c,m);
cudaDeviceSynchronize();
for(long i=0; i<m*n;i++)
{
if(isnan(c[i]))
{
printf("Valor nan encontrado en multMatrices.\n");
break;
}
}
cublasXtDestroy(handle);
}
// void multMatrices(float* a, long m, long k, float* b, long n, float* c)
// {
// cudaError_t cudaStat;
// cublasStatus_t stat;
// cublasHandle_t handle;
// stat = cublasCreate(&handle);
// float al = 1.0;
// float bet = 0.0;
// stat = cublasSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N,m,n,k,&al,a,m,b,k,&bet,c,m);
// cudaDeviceSynchronize();
// for(long i=0; i<m*n;i++)
// {
// if(isnan(c[i]))
// {
// printf("Valor nan encontrado en multMatrices.\n");
// break;
// }
// }
// cublasDestroy(handle);
// }
// void combinacionLinealMatrices(float al, float* a, long m, long k, float bet, float* c)
// {
// long n = k;
// cudaError_t cudaStat;
// cublasStatus_t stat;
// cublasXtHandle_t handle;
// float* b;
// cudaMallocManaged(&b, k*n*sizeof(float));
// cudaMemset(b, 0, k*n*sizeof(float));
// for(int i=0; i<n; i++)
// {
// b[(i*n+i)] = 1.0;
// }
// stat = cublasXtCreate(&handle);
// int devices[1] = { 0 };
// if(cublasXtDeviceSelect(handle, 1, devices) != CUBLAS_STATUS_SUCCESS)
// {
// printf("set devices fail\n");
// }
// stat = cublasXtSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N,m,n,k,&al,a,m,b,k,&bet,c,m);
// cudaDeviceSynchronize();
// for(long i=0; i<m*n;i++)
// {
// if(isnan(c[i]))
// {
// printf("Valor nan encontrado en combLinealMatrices.\n");
// break;
// }
// }
// cudaFree(b);
// cublasXtDestroy(handle);
// }
__global__ void multMatrizPorConstante_kernel(float* matrizA, long cantFilas, long cantColumnas, float constante)
{
long miId = threadIdx.x + blockDim.x * blockIdx.x;
if(miId < cantFilas*cantColumnas)
{
matrizA[miId] = constante * matrizA[miId];
}
}
void multMatrizPorConstante(float* matrizA, long cantFilasMatrizA, long cantColumnasMatrizA, float constante)
{
long cantBloques = ceil((float) cantFilasMatrizA*cantColumnasMatrizA/1024);
multMatrizPorConstante_kernel<<<cantBloques,1024>>>(matrizA, cantFilasMatrizA, cantColumnasMatrizA, constante);
cudaDeviceSynchronize();
}
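// Elementwise in-place update matrizB = al * matrizA + bet * matrizB (an axpby over the whole matrix).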
__global__ void combinacionLinealMatrices_kernel(float al, float* matrizA, long cantFilas, long cantColumnas, float bet, float* matrizB)
{
long miId = threadIdx.x + blockDim.x * blockIdx.x;
if(miId < cantFilas*cantColumnas)
{
matrizB[miId] = al * matrizA[miId] + bet * matrizB[miId];
}
}
void combinacionLinealMatrices(float al, float* matrizA, long cantFilas, long cantColumnas, float bet, float* matrizB)
{
long cantBloques = ceil((float) cantFilas*cantColumnas/1024);
combinacionLinealMatrices_kernel<<<cantBloques,1024>>>(al, matrizA, cantFilas, cantColumnas, bet, matrizB);
cudaDeviceSynchronize();
}
// void combinacionLinealMatrices(float al, float* a, long m, long k, float bet, float* c)
// {
// long n = k;
// cudaError_t cudaStat;
// cublasStatus_t stat;
// cublasHandle_t handle;
// float* b;
// cudaMallocManaged(&b, k*n*sizeof(float));
// cudaMemset(b, 0, k*n*sizeof(float));
// for(int i=0; i<n; i++)
// {
// b[(i*n+i)] = 1.0;
// }
// stat = cublasCreate(&handle);
// stat = cublasSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N,m,n,k,&al,a,m,b,k,&bet,c,m);
// cudaDeviceSynchronize();
// for(long i=0; i<m*n;i++)
// {
// if(isnan(c[i]))
// {
// printf("Valor nan encontrado en combLinealMatrices.\n");
// break;
// }
// }
// cudaFree(b);
// cublasDestroy(handle);
// }
// void transponerMatriz(float* matriz, int cantFilas, int cantColumnas, float* matrizTranspuesta)
// {
// for(int i=0;i<cantFilas;i++)
// {
// for(int j=0;j<cantColumnas;j++)
// {
// matrizTranspuesta[(((i)*(cantColumnas))+(j))] = matriz[(((j)*(cantFilas))+(i))];
// }
// }
// }
// __global__ void transponerMatriz_kernel(float* matrizA, float* matrizA_T, long cantFilas, long cantColumnas)
// {
// long miId = threadIdx.x + blockDim.x * blockIdx.x * blockDim.x * blockDim.y + blockIdx.y * gridDim.x * blockDim.x * blockDim.y;
// if(miId < cantFilas*cantColumnas)
// {
// long i = miId%cantFilas;
// long j = miId/cantFilas;
// matrizA_T[(i*cantColumnas+j)] = matrizA[(j*cantFilas+i)];
// }
// }
__global__ void transponerMatriz_kernel(float* matrizA, float* matrizA_T, long cantFilas, long cantColumnas)
{
long miId = threadIdx.x + blockDim.x * blockIdx.x;
if(miId < cantFilas*cantColumnas)
{
long i = miId%cantFilas;
long j = miId/cantFilas;
matrizA_T[(i*cantColumnas+j)] = matrizA[(j*cantFilas+i)];
}
}
void transponerMatriz(float* matrizA, long cantFilasMatrizA, long cantColumnasMatrizA, float* resultado)
{
long cantBloques = ceil((float) cantFilasMatrizA*cantColumnasMatrizA/1024);
transponerMatriz_kernel<<<cantBloques,1024>>>(matrizA, resultado, cantFilasMatrizA, cantColumnasMatrizA);
cudaDeviceSynchronize();
}
__global__ void restaVectorColumnaConVector_kernel(float* vectorA, long largoVectorA, float* vectorB, long largoVectorB, float* resultado)
{
long miId = threadIdx.x + blockDim.x * blockIdx.x;
if(miId < largoVectorA*largoVectorB)
{
long i = miId%largoVectorA;
long j = miId/largoVectorA;
resultado[miId] = vectorA[i] - vectorB[j];
}
}
float* restaVectorColumnaConVector(float* vectorA, long largoVectorA, float* vectorB, long largoVectorB)
{
float* resultado;
cudaMallocManaged(&resultado,largoVectorA*largoVectorB*sizeof(float));
long cantBloques = ceil((float) largoVectorA*largoVectorB/1024);
restaVectorColumnaConVector_kernel<<<cantBloques,1024>>>(vectorA, largoVectorA, vectorB, largoVectorB, resultado);
cudaDeviceSynchronize();
return resultado;
}
void vectorColumnaAMatriz(float* vectorA, long cantFilas, long cantColumnas, float* nuevaMatriz)
{
float* vectorDeUnos;
cudaMallocManaged(&vectorDeUnos,cantColumnas*sizeof(float));
for(long i=0; i<cantColumnas; i++)
{
vectorDeUnos[i] = 1.0;
}
multMatrices(vectorA, cantFilas, 1, vectorDeUnos, cantColumnas, nuevaMatriz);
cudaFree(vectorDeUnos);
}
__global__ void hadamardProduct_kernel(float* matrizA, float* matrizB, float* resultado, long cantFilas, long cantColumnas)
{
long miId = threadIdx.x + blockDim.x * blockIdx.x;
if(miId < cantFilas*cantColumnas)
{
resultado[miId] = matrizA[miId]*matrizB[miId];
}
}
void hadamardProduct(float* matrizA, long cantFilasMatrizA, long cantColumnasMatrizA, float* matrizB, float* resultado)
{
long cantBloques = ceil((float) cantFilasMatrizA*cantColumnasMatrizA/1024);
hadamardProduct_kernel<<<cantBloques,1024>>>(matrizA, matrizB, resultado, cantFilasMatrizA, cantColumnasMatrizA);
cudaDeviceSynchronize();
}
float dotProduct(float* x, long n, float* y)
{
cudaError_t cudaStat;
cublasStatus_t stat;
cublasHandle_t handle;
stat = cublasCreate(&handle);
float result;
stat = cublasSdot(handle,n,x,1,y,1,&result);
cublasDestroy(handle);
return result;
}
__global__ void calcularExp_kernel(float* a, float* c, long cantFilas, long cantColumnas)
{
long miId = threadIdx.x + blockDim.x * blockIdx.x;
if(miId < cantFilas*cantColumnas)
{
c[miId] = exp(a[miId]);
}
}
void calcularExp(float* matrizA, long cantFilasMatrizA, long cantColumnasMatrizA)
{
long cantBloques = ceil((float) cantFilasMatrizA*cantColumnasMatrizA/1024);
calcularExp_kernel<<<cantBloques,1024>>>(matrizA, matrizA, cantFilasMatrizA, cantColumnasMatrizA);
cudaDeviceSynchronize();
}
__global__ void calcularInvFrac_kernel(float* a, float* c, long cantFilas, long cantColumnas)
{
long miId = threadIdx.x + blockDim.x * blockIdx.x;
if(miId < cantFilas*cantColumnas)
{
c[miId] = 1.0/a[miId];
}
}
void calcularInvFrac(float* matrizA, long cantFilasMatrizA, long cantColumnasMatrizA)
{
long cantBloques = ceil((float) cantFilasMatrizA*cantColumnasMatrizA/1024);
calcularInvFrac_kernel<<<cantBloques,1024>>>(matrizA, matrizA, cantFilasMatrizA, cantColumnasMatrizA);
cudaDeviceSynchronize();
}
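// Model visibilities: for each visibility k, visModelo_paso3[k] = sum_{i,j} MV[k,i] * MC[i,j] * MU[k,j],
// computed as the row-wise sum of MV .* (MC * MU^T)^T.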
void calVisModelo(float* MV, long cantFilasMV, long cantColumnasMV, float* MC, long cantColumnasMU, float* MU, float* matrizDeUnosTamN, float* visModelo_paso3)
{
float* MU_T;
cudaMallocManaged(&MU_T, cantFilasMV*cantColumnasMU*sizeof(float));
transponerMatriz(MU, cantFilasMV, cantColumnasMU, MU_T);
float* visModelo_paso1;
cudaMallocManaged(&visModelo_paso1, cantColumnasMV*cantFilasMV*sizeof(float));
cudaMemset(visModelo_paso1, 0, cantColumnasMV*cantFilasMV*sizeof(float));
multMatrices(MC, cantColumnasMV, cantColumnasMU, MU_T, cantFilasMV, visModelo_paso1);
cudaFree(MU_T);
float* transpuesta;
cudaMallocManaged(&transpuesta, cantColumnasMV*cantFilasMV*sizeof(float));
transponerMatriz(visModelo_paso1, cantColumnasMV, cantFilasMV, transpuesta);
cudaFree(visModelo_paso1);
float* visModelo_paso2;
cudaMallocManaged(&visModelo_paso2, cantFilasMV*cantColumnasMV*sizeof(float));
hadamardProduct(MV, cantFilasMV, cantColumnasMV, transpuesta, visModelo_paso2);
cudaFree(transpuesta);
multMatrices(visModelo_paso2, cantFilasMV, cantColumnasMV, matrizDeUnosTamN, 1, visModelo_paso3);
cudaFree(visModelo_paso2);
}
float* calResidual(float* visObs, float* MV, long cantFilasMV, long cantColumnasMV, float* MC, long cantColumnasMU, float* MU, float* matrizDeUnosTamN)
{
float* visModelo;
cudaMallocManaged(&visModelo, cantFilasMV*sizeof(float));
cudaMemset(visModelo, 0, cantFilasMV*sizeof(float));
calVisModelo(MV, cantFilasMV, cantColumnasMV, MC, cantColumnasMU, MU, matrizDeUnosTamN, visModelo);
combinacionLinealMatrices(-1.0, visObs, cantFilasMV, 1, 1.0, visModelo);
return visModelo;
}
float calCosto(float* residual, long cantVisi, float* w)
{
float* resultado;
cudaMallocManaged(&resultado, cantVisi*sizeof(float));
hadamardProduct(residual, cantVisi, 1, w, resultado);
float total = dotProduct(resultado, cantVisi, residual);
cudaFree(resultado);
return total;
}
__global__ void MultPorDifer_kernel(float* matrizA, float* matrizB, float* resultado, long cantFilas, long cantColumnas)
{
long miId = threadIdx.x + blockDim.x * blockIdx.x;
if(miId < cantFilas*cantColumnas)
{
long posicionEnB = miId%cantFilas;
resultado[miId] = matrizA[miId]*matrizB[posicionEnB];
}
}
void MultPorDifer(float* matrizA, long cantFilas, long cantColumnas, float* diferencias, float* resultado)
{
long cantBloques = ceil((float) cantFilas*cantColumnas/1024);
MultPorDifer_kernel<<<cantBloques,1024>>>(matrizA, diferencias, resultado, cantFilas, cantColumnas);
cudaDeviceSynchronize();
}
void calGradiente(float* residual, float* MV, long cantFilasMV, long cantColumnasMV, float* MU, long cantColumnasMU, float* w, float* total_paso2)
{
float* diferencia;
cudaMallocManaged(&diferencia, cantFilasMV*sizeof(float));
hadamardProduct(residual, cantFilasMV, 1, w, diferencia);
float* total_paso1;
cudaMallocManaged(&total_paso1, cantColumnasMV*cantFilasMV*sizeof(float));
MultPorDifer(MV, cantFilasMV, cantColumnasMV, diferencia, total_paso1);
cudaFree(diferencia);
float* total_paso1_5;
cudaMallocManaged(&total_paso1_5, cantColumnasMV*cantFilasMV*sizeof(float));
transponerMatriz(total_paso1, cantFilasMV, cantColumnasMV, total_paso1_5);
cudaFree(total_paso1);
multMatrices(total_paso1_5, cantColumnasMV, cantFilasMV, MU, cantColumnasMU, total_paso2);
cudaFree(total_paso1_5);
}
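// Line-search step along pActual: alpha = -(gradiente . pActual) / (pActual . gradP), where gradP is the
// gradient evaluated on the model visibilities generated by pActual; sets flag_NOESPOSIBLEMINIMIZAR
// when the denominator is zero.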
float calAlpha(float* gradiente, long cantFilasMC, long cantColumnasMC, float* pActual, float* MV, long cantFilasMV, long cantColumnasMV, float* MU, long cantColumnasMU, float* w, float* matrizDeUnosTamN, int* flag_NOESPOSIBLEMINIMIZAR)
{
float* gradienteNegativo;
cudaMallocManaged(&gradienteNegativo, cantFilasMC*cantColumnasMC*sizeof(float));
cudaMemset(gradienteNegativo, 0, cantFilasMC*cantColumnasMC*sizeof(float));
combinacionLinealMatrices(-1.0, gradiente, cantFilasMC, cantColumnasMC, 0.0, gradienteNegativo);
float numerador = dotProduct(gradienteNegativo, cantFilasMC*cantColumnasMC, pActual);
cudaFree(gradienteNegativo);
float* visModeloP;
cudaMallocManaged(&visModeloP, cantFilasMV*sizeof(float));
cudaMemset(visModeloP, 0, cantFilasMV*sizeof(float));
calVisModelo(MV, cantFilasMV, cantColumnasMV, pActual, cantColumnasMU, MU, matrizDeUnosTamN, visModeloP);
float* gradP;
cudaMallocManaged(&gradP, cantFilasMC * cantColumnasMC*sizeof(float));
cudaMemset(gradP, 0, cantFilasMC * cantColumnasMC*sizeof(float));
calGradiente(visModeloP, MV, cantFilasMV, cantColumnasMV, MU, cantColumnasMU, w, gradP);
cudaFree(visModeloP);
float denominador = dotProduct(pActual, cantFilasMC * cantColumnasMC, gradP);
cudaFree(gradP);
if(denominador == 0.0)
{
*flag_NOESPOSIBLEMINIMIZAR = 1;
}
return numerador/denominador;
}
float calBeta_Fletcher_Reeves(float* gradienteActual, long tamanoGradiente, float* gradienteAnterior)
{
float numerador = dotProduct(gradienteActual, tamanoGradiente, gradienteActual);
float denominador = dotProduct(gradienteAnterior, tamanoGradiente, gradienteAnterior);
float resultado = numerador/denominador;
return resultado;
}
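// Diagonal of the Fisher information matrix for the current basis: entry i is sum_k w[k] * MV[k,i]^2 * MU[k,i]^2.
// Returns a 2-element array: [sum of the diagonal, maximum diagonal entry].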
float* calInfoFisherDiag(float* MV, long cantFilasMV, long cantColumnasMV, float* MU, float* w)
{
float* MV_T;
cudaMallocManaged(&MV_T, cantFilasMV*cantColumnasMV*sizeof(float));
transponerMatriz(MV, cantFilasMV, cantColumnasMV, MV_T);
float* primeraMatriz_fase1;
cudaMallocManaged(&primeraMatriz_fase1, cantColumnasMV*cantFilasMV*sizeof(float));
hadamardProduct(MV_T, cantColumnasMV, cantFilasMV, MV_T, primeraMatriz_fase1);
cudaFree(MV_T);
float* wMatriz;
cudaMallocManaged(&wMatriz, cantFilasMV*cantColumnasMV*sizeof(float));
cudaMemset(wMatriz, 0, cantFilasMV*cantColumnasMV*sizeof(float));
vectorColumnaAMatriz(w, cantFilasMV, cantColumnasMV, wMatriz);
float* wmatriz_T;
cudaMallocManaged(&wmatriz_T, cantFilasMV*cantColumnasMV*sizeof(float));
transponerMatriz(wMatriz, cantFilasMV, cantColumnasMV, wmatriz_T);
cudaFree(wMatriz);
float* primeraMatriz_fase2;
cudaMallocManaged(&primeraMatriz_fase2, cantColumnasMV*cantFilasMV*sizeof(float));
hadamardProduct(primeraMatriz_fase1, cantColumnasMV, cantFilasMV, wmatriz_T, primeraMatriz_fase2);
cudaFree(primeraMatriz_fase1);
cudaFree(wmatriz_T);
float* MU_T;
cudaMallocManaged(&MU_T, cantFilasMV*cantColumnasMV*sizeof(float));
transponerMatriz(MU, cantFilasMV, cantColumnasMV, MU_T);
float* segundaMatriz;
cudaMallocManaged(&segundaMatriz, cantFilasMV*cantColumnasMV*sizeof(float));
hadamardProduct(MU_T, cantFilasMV, cantColumnasMV, MU_T, segundaMatriz);
cudaFree(MU_T);
float* resultado_fase1;
cudaMallocManaged(&resultado_fase1, cantColumnasMV*cantFilasMV*sizeof(float));
hadamardProduct(primeraMatriz_fase2, cantColumnasMV, cantFilasMV, segundaMatriz, resultado_fase1);
cudaFree(primeraMatriz_fase2);
cudaFree(segundaMatriz);
float* vectorDeUnos;
cudaMallocManaged(&vectorDeUnos, cantFilasMV*sizeof(float));
float* resultado_fase2;
cudaMallocManaged(&resultado_fase2, cantColumnasMV*sizeof(float));
cudaMemset(resultado_fase2, 0, cantColumnasMV*sizeof(float));
for(long i=0; i<cantFilasMV; i++)
{
vectorDeUnos[i] = 1;
}
multMatrices(resultado_fase1, cantColumnasMV, cantFilasMV, vectorDeUnos, 1, resultado_fase2);
cudaFree(resultado_fase1);
float medidaInfoMaximoDiagonal = 0.0;
for (long i=0; i<cantColumnasMV; i++)
{
if(resultado_fase2[i] > medidaInfoMaximoDiagonal)
medidaInfoMaximoDiagonal = resultado_fase2[i];
}
float medidaInfoSumaDiagonal = dotProduct(resultado_fase2, cantColumnasMV, vectorDeUnos);
cudaFree(vectorDeUnos);
cudaFree(resultado_fase2);
float* medidasDeInfo = (float*) malloc(sizeof(float)*2);
medidasDeInfo[0] = medidaInfoSumaDiagonal;
medidasDeInfo[1] = medidaInfoMaximoDiagonal;
return medidasDeInfo;
}
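// Evaluates the model on the full grid: returns MV * MC * MU^T, the N x N estimate of the Fourier plane.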
float* estimacionDePlanoDeFourier(float* MV, long cantFilasMV, long cantColumnasMV, float* MC, long cantFilasMC, long cantColumnasMC, float* MU)
{
float* MU_T;
cudaMallocManaged(&MU_T, cantFilasMV*cantColumnasMV*sizeof(float));
transponerMatriz(MU, cantFilasMV, cantColumnasMV, MU_T);
float* resultado_paso1;
cudaMallocManaged(&resultado_paso1, cantFilasMC*cantFilasMV*sizeof(float));
cudaMemset(resultado_paso1, 0, cantFilasMC*cantFilasMV*sizeof(float));
multMatrices(MC, cantFilasMC, cantColumnasMC, MU_T, cantFilasMV, resultado_paso1);
cudaFree(MU_T);
float* resultado_paso2;
cudaMallocManaged(&resultado_paso2, cantFilasMV*cantFilasMV*sizeof(float));
cudaMemset(resultado_paso2, 0, cantFilasMV*cantFilasMV*sizeof(float));
multMatrices(MV, cantFilasMV, cantColumnasMV, resultado_paso1, cantFilasMV, resultado_paso2);
cudaFree(resultado_paso1);
return resultado_paso2;
}
void printerror_cfitsio( int status)
{
if (status)
{
fits_report_error(stderr, status);
exit( status );
}
return;
}
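// Assembles the complex Fourier-plane estimate, applies shift -> ifft2 -> shift with ArrayFire, keeps the
// real part (flipped and transposed) and writes the resulting image to the given FITS file.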
void escribirTransformadaInversaFourier2D(float* estimacionFourier_ParteImag, float* estimacionFourier_ParteReal, long N, char* nombreArchivo)
{
af::array estimacionFourier_ParteImag_GPU(N, N, estimacionFourier_ParteImag);
af::array estimacionFourier_ParteReal_GPU(N, N, estimacionFourier_ParteReal);
af::array mapaFourierRecons = af::complex(estimacionFourier_ParteReal_GPU, estimacionFourier_ParteImag_GPU);
estimacionFourier_ParteImag_GPU.unlock();
estimacionFourier_ParteReal_GPU.unlock();
mapaFourierRecons = af::shift(mapaFourierRecons, (mapaFourierRecons.dims(0)+1)/2, (mapaFourierRecons.dims(1)+1)/2);
mapaFourierRecons = af::ifft2(mapaFourierRecons, N, N);
mapaFourierRecons = af::shift(mapaFourierRecons, (mapaFourierRecons.dims(0)+1)/2, (mapaFourierRecons.dims(1)+1)/2);
mapaFourierRecons = af::real(mapaFourierRecons);
mapaFourierRecons = af::flip(mapaFourierRecons, 0);
mapaFourierRecons = af::transpose(mapaFourierRecons);
float* auxiliar_mapaFourierRecons = mapaFourierRecons.device<float>();
float* inver_visi = (float*) calloc(N*N, sizeof(float));
cudaMemcpy(inver_visi, auxiliar_mapaFourierRecons, N*N*sizeof(float), cudaMemcpyDeviceToHost);
mapaFourierRecons.unlock();
fitsfile *fptr;
int status;
long fpixel, nelements;
int bitpix = FLOAT_IMG;
long naxis = 2;
long naxes[2] = {N, N};
remove(nombreArchivo);
status = 0;
if (fits_create_file(&fptr, nombreArchivo, &status))
printerror_cfitsio(status);
if (fits_create_img(fptr, bitpix, naxis, naxes, &status))
printerror_cfitsio(status);
fpixel = 1;
nelements = naxes[0] * naxes[1];
if (fits_write_img(fptr, TFLOAT, fpixel, nelements, inver_visi, &status))
printerror_cfitsio(status);
if (fits_close_file(fptr, &status))
printerror_cfitsio(status);
free(inver_visi);
}
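// Basis matrix for the rectangular (boxcar) basis with logistic-smoothed edges: each entry evaluates to
// (1/ancho) * [sigmoid(estrechezDeBorde*(v - c_i)) - sigmoid(estrechezDeBorde*(v - c_i - ancho))],
// i.e. a boxcar of width ancho centered on grid cell i, for each visibility coordinate v.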
float* calcularMV_Rect(float* v, float delta_v, long cantVisi, long N, float estrechezDeBorde, float ancho, float* matrizDeUnos)
{
float* desplazamientoEnV = linspace((-N/2.0) * delta_v, ((N/2.0) - 1.0) * delta_v, N);
float* primeraFraccionV;
cudaMallocManaged(&primeraFraccionV, cantVisi * N * sizeof(float));
cudaMemset(primeraFraccionV, 0, cantVisi * N * sizeof(float));
float* segundaFraccionV;
cudaMallocManaged(&segundaFraccionV, cantVisi * N * sizeof(float));
for(long i=0; i<(cantVisi*N); i++)
{
segundaFraccionV[i] = 1.0;
}
float* matrizDiferenciaV = restaVectorColumnaConVector(v, cantVisi, desplazamientoEnV, N);
cudaFree(desplazamientoEnV);
combinacionLinealMatrices(-1.0 * estrechezDeBorde, matrizDiferenciaV, cantVisi, N, 0.0, primeraFraccionV);
combinacionLinealMatrices(estrechezDeBorde, matrizDiferenciaV, cantVisi, N, -1 * estrechezDeBorde * ancho, segundaFraccionV);
cudaFree(matrizDiferenciaV);
calcularExp(primeraFraccionV, cantVisi, N);
calcularExp(segundaFraccionV, cantVisi, N);
combinacionLinealMatrices(1.0, matrizDeUnos, cantVisi, N, 1.0, primeraFraccionV);
combinacionLinealMatrices(1.0, matrizDeUnos, cantVisi, N, 1.0, segundaFraccionV);
calcularInvFrac(primeraFraccionV, cantVisi, N);
calcularInvFrac(segundaFraccionV, cantVisi, N);
float* MV;
cudaMallocManaged(&MV, cantVisi * N * sizeof(float));
for(long i=0; i<(cantVisi*N); i++)
{
MV[i] = 1.0/ancho;
}
combinacionLinealMatrices(1.0, primeraFraccionV, cantVisi, N, 1.0, segundaFraccionV);
cudaFree(primeraFraccionV);
combinacionLinealMatrices(1.0/ancho, segundaFraccionV, cantVisi, N, -1.0, MV);
cudaFree(segundaFraccionV);
return MV;
}
float* calcularMV_Rect_estFourier(float ancho, long N, float delta_v, float* matrizDeUnos, float estrechezDeBorde, float* matrizDeUnosEstFourier)
{
float* coordenadasVCentrosCeldas = linspace((-N/2.0) * delta_v, ((N/2.0) - 1.0) * delta_v, N);
combinacionLinealMatrices(0.5 * delta_v, matrizDeUnosEstFourier, N, 1, 1.0, coordenadasVCentrosCeldas);
float* MV_AF = calcularMV_Rect(coordenadasVCentrosCeldas, delta_v, N, N, estrechezDeBorde, ancho, matrizDeUnos);
cudaFree(coordenadasVCentrosCeldas);
return MV_AF;
}
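// Basis matrix for the Gaussian basis: MV[k,i] = exp(-0.5 * ((v[k] - c_i)/anchoV)^2) / sqrt(2*pi*anchoV^2),
// where c_i are the cell centers of the image grid.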
float* calcularMV_Normal(float* v, float delta_v, long cantVisi, long N, float anchoV)
{
float* CV;
cudaMallocManaged(&CV, N * sizeof(float));
for(long i=0;i<N;i++)
{
CV[i] = 0.5 * delta_v;
}
float* CV_sinescalar = linspace((-N/2.0) * delta_v, ((N/2.0) - 1.0) * delta_v, N);
combinacionLinealMatrices(1.0, CV_sinescalar, N, 1, 1.0, CV);
cudaFree(CV_sinescalar);
float* MV = restaVectorColumnaConVector(v, cantVisi, CV, N);
cudaFree(CV);
multMatrizPorConstante(MV, cantVisi, N, 1.0/anchoV);
hadamardProduct(MV, cantVisi, N, MV, MV);
multMatrizPorConstante(MV, cantVisi, N, -0.5);
calcularExp(MV, cantVisi, N);
multMatrizPorConstante(MV, cantVisi, N, 1.0/sqrt(2.0 * M_PI * anchoV * anchoV));
return MV;
}
// float* calcularMV_Normal(float* v, float delta_v, int cantVisi, int N, float anchoV)
// {
// float* CV = (float*) calloc(N, sizeof(float));
// float* matrizDeCeros = (float*) calloc(cantVisi * N, sizeof(float));
// for(int i=0;i<N;i++)
// {
// CV[i] = 0.5 * delta_v;
// }
// float* CV_sinescalar = linspace((-N/2.0) * delta_v, ((N/2.0) - 1) * delta_v, N);
// combinacionLinealMatrices(1.0, CV_sinescalar, N, 1, 1.0, CV);
// free(CV_sinescalar);
// float* MV = restaVectorColumnaConVector(v, cantVisi, CV, N);
// free(CV);
// combinacionLinealMatrices(0.0, matrizDeCeros, cantVisi, N, 1.0/anchoV, MV);
// hadamardProduct(MV, cantVisi, N, MV, MV);
// combinacionLinealMatrices(0.0, matrizDeCeros, cantVisi, N, -0.5, MV);
// calcularExp(MV, cantVisi, N);
// combinacionLinealMatrices(0.0, matrizDeCeros, cantVisi, N, 1.0/sqrt(2.0 * M_PI * anchoV * anchoV), MV);
// free(matrizDeCeros);
// return MV;
// }
float* calcularMV_Normal_estFourier(float anchoV, long N, float delta_v, float* matrizDeUnosEstFourier)
{
float* coordenadasVCentrosCeldas = linspace((-N/2.0) * delta_v, ((N/2.0) - 1.0) * delta_v, N);
combinacionLinealMatrices(0.5 * delta_v, matrizDeUnosEstFourier, N, 1, 1.0, coordenadasVCentrosCeldas);
float* MV_AF = calcularMV_Normal(coordenadasVCentrosCeldas, delta_v, N, N, anchoV);
cudaFree(coordenadasVCentrosCeldas);
return MV_AF;
}
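// Sorts the coefficients by |MC|^2 (imag^2 + real^2), keeps the largest ones until the fraction cotaEnergia
// of the total energy is reached, zeroes the rest, writes the compressed coefficients to disk and returns
// how many coefficients were kept.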
int calCompresionSegunCota(char* nombreArCoef_comp_imag, char* nombreArCoef_comp_real, float* MC_imag, float* MC_imag_comp, float* MC_real, float* MC_real_comp, long cantFilas, long cantColumnas, float cotaEnergia)
{
long largo = cantFilas * cantColumnas;
float* MC_img_cuadrado;
cudaMallocManaged(&MC_img_cuadrado, cantFilas*cantColumnas*sizeof(float));
float* MC_modulo;
cudaMallocManaged(&MC_modulo, cantFilas*cantColumnas*sizeof(float));
hadamardProduct(MC_imag, cantFilas, cantColumnas, MC_imag, MC_img_cuadrado);
hadamardProduct(MC_real, cantFilas, cantColumnas, MC_real, MC_modulo);
combinacionLinealMatrices(1.0, MC_img_cuadrado, cantFilas, cantColumnas, 1.0, MC_modulo);
cudaFree(MC_img_cuadrado);
af::array MC_modulo_GPU(cantFilas*cantColumnas, MC_modulo);
af::array MC_modulo_indicesOrde_GPU(cantFilas*cantColumnas);
af::array MC_modulo_Orde_GPU(cantFilas*cantColumnas);
af::sort(MC_modulo_Orde_GPU, MC_modulo_indicesOrde_GPU, MC_modulo_GPU, 0, false);
float total = af::sum<float>(MC_modulo_GPU);
MC_modulo_Orde_GPU = MC_modulo_Orde_GPU/total;
af::eval(MC_modulo_Orde_GPU);
af::sync();
float* auxiliar_MC_modulo_Orde_GPU = MC_modulo_Orde_GPU.device<float>();
float* coefsNormalizados = (float*) calloc(largo, sizeof(float));
cudaMemcpy(coefsNormalizados, auxiliar_MC_modulo_Orde_GPU, cantFilas*cantColumnas*sizeof(float), cudaMemcpyDeviceToHost);
MC_modulo_Orde_GPU.unlock();
long cantCoefsParaCota = 0;
float sumador = 0.0;
for(long i=0; i<largo; i++)
{
sumador += coefsNormalizados[i];
cantCoefsParaCota++;
if(sumador >= cotaEnergia)
{
break;
}
}
cudaFree(MC_modulo);
free(coefsNormalizados);
MC_modulo_GPU = MC_modulo_indicesOrde_GPU(af::seq(0,(cantCoefsParaCota-1)));
af::array indRepComp = af::constant(0, largo);
indRepComp(MC_modulo_GPU) = 1;
MC_modulo_GPU.unlock();
MC_modulo_indicesOrde_GPU.unlock();
af::array MC_imag_GPU(cantFilas*cantColumnas, MC_imag);
af::array MC_real_GPU(cantFilas*cantColumnas, MC_real);
MC_imag_GPU = MC_imag_GPU * indRepComp;
MC_real_GPU = MC_real_GPU * indRepComp;
af::eval(MC_imag_GPU);
af::eval(MC_real_GPU);
af::sync();
indRepComp.unlock();
float* auxiliar_MC_imag_GPU = MC_imag_GPU.device<float>();
float* auxiliar_MC_real_GPU = MC_real_GPU.device<float>();
cudaMemcpy(MC_imag_comp, auxiliar_MC_imag_GPU, cantFilas*cantColumnas*sizeof(float), cudaMemcpyDeviceToHost);
MC_imag_GPU.unlock();
cudaMemcpy(MC_real_comp, auxiliar_MC_real_GPU, cantFilas*cantColumnas*sizeof(float), cudaMemcpyDeviceToHost);
MC_real_GPU.unlock();
escribirCoefs(MC_imag_comp, nombreArCoef_comp_imag, cantFilas, cantColumnas);
escribirCoefs(MC_real_comp, nombreArCoef_comp_real, cantFilas, cantColumnas);
return cantCoefsParaCota;
}
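// Conjugate-gradient (Fletcher-Reeves) minimization of the weighted least-squares cost over the coefficient
// matrix MC, logging each iteration to nombreArchivoMin and writing the final coefficients to nombreArchivoCoefs.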
float* minGradConjugado_MinCuadra_escritura(char* nombreArchivoMin, char* nombreArchivoCoefs, float* MV, float* MU, float* visibilidades, float* w, long cantVisi, long N, float* matrizDeUnosTamN, int maxIter, float tol)
{
int flag_NOESPOSIBLEMINIMIZAR = 0;
float* MC;
cudaMallocManaged(&MC, N*N*sizeof(float));
cudaMemset(MC, 0, N*N*sizeof(float));
float* residualInit = calResidual(visibilidades, MV, cantVisi, N, MC, N, MU, matrizDeUnosTamN);
  // gradienteActual is allocated fresh inside the loop each iteration; allocating it here as well
  // leaked one N*N buffer per call, so only the declaration remains.
  float* gradienteActual = NULL;
float* gradienteAnterior;
cudaMallocManaged(&gradienteAnterior,N*N*sizeof(float));
cudaMemset(gradienteAnterior, 0, N*N*sizeof(float));
float* pActual;
cudaMallocManaged(&pActual,N*N*sizeof(float));
cudaMemset(pActual, 0, N*N*sizeof(float));
float costoInicial = calCosto(residualInit, cantVisi, w);
float costoAnterior = costoInicial;
float costoActual = costoInicial;
calGradiente(residualInit, MV, cantVisi, N, MU, N, w, gradienteAnterior);
cudaFree(residualInit);
// for(int i=0; i<N*N; i++)
// {
// if(gradienteAnterior[i] != 0.0)
// {
// printf("En la linea %d es %f\n", i, gradienteAnterior[i]);
// }
// }
// exit(-1);
combinacionLinealMatrices(-1.0, gradienteAnterior, N, N, 0.0, pActual);
float diferenciaDeCosto = 1.0;
int i = 0;
float alpha = 0.0;
float epsilon = 1e-10;
float normalizacion = costoAnterior + costoActual + epsilon;
FILE* archivoMin = fopen(nombreArchivoMin, "w");
if(archivoMin == NULL)
{
printf("Error al crear o abrir el archivo para almacenar la minimizacion.\n");
exit(0);
}
while(maxIter > i && 2.0 * diferenciaDeCosto > tol * normalizacion)
{
alpha = calAlpha(gradienteAnterior, N, N, pActual, MV, cantVisi, N, MU, N, w, matrizDeUnosTamN, &flag_NOESPOSIBLEMINIMIZAR);
if(flag_NOESPOSIBLEMINIMIZAR == 1)
{
break;
}
combinacionLinealMatrices(alpha, pActual, N, N, 1.0, MC);
float* residual = calResidual(visibilidades, MV, cantVisi, N, MC, N, MU, matrizDeUnosTamN);
costoActual = calCosto(residual, cantVisi, w);
cudaMallocManaged(&gradienteActual,N*N*sizeof(float));
cudaMemset(gradienteActual, 0, N*N*sizeof(float));
calGradiente(residual, MV, cantVisi, N, MU, N, w, gradienteActual);
cudaFree(residual);
float beta = calBeta_Fletcher_Reeves(gradienteActual, N*N, gradienteAnterior);
combinacionLinealMatrices(-1.0, gradienteActual, N, N, beta, pActual);
diferenciaDeCosto = abs(costoAnterior - costoActual);
normalizacion = costoAnterior + costoActual + epsilon;
float otro = costoActual - costoAnterior;
costoAnterior = costoActual;
float* auxiliar = gradienteAnterior;
gradienteAnterior = gradienteActual;
cudaFree(auxiliar);
i++;
printf( "En la iteracion %d el valor de la funcion de costos es %f con un z de %.12e la diferencia con respecto al anterior costo es %.12e.\n", i, costoActual, alpha, otro);
fprintf(archivoMin, "En la iteracion %d el valor de la funcion de costos es %f con un z de %.12e la diferencia con respecto al anterior costo es %.12e.\n", i, costoActual, alpha, otro);
}
fclose(archivoMin);
cudaFree(gradienteAnterior);
cudaFree(pActual);
escribirCoefs(MC, nombreArchivoCoefs, N, N);
return MC;
}
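// Same conjugate-gradient minimization as above, but without writing the iteration log or the coefficients to disk.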
float* minGradConjugado_MinCuadra(float* MV, float* MU, float* visibilidades, float* w, long cantVisi, long N, float* matrizDeUnosTamN, int maxIter, float tol)
{
int flag_NOESPOSIBLEMINIMIZAR = 0;
float* MC;
cudaMallocManaged(&MC, N*N*sizeof(float));
cudaMemset(MC, 0, N*N*sizeof(float));
float* residualInit = calResidual(visibilidades, MV, cantVisi, N, MC, N, MU, matrizDeUnosTamN);
  // gradienteActual is allocated fresh inside the loop each iteration; allocating it here as well
  // leaked one N*N buffer per call, so only the declaration remains.
  float* gradienteActual = NULL;
float* gradienteAnterior;
cudaMallocManaged(&gradienteAnterior,N*N*sizeof(float));
cudaMemset(gradienteAnterior, 0, N*N*sizeof(float));
float* pActual;
cudaMallocManaged(&pActual,N*N*sizeof(float));
cudaMemset(pActual, 0, N*N*sizeof(float));
float costoInicial = calCosto(residualInit, cantVisi, w);
float costoAnterior = costoInicial;
float costoActual = costoInicial;
calGradiente(residualInit, MV, cantVisi, N, MU, N, w, gradienteAnterior);
cudaFree(residualInit);
combinacionLinealMatrices(-1.0, gradienteAnterior, N, N, 0.0, pActual);
float diferenciaDeCosto = 1.0;
int i = 0;
float alpha = 0.0;
float epsilon = 1e-10;
float normalizacion = costoAnterior + costoActual + epsilon;
while(maxIter > i && 2.0 * diferenciaDeCosto > tol * normalizacion)
{
alpha = calAlpha(gradienteAnterior, N, N, pActual, MV, cantVisi, N, MU, N, w, matrizDeUnosTamN, &flag_NOESPOSIBLEMINIMIZAR);
if(flag_NOESPOSIBLEMINIMIZAR == 1)
{
break;
}
combinacionLinealMatrices(alpha, pActual, N, N, 1.0, MC);
float* residual = calResidual(visibilidades, MV, cantVisi, N, MC, N, MU, matrizDeUnosTamN);
costoActual = calCosto(residual, cantVisi, w);
cudaMallocManaged(&gradienteActual,N*N*sizeof(float));
cudaMemset(gradienteActual, 0, N*N*sizeof(float));
calGradiente(residual, MV, cantVisi, N, MU, N, w, gradienteActual);
cudaFree(residual);
float beta = calBeta_Fletcher_Reeves(gradienteActual, N*N, gradienteAnterior);
combinacionLinealMatrices(-1.0, gradienteActual, N, N, beta, pActual);
diferenciaDeCosto = abs(costoAnterior - costoActual);
normalizacion = costoAnterior + costoActual + epsilon;
float otro = costoActual - costoAnterior;
costoAnterior = costoActual;
float* auxiliar = gradienteAnterior;
gradienteAnterior = gradienteActual;
cudaFree(auxiliar);
i++;
printf( "En la iteracion %d el valor de la funcion de costos es %f con un z de %.12e la diferencia con respecto al anterior costo es %.12e.\n", i, costoActual, alpha, otro);
}
cudaFree(gradienteAnterior);
cudaFree(pActual);
return MC;
}
float calculateSD(float* data, float mean, long cantElementos)
{
float SD = 0.0;
for (long i = 0; i < cantElementos; i++)
SD += pow(data[i] - mean, 2);
  // Divide by the actual sample count; the hard-coded 10 came from an example with exactly 10 points.
  return sqrt(SD / cantElementos);
}
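// Inverse-transforms the Fourier-plane estimate, writes the image to FITS and returns a PSNR-like figure:
// the maximum pixel value inside a fixed crop window divided by the standard deviation of the pixels outside
// that window. The time spent in the inverse transform is reported through tiempoTransInver_MejorCompresion.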
float calculoDePSNRDeRecorte(float* estimacionFourier_ParteImag, float* estimacionFourier_ParteReal, long N, char* nombreArchivo, clock_t* tiempoTransInver_MejorCompresion)
{
int columnaDeInicio = 150;
int columnaDeTermino = 450;
int filaDeInicio = 100;
int filaDeTermino = 400;
*tiempoTransInver_MejorCompresion = clock();
af::array estimacionFourier_ParteImag_GPU(N, N, estimacionFourier_ParteImag);
af::array estimacionFourier_ParteReal_GPU(N, N, estimacionFourier_ParteReal);
af::array mapaFourierRecons = af::complex(estimacionFourier_ParteReal_GPU, estimacionFourier_ParteImag_GPU);
estimacionFourier_ParteImag_GPU.unlock();
estimacionFourier_ParteReal_GPU.unlock();
mapaFourierRecons = af::shift(mapaFourierRecons, (mapaFourierRecons.dims(0)+1)/2, (mapaFourierRecons.dims(1)+1)/2);
mapaFourierRecons = af::ifft2(mapaFourierRecons, N, N);
mapaFourierRecons = af::shift(mapaFourierRecons, (mapaFourierRecons.dims(0)+1)/2, (mapaFourierRecons.dims(1)+1)/2);
mapaFourierRecons = af::real(mapaFourierRecons);
*tiempoTransInver_MejorCompresion = clock() - *tiempoTransInver_MejorCompresion;
mapaFourierRecons = af::flip(mapaFourierRecons, 0);
mapaFourierRecons = af::transpose(mapaFourierRecons);
float* auxiliar_mapaFourierRecons = mapaFourierRecons.device<float>();
float* inver_visi = (float*) calloc(N*N, sizeof(float));
cudaMemcpy(inver_visi, auxiliar_mapaFourierRecons, N*N*sizeof(float), cudaMemcpyDeviceToHost);
mapaFourierRecons.unlock();
int cantFilasARecorrer = columnaDeTermino - columnaDeInicio + 1;
int cantColumnasARecorrer = filaDeTermino - filaDeInicio + 1;
int contador = 0;
int contadorEleExternos = 0;
float sumaDeValoresExternos = 0.0;
float maximoValorInterno = 0;
float* nuevaImagen = (float*) calloc(cantFilasARecorrer*cantColumnasARecorrer, sizeof(float));
float* elementosExternos = (float*) calloc(N*N, sizeof(float));
for(int j=0; j<N; j++)
{
for(int i=0; i<N; i++)
{
if(columnaDeInicio <= i && i <= columnaDeTermino && filaDeInicio <= j && j <= filaDeTermino)
{
nuevaImagen[contador] = inver_visi[i+j*N];
if(maximoValorInterno < inver_visi[i+j*N])
{
maximoValorInterno = inver_visi[i+j*N];
}
contador++;
}
else
{
elementosExternos[contadorEleExternos] = inver_visi[i+j*N];
sumaDeValoresExternos += elementosExternos[contadorEleExternos];
contadorEleExternos++;
}
}
}
float mediaExterna = sumaDeValoresExternos/contadorEleExternos;
float desvEstandar = calculateSD(elementosExternos, mediaExterna, contadorEleExternos);
free(elementosExternos);
float PSNR = maximoValorInterno/desvEstandar;
// printf("El contador es %d\n", contador);
// printf("La wea total es %d\n", cantFilasARecorrer*cantColumnasARecorrer);
// printf("La cantidad de elementos externos es %d\n", contadorEleExternos);
fitsfile *fptr;
int status;
long fpixel, nelements;
int bitpix = FLOAT_IMG;
long naxis = 2;
// long naxes[2] = {cantFilasARecorrer, cantColumnasARecorrer};
long naxes[2] = {N, N};
remove(nombreArchivo);
status = 0;
if (fits_create_file(&fptr, nombreArchivo, &status))
printerror_cfitsio(status);
if (fits_create_img(fptr, bitpix, naxis, naxes, &status))
printerror_cfitsio(status);
fpixel = 1;
nelements = naxes[0] * naxes[1];
// if (fits_write_img(fptr, TFLOAT, fpixel, nelements, nuevaImagen, &status))
if (fits_write_img(fptr, TFLOAT, fpixel, nelements, inver_visi, &status))
printerror_cfitsio(status);
if (fits_close_file(fptr, &status))
printerror_cfitsio(status);
free(inver_visi);
free(nuevaImagen);
return PSNR;
}
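// Sweeps cantParamEvaInfo compression levels between inicioIntervalo and finIntervalo (given as percentages),
// reconstructs and measures the PSNR of each compressed image, smooths the PSNR curve with a GSL Gaussian
// filter, selects a trade-off point from the differences between adjacent smoothed PSNR values inside the
// window allowed by cotaMinPSNR and cotaMinCompresion, writes the selected reconstruction and returns the
// number of coefficients it uses.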
float calPSNRDeDistintasCompresiones(float inicioIntervalo, float finIntervalo, int cantParamEvaInfo, char rutaADirecSec[], char rutaADirecTer[], char nombreArReconsCompreImg[], float* MC_imag, float* MC_real, float* MV_AF, float* MU_AF, long N, clock_t* tiempoReconsParteImag_MejorCompresion, clock_t* tiempoReconsParteReal_MejorCompresion, clock_t* tiempoTransInver_MejorCompresion)
{
float cotaMinPSNR = 0.75;
float cotaMinCompresion = 0.2 * finIntervalo;
float* datosDelMin = (float*) malloc(sizeof(float)*4);
long cantCoefsMejorCompre = 0;
char nombreArchivoTXTCompre[] = "compresiones.txt";
char nombreArchivoDatosMinPSNR[] = "mejorTradeOffPSNRCompre.txt";
char nombreArchivoCompreImg[] = "compreImg";
char nombreDatosDeIte[] = "datosDeIte.txt";
char nombreDatosDeIteLegible[] = "datosDeIteLegible.txt";
char nombreCurvaPSNRSuavizada[] = "curvaPSNRSuavizada.txt";
float* paramEvaInfo = linspace(inicioIntervalo/100.0, finIntervalo/100.0, cantParamEvaInfo);
float* MC_comp_imag;
cudaMallocManaged(&MC_comp_imag,N*N*sizeof(float));
cudaMemset(MC_comp_imag, 0, N*N*sizeof(float));
float* MC_comp_real;
cudaMallocManaged(&MC_comp_real,N*N*sizeof(float));
cudaMemset(MC_comp_real, 0, N*N*sizeof(float));
long largo = N * N;
float* MC_img_cuadrado;
cudaMallocManaged(&MC_img_cuadrado, N*N*sizeof(float));
float* MC_modulo;
cudaMallocManaged(&MC_modulo, N*N*sizeof(float));
hadamardProduct(MC_imag, N, N, MC_imag, MC_img_cuadrado);
hadamardProduct(MC_real, N, N, MC_real, MC_modulo);
combinacionLinealMatrices(1.0, MC_img_cuadrado, N, N, 1.0, MC_modulo);
cudaFree(MC_img_cuadrado);
af::array MC_modulo_GPU(N*N, MC_modulo);
cudaFree(MC_modulo);
af::array MC_modulo_indicesOrde_GPU(N*N);
af::array MC_modulo_Orde_GPU(N*N);
af::sort(MC_modulo_Orde_GPU, MC_modulo_indicesOrde_GPU, MC_modulo_GPU, 0, false);
float total = af::sum<float>(MC_modulo_GPU);
MC_modulo_Orde_GPU = MC_modulo_Orde_GPU/total;
af::eval(MC_modulo_Orde_GPU);
af::eval(MC_modulo_indicesOrde_GPU);
af::sync();
float* auxiliar_MC_modulo_Orde_GPU = MC_modulo_Orde_GPU.device<float>();
float* auxiliar_MC_modulo_indicesOrde_GPU = MC_modulo_indicesOrde_GPU.device<float>();
float* coefsNormalizados = (float*) malloc(largo*sizeof(float));
cudaMemcpy(coefsNormalizados, auxiliar_MC_modulo_Orde_GPU, N*N*sizeof(float), cudaMemcpyDeviceToHost);
int* MC_modulo_indicesOrde_CPU = (int*) malloc(largo*sizeof(int));
cudaMemcpy(MC_modulo_indicesOrde_CPU, auxiliar_MC_modulo_indicesOrde_GPU, N*N*sizeof(int), cudaMemcpyDeviceToHost);
MC_modulo_Orde_GPU.unlock();
MC_modulo_GPU.unlock();
MC_modulo_indicesOrde_GPU.unlock();
long cantCoefsParaCota = 0;
float sumador = 0.0;
long iExterno = 0;
float* cantidadPorcentualDeCoefs = linspace(0.0, largo, largo+1);
combinacionLinealMatrices(0.0, cantidadPorcentualDeCoefs, largo+1, 1, 1.0/largo, cantidadPorcentualDeCoefs);
char* nombreArchivoCompresiones = (char*) malloc(sizeof(char)*strlen(rutaADirecSec)*strlen(nombreArchivoTXTCompre)+sizeof(char)*4);
strcpy(nombreArchivoCompresiones, rutaADirecSec);
strcat(nombreArchivoCompresiones, "/");
strcat(nombreArchivoCompresiones, nombreArchivoTXTCompre);
char* nombreArchivoDatosDeIte = (char*) malloc(sizeof(char)*strlen(rutaADirecSec)*strlen(nombreDatosDeIte)+sizeof(char)*4);
strcpy(nombreArchivoDatosDeIte, rutaADirecSec);
strcat(nombreArchivoDatosDeIte, "/");
strcat(nombreArchivoDatosDeIte, nombreDatosDeIte);
char* nombreArchivoDatosDeIteLegible = (char*) malloc(sizeof(char)*strlen(rutaADirecSec)*strlen(nombreDatosDeIteLegible)+sizeof(char)*4);
strcpy(nombreArchivoDatosDeIteLegible, rutaADirecSec);
strcat(nombreArchivoDatosDeIteLegible, "/");
strcat(nombreArchivoDatosDeIteLegible, nombreDatosDeIteLegible);
float* vectorDePSNR = (float*) calloc(cantParamEvaInfo, sizeof(float));
float* porcenReal = (float*) calloc(cantParamEvaInfo, sizeof(float));
float* porcenIdeal = (float*) calloc(cantParamEvaInfo, sizeof(float));
long* cantCoefsUsadas = (long*) calloc(cantParamEvaInfo, sizeof(long));
float* vectorDePorcenEnergia = (float*) calloc(cantParamEvaInfo, sizeof(float));
float* vectorDeDifePSNREntrePtosAdya = (float*) calloc(cantParamEvaInfo, sizeof(float));
int flag_inicioDeVentana = 1;
int cantPtsVentana = 0;
int inicioDeVentana = 0;
clock_t tiempoCualquiera;
for(long j=0; j<cantParamEvaInfo; j++)
{
sumador = 0.0;
cantCoefsParaCota = 0;
iExterno = 0;
for(long i=0; i<largo+1; i++)
{
if(cantidadPorcentualDeCoefs[i] < paramEvaInfo[cantParamEvaInfo-1-j])
{
sumador += coefsNormalizados[i];
cantCoefsParaCota++;
}
else
{
iExterno = i;
FILE* archivoDatosDeIte = fopen(nombreArchivoDatosDeIte, "a");
fprintf(archivoDatosDeIte, "%f %f %ld %f\n", paramEvaInfo[cantParamEvaInfo-1-j], cantidadPorcentualDeCoefs[i], cantCoefsParaCota, sumador);
fclose(archivoDatosDeIte);
FILE* archivoDatosDeIteLegible = fopen(nombreArchivoDatosDeIteLegible, "a");
fprintf(archivoDatosDeIteLegible, "Del %f%% solicitado, el mas cercano correspondiente al %f%% de coefs, lo que corresponde a %ld coeficientes los cuales poseen el %f%% de la energia.\n", paramEvaInfo[cantParamEvaInfo-1-j] * 100, cantidadPorcentualDeCoefs[i] * 100, cantCoefsParaCota, sumador * 100);
fclose(archivoDatosDeIteLegible);
printf("Del %f%% solicitado, el mas cercano correspondiente al %f%% de coefs, lo que corresponde a %ld coeficientes los cuales poseen el %f%% de la energia.\n", paramEvaInfo[cantParamEvaInfo-1-j] * 100, cantidadPorcentualDeCoefs[i] * 100, cantCoefsParaCota, sumador * 100);
break;
}
}
if(cantCoefsParaCota != 0)
{
int* indicesATomar_CPU = (int*) calloc(cantCoefsParaCota, sizeof(int));
for(int k=0; k<cantCoefsParaCota; k++)
{
indicesATomar_CPU[k] = MC_modulo_indicesOrde_CPU[k];
}
af::array indicesATomar_GPU(cantCoefsParaCota, indicesATomar_CPU);
free(indicesATomar_CPU);
af::array indRepComp = af::constant(0, largo);
indRepComp(indicesATomar_GPU) = 1;
indicesATomar_GPU.unlock();
af::array MC_imag_GPU(N*N, MC_imag);
af::array MC_real_GPU(N*N, MC_real);
MC_imag_GPU = MC_imag_GPU * indRepComp;
MC_real_GPU = MC_real_GPU * indRepComp;
af::eval(MC_imag_GPU);
af::eval(MC_real_GPU);
af::sync();
indRepComp.unlock();
float* auxiliar_MC_imag_GPU = MC_imag_GPU.device<float>();
float* auxiliar_MC_real_GPU = MC_real_GPU.device<float>();
cudaMemcpy(MC_comp_imag, auxiliar_MC_imag_GPU, N*N*sizeof(float), cudaMemcpyDeviceToHost);
MC_imag_GPU.unlock();
cudaMemcpy(MC_comp_real, auxiliar_MC_real_GPU, N*N*sizeof(float), cudaMemcpyDeviceToHost);
MC_real_GPU.unlock();
float* estimacionFourier_compre_ParteImag = estimacionDePlanoDeFourier(MV_AF, N, N, MC_comp_imag, N, N, MU_AF);
float* estimacionFourier_compre_ParteReal = estimacionDePlanoDeFourier(MV_AF, N, N, MC_comp_real, N, N, MU_AF);
int numero = j+1;
char* numComoString = numAString(&numero);
sprintf(numComoString, "%d", numero);
char* nombreArchivoReconsImgComp = (char*) malloc(sizeof(char)*strlen(rutaADirecTer)*strlen(numComoString)*strlen(nombreArchivoCompreImg)+sizeof(char)*7);
strcpy(nombreArchivoReconsImgComp, rutaADirecTer);
strcat(nombreArchivoReconsImgComp, "/");
strcat(nombreArchivoReconsImgComp, nombreArchivoCompreImg);
strcat(nombreArchivoReconsImgComp, "_");
strcat(nombreArchivoReconsImgComp, numComoString);
strcat(nombreArchivoReconsImgComp, ".fit");
float PSNRActual = calculoDePSNRDeRecorte(estimacionFourier_compre_ParteImag, estimacionFourier_compre_ParteReal, N, nombreArchivoReconsImgComp, &tiempoCualquiera);
porcenIdeal[j] = 1-paramEvaInfo[cantParamEvaInfo-1-j];
vectorDePSNR[j] = PSNRActual;
porcenReal[j] = 1-cantidadPorcentualDeCoefs[iExterno];
cantCoefsUsadas[j] = cantCoefsParaCota;
vectorDePorcenEnergia[j] = sumador;
FILE* archivoPSNR = fopen(nombreArchivoCompresiones, "a");
fprintf(archivoPSNR, "%f %f %f\n", 1-cantidadPorcentualDeCoefs[iExterno], 1-paramEvaInfo[cantParamEvaInfo-1-j], PSNRActual);
fclose(archivoPSNR);
cudaFree(estimacionFourier_compre_ParteImag);
cudaFree(estimacionFourier_compre_ParteReal);
free(numComoString);
free(nombreArchivoReconsImgComp);
}
}
float* vectorDePSNRFiltrado = (float*) calloc(cantParamEvaInfo, sizeof(float));
gsl_vector* vectorDePSNREnGSL = gsl_vector_alloc(cantParamEvaInfo);
gsl_vector* vectorDePSNREnGSLFiltrado = gsl_vector_alloc(cantParamEvaInfo);
for(int i=0; i<cantParamEvaInfo; i++)
{
gsl_vector_set(vectorDePSNREnGSL, i, vectorDePSNR[i]);
}
gsl_filter_gaussian_workspace* gauss_p = gsl_filter_gaussian_alloc(5);
gsl_filter_gaussian(GSL_FILTER_END_PADVALUE, 1.0, 0, vectorDePSNREnGSL, vectorDePSNREnGSLFiltrado, gauss_p);
for(int i=0; i<cantParamEvaInfo; i++)
{
vectorDePSNRFiltrado[i] = gsl_vector_get(vectorDePSNREnGSLFiltrado, i);
}
gsl_vector_free(vectorDePSNREnGSL);
gsl_vector_free(vectorDePSNREnGSLFiltrado);
gsl_filter_gaussian_free(gauss_p);
char* nombreArchivoCurvaPSNRSuavizada = (char*) malloc(sizeof(char)*strlen(rutaADirecSec)*strlen(nombreCurvaPSNRSuavizada)+sizeof(char)*4);
strcpy(nombreArchivoCurvaPSNRSuavizada, rutaADirecSec);
strcat(nombreArchivoCurvaPSNRSuavizada, "/");
strcat(nombreArchivoCurvaPSNRSuavizada, nombreCurvaPSNRSuavizada);
FILE* archivoCurvaPSNRSuavizada = fopen(nombreArchivoCurvaPSNRSuavizada, "a");
for(int i=0; i<cantParamEvaInfo; i++)
{
fprintf(archivoCurvaPSNRSuavizada, "%f\n", vectorDePSNRFiltrado[i]);
}
fclose(archivoCurvaPSNRSuavizada);
free(nombreArchivoCurvaPSNRSuavizada);
for(int j=0; j<cantParamEvaInfo; j++)
{
float porcenActual = porcenReal[j];
float porcenDifActual = vectorDePSNRFiltrado[j]/vectorDePSNRFiltrado[0];
if(j >= 1)
{
if(porcenActual >= cotaMinCompresion && porcenDifActual >= cotaMinPSNR)
{
if(flag_inicioDeVentana)
{
inicioDeVentana = j;
flag_inicioDeVentana = 0;
}
vectorDeDifePSNREntrePtosAdya[cantPtsVentana] = vectorDePSNRFiltrado[j] - vectorDePSNRFiltrado[j-1];
printf("%.12e\n", vectorDeDifePSNREntrePtosAdya[cantPtsVentana]);
cantPtsVentana++;
}
}
}
af::array vectorDeDifePSNREntrePtosAdya_GPU(cantPtsVentana, vectorDeDifePSNREntrePtosAdya);
free(vectorDeDifePSNREntrePtosAdya);
af::array vectorDeDifePSNREntrePtosAdya_indicesOrde_GPU(cantPtsVentana);
af::array vectorDeDifePSNREntrePtosAdya_Orde_GPU(cantPtsVentana);
af::sort(vectorDeDifePSNREntrePtosAdya_Orde_GPU, vectorDeDifePSNREntrePtosAdya_indicesOrde_GPU, vectorDeDifePSNREntrePtosAdya_GPU, 0, true);
vectorDeDifePSNREntrePtosAdya_GPU.unlock();
vectorDeDifePSNREntrePtosAdya_Orde_GPU.unlock();
int* auxiliar_vectorDeDifePSNREntrePtosAdya_indicesOrde_GPU = vectorDeDifePSNREntrePtosAdya_indicesOrde_GPU.device<int>();
int* vectorDeDifePSNREntrePtosAdya_indicesOrde_CPU = (int*) malloc(sizeof(int)*cantPtsVentana);
cudaMemcpy(vectorDeDifePSNREntrePtosAdya_indicesOrde_CPU, auxiliar_vectorDeDifePSNREntrePtosAdya_indicesOrde_GPU, cantPtsVentana*sizeof(int), cudaMemcpyDeviceToHost);
vectorDeDifePSNREntrePtosAdya_indicesOrde_GPU.unlock();
int indiceElegido = vectorDeDifePSNREntrePtosAdya_indicesOrde_CPU[0] + inicioDeVentana - 1;
// printf("El indice elegido es %d\n", indiceElegido);
free(vectorDeDifePSNREntrePtosAdya_indicesOrde_CPU);
datosDelMin[0] = porcenIdeal[indiceElegido];
datosDelMin[1] = porcenReal[indiceElegido];
cantCoefsMejorCompre = cantCoefsUsadas[indiceElegido];
datosDelMin[2] = vectorDePorcenEnergia[indiceElegido];
datosDelMin[3] = vectorDePSNR[indiceElegido];
free(vectorDePSNRFiltrado);
free(porcenIdeal);
free(porcenReal);
free(cantCoefsUsadas);
free(vectorDePorcenEnergia);
free(vectorDePSNR);
char* nombreArchivoMejorCompre = (char*) malloc(sizeof(char)*strlen(rutaADirecSec)*strlen(nombreArchivoDatosMinPSNR)+sizeof(char)*4);
strcpy(nombreArchivoMejorCompre, rutaADirecSec);
strcat(nombreArchivoMejorCompre, "/");
strcat(nombreArchivoMejorCompre, nombreArchivoDatosMinPSNR);
FILE* archivoMejorCompre = fopen(nombreArchivoMejorCompre, "w");
fprintf(archivoMejorCompre, "El tradeoff seleccionado con indice %d corresponde al %f%% de coefs, el mas cercano correspondiente al %f%% de coefs, lo que corresponde a %ld coeficientes los cuales poseen el %f%% de la energia y un PSNR de %f.\n", indiceElegido, (1-datosDelMin[0]) * 100, (1-datosDelMin[1]) * 100, cantCoefsMejorCompre, datosDelMin[2] * 100, datosDelMin[3]);
free(nombreArchivoMejorCompre);
free(datosDelMin);
fclose(archivoMejorCompre);
float* indicesATomar_CPU = (float*) malloc(cantCoefsMejorCompre*sizeof(float));
for(int k=0; k<cantCoefsMejorCompre; k++)
{
indicesATomar_CPU[k] = MC_modulo_indicesOrde_CPU[k];
}
af::array indicesATomar_GPU(cantCoefsMejorCompre, indicesATomar_CPU);
free(indicesATomar_CPU);
af::array indRepComp = af::constant(0, largo);
indRepComp(indicesATomar_GPU) = 1;
indicesATomar_GPU.unlock();
af::array MC_imag_GPU(N*N, MC_imag);
af::array MC_real_GPU(N*N, MC_real);
MC_imag_GPU = MC_imag_GPU * indRepComp;
MC_real_GPU = MC_real_GPU * indRepComp;
af::eval(MC_imag_GPU);
af::eval(MC_real_GPU);
af::sync();
indRepComp.unlock();
float* auxiliar_MC_imag_GPU = MC_imag_GPU.device<float>();
float* auxiliar_MC_real_GPU = MC_real_GPU.device<float>();
cudaMemcpy(MC_comp_imag, auxiliar_MC_imag_GPU, N*N*sizeof(float), cudaMemcpyDeviceToHost);
MC_imag_GPU.unlock();
cudaMemcpy(MC_comp_real, auxiliar_MC_real_GPU, N*N*sizeof(float), cudaMemcpyDeviceToHost);
MC_real_GPU.unlock();
*tiempoReconsParteImag_MejorCompresion = clock();
float* estimacionFourier_compre_ParteImag = estimacionDePlanoDeFourier(MV_AF, N, N, MC_comp_imag, N, N, MU_AF);
*tiempoReconsParteImag_MejorCompresion = clock() - *tiempoReconsParteImag_MejorCompresion;
*tiempoReconsParteReal_MejorCompresion = clock();
float* estimacionFourier_compre_ParteReal = estimacionDePlanoDeFourier(MV_AF, N, N, MC_comp_real, N, N, MU_AF);
*tiempoReconsParteReal_MejorCompresion = clock() - *tiempoReconsParteReal_MejorCompresion;
char* nombreArchivoReconsImgComp = (char*) malloc(sizeof(char)*strlen(rutaADirecSec)*strlen(nombreArReconsCompreImg)+sizeof(char)*4);
strcpy(nombreArchivoReconsImgComp, rutaADirecSec);
strcat(nombreArchivoReconsImgComp, "/");
strcat(nombreArchivoReconsImgComp, nombreArReconsCompreImg);
float PSNRActual = calculoDePSNRDeRecorte(estimacionFourier_compre_ParteImag, estimacionFourier_compre_ParteReal, N, nombreArchivoReconsImgComp, tiempoTransInver_MejorCompresion);
cudaFree(estimacionFourier_compre_ParteImag);
cudaFree(estimacionFourier_compre_ParteReal);
cudaFree(MC_comp_imag);
cudaFree(MC_comp_real);
cudaFree(cantidadPorcentualDeCoefs);
cudaFree(paramEvaInfo);
cudaFree(MU_AF);
cudaFree(MV_AF);
free(coefsNormalizados);
free(MC_modulo_indicesOrde_CPU);
free(nombreArchivoCompresiones);
free(nombreArchivoDatosDeIte);
free(nombreArchivoDatosDeIteLegible);
return cantCoefsMejorCompre;
}
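// For a single Gaussian basis width `ancho`: builds MV and MU, creates the output subdirectory and minimizes
// the coefficients for the imaginary and real parts of the visibilities; reconstructions, compression results
// and timing information are written under nombreDirPrin/nombreDirSec.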
void calCompSegunAncho_Normal_escritura(char nombreDirPrin[], char* nombreDirSec, char nombreDirTer[], float ancho, float cotaEnergia, int iterActual, int maxIter, float tol, float* u, float* v, float* w, float* visi_parteImaginaria, float* visi_parteReal, float delta_u, float delta_v, long cantVisi, long N, float* matrizDeUnosTamN)
{
float inicioPorcenCompre = 0.0;
float terminoPorcenCompre = 0.2;
int cantPorcen = 101;
// int cantPorcen = 2;
// ############### CONFIG. DE NOMBRES DE ARCHIVOS ##############
char nombreArReconsImg[] = "reconsImg.fit";
char nombreArReconsCompreImg[] = "reconsCompreImg.fit";
char nombreArMin_imag[] = "minCoefs_imag.txt";
char nombreArCoef_imag[] = "coefs_imag.txt";
char nombreArCoef_comp_imag[] = "coefs_comp_imag.txt";
char nombreArMin_real[] = "minCoefs_real.txt";
char nombreArCoef_real[] = "coefs_real.txt";
char nombreArCoef_comp_real[] = "coefs_comp_real.txt";
char nombreArInfoCompresion[] = "infoCompre.txt";
char nombreArInfoTiemposEjecu[] = "infoTiemposEjecu.txt";
// ############### CALCULO DE MU Y MV - CREACION DE DIRECTORIO SEGUNDARIO ##############
printf("...Comenzando calculo de MV...\n");
clock_t tiempoCalculoMV;
tiempoCalculoMV = clock();
float* MV = calcularMV_Normal(v, delta_v, cantVisi, N, ancho);
tiempoCalculoMV = clock() - tiempoCalculoMV;
float tiempoTotalCalculoMV = ((float)tiempoCalculoMV)/CLOCKS_PER_SEC;
printf("Calculo de MV completado.\n");
printf("...Comenzando calculo de MU...\n");
clock_t tiempoCalculoMU;
tiempoCalculoMU = clock();
float* MU = calcularMV_Normal(u, delta_u, cantVisi, N, ancho);
tiempoCalculoMU = clock() - tiempoCalculoMU;
float tiempoTotalCalculoMU = ((float)tiempoCalculoMU)/CLOCKS_PER_SEC;
printf("Calculo de MU completado.\n");
char* rutaADirecSec = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreDirSec)*sizeof(char)+sizeof(char)*3);
strcpy(rutaADirecSec, nombreDirPrin);
strcat(rutaADirecSec, "/");
strcat(rutaADirecSec, nombreDirSec);
if(mkdir(rutaADirecSec, 0777) == -1)
{
printf("ERROR: No se pudo crear subdirectorio.");
printf("PROGRAMA ABORTADO.\n");
exit(0);
}
strcat(rutaADirecSec, "/");
// ############### MINIMIZACION DE COEFS, PARTE IMAGINARIA ##############
char* nombreArchivoMin_imag = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreDirSec)*strlen(nombreArMin_imag)*sizeof(char)+sizeof(char)*3);
strcpy(nombreArchivoMin_imag, rutaADirecSec);
strcat(nombreArchivoMin_imag, nombreArMin_imag);
char* nombreArchivoCoefs_imag = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreDirSec)*strlen(nombreArCoef_imag)*sizeof(char)+sizeof(char)*3);
strcpy(nombreArchivoCoefs_imag, rutaADirecSec);
strcat(nombreArchivoCoefs_imag, nombreArCoef_imag);
printf("...Comenzando minimizacion de coeficientes parte imaginaria...\n");
clock_t tiempoMinPartImag;
tiempoMinPartImag = clock();
float* MC_imag = minGradConjugado_MinCuadra_escritura(nombreArchivoMin_imag, nombreArchivoCoefs_imag, MV, MU, visi_parteImaginaria, w, cantVisi, N, matrizDeUnosTamN, maxIter, tol);
tiempoMinPartImag = clock() - tiempoMinPartImag;
float tiempoTotalMinPartImag = ((float)tiempoMinPartImag)/CLOCKS_PER_SEC;
printf("Proceso de minimizacion de coeficientes parte imaginaria terminado.\n");
free(nombreArchivoMin_imag);
free(nombreArchivoCoefs_imag);
// ############### MINIMIZACION DE COEFS, PARTE REAL ##############
char* nombreArchivoMin_real = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreDirSec)*strlen(nombreArMin_real)*sizeof(char)+sizeof(char)*3);
strcpy(nombreArchivoMin_real, rutaADirecSec);
strcat(nombreArchivoMin_real, nombreArMin_real);
char* nombreArchivoCoefs_real = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreDirSec)*strlen(nombreArCoef_real)*sizeof(char)+sizeof(char)*3);
strcpy(nombreArchivoCoefs_real, rutaADirecSec);
strcat(nombreArchivoCoefs_real, nombreArCoef_real);
printf("...Comenzando minimizacion de coeficientes parte real...\n");
clock_t tiempoMinPartReal;
tiempoMinPartReal = clock();
float* MC_real = minGradConjugado_MinCuadra_escritura(nombreArchivoMin_real, nombreArchivoCoefs_real, MV, MU, visi_parteReal, w, cantVisi, N, matrizDeUnosTamN, maxIter, tol);
tiempoMinPartReal = clock() - tiempoMinPartReal;
float tiempoTotalMinPartReal = ((float)tiempoMinPartReal)/CLOCKS_PER_SEC;
printf("Proceso de minimizacion de coeficientes parte real terminado.\n");
free(nombreArchivoMin_real);
free(nombreArchivoCoefs_real);
// ############### CALCULO NIVEL DE INFORMACION ##############
clock_t tiempoInfo;
tiempoInfo = clock();
float* medidasDeInfo = calInfoFisherDiag(MV, cantVisi, N, MU, w);
tiempoInfo = clock() - tiempoInfo;
float tiempoTotalInfo = ((float)tiempoInfo)/CLOCKS_PER_SEC;
cudaFree(MU);
cudaFree(MV);
// ############### RECONSTRUCCION DEL PLANO GRILLEADO Y ALMACENAMIENTO DE LA RECONSTRUCCION DE LA IMAGEN ##############
char* nombreArchivoReconsImg = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreDirSec)*strlen(nombreArReconsImg)*sizeof(char)+sizeof(char)*3);
strcpy(nombreArchivoReconsImg, rutaADirecSec);
strcat(nombreArchivoReconsImg, nombreArReconsImg);
clock_t tiempoCalculoMV_AF;
tiempoCalculoMV_AF = clock();
float* MV_AF = calcularMV_Normal_estFourier(ancho, N, delta_v, matrizDeUnosTamN);
tiempoCalculoMV_AF = clock() - tiempoCalculoMV_AF;
float tiempoTotalCalculoMV_AF = ((float)tiempoCalculoMV_AF)/CLOCKS_PER_SEC;
clock_t tiempoCalculoMU_AF;
tiempoCalculoMU_AF = clock();
float* MU_AF = calcularMV_Normal_estFourier(ancho, N, delta_u, matrizDeUnosTamN);
tiempoCalculoMU_AF = clock() - tiempoCalculoMU_AF;
float tiempoTotalCalculoMU_AF = ((float)tiempoCalculoMU_AF)/CLOCKS_PER_SEC;
clock_t tiempoReconsFourierPartImag;
tiempoReconsFourierPartImag = clock();
float* estimacionFourier_ParteImag = estimacionDePlanoDeFourier(MV_AF, N, N, MC_imag, N, N, MU_AF);
tiempoReconsFourierPartImag = clock() - tiempoReconsFourierPartImag;
float tiempoTotalReconsFourierPartImag = ((float)tiempoReconsFourierPartImag)/CLOCKS_PER_SEC;
clock_t tiempoReconsFourierPartReal;
tiempoReconsFourierPartReal = clock();
float* estimacionFourier_ParteReal = estimacionDePlanoDeFourier(MV_AF, N, N, MC_real, N, N, MU_AF);
tiempoReconsFourierPartReal = clock() - tiempoReconsFourierPartReal;
float tiempoTotalReconsFourierPartReal = ((float)tiempoReconsFourierPartReal)/CLOCKS_PER_SEC;
clock_t tiempoReconsTransInver;
tiempoReconsTransInver = clock();
escribirTransformadaInversaFourier2D(estimacionFourier_ParteImag, estimacionFourier_ParteReal, N, nombreArchivoReconsImg);
tiempoReconsTransInver = clock() - tiempoReconsTransInver;
float tiempoTotalReconsTransInver = ((float)tiempoReconsTransInver)/CLOCKS_PER_SEC;
cudaFree(estimacionFourier_ParteImag);
cudaFree(estimacionFourier_ParteReal);
free(nombreArchivoReconsImg);
// ############### CALCULO DE GRADO DE COMPRESION ##############
char* rutaADirecTer = (char*) malloc(strlen(rutaADirecSec)*strlen(nombreDirTer)*sizeof(char)+sizeof(char)*3);
strcpy(rutaADirecTer, rutaADirecSec);
strcat(rutaADirecTer, "/");
strcat(rutaADirecTer, nombreDirTer);
if(mkdir(rutaADirecTer, 0777) == -1)
{
printf("ERROR: No se pudo crear subdirectorio.\n");
printf("PROGRAMA ABORTADO.\n");
exit(0);
}
strcat(rutaADirecTer, "/");
clock_t tiempoReconsFourierPartImagComp;
clock_t tiempoReconsFourierPartRealComp;
clock_t tiempoReconsTransInverComp;
printf("...Comenzando calculo de compresiones...\n");
clock_t tiempoCompresion;
tiempoCompresion = clock();
int cantCoefs = calPSNRDeDistintasCompresiones(inicioPorcenCompre, terminoPorcenCompre, cantPorcen, rutaADirecSec, rutaADirecTer, nombreArReconsCompreImg, MC_imag, MC_real, MV_AF, MU_AF, N, &tiempoReconsFourierPartImagComp, &tiempoReconsFourierPartRealComp, &tiempoReconsTransInverComp);
tiempoCompresion = clock() - tiempoCompresion;
float tiempoTotalCompresion = ((float)tiempoCompresion)/CLOCKS_PER_SEC;
printf("Proceso de calculo de compresiones terminado.\n");
free(rutaADirecTer);
char* nombreArchivoInfoComp = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreArInfoCompresion)*sizeof(char)+sizeof(char)*2);
strcpy(nombreArchivoInfoComp, nombreDirPrin);
strcat(nombreArchivoInfoComp, "/");
strcat(nombreArchivoInfoComp, nombreArInfoCompresion);
FILE* archivo = fopen(nombreArchivoInfoComp, "a");
float nivelDeCompresion = 1.0 - cantCoefs * 1.0 / (N*N); // fraction of coefficients discarded (cantCoefs kept out of N*N)
fprintf(archivo, "%d %f %.12f %.12e %.12e %.12f %.12d\n", iterActual, ancho/delta_u, ancho, medidasDeInfo[0], medidasDeInfo[1], nivelDeCompresion, cantCoefs);
fclose(archivo);
free(nombreArchivoInfoComp);
free(medidasDeInfo);
cudaFree(MC_real);
cudaFree(MC_imag);
cudaFree(MU_AF);
cudaFree(MV_AF);
float tiempoTotalReconsFourierPartImagComp = ((float)tiempoReconsFourierPartImagComp)/CLOCKS_PER_SEC;
float tiempoTotalReconsFourierPartRealComp = ((float)tiempoReconsFourierPartRealComp)/CLOCKS_PER_SEC;
float tiempoTotalReconsTransInverComp = ((float)tiempoReconsTransInverComp)/CLOCKS_PER_SEC;
// ############### ESCRITURA DE ARCHIVO CON TIEMPOS DE EJECUCION ##############
char* nombreArchivoInfoTiemposEjecu = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreArInfoTiemposEjecu)*sizeof(char)+sizeof(char)*2);
strcpy(nombreArchivoInfoTiemposEjecu, nombreDirPrin);
strcat(nombreArchivoInfoTiemposEjecu, "/");
strcat(nombreArchivoInfoTiemposEjecu, nombreArInfoTiemposEjecu);
FILE* archivoInfoTiemposEjecu = fopen(nombreArchivoInfoTiemposEjecu, "a");
fprintf(archivoInfoTiemposEjecu, "%d %.12f %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e\n", iterActual, ancho, tiempoTotalCalculoMV, tiempoTotalCalculoMU, tiempoTotalMinPartImag, tiempoTotalMinPartReal, tiempoTotalInfo, tiempoTotalCompresion, tiempoTotalCalculoMV_AF, tiempoTotalCalculoMU_AF, tiempoTotalReconsFourierPartImag, tiempoTotalReconsFourierPartReal, tiempoTotalReconsTransInver, tiempoTotalReconsFourierPartImagComp, tiempoTotalReconsFourierPartRealComp, tiempoTotalReconsTransInverComp);
fclose(archivoInfoTiemposEjecu);
free(rutaADirecSec);
}
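// Rectangular-basis variant of the pipeline above; it uses calcularMV_Rect /
// calcularMV_Rect_estFourier, which additionally take matrizDeUnos and the edge
// sharpness parameter estrechezDeBorde, but otherwise follows the same steps.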
void calCompSegunAncho_Rect_escritura(char nombreDirPrin[], char* nombreDirSec, char nombreDirTer[], float ancho, float cotaEnergia, int iterActual, int maxIter, float tol, float* u, float* v, float* w, float* visi_parteImaginaria, float* visi_parteReal, float delta_u, float delta_v, float* matrizDeUnos, long cantVisi, long N, float* matrizDeUnosTamN, float estrechezDeBorde)
{
float inicioPorcenCompre = 0.0;
float terminoPorcenCompre = 0.2;
int cantPorcen = 101;
// int cantPorcen = 2;
// ############### CONFIG. DE NOMBRES DE ARCHIVOS ##############
char nombreArReconsImg[] = "reconsImg.fit";
char nombreArReconsCompreImg[] = "reconsCompreImg.fit";
char nombreArMin_imag[] = "minCoefs_imag.txt";
char nombreArCoef_imag[] = "coefs_imag.txt";
char nombreArCoef_comp_imag[] = "coefs_comp_imag.txt";
char nombreArMin_real[] = "minCoefs_real.txt";
char nombreArCoef_real[] = "coefs_real.txt";
char nombreArCoef_comp_real[] = "coefs_comp_real.txt";
char nombreArInfoCompresion[] = "infoCompre.txt";
char nombreArInfoTiemposEjecu[] = "infoTiemposEjecu.txt";
// ############### CALCULO DE MU Y MV - CREACION DE DIRECTORIO SEGUNDARIO ##############
printf("...Comenzando calculo de MV...\n");
clock_t tiempoCalculoMV;
tiempoCalculoMV = clock();
float* MV = calcularMV_Rect(v, delta_v, cantVisi, N, estrechezDeBorde, ancho, matrizDeUnos);
tiempoCalculoMV = clock() - tiempoCalculoMV;
float tiempoTotalCalculoMV = ((float)tiempoCalculoMV)/CLOCKS_PER_SEC;
printf("Calculo de MV completado.\n");
printf("...Comenzando calculo de MU...\n");
clock_t tiempoCalculoMU;
tiempoCalculoMU = clock();
float* MU = calcularMV_Rect(u, delta_u, cantVisi, N, estrechezDeBorde, ancho, matrizDeUnos);
tiempoCalculoMU = clock() - tiempoCalculoMU;
float tiempoTotalCalculoMU = ((float)tiempoCalculoMU)/CLOCKS_PER_SEC;
printf("Calculo de MU completado.\n");
char* rutaADirecSec = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreDirSec)*sizeof(char)+sizeof(char)*3);
strcpy(rutaADirecSec, nombreDirPrin);
strcat(rutaADirecSec, "/");
strcat(rutaADirecSec, nombreDirSec);
if(mkdir(rutaADirecSec, 0777) == -1)
{
printf("ERROR: No se pudo crear subdirectorio.");
printf("PROGRAMA ABORTADO.\n");
exit(0);
}
strcat(rutaADirecSec, "/");
// ############### MINIMIZACION DE COEFS, PARTE IMAGINARIA ##############
char* nombreArchivoMin_imag = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreDirSec)*strlen(nombreArMin_imag)*sizeof(char)+sizeof(char)*3);
strcpy(nombreArchivoMin_imag, rutaADirecSec);
strcat(nombreArchivoMin_imag, nombreArMin_imag);
char* nombreArchivoCoefs_imag = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreDirSec)*strlen(nombreArCoef_imag)*sizeof(char)+sizeof(char)*3);
strcpy(nombreArchivoCoefs_imag, rutaADirecSec);
strcat(nombreArchivoCoefs_imag, nombreArCoef_imag);
printf("...Comenzando minimizacion de coeficientes parte imaginaria...\n");
clock_t tiempoMinPartImag;
tiempoMinPartImag = clock();
float* MC_imag = minGradConjugado_MinCuadra_escritura(nombreArchivoMin_imag, nombreArchivoCoefs_imag, MV, MU, visi_parteImaginaria, w, cantVisi, N, matrizDeUnosTamN, maxIter, tol);
tiempoMinPartImag = clock() - tiempoMinPartImag;
float tiempoTotalMinPartImag = ((float)tiempoMinPartImag)/CLOCKS_PER_SEC;
printf("Proceso de minimizacion de coeficientes parte imaginaria terminado.\n");
free(nombreArchivoMin_imag);
free(nombreArchivoCoefs_imag);
// ############### MINIMIZACION DE COEFS, PARTE REAL ##############
char* nombreArchivoMin_real = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreDirSec)*strlen(nombreArMin_real)*sizeof(char)+sizeof(char)*3);
strcpy(nombreArchivoMin_real, rutaADirecSec);
strcat(nombreArchivoMin_real, nombreArMin_real);
char* nombreArchivoCoefs_real = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreDirSec)*strlen(nombreArCoef_real)*sizeof(char)+sizeof(char)*3);
strcpy(nombreArchivoCoefs_real, rutaADirecSec);
strcat(nombreArchivoCoefs_real, nombreArCoef_real);
printf("...Comenzando minimizacion de coeficientes parte real...\n");
clock_t tiempoMinPartReal;
tiempoMinPartReal = clock();
float* MC_real = minGradConjugado_MinCuadra_escritura(nombreArchivoMin_real, nombreArchivoCoefs_real, MV, MU, visi_parteReal, w, cantVisi, N, matrizDeUnosTamN, maxIter, tol);
tiempoMinPartReal = clock() - tiempoMinPartReal;
float tiempoTotalMinPartReal = ((float)tiempoMinPartReal)/CLOCKS_PER_SEC;
printf("Proceso de minimizacion de coeficientes parte real terminado.\n");
free(nombreArchivoMin_real);
free(nombreArchivoCoefs_real);
// ############### CALCULO NIVEL DE INFORMACION ##############
clock_t tiempoInfo;
tiempoInfo = clock();
float* medidasDeInfo = calInfoFisherDiag(MV, cantVisi, N, MU, w);
tiempoInfo = clock() - tiempoInfo;
float tiempoTotalInfo = ((float)tiempoInfo)/CLOCKS_PER_SEC;
cudaFree(MU);
cudaFree(MV);
// ############### RECONSTRUCCION DEL PLANO GRILLEADO Y ALMACENAMIENTO DE LA RECONSTRUCCION DE LA IMAGEN ##############
char* nombreArchivoReconsImg = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreDirSec)*strlen(nombreArReconsImg)*sizeof(char)+sizeof(char)*3);
strcpy(nombreArchivoReconsImg, rutaADirecSec);
strcat(nombreArchivoReconsImg, nombreArReconsImg);
clock_t tiempoCalculoMV_AF;
tiempoCalculoMV_AF = clock();
float* MV_AF = calcularMV_Rect_estFourier(ancho, N, delta_v, matrizDeUnos, estrechezDeBorde, matrizDeUnosTamN);
tiempoCalculoMV_AF = clock() - tiempoCalculoMV_AF;
float tiempoTotalCalculoMV_AF = ((float)tiempoCalculoMV_AF)/CLOCKS_PER_SEC;
clock_t tiempoCalculoMU_AF;
tiempoCalculoMU_AF = clock();
float* MU_AF = calcularMV_Rect_estFourier(ancho, N, delta_u, matrizDeUnos, estrechezDeBorde, matrizDeUnosTamN);
tiempoCalculoMU_AF = clock() - tiempoCalculoMU_AF;
float tiempoTotalCalculoMU_AF = ((float)tiempoCalculoMU_AF)/CLOCKS_PER_SEC;
clock_t tiempoReconsFourierPartImag;
tiempoReconsFourierPartImag = clock();
float* estimacionFourier_ParteImag = estimacionDePlanoDeFourier(MV_AF, N, N, MC_imag, N, N, MU_AF);
tiempoReconsFourierPartImag = clock() - tiempoReconsFourierPartImag;
float tiempoTotalReconsFourierPartImag = ((float)tiempoReconsFourierPartImag)/CLOCKS_PER_SEC;
clock_t tiempoReconsFourierPartReal;
tiempoReconsFourierPartReal = clock();
float* estimacionFourier_ParteReal = estimacionDePlanoDeFourier(MV_AF, N, N, MC_real, N, N, MU_AF);
tiempoReconsFourierPartReal = clock() - tiempoReconsFourierPartReal;
float tiempoTotalReconsFourierPartReal = ((float)tiempoReconsFourierPartReal)/CLOCKS_PER_SEC;
clock_t tiempoReconsTransInver;
tiempoReconsTransInver = clock();
escribirTransformadaInversaFourier2D(estimacionFourier_ParteImag, estimacionFourier_ParteReal, N, nombreArchivoReconsImg);
tiempoReconsTransInver = clock() - tiempoReconsTransInver;
float tiempoTotalReconsTransInver = ((float)tiempoReconsTransInver)/CLOCKS_PER_SEC;
cudaFree(estimacionFourier_ParteImag);
cudaFree(estimacionFourier_ParteReal);
free(nombreArchivoReconsImg);
// ############### CALCULO DE GRADO DE COMPRESION ##############
char* rutaADirecTer = (char*) malloc(strlen(rutaADirecSec)*strlen(nombreDirTer)*sizeof(char)+sizeof(char)*3);
strcpy(rutaADirecTer, rutaADirecSec);
strcat(rutaADirecTer, "/");
strcat(rutaADirecTer, nombreDirTer);
if(mkdir(rutaADirecTer, 0777) == -1)
{
printf("ERROR: No se pudo crear subdirectorio.\n");
printf("PROGRAMA ABORTADO.\n");
exit(0);
}
strcat(rutaADirecTer, "/");
clock_t tiempoReconsFourierPartImagComp;
clock_t tiempoReconsFourierPartRealComp;
clock_t tiempoReconsTransInverComp;
printf("...Comenzando calculo de compresiones...\n");
clock_t tiempoCompresion;
tiempoCompresion = clock();
int cantCoefs = calPSNRDeDistintasCompresiones(inicioPorcenCompre, terminoPorcenCompre, cantPorcen, rutaADirecSec, rutaADirecTer, nombreArReconsCompreImg, MC_imag, MC_real, MV_AF, MU_AF, N, &tiempoReconsFourierPartImagComp, &tiempoReconsFourierPartRealComp, &tiempoReconsTransInverComp);
tiempoCompresion = clock() - tiempoCompresion;
float tiempoTotalCompresion = ((float)tiempoCompresion)/CLOCKS_PER_SEC;
printf("Proceso de calculo de compresiones terminado.\n");
free(rutaADirecTer);
char* nombreArchivoInfoComp = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreArInfoCompresion)*sizeof(char)+sizeof(char)*2);
strcpy(nombreArchivoInfoComp, nombreDirPrin);
strcat(nombreArchivoInfoComp, "/");
strcat(nombreArchivoInfoComp, nombreArInfoCompresion);
FILE* archivo = fopen(nombreArchivoInfoComp, "a");
float nivelDeCompresion = 1.0 - cantCoefs * 1.0 / (N*N); // fraction of coefficients discarded (cantCoefs kept out of N*N)
fprintf(archivo, "%d %f %.12f %.12e %.12e %.12f %.12d\n", iterActual, ancho/delta_u, ancho, medidasDeInfo[0], medidasDeInfo[1], nivelDeCompresion, cantCoefs);
fclose(archivo);
free(nombreArchivoInfoComp);
free(medidasDeInfo);
cudaFree(MC_real);
cudaFree(MC_imag);
cudaFree(MU_AF);
cudaFree(MV_AF);
float tiempoTotalReconsFourierPartImagComp = ((float)tiempoReconsFourierPartImagComp)/CLOCKS_PER_SEC;
float tiempoTotalReconsFourierPartRealComp = ((float)tiempoReconsFourierPartRealComp)/CLOCKS_PER_SEC;
float tiempoTotalReconsTransInverComp = ((float)tiempoReconsTransInverComp)/CLOCKS_PER_SEC;
// ############### ESCRITURA DE ARCHIVO CON TIEMPOS DE EJECUCION ##############
char* nombreArchivoInfoTiemposEjecu = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreArInfoTiemposEjecu)*sizeof(char)+sizeof(char)*2);
strcpy(nombreArchivoInfoTiemposEjecu, nombreDirPrin);
strcat(nombreArchivoInfoTiemposEjecu, "/");
strcat(nombreArchivoInfoTiemposEjecu, nombreArInfoTiemposEjecu);
FILE* archivoInfoTiemposEjecu = fopen(nombreArchivoInfoTiemposEjecu, "a");
fprintf(archivoInfoTiemposEjecu, "%d %.12f %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e\n", iterActual, ancho, tiempoTotalCalculoMV, tiempoTotalCalculoMU, tiempoTotalMinPartImag, tiempoTotalMinPartReal, tiempoTotalInfo, tiempoTotalCompresion, tiempoTotalCalculoMV_AF, tiempoTotalCalculoMU_AF, tiempoTotalReconsFourierPartImag, tiempoTotalReconsFourierPartReal, tiempoTotalReconsTransInver, tiempoTotalReconsFourierPartImagComp, tiempoTotalReconsFourierPartRealComp, tiempoTotalReconsTransInverComp);
fclose(archivoInfoTiemposEjecu);
free(rutaADirecSec);
}
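// Objective function for the GSL 1-D minimizer: evaluates the rectangular basis at the
// given width and returns the negated information measure (medidasDeInfo[0]), so that
// minimizing this value maximizes the Fisher information.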
double funcOptiInfo_Traza_Rect(double ancho, void* params)
{
struct parametros_BaseRect* ps = (struct parametros_BaseRect*) params;
float* MV = calcularMV_Rect(ps->v, ps->delta_v, ps->cantVisi, ps->N, ps->estrechezDeBorde, ancho, ps->matrizDeUnos);
float* MU = calcularMV_Rect(ps->u, ps->delta_u, ps->cantVisi, ps->N, ps->estrechezDeBorde, ancho, ps->matrizDeUnos);
float* medidasDeInfo = calInfoFisherDiag(MV, ps->cantVisi, ps->N, MU, ps->w);
float medidaSumaDeLaDiagonal = medidasDeInfo[0];
free(medidasDeInfo);
cudaFree(MV);
cudaFree(MU);
return -1 * medidaSumaDeLaDiagonal;
}
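// Normal-basis counterpart of funcOptiInfo_Traza_Rect: same negated information
// measure, evaluated with calcularMV_Normal.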
double funcOptiInfo_Traza_Normal(double ancho, void* params)
{
struct parametros_BaseNormal* ps = (struct parametros_BaseNormal*) params;
float* MV = calcularMV_Normal(ps->v, ps->delta_v, ps->cantVisi, ps->N, ancho);
float* MU = calcularMV_Normal(ps->u, ps->delta_u, ps->cantVisi, ps->N, ancho);
float* medidasDeInfo = calInfoFisherDiag(MV, ps->cantVisi, ps->N, MU, ps->w);
float medidaSumaDeLaDiagonal = medidasDeInfo[0];
free(medidasDeInfo);
cudaFree(MV);
cudaFree(MU);
return -1 * medidaSumaDeLaDiagonal;
}
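// Golden-section search (gsl_min_fminimizer_quad_golden) for the rectangular-basis
// width maximizing the information measure, bracketed between 1.0*delta_u and
// 5.0*delta_u. If the initial guess does not bracket a minimum, it is nudged upward in
// steps of 0.001*delta_u until gsl_min_fminimizer_set accepts the interval.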
double goldenMin_BaseRect(float* u, float* v, float* w, float delta_u, float delta_v, float* matrizDeUnos, long cantVisi, long N, float estrechezDeBorde)
{
int status;
int iter = 0, max_iter = 100;
const gsl_min_fminimizer_type *T;
gsl_min_fminimizer *s;
gsl_function F;
parametros_BaseRect actual;
actual.u = u;
actual.v = v;
actual.w = w;
actual.delta_u = delta_u;
actual.delta_v = delta_v;
actual.matrizDeUnos = matrizDeUnos;
actual.cantVisi = cantVisi;
actual.N = N;
actual.estrechezDeBorde = estrechezDeBorde;
double m;
double a = 1.0 * actual.delta_u, b = 5.0 * actual.delta_u;
F.function = &funcOptiInfo_Traza_Rect;
void* punteroVoidAActual = &actual;
F.params = punteroVoidAActual;
T = gsl_min_fminimizer_quad_golden;
s = gsl_min_fminimizer_alloc (T);
gsl_set_error_handler_off();
m = 1.0 * actual.delta_u;
int status_interval = gsl_min_fminimizer_set (s, &F, m, a, b);
while(status_interval)
{
m += 0.001 * actual.delta_u;
printf("m ahora es %f\n", m/actual.delta_u);
status_interval = gsl_min_fminimizer_set (s, &F, m, a, b);
}
printf ("using %s method\n",
gsl_min_fminimizer_name (s));
printf ("%5s [%9s, %9s] %9s\n",
"iter", "lower", "upper", "min");
printf ("%5d [%.7f, %.7f] %.7f\n",
iter, a, b, m);
do
{
iter++;
status = gsl_min_fminimizer_iterate (s);
m = gsl_min_fminimizer_x_minimum (s);
a = gsl_min_fminimizer_x_lower (s);
b = gsl_min_fminimizer_x_upper (s);
status = gsl_min_test_interval (a, b, 0.01, 0.01);
if (status == GSL_SUCCESS)
printf ("Converged:\n");
printf ("%5d [%.7f, %.7f] "
"%.7f\n",
iter, a/delta_u, b/delta_u, m/delta_u);
}
while (status == GSL_CONTINUE && iter < max_iter);
gsl_min_fminimizer_free (s);
return m;
}
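// Normal-basis version of the golden-section width search; same bracketing and retry
// strategy, with a tighter interval tolerance in gsl_min_test_interval.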
double goldenMin_BaseNormal(float* u, float* v, float* w, float delta_u, float delta_v, long cantVisi, long N)
{
int status;
int iter = 0, max_iter = 100;
const gsl_min_fminimizer_type *T;
gsl_min_fminimizer *s;
gsl_function F;
parametros_BaseNormal actual;
actual.u = u;
actual.v = v;
actual.w = w;
actual.delta_u = delta_u;
actual.delta_v = delta_v;
actual.cantVisi = cantVisi;
actual.N = N;
double m = 1.5 * actual.delta_u;
double a = 1.0 * actual.delta_u, b = 5.0 * actual.delta_u;
F.function = &funcOptiInfo_Traza_Normal;
void* punteroVoidAActual = &actual;
F.params = punteroVoidAActual;
T = gsl_min_fminimizer_quad_golden;
s = gsl_min_fminimizer_alloc (T);
gsl_set_error_handler_off();
m = 1.0 * actual.delta_u;
int status_interval = gsl_min_fminimizer_set (s, &F, m, a, b);
while(status_interval)
{
m += 0.001 * actual.delta_u;
printf("m ahora es %f\n", m/actual.delta_u);
status_interval = gsl_min_fminimizer_set (s, &F, m, a, b);
}
printf ("using %s method\n",
gsl_min_fminimizer_name (s));
printf ("%5s [%9s, %9s] %9s\n",
"iter", "lower", "upper", "min");
printf ("%5d [%.7f, %.7f] %.7f\n",
iter, a, b, m);
do
{
iter++;
status = gsl_min_fminimizer_iterate (s);
m = gsl_min_fminimizer_x_minimum (s);
a = gsl_min_fminimizer_x_lower (s);
b = gsl_min_fminimizer_x_upper (s);
status = gsl_min_test_interval (a, b, 0.001, 0.0);
if (status == GSL_SUCCESS)
printf ("Converged:\n");
printf ("%5d [%.7f, %.7f] "
"%.7f\n",
iter, a/delta_u, b/delta_u,m/delta_u);
}
while (status == GSL_CONTINUE && iter < max_iter);
gsl_min_fminimizer_free (s);
return m;
}
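// Reads up to cantVisi visibilities from a plain-text file, one per line with columns
// frequency, real part, imaginary part, u, v, w; u and v are scaled by frecuencia/c to
// express them in wavelengths.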
void lecturaDeTXT(char nombreArchivo[], float* frecuencia, float* u, float* v, float* w, float* visi_parteImaginaria, float* visi_parteReal, long cantVisi)
{
long contador = 0;
FILE *fp;
size_t len = 0;
char *line = NULL;
ssize_t read;
float c_constant = 2.99792458E8;
fp = fopen(nombreArchivo, "r");
if (fp == NULL)
{
printf("No se pudo abrir el archivo %s",nombreArchivo);
exit(0);
}
while ((read = getline(&line, &len, fp)) != -1)
{
*frecuencia = atof(strtok(line, " "));
visi_parteReal[contador] = atof(strtok(NULL, " "));
visi_parteImaginaria[contador] = atof(strtok(NULL, " "));
u[contador] = atof(strtok(NULL, " ")) * (*frecuencia)/c_constant;
v[contador] = atof(strtok(NULL, " ")) * (*frecuencia)/c_constant;
w[contador] = atof(strtok(NULL, " "));
contador++;
if(contador == cantVisi)
break;
}
free(line);
fclose(fp);
}
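// Reads the number of visibilities from "<nombreArchivo>cantvisi.txt" (presumably
// written by the CASA export script deMSaTXT.py) into *cantVisi.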
void lectCantVisi(char nombreArchivo[], long* cantVisi)
{
long contador = 0;
FILE *fp;
size_t len = 0;
char *line = NULL;
ssize_t read;
char* nombreNuevoTXT = (char*) malloc(strlen(nombreArchivo)*sizeof(char)+sizeof(char)*20);
strcpy(nombreNuevoTXT, nombreArchivo);
strcat(nombreNuevoTXT, "cantvisi.txt");
fp = fopen(nombreNuevoTXT, "r");
if (fp == NULL)
{
printf("No se pudo abrir el archivo %s",nombreArchivo);
exit(0);
}
read = getline(&line, &len, fp);
printf("Se han leido %s visibilidades.\n", line);
*cantVisi = atoi(line);
free(line);
free(nombreNuevoTXT);
fclose(fp);
}
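// Reads all visibilities from "<nombreArchivo>.txt" with columns real part, imaginary
// part, u, v, w; there is no frequency column, so the coordinates are used as-is.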
void lectDeTXTcreadoDesdeMS(char nombreArchivo[], float* u, float* v, float* w, float* visi_parteImaginaria, float* visi_parteReal)
{
long contador = 0;
FILE *fp;
size_t len = 0;
char *line = NULL;
ssize_t read;
char* nombreNuevoTXT = (char*) malloc(strlen(nombreArchivo)*sizeof(char)+sizeof(char)*5);
strcpy(nombreNuevoTXT, nombreArchivo);
strcat(nombreNuevoTXT, ".txt");
fp = fopen(nombreNuevoTXT, "r");
if (fp == NULL)
{
printf("No se pudo abrir el archivo %s",nombreArchivo);
exit(0);
}
while ((read = getline(&line, &len, fp)) != -1)
{
visi_parteReal[contador] = atof(strtok(line, " "));
visi_parteImaginaria[contador] = atof(strtok(NULL, " "));
u[contador] = atof(strtok(NULL, " "));
v[contador] = atof(strtok(NULL, " "));
w[contador] = atof(strtok(NULL, " "));
contador++;
}
printf("El contador es %ld\n", contador);
free(line);
free(nombreNuevoTXT);
fclose(fp);
}
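// Same as lectDeTXTcreadoDesdeMS but keeps only the rows in the half-open range
// [inicio, fin), which allows loading a subset of a large measurement set.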
void lectDeTXTcreadoDesdeMSConLimite(char nombreArchivo[], float* u, float* v, float* w, float* visi_parteImaginaria, float* visi_parteReal, long inicio, long fin, long cantVisi)
{
long contador = 0;
long contadorIte = 0;
FILE *fp;
size_t len = 0;
char *line = NULL;
ssize_t read;
char* nombreNuevoTXT = (char*) malloc(strlen(nombreArchivo)*sizeof(char)+sizeof(char)*5);
strcpy(nombreNuevoTXT, nombreArchivo);
strcat(nombreNuevoTXT, ".txt");
fp = fopen(nombreNuevoTXT, "r");
printf("Nombre nuevo es %s\n", nombreNuevoTXT);
if (fp == NULL)
{
printf("No se pudo abrir el archivo %s",nombreArchivo);
exit(0);
}
while ((read = getline(&line, &len, fp)) != -1)
{
if (contadorIte >= inicio)
{
visi_parteReal[contador] = atof(strtok(line, " "));
visi_parteImaginaria[contador] = atof(strtok(NULL, " "));
u[contador] = atof(strtok(NULL, " "));
v[contador] = atof(strtok(NULL, " "));
w[contador] = atof(strtok(NULL, " "));
contador++;
}
contadorIte++;
if(contadorIte >= fin)
break;
}
printf("El contador es %ld\n", contador);
free(line);
free(nombreNuevoTXT);
fclose(fp);
}
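// Writes a human-readable header with the run date and the main execution parameters
// (input file, cantVisi, N, maxIter, tolGrad); the _Rect variant below also records
// estrechezDeBorde.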
void escrituraDeArchivoConParametros_Normal(char nombreArchivoPara[], char nombreArchivo[], char nombreDirPrin[], int cantVisi, int N, int maxIter, float tolGrad)
{
time_t t = time(NULL);
struct tm tm = *localtime(&t);
FILE* archivoDePara = fopen(nombreArchivoPara, "w");
fprintf(archivoDePara, "Programa inicio su ejecucion con fecha: %d-%d-%d %d:%d:%d\n", tm.tm_year + 1900, tm.tm_mon + 1,tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
fprintf(archivoDePara, "Compresion con base normal utilizando informacion del archivo %s cuyos parametros de ejecucion fueron:\n", nombreArchivo);
fprintf(archivoDePara, "Cantidad de visibilidades(cantVisi): %d\n", cantVisi);
fprintf(archivoDePara, "Cantidad de Coefs(N x N): %d x %d = %d\n", N, N, N*N);
fprintf(archivoDePara, "Maximo de iteraciones impuesto para la minimizacion de coeficientes(maxIter): %d\n", maxIter);
fprintf(archivoDePara, "Grado de tolerancia a la minimizacion de los coefs(tolGrad): %.12e\n", tolGrad);
fclose(archivoDePara);
}
void escrituraDeArchivoConParametros_Rect(char nombreArchivoPara[], char nombreArchivo[], char nombreDirPrin[], long cantVisi, long N, int maxIter, float tolGrad, float estrechezDeBorde)
{
time_t t = time(NULL);
struct tm tm = *localtime(&t);
FILE* archivoDePara = fopen(nombreArchivoPara, "w");
fprintf(archivoDePara, "Programa inicio su ejecucion con fecha: %d-%d-%d %d:%d:%d\n", tm.tm_year + 1900, tm.tm_mon + 1,tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
fprintf(archivoDePara, "Compresion con base rectangular utilizando informacion del archivo %s cuyos parametros de ejecucion fueron:\n", nombreArchivo);
fprintf(archivoDePara, "Estrechez de borde: %f\n", estrechezDeBorde);
fprintf(archivoDePara, "Cantidad de visibilidades(cantVisi): %ld\n", cantVisi);
fprintf(archivoDePara, "Cantidad de Coefs(N x N): %ld x %ld = %ld\n", N, N, N*N);
fprintf(archivoDePara, "Maximo de iteraciones impuesto para la minimizacion de coeficientes(maxIter): %d\n", maxIter);
fprintf(archivoDePara, "Grado de tolerancia a la minimizacion de los coefs(tolGrad): %.12e\n", tolGrad);
fclose(archivoDePara);
}
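// Driver for the normal basis: creates the output directory, records the run
// parameters, builds a linspace of widths between inicioIntervalo*delta_u and
// finIntervalo*delta_u, and runs calCompSegunAncho_Normal_escritura for each width in
// its own "ite<i>" subdirectory (this loop currently resumes at index 320, matching
// the "_parte2" run configured in main).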
void calculoDeInfoCompre_BaseNormal(char nombreArchivo[], int maxIter, float tolGrad, float tolGolden, float* u, float* v, float* w, float* visi_parteImaginaria, float* visi_parteReal, float delta_u, float delta_v, long cantVisi, long N, float cotaEnergia, char nombreDirPrin[], char nombreDirSec[], char nombreDirTer[], int cantParamEvaInfo, float inicioIntervalo, float finIntervalo, float* matrizDeUnosEstFourier, float estrechezDeBorde)
{
float inicioIntervaloEscalado = inicioIntervalo * delta_u;
float finIntervaloEscalado = finIntervalo * delta_u;
char nombreArPara[] = "parametrosEjecucion.txt";
if(cotaEnergia > 1.0)
{
printf("ERROR: La cota de energia debe estar expresado en decimales, no en porcentajes.\n");
printf("PROGRAMA ABORTADO.\n");
exit(0);
}
if(mkdir(nombreDirPrin, 0777) == -1)
{
printf("ERROR: El directorio EXISTE, PELIGRO DE SOBREESCRITURA, por favor eliga otro nombre de directorio.\n");
printf("PROGRAMA ABORTADO.\n");
exit(0);
}
else
printf("Directorio creado.\n");
char* nombreArchivoPara = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreArPara)*sizeof(char)+sizeof(char)*3);
strcpy(nombreArchivoPara, nombreDirPrin);
strcat(nombreArchivoPara, "/");
strcat(nombreArchivoPara, nombreArPara);
escrituraDeArchivoConParametros_Normal(nombreArchivoPara, nombreArchivo, nombreDirPrin, cantVisi, N, maxIter, tolGrad);
free(nombreArchivoPara);
// float optimo = goldenMin_BaseNormal(u, v, w, delta_u, delta_v, cantVisi, N);
// printf("El optimo esta en %.12f\n", optimo);
float* paramEvaInfo = linspace(inicioIntervaloEscalado, finIntervaloEscalado, cantParamEvaInfo);
// int i = 0;
for(int i=320; i<cantParamEvaInfo; i++)
{
char* numComoString = numAString(&i);
sprintf(numComoString, "%d", i);
char* nombreDirSecCopia = (char*) malloc(sizeof(char)*(strlen(nombreDirSec)+strlen(numComoString)+1));
strcpy(nombreDirSecCopia, nombreDirSec);
strcat(nombreDirSecCopia, numComoString);
calCompSegunAncho_Normal_escritura(nombreDirPrin, nombreDirSecCopia, nombreDirTer, paramEvaInfo[i], cotaEnergia, i, maxIter, tolGrad, u, v, w, visi_parteImaginaria, visi_parteReal, delta_u, delta_v, cantVisi, N, matrizDeUnosEstFourier);
free(numComoString);
free(nombreDirSecCopia);
}
}
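// Rectangular-basis driver, identical in structure to calculoDeInfoCompre_BaseNormal
// but forwarding matrizDeUnos and estrechezDeBorde to the per-width routine.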
void calculoDeInfoCompre_BaseRect(char nombreArchivo[], int maxIter, float tolGrad, float tolGolden, float* u, float* v, float* w, float* visi_parteImaginaria, float* visi_parteReal, float delta_u, float delta_v, float* matrizDeUnos, long cantVisi, long N, float cotaEnergia, char nombreDirPrin[], char nombreDirSec[], char nombreDirTer[], int cantParamEvaInfo, float inicioIntervalo, float finIntervalo, float* matrizDeUnosEstFourier, float estrechezDeBorde)
{
float inicioIntervaloEscalado = inicioIntervalo * delta_u;
float finIntervaloEscalado = finIntervalo * delta_u;
char nombreArPara[] = "parametrosEjecucion.txt";
if(cotaEnergia > 1.0)
{
printf("ERROR: La cota de energia debe estar expresado en decimales, no en porcentajes.\n");
printf("PROGRAMA ABORTADO.\n");
exit(0);
}
if(mkdir(nombreDirPrin, 0777) == -1)
{
printf("ERROR: El directorio EXISTE, PELIGRO DE SOBREESCRITURA, por favor eliga otro nombre de directorio.\n");
printf("PROGRAMA ABORTADO.\n");
exit(0);
}
else
printf("Directorio creado.\n");
char* nombreArchivoPara = (char*) malloc(strlen(nombreDirPrin)*strlen(nombreArPara)*sizeof(char)+sizeof(char)*3);
strcpy(nombreArchivoPara, nombreDirPrin);
strcat(nombreArchivoPara, "/");
strcat(nombreArchivoPara, nombreArPara);
escrituraDeArchivoConParametros_Rect(nombreArchivoPara, nombreArchivo, nombreDirPrin, cantVisi, N, maxIter, tolGrad, estrechezDeBorde);
free(nombreArchivoPara);
// float optimo = goldenMin_BaseRect(u, v, w, delta_u, delta_v, matrizDeUnos, cantVisi, N, estrechezDeBorde);
// printf("El optimo esta en %.12f\n", optimo);
float* paramEvaInfo = linspace(inicioIntervaloEscalado, finIntervaloEscalado, cantParamEvaInfo);
// int i = 0;
for(int i=0; i<cantParamEvaInfo; i++)
{
char* numComoString = numAString(&i);
sprintf(numComoString, "%d", i);
char* nombreDirSecCopia = (char*) malloc(sizeof(char)*(strlen(nombreDirSec)+strlen(numComoString)+1));
strcpy(nombreDirSecCopia, nombreDirSec);
strcat(nombreDirSecCopia, numComoString);
calCompSegunAncho_Rect_escritura(nombreDirPrin, nombreDirSecCopia, nombreDirTer, paramEvaInfo[i], cotaEnergia, i, maxIter, tolGrad, u, v, w, visi_parteImaginaria, visi_parteReal, delta_u, delta_v, matrizDeUnos, cantVisi, N, matrizDeUnosEstFourier, estrechezDeBorde);
free(numComoString);
free(nombreDirSecCopia);
}
}
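// For a fixed rectangular-basis width, minimizes both coefficient sets once and then
// writes one reconstructed image per requested compression percentage: the coefficient
// moduli are sorted on the GPU with ArrayFire, the largest ones up to each percentage
// are kept, and the corresponding inverse Fourier transform is written to
// "reconsCompreImg_<j>.fit" inside nombreDirPrin.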
void calImagenesADistintasCompresiones_Rect(float inicioIntervalo, float finIntervalo, int cantParamEvaInfo, char nombreDirPrin[], float ancho, int maxIter, float tol, float* u, float* v, float* w, float* visi_parteImaginaria, float* visi_parteReal, float delta_u, float delta_v, float* matrizDeUnos, long cantVisi, long N, float* matrizDeUnosTamN, float estrechezDeBorde)
{
if(mkdir(nombreDirPrin, 0777) == -1)
{
printf("ERROR: El directorio EXISTE, PELIGRO DE SOBREESCRITURA, por favor eliga otro nombre de directorio.\n");
printf("PROGRAMA ABORTADO.\n");
exit(0);
}
else
printf("Directorio creado.\n");
char nombreArReconsCompreImg[] = "reconsCompreImg";
float* paramEvaInfo = linspace(inicioIntervalo/100.0, finIntervalo/100.0, cantParamEvaInfo);
// ############### CALCULO DE MU Y MV - CREACION DE DIRECTORIO SEGUNDARIO ##############
printf("...Comenzando calculo de MV...\n");
float* MV = calcularMV_Rect(v, delta_v, cantVisi, N, estrechezDeBorde, ancho, matrizDeUnos);
printf("Calculo de MV completado.\n");
printf("...Comenzando calculo de MU...\n");
float* MU = calcularMV_Rect(u, delta_u, cantVisi, N, estrechezDeBorde, ancho, matrizDeUnos);
printf("Calculo de MU completado.\n");
// ############### MINIMIZACION DE COEFS, PARTE IMAGINARIA ##############
printf("...Comenzando minimizacion de coeficientes parte imaginaria...\n");
float* MC_imag = minGradConjugado_MinCuadra(MV, MU, visi_parteImaginaria, w, cantVisi, N, matrizDeUnosTamN, maxIter, tol);
printf("Proceso de minimizacion de coeficientes parte imaginaria terminado.\n");
// ############### MINIMIZACION DE COEFS, PARTE REAL ##############
printf("...Comenzando minimizacion de coeficientes parte real...\n");
float* MC_real = minGradConjugado_MinCuadra(MV, MU, visi_parteReal, w, cantVisi, N, matrizDeUnosTamN, maxIter, tol);
printf("Proceso de minimizacion de coeficientes parte real terminado.\n");
float* MV_AF = calcularMV_Rect_estFourier(ancho, N, delta_v, matrizDeUnos, estrechezDeBorde, matrizDeUnosTamN);
float* MU_AF = calcularMV_Rect_estFourier(ancho, N, delta_u, matrizDeUnos, estrechezDeBorde, matrizDeUnosTamN);
float* MC_comp_imag;
cudaMallocManaged(&MC_comp_imag,N*N*sizeof(float));
cudaMemset(MC_comp_imag, 0, N*N*sizeof(float));
float* MC_comp_real;
cudaMallocManaged(&MC_comp_real,N*N*sizeof(float));
cudaMemset(MC_comp_real, 0, N*N*sizeof(float));
long largo = N * N;
float* MC_img_cuadrado;
cudaMallocManaged(&MC_img_cuadrado, N*N*sizeof(float));
float* MC_modulo;
cudaMallocManaged(&MC_modulo, N*N*sizeof(float));
hadamardProduct(MC_imag, N, N, MC_imag, MC_img_cuadrado);
hadamardProduct(MC_real, N, N, MC_real, MC_modulo);
combinacionLinealMatrices(1.0, MC_img_cuadrado, N, N, 1.0, MC_modulo);
cudaFree(MC_img_cuadrado);
af::array MC_modulo_GPU(N*N, MC_modulo);
cudaFree(MC_modulo);
af::array MC_modulo_indicesOrde_GPU(N*N);
af::array MC_modulo_Orde_GPU(N*N);
af::sort(MC_modulo_Orde_GPU, MC_modulo_indicesOrde_GPU, MC_modulo_GPU, 0, false);
float total = af::sum<float>(MC_modulo_GPU);
MC_modulo_Orde_GPU = MC_modulo_Orde_GPU/total;
af::eval(MC_modulo_Orde_GPU);
af::eval(MC_modulo_indicesOrde_GPU);
af::sync();
float* auxiliar_MC_modulo_Orde_GPU = MC_modulo_Orde_GPU.device<float>();
float* auxiliar_MC_modulo_indicesOrde_GPU = MC_modulo_indicesOrde_GPU.device<float>();
float* coefsNormalizados = (float*) malloc(largo*sizeof(float));
cudaMemcpy(coefsNormalizados, auxiliar_MC_modulo_Orde_GPU, N*N*sizeof(float), cudaMemcpyDeviceToHost);
int* MC_modulo_indicesOrde_CPU = (int*) malloc(largo*sizeof(int));
cudaMemcpy(MC_modulo_indicesOrde_CPU, auxiliar_MC_modulo_indicesOrde_GPU, N*N*sizeof(int), cudaMemcpyDeviceToHost);
MC_modulo_Orde_GPU.unlock();
MC_modulo_GPU.unlock();
MC_modulo_indicesOrde_GPU.unlock();
long cantCoefsParaCota = 0;
float sumador = 0.0;
float* cantCoefsPorParametro = (float*) malloc(sizeof(float)*cantParamEvaInfo);
float* cantidadPorcentualDeCoefs = linspace(1.0, largo, largo);
combinacionLinealMatrices(0.0, cantidadPorcentualDeCoefs, largo, 1, 1.0/N, cantidadPorcentualDeCoefs);
for(long j=0; j<cantParamEvaInfo; j++)
{
sumador = 0.0;
cantCoefsParaCota = 0;
for(long i=0; i<largo; i++)
{
sumador += coefsNormalizados[i];
cantCoefsParaCota++;
if(cantidadPorcentualDeCoefs[i] >= paramEvaInfo[j])
{
printf("Del %f%% solicitado, se ha tomado el mas cercano correspondiente al %f%% de coefs, lo que corresponde a un total de %ld coeficientes los cuales poseen el %f%% de la energia.\n", paramEvaInfo[j], cantidadPorcentualDeCoefs[i], cantCoefsParaCota, sumador);
break;
}
}
float* indicesATomar_CPU = (float*) malloc(cantCoefsParaCota*sizeof(float));
for(int k=0; k<cantCoefsParaCota; k++)
{
indicesATomar_CPU[k] = MC_modulo_indicesOrde_CPU[k];
}
af::array indicesATomar_GPU(cantCoefsParaCota, indicesATomar_CPU);
free(indicesATomar_CPU);
af::array indRepComp = af::constant(0, largo);
indRepComp(indicesATomar_GPU) = 1;
indicesATomar_GPU.unlock();
af::array MC_imag_GPU(N*N, MC_imag);
af::array MC_real_GPU(N*N, MC_real);
MC_imag_GPU = MC_imag_GPU * indRepComp;
MC_real_GPU = MC_real_GPU * indRepComp;
af::eval(MC_imag_GPU);
af::eval(MC_real_GPU);
af::sync();
indRepComp.unlock();
float* auxiliar_MC_imag_GPU = MC_imag_GPU.device<float>();
float* auxiliar_MC_real_GPU = MC_real_GPU.device<float>();
cudaMemcpy(MC_comp_imag, auxiliar_MC_imag_GPU, N*N*sizeof(float), cudaMemcpyDeviceToHost);
MC_imag_GPU.unlock();
cudaMemcpy(MC_comp_real, auxiliar_MC_real_GPU, N*N*sizeof(float), cudaMemcpyDeviceToHost);
MC_real_GPU.unlock();
float* estimacionFourier_compre_ParteImag = estimacionDePlanoDeFourier(MV_AF, N, N, MC_comp_imag, N, N, MU_AF);
float* estimacionFourier_compre_ParteReal = estimacionDePlanoDeFourier(MV_AF, N, N, MC_comp_real, N, N, MU_AF);
int numero = j+1;
char* numComoString = numAString(&numero);
sprintf(numComoString, "%d", numero);
char* nombreArchivoReconsImgComp = (char*) malloc(sizeof(char)*strlen(nombreDirPrin)*strlen(numComoString)*strlen(nombreArReconsCompreImg)+sizeof(char)*7);
strcpy(nombreArchivoReconsImgComp, nombreDirPrin);
strcat(nombreArchivoReconsImgComp, "/");
strcat(nombreArchivoReconsImgComp, nombreArReconsCompreImg);
strcat(nombreArchivoReconsImgComp, "_");
strcat(nombreArchivoReconsImgComp, numComoString);
strcat(nombreArchivoReconsImgComp, ".fit");
printf("%s\n", nombreArchivoReconsImgComp);
escribirTransformadaInversaFourier2D(estimacionFourier_compre_ParteImag, estimacionFourier_compre_ParteReal, N, nombreArchivoReconsImgComp);
cudaFree(estimacionFourier_compre_ParteImag);
cudaFree(estimacionFourier_compre_ParteReal);
free(numComoString);
free(nombreArchivoReconsImgComp);
}
cudaFree(MU_AF);
cudaFree(MV_AF);
free(coefsNormalizados);
free(MC_modulo_indicesOrde_CPU);
}
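// Experimental helper with a hard-coded input path: loads a PSNR-vs-compression curve,
// smooths it with a GSL Gaussian filter, and for every split point reports the ratio
// between the slopes of the two fitted line segments (listaDeMetricas) as a
// knee-detection metric.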
void filtroGaussiano()
{
int largoVector = 100;
float* porcenReal = (float*) malloc(sizeof(float)*largoVector);
float* vector = (float*) malloc(sizeof(float)*largoVector);
long contador = 0;
FILE *fp;
size_t len = 0;
char *line = NULL;
ssize_t read;
fp = fopen("/home/rarmijo/psnr_hd142_rect.txt", "r");
if (fp == NULL)
{
printf("No se pudo abrir el archivo");
exit(0);
}
while ((read = getline(&line, &len, fp)) != -1)
{
porcenReal[largoVector-1-contador] = atof(strtok(line, " "));
strtok(NULL, " ");
vector[contador] = atof(strtok(NULL, " "));
contador++;
}
printf("El contador es %ld\n", contador);
free(line);
fclose(fp);
// for(int i=0; i<largoVector; i++)
// {
// printf("%f\n", porcenReal[i]);
// }
// exit(-1);
float* vectorFiltrado = (float*) calloc(largoVector, sizeof(float));
gsl_vector* copiaVectorEnGSL = gsl_vector_alloc(largoVector);
gsl_vector* vectorEnGSLFiltrado = gsl_vector_alloc(largoVector);
for(int i=0; i<largoVector; i++)
{
gsl_vector_set(copiaVectorEnGSL, i, vector[largoVector-1-i]);
}
gsl_filter_gaussian_workspace* gauss_p = gsl_filter_gaussian_alloc(largoVector);
gsl_filter_gaussian(GSL_FILTER_END_TRUNCATE, 1.0, 0, copiaVectorEnGSL, vectorEnGSLFiltrado, gauss_p);
for(int i=0; i<largoVector; i++)
{
vectorFiltrado[i] = gsl_vector_get(vectorEnGSLFiltrado, i); // read the Gaussian-filtered values, not the unfiltered copy
}
gsl_vector_free(copiaVectorEnGSL);
gsl_vector_free(vectorEnGSLFiltrado);
gsl_filter_gaussian_free(gauss_p);
float* listaDeMetricas = (float*) malloc(sizeof(float)*largoVector);
float* primeraRecta_subListaDeX = (float*) calloc(largoVector, sizeof(float));
float* primeraRecta_subListaDeY = (float*) calloc(largoVector, sizeof(float));
float* segundaRecta_subListaDeX = (float*) calloc(largoVector, sizeof(float));
float* segundaRecta_subListaDeY = (float*) calloc(largoVector, sizeof(float));
memcpy(segundaRecta_subListaDeX, porcenReal, sizeof(float)*largoVector);
memcpy(segundaRecta_subListaDeY, vectorFiltrado, sizeof(float)*largoVector);
primeraRecta_subListaDeX[0] = porcenReal[0];
primeraRecta_subListaDeY[0] = vectorFiltrado[0];
for(int i=1; i<largoVector-1; i++)
{
primeraRecta_subListaDeX[i] = porcenReal[i];
primeraRecta_subListaDeY[i] = vectorFiltrado[i];
float pendienteDePrimeraRecta = calPendiente(primeraRecta_subListaDeX, i+1, primeraRecta_subListaDeY);
// printf("En la iteracion %d la pendienteDePrimeraRecta es %f\n", i, pendienteDePrimeraRecta);
segundaRecta_subListaDeX[i-1] = 0.0;
segundaRecta_subListaDeY[i-1] = 0.0;
float pendienteDeSegundaRecta = calPendiente(&(segundaRecta_subListaDeX[i]), largoVector-i, &(segundaRecta_subListaDeY[i]));
// printf("En la iteracion %d la pendienteDeSegundaRecta es %f\n", i, pendienteDeSegundaRecta);
listaDeMetricas[i] = -1 * pendienteDeSegundaRecta/pendienteDePrimeraRecta;
printf("%f\n", listaDeMetricas[i]);
}
free(primeraRecta_subListaDeX);
free(primeraRecta_subListaDeY);
free(segundaRecta_subListaDeX);
free(segundaRecta_subListaDeY);
}
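// Entry point: loads visibilities exported from a CASA measurement set, allocates the
// auxiliary matrices of ones, runs the normal-basis width sweep
// (calculoDeInfoCompre_BaseNormal) over 800 widths between 1.0 and 3.0 times delta_u,
// and writes the total execution time measured with clock() to <dir>/tiempo.txt.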
int main()
{
// PARAMETROS GENERALES
long cantVisi = 15034;
long inicio = 0;
long fin = 15034;
// long cantVisi = 30000;
// long inicio = 0;
// long fin = 30000;
int N = 512;
// long N = 1600; //HLTau_B6cont.calavg.tav300s
int maxIter = 100;
float tolGrad = 1E-12;
float delta_x = 0.02;
// float delta_x = 0.005; //HLTau_B6cont.calavg.tav300s
// float delta_x = 0.03; //co65
float delta_x_rad = (delta_x * M_PI)/648000.0;
float delta_u = 1.0/(N*delta_x_rad);
float delta_v = 1.0/(N*delta_x_rad);
//PARAMETROS PARTICULARES DE BASE RECT
float estrechezDeBorde = 1000.0;
// float frecuencia;
// float *u, *v, *w, *visi_parteImaginaria, *visi_parteReal;
// cudaMallocManaged(&u, cantVisi*sizeof(float));
// cudaMallocManaged(&v, cantVisi*sizeof(float));
// cudaMallocManaged(&w, cantVisi*sizeof(float));
// cudaMallocManaged(&visi_parteImaginaria, cantVisi*sizeof(float));
// cudaMallocManaged(&visi_parteReal, cantVisi*sizeof(float));
// char nombreArchivo[] = "hd142_b9cont_self_tav.0.0.txt";
// lecturaDeTXT(nombreArchivo, &frecuencia, u, v, w, visi_parteImaginaria, visi_parteReal, cantVisi);
// // ########### NOTEBOOK ##############
// char nombreArchivo[] = "/home/yoyisaurio/Desktop/HLTau_B6cont.calavg.tav300s";
// char comandoCasaconScript[] = "/home/yoyisaurio/casa-pipeline-release-5.6.2-2.el7/bin/casa -c /home/yoyisaurio/Desktop/proyecto/deMSaTXT.py";
// // ########### PC-LAB ##############
// char nombreArchivo[] = "/home/rarmijo/Desktop/proyecto/HLTau_B6cont.calavg.tav300s";
// char comandoCasaconScript[] = "/home/rarmijo/casa-pipeline-release-5.6.2-2.el7/bin/casa -c ./deMSaTXT.py";
// // ########### PC-LAB ##############
// char nombreArchivo[] = "./co65.ms";
// char comandoCasaconScript[] = "/home/rarmijo/casa-pipeline-release-5.6.2-2.el7/bin/casa -c ./deMSaTXT.py";
// // ########### BEAM ##############
// char nombreArchivo[] = "./HLTau_B6cont.calavg.tav300s";
// char comandoCasaconScript[] = "casa -c ./deMSaTXT.py";
// // ########### BEAM ##############
// char nombreArchivo[] = "./FREQ78.ms";
// char comandoCasaconScript[] = "casa -c ./deMSaTXT.py";
// // // ########### BEAM ##############
// char nombreArchivo[] = "./co65.ms";
// char comandoCasaconScript[] = "casa -c ./deMSaTXT.py";
// ########### BEAM ##############
char nombreArchivo[] = "./hd142_b9cont_self_tav.ms";
char comandoCasaconScript[] = "casa -c ./deMSaTXT.py";
// // ########### BEAM ##############
// char nombreArchivo[] = "/home/rarmijo/HLTau_Band6_CalibratedData/HLTau_B6cont.calavg";
// char comandoCasaconScript[] = "casa -c ./deMSaTXT.py";
// char* comandoScriptMSaTXT = (char*) malloc(strlen(comandoCasaconScript)*strlen(nombreArchivo)*sizeof(char)+sizeof(char)*3);
// strcpy(comandoScriptMSaTXT, comandoCasaconScript);
// strcat(comandoScriptMSaTXT, " ");
// strcat(comandoScriptMSaTXT, nombreArchivo);
// system(comandoScriptMSaTXT);
// free(comandoScriptMSaTXT);
lectCantVisi(nombreArchivo, &cantVisi);
float *u, *v, *w, *visi_parteImaginaria, *visi_parteReal;
cudaMallocManaged(&u, cantVisi*sizeof(float));
cudaMallocManaged(&v, cantVisi*sizeof(float));
cudaMallocManaged(&w, cantVisi*sizeof(float));
cudaMallocManaged(&visi_parteImaginaria, cantVisi*sizeof(float));
cudaMallocManaged(&visi_parteReal, cantVisi*sizeof(float));
lectDeTXTcreadoDesdeMS(nombreArchivo, u, v, w, visi_parteImaginaria, visi_parteReal);
// lectDeTXTcreadoDesdeMSConLimite(nombreArchivo, u, v, w, visi_parteImaginaria, visi_parteReal, inicio, fin, cantVisi);
float* matrizDeUnos, *matrizDeUnosEstFourier;
cudaMallocManaged(&matrizDeUnos, cantVisi*N*sizeof(float));
for(long i=0; i<(cantVisi*N); i++)
{
matrizDeUnos[i] = 1.0;
}
cudaMallocManaged(&matrizDeUnosEstFourier, N*sizeof(float));
for(long i=0; i<N; i++)
{
matrizDeUnosEstFourier[i] = 1.0;
}
float cotaEnergia = 0.99;
// char nombreDirPrin[] = "float_calCompresion_baseNormal_cota";
char nombreDirPrin[] = "experi_hd142_Normal_visi800_parte2";
char nombreDirSec[] = "ite";
char nombreDirTer[] = "compresiones";
char nombreArchivoTiempo[] = "tiempo.txt";
int cantParamEvaInfo = 800;
// float inicioIntervalo = 0.8;
float inicioIntervalo = 1.0;
float finIntervalo = 3.0;
float tolGolden = 1E-12;
int iterActual = 0;
clock_t t;
t = clock();
calculoDeInfoCompre_BaseNormal(nombreArchivo, maxIter, tolGrad, tolGolden, u, v, w, visi_parteImaginaria, visi_parteReal, delta_u, delta_v, cantVisi, N, cotaEnergia, nombreDirPrin, nombreDirSec, nombreDirTer, cantParamEvaInfo, inicioIntervalo, finIntervalo, matrizDeUnosEstFourier, estrechezDeBorde);
// calculoDeInfoCompre_BaseRect(nombreArchivo, maxIter, tolGrad, tolGolden, u, v, w, visi_parteImaginaria, visi_parteReal, delta_u, delta_v, matrizDeUnos, cantVisi, N, cotaEnergia, nombreDirPrin, nombreDirSec, nombreDirTer, cantParamEvaInfo, inicioIntervalo, finIntervalo, matrizDeUnosEstFourier, estrechezDeBorde);
t = clock() - t;
float time_taken = ((float)t)/CLOCKS_PER_SEC;
char* nombreCompletoArchivoTiempo = (char*) malloc(sizeof(char)*strlen(nombreArchivoTiempo)*strlen(nombreDirPrin)+sizeof(char)*3);
strcpy(nombreCompletoArchivoTiempo, nombreDirPrin);
strcat(nombreCompletoArchivoTiempo, "/");
strcat(nombreCompletoArchivoTiempo, nombreArchivoTiempo);
FILE* archivoTiempo = fopen(nombreCompletoArchivoTiempo, "w");
float minutitos = time_taken/60;
float horas = minutitos/60;
printf("El tiempo de ejecucion fue %.12f segundos o %.12f minutos o %.12f horas.\n", time_taken, minutitos, horas);
fprintf(archivoTiempo, "El tiempo de ejecucion fue %.12f segundos o %.12f minutos o %.12f horas.\n", time_taken, minutitos, horas);
fclose(archivoTiempo);
// // char nombreDirPrin[] = "calCompresiones_Normal";
// // char nombreArchivoTiempo[] = "tiempo.txt";
// // int cantParamEvaInfo = 100;
// // float inicioIntervalo = 1.0;
// // float finIntervalo = 100.0;
// // float tolGolden = 1E-12;
// // float nuevoAncho = 1.0 * delta_u;
// // clock_t t;
// // t = clock();
// // calPSNRDeDistintasCompresiones_Normal(inicioIntervalo, finIntervalo, cantParamEvaInfo, nombreDirPrin, nuevoAncho, maxIter, tolGrad, u, v, w, visi_parteImaginaria, visi_parteReal, delta_u, delta_v, cantVisi, N, matrizDeUnosEstFourier, estrechezDeBorde);
// // // calPSNRDeDistintasCompresiones_Rect(inicioIntervalo, finIntervalo, cantParamEvaInfo, nombreDirPrin, nuevoAncho, maxIter, tolGrad, u, v, w, visi_parteImaginaria, visi_parteReal, delta_u, delta_v, matrizDeUnos, cantVisi, N, matrizDeUnosEstFourier, estrechezDeBorde);
// // // calImagenesADistintasCompresiones_Rect(inicioIntervalo, finIntervalo, cantParamEvaInfo, nombreDirPrin, nuevoAncho, maxIter, tolGrad, u, v, w, visi_parteImaginaria, visi_parteReal, delta_u, delta_v, matrizDeUnos, cantVisi, N, matrizDeUnosEstFourier, estrechezDeBorde);
// // t = clock() - t;
// // float time_taken = ((float)t)/CLOCKS_PER_SEC;
// // char* nombreCompletoArchivoTiempo = (char*) malloc(sizeof(char)*strlen(nombreArchivoTiempo)*strlen(nombreDirPrin)+sizeof(char)*3);
// // strcpy(nombreCompletoArchivoTiempo, nombreDirPrin);
// // strcat(nombreCompletoArchivoTiempo, "/");
// // strcat(nombreCompletoArchivoTiempo, nombreArchivoTiempo);
// // FILE* archivoTiempo = fopen(nombreCompletoArchivoTiempo, "w");
// // float minutitos = time_taken/60;
// // float horas = minutitos/60;
// // printf("El tiempo de ejecucion fue %.12f segundos o %.12f minutos o %.12f horas.\n", time_taken, minutitos, horas);
// // fprintf(archivoTiempo, "El tiempo de ejecucion fue %.12f segundos o %.12f minutos o %.12f horas.\n", time_taken, minutitos, horas);
// // fclose(archivoTiempo);
//
//
// // printf("...Comenzando calculo de MV...\n");
// // clock_t tiempoCalculoMV;
// // tiempoCalculoMV = clock();
// // float* MV = calcularMV_Rect(v, delta_v, cantVisi, N, estrechezDeBorde, delta_v, matrizDeUnos);
// // tiempoCalculoMV = clock() - tiempoCalculoMV;
// // float tiempoTotalCalculoMV = ((float)tiempoCalculoMV)/CLOCKS_PER_SEC;
// // printf("Calculo de MV completado.\n");
// //
// // printf("...Comenzando calculo de MU...\n");
// // clock_t tiempoCalculoMU;
// // tiempoCalculoMU = clock();
// // float* MU = calcularMV_Rect(u, delta_u, cantVisi, N, estrechezDeBorde, delta_u, matrizDeUnos);
// // tiempoCalculoMU = clock() - tiempoCalculoMU;
// // float tiempoTotalCalculoMU = ((float)tiempoCalculoMU)/CLOCKS_PER_SEC;
// // printf("Calculo de MU completado.\n");
// //
// // int blockSize; // The launch configurator returned block size
// // int minGridSize; // The minimum grid size needed to achieve the
// // // maximum occupancy for a full device launch
// // int gridSize; // The actual grid size needed, based on input size
// //
// // cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, transponerMatriz_kernel, 0, 0);
// // // Round up according to array size
// // gridSize = (cantVisi*N + blockSize - 1) / blockSize;
// //
// // // long cantBloques = ceil((float) cantFilas*N/1024);
// // // hadamardProduct_kernel<<<gridSize,blockSize>>>(MU, MV, matrizDeUnos, cantVisi, N);
// // // combinacionLinealMatrices_kernel<<<gridSize,blockSize>>>(5.0, MU, cantVisi, N, 5.0, MV);
// // transponerMatriz_kernel<<<gridSize,blockSize>>>(MU, matrizDeUnos, cantVisi, N);
// // cudaDeviceSynchronize();
// //
// // // calculate theoretical occupancy
// // int maxActiveBlocks;
// // cudaOccupancyMaxActiveBlocksPerMultiprocessor(&maxActiveBlocks, transponerMatriz_kernel, blockSize, 0);
// //
// // int device;
// // cudaDeviceProp props;
// // cudaGetDevice(&device);
// // cudaGetDeviceProperties(&props, device);
// //
// // float occupancy = (maxActiveBlocks * blockSize / props.warpSize) /
// // (float)(props.maxThreadsPerMultiProcessor /
// // props.warpSize);
// //
// // printf("Launched blocks of size %d. Theoretical occupancy: %f\n",
// // blockSize, occupancy);
//
// cudaFree(u);
// cudaFree(v);
// cudaFree(w);
// cudaFree(visi_parteImaginaria);
// cudaFree(visi_parteReal);
// cudaFree(matrizDeUnos);
// cudaFree(matrizDeUnosEstFourier);
}
|
efe4e0c9faa5c8bcf5a332c6a5eaea4e0be2d21d.hip | // !!! This is a file automatically generated by hipify!!!
//
// DeformableGPUSurfaceMT.cpp
//
// Copyright (C) 2013 by University of Stuttgart (VISUS).
// All rights reserved.
//
// Created on : Sep 17, 2013
// Author : scharnkn
//
#include "vislib_gl/graphics/gl/IncludeAllGL.h"
#include "DeformableGPUSurfaceMT.h"
//#ifndef CUDA_NO_SM_11_ATOMIC_INTRINSICS
// printf("WARNING! Not using atomics!\n");
//#endif
#include "ogl_error_check.h"
#include "cuda_error_check.h"
#include "HostArr.h"
#include "DiffusionSolver.h"
#include "CUDAGrid.cuh"
#include <algorithm>
#include <hip/hip_runtime.h>
#define WGL_NV_gpu_affinity
#include <cuda_gl_interop.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/device_ptr.h>
#include "vislib/Array.h"
#include "vislib/math/Vector.h"
//#define USE_TIMER
using namespace megamol;
using namespace megamol::protein_cuda;
/**
* Samples the field at a given position using linear interpolation.
*
* @param pos The position
* @return The sampled value of the field
*/
float4 SampleFieldAtPosTrilin(
float pos[3],
float4 *field,
float gridOrg[3],
float gridDelta[3],
int gridSize[3]) {
int cell[3];
float x[3];
// Get id of the cell containing the given position and interpolation
// coefficients
x[0] = (pos[0]-gridOrg[0])/gridDelta[0];
x[1] = (pos[1]-gridOrg[1])/gridDelta[1];
x[2] = (pos[2]-gridOrg[2])/gridDelta[2];
cell[0] = (int)(x[0]);
cell[1] = (int)(x[1]);
cell[2] = (int)(x[2]);
x[0] = x[0]-(float)cell[0]; // alpha
x[1] = x[1]-(float)cell[1]; // beta
x[2] = x[2]-(float)cell[2]; // gamma
float alpha = x[0];
float beta = x[1];
float gamma = x[2];
cell[0] = ::min(::max(cell[0], int(0)), gridSize[0]-2);
cell[1] = ::min(::max(cell[1], int(0)), gridSize[1]-2);
cell[2] = ::min(::max(cell[2], int(0)), gridSize[2]-2);
// Get values at corners of current cell
float4 n0, n1, n2, n3, n4, n5, n6, n7;
// printf("dim %i %i %i\n", gridSize[0], gridSize[1], gridSize[2]);
// printf("cell %i %i %i\n", cell[0], cell[1], cell[2]);
size_t fieldSize =gridSize[0]*gridSize[1]*gridSize[2];
if (gridSize[0]*(gridSize[1]*(cell[2]+0) + (cell[1]+0))+cell[0]+0 > fieldSize) {
printf("Overflow %i\n", gridSize[0]*(gridSize[1]*(cell[2]+0) + (cell[1]+0))+cell[0]+0);
}
n0 = field[gridSize[0]*(gridSize[1]*(cell[2]+0) + (cell[1]+0))+cell[0]+0];
n1 = field[gridSize[0]*(gridSize[1]*(cell[2]+0) + (cell[1]+0))+cell[0]+1];
n2 = field[gridSize[0]*(gridSize[1]*(cell[2]+0) + (cell[1]+1))+cell[0]+0];
n3 = field[gridSize[0]*(gridSize[1]*(cell[2]+0) + (cell[1]+1))+cell[0]+1];
n4 = field[gridSize[0]*(gridSize[1]*(cell[2]+1) + (cell[1]+0))+cell[0]+0];
n5 = field[gridSize[0]*(gridSize[1]*(cell[2]+1) + (cell[1]+0))+cell[0]+1];
n6 = field[gridSize[0]*(gridSize[1]*(cell[2]+1) + (cell[1]+1))+cell[0]+0];
n7 = field[gridSize[0]*(gridSize[1]*(cell[2]+1) + (cell[1]+1))+cell[0]+1];
float4 a, b, c, d, e, f, g, h;
a = n0;
b = n1 - n0;
c = n2 - n0;
d = n3 - n1 - n2 + n0;
e = n4 - n0;
f = n5 - n1 - n4 + n0;
g = n6 - n2 - n4 + n0;
h = n7 - n3 - n5 - n6 + n1 + n2 + n4 - n0;
return a + b*alpha + c*beta + d*alpha*beta + e*gamma + f*alpha*gamma
+ g*beta*gamma + h*alpha*beta*gamma;
}
float SampleFieldAtPosTrilin(
float pos[3],
float *field,
float gridOrg[3],
float gridDelta[3],
int gridSize[3]) {
int cell[3];
float x[3];
// Get id of the cell containing the given position and interpolation
// coefficients
x[0] = (pos[0]-gridOrg[0])/gridDelta[0];
x[1] = (pos[1]-gridOrg[1])/gridDelta[1];
x[2] = (pos[2]-gridOrg[2])/gridDelta[2];
cell[0] = (int)(x[0]);
cell[1] = (int)(x[1]);
cell[2] = (int)(x[2]);
x[0] = x[0]-(float)cell[0]; // alpha
x[1] = x[1]-(float)cell[1]; // beta
x[2] = x[2]-(float)cell[2]; // gamma
float alpha = x[0];
float beta = x[1];
float gamma = x[2];
cell[0] = ::min(::max(cell[0], int(0)), gridSize[0]-2);
cell[1] = ::min(::max(cell[1], int(0)), gridSize[1]-2);
cell[2] = ::min(::max(cell[2], int(0)), gridSize[2]-2);
// Get values at corners of current cell
float n0, n1, n2, n3, n4, n5, n6, n7;
n0 = field[gridSize[0]*(gridSize[1]*(cell[2]+0) + (cell[1]+0))+cell[0]+0];
n1 = field[gridSize[0]*(gridSize[1]*(cell[2]+0) + (cell[1]+0))+cell[0]+1];
n2 = field[gridSize[0]*(gridSize[1]*(cell[2]+0) + (cell[1]+1))+cell[0]+0];
n3 = field[gridSize[0]*(gridSize[1]*(cell[2]+0) + (cell[1]+1))+cell[0]+1];
n4 = field[gridSize[0]*(gridSize[1]*(cell[2]+1) + (cell[1]+0))+cell[0]+0];
n5 = field[gridSize[0]*(gridSize[1]*(cell[2]+1) + (cell[1]+0))+cell[0]+1];
n6 = field[gridSize[0]*(gridSize[1]*(cell[2]+1) + (cell[1]+1))+cell[0]+0];
n7 = field[gridSize[0]*(gridSize[1]*(cell[2]+1) + (cell[1]+1))+cell[0]+1];
float a, b, c, d, e, f, g, h;
a = n0;
b = n1 - n0;
c = n2 - n0;
d = n3 - n1 - n2 + n0;
e = n4 - n0;
f = n5 - n1 - n4 + n0;
g = n6 - n2 - n4 + n0;
h = n7 - n3 - n5 - n6 + n1 + n2 + n4 - n0;
return a + b*alpha + c*beta + d*alpha*beta + e*gamma + f*alpha*gamma
+ g*beta*gamma + h*alpha*beta*gamma;
}
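/*
 * Host-side usage sketch for SampleFieldAtPosTrilin (illustrative only; the
 * grid layout, field contents and query position below are assumptions, not
 * part of the original code):
 *
 *     float gridOrg[3]   = {0.0f, 0.0f, 0.0f};  // world-space grid origin
 *     float gridDelta[3] = {1.0f, 1.0f, 1.0f};  // spacing between grid nodes
 *     int   gridSize[3]  = {64, 64, 64};        // nodes per axis
 *     std::vector<float> field(64*64*64, 0.0f); // scalar field, x fastest
 *     float pos[3] = {10.5f, 20.25f, 30.75f};   // world-space sample position
 *     float val = SampleFieldAtPosTrilin(pos, field.data(), gridOrg,
 *             gridDelta, gridSize);
 */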
/**
 * 'Safe' inverse sqrt that prevents dividing by zero
*
* @param x The input value
* @return The inverse sqrt if x>0, 0.0 otherwise
*/
inline __host__ __device__ float safeRsqrtf(float x) {
if (x > 0.0) {
return 1.0f/sqrtf(x);
} else {
return 0.0f;
}
}
/**
 * 'Safe' inverse length for float3 that uses safe rsqrt
 *
 * @param v The input vector
 * @return The inverse length of v (0.0 if v is the zero vector)
 */
inline __device__ float safeInvLength(float3 v) {
return safeRsqrtf(dot(v, v));
}
/**
* 'Safe' normalize function for float2 that uses safe rsqrt
*
* @param v The input vector to be normalized
* @return The normalized vector v
*/
inline __device__ float2 safeNormalize(float2 v) {
float invLen = safeRsqrtf(dot(v, v));
return v * invLen;
}
/**
* 'Safe' normalize function for float3 that uses safe rsqrt
*
* @param v The input vector to be normalized
* @return The normalized vector v
*/
inline __host__ __device__ float3 safeNormalize(float3 v) {
float invLen = safeRsqrtf(dot(v, v));
return v * invLen;
}
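/*
 * Behaviour sketch for the 'safe' helpers (illustrative only): a degenerate
 * input does not produce NaNs because safeRsqrtf(0.0f) evaluates to 0.0f.
 *
 *     float3 degenerate = make_float3(0.0f, 0.0f, 0.0f);
 *     float3 n = safeNormalize(degenerate); // n == (0, 0, 0), no NaNs
 */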
////////////////////////////////////////////////////////////////////////////////
// Inline device functions //
////////////////////////////////////////////////////////////////////////////////
/**
* @return Returns the thread index based on the current CUDA grid dimensions
*/
inline __device__ uint getThreadIdx() {
return __umul24(__umul24(blockIdx.y, gridDim.x) + blockIdx.x, blockDim.x) +
threadIdx.x;
}
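/*
 * getThreadIdx() linearizes a (possibly two-dimensional) grid of
 * one-dimensional thread blocks. A matching launch configuration sketch
 * (the block size of 256 and the kernel name are assumptions; the host code
 * below uses a Grid() helper for the same purpose):
 *
 *     const uint blockSize = 256;
 *     dim3 block(blockSize);
 *     dim3 grid((elementCnt + blockSize - 1)/blockSize);
 *     hipLaunchKernelGGL((SomeKernel_D), grid, block, 0, 0, ...);
 */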
////////////////////////////////////////////////////////////////////////////////
// Global device functions //
////////////////////////////////////////////////////////////////////////////////
/**
* Computes the gradient of a given scalar field using central differences.
* Border areas are omitted.
*
* @param[out] grad_D The gradient field
* @param[in] field_D The scalar field
*/
__global__ void DeformableGPUSurfaceMT_CalcVolGradient_D(float4 *grad_D, float *field_D) {
const uint idx = ::getThreadIdx();
// Get grid coordinates
uint3 gridCoord = make_uint3(
idx % gridSize_D.x,
(idx / gridSize_D.x) % gridSize_D.y,
(idx / gridSize_D.x) / gridSize_D.y);
// Omit border cells (gradient remains zero)
if (gridCoord.x == 0) return;
if (gridCoord.y == 0) return;
if (gridCoord.z == 0) return;
if (gridCoord.x >= gridSize_D.x - 1) return;
if (gridCoord.y >= gridSize_D.y - 1) return;
if (gridCoord.z >= gridSize_D.z - 1) return;
float3 grad;
grad.x =
field_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x+1, gridCoord.y+0, gridCoord.z+0))]-
field_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x-1, gridCoord.y+0, gridCoord.z+0))];
grad.y =
field_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x, gridCoord.y+1, gridCoord.z+0))]-
field_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x, gridCoord.y-1, gridCoord.z+0))];
grad.z =
field_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x, gridCoord.y+0, gridCoord.z+1))]-
field_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x, gridCoord.y+0, gridCoord.z-1))];
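// Note: these are unnormalized central differences; the per-axis factor
// 1/(2*gridDelta) is omitted. Since the vector is normalized below, this
// only affects the direction for anisotropic grid spacing.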
grad = safeNormalize(grad);
grad_D[idx].x = grad.x;
grad_D[idx].y = grad.y;
grad_D[idx].z = grad.z;
}
/**
* Computes the gradient of a given scalar field using central differences.
* Border areas are omitted.
*
* @param[out] grad_D The gradient field
* @param[in] field_D The scalar field
 * @param[in] distField_D The distance field
*/
__global__ void DeformableGPUSurfaceMT_CalcVolGradientWithDistField_D(float4 *grad_D, float *field_D,
float *distField_D, float minDist, float isovalue) {
const uint idx = ::getThreadIdx();
// Get grid coordinates
uint3 gridCoord = ::GetGridCoordsByPosIdx(idx);
// Omit border cells (gradient remains zero)
if (gridCoord.x == 0) return;
if (gridCoord.y == 0) return;
if (gridCoord.z == 0) return;
if (gridCoord.x >= gridSize_D.x - 1) return;
if (gridCoord.y >= gridSize_D.y - 1) return;
if (gridCoord.z >= gridSize_D.z - 1) return;
float distSample = ::SampleFieldAt_D<float, false>(gridCoord, distField_D);
float volSample = ::SampleFieldAt_D<float, false>(gridCoord, field_D);
float3 grad = make_float3(0.0, 0.0, 0.0);
if (distSample > minDist) {
grad.x =
distField_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x+1, gridCoord.y+0, gridCoord.z+0))]-
distField_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x+0, gridCoord.y+0, gridCoord.z+0))];
grad.y =
distField_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x, gridCoord.y+1, gridCoord.z+0))]-
distField_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x, gridCoord.y+0, gridCoord.z+0))];
grad.z =
distField_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x, gridCoord.y+0, gridCoord.z+1))]-
distField_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x, gridCoord.y+0, gridCoord.z+0))];
if (volSample < isovalue) {
grad.x *= -1.0;
grad.y *= -1.0;
grad.z *= -1.0;
}
} else {
grad.x =
field_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x+1, gridCoord.y+0, gridCoord.z+0))]-
field_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x+0, gridCoord.y+0, gridCoord.z+0))];
grad.y =
field_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x, gridCoord.y+1, gridCoord.z+0))]-
field_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x, gridCoord.y+0, gridCoord.z+0))];
grad.z =
field_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x, gridCoord.y+0, gridCoord.z+1))]-
field_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x, gridCoord.y+0, gridCoord.z+0))];
}
grad = safeNormalize(grad);
grad_D[idx].x = grad.x;
grad_D[idx].y = grad.y;
grad_D[idx].z = grad.z;
}
/**
* Computes a distance field based on the vertex positions.
*
* @param[in] vertexPos_D The vertex data buffer (device memory)
* @param[out] distField_D The distance field buffer (device memory)
* @param[in] vertexCnt The number of vertices
* @param[in] dataArrOffs The vertex position offset for the vertex data buffer
* @param[in] dataArrSize The stride of the vertex data buffer
*/
__global__ void DeformableGPUSurfaceMT_ComputeDistField_D(
float *vertexPos_D,
float *distField_D,
uint vertexCnt,
uint dataArrOffs,
uint dataArrSize) {
// TODO This is very slow since it basically bruteforces all vertex
// positions and stores the distance to the nearest one.
const uint idx = getThreadIdx();
if (idx >= gridSize_D.x*gridSize_D.y*gridSize_D.z) {
return;
}
// Get world space position of gridPoint
uint3 gridCoords = GetGridCoordsByPosIdx(idx);
float3 latticePos = TransformToWorldSpace(make_float3(
gridCoords.x,
gridCoords.y,
gridCoords.z));
// Loop through all vertices to find minimal distance
float3 pos = make_float3(
        vertexPos_D[dataArrOffs+0],
        vertexPos_D[dataArrOffs+1],
        vertexPos_D[dataArrOffs+2]);
float len;
len = (latticePos.x-pos.x)*(latticePos.x-pos.x)+
(latticePos.y-pos.y)*(latticePos.y-pos.y)+
(latticePos.z-pos.z)*(latticePos.z-pos.z);
float dist2 = len;
for (uint i = 0; i < vertexCnt; ++i) {
pos = make_float3(
vertexPos_D[dataArrSize*i+dataArrOffs+0],
vertexPos_D[dataArrSize*i+dataArrOffs+1],
vertexPos_D[dataArrSize*i+dataArrOffs+2]);
len = (latticePos.x-pos.x)*(latticePos.x-pos.x)+
(latticePos.y-pos.y)*(latticePos.y-pos.y)+
(latticePos.z-pos.z)*(latticePos.z-pos.z);
dist2 = min(dist2, len);
}
distField_D[idx] = sqrt(dist2);
}
/**
 * Writes a flag for every vertex that is adjacent to a corrupt triangle.
*
* @param[in,out] vertexData_D The buffer with the vertex data
* @param[in] vertexDataStride The stride for the vertex data
* buffer
* @param[in] vertexDataOffsPos The position offset in the vertex
* data buffer
* @param[in] vertexDataOffsCorruptFlag The corruption flag offset in the
* vertex data buffer
* @param[in] triangleVtxIdx_D Array with triangle vertex indices
* @param[in] volume_D The target volume defining the
* iso-surface
* @param[in] externalForcesScl_D Array with the scale factor for the
* external force
* @param[in] triangleCnt The number of triangles
* @param[in] minDispl Minimum force scale to keep going
* @param[in] isoval The iso-value defining the iso-surface
*
* TODO
*/
__global__ void DeformableGPUSurfaceMT_FlagCorruptTriangles_D(
float *vertexFlag_D,
float *corruptTriangles_D,
float *vertexData_D,
uint vertexDataStride,
uint vertexDataOffsPos,
uint vertexDataOffsNormal,
uint *triangleVtxIdx_D,
float *targetVol_D,
const unsigned int *targetActiveCells_D,
float4 *externalForces_D,
uint triangleCnt,
float isoval) {
const uint idx = ::getThreadIdx();
if (idx >= triangleCnt) {
return;
}
/* Alternative 1: Sample volume at triangle midpoint */
// const uint baseIdx0 = vertexDataStride*triangleVtxIdx_D[3*idx+0];
// const uint baseIdx1 = vertexDataStride*triangleVtxIdx_D[3*idx+1];
// const uint baseIdx2 = vertexDataStride*triangleVtxIdx_D[3*idx+2];
// const float3 p0 = make_float3(vertexData_D[baseIdx0+vertexDataOffsPos+0],
// vertexData_D[baseIdx0+vertexDataOffsPos+1],
// vertexData_D[baseIdx0+vertexDataOffsPos+2]);
// const float3 p1 = make_float3(vertexData_D[baseIdx1+vertexDataOffsPos+0],
// vertexData_D[baseIdx1+vertexDataOffsPos+1],
// vertexData_D[baseIdx1+vertexDataOffsPos+2]);
// const float3 p2 = make_float3(vertexData_D[baseIdx2+vertexDataOffsPos+0],
// vertexData_D[baseIdx2+vertexDataOffsPos+1],
// vertexData_D[baseIdx2+vertexDataOffsPos+2]);
// // Sample volume at midpoint
// const float3 midPoint = (p0+p1+p2)/3.0;
// const float volSampleMidPoint = ::SampleFieldAtPosTricub_D<float>(midPoint, targetVol_D);
// float flag = float(::fabs(volSampleMidPoint-isoval) > 0.3);
// vertexFlag_D[triangleVtxIdx_D[3*idx+0]] = flag;
// vertexFlag_D[triangleVtxIdx_D[3*idx+1]] = flag;
// vertexFlag_D[triangleVtxIdx_D[3*idx+2]] = flag;
/* Alternative 2: use area and angles */
// const uint baseIdx0 = vertexDataStride*triangleVtxIdx_D[3*idx+0];
// const uint baseIdx1 = vertexDataStride*triangleVtxIdx_D[3*idx+1];
// const uint baseIdx2 = vertexDataStride*triangleVtxIdx_D[3*idx+2];
// const float3 p0 = make_float3(
// vertexData_D[baseIdx0+vertexDataOffsPos+0],
// vertexData_D[baseIdx0+vertexDataOffsPos+1],
// vertexData_D[baseIdx0+vertexDataOffsPos+2]);
// const float3 p1 = make_float3(
// vertexData_D[baseIdx1+vertexDataOffsPos+0],
// vertexData_D[baseIdx1+vertexDataOffsPos+1],
// vertexData_D[baseIdx1+vertexDataOffsPos+2]);
// const float3 p2 = make_float3(
// vertexData_D[baseIdx2+vertexDataOffsPos+0],
// vertexData_D[baseIdx2+vertexDataOffsPos+1],
// vertexData_D[baseIdx2+vertexDataOffsPos+2]);
//
// float3 v01 = (p0-p1);
// float3 v02 = (p0-p2);
// float3 v10 = (p1-p0);
// float3 v12 = (p1-p2);
// float3 v21 = (p2-p1);
// float3 v20 = (p2-p0);
//
// // Compute minimum angle
// float dot0 = acos(dot(normalize(v01), normalize(v02)));
// float dot1 = acos(dot(normalize(v10), normalize(v12)));
// float dot2 = acos(dot(normalize(v21), normalize(v20)));
// float minDot = min(dot0, min(dot1, dot2));
//
// // Compute area of the triangle
// float3 midPnt = (p0+p1)*0.5;
// float3 hVec = p2 - midPnt;
// float area = length(p0-p1)*length(hVec)*0.5;
// area = gridDelta_D.x*gridDelta_D.y-1;
//
// float maxCellFaceArea = gridDelta_D.x*gridDelta_D.y; // Find max grid delta
//
// //float flag = float((minDot < 0.1)||(area > maxCellFaceArea));
// float flag = float(minDot < 0.2);
//
// // TODO Is there no atomic write?
//// vertexFlag_D[triangleVtxIdx_D[3*idx+0]] = float(bool(currFlag0) || bool(flag));
//// vertexFlag_D[triangleVtxIdx_D[3*idx+1]] = float(bool(currFlag1) || bool(flag));
//// vertexFlag_D[triangleVtxIdx_D[3*idx+2]] = float(bool(currFlag2) || bool(flag));
//
// // DEBUG
// if (flag == 1.0) {
// vertexFlag_D[triangleVtxIdx_D[3*idx+0]] = 1.0;
// vertexFlag_D[triangleVtxIdx_D[3*idx+1]] = 1.0;
// vertexFlag_D[triangleVtxIdx_D[3*idx+2]] = 1.0;
// }
// // END DEBUG
// corruptTriangles_D[idx] = flag;
// /* Alternative 3 Check whether the vertex lies in an active cell of the
// target volume */
//
// const uint baseIdx0 = vertexDataStride*triangleVtxIdx_D[3*idx+0];
// const uint baseIdx1 = vertexDataStride*triangleVtxIdx_D[3*idx+1];
// const uint baseIdx2 = vertexDataStride*triangleVtxIdx_D[3*idx+2];
// const float3 p0 = make_float3(
// vertexData_D[baseIdx0+vertexDataOffsPos+0],
// vertexData_D[baseIdx0+vertexDataOffsPos+1],
// vertexData_D[baseIdx0+vertexDataOffsPos+2]);
// const float3 p1 = make_float3(
// vertexData_D[baseIdx1+vertexDataOffsPos+0],
// vertexData_D[baseIdx1+vertexDataOffsPos+1],
// vertexData_D[baseIdx1+vertexDataOffsPos+2]);
// const float3 p2 = make_float3(
// vertexData_D[baseIdx2+vertexDataOffsPos+0],
// vertexData_D[baseIdx2+vertexDataOffsPos+1],
// vertexData_D[baseIdx2+vertexDataOffsPos+2]);
//
// // Sample volume at midpoint
// const float3 midpoint = (p0+p1+p2)/3.0;
//
// // Get integer cell index
// int3 coords;
// coords.x = int((midpoint.x-gridOrg_D.x)/gridDelta_D.x);
// coords.y = int((midpoint.y-gridOrg_D.y)/gridDelta_D.y);
// coords.z = int((midpoint.z-gridOrg_D.z)/gridDelta_D.z);
//
// int cellIDx = ::GetCellIdxByGridCoords(coords);
// uint cellState = targetActiveCells_D[cellIDx];
//
// float currFlag0 = vertexFlag_D[triangleVtxIdx_D[3*idx+0]];
// float currFlag1 = vertexFlag_D[triangleVtxIdx_D[3*idx+1]];
// float currFlag2 = vertexFlag_D[triangleVtxIdx_D[3*idx+2]];
//// __syncthreads();
//// vertexFlag_D[triangleVtxIdx_D[3*idx+0]] = float(bool(currFlag0) || bool(1-cellState));
//// vertexFlag_D[triangleVtxIdx_D[3*idx+1]] = float(bool(currFlag1) || bool(1-cellState));
//// vertexFlag_D[triangleVtxIdx_D[3*idx+2]] = float(bool(currFlag2) || bool(1-cellState));
//// vertexFlag_D[triangleVtxIdx_D[3*idx+0]] = 1.0;
//// vertexFlag_D[triangleVtxIdx_D[3*idx+1]] = 1.0;
//// vertexFlag_D[triangleVtxIdx_D[3*idx+2]] = 1.0;
//
//
// corruptTriangles_D[idx] = float(1-cellState);
/* Alternative 4: Check whether all vertices lie in an active cell of the
target volume */
const uint baseIdx0 = vertexDataStride*triangleVtxIdx_D[3*idx+0];
const uint baseIdx1 = vertexDataStride*triangleVtxIdx_D[3*idx+1];
const uint baseIdx2 = vertexDataStride*triangleVtxIdx_D[3*idx+2];
const float3 p0 = make_float3(
vertexData_D[baseIdx0+vertexDataOffsPos+0],
vertexData_D[baseIdx0+vertexDataOffsPos+1],
vertexData_D[baseIdx0+vertexDataOffsPos+2]);
const float3 p1 = make_float3(
vertexData_D[baseIdx1+vertexDataOffsPos+0],
vertexData_D[baseIdx1+vertexDataOffsPos+1],
vertexData_D[baseIdx1+vertexDataOffsPos+2]);
const float3 p2 = make_float3(
vertexData_D[baseIdx2+vertexDataOffsPos+0],
vertexData_D[baseIdx2+vertexDataOffsPos+1],
vertexData_D[baseIdx2+vertexDataOffsPos+2]);
float3 vec0 = (p1 - p0);
float3 vec1 = (p2 - p0);
float3 norm = normalize(cross(vec0, vec1));
// Sample volume at midpoint
const float3 midpoint = (p0+p1+p2)/3.0;
// Sample gradient from external forces
float4 externalForces = SampleFieldAtPosTrilin_D<float4, false>(midpoint, externalForces_D);
float3 normField = make_float3(externalForces.x, externalForces.y, externalForces.z);
float dotNormsAbs = dot(norm, normField);
// Get integer cell index
int3 coords;
coords.x = int((midpoint.x-gridOrg_D.x)/gridDelta_D.x);
coords.y = int((midpoint.y-gridOrg_D.y)/gridDelta_D.y);
coords.z = int((midpoint.z-gridOrg_D.z)/gridDelta_D.z);
int3 coords0;
coords0.x = int((p0.x-gridOrg_D.x)/gridDelta_D.x);
coords0.y = int((p0.y-gridOrg_D.y)/gridDelta_D.y);
coords0.z = int((p0.z-gridOrg_D.z)/gridDelta_D.z);
int3 coords1;
coords1.x = int((p1.x-gridOrg_D.x)/gridDelta_D.x);
coords1.y = int((p1.y-gridOrg_D.y)/gridDelta_D.y);
coords1.z = int((p1.z-gridOrg_D.z)/gridDelta_D.z);
int3 coords2;
coords2.x = int((p2.x-gridOrg_D.x)/gridDelta_D.x);
coords2.y = int((p2.y-gridOrg_D.y)/gridDelta_D.y);
coords2.z = int((p2.z-gridOrg_D.z)/gridDelta_D.z);
int cellIDx = ::GetCellIdxByGridCoords(coords);
int cellIDx0 = ::GetCellIdxByGridCoords(coords0);
int cellIDx1 = ::GetCellIdxByGridCoords(coords1);
int cellIDx2 = ::GetCellIdxByGridCoords(coords2);
uint cellState = targetActiveCells_D[cellIDx];
uint cellState0 = targetActiveCells_D[cellIDx0];
uint cellState1 = targetActiveCells_D[cellIDx1];
uint cellState2 = targetActiveCells_D[cellIDx2];
// float currFlag0 = vertexFlag_D[triangleVtxIdx_D[3*idx+0]];
// float currFlag1 = vertexFlag_D[triangleVtxIdx_D[3*idx+1]];
// float currFlag2 = vertexFlag_D[triangleVtxIdx_D[3*idx+2]];
// __syncthreads();
// vertexFlag_D[triangleVtxIdx_D[3*idx+0]] = float(bool(currFlag0) || bool(1-cellState));
// vertexFlag_D[triangleVtxIdx_D[3*idx+1]] = float(bool(currFlag1) || bool(1-cellState));
// vertexFlag_D[triangleVtxIdx_D[3*idx+2]] = float(bool(currFlag2) || bool(1-cellState));
// vertexFlag_D[triangleVtxIdx_D[3*idx+0]] = 1.0;
// vertexFlag_D[triangleVtxIdx_D[3*idx+1]] = 1.0;
// vertexFlag_D[triangleVtxIdx_D[3*idx+2]] = 1.0;
// Criteria for good triangles
bool flag = bool(cellState) &&
bool(cellState0) &&
bool(cellState1) &&
bool(cellState2);
// (dotNormsAbs >= 0);
//&& (dotNormsAbs <= 0.5);
corruptTriangles_D[idx] = float(!flag);
}
/**
 * Propagates a single vertex along the external force field until it
 * converges with respect to the given target volume.
 *
 * @return The final position (xyz) and the accumulated path length (w)
 */
__device__ float4 UpdateVtxPosSingle_D (
float3 posStart, // Starting position
float4 *gradient_D, // External forces
float *targetVol_D, // The target volume
float minDisplScl, // Minimum displacement for convergence
float forcesScl, // General scaling factor for forces
float isovalue) { // Isovalue
float3 pos = posStart;
float sample = SampleFieldAtPosTrilin_D<float, false>(pos, targetVol_D);
bool outside = sample <= isovalue;
float extForcesScl;
if (outside) extForcesScl = 1.0;
else extForcesScl = -1.0;
float len = 0.0f;
bool converged = false;
int steps = 0;
const int maxSteps = 3;
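// Iterate until the vertex has converged; because the loop condition uses
// '||', at least maxSteps iterations are always performed.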
do {
// Get volume sample
float sample = SampleFieldAtPosTrilin_D<float, false>(pos, targetVol_D);
// Switch sign and scale down if necessary
bool negative = extForcesScl < 0;
bool outside = sample <= isovalue;
int switchSign = int((negative && outside)||(!negative && !outside));
extForcesScl = extForcesScl*(1.0*(1-switchSign) - 1.0*switchSign);
extForcesScl *= (1.0*(1-switchSign) + 0.5*(switchSign));
// Get external forces sample and scale
float4 extForceTmp = SampleFieldAtPosTrilin_D<float4, false>(pos, gradient_D);
float3 extForce = make_float3(extForceTmp.x, extForceTmp.y, extForceTmp.z);
extForce = safeNormalize(extForce);
// Accumulate path
len += extForcesScl*forcesScl;
extForce *= extForcesScl*forcesScl;
// Propagate vertex and increase path length
pos += extForce;
if (length(extForce) <= minDisplScl) {
converged = true;
}
steps++;
} while (!converged || steps < maxSteps);
return make_float4(pos.x, pos.y, pos.z, len);
}
/**
 * Recursively integrates the path-length uncertainty over a corrupt
 * triangle: the vertices are propagated towards the target surface and the
 * triangle is subdivided as long as it does not end up in an active cell
 * (up to a maximum recursion depth).
 *
 * @return The integrated uncertainty; the triangle area is returned in 'triArea'
 */
__device__ float DeformableGPUSurfaceMT_IntUncertaintyOverCorruptAreaRec_D(
float3 pos1, float3 pos2, float3 pos3, // Vertex positions of the triangle
float len1, float len2, float len3, // Vertex path lengths of the triangle
float4 *gradient_D, // External forces
float *targetVol_D, // The target volume
unsigned int *targetActiveCells_D, // Active cells of the target volume
float minDisplScl, // Minimum displacement for convergence
float forcesScl, // General scaling factor for forces
float isovalue, // Isovalue
float &triArea,
uint depth
) {
const uint maxDepth = 2;
// 1. Propagate vertices until they converge to a fixed position
float4 newPosLen1, newPosLen2, newPosLen3;
newPosLen1 = UpdateVtxPosSingle_D (pos1, gradient_D, targetVol_D,
minDisplScl, forcesScl, isovalue);
newPosLen2 = UpdateVtxPosSingle_D (pos2, gradient_D, targetVol_D,
minDisplScl, forcesScl, isovalue);
newPosLen3 = UpdateVtxPosSingle_D (pos3, gradient_D, targetVol_D,
minDisplScl, forcesScl, isovalue);
float3 newPos1, newPos2, newPos3;
newPos1 = make_float3(newPosLen1.x, newPosLen1.y, newPosLen1.z);
newPos2 = make_float3(newPosLen2.x, newPosLen2.y, newPosLen2.z);
newPos3 = make_float3(newPosLen3.x, newPosLen3.y, newPosLen3.z);
// 2. Check whether the resulting triangle is valid
float3 midpoint = (newPos1+newPos2+newPos3)/3.0;
int3 coords;
coords.x = int((midpoint.x-gridOrg_D.x)/gridDelta_D.x);
coords.y = int((midpoint.y-gridOrg_D.y)/gridDelta_D.y);
coords.z = int((midpoint.z-gridOrg_D.z)/gridDelta_D.z);
int cellIDx = ::GetCellIdxByGridCoords(coords);
uint cellState = targetActiveCells_D[cellIDx];
if ((cellState == 1)||(depth >= maxDepth)) {
// printf("%.16f;%.16f;%.16f;%.16f;%.16f;%.16f;%.16f;%.16f;%.16f\n",
// newPos1.x, newPos1.y, newPos1.z,
// newPos2.x, newPos2.y, newPos2.z,
// newPos3.x, newPos3.y, newPos3.z);
// if (depth >= 2) printf("Thread %u, depth %u\n",::getThreadIdx(), depth);
// 3a. Cell is active, therefore triangle is valid
// --> Compute integrated uncertainty value
// Get triangle area
float a = length(newPos1 - newPos2);
float b = length(newPos1 - newPos3);
float c = length(newPos2 - newPos3);
// Compute area (Heron's formula)
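// Equivalent to sqrt(s*(s-a)*(s-b)*(s-c)) with s = (a+b+c)/2, rewritten so
// that only a single sqrt and no division is needed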
float rad = (a + b - c)*(c + a - b)*(a + b + c)*(b + c - a);
// Make sure radicand is not negative
rad = rad > 0.0f ? rad : 0.0f;
float area = 0.25f*sqrt(rad);
triArea = area;
// Get average value
float avgValue = (len1+newPosLen1.w+len2+newPosLen2.w+len3+newPosLen3.w)/3.0f;
// Approximate integration
return triArea*avgValue;
} else {
float triArea1, triArea2, triArea3, triArea4;
// 3b. Cell is not active, therefore, triangle is not valid
// --> Subdivide and call recursively
float3 p12 = (newPos1+newPos2)/2.0;
float3 p13 = (newPos1+newPos3)/2.0;
float3 p32 = (newPos3+newPos2)/2.0;
float l12 = (len1+newPosLen1.w+len2+newPosLen2.w)/2.0;
float l13 = (len1+newPosLen1.w+len3+newPosLen3.w)/2.0;
float l32 = (len3+newPosLen3.w+len2+newPosLen2.w)/2.0;
float intUncertainty1 =
DeformableGPUSurfaceMT_IntUncertaintyOverCorruptAreaRec_D(
newPos1, p12, p13,
len1+newPosLen1.w, l12, l13,
gradient_D, targetVol_D, targetActiveCells_D,
minDisplScl, forcesScl, isovalue, triArea1,
depth+1);
float intUncertainty2 =
DeformableGPUSurfaceMT_IntUncertaintyOverCorruptAreaRec_D(
p13, p32, newPos3,
l13, l32, len3+newPosLen3.w,
gradient_D, targetVol_D, targetActiveCells_D,
minDisplScl, forcesScl, isovalue, triArea2,
depth+1);
float intUncertainty3 =
DeformableGPUSurfaceMT_IntUncertaintyOverCorruptAreaRec_D(
p12, p13, p32,
l12, l13, l32,
gradient_D, targetVol_D, targetActiveCells_D,
minDisplScl, forcesScl, isovalue, triArea3,
depth+1);
float intUncertainty4 =
DeformableGPUSurfaceMT_IntUncertaintyOverCorruptAreaRec_D(
p12, p32, newPos2,
l12, l32, len2+newPosLen2.w,
gradient_D, targetVol_D, targetActiveCells_D,
minDisplScl, forcesScl, isovalue, triArea4,
depth+1);
triArea = triArea1 + triArea2 + triArea3 + triArea4;
return intUncertainty1 + intUncertainty2 + intUncertainty3 + intUncertainty4;
}
}
/**
 * Computes the integrated path-length uncertainty and the area for every
 * corrupt triangle. Non-corrupt triangles are skipped.
 */
__global__ void DeformableGPUSurfaceMT_IntUncertaintyOverCorruptArea_D(
float *corruptTriangles_D,
float *vertexData_D,
float *vertexPathLen_D,
uint vertexDataStride,
uint vertexDataOffsPos,
uint vertexDataOffsNormal,
uint *triangleVtxIdx_D,
float *targetVol_D,
float4 *gradient_D,
unsigned int *targetActiveCells_D,
uint triangleCnt,
float isovalue,
float minDisplScl,
float forcesScl,
float *corruptTrianglesIntUncertainty_D,
float *trianglesArea_D) {
const uint idx = ::getThreadIdx();
if (idx >= triangleCnt) {
return;
}
// Triangle is not corrupt
if (corruptTriangles_D[idx] == 0) {
return;
}
// Get initial positions from main memory
uint baseIdx0 = vertexDataStride*triangleVtxIdx_D[3*idx+0];
uint baseIdx1 = vertexDataStride*triangleVtxIdx_D[3*idx+1];
uint baseIdx2 = vertexDataStride*triangleVtxIdx_D[3*idx+2];
float3 pos1 = make_float3(
vertexData_D[baseIdx0+vertexDataOffsPos+0],
vertexData_D[baseIdx0+vertexDataOffsPos+1],
vertexData_D[baseIdx0+vertexDataOffsPos+2]);
float3 pos2 = make_float3(
vertexData_D[baseIdx1+vertexDataOffsPos+0],
vertexData_D[baseIdx1+vertexDataOffsPos+1],
vertexData_D[baseIdx1+vertexDataOffsPos+2]);
float3 pos3 = make_float3(
vertexData_D[baseIdx2+vertexDataOffsPos+0],
vertexData_D[baseIdx2+vertexDataOffsPos+1],
vertexData_D[baseIdx2+vertexDataOffsPos+2]);
// Get initial path lengths from previous morphing
float len1 = vertexPathLen_D[triangleVtxIdx_D[3*idx+0]];
float len2 = vertexPathLen_D[triangleVtxIdx_D[3*idx+1]];
float len3 = vertexPathLen_D[triangleVtxIdx_D[3*idx+2]];
float triArea = 0.0;
// Integrate path lengths
float intUncertainty = DeformableGPUSurfaceMT_IntUncertaintyOverCorruptAreaRec_D(
pos1, pos2, pos3, // Vertex positions of the triangle
len1, len2, len3, // Vertex path lengths of the triangle
gradient_D, // External forces
targetVol_D, // The target volume
targetActiveCells_D, // Active cells of the target volume
minDisplScl, // Minimum displacement for convergence
forcesScl, // General scaling factor for forces
isovalue, // Isovalue
triArea, // Area associated with this triangle
0 // Initial recursion depth
);
corruptTrianglesIntUncertainty_D[idx] = intUncertainty;
trianglesArea_D[idx] = triArea;
}
/**
 * Initializes the scale factor for the external forces with either -1.0 (if
 * the starting position of the vertex is inside the isosurface) or 1.0 (vice
 * versa).
*
* @param[in] arr_D The external forces data buffer
* @param[in] volume_D The volume the isosurface is extracted from
* @param[in] vertexPos_D The vertex data buffer
* @param[in] nElements The number of vertices
* @param[in] isoval The isovalue that defines the isosurface
* @param[in] dataArrOffs The offset for vertex positions in the vertex
* data buffer
* @param[in] dataArrSize The stride of the vertex data buffer TODO
*/
__global__ void DeformableGPUSurfaceMT_InitExternalForceScl_D (
float *arr_D,
float *displLen_D,
float *volume_D,
float *vertexPos_D,
float minDispl,
uint nElements,
float isoval,
uint dataArrOffs,
uint dataArrSize) {
const uint idx = getThreadIdx();
if (idx >= nElements) {
return;
}
float3 pos = make_float3(
vertexPos_D[dataArrSize*idx+dataArrOffs+0],
vertexPos_D[dataArrSize*idx+dataArrOffs+1],
vertexPos_D[dataArrSize*idx+dataArrOffs+2]);
// If the sampled value is smaller than isoval, we are outside the
// isosurface TODO Make this smarter
if (SampleFieldAtPosTrilin_D<float, false>(pos, volume_D) <= isoval) {
arr_D[idx] = 1.0;
} else {
arr_D[idx] = -1.0;
}
// Init last displ scl with something bigger than minDispl
displLen_D[idx] = minDispl + 0.1;
}
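/**
 * Computes the uniform (umbrella) Laplacian for every vertex:
 * L(v_i) = (1/|N(i)|) * sum_{j in N(i)} (v_j - v_i), where N(i) are the up to
 * 'maxNeighbours' stored neighbours of vertex i (unused slots are marked
 * with -1 and skipped).
 *
 * @param[in]  in_D               Input vertex data buffer
 * @param[in]  inOffs             Position offset in the input buffer
 * @param[in]  inStride           Stride of the input buffer
 * @param[in]  vertexNeighbours_D Neighbour indices of all vertices (-1 = unused)
 * @param[in]  maxNeighbours      Maximum number of neighbours per vertex
 * @param[in]  vertexCnt          The number of vertices
 * @param[out] out_D              Output buffer receiving the Laplacian
 * @param[in]  outOffs            Position offset in the output buffer
 * @param[in]  outStride          Stride of the output buffer
 */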
__global__ void DeformableGPUSurfaceMT_MeshLaplacian_D(
float *in_D,
uint inOffs,
uint inStride,
int *vertexNeighbours_D,
uint maxNeighbours,
uint vertexCnt,
float *out_D,
uint outOffs,
uint outStride) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
// Get initial position from global device memory
float3 inOwn = make_float3(
in_D[inStride*idx+inOffs+0],
in_D[inStride*idx+inOffs+1],
in_D[inStride*idx+inOffs+2]);
uint activeNeighbourCnt = 0;
float3 out = make_float3(0.0, 0.0, 0.0);
for(int i = 0; i < maxNeighbours; ++i) {
int isIdxValid = int(vertexNeighbours_D[maxNeighbours*idx+i] >= 0); // Check if idx != -1
float3 in;
int tmpIdx = isIdxValid*vertexNeighbours_D[maxNeighbours*idx+i]; // Map negative indices to 0
in.x = in_D[inStride*tmpIdx+inOffs+0];
in.y = in_D[inStride*tmpIdx+inOffs+1];
in.z = in_D[inStride*tmpIdx+inOffs+2];
out += (in - inOwn)*isIdxValid;
activeNeighbourCnt += isIdxValid;
}
out /= activeNeighbourCnt;
out_D[outStride*idx+outOffs+0] = out.x;
out_D[outStride*idx+outOffs+1] = out.y;
out_D[outStride*idx+outOffs+2] = out.z;
}
/**
* Updates the positions of all vertices based on external and internal forces.
 * The external force is computed on the fly based on the given volume.
 * Samples are acquired using tricubic or trilinear interpolation.
*
* @param[in] targetVolume_D The volume the isosurface is extracted
* from
* @param[in,out] vertexPosMapped_D The vertex data buffer
* @param[in] vertexExternalForces_D The external force and scale factor
* (in 'w') for all vertices
* @param[in] vertexNeighbours_D The neighbour indices of all vertices
* @param[in] gradient_D Array with the gradient
* @param[in] vtxNormals_D The current normals of all vertices
* @param[in] vertexCount The number of vertices
* @param[in] externalWeight Weighting factor for the external
* forces. The factor for internal forces
 * is implicitly defined by
* 1.0-'externalWeight'
* @param[in] forcesScl General scale factor for the final
* combined force
* @param[in] stiffness The stiffness of the springs defining
* the internal forces
* @param[in] isoval The isovalue defining the isosurface
* @param[in] minDispl The minimum displacement for the
* vertices to be updated
* @param[in] dataArrOffs The vertex position offset in the
* vertex data buffer
* @param[in] dataArrSize The stride of the vertex data buffer TODO
*/
__global__ void DeformableGPUSurfaceMT_UpdateVtxPos_D(
float *targetVolume_D,
float *vertexPosMapped_D,
float *vertexExternalForcesScl_D,
float *displLen_D,
float *vtxUncertainty_D,
float4 *gradient_D,
float3 *laplacian_D,
float3 *laplacian2_D,
uint vertexCnt,
float externalWeight,
float forcesScl,
float stiffness,
float isoval,
float minDispl,
bool useCubicInterpolation,
bool trackPath,
uint dataArrOffsPos,
uint dataArrOffsNormal,
uint dataArrSize) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
// Check convergence criterion
float lastDisplLen = displLen_D[idx];
if (lastDisplLen <= minDispl) return; // Vertex is converged
const uint posBaseIdx = dataArrSize*idx+dataArrOffsPos;
/* Retrieve stuff from global device memory */
// Get initial position from global device memory
float3 posOld = make_float3(
vertexPosMapped_D[posBaseIdx+0],
vertexPosMapped_D[posBaseIdx+1],
vertexPosMapped_D[posBaseIdx+2]);
// Get initial scale factor for external forces
float externalForcesScl = vertexExternalForcesScl_D[idx];
// Get partial derivatives
float3 laplacian = laplacian_D[idx];
float3 laplacian2 = laplacian2_D[idx];
/* Update position */
// No warp divergence here, since useCubicInterpolation is the same for all
// threads
const float sampleDens = useCubicInterpolation
? SampleFieldAtPosTricub_D<float, false>(posOld, targetVolume_D)
: SampleFieldAtPosTrilin_D<float, false>(posOld, targetVolume_D);
// Switch sign and scale down if necessary
bool negative = externalForcesScl < 0;
bool outside = sampleDens <= isoval;
int switchSign = int((negative && outside)||(!negative && !outside));
externalForcesScl = externalForcesScl*(1.0*(1-switchSign) - 1.0*switchSign);
externalForcesScl *= (1.0*(1-switchSign) + 0.5*(switchSign));
//externalForcesScl *= (1.0*(1-switchSign) + (switchSign));
// Sample gradient by cubic interpolation
float4 externalForceTmp = useCubicInterpolation
? SampleFieldAtPosTricub_D<float4, false>(posOld, gradient_D)
: SampleFieldAtPosTrilin_D<float4, false>(posOld, gradient_D);
float3 externalForce;
externalForce.x = externalForceTmp.x;
externalForce.y = externalForceTmp.y;
externalForce.z = externalForceTmp.z;
// externalForce = safeNormalize(externalForce);
externalForce *= forcesScl*externalForcesScl*externalWeight;
float3 internalForce = (1.0-externalWeight)*forcesScl*((1.0 - stiffness)*laplacian - stiffness*laplacian2);
// Umbrella internal force
float3 force = externalForce + internalForce;
float3 posNew = posOld + force;
/* Write back to global device memory */
// New pos
vertexPosMapped_D[posBaseIdx+0] = posNew.x;
vertexPosMapped_D[posBaseIdx+1] = posNew.y;
vertexPosMapped_D[posBaseIdx+2] = posNew.z;
// Write external forces scale factor back to global device memory
vertexExternalForcesScl_D[idx] = externalForcesScl;
// No branching occurs here, since the parameter is set globally
float3 diff = posNew-posOld;
float diffLen = length(diff);
//float diffLenInternal = length(forcesScl*((1.0 - stiffness)*laplacian - stiffness*laplacian2));
if ((trackPath)&&(abs(externalForcesScl) == 1.0f)) {
//vtxUncertainty_D[idx] += length(externalForce);
vtxUncertainty_D[idx] += diffLen;
}
// Displ scl for convergence
displLen_D[idx] = diffLen;
}
/**
* Updates the positions of all vertices based on external and internal forces.
 * The external force is computed on the fly based on the given volume.
 * Samples are acquired using tricubic or trilinear interpolation.
*
* @param[in] targetVolume_D The volume the isosurface is extracted
* from
* @param[in,out] vertexPosMapped_D The vertex data buffer
* @param[in] vertexExternalForces_D The external force and scale factor
* (in 'w') for all vertices
* @param[in] vertexNeighbours_D The neighbour indices of all vertices
* @param[in] gradient_D Array with the gradient
* @param[in] vtxNormals_D The current normals of all vertices
* @param[in] vertexCount The number of vertices
* @param[in] externalWeight Weighting factor for the external
* forces. The factor for internal forces
 * is implicitly defined by
* 1.0-'externalWeight'
* @param[in] forcesScl General scale factor for the final
* combined force
* @param[in] stiffness The stiffness of the springs defining
* the internal forces
* @param[in] isoval The isovalue defining the isosurface
* @param[in] minDispl The minimum displacement for the
* vertices to be updated
* @param[in] dataArrOffs The vertex position offset in the
* vertex data buffer
* @param[in] dataArrSize The stride of the vertex data buffer TODO
*/
__global__ void DeformableGPUSurfaceMT_UpdateVtxPosNoThinPlate_D(
float *targetVolume_D,
float *vertexPosMapped_D,
float *vertexExternalForcesScl_D,
float *displLen_D,
float *vtxUncertainty_D,
float4 *gradient_D,
float3 *laplacian_D,
uint vertexCnt,
float externalWeight,
float forcesScl,
float isoval,
float minDispl,
bool useCubicInterpolation,
bool trackPath,
uint dataArrOffsPos,
uint dataArrOffsNormal,
uint dataArrSize) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
// Check convergence criterion
float lastDisplLen = displLen_D[idx];
if (lastDisplLen <= minDispl) {
displLen_D[idx] = 0.0;
return; // Vertex is converged
}
const uint posBaseIdx = dataArrSize*idx+dataArrOffsPos;
/* Retrieve stuff from global device memory */
// Get initial position from global device memory
float3 posOld = make_float3(
vertexPosMapped_D[posBaseIdx+0],
vertexPosMapped_D[posBaseIdx+1],
vertexPosMapped_D[posBaseIdx+2]);
// Get initial scale factor for external forces
float externalForcesScl = vertexExternalForcesScl_D[idx];
// Get partial derivatives
float3 laplacian = laplacian_D[idx];
/* Update position */
// No warp divergence here, since useCubicInterpolation is the same for all
// threads
const float sampleDens = useCubicInterpolation
? SampleFieldAtPosTricub_D<float, false>(posOld, targetVolume_D)
: SampleFieldAtPosTrilin_D<float, false>(posOld, targetVolume_D);
// Switch sign and scale down if necessary
bool negative = externalForcesScl < 0;
bool outside = sampleDens <= isoval;
int switchSign = int((negative && outside)||(!negative && !outside));
externalForcesScl = externalForcesScl*(1.0*(1-switchSign) - 1.0*switchSign);
externalForcesScl *= (1.0*(1-switchSign) + 0.5*(switchSign));
//externalForcesScl *= (1.0*(1-switchSign) + (switchSign));
// Sample gradient by cubic interpolation
float4 externalForceTmp = useCubicInterpolation
? SampleFieldAtPosTricub_D<float4, false>(posOld, gradient_D)
: SampleFieldAtPosTrilin_D<float4, false>(posOld, gradient_D);
float3 externalForce;
externalForce.x = externalForceTmp.x;
externalForce.y = externalForceTmp.y;
externalForce.z = externalForceTmp.z;
// externalForce = safeNormalize(externalForce);
externalForce *= forcesScl*externalForcesScl*externalWeight;
float3 internalForce = (1.0-externalWeight)*forcesScl*laplacian;
// Umbrella internal force
float3 force = externalForce + internalForce;
float3 posNew = posOld + force;
/* Write back to global device memory */
// New pos
vertexPosMapped_D[posBaseIdx+0] = posNew.x;
vertexPosMapped_D[posBaseIdx+1] = posNew.y;
vertexPosMapped_D[posBaseIdx+2] = posNew.z;
// Write external forces scale factor back to global device memory
vertexExternalForcesScl_D[idx] = externalForcesScl;
// No branching occurs here, since the parameter is set globally
float3 diff = posNew-posOld;
float diffLen = length(diff);
//float diffLenInternal = length(forcesScl*((1.0 - stiffness)*laplacian - stiffness*laplacian2));
if ((trackPath)&&(abs(externalForcesScl) == 1.0f)) {
//vtxUncertainty_D[idx] += length(externalForce);
vtxUncertainty_D[idx] += diffLen;
}
// Displ scl for convergence
displLen_D[idx] = diffLen;
}
/**
 * Updates the positions of all vertices based on external forces only.
 * The external force is computed on the fly based on the given volume.
 * Samples are acquired using tricubic or trilinear interpolation.
*
* @param[in] targetVolume_D The volume the isosurface is extracted
* from
* @param[in,out] vertexPosMapped_D The vertex data buffer
* @param[in] vertexExternalForces_D The external force and scale factor
* (in 'w') for all vertices
* @param[in] vertexCount The number of vertices
* @param[in] forcesScl General scale factor for the final
* combined force
* @param[in] isoval The isovalue defining the isosurface
* @param[in] minDispl The minimum displacement for the
* vertices to be updated
* @param[in] dataArrOffs The vertex position offset in the
* vertex data buffer
* @param[in] dataArrSize The stride of the vertex data buffer TODO
*/
__global__ void DeformableGPUSurfaceMT_UpdateVtxPosExternalOnly_D(
float *targetVolume_D,
float *vertexPosMapped_D,
float *vertexExternalForcesScl_D,
float *displLen_D,
float *vtxUncertainty_D,
float4 *gradient_D,
int *accumPath_D,
uint vertexCnt,
float forcesScl,
float isoval,
float minDispl,
bool useCubicInterpolation,
bool trackPath,
uint dataArrOffsPos,
uint dataArrOffsNormal,
uint dataArrSize) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
// Check convergence criterion
float lastDisplLen = displLen_D[idx];
if (lastDisplLen <= minDispl) {
displLen_D[idx] = 0.0;
return; // Vertex is converged
}
const uint posBaseIdx = dataArrSize*idx+dataArrOffsPos;
/* Retrieve stuff from global device memory */
// Get initial position from global device memory
float3 posOld = make_float3(
vertexPosMapped_D[posBaseIdx+0],
vertexPosMapped_D[posBaseIdx+1],
vertexPosMapped_D[posBaseIdx+2]);
// Get initial scale factor for external forces
float externalForcesScl = vertexExternalForcesScl_D[idx];
// Check whether the difflen is to be subtracted or added
int accumFactorOld = accumPath_D[idx];
/* Update position */
// No warp divergence here, since useCubicInterpolation is the same for all
// threads
const float sampleDens = useCubicInterpolation
? SampleFieldAtPosTricub_D<float, false>(posOld, targetVolume_D)
: SampleFieldAtPosTrilin_D<float, false>(posOld, targetVolume_D);
// Switch sign and scale down if necessary
bool negative = externalForcesScl < 0;
bool outside = sampleDens <= isoval;
int switchSign = int((negative && outside)||(!negative && !outside));
externalForcesScl = externalForcesScl*(1.0*(1-switchSign) - 1.0*switchSign);
externalForcesScl *= (1.0*(1-switchSign) + 0.5*(switchSign));
// if (bool(switchSign) && (accumPath_D[idx] != 0)) {
// accumPath_D[idx] = 0;
// } else if (bool(switchSign) && (accumPath_D[idx] == 0)) {
// accumPath_D[idx] = 1;
// }
// Change to zero if one and to one if zero
int accumFactorNew = (1-accumFactorOld);
int accumFactor = switchSign*accumFactorNew + (1-switchSign)*accumFactorOld;
// Sample gradient by cubic interpolation
float4 externalForceTmp = useCubicInterpolation
? SampleFieldAtPosTricub_D<float4, false>(posOld, gradient_D)
: SampleFieldAtPosTrilin_D<float4, false>(posOld, gradient_D);
float3 externalForce;
externalForce.x = externalForceTmp.x;
externalForce.y = externalForceTmp.y;
externalForce.z = externalForceTmp.z;
//externalForce = safeNormalize(externalForce);
externalForce = normalize(externalForce);
externalForce *= forcesScl*externalForcesScl;
// External force only; this variant does not use an internal (umbrella) force
float3 posNew = posOld + externalForce;
/* Write back to global device memory */
// New pos
vertexPosMapped_D[posBaseIdx+0] = posNew.x;
vertexPosMapped_D[posBaseIdx+1] = posNew.y;
vertexPosMapped_D[posBaseIdx+2] = posNew.z;
// Write external forces scale factor back to global device memory
vertexExternalForcesScl_D[idx] = externalForcesScl;
//float3 diff = posNew-posOld;
//float diffLen = length(diff);
float diffLen = abs(forcesScl*externalForcesScl);
accumPath_D[idx] = accumFactor;
// No branching since trackpath is equal for all threads
if (trackPath) {
// if (accumPath_D[idx] == 0) {
// vtxUncertainty_D[idx] += diffLen;
// } else if(accumPath_D[idx] != 0) {
// vtxUncertainty_D[idx] -= diffLen;
// }
vtxUncertainty_D[idx] += (1-accumFactor)*diffLen - accumFactor*diffLen;
}
// Displ scl for convergence
displLen_D[idx] = diffLen;
}
/**
 * Updates the positions of all vertices based on external forces only.
 * The external force is computed on the fly based on the given volume.
 * Samples are acquired using tricubic or trilinear interpolation.
*
* @param[in] targetVolume_D The volume the isosurface is extracted
* from
* @param[in,out] vertexPosMapped_D The vertex data buffer
* @param[in] vertexExternalForces_D The external force and scale factor
* (in 'w') for all vertices
* @param[in] vertexCount The number of vertices
* @param[in] forcesScl General scale factor for the final
* combined force
* @param[in] isoval The isovalue defining the isosurface
* @param[in] minDispl The minimum displacement for the
* vertices to be updated
* @param[in] dataArrOffs The vertex position offset in the
* vertex data buffer
* @param[in] dataArrSize The stride of the vertex data buffer TODO
*/
__global__ void DeformableGPUSurfaceMT_UpdateVtxPosExternalOnlySubdiv_D(
float *targetVolume_D,
float *vertexPosMapped_D,
float *vertexExternalForcesScl_D,
float *displLen_D,
float *vtxUncertainty_D,
float4 *gradient_D,
int *accumPath_D,
float *vertexFlag_D,
uint vertexCnt,
float forcesScl,
float isoval,
float minDispl,
bool useCubicInterpolation,
bool trackPath,
uint dataArrOffsPos,
uint dataArrOffsNormal,
uint dataArrSize) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
// Check convergence criterion
float lastDisplLen = displLen_D[idx];
// if ((lastDisplLen <= minDispl)||(vertexFlag_D[idx] == 0.0)) {
// displLen_D[idx] = 0.0;
// return; // Vertex is converged
// }
if (lastDisplLen <= minDispl) {
displLen_D[idx] = 0.0;
return; // Vertex is converged
}
const uint posBaseIdx = dataArrSize*idx+dataArrOffsPos;
/* Retrieve stuff from global device memory */
// Get initial position from global device memory
float3 posOld = make_float3(
vertexPosMapped_D[posBaseIdx+0],
vertexPosMapped_D[posBaseIdx+1],
vertexPosMapped_D[posBaseIdx+2]);
// Get initial scale factor for external forces
float externalForcesScl = vertexExternalForcesScl_D[idx];
// Check whether the difflen is to be subtracted or added
int accumFactorOld = accumPath_D[idx];
/* Update position */
// No warp divergence here, since useCubicInterpolation is the same for all
// threads
const float sampleDens = useCubicInterpolation
? SampleFieldAtPosTricub_D<float, false>(posOld, targetVolume_D)
: SampleFieldAtPosTrilin_D<float, false>(posOld, targetVolume_D);
// Switch sign and scale down if necessary
bool negative = externalForcesScl < 0;
bool outside = sampleDens <= isoval;
int switchSign = int((negative && outside)||(!negative && !outside));
externalForcesScl = externalForcesScl*(1.0*(1-switchSign) - 1.0*switchSign);
externalForcesScl *= (1.0*(1-switchSign) + 0.5*(switchSign));
//externalForcesScl *= (1.0*(1-switchSign) + (switchSign));
// if (bool(switchSign) && (accumPath_D[idx] != 0)) {
// accumPath_D[idx] = 0;
// } else if (bool(switchSign) && (accumPath_D[idx] == 0)) {
// accumPath_D[idx] = 1;
// }
// Change to zero if one and to one if zero
int accumFactorNew = (1-accumFactorOld);
int accumFactor = switchSign*accumFactorNew + (1-switchSign)*accumFactorOld;
// Sample gradient by cubic interpolation
float4 externalForceTmp = useCubicInterpolation
? SampleFieldAtPosTricub_D<float4, false>(posOld, gradient_D)
: SampleFieldAtPosTrilin_D<float4, false>(posOld, gradient_D);
float3 externalForce;
externalForce.x = externalForceTmp.x;
externalForce.y = externalForceTmp.y;
externalForce.z = externalForceTmp.z;
externalForce = safeNormalize(externalForce);
externalForce *= forcesScl*externalForcesScl;
// External force only; this variant does not use an internal (umbrella) force
float3 posNew = posOld + externalForce;
/* Write back to global device memory */
// New pos
vertexPosMapped_D[posBaseIdx+0] = posNew.x;
vertexPosMapped_D[posBaseIdx+1] = posNew.y;
vertexPosMapped_D[posBaseIdx+2] = posNew.z;
// Write external forces scale factor back to global device memory
vertexExternalForcesScl_D[idx] = externalForcesScl;
//float3 diff = posNew-posOld;
//float diffLen = length(diff);
float diffLen = abs(forcesScl*externalForcesScl);
accumPath_D[idx] = accumFactor;
// No branching since trackpath is equal for all threads
if (trackPath) {
// if (accumPath_D[idx] == 0) {
// vtxUncertainty_D[idx] += diffLen;
// } else if(accumPath_D[idx] != 0) {
// vtxUncertainty_D[idx] -= diffLen;
// }
vtxUncertainty_D[idx] += (1-accumFactor)*diffLen - accumFactor*diffLen;
}
// Displ scl for convergence
displLen_D[idx] = diffLen;
}
/*
* DeformableGPUSurfaceMT::DeformableGPUSurfaceMT
*/
DeformableGPUSurfaceMT::DeformableGPUSurfaceMT() : GPUSurfaceMT(),
vboCorruptTriangleVertexFlag(0), vboVtxPath(0), vboVtxAttr(0),
nFlaggedVertices(0) {
}
/*
* DeformableGPUSurfaceMT::DeformableGPUSurfaceMT
*/
DeformableGPUSurfaceMT::DeformableGPUSurfaceMT(const DeformableGPUSurfaceMT& other) :
GPUSurfaceMT(other) {
CudaSafeCall(this->vertexExternalForcesScl_D.Validate(other.vertexExternalForcesScl_D.GetCount()));
CudaSafeCall(hipMemcpy(
this->vertexExternalForcesScl_D.Peek(),
other.vertexExternalForcesScl_D.PeekConst(),
this->vertexExternalForcesScl_D.GetCount()*sizeof(float2),
hipMemcpyDeviceToDevice));
CudaSafeCall(this->externalForces_D.Validate(other.externalForces_D.GetCount()));
CudaSafeCall(hipMemcpy(
this->externalForces_D.Peek(),
other.externalForces_D.PeekConst(),
this->externalForces_D.GetCount()*sizeof(float),
hipMemcpyDeviceToDevice));
CudaSafeCall(this->laplacian_D.Validate(other.laplacian_D.GetCount()));
CudaSafeCall(hipMemcpy(
this->laplacian_D.Peek(),
other.laplacian_D.PeekConst(),
this->laplacian_D.GetCount()*sizeof(float3),
hipMemcpyDeviceToDevice));
CudaSafeCall(this->laplacian2_D.Validate(other.laplacian2_D.GetCount()));
CudaSafeCall(hipMemcpy(
this->laplacian2_D.Peek(),
other.laplacian2_D.PeekConst(),
this->laplacian2_D.GetCount()*sizeof(float3),
hipMemcpyDeviceToDevice));
CudaSafeCall(this->displLen_D.Validate(other.displLen_D.GetCount()));
CudaSafeCall(hipMemcpy(
this->displLen_D.Peek(),
other.displLen_D.PeekConst(),
this->displLen_D.GetCount()*sizeof(float),
hipMemcpyDeviceToDevice));
CudaSafeCall(this->accTriangleData_D.Validate(other.accTriangleData_D.GetCount()));
CudaSafeCall(hipMemcpy(
this->accTriangleData_D.Peek(),
other.accTriangleData_D.PeekConst(),
this->accTriangleData_D.GetCount()*sizeof(float),
hipMemcpyDeviceToDevice));
/* Make deep copy of corrupt triangle flag buffer */
if (other.vboCorruptTriangleVertexFlag) {
// Destroy if necessary
if (this->vboCorruptTriangleVertexFlag) {
glBindBufferARB(GL_ARRAY_BUFFER, this->vboCorruptTriangleVertexFlag);
glDeleteBuffersARB(1, &this->vboCorruptTriangleVertexFlag);
glBindBufferARB(GL_ARRAY_BUFFER, 0);
this->vboCorruptTriangleVertexFlag = 0;
}
// Create vertex buffer object for triangle indices
glGenBuffersARB(1, &this->vboCorruptTriangleVertexFlag);
CheckForGLError();
// Map as copy buffer
glBindBufferARB(GL_COPY_READ_BUFFER, other.vboCorruptTriangleVertexFlag);
glBindBufferARB(GL_COPY_WRITE_BUFFER, this->vboCorruptTriangleVertexFlag);
glBufferDataARB(GL_COPY_WRITE_BUFFER,
sizeof(float)*this->vertexCnt, 0, GL_DYNAMIC_DRAW);
// Copy data
glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, 0, 0,
sizeof(float)*this->vertexCnt);
glBindBufferARB(GL_COPY_WRITE_BUFFER, 0);
glBindBufferARB(GL_COPY_READ_BUFFER, 0);
CheckForGLError();
}
/* Make deep copy of uncertainty vbo */
if (other.vboVtxPath) {
// Destroy if necessary
if (this->vboVtxPath) {
glBindBufferARB(GL_ARRAY_BUFFER, this->vboVtxPath);
glDeleteBuffersARB(1, &this->vboVtxPath);
glBindBufferARB(GL_ARRAY_BUFFER, 0);
this->vboVtxPath = 0;
}
// Create vertex buffer object for triangle indices
glGenBuffersARB(1, &this->vboVtxPath);
CheckForGLError();
// Map as copy buffer
glBindBufferARB(GL_COPY_READ_BUFFER, other.vboVtxPath);
glBindBufferARB(GL_COPY_WRITE_BUFFER, this->vboVtxPath);
glBufferDataARB(GL_COPY_WRITE_BUFFER,
sizeof(float)*this->vertexCnt, 0, GL_DYNAMIC_DRAW);
// Copy data
glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, 0, 0,
sizeof(float)*this->vertexCnt);
glBindBufferARB(GL_COPY_WRITE_BUFFER, 0);
glBindBufferARB(GL_COPY_READ_BUFFER, 0);
CheckForGLError();
}
}
/*
* DeformableGPUSurfaceMT::~DeformableGPUSurfaceMT
*/
DeformableGPUSurfaceMT::~DeformableGPUSurfaceMT() {
}
/*
 * DeformableGPUSurfaceMT_ComputeTriangleAreas_D
*/
__global__ void DeformableGPUSurfaceMT_ComputeTriangleAreas_D(
float *trianglesArea_D,
float *vertexData_D,
uint *triangleIdx_D,
uint triangleCnt) {
const int vertexDataOffsPos = 0;
//const int vertexDataOffsNormal = 3;
//const int vertexDataOffsTexCoord = 6;
const int vertexDataStride = 9;
const uint idx = ::getThreadIdx();
if (idx >= triangleCnt) {
return;
}
float3 pos0, pos1, pos2;
pos0.x = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+0]+vertexDataOffsPos+0];
pos0.y = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+0]+vertexDataOffsPos+1];
pos0.z = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+0]+vertexDataOffsPos+2];
pos1.x = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+1]+vertexDataOffsPos+0];
pos1.y = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+1]+vertexDataOffsPos+1];
pos1.z = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+1]+vertexDataOffsPos+2];
pos2.x = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+2]+vertexDataOffsPos+0];
pos2.y = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+2]+vertexDataOffsPos+1];
pos2.z = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+2]+vertexDataOffsPos+2];
// compute edge lengths
float a = length(pos0 - pos1);
float b = length(pos0 - pos2);
float c = length(pos1 - pos2);
// Compute area (Heron's formula)
float rad = (a + b - c)*(c + a - b)*(a + b + c)*(b + c - a);
// Make sure radicand is not negative
rad = rad > 0.0f ? rad : 0.0f;
float area = 0.25f*sqrt(rad);
trianglesArea_D[idx] = area;
}
/*
* DeformableGPUSurfaceMT::GetTotalSurfArea
*/
float DeformableGPUSurfaceMT::GetTotalSurfArea() {
// Compute triangle areas of all (non-corrupt) triangles
if (!CudaSafeCall(this->accTriangleArea_D.Validate(this->triangleCnt))) {
return false;
}
if (!CudaSafeCall(this->accTriangleArea_D.Set(0x00))) {
return false;
}
cudaGraphicsResource* cudaTokens[2];
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboVtxData,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[1],
this->vboTriangleIdx,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt;
size_t vboSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
uint *triangleIdxPt;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&triangleIdxPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
#ifdef USE_TIMER
float dt_ms;
hipEvent_t event1, event2;
hipEventCreate(&event1);
hipEventCreate(&event2);
hipEventRecord(event1, 0);
#endif
// Call kernel
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_ComputeTriangleAreas_D) , dim3(Grid(this->triangleCnt, 256)), dim3(256) , 0, 0,
this->accTriangleArea_D.Peek(),
vboPt,
triangleIdxPt,
this->triangleCnt);
::CheckForCudaErrorSync();
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'ComputeTriangleArea_D %.10f sec\n",
dt_ms/1000.0);
#endif
// Compute sum of all (non-corrupt) triangle areas
float totalArea = thrust::reduce(
thrust::device_ptr<float>(this->accTriangleArea_D.Peek()),
thrust::device_ptr<float>(this->accTriangleArea_D.Peek() + this->triangleCnt));
::CheckForCudaErrorSync();
if (!CudaSafeCall(hipGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
// // DEBUG Copy back and accumuluate
// HostArr<float> accTriangleArea;
// accTriangleArea.Validate(this->accTriangleArea_D.GetCount());
// this->accTriangleArea_D.CopyToHost(accTriangleArea.Peek());
// float sum = 0.0f;
// for (int i = 0; i < this->accTriangleArea_D.GetCount(); ++i) {
// sum = sum + accTriangleArea.Peek()[i];
// }
// printf("sum: %f, triangles %i\n", sum, this->triangleCnt);
// return sum;
// // END DEBUG
return totalArea;
}
/*
 * DeformableGPUSurfaceMT_ComputeValidTriangleAreas_D
*/
__global__ void DeformableGPUSurfaceMT_ComputeValidTriangleAreas_D(
float *trianglesArea_D,
float *vertexData_D,
uint *triangleIdx_D,
float *corruptTriFlag_D,
uint triangleCnt) {
const int vertexDataOffsPos = 0;
//const int vertexDataOffsNormal = 3;
//const int vertexDataOffsTexCoord = 6;
const int vertexDataStride = 9;
const uint idx = ::getThreadIdx();
if (idx >= triangleCnt) {
return;
}
float3 pos0, pos1, pos2;
pos0.x = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+0]+vertexDataOffsPos+0];
pos0.y = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+0]+vertexDataOffsPos+1];
pos0.z = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+0]+vertexDataOffsPos+2];
pos1.x = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+1]+vertexDataOffsPos+0];
pos1.y = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+1]+vertexDataOffsPos+1];
pos1.z = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+1]+vertexDataOffsPos+2];
pos2.x = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+2]+vertexDataOffsPos+0];
pos2.y = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+2]+vertexDataOffsPos+1];
pos2.z = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+2]+vertexDataOffsPos+2];
// compute edge lengths
float a = length(pos0 - pos1);
float b = length(pos0 - pos2);
float c = length(pos1 - pos2);
// Compute area (Heron's formula)
float rad = (a + b - c)*(c + a - b)*(a + b + c)*(b + c - a);
// Make sure radicand is not negative
rad = rad > 0.0f ? rad : 0.0f;
float area = 0.25f*sqrt(rad);
trianglesArea_D[idx] = area*(1.0-corruptTriFlag_D[idx]);
}
/*
* DeformableGPUSurfaceMT::GetTotalValidSurfArea
*/
float DeformableGPUSurfaceMT::GetTotalValidSurfArea() {
// Compute triangle areas of all (non-corrupt) triangles
if (!CudaSafeCall(this->accTriangleArea_D.Validate(this->triangleCnt))) {
return false;
}
if (!CudaSafeCall(this->accTriangleArea_D.Set(0x00))) {
return false;
}
cudaGraphicsResource* cudaTokens[2];
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboVtxData,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[1],
this->vboTriangleIdx,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt;
size_t vboSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
uint *triangleIdxPt;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&triangleIdxPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
#ifdef USE_TIMER
float dt_ms;
hipEvent_t event1, event2;
hipEventCreate(&event1);
hipEventCreate(&event2);
hipEventRecord(event1, 0);
#endif
// Call kernel
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_ComputeValidTriangleAreas_D) , dim3(Grid(this->triangleCnt, 256)), dim3(256) , 0, 0,
this->accTriangleArea_D.Peek(),
vboPt,
triangleIdxPt,
this->corruptTriangles_D.Peek(),
this->triangleCnt);
::CheckForCudaErrorSync();
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'ComputeTriangleArea_D %.10f sec\n",
dt_ms/1000.0);
#endif
// Compute sum of all (non-corrupt) triangle areas
float totalArea = thrust::reduce(
thrust::device_ptr<float>(this->accTriangleArea_D.Peek()),
thrust::device_ptr<float>(this->accTriangleArea_D.Peek() + this->triangleCnt));
::CheckForCudaErrorSync();
if (!CudaSafeCall(hipGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
    // // DEBUG Copy back and accumulate
// HostArr<float> accTriangleArea;
// accTriangleArea.Validate(this->accTriangleArea_D.GetCount());
// this->accTriangleArea_D.CopyToHost(accTriangleArea.Peek());
// float sum = 0.0f;
// for (int i = 0; i < this->accTriangleArea_D.GetCount(); ++i) {
// sum = sum + accTriangleArea.Peek()[i];
// }
// printf("sum: %f, triangles %i\n", sum, this->triangleCnt);
// return sum;
// // END DEBUG
return totalArea;
}
/*
* DeformableGPUSurfaceMT::FlagCorruptTriangles
*/
bool DeformableGPUSurfaceMT::FlagCorruptTriangles(
float *volume_D,
const uint *targetActiveCells,
int3 volDim,
float3 volOrg,
float3 volDelta,
float isovalue) {
using namespace megamol::core::utility::log;
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
if (!this->InitCorruptFlagVBO(this->vertexCnt)) {
return false;
}
// Allocate memory for corrupt triangles
if (!CudaSafeCall(this->corruptTriangles_D.Validate(this->triangleCnt))) {
return false;
}
// Init with zero
if (!CudaSafeCall(this->corruptTriangles_D.Set(0x00))) {
return false;
}
// ::CheckForCudaErrorSync();
    hipGraphicsResource* cudaTokens[3];
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboVtxData,
hipGraphicsMapFlagsNone))) {
return false;
}
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[1],
this->vboTriangleIdx,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[2],
this->vboCorruptTriangleVertexFlag,
hipGraphicsMapFlagsNone))) {
return false;
}
// ::CheckForCudaErrorSync();
    // Map CUDA resource handles
if (!CudaSafeCall(hipGraphicsMapResources(3, cudaTokens, 0))) {
return false;
}
// ::CheckForCudaErrorSync();
/* Get mapped pointers to the vertex data buffer */
float *vboPt;
size_t vboSize;
float* vboFlagPt;
unsigned int *vboTriangleIdxPt;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt),
&vboSize,
cudaTokens[0]))) {
return false;
}
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboTriangleIdxPt),
&vboSize,
cudaTokens[1]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboFlagPt),
&vboSize,
cudaTokens[2]))) {
return false;
}
::CheckForCudaErrorSync();
if (!CudaSafeCall(hipMemset(vboFlagPt, 0x00, this->vertexCnt*sizeof(float)))) {
return false;
}
// Call kernel
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_FlagCorruptTriangles_D) , dim3(Grid(this->triangleCnt, 256)), dim3(256) , 0, 0,
vboFlagPt,
this->corruptTriangles_D.Peek(),
vboPt,
AbstractGPUSurface::vertexDataStride,
AbstractGPUSurface::vertexDataOffsPos,
AbstractGPUSurface::vertexDataOffsNormal,
vboTriangleIdxPt,
volume_D,
targetActiveCells,
(float4*)(this->externalForces_D.Peek()),
this->triangleCnt,
isovalue);
::CheckForCudaErrorSync();
// Set vertex flags according to triangle flags
HostArr<float> triFlags, vtxFlags;
HostArr<uint> triIdx;
triFlags.Validate(this->triangleCnt);
triIdx.Validate(this->triangleCnt*3);
vtxFlags.Validate(this->vertexCnt);
hipMemcpy(vtxFlags.Peek(), vboFlagPt,
sizeof(float)*this->vertexCnt, hipMemcpyDeviceToHost);
hipMemcpy(triIdx.Peek(), vboTriangleIdxPt,
sizeof(uint)*this->triangleCnt*3, hipMemcpyDeviceToHost);
hipMemcpy(triFlags.Peek(), this->corruptTriangles_D.Peek(),
sizeof(float)*this->triangleCnt, hipMemcpyDeviceToHost);
vtxFlags.Set(0x00);
for (int i = 0; i < this->triangleCnt; ++i) {
float triFlag = triFlags.Peek()[i];
if (triFlag == 1.0) {
vtxFlags.Peek()[triIdx.Peek()[3*i+0]] = 1.0;
vtxFlags.Peek()[triIdx.Peek()[3*i+1]] = 1.0;
vtxFlags.Peek()[triIdx.Peek()[3*i+2]] = 1.0;
}
}
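    // A vertex is flagged as soon as it is referenced by at least one corrupt
    // triangle; the flag VBO can then be used by later passes (e.g. rendering)
    // to identify the affected vertices.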
// DEBUG Check validity of vertex flags
HostArr<bool> vtxFlagValid;
vtxFlagValid.Validate(this->vertexCnt);
vtxFlagValid.Set(0x00);
for (int i = 0; i < this->triangleCnt; ++i) {
float triFlag = triFlags.Peek()[i];
float vtxFlag0 = vtxFlags.Peek()[triIdx.Peek()[3*i+0]];
float vtxFlag1 = vtxFlags.Peek()[triIdx.Peek()[3*i+1]];
float vtxFlag2 = vtxFlags.Peek()[triIdx.Peek()[3*i+2]];
if (triFlag == 1.0) {
if (vtxFlag0 == 1.0) {
vtxFlagValid.Peek()[triIdx.Peek()[3*i+0]] = true;
} else {
printf("INVALIV zero VERTEX FLAG %i (0)\n", triIdx.Peek()[3*i+0]);
}
if (vtxFlag1 == 1.0) {
vtxFlagValid.Peek()[triIdx.Peek()[3*i+1]] = true;
} else {
printf("INVALIV zero VERTEX FLAG %i (1)\n", triIdx.Peek()[3*i+1]);
}
if (vtxFlag2 == 1.0) {
vtxFlagValid.Peek()[triIdx.Peek()[3*i+2]] = true;
} else {
printf("INVALIV zero VERTEX FLAG %i (2)\n", triIdx.Peek()[3*i+2]);
}
}
}
for (int i = 0; i < this->vertexCnt; ++i) {
if (vtxFlags.Peek()[i] == 1.0) {
if (vtxFlagValid.Peek()[i] == false) {
printf("INVALIV one VERTEX FLAG %i\n", i);
}
}
}
vtxFlagValid.Release();
// END DEBUG
hipMemcpy(vboFlagPt, vtxFlags.Peek(),
sizeof(float)*this->vertexCnt, hipMemcpyHostToDevice);
if (!CudaSafeCall(hipGetLastError())) {
return false;
}
triIdx.Release();
vtxFlags.Release();
triFlags.Release();
if (!CudaSafeCall(hipGetLastError())) {
return false;
}
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(hipGraphicsUnmapResources(3, cudaTokens, 0))) {
return false;
}
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[2]))) {
return false;
}
return true;
}
/*
* DeformableGPUSurfaceMT::InitCorruptFlagVBO
*/
bool DeformableGPUSurfaceMT::InitCorruptFlagVBO(size_t vertexCnt) {
// Destroy if necessary
if (this->vboCorruptTriangleVertexFlag) {
        glBindBufferARB(GL_ARRAY_BUFFER, this->vboCorruptTriangleVertexFlag);
        glDeleteBuffersARB(1, &this->vboCorruptTriangleVertexFlag);
        this->vboCorruptTriangleVertexFlag = 0;
        glBindBufferARB(GL_ARRAY_BUFFER, 0);
}
// Create vertex buffer object for corrupt vertex flag
glGenBuffersARB(1, &this->vboCorruptTriangleVertexFlag);
glBindBufferARB(GL_ARRAY_BUFFER, this->vboCorruptTriangleVertexFlag);
glBufferDataARB(GL_ARRAY_BUFFER, sizeof(float)*vertexCnt, 0, GL_DYNAMIC_DRAW);
glBindBufferARB(GL_ARRAY_BUFFER, 0);
return CheckForGLError();
}
/*
* DeformableGPUSurfaceMT::InitVtxPathVBO
*/
bool DeformableGPUSurfaceMT::InitVtxPathVBO(size_t vertexCnt) {
// Destroy if necessary
if (this->vboVtxPath) {
glBindBufferARB(GL_ARRAY_BUFFER, this->vboVtxPath);
glDeleteBuffersARB(1, &this->vboVtxPath);
this->vboVtxPath = 0;
glBindBufferARB(GL_ARRAY_BUFFER, 0);
}
    // Create vertex buffer object for the per-vertex path length (uncertainty)
glGenBuffersARB(1, &this->vboVtxPath);
glBindBufferARB(GL_ARRAY_BUFFER, this->vboVtxPath);
glBufferDataARB(GL_ARRAY_BUFFER, sizeof(float)*vertexCnt, 0, GL_DYNAMIC_DRAW);
glBindBufferARB(GL_ARRAY_BUFFER, 0);
// printf("InitVtxPathVBO: %u bytes\n", sizeof(float)*vertexCnt);
return CheckForGLError();
}
/*
* DeformableGPUSurfaceMT::InitVtxAttribVBO
*/
bool DeformableGPUSurfaceMT::InitVtxAttribVBO(size_t vertexCnt) {
// Destroy if necessary
if (this->vboVtxAttr) {
glBindBufferARB(GL_ARRAY_BUFFER, this->vboVtxAttr);
glDeleteBuffersARB(1, &this->vboVtxAttr);
this->vboVtxAttr = 0;
glBindBufferARB(GL_ARRAY_BUFFER, 0);
}
    // Create vertex buffer object for the per-vertex attribute
glGenBuffersARB(1, &this->vboVtxAttr);
glBindBufferARB(GL_ARRAY_BUFFER, this->vboVtxAttr);
glBufferDataARB(GL_ARRAY_BUFFER, sizeof(float)*vertexCnt, 0, GL_DYNAMIC_DRAW);
glBindBufferARB(GL_ARRAY_BUFFER, 0);
// printf("InitVtxPathVBO: %u bytes\n", sizeof(float)*vertexCnt);
return CheckForGLError();
}
/*
* DeformableGPUSurfaceMT::initExtForcesGradient
*/
bool DeformableGPUSurfaceMT::initExtForcesGradient(float *volTarget_D,
int3 volDim, float3 volOrg, float3 volDelta) {
using namespace megamol::core::utility::log;
int volSize = volDim.x*volDim.y*volDim.z;
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
// Allocate memory
if (!CudaSafeCall(this->externalForces_D.Validate(volSize*4))) {
Log::DefaultLog.WriteError(
"%s: could not allocate memory",
this->ClassName());
return false;
}
// Init with zero
if (!CudaSafeCall(this->externalForces_D.Set(0))) {
Log::DefaultLog.WriteError(
"%s: could not init memory",
this->ClassName());
return false;
}
#ifdef USE_CUDA_TIMER
float dt_ms;
hipEvent_t event1, event2;
hipEventCreate(&event1);
hipEventCreate(&event2);
hipEventRecord(event1, 0);
#endif
// Calculate gradient using finite differences
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_CalcVolGradient_D) , dim3(Grid(volSize, 256)), dim3(256) , 0, 0,
(float4*)this->externalForces_D.Peek(), volTarget_D);
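    // The resulting gradient of the target volume serves as the external force
    // field that later drives the vertices towards the target isosurface.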
#ifdef USE_CUDA_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'CalcVolGradient_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
return true;
}
/*
* DeformableGPUSurfaceMT::initExtForcesDistfield
*/
bool DeformableGPUSurfaceMT::initExtForcesDistfield(
float *volume_D,
float *vertexBuffer_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
float distfieldDist,
float isovalue) {
using namespace megamol::core::utility::log;
int volSize = volDim.x*volDim.y*volDim.z;
/* Init grid parameters */
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
// Compute distance field
if (!CudaSafeCall(this->distField_D.Validate(volSize))) {
return false;
}
#ifdef USE_TIMER
float dt_ms;
hipEvent_t event1, event2;
hipEventCreate(&event1);
hipEventCreate(&event2);
hipEventRecord(event1, 0);
#endif
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_ComputeDistField_D) , dim3(Grid(volSize, 256)), dim3(256) , 0, 0,
vertexBuffer_D,
this->distField_D.Peek(),
this->vertexCnt,
this->vertexDataOffsPos,
this->vertexDataStride);
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'ComputeDistField_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
// Compute gradient
if (!CudaSafeCall(this->externalForces_D.Validate(volSize*4))) {
return false;
}
if (!CudaSafeCall(this->externalForces_D.Set(0))) {
return false;
}
#ifdef USE_TIMER
hipEventRecord(event1, 0);
#endif
// Calculate gradient using finite differences
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_CalcVolGradientWithDistField_D) , dim3(Grid(volSize, 256)), dim3(256) , 0, 0,
(float4*)this->externalForces_D.Peek(),
volume_D,
this->distField_D.Peek(), distfieldDist, isovalue);
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'CalcVolGradientWithDistField_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
return CudaSafeCall(hipGetLastError());
}
bool DeformableGPUSurfaceMT::initExtForcesGVF(
float *volumeTarget_D,
const unsigned int *cellStatesTarget_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
float isovalue,
float gvfScl,
unsigned int gvfIt) {
using namespace megamol::core::utility::log;
/* Init grid parameters */
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
int volSize = volDim.x*volDim.y*volDim.z;
// Compute external forces
if (!CudaSafeCall(this->externalForces_D.Validate(volSize*4))) {
return false;
}
if (!CudaSafeCall(this->externalForces_D.Set(0))) {
return false;
}
if (!CudaSafeCall(this->gvfTmp_D.Validate(volSize*4))) {
return false;
}
if (!CudaSafeCall(this->gvfTmp_D.Set(0))) {
return false;
}
if (!CudaSafeCall(this->gvfConstData_D.Validate(volSize*4))) {
return false;
}
if (!CudaSafeCall(this->gvfConstData_D.Set(0))) {
return false;
}
// Use GVF
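    // GVF (gradient vector flow) diffuses the gradient of the target volume so
    // that the external forces also act on vertices far away from the target
    // isosurface (larger capture range than the raw gradient).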
if (!DiffusionSolver::CalcGVF(
volumeTarget_D,
this->gvfConstData_D.Peek(),
cellStatesTarget_D,
volDim,
volDelta,
volOrg,
this->externalForces_D.Peek(),
this->gvfTmp_D.Peek(),
gvfIt,
gvfScl)) {
return false;
}
return true;
}
/*
* DeformableGPUSurfaceMT::initExtForcesTwoWayGVF
*/
bool DeformableGPUSurfaceMT::initExtForcesTwoWayGVF(
float *volumeSource_D,
float *volumeTarget_D,
const unsigned int *cellStatesSource_D,
const unsigned int *cellStatesTarget_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
float isovalue,
float gvfScl,
unsigned int gvfIt) {
using namespace megamol::core::utility::log;
/* Init grid parameters */
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
//#ifdef USE_TIMER
float dt_ms;
hipEvent_t event1, event2;
hipEventCreate(&event1);
hipEventCreate(&event2);
hipEventRecord(event1, 0);
//#endif
int volSize = volDim.x*volDim.y*volDim.z;
// Compute external forces
if (!CudaSafeCall(this->externalForces_D.Validate(volSize*4))) {
return false;
}
if (!CudaSafeCall(this->externalForces_D.Set(0))) {
return false;
}
if (!CudaSafeCall(this->gvfTmp_D.Validate(volSize*4))) {
return false;
}
if (!CudaSafeCall(this->gvfTmp_D.Set(0))) {
return false;
}
if (!CudaSafeCall(this->gvfConstData_D.Validate(volSize*4))) {
return false;
}
if (!CudaSafeCall(this->gvfConstData_D.Set(0))) {
return false;
}
// Calculate two way gvf by using isotropic diffusion
if (!DiffusionSolver::CalcTwoWayGVF(
volumeSource_D,
volumeTarget_D,
cellStatesSource_D,
cellStatesTarget_D,
volDim,
volOrg,
volDelta,
this->gvfConstData_D.Peek(),
this->externalForces_D.Peek(),
this->gvfTmp_D.Peek(),
gvfIt,
gvfScl)) {
return false;
}
//#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
// Log::DefaultLog.WriteInfo(
// "%s: Time for bi-directional diffusion %f\n",
// "DeformableGPUSurfaceMT", dt_ms/1000.0f);
//#endif
// printf("GVF : %.10f\n",
// dt_ms/1000.0f);
return true;
}
/*
* DeformableGPUSurfaceMT::InitGridParams
*/
bool DeformableGPUSurfaceMT::InitGridParams(uint3 gridSize, float3 org, float3 delta) {
hipMemcpyToSymbol(gridSize_D, &gridSize, sizeof(uint3));
hipMemcpyToSymbol(gridOrg_D, &org, sizeof(float3));
hipMemcpyToSymbol(gridDelta_D, &delta, sizeof(float3));
// printf("Init grid with org %f %f %f, delta %f %f %f, dim %u %u %u\n", org.x,
// org.y, org.z, delta.x, delta.y, delta.z, gridSize.x, gridSize.y,
// gridSize.z);
return CudaSafeCall(hipGetLastError());
}
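// Minimal usage sketch (illustrative values only): the grid parameters have to
// be uploaded to constant memory before launching kernels that sample the
// volume, e.g.
//     uint3 dim = make_uint3(64, 64, 64);
//     float3 org = make_float3(0.0f, 0.0f, 0.0f);
//     float3 delta = make_float3(1.0f, 1.0f, 1.0f);
//     DeformableGPUSurfaceMT::InitGridParams(dim, org, delta);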
/*
* DeformableGPUSurfaceMT_IntOverTriangles_D
*/
__global__ void DeformableGPUSurfaceMT_IntOverTriangles_D(
float *trianglesAreaWeightedVertexVals_D,
float *trianglesArea_D,
uint *triangleIdx_D,
float *scalarValue_D,
uint triangleCnt) {
const uint idx = ::getThreadIdx();
if (idx >= triangleCnt) {
return;
}
// Compute average
float avgVal = (scalarValue_D[triangleIdx_D[idx*3+0]] +
scalarValue_D[triangleIdx_D[idx*3+1]] +
scalarValue_D[triangleIdx_D[idx*3+2]])/3.0;
trianglesAreaWeightedVertexVals_D[idx] = avgVal*trianglesArea_D[idx];
}
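// Note on the quadrature: each triangle contributes mean(vertex values) * area,
// i.e. the scalar field is treated as piecewise linear and averaged per
// triangle; summing these contributions on the host (thrust::reduce) yields an
// approximation of the surface integral.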
/*
* DeformableGPUSurfaceMT_IntOverValidTriangles_D
*/
__global__ void DeformableGPUSurfaceMT_IntOverValidTriangles_D(
float *trianglesAreaWeightedVertexVals_D,
float *trianglesArea_D,
uint *triangleIdx_D,
float *scalarValue_D,
float *corruptTriFlag_D,
uint triangleCnt) {
const uint idx = ::getThreadIdx();
if (idx >= triangleCnt) {
return;
}
// Compute average
float avgVal = (scalarValue_D[triangleIdx_D[idx*3+0]] +
scalarValue_D[triangleIdx_D[idx*3+1]] +
scalarValue_D[triangleIdx_D[idx*3+2]])/3.0;
trianglesAreaWeightedVertexVals_D[idx] = avgVal*trianglesArea_D[idx]*(1.0-corruptTriFlag_D[idx]);
}
/*
* DeformableGPUSurfaceMT::IntOverSurfArea
*/
float DeformableGPUSurfaceMT::IntOverSurfArea(float *value_D) {
// Compute triangle areas of all (non-corrupt) triangles
if (!CudaSafeCall(this->accTriangleData_D.Validate(this->triangleCnt))) {
return false;
}
    hipGraphicsResource* cudaTokens[1];
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboTriangleIdx,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsMapResources(1, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
uint *triangleIdxPt;
size_t vboSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&triangleIdxPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
#ifdef USE_TIMER
float dt_ms;
hipEvent_t event1, event2;
hipEventCreate(&event1);
hipEventCreate(&event2);
hipEventRecord(event1, 0);
#endif
// Call kernel
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_IntOverTriangles_D) , dim3(Grid(this->triangleCnt, 256)), dim3(256) , 0, 0,
this->accTriangleData_D.Peek(),
this->accTriangleArea_D.Peek(),
triangleIdxPt,
value_D,
this->triangleCnt);
::CheckForCudaErrorSync();
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'intOverTriangles_D %.10f sec\n",
dt_ms/1000.0);
#endif
    // Sum the area-weighted values over all triangles (approximates the surface integral)
float integralVal = thrust::reduce(
thrust::device_ptr<float>(this->accTriangleData_D.Peek()),
thrust::device_ptr<float>(this->accTriangleData_D.Peek() + this->triangleCnt));
if (!CudaSafeCall(hipGraphicsUnmapResources(1, cudaTokens, 0))) {
return false;
}
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
return integralVal;
}
/**
 * Integrate the per-vertex path length (stored in the vertex path VBO)
 * over the surface area.
 *
 * @return The integral value
 */
float DeformableGPUSurfaceMT::IntVtxPathOverSurfArea() {
// TODO Assumes triangle area to be computed
// Device array for accumulated data
if (!CudaSafeCall(this->accTriangleData_D.Validate(this->triangleCnt))) {
return false;
}
if (!CudaSafeCall(this->accTriangleData_D.Set(0x00))) {
return false;
}
    hipGraphicsResource* cudaTokens[2];
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboTriangleIdx,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[1],
this->vboVtxPath,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
uint *triangleIdxPt;
size_t vboSizeTri;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&triangleIdxPt), // The mapped pointer
&vboSizeTri, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *uncertaintyPt;
size_t vboSizeUncertainty;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&uncertaintyPt), // The mapped pointer
&vboSizeUncertainty, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
#ifdef USE_TIMER
float dt_ms;
hipEvent_t event1, event2;
hipEventCreate(&event1);
hipEventCreate(&event2);
hipEventRecord(event1, 0);
#endif
// Call kernel
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_IntOverTriangles_D) , dim3(Grid(this->triangleCnt, 256)), dim3(256) , 0, 0,
this->accTriangleData_D.Peek(),
this->accTriangleArea_D.Peek(),
triangleIdxPt,
uncertaintyPt,
this->triangleCnt);
::CheckForCudaErrorSync();
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'intOverTriangles_D %.10f sec\n",
dt_ms/1000.0);
#endif
    // Sum the area-weighted values over all triangles (approximates the surface integral)
float integralVal = thrust::reduce(
thrust::device_ptr<float>(this->accTriangleData_D.Peek()),
thrust::device_ptr<float>(this->accTriangleData_D.Peek() + this->triangleCnt));
if (!CudaSafeCall(hipGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
return integralVal;
}
/**
 * Integrate the per-vertex path length (stored in the vertex path VBO)
 * over the area of all non-corrupt triangles.
 *
 * @return The integral value
 */
float DeformableGPUSurfaceMT::IntVtxPathOverValidSurfArea() {
// TODO Assumes triangle area to be computed
// Device array for accumulated data
if (!CudaSafeCall(this->accTriangleData_D.Validate(this->triangleCnt))) {
return false;
}
if (!CudaSafeCall(this->accTriangleData_D.Set(0x00))) {
return false;
}
    hipGraphicsResource* cudaTokens[2];
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboTriangleIdx,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[1],
this->vboVtxPath,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
uint *triangleIdxPt;
size_t vboSizeTri;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&triangleIdxPt), // The mapped pointer
&vboSizeTri, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *uncertaintyPt;
size_t vboSizeUncertainty;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&uncertaintyPt), // The mapped pointer
&vboSizeUncertainty, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
#ifdef USE_TIMER
float dt_ms;
hipEvent_t event1, event2;
hipEventCreate(&event1);
hipEventCreate(&event2);
hipEventRecord(event1, 0);
#endif
// Call kernel
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_IntOverValidTriangles_D) , dim3(Grid(this->triangleCnt, 256)), dim3(256) , 0, 0,
this->accTriangleData_D.Peek(),
this->accTriangleArea_D.Peek(),
triangleIdxPt,
uncertaintyPt,
this->corruptTriangles_D.Peek(),
this->triangleCnt);
::CheckForCudaErrorSync();
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'intOverTriangles_D %.10f sec\n",
dt_ms/1000.0);
#endif
    // Sum the area-weighted values over all non-corrupt triangles (approximates the surface integral)
float integralVal = thrust::reduce(
thrust::device_ptr<float>(this->accTriangleData_D.Peek()),
thrust::device_ptr<float>(this->accTriangleData_D.Peek() + this->triangleCnt));
if (!CudaSafeCall(hipGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
return integralVal;
}
/**
 * Integrate the per-vertex attribute (stored in the vertex attribute VBO)
 * over the surface area.
 *
 * @return The integral value
 */
float DeformableGPUSurfaceMT::IntVtxAttribOverSurfArea() {
// TODO Assumes triangle area to be computed
// Device array for accumulated data
if (!CudaSafeCall(this->accTriangleData_D.Validate(this->triangleCnt))) {
return false;
}
if (!CudaSafeCall(this->accTriangleData_D.Set(0x00))) {
return false;
}
    hipGraphicsResource* cudaTokens[2];
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboTriangleIdx,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[1],
this->vboVtxAttr,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
uint *triangleIdxPt;
size_t vboSizeTri;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&triangleIdxPt), // The mapped pointer
&vboSizeTri, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vertexAttrPt;
size_t vboVertexAttrSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vertexAttrPt), // The mapped pointer
&vboVertexAttrSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
#ifdef USE_TIMER
float dt_ms;
hipEvent_t event1, event2;
hipEventCreate(&event1);
hipEventCreate(&event2);
hipEventRecord(event1, 0);
#endif
// Call kernel
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_IntOverTriangles_D) , dim3(Grid(this->triangleCnt, 256)), dim3(256) , 0, 0,
this->accTriangleData_D.Peek(),
this->accTriangleArea_D.Peek(),
triangleIdxPt,
vertexAttrPt,
this->triangleCnt);
::CheckForCudaErrorSync();
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'intOverTriangles_D %.10f sec\n",
dt_ms/1000.0);
#endif
    // Sum the area-weighted values over all triangles (approximates the surface integral)
float integralVal = thrust::reduce(
thrust::device_ptr<float>(this->accTriangleData_D.Peek()),
thrust::device_ptr<float>(this->accTriangleData_D.Peek() + this->triangleCnt));
if (!CudaSafeCall(hipGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
return integralVal;
}
/**
 * Integrate the per-vertex attribute (stored in the vertex attribute VBO)
 * over the area of all non-corrupt triangles.
 *
 * @return The integral value
 */
float DeformableGPUSurfaceMT::IntVtxAttribOverValidSurfArea() {
// TODO Assumes triangle area to be computed
// Device array for accumulated data
if (!CudaSafeCall(this->accTriangleData_D.Validate(this->triangleCnt))) {
return false;
}
if (!CudaSafeCall(this->accTriangleData_D.Set(0x00))) {
return false;
}
    hipGraphicsResource* cudaTokens[2];
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboTriangleIdx,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[1],
this->vboVtxAttr,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
uint *triangleIdxPt;
size_t vboSizeTri;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&triangleIdxPt), // The mapped pointer
&vboSizeTri, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vertexAttrPt;
size_t vboVertexAttrSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vertexAttrPt), // The mapped pointer
&vboVertexAttrSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
#ifdef USE_TIMER
float dt_ms;
hipEvent_t event1, event2;
hipEventCreate(&event1);
hipEventCreate(&event2);
hipEventRecord(event1, 0);
#endif
// Call kernel
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_IntOverValidTriangles_D) , dim3(Grid(this->triangleCnt, 256)), dim3(256) , 0, 0,
this->accTriangleData_D.Peek(),
this->accTriangleArea_D.Peek(),
triangleIdxPt,
vertexAttrPt,
this->corruptTriangles_D.Peek(),
this->triangleCnt);
::CheckForCudaErrorSync();
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'intOverTriangles_D %.10f sec\n",
dt_ms/1000.0);
#endif
    // Sum the area-weighted values over all non-corrupt triangles (approximates the surface integral)
float integralVal = thrust::reduce(
thrust::device_ptr<float>(this->accTriangleData_D.Peek()),
thrust::device_ptr<float>(this->accTriangleData_D.Peek() + this->triangleCnt));
if (!CudaSafeCall(hipGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
return integralVal;
}
/*
* DeformableGPUSurfaceMT::IntOverCorruptSurfArea
*/
float DeformableGPUSurfaceMT::IntOverCorruptSurfArea() {
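    // Currently a stub that always returns 0; see IntUncertaintyOverCorruptSurfArea
    // for the uncertainty integration over corrupt regions.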
return 0.0f;
}
/**
 * Propagates a single vertex along the external force field until the
 * displacement falls below minDisplScl or maxSteps is reached.
 *
 * @return The final position (x, y, z) and the accumulated path length (w)
 */
float4 UpdateVtxPosSingle (
float3 posStart, // Starting position
float4 *gradient, // External forces
float *targetVol, // The target volume
float minDisplScl, // Minimum displacement for convergence
float forcesScl, // General scaling factor for forces
        float isovalue,        // Isovalue
float org[3], float delta[3], int dim[3],
int maxSteps,
int maxLevel,
        float initStepSize) {
float3 pos = posStart;
float sample = SampleFieldAtPosTrilin((float*)(&pos), targetVol, org, delta, dim);
bool outside = sample <= isovalue;
float extForcesScl;
if (outside) extForcesScl = 1.0;
else extForcesScl = -1.0;
float len = 0.0f;
bool converged = false;
int steps = 0;
do {
// printf("current pos: %f %f %f\n", pos.x, pos.y, pos.z);
// Get volume sample
float sample = SampleFieldAtPosTrilin((float*)(&pos), targetVol, org, delta, dim);
// Switch sign and scale down if necessary
bool negative = extForcesScl < 0;
bool outside = sample <= isovalue;
int switchSign = int((negative && outside)||(!negative && !outside));
extForcesScl = extForcesScl*(1.0*(1-switchSign) - 1.0*switchSign);
extForcesScl *= (1.0*(1-switchSign) + 0.5*(switchSign));
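        // If the sample has crossed the isovalue since the last step, the force
        // direction is flipped and the step size is halved (bisection-like
        // damping); otherwise direction and step size are kept.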
// Get external forces sample and scale
float4 extForceTmp = SampleFieldAtPosTrilin((float*)(&pos), gradient, org, delta, dim);
float3 extForce = make_float3(extForceTmp.x, extForceTmp.y, extForceTmp.z);
extForce = safeNormalize(extForce);
// Accumulate path
len += extForcesScl*forcesScl;
extForce *= extForcesScl*forcesScl;
// Propagate vertex and increase path length
pos += extForce;
if (length(extForce) <= minDisplScl) {
converged = true;
}
steps++;
} while (!converged && steps < maxSteps);
return make_float4(pos.x, pos.y, pos.z, len);
}
/**
 * Recursively integrates the accumulated path length (uncertainty) over a
 * corrupt triangle: the vertices are propagated towards the target isosurface
 * and the triangle is subdivided until its midpoint lies in an active cell of
 * the target volume or the maximum recursion depth is reached. The area of the
 * final (sub)triangles is accumulated in triArea.
 *
 * @return The integrated uncertainty of the (sub)triangle
 */
float DeformableGPUSurfaceMT::IntUncertaintyOverCorruptAreaRec(
float3 pos1, float3 pos2, float3 pos3, // Vertex positions of the triangle
float len1, float len2, float len3, // Vertex path lengths of the triangle
float4 *gradient, // External forces
float *targetVol, // The target volume
unsigned int *targetActiveCells, // Active cells of the target volume
float minDisplScl, // Minimum displacement for convergence
float forcesScl, // General scaling factor for forces
float isovalue, // Isovalue
float &triArea,
uint depth,
float org[3], float delta[3], int dim[3],
vislib::Array<float> &triArr,
int maxSteps,
int maxLevel,
float initStepSize) {
// printf("depth: %i\n", depth);
// 1. Propagate vertices until they converge to a fixed position
float4 newPosLen1, newPosLen2, newPosLen3;
newPosLen1 = UpdateVtxPosSingle(pos1, gradient, targetVol,
minDisplScl, forcesScl, isovalue, org, delta, dim,
maxSteps,
maxLevel,
initStepSize);
newPosLen2 = UpdateVtxPosSingle(pos2, gradient, targetVol,
minDisplScl, forcesScl, isovalue, org, delta, dim,
maxSteps,
maxLevel,
initStepSize);
newPosLen3 = UpdateVtxPosSingle(pos3, gradient, targetVol,
minDisplScl, forcesScl, isovalue, org, delta, dim,
maxSteps,
maxLevel,
initStepSize);
float3 newPos1, newPos2, newPos3;
newPos1 = make_float3(newPosLen1.x, newPosLen1.y, newPosLen1.z);
newPos2 = make_float3(newPosLen2.x, newPosLen2.y, newPosLen2.z);
newPos3 = make_float3(newPosLen3.x, newPosLen3.y, newPosLen3.z);
// 2. Check whether the resulting triangle is valid
float3 midpoint = (newPos1+newPos2+newPos3)/3.0;
int3 coords;
coords.x = int((midpoint.x-org[0])/delta[0]);
coords.y = int((midpoint.y-org[1])/delta[1]);
coords.z = int((midpoint.z-org[2])/delta[2]);
//int cellIDx = ::GetCellIdxByGridCoords(coords);
int cellIdx = (dim[0]-1)*((dim[1]-1)*coords.z + coords.y) + coords.x;
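    // Linear cell index in the (dimX-1) x (dimY-1) x (dimZ-1) cell grid of the
    // target volume; the triangle midpoint decides which cell's state is checked.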
uint cellState = targetActiveCells[cellIdx];
if ((cellState == 1) || (depth >= (int)maxLevel)) {
triArr.Add(newPos1.x);
triArr.Add(newPos1.y);
triArr.Add(newPos1.z);
triArr.Add(newPos2.x);
triArr.Add(newPos2.y);
triArr.Add(newPos2.z);
triArr.Add(newPos3.x);
triArr.Add(newPos3.y);
triArr.Add(newPos3.z);
// printf("%.16f;%.16f;%.16f;%.16f;%.16f;%.16f;%.16f;%.16f;%.16f\n",
// newPos1.x, newPos1.y, newPos1.z,
// newPos2.x, newPos2.y, newPos2.z,
// newPos3.x, newPos3.y, newPos3.z);
// 3a. Cell is active, therefore triangle is valid
// --> Compute integrated uncertainty value
// Get triangle area
float a = length(newPos1 - newPos2);
float b = length(newPos1 - newPos3);
float c = length(newPos2 - newPos3);
// Compute area (Heron's formula)
float rad = (a + b - c)*(c + a - b)*(a + b + c)*(b + c - a);
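        // (radicand equals 16*A^2, cf. Heron's identity)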
// Make sure radicand is not negative
rad = rad > 0.0f ? rad : 0.0f;
float area = 0.25f*sqrt(rad);
triArea = area;
// Get average value
float avgValue = (len1+newPosLen1.w+len2+newPosLen2.w+len3+newPosLen3.w)/3.0f;
// Approximate integration
return triArea*avgValue;
} else {
float triArea1, triArea2, triArea3, triArea4;
// 3b. Cell is not active, therefore, triangle is not valid
// --> Subdivide and call recursively
float3 p12 = (newPos1+newPos2)/2.0;
float3 p13 = (newPos1+newPos3)/2.0;
float3 p32 = (newPos3+newPos2)/2.0;
float l12 = (len1+newPosLen1.w+len2+newPosLen2.w)/2.0;
float l13 = (len1+newPosLen1.w+len3+newPosLen3.w)/2.0;
float l32 = (len3+newPosLen3.w+len2+newPosLen2.w)/2.0;
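        // 1-to-4 subdivision: three corner triangles plus one center triangle
        // built from the edge midpoints; path lengths at the midpoints are
        // obtained by averaging the adjacent corner values.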
float intUncertainty1 =
DeformableGPUSurfaceMT::IntUncertaintyOverCorruptAreaRec(
newPos1, p12, p13,
len1+newPosLen1.w, l12, l13,
gradient, targetVol, targetActiveCells,
minDisplScl, forcesScl, isovalue, triArea1,
depth+1, org, delta, dim, triArr,
maxSteps,
maxLevel,
initStepSize);
float intUncertainty2 =
DeformableGPUSurfaceMT::IntUncertaintyOverCorruptAreaRec(
p13, p32, newPos3,
l13, l32, len3+newPosLen3.w,
gradient, targetVol, targetActiveCells,
minDisplScl, forcesScl, isovalue, triArea2,
depth+1, org, delta, dim, triArr,
maxSteps,
maxLevel,
initStepSize);
float intUncertainty3 =
DeformableGPUSurfaceMT::IntUncertaintyOverCorruptAreaRec(
p12, p13, p32,
l12, l13, l32,
gradient, targetVol, targetActiveCells,
minDisplScl, forcesScl, isovalue, triArea3,
depth+1, org, delta, dim, triArr,
maxSteps,
maxLevel,
initStepSize);
float intUncertainty4 =
DeformableGPUSurfaceMT::IntUncertaintyOverCorruptAreaRec(
p12, p32, newPos2,
l12, l32, len2+newPosLen2.w,
gradient, targetVol, targetActiveCells,
minDisplScl, forcesScl, isovalue, triArea4,
depth+1, org, delta, dim, triArr,
maxSteps,
maxLevel,
initStepSize);
triArea = triArea1 + triArea2 + triArea3 + triArea4;
return intUncertainty1 + intUncertainty2 + intUncertainty3 + intUncertainty4;
}
}
/*
* DeformableGPUSurfaceMT::IntUncertaintyOverCorruptSurfArea
*/
float DeformableGPUSurfaceMT::IntUncertaintyOverCorruptSurfArea(
float &corruptArea,
float minDisplScl,
float isovalue,
float forcesScl,
unsigned int *targetActiveCells_D,
float *targetVol_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
vislib::Array<float> &triArr,
int maxDepth,
int maxLevel,
float initStepSize) {
using namespace megamol::core::utility::log;
size_t fieldSize = volDim.x*volDim.y*volDim.z;
size_t cellCnt = (volDim.x-1)*(volDim.y-1)*(volDim.z-1);
// // Allocate memory for corrupt triangles
// if (!CudaSafeCall(this->intUncertaintyCorrupt_D.Validate(this->triangleCnt))) {
// return false;
// }
// // Init with zero
// if (!CudaSafeCall(this->intUncertaintyCorrupt_D.Set(0x00))) {
// return false;
// }
//
// if (!CudaSafeCall(this->accTriangleArea_D.Validate(this->triangleCnt))) {
// return false;
// }
// if (!CudaSafeCall(this->accTriangleArea_D.Set(0x00))){
// return false;
// }
//
// // Init constant device params
// if (!initGridParams(volDim, volOrg, volDelta)) {
// Log::DefaultLog.WriteError(
// "%s: could not init constant device params",
// this->ClassName());
// return false;
// }
//
    hipGraphicsResource* cudaTokens[3];
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboVtxData,
hipGraphicsMapFlagsNone))) {
return false;
}
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[1],
this->vboTriangleIdx,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[2],
this->vboVtxPath,
hipGraphicsMapFlagsNone))) {
return false;
}
// ::CheckForCudaErrorSync();
    // Map CUDA resource handles
if (!CudaSafeCall(hipGraphicsMapResources(3, cudaTokens, 0))) {
return false;
}
// ::CheckForCudaErrorSync();
/* Get mapped pointers to the vertex data buffer */
float *vboPt;
size_t vboSize;
float* vboVtxPathPt;
unsigned int *vboTriangleIdxPt;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt),
&vboSize,
cudaTokens[0]))) {
return false;
}
::CheckForCudaErrorSync();
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboTriangleIdxPt),
&vboSize,
cudaTokens[1]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboVtxPathPt),
&vboSize,
cudaTokens[2]))) {
return false;
}
//
//#ifdef USE_TIMER
// float dt_ms;
// hipEvent_t event1, event2;
// hipEventCreate(&event1);
// hipEventCreate(&event2);
// hipEventRecord(event1, 0);
//#endif
//
// ::CheckForCudaErrorSync();
//
// // Call kernel
// DeformableGPUSurfaceMT_IntUncertaintyOverCorruptArea_D <<< Grid(this->triangleCnt, 256), 256 >>> (
// this->corruptTriangles_D.Peek(),
// vboPt,
// vboVtxPathPt,
// this->vertexDataStride,
// this->vertexDataOffsPos,
// this->vertexDataOffsNormal,
// vboTriangleIdxPt,
// targetVol_D,
// (float4*)this->externalForces_D.Peek(),
// targetActiveCells_D,
// this->triangleCnt,
// isovalue,
// minDisplScl,
// forcesScl,
// this->intUncertaintyCorrupt_D.Peek(),
// this->accTriangleArea_D.Peek());
//
// ::CheckForCudaErrorSync();
//
//#ifdef USE_TIMER
// hipEventRecord(event2, 0);
// hipEventSynchronize(event1);
// hipEventSynchronize(event2);
// hipEventElapsedTime(&dt_ms, event1, event2);
// printf("CUDA time for 'intOverTriangles_D %.10f sec\n",
// dt_ms/1000.0);
//#endif
//
// // Compute sum of all (non-corrupt) triangle areas
// float integralVal = thrust::reduce(
// thrust::device_ptr<float>(this->intUncertaintyCorrupt_D.Peek()),
// thrust::device_ptr<float>(this->intUncertaintyCorrupt_D.Peek() + this->triangleCnt));
//
// corruptArea = thrust::reduce(
// thrust::device_ptr<float>(this->accTriangleArea_D.Peek()),
// thrust::device_ptr<float>(this->accTriangleArea_D.Peek() + this->triangleCnt));
//
// ::CheckForCudaErrorSync();
//
// if (!CudaSafeCall(hipGetLastError())) {
// return false;
// }
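    // The GPU implementation above is disabled; instead the relevant buffers are
    // copied to the host and the uncertainty is integrated with the recursive,
    // adaptive refinement in IntUncertaintyOverCorruptAreaRec (see loop below).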
float integralVal = 0.0f;
corruptArea = 0.0f;
// Get necessary data from GPU
HostArr<float> corruptTriangles;
HostArr<float> vertexBuffer;
HostArr<unsigned int> triangleIdx;
HostArr<float> uncertainty;
HostArr<float> gradient;
HostArr<float> targetVol;
HostArr<unsigned int> targetActiveCells;
corruptTriangles.Validate(this->corruptTriangles_D.GetCount());
vertexBuffer.Validate(this->vertexDataStride*this->vertexCnt);
triangleIdx.Validate(this->triangleCnt*3);
uncertainty.Validate(this->vertexCnt);
gradient.Validate(fieldSize*4);
targetVol.Validate(fieldSize);
targetActiveCells.Validate(cellCnt);
if (!CudaSafeCall(hipMemcpy(corruptTriangles.Peek(), this->corruptTriangles_D.Peek(),
corruptTriangles.GetCount()*sizeof(float), hipMemcpyDeviceToHost))) {
return false;
}
if (!CudaSafeCall(hipMemcpy(vertexBuffer.Peek(), vboPt,
vertexBuffer.GetCount()*sizeof(float), hipMemcpyDeviceToHost))) {
return false;
}
if (!CudaSafeCall(hipMemcpy(triangleIdx.Peek(), vboTriangleIdxPt,
triangleIdx.GetCount()*sizeof(unsigned int), hipMemcpyDeviceToHost))) {
return false;
}
if (!CudaSafeCall(hipMemcpy(uncertainty.Peek(), vboVtxPathPt,
uncertainty.GetCount()*sizeof(float), hipMemcpyDeviceToHost))) {
return false;
}
if (!CudaSafeCall(hipMemcpy(gradient.Peek(), this->externalForces_D.Peek(),
gradient.GetCount()*sizeof(float), hipMemcpyDeviceToHost))) {
return false;
}
if (!CudaSafeCall(hipMemcpy(targetVol.Peek(), targetVol_D,
targetVol.GetCount()*sizeof(float), hipMemcpyDeviceToHost))) {
return false;
}
if (!CudaSafeCall(hipMemcpy(targetActiveCells.Peek(), targetActiveCells_D,
            targetActiveCells.GetCount()*sizeof(unsigned int), hipMemcpyDeviceToHost))) {
return false;
}
// Loop over all corrupt triangles
for (int idx = 0; idx < this->triangleCnt; ++idx) {
// Check whether the triangle is corrupt
if (corruptTriangles.Peek()[idx] == 1.0f) {
// Get initial positions from main memory
uint baseIdx0 = vertexDataStride*triangleIdx.Peek()[3*idx+0];
uint baseIdx1 = vertexDataStride*triangleIdx.Peek()[3*idx+1];
uint baseIdx2 = vertexDataStride*triangleIdx.Peek()[3*idx+2];
float3 pos1 = make_float3(
vertexBuffer.Peek()[baseIdx0+vertexDataOffsPos+0],
vertexBuffer.Peek()[baseIdx0+vertexDataOffsPos+1],
vertexBuffer.Peek()[baseIdx0+vertexDataOffsPos+2]);
float3 pos2 = make_float3(
vertexBuffer.Peek()[baseIdx1+vertexDataOffsPos+0],
vertexBuffer.Peek()[baseIdx1+vertexDataOffsPos+1],
vertexBuffer.Peek()[baseIdx1+vertexDataOffsPos+2]);
float3 pos3 = make_float3(
vertexBuffer.Peek()[baseIdx2+vertexDataOffsPos+0],
vertexBuffer.Peek()[baseIdx2+vertexDataOffsPos+1],
vertexBuffer.Peek()[baseIdx2+vertexDataOffsPos+2]);
// Get initial path lengths from previous morphing
float len1 = uncertainty.Peek()[triangleIdx.Peek()[3*idx+0]];
float len2 = uncertainty.Peek()[triangleIdx.Peek()[3*idx+1]];
float len3 = uncertainty.Peek()[triangleIdx.Peek()[3*idx+2]];
integralVal += this->IntUncertaintyOverCorruptAreaRec(
pos1, pos2, pos3, // Vertex positions of the triangle
len1, len2, len3, // Vertex path lengths of the triangle
(float4*)(gradient.Peek()), // External forces
targetVol.Peek(), // The target volume
targetActiveCells.Peek(), // Active cells of the target volume
minDisplScl, // Minimum displacement for convergence
forcesScl, // General scaling factor for forces
isovalue, // Isovalue
corruptArea,
0,
(float*)&volOrg,
(float*)&volDelta,
(int*)&volDim,
triArr,
maxDepth,
maxLevel,
initStepSize);
}
}
// Cleanup
vertexBuffer.Release();
corruptTriangles.Release();
triangleIdx.Release();
uncertainty.Release();
gradient.Release();
targetVol.Release();
targetActiveCells.Release();
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(hipGraphicsUnmapResources(3, cudaTokens, 0))) {
return false;
}
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[2]))) {
return false;
}
return integralVal;
}
/*
* DeformableGPUSurfaceMT::MorphToVolumeGradient
*/
bool DeformableGPUSurfaceMT::MorphToVolumeGradient(
float *volume_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
float isovalue,
InterpolationMode interpMode,
size_t maxIt,
float surfMappedMinDisplScl,
float springStiffness,
float forceScl,
float externalForcesWeight) {
using megamol::core::utility::log::Log;
/* Init grid parameters */
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
    hipGraphicsResource* cudaTokens[2];
if ((!this->triangleIdxReady)||(!this->neighboursReady)) {
return false;
}
if (volume_D == NULL) {
return false;
}
if (!initExtForcesGradient(volume_D,
volDim, volOrg, volDelta)) {
return false;
}
// Init vbo with uncertainty information
if (!this->InitVtxPathVBO(this->vertexCnt)) {
return false;
}
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0], this->vboVtxData,
hipGraphicsMapFlagsNone))) {
return false;
}
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[1], this->vboVtxPath,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt;
size_t vboSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboVtxPathPt;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboVtxPathPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
// Init forces scale factor with -1 or 1, depending on whether they start
// outside or inside the isosurface
if (!CudaSafeCall(this->vertexExternalForcesScl_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->displLen_D.Validate(this->vertexCnt))) {
return false;
}
#ifdef USE_TIMER
float dt_ms;
hipEvent_t event1, event2;
hipEventCreate(&event1);
hipEventCreate(&event2);
hipEventRecord(event1, 0);
#endif
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_InitExternalForceScl_D) , dim3(Grid(this->vertexCnt, 256)), dim3(256) , 0, 0,
(float*)this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
volume_D,
vboPt,
surfMappedMinDisplScl,
this->vertexCnt,
isovalue,
this->vertexDataOffsPos,
this->vertexDataStride);
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'InitExternalForceScl_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
// Iterations for new position
if (!this->updateVtxPos(
volume_D,
vboPt,
vboVtxPathPt,
volDim,
volOrg,
volDelta,
isovalue,
(interpMode == INTERP_CUBIC),
maxIt,
surfMappedMinDisplScl,
springStiffness,
forceScl,
externalForcesWeight,
false,
false)) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
return true;
}
/*
* DeformableGPUSurfaceMT::MorphToVolumeDistfield
*/
bool DeformableGPUSurfaceMT::MorphToVolumeDistfield(
float *volume_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
float isovalue,
InterpolationMode interpMode,
size_t maxIt,
float surfMappedMinDisplScl,
float springStiffness,
float forceScl,
float externalForcesWeight,
float distfieldDist) {
using megamol::core::utility::log::Log;
/* Init grid parameters */
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
    hipGraphicsResource* cudaTokens[2];
if ((!this->triangleIdxReady)||(!this->neighboursReady)) {
return false;
}
if (volume_D == NULL) {
return false;
}
// Init vbo with uncertainty information
if (!this->InitVtxPathVBO(this->vertexCnt)) {
return false;
}
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0], this->vboVtxData,
hipGraphicsMapFlagsNone))) {
return false;
}
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[1], this->vboVtxPath,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt;
size_t vboSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboVtxPathPt;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboVtxPathPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
if (!this->initExtForcesDistfield(
volume_D,
vboPt,
volDim,
volOrg,
volDelta,
distfieldDist,
isovalue)) {
return false;
}
// Init forces scale factor with -1 or 1, depending on whether they start
// outside or inside the isosurface
if (!CudaSafeCall(this->vertexExternalForcesScl_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->displLen_D.Validate(this->vertexCnt))) {
return false;
}
#ifdef USE_TIMER
float dt_ms;
hipEvent_t event1, event2;
hipEventCreate(&event1);
hipEventCreate(&event2);
hipEventRecord(event1, 0);
#endif
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_InitExternalForceScl_D) , dim3(Grid(this->vertexCnt, 256)), dim3(256) , 0, 0,
(float*)this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
volume_D,
vboPt,
surfMappedMinDisplScl,
this->vertexCnt,
isovalue,
this->vertexDataOffsPos,
this->vertexDataStride);
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'InitExternalForceScl_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
// Iterations for new position
if (!this->updateVtxPos(
volume_D,
vboPt,
vboVtxPathPt,
volDim,
volOrg,
volDelta,
isovalue,
(interpMode == INTERP_CUBIC),
maxIt,
surfMappedMinDisplScl,
springStiffness,
forceScl,
externalForcesWeight,
true)) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
return true;
}
/*
* DeformableGPUSurfaceMT::MorphToVolumeGVF
*/
bool DeformableGPUSurfaceMT::MorphToVolumeGVF(float *volumeSource_D,
float *volumeTarget_D,
const unsigned int *targetCubeStates_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
float isovalue,
InterpolationMode interpMode,
size_t maxIt,
float surfMappedMinDisplScl,
float springStiffness,
float forceScl,
float externalForcesWeight,
float gvfScl,
unsigned int gvfIt) {
using namespace megamol::core::utility::log;
/* Init grid parameters */
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
using megamol::core::utility::log::Log;
    hipGraphicsResource* cudaTokens[2];
if ((!this->triangleIdxReady)||(!this->neighboursReady)) {
return false;
}
if (volumeTarget_D == NULL) {
return false;
}
if (!this->initExtForcesGVF(
volumeTarget_D,
targetCubeStates_D,
volDim,
volOrg,
volDelta,
isovalue,
gvfScl,
gvfIt)) {
return false;
}
// Init vbo with uncertainty information
if (!this->InitVtxPathVBO(this->vertexCnt)) {
return false;
}
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0], this->vboVtxData,
hipGraphicsMapFlagsNone))) {
return false;
}
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[1], this->vboVtxPath,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt;
size_t vboSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboVtxPathPt;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboVtxPathPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
// Init forces scale factor with -1 or 1, depending on whether they start
// outside or inside the isosurface
if (!CudaSafeCall(this->vertexExternalForcesScl_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->displLen_D.Validate(this->vertexCnt))) {
return false;
}
#ifdef USE_TIMER
float dt_ms;
hipEvent_t event1, event2;
hipEventCreate(&event1);
hipEventCreate(&event2);
hipEventRecord(event1, 0);
#endif
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_InitExternalForceScl_D) , dim3(Grid(this->vertexCnt, 256)), dim3(256) , 0, 0,
(float*)this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
volumeTarget_D,
vboPt,
surfMappedMinDisplScl,
this->vertexCnt,
isovalue,
this->vertexDataOffsPos,
this->vertexDataStride);
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'InitExternalForceScl_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
// Iterations for new position
if (!this->updateVtxPos(
volumeTarget_D,
vboPt,
vboVtxPathPt,
volDim,
volOrg,
volDelta,
isovalue,
(interpMode == INTERP_CUBIC),
maxIt,
surfMappedMinDisplScl,
springStiffness,
forceScl,
externalForcesWeight,
true)) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
return true;
}
/*
* DeformableGPUSurfaceMT::MorphToVolumeTwoWayGVFBM
*/
bool DeformableGPUSurfaceMT::MorphToVolumeTwoWayGVFBM(
float *volumeSource_D,
float *volumeTarget_D,
const unsigned int *cellStatesSource_D,
const unsigned int *cellStatesTarget_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
float isovalue,
InterpolationMode interpMode,
size_t maxIt,
float surfMappedMinDisplScl,
float springStiffness,
float forceScl,
float externalForcesWeight,
float gvfScl,
unsigned int gvfIt,
bool trackPath,
bool recomputeGVF,
float &t_gvf,
float &t_map) {
using megamol::core::utility::log::Log;
// printf("MORPH\n");
/* Init grid parameters */
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
    hipGraphicsResource* cudaTokens[2];
if ((!this->triangleIdxReady)||(!this->neighboursReady)) {
return false;
}
if ((volumeTarget_D == NULL)||(volumeSource_D == NULL)) {
return false;
}
float dt_ms;
hipEvent_t event1, event2;
hipEventCreate(&event1);
hipEventCreate(&event2);
hipEventRecord(event1, 0);
if (recomputeGVF) {
if (!this->initExtForcesTwoWayGVF(
volumeSource_D,
volumeTarget_D,
cellStatesSource_D,
cellStatesTarget_D,
volDim, volOrg, volDelta,
isovalue, gvfScl, gvfIt)) {
return false;
}
}
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
t_gvf = dt_ms;
// printf("GVF %f ms\n", t_gvf);
if (trackPath) {
// Init vbo with uncertainty information
if (!this->InitVtxPathVBO(this->vertexCnt)) {
return false;
}
}
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0], this->vboVtxData,
hipGraphicsMapFlagsNone))) {
return false;
}
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[1], this->vboVtxPath,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt;
size_t vboSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboVtxPathPt;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboVtxPathPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
// Init forces scale factor with -1 or 1, depending on whether they start
// outside or inside the isosurface
if (!CudaSafeCall(this->vertexExternalForcesScl_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->displLen_D.Validate(this->vertexCnt))) {
return false;
}
hipEventRecord(event1, 0);
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_InitExternalForceScl_D) , dim3(Grid(this->vertexCnt, 256)), dim3(256) , 0, 0,
(float*)this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
volumeTarget_D,
vboPt,
surfMappedMinDisplScl,
this->vertexCnt,
isovalue,
this->vertexDataOffsPos,
this->vertexDataStride);
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'InitExternalForceScl_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
// Iterations for new position
if (!this->updateVtxPos(
volumeTarget_D,
vboPt,
vboVtxPathPt,
volDim,
volOrg,
volDelta,
isovalue,
(interpMode == INTERP_CUBIC),
maxIt,
surfMappedMinDisplScl,
springStiffness,
forceScl,
externalForcesWeight,
trackPath, // Track path
true)) { // Use external forces only
return false;
}
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
t_map = dt_ms;
if (!CudaSafeCall(hipGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
return true;
}
/*
* DeformableGPUSurfaceMT::MorphToVolumeTwoWayGVF
*/
bool DeformableGPUSurfaceMT::MorphToVolumeTwoWayGVF(
float *volumeSource_D,
float *volumeTarget_D,
const unsigned int *cellStatesSource_D,
const unsigned int *cellStatesTarget_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
float isovalue,
InterpolationMode interpMode,
size_t maxIt,
float surfMappedMinDisplScl,
float springStiffness,
float forceScl,
float externalForcesWeight,
float gvfScl,
unsigned int gvfIt,
bool trackPath,
bool recomputeGVF) {
using megamol::core::utility::log::Log;
// printf("MORPH\n");
/* Init grid parameters */
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
cudaGraphicsResource* cudaTokens[2];
if ((!this->triangleIdxReady)||(!this->neighboursReady)) {
return false;
}
if ((volumeTarget_D == NULL)||(volumeSource_D == NULL)) {
return false;
}
//#define USE_TIMER
#ifdef USE_TIMER
float dt_ms;
hipEvent_t event1, event2;
hipEventCreate(&event1);
hipEventCreate(&event2);
hipEventRecord(event1, 0);
#endif
if (recomputeGVF) {
if (!this->initExtForcesTwoWayGVF(
volumeSource_D,
volumeTarget_D,
cellStatesSource_D,
cellStatesTarget_D,
volDim, volOrg, volDelta,
isovalue, gvfScl, gvfIt)) {
return false;
}
}
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for GVF: %.10f sec\n",
dt_ms/1000.0f);
#endif
if (trackPath) {
// Init vbo with uncertainty information
if (!this->InitVtxPathVBO(this->vertexCnt)) {
return false;
}
}
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0], this->vboVtxData,
hipGraphicsMapFlagsNone))) {
return false;
}
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[1], this->vboVtxPath,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt;
size_t vboSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboVtxPathPt;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboVtxPathPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
// Init forces scale factor with -1 or 1, depending on whether they start
// outside or inside the isosurface
if (!CudaSafeCall(this->vertexExternalForcesScl_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->displLen_D.Validate(this->vertexCnt))) {
return false;
}
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_InitExternalForceScl_D) , dim3(Grid(this->vertexCnt, 256)), dim3(256) , 0, 0,
(float*)this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
volumeTarget_D,
vboPt,
surfMappedMinDisplScl,
this->vertexCnt,
isovalue,
this->vertexDataOffsPos,
this->vertexDataStride);
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'InitExternalForceScl_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
// Iterations for new position
if (!this->updateVtxPos(
volumeTarget_D,
vboPt,
vboVtxPathPt,
volDim,
volOrg,
volDelta,
isovalue,
(interpMode == INTERP_CUBIC),
maxIt,
surfMappedMinDisplScl,
springStiffness,
forceScl,
externalForcesWeight,
trackPath, // Track path
true)) { // Use external forces only
return false;
}
if (!CudaSafeCall(hipGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
#undef USE_TIMER
return true;
}
/*
 * DeformableGPUSurfaceMT::MorphToVolumeTwoWayGVFSubdiv
*/
bool DeformableGPUSurfaceMT::MorphToVolumeTwoWayGVFSubdiv(
float *volumeSource_D,
float *volumeTarget_D,
const unsigned int *cellStatesSource_D,
const unsigned int *cellStatesTarget_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
float isovalue,
InterpolationMode interpMode,
size_t maxIt,
float surfMappedMinDisplScl,
float springStiffness,
float forceScl,
float externalForcesWeight,
float gvfScl,
unsigned int gvfIt,
bool trackPath,
bool recomputeGVF) {
using megamol::core::utility::log::Log;
// printf("MORPH\n");
/* Init grid parameters */
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
cudaGraphicsResource* cudaTokens[2];
if ((!this->triangleIdxReady)||(!this->neighboursReady)) {
return false;
}
if ((volumeTarget_D == NULL)||(volumeSource_D == NULL)) {
return false;
}
if (recomputeGVF) {
if (!this->initExtForcesTwoWayGVF(
volumeSource_D,
volumeTarget_D,
cellStatesSource_D,
cellStatesTarget_D,
volDim, volOrg, volDelta,
isovalue, gvfScl, gvfIt)) {
return false;
}
}
if (trackPath) {
// Init vbo with uncertainty information
if (!this->InitVtxPathVBO(this->vertexCnt)) {
return false;
}
}
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0], this->vboVtxData,
hipGraphicsMapFlagsNone))) {
return false;
}
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[1], this->vboVtxPath,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt;
size_t vboSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboVtxPathPt;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboVtxPathPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
// Init forces scale factor with -1 or 1, depending on whether they start
// outside or inside the isosurface
if (!CudaSafeCall(this->vertexExternalForcesScl_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->displLen_D.Validate(this->vertexCnt))) {
return false;
}
#ifdef USE_TIMER
float dt_ms;
hipEvent_t event1, event2;
hipEventCreate(&event1);
hipEventCreate(&event2);
hipEventRecord(event1, 0);
#endif
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_InitExternalForceScl_D) , dim3(Grid(this->vertexCnt, 256)), dim3(256) , 0, 0,
(float*)this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
volumeTarget_D,
vboPt,
surfMappedMinDisplScl,
this->vertexCnt,
isovalue,
this->vertexDataOffsPos,
this->vertexDataStride);
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'InitExternalForceScl_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
// Iterations for new position
if (!this->updateVtxPosSubdiv(
volumeTarget_D,
vboPt,
vboVtxPathPt,
volDim,
volOrg,
volDelta,
isovalue,
(interpMode == INTERP_CUBIC),
maxIt,
surfMappedMinDisplScl,
springStiffness,
forceScl,
externalForcesWeight,
trackPath, // Track path
true)) { // Use external forces only
return false;
}
if (!CudaSafeCall(hipGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
return true;
}
/*
* DeformableGPUSurfaceMT::operator=
*/
DeformableGPUSurfaceMT& DeformableGPUSurfaceMT::operator=(const DeformableGPUSurfaceMT &rhs) {
GPUSurfaceMT::operator =(rhs);
CudaSafeCall(this->vertexExternalForcesScl_D.Validate(rhs.vertexExternalForcesScl_D.GetCount()));
CudaSafeCall(hipMemcpy(
this->vertexExternalForcesScl_D.Peek(),
rhs.vertexExternalForcesScl_D.PeekConst(),
this->vertexExternalForcesScl_D.GetCount()*sizeof(float),
hipMemcpyDeviceToDevice));
CudaSafeCall(this->displLen_D.Validate(rhs.displLen_D.GetCount()));
CudaSafeCall(hipMemcpy(
this->displLen_D.Peek(),
rhs.displLen_D.PeekConst(),
this->displLen_D.GetCount()*sizeof(float),
hipMemcpyDeviceToDevice));
CudaSafeCall(this->externalForces_D.Validate(rhs.externalForces_D.GetCount()));
CudaSafeCall(hipMemcpy(
this->externalForces_D.Peek(),
rhs.externalForces_D.PeekConst(),
this->externalForces_D.GetCount()*sizeof(float),
hipMemcpyDeviceToDevice));
CudaSafeCall(this->laplacian_D.Validate(rhs.laplacian_D.GetCount()));
CudaSafeCall(hipMemcpy(
this->laplacian_D.Peek(),
rhs.laplacian_D.PeekConst(),
this->laplacian_D.GetCount()*sizeof(float3),
hipMemcpyDeviceToDevice));
CudaSafeCall(this->laplacian2_D.Validate(rhs.laplacian2_D.GetCount()));
CudaSafeCall(hipMemcpy(
this->laplacian2_D.Peek(),
rhs.laplacian2_D.PeekConst(),
this->laplacian2_D.GetCount()*sizeof(float3),
hipMemcpyDeviceToDevice));
CudaSafeCall(this->gvfTmp_D.Validate(rhs.gvfTmp_D.GetCount()));
CudaSafeCall(hipMemcpy(
this->gvfTmp_D.Peek(),
rhs.gvfTmp_D.PeekConst(),
this->gvfTmp_D.GetCount()*sizeof(float),
hipMemcpyDeviceToDevice));
CudaSafeCall(this->gvfConstData_D.Validate(rhs.gvfConstData_D.GetCount()));
CudaSafeCall(hipMemcpy(
this->gvfConstData_D.Peek(),
rhs.gvfConstData_D.PeekConst(),
this->gvfConstData_D.GetCount()*sizeof(float),
hipMemcpyDeviceToDevice));
CudaSafeCall(this->distField_D.Validate(rhs.distField_D.GetCount()));
CudaSafeCall(hipMemcpy(
this->distField_D.Peek(),
rhs.distField_D.PeekConst(),
this->distField_D.GetCount()*sizeof(float),
hipMemcpyDeviceToDevice));
CudaSafeCall(this->accTriangleData_D.Validate(rhs.accTriangleData_D.GetCount()));
CudaSafeCall(hipMemcpy(
this->accTriangleData_D.Peek(),
rhs.accTriangleData_D.PeekConst(),
this->accTriangleData_D.GetCount()*sizeof(float),
hipMemcpyDeviceToDevice));
CudaSafeCall(this->corruptTriangles_D.Validate(rhs.corruptTriangles_D.GetCount()));
CudaSafeCall(hipMemcpy(
this->corruptTriangles_D.Peek(),
rhs.corruptTriangles_D.PeekConst(),
this->corruptTriangles_D.GetCount()*sizeof(float),
hipMemcpyDeviceToDevice));
CudaSafeCall(this->accTriangleArea_D.Validate(rhs.accTriangleArea_D.GetCount()));
CudaSafeCall(hipMemcpy(
this->accTriangleArea_D.Peek(),
rhs.accTriangleArea_D.PeekConst(),
this->accTriangleArea_D.GetCount()*sizeof(float),
hipMemcpyDeviceToDevice));
/* Make deep copy of corrupt triangle flag buffer */
if (rhs.vboCorruptTriangleVertexFlag) {
// Destroy if necessary
if (this->vboCorruptTriangleVertexFlag) {
glBindBufferARB(GL_ARRAY_BUFFER, this->vboCorruptTriangleVertexFlag);
glDeleteBuffersARB(1, &this->vboCorruptTriangleVertexFlag);
glBindBufferARB(GL_ARRAY_BUFFER, 0);
this->vboCorruptTriangleVertexFlag = 0;
}
// Create vertex buffer object for triangle indices
glGenBuffersARB(1, &this->vboCorruptTriangleVertexFlag);
CheckForGLError();
// Map as copy buffer
glBindBufferARB(GL_COPY_READ_BUFFER, rhs.vboCorruptTriangleVertexFlag);
glBindBufferARB(GL_COPY_WRITE_BUFFER, this->vboCorruptTriangleVertexFlag);
glBufferDataARB(GL_COPY_WRITE_BUFFER,
sizeof(float)*this->vertexCnt, 0, GL_DYNAMIC_DRAW);
// Copy data
glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, 0, 0,
sizeof(float)*this->vertexCnt);
glBindBufferARB(GL_COPY_WRITE_BUFFER, 0);
glBindBufferARB(GL_COPY_READ_BUFFER, 0);
CheckForGLError();
}
/* Make deep copy of uncertainty vbo */
if (rhs.vboVtxPath) {
// Destroy if necessary
if (this->vboVtxPath) {
glBindBufferARB(GL_ARRAY_BUFFER, this->vboVtxPath);
glDeleteBuffersARB(1, &this->vboVtxPath);
glBindBufferARB(GL_ARRAY_BUFFER, 0);
this->vboVtxPath = 0;
}
// Create vertex buffer object for triangle indices
glGenBuffersARB(1, &this->vboVtxPath);
CheckForGLError();
// Map as copy buffer
glBindBufferARB(GL_COPY_READ_BUFFER, rhs.vboVtxPath);
glBindBufferARB(GL_COPY_WRITE_BUFFER, this->vboVtxPath);
glBufferDataARB(GL_COPY_WRITE_BUFFER,
sizeof(float)*this->vertexCnt, 0, GL_DYNAMIC_DRAW);
// Copy data
glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, 0, 0,
sizeof(float)*this->vertexCnt);
glBindBufferARB(GL_COPY_WRITE_BUFFER, 0);
glBindBufferARB(GL_COPY_READ_BUFFER, 0);
CheckForGLError();
}
return *this;
}
/*
* DeformableGPUSurfaceMT_GetTriangleEdgeCnt_D
*/
__global__ void DeformableGPUSurfaceMT_GetTriangleEdgeCnt_D (
int *triangleEdgeOffs_D,
uint *triangleNeighbors_D,
uint triangleCnt) {
const uint triIdx = ::getThreadIdx();
if (triIdx >= triangleCnt) return;
uint cnt = 0;
uint n0 = triangleNeighbors_D[3*triIdx+0];
cnt = cnt + int(n0 > triIdx);
uint n1 = triangleNeighbors_D[3*triIdx+1];
cnt = cnt + int(n1 > triIdx);
uint n2 = triangleNeighbors_D[3*triIdx+2];
cnt = cnt + int(n2 > triIdx);
triangleEdgeOffs_D[triIdx] = cnt;
}
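/*
 * getAdjEdge_D
 * Returns the two vertex indices shared by the triangles (v0, v1, v2) and
 * (w0, w1, w2), i.e. the edge along which the two triangles are adjacent.
 */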
__device__ uint2 getAdjEdge_D (uint v0, uint v1, uint v2,
uint w0, uint w1, uint w2) {
int idx0=-1, idx1=-1;
int v[3], w[3];
v[0] = v0; v[1] = v1; v[2] = v2;
w[0] = w0; w[1] = w1; w[2] = w2;
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
if (v[i] == w[j]) {
if (idx0 < 0) {
idx0 = v[i];
} else {
if (v[i] != idx0) {
idx1 = v[i];
}
}
}
}
}
return make_uint2(idx0, idx1);
}
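/*
 * hasAdjEdge_D
 * Returns true if the triangles (v0, v1, v2) and (w0, w1, w2) share at least
 * two vertices, i.e. a common edge.
 */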
__device__ bool hasAdjEdge_D (uint v0, uint v1, uint v2,
uint w0, uint w1, uint w2) {
int cnt = 0;
int idx0 = -1;
int v[3], w[3];
v[0] = v0; v[1] = v1; v[2] = v2;
w[0] = w0; w[1] = w1; w[2] = w2;
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
if (v[i] == w[j]) {
if (idx0 < 0) {
idx0 = v[i];
cnt++;
} else {
if (v[i] != idx0) {
cnt++;
}
}
}
}
}
if (cnt >=2) return true;
else return false;
}
/*
* DeformableGPUSurfaceMT_BuildEdgeList_D
*/
__global__ void DeformableGPUSurfaceMT_BuildEdgeList_D (
uint *edgeList_D,
int *triangleEdgeOffs_D,
uint *triangleNeighbors_D,
uint *triangleIdx_D,
uint triangleCnt) {
const uint triIdx = ::getThreadIdx();
if (triIdx >= triangleCnt) return;
uint3 idx = make_uint3(triangleIdx_D[3*triIdx+0],
triangleIdx_D[3*triIdx+1],
triangleIdx_D[3*triIdx+2]);
uint cnt = 0;
uint n0 = triangleNeighbors_D[3*triIdx+0];
uint offs = triangleEdgeOffs_D[triIdx];
// TODO Comparing all three vertex indices necessary? Use only two?
if (n0 > triIdx) {
uint3 nIdx = make_uint3(triangleIdx_D[3*n0+0],
triangleIdx_D[3*n0+1],
triangleIdx_D[3*n0+2]);
uint2 e = getAdjEdge_D(idx.x, idx.y, idx.z, nIdx.x, nIdx.y, nIdx.z);
// printf("%u %u: %u %u %u, %u %u %u\n", e.x, e.y, idx.x, idx.y, idx.z, nIdx.x, nIdx.y, nIdx.z);
edgeList_D[2*offs+0] = e.x;
edgeList_D[2*offs+1] = e.y;
// printf("edge %u %u\n", e.x, e.y);
cnt++;
}
uint n1 = triangleNeighbors_D[3*triIdx+1];
if (n1 > triIdx) {
uint3 nIdx = make_uint3(triangleIdx_D[3*n1+0],
triangleIdx_D[3*n1+1],
triangleIdx_D[3*n1+2]);
uint2 e = getAdjEdge_D(idx.x, idx.y, idx.z, nIdx.x, nIdx.y, nIdx.z);
edgeList_D[2*(offs+cnt)+0] = e.x;
edgeList_D[2*(offs+cnt)+1] = e.y;
cnt++;
}
uint n2 = triangleNeighbors_D[3*triIdx+2];
if (n2 > triIdx) {
uint3 nIdx = make_uint3(triangleIdx_D[3*n2+0],
triangleIdx_D[3*n2+1],
triangleIdx_D[3*n2+2]);
uint2 e = getAdjEdge_D(idx.x, idx.y, idx.z, nIdx.x, nIdx.y, nIdx.z);
edgeList_D[2*(offs+cnt)+0] = e.x;
edgeList_D[2*(offs+cnt)+1] = e.y;
}
}
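/*
 * getLocalEdgeOffsInTriangle_D
 * Computes the local offset of the edge (i0, i1) within the list of edges
 * associated with triangle 'triIdx' (an edge is associated with the triangle
 * only if the neighboring triangle across that edge has a higher index).
 */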
__device__ uint getLocalEdgeOffsInTriangle_D(
uint i0,
uint i1,
uint *triangleNeighbors_D,
uint *triangleIdx_D,
uint triIdx) {
uint cnt = 0;
uint v[3];
v[0] = triangleIdx_D[3*triIdx+0];
v[1] = triangleIdx_D[3*triIdx+1];
v[2] = triangleIdx_D[3*triIdx+2];
uint n[3];
n[0] = triangleNeighbors_D[3*triIdx+0];
n[1] = triangleNeighbors_D[3*triIdx+1];
n[2] = triangleNeighbors_D[3*triIdx+2];
for (int i = 0; i < 3; ++i) {
if (n[i] < triIdx) continue; // This edge is not associated with this triangle
        if (((v[i] == i0)&&(v[(i+1)%3] == i1))||
            ((v[i] == i1)&&(v[(i+1)%3] == i0))) {
cnt++;
break;
} else {
cnt++;
}
}
return cnt-1;
}
/*
* DeformableGPUSurfaceMT_ComputeTriEdgeList_D
*/
__global__ void DeformableGPUSurfaceMT_ComputeTriEdgeList_D (
uint *triEdgeList_D,
int *triangleEdgeOffs_D,
uint *triangleNeighbors_D,
uint *triangleIdx_D,
uint triangleCnt) {
const uint triIdx = ::getThreadIdx();
if (triIdx >= triangleCnt) return;
uint3 idx = make_uint3(triangleIdx_D[3*triIdx+0],
triangleIdx_D[3*triIdx+1],
triangleIdx_D[3*triIdx+2]);
//uint offs = triangleEdgeOffs_D[triIdx];
// Get first edge
uint n0 = triangleNeighbors_D[3*triIdx+0];
uint nGlobalOffs;
uint nLocalOffs;
if (n0 < triIdx) { // Edge is associated with neighbor
nGlobalOffs = triangleEdgeOffs_D[n0];
nLocalOffs = getLocalEdgeOffsInTriangle_D(idx.x, idx.y,
triangleNeighbors_D, triangleIdx_D, n0);
    } else { // Edge is associated with self
nGlobalOffs = triangleEdgeOffs_D[triIdx];
nLocalOffs = getLocalEdgeOffsInTriangle_D(idx.x, idx.y,
triangleNeighbors_D, triangleIdx_D, triIdx);
}
triEdgeList_D[3*triIdx+0] = nGlobalOffs + nLocalOffs;
// Get second edge
uint n1 = triangleNeighbors_D[3*triIdx+1];
if (n1 < triIdx) { // Edge is associated with neighbor
nGlobalOffs = triangleEdgeOffs_D[n1];
nLocalOffs = getLocalEdgeOffsInTriangle_D(idx.y, idx.z,
triangleNeighbors_D, triangleIdx_D, n1);
    } else { // Edge is associated with self
nGlobalOffs = triangleEdgeOffs_D[triIdx];
nLocalOffs = getLocalEdgeOffsInTriangle_D(idx.y, idx.z,
triangleNeighbors_D, triangleIdx_D, triIdx);
}
triEdgeList_D[3*triIdx+1] = nGlobalOffs + nLocalOffs;
// Get third edge
uint n2 = triangleNeighbors_D[3*triIdx+2];
if (n2 < triIdx) { // Edge is associated with neighbor
nGlobalOffs = triangleEdgeOffs_D[n2];
nLocalOffs = getLocalEdgeOffsInTriangle_D(idx.z, idx.x,
triangleNeighbors_D, triangleIdx_D, n2);
    } else { // Edge is associated with self
nGlobalOffs = triangleEdgeOffs_D[triIdx];
nLocalOffs = getLocalEdgeOffsInTriangle_D(idx.z, idx.x,
triangleNeighbors_D, triangleIdx_D, triIdx);
}
triEdgeList_D[3*triIdx+2] = nGlobalOffs + nLocalOffs;
}
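/*
 * FlagLongEdges_D
 * Flags every edge whose squared length exceeds 'maxLenSqrt' (compared
 * against the squared edge length); flagged edges are candidates for
 * subdivision.
 */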
__global__ void FlagLongEdges_D(
uint *edgeFlag_D,
uint *edges_D,
float *vertexData_D,
float maxLenSqrt,
uint edgeCnt) {
const uint idx = ::getThreadIdx();
if (idx >= edgeCnt) return;
float3 pos0 = make_float3(vertexData_D[9*edges_D[2*idx+0]+0],
vertexData_D[9*edges_D[2*idx+0]+1],
vertexData_D[9*edges_D[2*idx+0]+2]);
float3 pos1 = make_float3(vertexData_D[9*edges_D[2*idx+1]+0],
vertexData_D[9*edges_D[2*idx+1]+1],
vertexData_D[9*edges_D[2*idx+1]+2]);
float lenSqrt = (pos0.x - pos1.x)*(pos0.x - pos1.x) +
(pos0.y - pos1.y)*(pos0.y - pos1.y) +
(pos0.z - pos1.z)*(pos0.z - pos1.z);
edgeFlag_D[idx] = uint(lenSqrt > maxLenSqrt);
}
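/*
 * ComputeNewVertices
 * Creates one new vertex at the midpoint of every flagged edge and marks it
 * as new in the vertex flag buffer.
 */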
__global__ void ComputeNewVertices(
float *newVertices_D,
float *vertexFlag_D,
uint *subDivEdgeIdxOffs_D,
uint *edgeFlag_D,
uint *edges_D,
float *vertexData_D,
uint oldVertexCnt,
uint edgeCnt) {
const uint idx = ::getThreadIdx();
if (idx >= edgeCnt) return;
if (edgeFlag_D[idx] == 0) return;
float3 pos0 = make_float3(vertexData_D[9*edges_D[2*idx+0]+0],
vertexData_D[9*edges_D[2*idx+0]+1],
vertexData_D[9*edges_D[2*idx+0]+2]);
float3 pos1 = make_float3(vertexData_D[9*edges_D[2*idx+1]+0],
vertexData_D[9*edges_D[2*idx+1]+1],
vertexData_D[9*edges_D[2*idx+1]+2]);
float3 posNew = (pos1+pos0)*0.5;
uint edgeIdxOffs = subDivEdgeIdxOffs_D[idx];
newVertices_D[3*edgeIdxOffs+0] = posNew.x;
newVertices_D[3*edgeIdxOffs+1] = posNew.y;
newVertices_D[3*edgeIdxOffs+2] = posNew.z;
vertexFlag_D[oldVertexCnt+edgeIdxOffs] = 1.0; // mark this vertex as new
// printf("Vertex %f %f %f\n", posNew.x, posNew.y, posNew.z);
}
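/*
 * ComputeSubdivCnt_D
 * Determines for each triangle into how many sub-triangles it will be split
 * (4, 3, or 2, depending on the number of flagged edges) and marks triangles
 * without flagged edges as kept.
 */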
__global__ void ComputeSubdivCnt_D(
uint *subdivCnt_D,
uint *triangleEdgeList_D,
uint *edgeFlag_D,
uint *edges_D,
uint *oldTrianglesIdxOffset,
uint triangleCnt) {
const uint idx = ::getThreadIdx();
if (idx >= triangleCnt) return;
uint edgeIdx0 = triangleEdgeList_D[3*idx+0];
uint edgeIdx1 = triangleEdgeList_D[3*idx+1];
uint edgeIdx2 = triangleEdgeList_D[3*idx+2];
bool flag0 = bool(edgeFlag_D[edgeIdx0]);
bool flag1 = bool(edgeFlag_D[edgeIdx1]);
bool flag2 = bool(edgeFlag_D[edgeIdx2]);
if (flag0 && flag1 && flag2) {
subdivCnt_D[idx] = 4;
oldTrianglesIdxOffset[idx] = 0;
} else if ((flag0 && flag1)||(flag1 && flag2)||(flag2 && flag0)) {
subdivCnt_D[idx] = 3;
oldTrianglesIdxOffset[idx] = 0;
} else if (flag0 || flag1 || flag2) {
subdivCnt_D[idx] = 2;
oldTrianglesIdxOffset[idx] = 0;
} else {
subdivCnt_D[idx] = 0;
oldTrianglesIdxOffset[idx] = 1;
}
}
// TODO Orientation of new triangles should match neighbor triangles
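/*
 * ComputeSubdiv_D
 * Builds the index buffer of the newly spawned triangles for every triangle
 * with at least one flagged edge, propagates the subdivision level
 * (parent level + 1) to the new triangles, and carries over the level of
 * triangles that are kept unchanged.
 */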
__global__ void ComputeSubdiv_D(
uint *newTriangles,
uint *newTriangleIdxOffsets,
uint *triangleEdgeList_D,
uint *triangleIdx_D,
uint *edgeFlag_D,
uint *edges_D,
uint *subDivEdgeIdxOffs_D,
uint *oldSubDivLevels_D,
uint *subDivLevels_D,
uint *oldTrianglesIdxOffsets_D,
uint vertexCntOld,
uint keptTrianglesCnt,
uint triangleCnt) {
const uint idx = ::getThreadIdx();
if (idx >= triangleCnt) return;
uint edgeIdx0 = triangleEdgeList_D[3*idx+0];
uint edgeIdx1 = triangleEdgeList_D[3*idx+1];
uint edgeIdx2 = triangleEdgeList_D[3*idx+2];
bool flag0 = bool(edgeFlag_D[edgeIdx0]);
bool flag1 = bool(edgeFlag_D[edgeIdx1]);
bool flag2 = bool(edgeFlag_D[edgeIdx2]);
uint v0 = triangleIdx_D[3*idx+0];
uint v1 = triangleIdx_D[3*idx+1];
uint v2 = triangleIdx_D[3*idx+2];
uint e0 = triangleEdgeList_D[3*idx+0];
uint e1 = triangleEdgeList_D[3*idx+1];
uint e2 = triangleEdgeList_D[3*idx+2];
uint triIdxOffs = newTriangleIdxOffsets[idx];
if (flag0 && flag1 && flag2) { // Spawn 4 new triangles
uint vNew0 = vertexCntOld + subDivEdgeIdxOffs_D[e0];
uint vNew1 = vertexCntOld + subDivEdgeIdxOffs_D[e1];
uint vNew2 = vertexCntOld + subDivEdgeIdxOffs_D[e2];
// #0
newTriangles[3*triIdxOffs+0] = v0;
newTriangles[3*triIdxOffs+1] = vNew0;
newTriangles[3*triIdxOffs+2] = vNew2;
// #1
newTriangles[3*triIdxOffs+3] = v1;
newTriangles[3*triIdxOffs+4] = vNew1;
newTriangles[3*triIdxOffs+5] = vNew0;
// #2
newTriangles[3*triIdxOffs+6] = v2;
newTriangles[3*triIdxOffs+7] = vNew2;
newTriangles[3*triIdxOffs+8] = vNew1;
// #3
newTriangles[3*triIdxOffs+9] = vNew0;
newTriangles[3*triIdxOffs+10] = vNew1;
newTriangles[3*triIdxOffs+11] = vNew2;
// Write subdiv levels
uint parentSubdiv = oldSubDivLevels_D[idx];
subDivLevels_D[keptTrianglesCnt+triIdxOffs+0] = parentSubdiv + 1;
subDivLevels_D[keptTrianglesCnt+triIdxOffs+1] = parentSubdiv + 1;
subDivLevels_D[keptTrianglesCnt+triIdxOffs+2] = parentSubdiv + 1;
subDivLevels_D[keptTrianglesCnt+triIdxOffs+3] = parentSubdiv + 1;
} else if (flag0 && flag1) { // Spawn 3 new triangles
uint vNew0 = vertexCntOld + subDivEdgeIdxOffs_D[e0];
uint vNew1 = vertexCntOld + subDivEdgeIdxOffs_D[e1];
// #0
newTriangles[3*triIdxOffs+0] = v1;
newTriangles[3*triIdxOffs+1] = vNew1;
newTriangles[3*triIdxOffs+2] = vNew0;
// #1
newTriangles[3*triIdxOffs+3] = v0;
newTriangles[3*triIdxOffs+4] = vNew0;
newTriangles[3*triIdxOffs+5] = vNew1;
// #2
newTriangles[3*triIdxOffs+6] = v2;
newTriangles[3*triIdxOffs+7] = v0;
newTriangles[3*triIdxOffs+8] = vNew1;
// Write subdiv levels
uint parentSubdiv = oldSubDivLevels_D[idx];
subDivLevels_D[keptTrianglesCnt+triIdxOffs+0] = parentSubdiv + 1;
subDivLevels_D[keptTrianglesCnt+triIdxOffs+1] = parentSubdiv + 1;
subDivLevels_D[keptTrianglesCnt+triIdxOffs+2] = parentSubdiv + 1;
} else if (flag1 && flag2) { // Spawn 3 new triangles
uint vNew1 = vertexCntOld + subDivEdgeIdxOffs_D[e1];
uint vNew2 = vertexCntOld + subDivEdgeIdxOffs_D[e2];
// #0
newTriangles[3*triIdxOffs+0] = v2;
newTriangles[3*triIdxOffs+1] = vNew2;
newTriangles[3*triIdxOffs+2] = vNew1;
// #1
newTriangles[3*triIdxOffs+3] = v0;
newTriangles[3*triIdxOffs+4] = vNew1;
newTriangles[3*triIdxOffs+5] = vNew2;
// #2
newTriangles[3*triIdxOffs+6] = v0;
newTriangles[3*triIdxOffs+7] = v1;
newTriangles[3*triIdxOffs+8] = vNew1;
// Write subdiv levels
uint parentSubdiv = oldSubDivLevels_D[idx];
subDivLevels_D[keptTrianglesCnt+triIdxOffs+0] = parentSubdiv + 1;
subDivLevels_D[keptTrianglesCnt+triIdxOffs+1] = parentSubdiv + 1;
subDivLevels_D[keptTrianglesCnt+triIdxOffs+2] = parentSubdiv + 1;
} else if (flag2 && flag0) { // Spawn 3 new triangles
uint vNew2 = vertexCntOld + subDivEdgeIdxOffs_D[e2];
uint vNew0 = vertexCntOld + subDivEdgeIdxOffs_D[e0];
// #0
newTriangles[3*triIdxOffs+0] = v0;
newTriangles[3*triIdxOffs+1] = vNew0;
newTriangles[3*triIdxOffs+2] = vNew2;
// #1
newTriangles[3*triIdxOffs+3] = v2;
newTriangles[3*triIdxOffs+4] = vNew2;
newTriangles[3*triIdxOffs+5] = vNew0;
// #2
newTriangles[3*triIdxOffs+6] = v1;
newTriangles[3*triIdxOffs+7] = v2;
newTriangles[3*triIdxOffs+8] = vNew0;
// Write subdiv levels
uint parentSubdiv = oldSubDivLevels_D[idx];
subDivLevels_D[keptTrianglesCnt+triIdxOffs+0] = parentSubdiv + 1;
subDivLevels_D[keptTrianglesCnt+triIdxOffs+1] = parentSubdiv + 1;
subDivLevels_D[keptTrianglesCnt+triIdxOffs+2] = parentSubdiv + 1;
} else if (flag0) { // Spawn 2 new triangles
uint vNew0 = vertexCntOld + subDivEdgeIdxOffs_D[e0];
// #0
newTriangles[3*triIdxOffs+0] = v0;
newTriangles[3*triIdxOffs+1] = vNew0;
newTriangles[3*triIdxOffs+2] = v2;
// #1
newTriangles[3*triIdxOffs+3] = v1;
newTriangles[3*triIdxOffs+4] = v2;
newTriangles[3*triIdxOffs+5] = vNew0;
// Write subdiv levels
uint parentSubdiv = oldSubDivLevels_D[idx];
subDivLevels_D[keptTrianglesCnt+triIdxOffs+0] = parentSubdiv + 1;
subDivLevels_D[keptTrianglesCnt+triIdxOffs+1] = parentSubdiv + 1;
} else if (flag1) { // Spawn 2 new triangles
uint vNew1 = vertexCntOld + subDivEdgeIdxOffs_D[e1];
// #0
newTriangles[3*triIdxOffs+0] = v0;
newTriangles[3*triIdxOffs+1] = v1;
newTriangles[3*triIdxOffs+2] = vNew1;
// #1
newTriangles[3*triIdxOffs+3] = v0;
newTriangles[3*triIdxOffs+4] = vNew1;
newTriangles[3*triIdxOffs+5] = v2;
// Write subdiv levels
uint parentSubdiv = oldSubDivLevels_D[idx];
subDivLevels_D[keptTrianglesCnt+triIdxOffs+0] = parentSubdiv + 1;
subDivLevels_D[keptTrianglesCnt+triIdxOffs+1] = parentSubdiv + 1;
} else if (flag2) { // Spawn 2 new triangles
uint vNew2 = vertexCntOld + subDivEdgeIdxOffs_D[e2];
// #0
newTriangles[3*triIdxOffs+0] = v0;
newTriangles[3*triIdxOffs+1] = v1;
newTriangles[3*triIdxOffs+2] = vNew2;
// #1
newTriangles[3*triIdxOffs+3] = v1;
newTriangles[3*triIdxOffs+4] = v2;
newTriangles[3*triIdxOffs+5] = vNew2;
// Write subdiv levels
uint parentSubdiv = oldSubDivLevels_D[idx];
subDivLevels_D[keptTrianglesCnt+triIdxOffs+0] = parentSubdiv + 1;
subDivLevels_D[keptTrianglesCnt+triIdxOffs+1] = parentSubdiv + 1;
} else {
// Write back subdiv level
subDivLevels_D[oldTrianglesIdxOffsets_D[idx]] = oldSubDivLevels_D[idx];
}
}
// TODO: !!! This method assumes a certain ordering in the three neighbors of
//       !!! a triangle. Is this actually true?
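/*
 * ComputeSubdivTriNeighbors_D
 * Recomputes triangle neighbor indices after subdivision, both for kept
 * triangles and for newly spawned ones, by matching shared edges against the
 * (possibly subdivided) neighbors of the parent triangle.
 */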
__global__ void ComputeSubdivTriNeighbors_D (
uint *newTriangleNeighbors_D,
uint *oldTriangleNeighbors_D,
uint *newTriangleIdxOffsets,
uint *triangleEdgeList_D,
uint *triangleIdx_D,
uint *edgeFlag_D,
uint *edges_D,
uint *subDivEdgeIdxOffs_D,
uint *subdivCnt_D,
uint *oldTriangleIdxOffset,
uint *newTriangles_D,
uint vertexCntOld,
uint numberOfKeptTriangles,
uint oldTriangleCnt) {
const uint idx = ::getThreadIdx();
if (idx >= oldTriangleCnt) return;
uint edgeIdx0 = triangleEdgeList_D[3*idx+0];
uint edgeIdx1 = triangleEdgeList_D[3*idx+1];
uint edgeIdx2 = triangleEdgeList_D[3*idx+2];
bool flag0 = bool(edgeFlag_D[edgeIdx0]);
bool flag1 = bool(edgeFlag_D[edgeIdx1]);
bool flag2 = bool(edgeFlag_D[edgeIdx2]);
uint v0 = triangleIdx_D[3*idx+0];
uint v1 = triangleIdx_D[3*idx+1];
uint v2 = triangleIdx_D[3*idx+2];
uint e0 = triangleEdgeList_D[3*idx+0];
uint e1 = triangleEdgeList_D[3*idx+1];
uint e2 = triangleEdgeList_D[3*idx+2];
uint triIdxOffs = newTriangleIdxOffsets[idx];
if (!(flag0 || flag1 || flag2)) { // No subdivision
uint newIdx = oldTriangleIdxOffset[idx];
uint oldN0 = oldTriangleNeighbors_D[3*idx+0];
uint oldN1 = oldTriangleNeighbors_D[3*idx+1];
uint oldN2 = oldTriangleNeighbors_D[3*idx+2];
uint subDivCntN0 = subdivCnt_D[oldN0];
if (subDivCntN0 > 0) {
for (int i = 0; i < subDivCntN0; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+2];
if (hasAdjEdge_D (v0, v1, v2, u0, u1, u2)) {
newTriangleNeighbors_D[3*newIdx+0] =
numberOfKeptTriangles + newTriangleIdxOffsets[oldN0]+i;
}
}
} else {
newTriangleNeighbors_D[3*newIdx+0] = oldTriangleIdxOffset[oldN0];
}
uint subDivCntN1 = subdivCnt_D[oldN1];
if (subDivCntN1 > 0) {
for (int i = 0; i < subDivCntN1; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+2];
if (hasAdjEdge_D (v0, v1, v2, u0, u1, u2)) {
newTriangleNeighbors_D[3*newIdx+1]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN1]+i;
}
}
} else {
newTriangleNeighbors_D[3*newIdx+1] = oldTriangleIdxOffset[oldN1];
}
uint subDivCntN2 = subdivCnt_D[oldN2];
if (subDivCntN2 > 0) {
for (int i = 0; i < subDivCntN2; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+2];
if (hasAdjEdge_D (v0, v1, v2, u0, u1, u2)) {
newTriangleNeighbors_D[3*newIdx+2]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN2]+i;
}
}
} else {
newTriangleNeighbors_D[3*newIdx+2] = oldTriangleIdxOffset[oldN2];
}
} else if (flag0 && !flag1 && !flag2) { // 2 new triangles have been spawned
uint vNew0 = vertexCntOld + subDivEdgeIdxOffs_D[e0];
// Get index of neighbors of old triangle
uint oldN0 = oldTriangleNeighbors_D[3*idx+0];
uint oldN1 = oldTriangleNeighbors_D[3*idx+1];
uint oldN2 = oldTriangleNeighbors_D[3*idx+2];
/* Get neighbors of triangle #0 */
// Get respective vertex indices of this triangle
uint w0 = v0;
uint w1 = vNew0;
uint w2 = v2;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN0 = subdivCnt_D[oldN0];
if (subDivCntN0 > 0) {
for (int i = 0; i < subDivCntN0; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN0]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0]=
oldTriangleIdxOffset[oldN0];
}
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+1] =
numberOfKeptTriangles+triIdxOffs+1;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN2 = subdivCnt_D[oldN2];
if (subDivCntN2 > 0) {
for (int i = 0; i < subDivCntN2; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+2]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN2]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+2]=
oldTriangleIdxOffset[oldN2];
}
/* Get neighbors of triangle #1 */
// Get respective vertex indices of this triangle
w0 = v1;
w1 = v2;
w2 = vNew0;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN1 = subdivCnt_D[oldN1];
if (subDivCntN1 > 0) {
for (int i = 0; i < subDivCntN1; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+3]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN1]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+3]=
oldTriangleIdxOffset[oldN1];
}
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+4] =
numberOfKeptTriangles+triIdxOffs;
// This neighbor has to be determined by comparing vertex indices
subDivCntN0 = subdivCnt_D[oldN0];
if (subDivCntN0 > 0) {
for (int i = 0; i < subDivCntN0; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+5]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN0]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+5]=
oldTriangleIdxOffset[oldN0];
}
} else if (!flag0 && flag1 && !flag2) { // 2 new triangles have been spawned
uint vNew1 = vertexCntOld + subDivEdgeIdxOffs_D[e1];
// Get index of neighbors of old triangle
uint oldN0 = oldTriangleNeighbors_D[3*idx+0];
uint oldN1 = oldTriangleNeighbors_D[3*idx+1];
uint oldN2 = oldTriangleNeighbors_D[3*idx+2];
// #0
uint w0 = v0;
uint w1 = v1;
uint w2 = vNew1;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN0 = subdivCnt_D[oldN0];
if (subDivCntN0 > 0) {
for (int i = 0; i < subDivCntN0; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN0]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0]=
oldTriangleIdxOffset[oldN0];
}
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN1 = subdivCnt_D[oldN1];
if (subDivCntN1 > 0) {
for (int i = 0; i < subDivCntN1; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+1]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN1]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+1]=
oldTriangleIdxOffset[oldN1];
}
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+2] =
numberOfKeptTriangles+triIdxOffs+1;
// #1
w0 = v0;
w1 = vNew1;
w2 = v2;
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+3] =
numberOfKeptTriangles+triIdxOffs;
// This neighbor has to be determined by comparing vertex indices
subDivCntN1 = subdivCnt_D[oldN1];
if (subDivCntN1 > 0) {
for (int i = 0; i < subDivCntN1; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+4]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN1]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+4]=
oldTriangleIdxOffset[oldN1];
}
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN2 = subdivCnt_D[oldN2];
if (subDivCntN2 > 0) {
for (int i = 0; i < subDivCntN2; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+5]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN2]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+5]=
oldTriangleIdxOffset[oldN2];
}
} else if (!flag0 && !flag1 && flag2) { // 2 new triangles have been spawned
uint vNew2 = vertexCntOld + subDivEdgeIdxOffs_D[e2];
// Get index of neighbors of old triangle
uint oldN0 = oldTriangleNeighbors_D[3*idx+0];
uint oldN1 = oldTriangleNeighbors_D[3*idx+1];
uint oldN2 = oldTriangleNeighbors_D[3*idx+2];
// #0
uint w0 = v0;
uint w1 = v1;
uint w2 = vNew2;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN0 = subdivCnt_D[oldN0];
if (subDivCntN0 > 0) {
for (int i = 0; i < subDivCntN0; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN0]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0]=
oldTriangleIdxOffset[oldN0];
}
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+1] =
numberOfKeptTriangles+triIdxOffs + 1;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN2 = subdivCnt_D[oldN2];
if (subDivCntN2 > 0) {
for (int i = 0; i < subDivCntN2; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+2]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN2]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+2]=
oldTriangleIdxOffset[oldN2];
}
// #1
w0 = v1;
w1 = v2;
w2 = vNew2;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN1 = subdivCnt_D[oldN1];
if (subDivCntN1 > 0) {
for (int i = 0; i < subDivCntN1; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+3]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN1]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+3]=
oldTriangleIdxOffset[oldN1];
}
// This neighbor has to be determined by comparing vertex indices
subDivCntN2 = subdivCnt_D[oldN2];
if (subDivCntN2 > 0) {
for (int i = 0; i < subDivCntN2; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+4]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN2]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+4]=
oldTriangleIdxOffset[oldN2];
}
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+5] =
numberOfKeptTriangles+triIdxOffs;
} else if (flag0 && flag1 && !flag2) { // 3 new triangles have been spawned
uint vNew0 = vertexCntOld + subDivEdgeIdxOffs_D[e0];
uint vNew1 = vertexCntOld + subDivEdgeIdxOffs_D[e1];
// Get index of neighbors of old triangle
uint oldN0 = oldTriangleNeighbors_D[3*idx+0];
uint oldN1 = oldTriangleNeighbors_D[3*idx+1];
uint oldN2 = oldTriangleNeighbors_D[3*idx+2];
// #0
uint w0 = v1;
uint w1 = vNew1;
uint w2 = vNew0;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN1 = subdivCnt_D[oldN1];
if (subDivCntN1 > 0) {
for (int i = 0; i < subDivCntN1; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN1]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0] =
oldTriangleIdxOffset[oldN1];
}
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+1] =
numberOfKeptTriangles+triIdxOffs + 1;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN0 = subdivCnt_D[oldN0];
if (subDivCntN0 > 0) {
for (int i = 0; i < subDivCntN0; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+2]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN0]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+2]=
oldTriangleIdxOffset[oldN0];
}
// #1
w0 = v0;
w1 = vNew0;
w2 = vNew1;
// This neighbor has to be determined by comparing vertex indices
subDivCntN0 = subdivCnt_D[oldN0];
if (subDivCntN0 > 0) {
for (int i = 0; i < subDivCntN0; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+3]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN0]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+3] =
oldTriangleIdxOffset[oldN0];
}
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+4] =
numberOfKeptTriangles+triIdxOffs;
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+5] =
numberOfKeptTriangles+triIdxOffs + 2;
// #2
w0 = v2;
w1 = v0;
w2 = vNew1;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN2 = subdivCnt_D[oldN2];
if (subDivCntN2 > 0) {
for (int i = 0; i < subDivCntN2; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+6]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN2]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+6] =
oldTriangleIdxOffset[oldN2];
}
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs) + 7] =
numberOfKeptTriangles+triIdxOffs + 1;
// This neighbor has to be determined by comparing vertex indices
subDivCntN1 = subdivCnt_D[oldN1];
if (subDivCntN1 > 0) {
for (int i = 0; i < subDivCntN1; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+8]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN1]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+8] =
oldTriangleIdxOffset[oldN1];
}
} else if (!flag0 && flag1 && flag2) { // 3 new triangles have been spawned
uint vNew1 = vertexCntOld + subDivEdgeIdxOffs_D[e1];
uint vNew2 = vertexCntOld + subDivEdgeIdxOffs_D[e2];
// Get index of neighbors of old triangle
uint oldN0 = oldTriangleNeighbors_D[3*idx+0];
uint oldN1 = oldTriangleNeighbors_D[3*idx+1];
uint oldN2 = oldTriangleNeighbors_D[3*idx+2];
// #0
uint w0 = v2;
uint w1 = vNew2;
uint w2 = vNew1;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN2 = subdivCnt_D[oldN2];
if (subDivCntN2 > 0) {
for (int i = 0; i < subDivCntN2; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN2]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0] =
oldTriangleIdxOffset[oldN2];
}
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs) + 1] =
numberOfKeptTriangles+triIdxOffs + 1;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN1 = subdivCnt_D[oldN1];
if (subDivCntN1 > 0) {
for (int i = 0; i < subDivCntN1; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+2]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN1]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+2] =
oldTriangleIdxOffset[oldN1];
}
// #1
w0 = v0;
w1 = vNew1;
w2 = vNew2;
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs) + 3] =
numberOfKeptTriangles+triIdxOffs + 2;
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs) + 4] =
numberOfKeptTriangles+triIdxOffs;
// This neighbor has to be determined by comparing vertex indices
subDivCntN2 = subdivCnt_D[oldN2];
if (subDivCntN2 > 0) {
for (int i = 0; i < subDivCntN2; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+5]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN2]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+5] =
oldTriangleIdxOffset[oldN2];
}
// #2
w0 = v0;
w1 = v1;
w2 = vNew1;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN0 = subdivCnt_D[oldN0];
if (subDivCntN0 > 0) {
for (int i = 0; i < subDivCntN0; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+6]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN0]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+6] =
oldTriangleIdxOffset[oldN0];
}
// This neighbor has to be determined by comparing vertex indices
subDivCntN1 = subdivCnt_D[oldN1];
if (subDivCntN1 > 0) {
for (int i = 0; i < subDivCntN1; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+7]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN1]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+7] =
oldTriangleIdxOffset[oldN1];
}
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs) + 8] =
numberOfKeptTriangles+triIdxOffs + 1;
} else if (flag0 && !flag1 && flag2) { // 3 new triangles have been spawned
// Get index of neighbors of old triangle
uint oldN0 = oldTriangleNeighbors_D[3*idx+0];
uint oldN1 = oldTriangleNeighbors_D[3*idx+1];
uint oldN2 = oldTriangleNeighbors_D[3*idx+2];
uint vNew2 = vertexCntOld + subDivEdgeIdxOffs_D[e2];
uint vNew0 = vertexCntOld + subDivEdgeIdxOffs_D[e0];
// #0
uint w0 = v0;
uint w1 = vNew0;
uint w2 = vNew2;
// This neighbor has to be determined by comparing vertex indices
// TODO DEBUG!!
uint subDivCntN0 = subdivCnt_D[oldN0];
if (subDivCntN0 > 0) {
for (int i = 0; i < subDivCntN0; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN0]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0] =
oldTriangleIdxOffset[oldN0];
}
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs) + 1] =
numberOfKeptTriangles+triIdxOffs + 1;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN2 = subdivCnt_D[oldN2];
if (subDivCntN2 > 0) {
for (int i = 0; i < subDivCntN2; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+2]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN2]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+2] =
oldTriangleIdxOffset[oldN2];
}
// #1
w0 = v2;
w1 = vNew2;
w2 = vNew0;
// This neighbor has to be determined by comparing vertex indices
subDivCntN2 = subdivCnt_D[oldN2];
if (subDivCntN2 > 0) {
for (int i = 0; i < subDivCntN2; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+3]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN2]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+3] =
oldTriangleIdxOffset[oldN2];
}
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs) + 4] =
numberOfKeptTriangles+triIdxOffs;
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs) + 5] =
numberOfKeptTriangles+triIdxOffs+2;
// #2
w0 = v1;
w1 = v2;
w2 = vNew0;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN1 = subdivCnt_D[oldN1];
if (subDivCntN1 > 0) {
for (int i = 0; i < subDivCntN1; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+6]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN1]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+6] =
oldTriangleIdxOffset[oldN1];
}
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs) + 7] =
numberOfKeptTriangles+triIdxOffs + 1;
subDivCntN0 = subdivCnt_D[oldN0];
if (subDivCntN0 > 0) {
for (int i = 0; i < subDivCntN0; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+8]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN0]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+8] =
oldTriangleIdxOffset[oldN0];
}
} else if (flag0 && flag1 && flag2) { // 4 new triangles have been spawned
uint vNew0 = vertexCntOld + subDivEdgeIdxOffs_D[e0];
uint vNew1 = vertexCntOld + subDivEdgeIdxOffs_D[e1];
uint vNew2 = vertexCntOld + subDivEdgeIdxOffs_D[e2];
// Get index of neighbors of old triangle
uint oldN0 = oldTriangleNeighbors_D[3*idx+0];
uint oldN1 = oldTriangleNeighbors_D[3*idx+1];
uint oldN2 = oldTriangleNeighbors_D[3*idx+2];
// #0
uint w0 = v0;
uint w1 = vNew0;
uint w2 = vNew2;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN0 = subdivCnt_D[oldN0];
if (subDivCntN0 > 0) {
for (int i = 0; i < subDivCntN0; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN0]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0]=
oldTriangleIdxOffset[oldN0];
}
// This neighbor is the middle subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+1] =
numberOfKeptTriangles+triIdxOffs + 3;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN2 = subdivCnt_D[oldN2];
if (subDivCntN2 > 0) {
for (int i = 0; i < subDivCntN2; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+2]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN2]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+2]=
oldTriangleIdxOffset[oldN2];
}
// #1
w0 = v1;
w1 = vNew1;
w2 = vNew0;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN1 = subdivCnt_D[oldN1];
if (subDivCntN1 > 0) {
for (int i = 0; i < subDivCntN1; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+3]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN1]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+3]=
oldTriangleIdxOffset[oldN1];
}
// This neighbor is the middle subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+4] =
numberOfKeptTriangles+triIdxOffs + 3;
// This neighbor has to be determined by comparing vertex indices
subDivCntN0 = subdivCnt_D[oldN0];
if (subDivCntN0 > 0) {
for (int i = 0; i < subDivCntN0; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+5]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN0]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+5]=
oldTriangleIdxOffset[oldN0];
}
// #2
w0 = v2;
w1 = vNew2;
w2 = vNew1;
// This neighbor has to be determined by comparing vertex indices
subDivCntN2 = subdivCnt_D[oldN2];
if (subDivCntN2 > 0) {
for (int i = 0; i < subDivCntN2; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+6]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN2]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+6]=
oldTriangleIdxOffset[oldN2];
}
// This neighbor is the middle subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+7] =
numberOfKeptTriangles+triIdxOffs + 3;
// This neighbor has to be determined by comparing vertex indices
subDivCntN1 = subdivCnt_D[oldN1];
if (subDivCntN1 > 0) {
for (int i = 0; i < subDivCntN1; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+8]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN1]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+8]=
oldTriangleIdxOffset[oldN1];
}
// #3 This is the middle triangle
w0 = vNew0;
w1 = vNew1;
w2 = vNew2;
// This neighbor is the middle subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+9] =
numberOfKeptTriangles+triIdxOffs + 1;
// This neighbor is the middle subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+10] =
numberOfKeptTriangles+triIdxOffs + 2;
// This neighbor is the middle subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+11] =
numberOfKeptTriangles+triIdxOffs + 0;
}
}
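/*
 * CopyNewDataToVertexBuffer_D
 * Appends the newly created vertex positions behind the existing vertices in
 * the vertex data buffer and initializes their normals and texture
 * coordinates with default values.
 */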
__global__ void CopyNewDataToVertexBuffer_D(
float *newVertices_D,
float *newBuffer_D,
uint oldVertexCnt,
uint newVertexCnt) {
const uint vertexDataStride = 9;
const uint idx = ::getThreadIdx();
if (idx >= newVertexCnt) return;
newBuffer_D[vertexDataStride*(oldVertexCnt+idx)+0] = newVertices_D[3*idx+0];
newBuffer_D[vertexDataStride*(oldVertexCnt+idx)+1] = newVertices_D[3*idx+1];
newBuffer_D[vertexDataStride*(oldVertexCnt+idx)+2] = newVertices_D[3*idx+2];
newBuffer_D[vertexDataStride*(oldVertexCnt+idx)+3] = 1.0; // Normal
newBuffer_D[vertexDataStride*(oldVertexCnt+idx)+4] = 0.0;
newBuffer_D[vertexDataStride*(oldVertexCnt+idx)+5] = 0.0;
newBuffer_D[vertexDataStride*(oldVertexCnt+idx)+6] = 0.0; // TC
newBuffer_D[vertexDataStride*(oldVertexCnt+idx)+7] = 0.0;
newBuffer_D[vertexDataStride*(oldVertexCnt+idx)+8] = 0.0;
}
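/*
 * CopyOldDataToTriangleBuffer_D
 * Copies the indices of all triangles that were not subdivided to their new
 * (compacted) positions in the triangle index buffer.
 */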
__global__ void CopyOldDataToTriangleBuffer_D(
uint *oldTriangleIdx_D,
uint *oldTriangleIdxOffs_D,
uint *newTriangleIdx_D,
uint *subdivCnt_D,
uint oldTriangleCnt) {
const uint idx = ::getThreadIdx();
if (idx >= oldTriangleCnt) return;
if (subdivCnt_D[idx] > 0) return; // Subdivided triangles are dismissed
uint newIdx = oldTriangleIdxOffs_D[idx];
newTriangleIdx_D[3*newIdx+0] = oldTriangleIdx_D[3*idx+0];
newTriangleIdx_D[3*newIdx+1] = oldTriangleIdx_D[3*idx+1];
newTriangleIdx_D[3*newIdx+2] = oldTriangleIdx_D[3*idx+2];
}
/*
* DeformableGPUSurfaceMT::RefineMesh
*/
int DeformableGPUSurfaceMT::RefineMesh(
uint maxSubdivLevel,
float *volume_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
float isovalue,
float maxEdgeLen) {
using megamol::core::utility::log::Log;
// Init grid parameters
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return -1;
}
cudaGraphicsResource* cudaTokens[2];
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0], this->vboVtxData,
hipGraphicsMapFlagsNone))) {
Log::DefaultLog.WriteError(
"%s: could register buffer",
this->ClassName());
return -1;
}
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[1], this->vboTriangleIdx,
hipGraphicsMapFlagsNone))) {
Log::DefaultLog.WriteError(
"%s: could not register buffer",
this->ClassName());
return -1;
}
if (!CudaSafeCall(hipGraphicsMapResources(2, cudaTokens))) {
Log::DefaultLog.WriteError(
"%s: could not map recources",
this->ClassName());
return -1;
}
// Get mapped pointers to the vertex data buffers
float *vboPt;
size_t vboSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
Log::DefaultLog.WriteError(
"%s: could not obtain device pointer",
this->ClassName());
return -1;
}
// Get mapped pointers to the vertex data buffers
unsigned int *vboTriIdxPt;
size_t vboTriSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboTriIdxPt), // The mapped pointer
&vboTriSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
Log::DefaultLog.WriteError(
"%s: could not obtain device pointer",
this->ClassName());
return -1;
}
/* 1. Compute edge list */
//#define USE_TIMER
#ifdef USE_TIMER
float dt_ms;
hipEvent_t event1, event2, eventStart, eventEnd;
hipEventCreate(&event1);
hipEventCreate(&event2);
hipEventCreate(&eventStart);
hipEventCreate(&eventEnd);
hipEventRecord(event1, 0);
hipEventRecord(eventStart, 0);
#endif
const uint edgeCnt = (this->triangleCnt*3)/2;
// printf("EDGE COUNT %u\n", edgeCnt);
// Get the number of edges associated with each triangle
if (!CudaSafeCall(this->triangleEdgeOffs_D.Validate(this->triangleCnt))) {
return -1;
}
if (!CudaSafeCall(this->triangleEdgeOffs_D.Set(0x00))) {
return -1;
}
// Check whether triangle neighbors have been computed
if (this->triangleNeighbors_D.GetCount() != this->triangleCnt*3) {
Log::DefaultLog.WriteError(
"%s: need triangle neighbors",
this->ClassName());
return -1;
}
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_GetTriangleEdgeCnt_D) , dim3(Grid(this->triangleCnt, 256)), dim3(256) , 0, 0,
this->triangleEdgeOffs_D.Peek(),
this->triangleNeighbors_D.Peek(),
this->triangleCnt);
if (!CheckForCudaError()) {
return -1;
}
// Compute prefix sum
thrust::exclusive_scan(
thrust::device_ptr<int>(this->triangleEdgeOffs_D.Peek()),
thrust::device_ptr<int>(this->triangleEdgeOffs_D.Peek() + this->triangleCnt),
thrust::device_ptr<int>(this->triangleEdgeOffs_D.Peek()));
if (!CheckForCudaError()) {
return -1;
}
// Build up edge list based on the offsets
if (!CudaSafeCall(this->edges_D.Validate(edgeCnt*2))) {
return -1;
}
if (!CudaSafeCall(this->edges_D.Set(0x00))) {
return -1;
}
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_BuildEdgeList_D) , dim3(Grid(this->triangleCnt, 256)), dim3(256) , 0, 0,
this->edges_D.Peek(),
this->triangleEdgeOffs_D.Peek(),
this->triangleNeighbors_D.Peek(),
vboTriIdxPt,
this->triangleCnt);
if (!CheckForCudaError()) {
return -1;
}
// // DEBUG Print edges
// this->edges.Validate(this->edges_D.GetCount());
// if (!CudaSafeCall(this->edges_D.CopyToHost(this->edges.Peek()))){
// return false;
// }
// for (int e = 0; e < edgeCnt; ++e) {
// printf("EDGE %i: %u %u\n", e,
// this->edges.Peek()[2*e+0],
// this->edges.Peek()[2*e+1]);
// }
// // END DEBUG
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for computing edge list: %.10f sec\n",
dt_ms/1000.0f);
hipEventRecord(event1, 0);
#endif
/* 2. Flag long edges and determine number of newly created vertices */
// Build up edge list based on the offsets
if (!CudaSafeCall(this->subDivEdgeFlag_D.Validate(edgeCnt))) {
return -1;
}
if (!CudaSafeCall(this->subDivEdgeFlag_D.Set(0x00))) { // Set to 'false'
return -1;
}
hipLaunchKernelGGL(( FlagLongEdges_D) , dim3(Grid(edgeCnt, 256)), dim3(256) , 0, 0,
this->subDivEdgeFlag_D.Peek(),
this->edges_D.Peek(),
vboPt,
maxEdgeLen*maxEdgeLen,
this->edges_D.GetCount()/2);
if (!CheckForCudaError()) {
return -1;
}
// Compute prefix sum
if (!CudaSafeCall(this->subDivEdgeIdxOffs_D.Validate(edgeCnt))) {
return -1;
}
if (!CudaSafeCall(this->subDivEdgeIdxOffs_D.Set(0x00))) { // Set to 'false'
return -1;
}
thrust::exclusive_scan(
thrust::device_ptr<uint>(this->subDivEdgeFlag_D.Peek()),
thrust::device_ptr<uint>(this->subDivEdgeFlag_D.Peek() + edgeCnt),
thrust::device_ptr<uint>(this->subDivEdgeIdxOffs_D.Peek()));
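// The total number of new vertices is the last exclusive-scan offset plus the
// flag of the last edge (the scan is exclusive, so the final element is not
// included in the offsets)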
uint accTmp;
if (!CudaSafeCall(hipMemcpy(&accTmp, this->subDivEdgeFlag_D.Peek()+(edgeCnt-1), sizeof(uint),
hipMemcpyDeviceToHost))) {
return -1;
}
this->newVertexCnt = accTmp;
if (!CudaSafeCall(hipMemcpy(&accTmp, this->subDivEdgeIdxOffs_D.Peek()+(edgeCnt-1), sizeof(uint),
hipMemcpyDeviceToHost))) {
return -1;
}
this->newVertexCnt += accTmp;
this->nFlaggedVertices += this->newVertexCnt;
if (this->newVertexCnt == 0) {
// !! Unmap/unregister VBOs because they will be reinitialized
CudaSafeCall(hipGraphicsUnmapResources(2, cudaTokens, 0));
CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]));
CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]));
return 0;
}
// printf("Need %i new vertices (old triangle count %u)\n", newVertexCnt, this->triangleCnt);
// // DEBUG print edge flag
// HostArr<uint> edgeFlag;
// edgeFlag.Validate(this->subDivEdgeFlag_D.GetCount());
// this->subDivEdgeFlag_D.CopyToHost(edgeFlag.Peek());
// for (int i = 0; i < edgeCnt; ++i) {
// printf("EDGEFLAG %i %u\n", i, edgeFlag.Peek()[i]);
// }
// edgeFlag.Release();
// // END DEBUG
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for flagging edges and thrust reduce: %.10f sec\n",
dt_ms/1000.0f);
hipEventRecord(event1, 0);
#endif
/* 3. Interpolate new vertex positions associated with the flagged edges */
if (!CudaSafeCall(this->newVertices_D.Validate(this->newVertexCnt*3))) {
return -1;
}
if (this->vertexFlag_D.GetCount() != this->vertexCnt) { // First subdivision round
if (!CudaSafeCall(this->vertexFlag_D.Validate(this->newVertexCnt + this->vertexCnt))) {
return -1;
}
if (!CudaSafeCall(this->vertexFlag_D.Set(0x00))) {
return -1;
}
} else { // Need to save old flags
if (!CudaSafeCall(this->vertexFlagTmp_D.Validate(this->vertexFlag_D.GetCount()))) {
return -1;
}
if (!CudaSafeCall(hipMemcpy(
this->vertexFlagTmp_D.Peek(),
this->vertexFlag_D.Peek(),
sizeof(float)*this->vertexFlag_D.GetCount(),
hipMemcpyDeviceToDevice))) {
return -1;
}
if (!CudaSafeCall(this->vertexFlag_D.Validate(this->newVertexCnt + this->vertexCnt))) {
return -1;
}
if (!CudaSafeCall(this->vertexFlag_D.Set(0x00))) {
return -1;
}
if (!CudaSafeCall(hipMemcpy(
this->vertexFlag_D.Peek(),
this->vertexFlagTmp_D.Peek(),
sizeof(float)*this->vertexFlagTmp_D.GetCount(),
hipMemcpyDeviceToDevice))) {
return -1;
}
}
hipLaunchKernelGGL(( ComputeNewVertices) , dim3(Grid(edgeCnt, 256)), dim3(256) , 0, 0,
this->newVertices_D.Peek(),
this->vertexFlag_D.Peek(),
this->subDivEdgeIdxOffs_D.Peek(),
this->subDivEdgeFlag_D.Peek(),
this->edges_D.Peek(),
vboPt,
this->vertexCnt,
edgeCnt);
if (!CheckForCudaError()) {
return -1;
}
// Compute number of flagged vertices
this->nFlaggedVertices = thrust::reduce(
thrust::device_ptr<float>(this->vertexFlag_D.Peek()),
thrust::device_ptr<float>(this->vertexFlag_D.Peek() + this->vertexCnt));
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for interpolating new vertices: %.10f sec\n",
dt_ms/1000.0f);
hipEventRecord(event1, 0);
#endif
/* 4. Build triangle-edge-list */
if (this->triangleNeighbors_D.GetCount() != this->triangleCnt*3) {
Log::DefaultLog.WriteError(
"%s: need triangle neighbors",
this->ClassName());
// !! Unmap/unregister VBOs because they will be reinitialized
CudaSafeCall(hipGraphicsUnmapResources(2, cudaTokens, 0));
CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]));
CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]));
return -1;
}
if (!CudaSafeCall(this->triangleEdgeList_D.Validate(this->triangleCnt*3))) {
return -1;
}
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_ComputeTriEdgeList_D) , dim3(Grid(this->triangleCnt, 256)), dim3(256) , 0, 0,
this->triangleEdgeList_D.Peek(),
this->triangleEdgeOffs_D.Peek(),
this->triangleNeighbors_D.Peek(),
vboTriIdxPt,
this->triangleCnt);
if (!CheckForCudaErrorSync()) {
return -1;
}
// // DEBUG Triangle edge list
// HostArr<unsigned int> triangleEdgeList;
// triangleEdgeList.Validate(this->triangleEdgeList_D.GetCount());
// if (!CudaSafeCall(this->triangleEdgeList_D.CopyToHost(triangleEdgeList.Peek()))){
// return false;
// }
// for (int e = 0; e < this->triangleCnt; ++e) {
// printf("Tri %i, edges: %u %u %u\n", e,
// triangleEdgeList.Peek()[3*e+0],
// triangleEdgeList.Peek()[3*e+1],
// triangleEdgeList.Peek()[3*e+2]);
// }
// triangleEdgeList.Release();
// // END DEBUG
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for triangle edge list: %.10f sec\n",
dt_ms/1000.0f);
hipEventRecord(event1, 0);
#endif
/* 5. Determine number of newly created triangles */
if (!CudaSafeCall(this->subDivCnt_D.Validate(this->triangleCnt))) {
return -1;
}
if (!CudaSafeCall(this->subDivCnt_D.Set(0x00))) {
return -1;
}
if (!CudaSafeCall(this->oldTrianglesIdxOffs_D.Validate(this->triangleCnt))) {
return -1;
}
hipLaunchKernelGGL(( ComputeSubdivCnt_D) , dim3(Grid(this->triangleCnt, 256)), dim3(256) , 0, 0,
this->subDivCnt_D.Peek(),
this->triangleEdgeList_D.Peek(),
this->subDivEdgeFlag_D.Peek(),
this->edges_D.Peek(),
this->oldTrianglesIdxOffs_D.Peek(),
this->triangleCnt);
if (!CheckForCudaErrorSync()) {
return -1;
}
if (!CudaSafeCall(this->newTrianglesIdxOffs_D.Validate(this->triangleCnt))) {
return -1;
}
// Compute prefix sum
thrust::exclusive_scan(
thrust::device_ptr<uint>(this->subDivCnt_D.Peek()),
thrust::device_ptr<uint>(this->subDivCnt_D.Peek() + this->triangleCnt),
thrust::device_ptr<uint>(this->newTrianglesIdxOffs_D.Peek()));
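// The total number of new triangles is the last exclusive-scan offset plus
// the subdivision count of the last triangle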
uint newTrianglesCnt;
if (!CudaSafeCall(hipMemcpy(&accTmp, this->subDivCnt_D.Peek()+(this->triangleCnt-1), sizeof(uint),
hipMemcpyDeviceToHost))) {
return -1;
}
newTrianglesCnt = accTmp;
if (!CudaSafeCall(hipMemcpy(&accTmp, this->newTrianglesIdxOffs_D.Peek()+(this->triangleCnt-1), sizeof(uint),
hipMemcpyDeviceToHost))) {
return -1;
}
newTrianglesCnt += accTmp;
// printf("Need %i new triangles\n", newTrianglesCnt);
uint nOldTriangles = thrust::reduce(
thrust::device_ptr<uint>(this->oldTrianglesIdxOffs_D.Peek()),
thrust::device_ptr<uint>(this->oldTrianglesIdxOffs_D.Peek() + this->triangleCnt));
thrust::exclusive_scan(
thrust::device_ptr<uint>(this->oldTrianglesIdxOffs_D.Peek()),
thrust::device_ptr<uint>(this->oldTrianglesIdxOffs_D.Peek() + this->triangleCnt),
thrust::device_ptr<uint>(this->oldTrianglesIdxOffs_D.Peek()));
// printf("Keep %i old triangles\n", nOldTriangles);
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for computing number of new triangles: %.10f sec\n",
dt_ms/1000.0f);
hipEventRecord(event1, 0);
#endif
/* 6. Create new triangles with respective vertex indices */
if (this->subDivLevels_D.GetCount() != this->triangleCnt) {
// This is the first subdivision
if (!CudaSafeCall(this->oldSubDivLevels_D.Validate(this->triangleCnt))) {
return -1;
}
if (!CudaSafeCall(this->oldSubDivLevels_D.Set(0x00))) {
return -1;
}
} else { // Store old subdivision levels
if (!CudaSafeCall(this->oldSubDivLevels_D.Validate(this->triangleCnt))) {
return -1;
}
if (!CudaSafeCall(hipMemcpy(this->oldSubDivLevels_D.Peek(),
this->subDivLevels_D.Peek(), sizeof(unsigned int)*this->triangleCnt,
hipMemcpyDeviceToDevice))){
return -1;
}
}
// Allocate memory for new subdivision levels (old and new triangles)
if (!CudaSafeCall(this->subDivLevels_D.Validate(nOldTriangles+newTrianglesCnt))) {
return -1;
}
if (!CudaSafeCall(this->newTriangles_D.Validate(newTrianglesCnt*3))) {
return -1;
}
hipLaunchKernelGGL(( ComputeSubdiv_D) , dim3(Grid(this->triangleCnt, 256)), dim3(256) , 0, 0,
this->newTriangles_D.Peek(),
this->newTrianglesIdxOffs_D.Peek(),
this->triangleEdgeList_D.Peek(),
vboTriIdxPt,
this->subDivEdgeFlag_D.Peek(),
this->edges_D.Peek(),
this->subDivEdgeIdxOffs_D.Peek(),
this->oldSubDivLevels_D.Peek(),
this->subDivLevels_D.Peek(),
this->oldTrianglesIdxOffs_D.Peek(),
this->vertexCnt,
nOldTriangles,
this->triangleCnt);
if (!CheckForCudaErrorSync()) {
return -1;
}
// // DEBUG Print new triangles
// HostArr<uint> newTriangles;
// newTriangles.Validate(this->newTriangles_D.GetCount());
// this->newTriangles_D.CopyToHost(newTriangles.Peek());
// for (int i = 0; i < this->newTriangles_D.GetCount()/3; ++i) {
// printf("NEW TRI %i: %u %u %u\n", i,
// newTriangles.Peek()[3*i+0],
// newTriangles.Peek()[3*i+1],
// newTriangles.Peek()[3*i+2]);
// }
// newTriangles.Release();
// // END DEBUG
// // DEBUG Print subdivision levels
// HostArr<uint> subDivisionLevels;
// subDivisionLevels.Validate(this->subDivLevels_D.GetCount());
// this->subDivLevels_D.CopyToHost(subDivisionLevels.Peek());
// for (int i = 0; i < this->subDivLevels_D.GetCount(); ++i) {
// printf("SUBDIV LVL %i: %u \n", i,
// subDivisionLevels.Peek()[i]);
// }
// subDivisionLevels.Release();
// // END DEBUG
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for computing new triangles: %.10f sec\n",
dt_ms/1000.0f);
hipEventRecord(event1, 0);
#endif
/* 7. (Re-)compute triangle neighbors */
if (!CudaSafeCall(this->newTriangleNeighbors_D.Validate((nOldTriangles+newTrianglesCnt)*3))) {
return -1;
}
if (!CudaSafeCall(this->newTriangleNeighbors_D.Set(0x00))) {
return -1;
}
hipLaunchKernelGGL(( ComputeSubdivTriNeighbors_D) , dim3(Grid(this->triangleCnt, 256)), dim3(256) , 0, 0,
this->newTriangleNeighbors_D.Peek(),
this->triangleNeighbors_D.Peek(),
this->newTrianglesIdxOffs_D.Peek(),
this->triangleEdgeList_D.Peek(),
vboTriIdxPt,
this->subDivEdgeFlag_D.Peek(),
this->edges_D.Peek(),
this->subDivEdgeIdxOffs_D.Peek(),
this->subDivCnt_D.Peek(),
this->oldTrianglesIdxOffs_D.Peek(),
this->newTriangles_D.Peek(),
this->vertexCnt,
nOldTriangles,
this->triangleCnt);
// Reallocate old array TODO Simply swap pointers?
if (!CudaSafeCall(this->triangleNeighbors_D.Validate(this->newTriangleNeighbors_D.GetCount()))) {
return -1;
}
if (!CudaSafeCall(hipMemcpy(
this->triangleNeighbors_D.Peek(),
this->newTriangleNeighbors_D.Peek(),
this->newTriangleNeighbors_D.GetCount()*sizeof(unsigned int),
hipMemcpyDeviceToDevice))) {
return -1;
}
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for updating triangle neighbors: %.10f sec\n",
dt_ms/1000.0f);
hipEventRecord(event1, 0);
#endif
/* 8. Update VBOs for vertex data and triangle indices */
// // DEBUG Print oldTriangles index offset and subdivision count
// HostArr<unsigned int> oldTrianglesIdxOffs;
// oldTrianglesIdxOffs.Validate(this->oldTrianglesIdxOffs_D.GetCount());
// if (!CudaSafeCall(this->oldTrianglesIdxOffs_D.CopyToHost(oldTrianglesIdxOffs.Peek()))) {
// return -1;
// }
// HostArr<unsigned int> subDivCnt;
// subDivCnt.Validate(this->subDivCnt_D.GetCount());
// if (!CudaSafeCall(this->subDivCnt_D.CopyToHost(subDivCnt.Peek()))) {
// return -1;
// }
// for (int i = 0; i < this->triangleCnt; ++i) {
// printf("%i: offs: %u, subdiv %u\n", i, oldTrianglesIdxOffs.Peek()[i],
// subDivCnt.Peek()[i]);
// }
// subDivCnt.Release();
// oldTrianglesIdxOffs.Release();
// // END DEBUG
// // DEBUG print old vertex buffer
// HostArr<float> vertexBuffer;
// vertexBuffer.Validate(this->vertexDataStride*this->vertexCnt);
// hipMemcpy(vertexBuffer.Peek(), vboPt, vertexBuffer.GetCount()*sizeof(float), hipMemcpyDeviceToHost);
// for (int i = 0; i < this->vertexCnt; ++i) {
// printf("Old Vertex Buffer %i: %f %f %f, %f %f %f, %f %f %f\n", i,
// vertexBuffer.Peek()[9*i+0],
// vertexBuffer.Peek()[9*i+1],
// vertexBuffer.Peek()[9*i+2],
// vertexBuffer.Peek()[9*i+3],
// vertexBuffer.Peek()[9*i+4],
// vertexBuffer.Peek()[9*i+5],
// vertexBuffer.Peek()[9*i+6],
// vertexBuffer.Peek()[9*i+7],
// vertexBuffer.Peek()[9*i+8]);
// }
// vertexBuffer.Release();
// // END DEBUG
// // DEBUG print old triangle index buffer
// HostArr<uint> triangleBuffer;
// triangleBuffer.Validate(3*this->triangleCnt);
// hipMemcpy(triangleBuffer.Peek(), vboTriIdxPt,
// triangleBuffer.GetCount()*sizeof(uint), hipMemcpyDeviceToHost);
// for (int i = 0; i < this->triangleCnt; ++i) {
// printf("Old Triangle Buffer %i: %u %u %u\n",i,
// triangleBuffer.Peek()[3*i+0],
// triangleBuffer.Peek()[3*i+1],
// triangleBuffer.Peek()[3*i+2]);
// }
// triangleBuffer.Release();
// // END DEBUG
// Make copy of old data
if (!CudaSafeCall(this->oldTriangles_D.Validate(this->triangleCnt*3))) {
return -1;
}
if (!CudaSafeCall(this->trackedSubdivVertexData_D.Validate(this->vertexCnt*this->vertexDataStride))) {
return -1;
}
if (!CudaSafeCall(hipMemcpy(this->oldTriangles_D.Peek(), vboTriIdxPt,
sizeof(unsigned int)*this->oldTriangles_D.GetCount(),
hipMemcpyDeviceToDevice))) {
return -1;
}
if (!CudaSafeCall(hipMemcpy(this->trackedSubdivVertexData_D.Peek(), vboPt,
sizeof(float)*this->trackedSubdivVertexData_D.GetCount(),
hipMemcpyDeviceToDevice))) {
return -1;
}
// !! Unmap/unregister VBOs because they will be reinitialized
if (!CudaSafeCall(hipGraphicsUnmapResources(2, cudaTokens, 0))) {
return -1;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return -1;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]))) {
return -1;
}
// Re-initialize VBOS
uint oldVertexCnt = this->vertexCnt;
this->vertexCnt += newVertexCnt;
uint oldTriangleCount = this->triangleCnt;
this->triangleCnt = nOldTriangles + newTrianglesCnt;
this->InitTriangleIdxVBO(this->triangleCnt);
this->InitVertexDataVBO(this->vertexCnt);
// Register and get pointers
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0], this->vboVtxData,
hipGraphicsMapFlagsNone))) {
return -1;
}
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[1], this->vboTriangleIdx,
hipGraphicsMapFlagsNone))) {
return -1;
}
if (!CudaSafeCall(hipGraphicsMapResources(2, cudaTokens, 0))) {
return -1;
}
// Get mapped pointers to the vertex data buffers
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return -1;
}
// Get mapped pointers to the vertex data buffers
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboTriIdxPt), // The mapped pointer
&vboTriSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return -1;
}
// Copy old vertex data to new buffer
if (!CudaSafeCall(hipMemcpy(vboPt, this->trackedSubdivVertexData_D.Peek(),
sizeof(float)*this->vertexDataStride*oldVertexCnt,
hipMemcpyDeviceToDevice))) {
return -1;
}
// Copy new vertex data to new buffer
hipLaunchKernelGGL(( CopyNewDataToVertexBuffer_D) , dim3(Grid(this->vertexCnt, 256)), dim3(256) , 0, 0,
this->newVertices_D.Peek(),
vboPt,
oldVertexCnt,
newVertexCnt);
if (!CheckForCudaError()) {
return -1;
}
// // DEBUG print old vertex buffer
// vertexBuffer.Validate(this->vertexDataStride*this->vertexCnt);
// hipMemcpy(vertexBuffer.Peek(), vboPt, vertexBuffer.GetCount()*sizeof(float), hipMemcpyDeviceToHost);
// for (int i = 0; i < this->vertexCnt; ++i) {
// printf("New Vertex Buffer %i: %f %f %f, %f %f %f, %f %f %f\n", i,
// vertexBuffer.Peek()[9*i+0],
// vertexBuffer.Peek()[9*i+1],
// vertexBuffer.Peek()[9*i+2],
// vertexBuffer.Peek()[9*i+3],
// vertexBuffer.Peek()[9*i+4],
// vertexBuffer.Peek()[9*i+5],
// vertexBuffer.Peek()[9*i+6],
// vertexBuffer.Peek()[9*i+7],
// vertexBuffer.Peek()[9*i+8]);
// }
// vertexBuffer.Release();
// // END DEBUG
// Copy old triangle indices to VBO
hipLaunchKernelGGL(( CopyOldDataToTriangleBuffer_D) , dim3(Grid(oldTriangleCount, 256)), dim3(256) , 0, 0,
this->oldTriangles_D.Peek(),
this->oldTrianglesIdxOffs_D.Peek(),
vboTriIdxPt,
this->subDivCnt_D.Peek(),
oldTriangleCount);
// Copy new data to triangle VBO
if (!CudaSafeCall(hipMemcpy(
vboTriIdxPt + 3*nOldTriangles, // New data starts after old data
this->newTriangles_D.Peek(),
sizeof(uint)*this->newTriangles_D.GetCount(),
hipMemcpyDeviceToDevice))) {
return -1;
}
// // DEBUG Print new triangle neighbors
// HostArr<uint> triNeighbors;
// triNeighbors.Validate(this->triangleNeighbors_D.GetCount());
// HostArr<uint> triangleBuffer;
// triangleBuffer.Validate(3*this->triangleCnt);
// hipMemcpy(triangleBuffer.Peek(), vboTriIdxPt,
// triangleBuffer.GetCount()*sizeof(uint), hipMemcpyDeviceToHost);
// if (!CudaSafeCall(this->triangleNeighbors_D.CopyToHost(triNeighbors.Peek()))) {
// return -1;
// }
// for (int i = 0; i < this->triangleNeighbors_D.GetCount()/3; ++i) {
//
//// printf("TRI NEIGHBORS %i: %u %u %u\n", i,
//// triNeighbors.Peek()[3*i+0],
//// triNeighbors.Peek()[3*i+1],
//// triNeighbors.Peek()[3*i+2]);
//
// // Check neighbor consistency
// uint v0 = triangleBuffer.Peek()[3*i+0];
// uint v1 = triangleBuffer.Peek()[3*i+1];
// uint v2 = triangleBuffer.Peek()[3*i+2];
//
// uint n00 = triangleBuffer.Peek()[3*triNeighbors.Peek()[3*i+0]+0];
// uint n01 = triangleBuffer.Peek()[3*triNeighbors.Peek()[3*i+0]+1];
// uint n02 = triangleBuffer.Peek()[3*triNeighbors.Peek()[3*i+0]+2];
//
// uint n10 = triangleBuffer.Peek()[3*triNeighbors.Peek()[3*i+1]+0];
// uint n11 = triangleBuffer.Peek()[3*triNeighbors.Peek()[3*i+1]+1];
// uint n12 = triangleBuffer.Peek()[3*triNeighbors.Peek()[3*i+1]+2];
//
// uint n20 = triangleBuffer.Peek()[3*triNeighbors.Peek()[3*i+2]+0];
// uint n21 = triangleBuffer.Peek()[3*triNeighbors.Peek()[3*i+2]+1];
// uint n22 = triangleBuffer.Peek()[3*triNeighbors.Peek()[3*i+2]+2];
//
//// printf("n0 %u %u %u, n1 %u %u %u, n2 %u %u %u\n",
//// n00, n01, n02, n10, n11, n12, n20, n21, n22);
//
// uint cnt = 0;
// bool flag0=false, flag1=false, flag2=false;
// if (v0 == n00) cnt++; if (v0 == n01) cnt++; if (v0 == n02) cnt++;
// if (v1 == n00) cnt++; if (v1 == n01) cnt++; if (v1 == n02) cnt++;
// if (v2 == n00) cnt++; if (v2 == n01) cnt++; if (v2 == n02) cnt++;
// if (cnt < 2) {
// flag0 = true;
//
// }
//
// cnt = 0;
// if (v0 == n10) cnt++; if (v0 == n11) cnt++; if (v0 == n12) cnt++;
// if (v1 == n10) cnt++; if (v1 == n11) cnt++; if (v1 == n12) cnt++;
// if (v2 == n10) cnt++; if (v2 == n11) cnt++; if (v2 == n12) cnt++;
// if (cnt < 2) {
// flag1 = true;
// }
//
// cnt = 0;
// if (v0 == n20) cnt++; if (v0 == n21) cnt++; if (v0 == n22) cnt++;
// if (v1 == n20) cnt++; if (v1 == n21) cnt++; if (v1 == n22) cnt++;
// if (v2 == n20) cnt++; if (v2 == n21) cnt++; if (v2 == n22) cnt++;
// if (cnt < 2) {
// flag2 = true;
// }
//
// if (flag0||flag1||flag2) {
// printf("TRI NEIGHBORS %i: %u %u %u\n", i,
// triNeighbors.Peek()[3*i+0],
// triNeighbors.Peek()[3*i+1],
// triNeighbors.Peek()[3*i+2]);
// }
// if (flag0) printf("----> %u inconsistent\n", triNeighbors.Peek()[3*i+0]);
// if (flag1) printf("----> %u inconsistent\n", triNeighbors.Peek()[3*i+1]);
// if (flag2) printf("----> %u inconsistent\n", triNeighbors.Peek()[3*i+2]);
//
// }
// triangleBuffer.Release();
// triNeighbors.Release();
// // END DEBUG
//
//// // DEBUG print new triangle index buffer
////// HostArr<uint> triangleBuffer;
//// triangleBuffer.Validate(3*this->triangleCnt);
//// hipMemcpy(triangleBuffer.Peek(), vboTriIdxPt,
//// triangleBuffer.GetCount()*sizeof(uint), hipMemcpyDeviceToHost);
//// for (int i = 0; i < this->triangleCnt; ++i) {
//// if ((i > 8200)&&(i < 8300)) {
//// printf("New Triangle Buffer %i: %u %u %u (vertex count %u)\n", i,
//// triangleBuffer.Peek()[3*i+0],
//// triangleBuffer.Peek()[3*i+1],
//// triangleBuffer.Peek()[3*i+2],
//// this->vertexCnt);
//// }
//// }
//// triangleBuffer.Release();
//// // END DEBUG
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for updating VBOs: %.10f sec\n",
dt_ms/1000.0f);
hipEventRecord(event1, 0);
hipEventRecord(eventEnd, 0);
hipEventSynchronize(eventStart);
hipEventSynchronize(eventEnd);
hipEventElapsedTime(&dt_ms, eventStart, eventEnd);
printf("==> Total CUDA time for mesh refinement: %.10f sec\n",
dt_ms/1000.0f);
#endif
// Cleanup
if (!CudaSafeCall(hipGraphicsUnmapResources(2, cudaTokens, 0))) {
return -1;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return -1;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]))) {
return -1;
}
return newTrianglesCnt;
#undef USE_TIMER
}
/*
* DeformableGPUSurfaceMT::Release
*/
void DeformableGPUSurfaceMT::Release() {
GPUSurfaceMT::Release();
CudaSafeCall(this->vertexExternalForcesScl_D.Release());
CudaSafeCall(this->gvfTmp_D.Release());
CudaSafeCall(this->gvfConstData_D.Release());
CudaSafeCall(this->laplacian_D.Release());
CudaSafeCall(this->laplacian2_D.Release());
CudaSafeCall(this->displLen_D.Release());
CudaSafeCall(this->distField_D.Release());
CudaSafeCall(this->externalForces_D.Release());
CudaSafeCall(this->accTriangleData_D.Release());
CudaSafeCall(this->accTriangleArea_D.Release());
CudaSafeCall(this->corruptTriangles_D.Release());
CudaSafeCall(this->intUncertaintyCorrupt_D.Release());
CudaSafeCall(this->accumPath_D.Release());
CudaSafeCall(triangleEdgeOffs_D.Release());
CudaSafeCall(triangleEdgeList_D.Release());
CudaSafeCall(subDivEdgeFlag_D.Release());
CudaSafeCall(subDivEdgeIdxOffs_D.Release());
CudaSafeCall(newVertices_D.Release());
CudaSafeCall(newTriangles_D.Release());
CudaSafeCall(oldTriangles_D.Release());
CudaSafeCall(trackedSubdivVertexData_D.Release());
CudaSafeCall(subDivCnt_D.Release());
CudaSafeCall(newTrianglesIdxOffs_D.Release());
CudaSafeCall(oldTrianglesIdxOffs_D.Release());
CudaSafeCall(newTriangleNeighbors_D.Release());
CudaSafeCall(subDivLevels_D.Release());
CudaSafeCall(oldSubDivLevels_D.Release());
CudaSafeCall(vertexFlag_D.Release());
CudaSafeCall(vertexFlagTmp_D.Release());
CudaSafeCall(vertexUncertaintyTmp_D.Release());
CudaSafeCall(triangleFaceNormals_D.Release());
CudaSafeCall(triangleIdxTmp_D.Release());
CudaSafeCall(outputArrayTmp_D.Release());
CudaSafeCall(reducedVertexKeysTmp_D.Release());
CudaSafeCall(reducedNormalsTmp_D.Release());
CudaSafeCall(vertexNormalsIndxOffs_D.Release());
CudaSafeCall(this->geometricLaplacian_D.Release());
if (this->vboCorruptTriangleVertexFlag) {
glBindBufferARB(GL_ARRAY_BUFFER, this->vboCorruptTriangleVertexFlag);
glDeleteBuffersARB(1, &this->vboCorruptTriangleVertexFlag);
glBindBufferARB(GL_ARRAY_BUFFER, 0);
this->vboCorruptTriangleVertexFlag = 0;
glBindBufferARB(GL_ARRAY_BUFFER, 0);
CheckForGLError();
}
if (this->vboVtxPath) {
glBindBufferARB(GL_ARRAY_BUFFER, this->vboVtxPath);
glDeleteBuffersARB(1, &this->vboVtxPath);
glBindBufferARB(GL_ARRAY_BUFFER, 0);
this->vboVtxPath = 0;
glBindBufferARB(GL_ARRAY_BUFFER, 0);
CheckForGLError();
}
if (this->vboVtxAttr) {
glBindBufferARB(GL_ARRAY_BUFFER, this->vboVtxAttr);
glDeleteBuffersARB(1, &this->vboVtxAttr);
glBindBufferARB(GL_ARRAY_BUFFER, 0);
this->vboVtxAttr = 0;
glBindBufferARB(GL_ARRAY_BUFFER, 0);
CheckForGLError();
}
::CheckForGLError();
}
/*
* DeformableGPUSurfaceMT::updateVtxPos
*/
bool DeformableGPUSurfaceMT::updateVtxPos(
float* volTarget_D,
float* vertexBuffer_D,
float* vtxUncertainty_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
float isovalue,
bool useCubicInterpolation,
size_t maxIt,
float surfMappedMinDisplScl,
float springStiffness,
float forceScl,
float externalForcesWeight,
bool trackPath,
bool externalForcesOnly,
bool useThinPlate) {
using namespace megamol::core::utility::log;
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
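// Per-vertex buffers for the discrete mesh Laplacian of the vertex positions
// (laplacian_D) and its second application (laplacian2_D, only needed when
// the thin-plate term is enabled)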
if (!CudaSafeCall(this->laplacian_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->laplacian_D.Set(0))) {
return false;
}
if (!CudaSafeCall(this->laplacian2_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->laplacian2_D.Set(0))) {
return false;
}
if (!CudaSafeCall(this->accumPath_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->accumPath_D.Set(0x00))) {
return false;
}
// Init uncertainty buffer with zero
if (trackPath) {
if (!CudaSafeCall(hipMemset(vtxUncertainty_D, 0x00, this->vertexCnt*sizeof(float)))) {
return false;
}
}
//#define USE_TIMER
#ifdef USE_TIMER
float dt_ms;
hipEvent_t event1, event2;
hipEventCreate(&event1);
hipEventCreate(&event2);
hipEventRecord(event1, 0);
#endif
#ifdef USE_TIMER
hipEvent_t eventStart, eventEnd;
hipEventCreate(&eventStart);
hipEventCreate(&eventEnd);
#endif
int iterationsNeeded = maxIt;
if (!externalForcesOnly) {
// TODO Timer
for (uint i = 0; i < maxIt; ++i) {
// Calc laplacian
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_MeshLaplacian_D) , dim3(Grid(this->vertexCnt, 256)), dim3(256) , 0, 0,
vertexBuffer_D,
this->vertexDataOffsPos,
this->vertexDataStride,
this->vertexNeighbours_D.Peek(),
18,
this->vertexCnt,
(float*)this->laplacian_D.Peek(),
0,
3);
::CheckForCudaErrorSync();
if (useThinPlate) {
// Calc laplacian^2
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_MeshLaplacian_D) , dim3(Grid(this->vertexCnt, 256)), dim3(256) , 0, 0,
(float*)this->laplacian_D.Peek(),
0,
3,
this->vertexNeighbours_D.Peek(),
18,
this->vertexCnt,
(float*)this->laplacian2_D.Peek(),
0,
3);
::CheckForCudaErrorSync();
// Update vertex position
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_UpdateVtxPos_D) , dim3(Grid(this->vertexCnt, 256)), dim3(256) , 0, 0,
volTarget_D,
vertexBuffer_D,
(float*)this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
vtxUncertainty_D,
(float4*)this->externalForces_D.Peek(),
this->laplacian_D.Peek(),
this->laplacian2_D.Peek(),
this->vertexCnt,
externalForcesWeight,
forceScl,
springStiffness,
isovalue,
surfMappedMinDisplScl,
useCubicInterpolation,
trackPath, // Track path of vertices
this->vertexDataOffsPos,
this->vertexDataOffsNormal,
this->vertexDataStride);
} else { // No thin plate aspect
// Update vertex position
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_UpdateVtxPosNoThinPlate_D) , dim3(Grid(this->vertexCnt, 256)), dim3(256) , 0, 0,
volTarget_D,
vertexBuffer_D,
(float*)this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
vtxUncertainty_D,
(float4*)this->externalForces_D.Peek(),
this->laplacian_D.Peek(),
this->vertexCnt,
externalForcesWeight,
forceScl,
isovalue,
surfMappedMinDisplScl,
useCubicInterpolation,
trackPath, // Track path of vertices
this->vertexDataOffsPos,
this->vertexDataOffsNormal,
this->vertexDataStride);
}
// Accumulate displacement length of this iteration step
float avgDisplLen = 0.0f;
avgDisplLen = thrust::reduce(
thrust::device_ptr<float>(this->displLen_D.Peek()),
thrust::device_ptr<float>(this->displLen_D.Peek() + this->vertexCnt));
if (!CudaSafeCall(hipGetLastError())) {
return false;
}
avgDisplLen /= static_cast<float>(this->vertexCnt);
// if (i%5 == 0) printf("It: %i, avgDispl: %.16f, min %.16f\n", i, avgDisplLen, surfMappedMinDisplScl);
// printf("It Reg: %i, avgDispl: %.16f, min %.16f\n", i, avgDisplLen, surfMappedMinDisplScl);
if (avgDisplLen < surfMappedMinDisplScl) {
iterationsNeeded = i+1;
break;
}
::CheckForCudaErrorSync();
}
} else {
for (uint i = 0; i < maxIt; ++i) {
// this->PrintVertexBuffer(1);
// // DEBUG print parameters
// printf("PARAMS:\n");
// printf("vertex count %u\n", this->vertexCnt);
// printf("forcesScl %f\n", forceScl);
// printf("isovalue %f\n", isovalue);
// printf("surfMappedMinDisplScl %f\n", surfMappedMinDisplScl);
// if (useCubicInterpolation) printf("useCubicInterpolation TRUE\n");
// else printf("useCubicInterpolation FALSE\n");
// if (trackPath) printf("trackPath TRUE\n");
// else printf("trackPath FALSE\n");
// // END DEBUG
// // DEBUG Print voltarget_D
// if (i == 0) {
// HostArr<float> volTarget;
// size_t gridSize = volDim.x*volDim.y*volDim.z;
// volTarget.Validate(gridSize);
// CudaSafeCall(hipMemcpy(volTarget.Peek(), volTarget_D,
// sizeof(float)*gridSize,
// hipMemcpyDeviceToHost));
//
// for (int i = 0; i < gridSize; ++i) {
// printf("VOL %.16f\n", volTarget.Peek()[i]);
// }
//
// volTarget.Release();
// }
// // END DEBUG
// hipEventRecord(eventStart, 0);
// Update vertex position
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_UpdateVtxPosExternalOnly_D) , dim3(Grid(this->vertexCnt, 256)), dim3(256) , 0, 0,
volTarget_D,
vertexBuffer_D,
(float*)this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
vtxUncertainty_D,
(float4*)this->externalForces_D.Peek(),
this->accumPath_D.Peek(),
this->vertexCnt,
forceScl,
isovalue,
surfMappedMinDisplScl,
useCubicInterpolation,
trackPath, // Track path of vertices
this->vertexDataOffsPos,
this->vertexDataOffsNormal,
this->vertexDataStride);
// hipEventRecord(eventEnd, 0);
// hipEventSynchronize(eventEnd);
// hipEventSynchronize(eventStart);
// hipEventElapsedTime(&dt_ms, eventStart, eventEnd);
//// Log::DefaultLog.WriteInfo(
//// "%s: Time for iteration (%u vertices): %f sec\n",
//// "DeformableGPUSurfaceMT",
//// this->vertexCnt,
//// dt_ms/1000.0f);
// hipEventRecord(eventStart, 0);
// Accumulate displacement length of this iteration step
float avgDisplLen = 0.0f;
avgDisplLen = thrust::reduce(
thrust::device_ptr<float>(this->displLen_D.Peek()),
thrust::device_ptr<float>(this->displLen_D.Peek() + this->vertexCnt));
if (!CudaSafeCall(hipGetLastError())) {
return false;
}
// hipEventRecord(eventEnd, 0);
// hipEventSynchronize(eventEnd);
// hipEventSynchronize(eventStart);
// hipEventElapsedTime(&dt_ms, eventStart, eventEnd);
// Log::DefaultLog.WriteInfo(
// "%s: Time for thrust::reduce (%u vertices): %f sec\n",
// "DeformableGPUSurfaceMT",
// this->vertexCnt,
// dt_ms/1000.0f);
avgDisplLen /= static_cast<float>(this->vertexCnt);
// if (i%5 == 0) printf("It %i, avgDispl: %.16f, min %.16f\n", i, avgDisplLen, surfMappedMinDisplScl);
// printf("It: %i, avgDispl: %.16f, min %.16f\n", i, avgDisplLen, surfMappedMinDisplScl);
if (avgDisplLen < surfMappedMinDisplScl) {
iterationsNeeded = i+1;
break;
}
::CheckForCudaErrorSync();
}
}
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
Log::DefaultLog.WriteInfo(
"%s: Time for mapping (%u iterations, %u vertices): %f sec\n",
"DeformableGPUSurfaceMT",
iterationsNeeded, this->vertexCnt, dt_ms/1000.0f);
//printf("Mapping : %.10f\n",
// dt_ms/1000.0f);
#endif
#undef USE_TIMER
return CudaSafeCall(hipGetLastError());
}
/*
 * DeformableGPUSurfaceMT::updateVtxPosSubdiv
*/
bool DeformableGPUSurfaceMT::updateVtxPosSubdiv(
float* volTarget_D,
float* vertexBuffer_D,
float* vtxUncertainty_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
float isovalue,
bool useCubicInterpolation,
size_t maxIt,
float surfMappedMinDisplScl,
float springStiffness,
float forceScl,
float externalForcesWeight,
bool trackPath,
bool externalForcesOnly,
bool useThinPlate) {
using namespace megamol::core::utility::log;
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
if (!CudaSafeCall(this->laplacian_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->laplacian_D.Set(0))) {
return false;
}
if (!CudaSafeCall(this->laplacian2_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->laplacian2_D.Set(0))) {
return false;
}
if (!CudaSafeCall(this->accumPath_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->accumPath_D.Set(0x00))) {
return false;
}
// Init uncertainty buffer with zero
if (trackPath) {
if (!CudaSafeCall(hipMemset(vtxUncertainty_D, 0x00, this->vertexCnt*sizeof(float)))) {
return false;
}
}
//#define USE_TIMER
#ifdef USE_TIMER
float dt_ms;
hipEvent_t event1, event2;
hipEventCreate(&event1);
hipEventCreate(&event2);
hipEventRecord(event1, 0);
#endif
int iterationsNeeded = maxIt;
if (!externalForcesOnly) {
// TODO Timer
for (uint i = 0; i < maxIt; ++i) {
// Calc laplacian
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_MeshLaplacian_D) , dim3(Grid(this->vertexCnt, 256)), dim3(256) , 0, 0,
vertexBuffer_D,
this->vertexDataOffsPos,
this->vertexDataStride,
this->vertexNeighbours_D.Peek(),
18,
this->vertexCnt,
(float*)this->laplacian_D.Peek(),
0,
3);
::CheckForCudaErrorSync();
if (useThinPlate) {
// Calc laplacian^2
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_MeshLaplacian_D) , dim3(Grid(this->vertexCnt, 256)), dim3(256) , 0, 0,
(float*)this->laplacian_D.Peek(),
0,
3,
this->vertexNeighbours_D.Peek(),
18,
this->vertexCnt,
(float*)this->laplacian2_D.Peek(),
0,
3);
::CheckForCudaErrorSync();
// Update vertex position
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_UpdateVtxPos_D) , dim3(Grid(this->vertexCnt, 256)), dim3(256) , 0, 0,
volTarget_D,
vertexBuffer_D,
(float*)this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
vtxUncertainty_D,
(float4*)this->externalForces_D.Peek(),
this->laplacian_D.Peek(),
this->laplacian2_D.Peek(),
this->vertexCnt,
externalForcesWeight,
forceScl,
springStiffness,
isovalue,
surfMappedMinDisplScl,
useCubicInterpolation,
trackPath, // Track path of vertices
this->vertexDataOffsPos,
this->vertexDataOffsNormal,
this->vertexDataStride);
} else { // No thin plate aspect
// Update vertex position
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_UpdateVtxPosNoThinPlate_D) , dim3(Grid(this->vertexCnt, 256)), dim3(256) , 0, 0,
volTarget_D,
vertexBuffer_D,
(float*)this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
vtxUncertainty_D,
(float4*)this->externalForces_D.Peek(),
this->laplacian_D.Peek(),
this->vertexCnt,
externalForcesWeight,
forceScl,
isovalue,
surfMappedMinDisplScl,
useCubicInterpolation,
trackPath, // Track path of vertices
this->vertexDataOffsPos,
this->vertexDataOffsNormal,
this->vertexDataStride);
}
// Accumulate displacement length of this iteration step
float avgDisplLen = 0.0f;
avgDisplLen = thrust::reduce(
thrust::device_ptr<float>(this->displLen_D.Peek()),
thrust::device_ptr<float>(this->displLen_D.Peek() + this->vertexCnt));
if (!CudaSafeCall(hipGetLastError())) {
return false;
}
avgDisplLen /= static_cast<float>(this->vertexCnt);
// if (i%5 == 0) printf("It: %i, avgDispl: %.16f, min %.16f\n", i, avgDisplLen, surfMappedMinDisplScl);
// printf("It: %i, avgDispl: %.16f, min %.16f\n", i, avgDisplLen, surfMappedMinDisplScl);
if (avgDisplLen < surfMappedMinDisplScl) {
iterationsNeeded = i+1;
break;
}
::CheckForCudaErrorSync();
}
} else {
// TODO Timer
for (uint i = 0; i < maxIt; ++i) {
// Update vertex position
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_UpdateVtxPosExternalOnlySubdiv_D) , dim3(Grid(this->vertexCnt, 256)), dim3(256) , 0, 0,
volTarget_D,
vertexBuffer_D,
(float*)this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
vtxUncertainty_D,
(float4*)this->externalForces_D.Peek(),
this->accumPath_D.Peek(),
this->vertexFlag_D.Peek(),
this->vertexCnt,
forceScl,
isovalue,
surfMappedMinDisplScl,
useCubicInterpolation,
trackPath, // Track path of vertices
this->vertexDataOffsPos,
this->vertexDataOffsNormal,
this->vertexDataStride);
// Accumulate displacement length of this iteration step
float avgDisplLen = 0.0f;
avgDisplLen = thrust::reduce(
thrust::device_ptr<float>(this->displLen_D.Peek()),
thrust::device_ptr<float>(this->displLen_D.Peek() + this->vertexCnt));
if (!CudaSafeCall(hipGetLastError())) {
return false;
}
avgDisplLen /= static_cast<float>(this->nFlaggedVertices);
// printf("New vertex count %u\n", this->nFlaggedVertices);
// if (i%5 == 0) printf("It %i, avgDispl: %.16f, min %.16f\n", i, avgDisplLen, surfMappedMinDisplScl);
// printf("It: %i, avgDispl: %.16f, min %.1f\n", i, avgDisplLen, surfMappedMinDisplScl);
if (avgDisplLen < surfMappedMinDisplScl) {
iterationsNeeded = i+1;
break;
}
::CheckForCudaErrorSync();
}
}
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
Log::DefaultLog.WriteInfo(
"%s: Time for mapping (%u iterations, %u vertices): %f sec\n",
"DeformableGPUSurfaceMT",
iterationsNeeded, this->vertexCnt, dt_ms/1000.0f);
//printf("Mapping : %.10f\n",
// dt_ms/1000.0f);
#endif
return CudaSafeCall(hipGetLastError());
}
/*
* ComputeVtxDiffValue0_D
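 * Samples the first volume texture at each vertex position of the first mesh
 * and stores the sampled value in diff_D.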
*/
__global__ void DeformableGPUSurfaceMT_ComputeVtxDiffValue0_D(
float *diff_D,
float *tex0_D,
float *vtxData0_D,
size_t vertexCnt) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
const int vertexDataStride = 9; // TODO
const int vertexDataOffsPos = 0;
float3 pos;
pos.x = vtxData0_D[vertexDataStride*idx + vertexDataOffsPos +0];
pos.y = vtxData0_D[vertexDataStride*idx + vertexDataOffsPos +1];
pos.z = vtxData0_D[vertexDataStride*idx + vertexDataOffsPos +2];
diff_D[idx] = ::SampleFieldAtPosTrilin_D<float, true>(pos, tex0_D);
}
/*
* ComputeVtxDiffValue1_D
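 * Samples the second volume at each vertex position of the second mesh and
 * replaces diff_D with the absolute difference to the previously stored value.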
*/
__global__ void DeformableGPUSurfaceMT_ComputeVtxDiffValue1_D(
float *diff_D,
float *tex1_D,
float *vtxData1_D,
size_t vertexCnt) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
const int vertexDataStride = 9; // TODO
const int vertexDataOffsPos = 0;
float valFirst = diff_D[idx];
float3 pos;
pos.x = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +0];
pos.y = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +1];
pos.z = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +2];
float valSec = ::SampleFieldAtPosTrilin_D<float, true>(pos, tex1_D);
valSec = abs(valSec-valFirst);
diff_D[idx] = valSec;
}
/*
* DeformableGPUSurfaceMT_ComputeVtxDiffValue1Fitted_D
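 * Samples the second volume at the vertex position transformed back by the
 * inverse of the rigid body fit (translation, rotation, centroid) and stores
 * the sampled value in diff_D.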
*/
__global__ void DeformableGPUSurfaceMT_ComputeVtxDiffValue1Fitted_D(
float *diff_D,
float *tex1_D,
float *vtxData1_D,
float *rotation_D,
float3 translation,
float3 centroid,
size_t vertexCnt) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
const int vertexDataStride = 9; // TODO
const int vertexDataOffsPos = 0;
//float valFirst = diff_D[idx];
float3 pos;
pos.x = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +0];
pos.y = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +1];
pos.z = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +2];
// Revert translation to move to origin
pos.x -= translation.x;
pos.y -= translation.y;
pos.z -= translation.z;
// Revert rotation
float3 posRot;
posRot.x = rotation_D[0] * pos.x +
rotation_D[3] * pos.y +
rotation_D[6] * pos.z;
posRot.y = rotation_D[1] * pos.x +
rotation_D[4] * pos.y +
rotation_D[7] * pos.z;
posRot.z = rotation_D[2] * pos.x +
rotation_D[5] * pos.y +
rotation_D[8] * pos.z;
// Move to old centroid
posRot.x += centroid.x;
posRot.y += centroid.y;
posRot.z += centroid.z;
float valSec = ::SampleFieldAtPosTrilin_D<float, true>(posRot, tex1_D);
//valSec = abs(valSec-valFirst);
diff_D[idx] = valSec;
printf("%f\n", valSec);
}
/*
* ComputeVtxSignDiffValue1_D
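 * Sets signdiff_D to 1.0 if the value sampled from the second volume at the
 * vertex position has a different sign than the previously stored value,
 * 0.0 otherwise.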
*/
__global__ void DeformableGPUSurfaceMT_ComputeVtxSignDiffValue1_D(
float *signdiff_D,
float *tex1_D,
float *vtxData1_D,
size_t vertexCnt) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
const int vertexDataStride = 9; // TODO
const int vertexDataOffsPos = 0;
float valFirst = signdiff_D[idx];
float3 pos;
pos.x = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +0];
pos.y = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +1];
pos.z = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +2];
float valSec = ::SampleFieldAtPosTrilin_D<float, true>(pos, tex1_D);
valSec = float(valSec*valFirst < 0); // TODO Use binary operator
signdiff_D[idx] = valSec;
}
/*
 * ComputeVtxSignDiffValue1Fitted_D
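 * Like ComputeVtxSignDiffValue1_D, but samples the second volume at the
 * vertex position transformed back by the inverse of the rigid body fit.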
*/
__global__ void DeformableGPUSurfaceMT_ComputeVtxSignDiffValue1Fitted_D(
float *signdiff_D,
float *tex1_D,
float *vtxData1_D,
float *rotation_D,
float3 translation,
float3 centroid,
size_t vertexCnt) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
const int vertexDataStride = 9; // TODO
const int vertexDataOffsPos = 0;
float valFirst = signdiff_D[idx];
// float3 pos;
// pos.x = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +0];
// pos.y = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +1];
// pos.z = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +2];
float3 pos;
pos.x = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +0];
pos.y = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +1];
pos.z = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +2];
// Revert translation to move to origin
pos.x -= translation.x;
pos.y -= translation.y;
pos.z -= translation.z;
// Revert rotation
float3 posRot;
posRot.x = rotation_D[0] * pos.x +
rotation_D[3] * pos.y +
rotation_D[6] * pos.z;
posRot.y = rotation_D[1] * pos.x +
rotation_D[4] * pos.y +
rotation_D[7] * pos.z;
posRot.z = rotation_D[2] * pos.x +
rotation_D[5] * pos.y +
rotation_D[8] * pos.z;
// Move to old centroid
posRot.x += centroid.x;
posRot.y += centroid.y;
posRot.z += centroid.z;
float valSec = ::SampleFieldAtPosTrilin_D<float, true>(posRot, tex1_D);
valSec = float(valSec*valFirst < 0); // TODO Use binary operator
signdiff_D[idx] = valSec;
}
/*
* DeformableGPUSurfaceMT::ComputeVtxDiffValue
*/
bool DeformableGPUSurfaceMT::ComputeVtxDiffValue(
float *diff_D,
float *tex0_D,
int3 texDim0,
float3 texOrg0,
float3 texDelta0,
float *tex1_D,
int3 texDim1,
float3 texOrg1,
float3 texDelta1,
GLuint vtxDataVBO0,
GLuint vtxDataVBO1,
size_t vertexCnt) {
using namespace megamol::core::utility::log;
/* Get pointers to vertex data */
cudaGraphicsResource* cudaTokens[2];
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0], vtxDataVBO0,
hipGraphicsMapFlagsNone))) {
return false;
}
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[1], vtxDataVBO1,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt0, *vboPt1;
size_t vboSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt0), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt1), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
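// The difference value is computed in two passes: the first kernel samples
// volume #0 at the vertices of mesh #0, the second kernel samples volume #1
// at the vertices of mesh #1 and stores the absolute difference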
// Init CUDA grid for texture #0
if (!initGridParams(texDim0, texOrg0, texDelta0)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
DeformableGPUSurfaceMT::ClassName());
return false;
}
// Call first kernel
#ifdef USE_TIMER
float dt_ms;
hipEvent_t event1, event2;
hipEventCreate(&event1);
hipEventCreate(&event2);
hipEventRecord(event1, 0);
#endif
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_ComputeVtxDiffValue0_D) , dim3(Grid(vertexCnt, 256)), dim3(256) , 0, 0,
diff_D,
tex0_D,
vboPt0,
vertexCnt);
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'ComputeVtxDiffValue0_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
// Init CUDA grid for texture #1
if (!initGridParams(texDim1, texOrg1, texDelta1)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
DeformableGPUSurfaceMT::ClassName());
return false;
}
// Call second kernel
#ifdef USE_TIMER
hipEventRecord(event1, 0);
#endif
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_ComputeVtxDiffValue1_D) , dim3(Grid(vertexCnt, 256)), dim3(256) , 0, 0,
diff_D,
tex1_D,
vboPt1,
vertexCnt);
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'ComputeVtxDiffValue1_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
if (!CudaSafeCall(hipGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
return true;
}
/*
* DeformableGPUSurfaceMT::ComputeVtxDiffValueFitted
*/
bool DeformableGPUSurfaceMT::ComputeVtxDiffValueFitted(
float *diff_D,
float centroid[3],
float rotMat[9],
float transVec[3],
float *tex0_D,
int3 texDim0,
float3 texOrg0,
float3 texDelta0,
float *tex1_D,
int3 texDim1,
float3 texOrg1,
float3 texDelta1,
GLuint vtxDataVBO0,
GLuint vtxDataVBO1,
size_t vertexCnt) {
CudaDevArr<float> rotate_D;
// Rotate for best fit
rotate_D.Validate(9);
if (!CudaSafeCall(hipMemcpy((void *)rotate_D.Peek(), &rotMat[0],
9*sizeof(float), hipMemcpyHostToDevice))) {
return false;
}
using namespace megamol::core::utility::log;
/* Get pointers to vertex data */
cudaGraphicsResource* cudaTokens[2];
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0], vtxDataVBO0,
hipGraphicsMapFlagsNone))) {
return false;
}
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[1], vtxDataVBO1,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt0, *vboPt1;
size_t vboSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt0), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt1), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
// Init CUDA grid for texture #0
if (!initGridParams(texDim0, texOrg0, texDelta0)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
DeformableGPUSurfaceMT::ClassName());
return false;
}
// Call first kernel
#ifdef USE_TIMER
float dt_ms;
hipEvent_t event1, event2;
hipEventCreate(&event1);
hipEventCreate(&event2);
hipEventRecord(event1, 0);
#endif
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_ComputeVtxDiffValue0_D) , dim3(Grid(vertexCnt, 256)), dim3(256) , 0, 0,
diff_D,
tex0_D,
vboPt0,
vertexCnt);
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'ComputeVtxDiffValue0_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
// Init CUDA grid for texture #1
if (!initGridParams(texDim1, texOrg1, texDelta1)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
DeformableGPUSurfaceMT::ClassName());
return false;
}
// Call second kernel
#ifdef USE_TIMER
hipEventRecord(event1, 0);
#endif
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_ComputeVtxDiffValue1Fitted_D) , dim3(Grid(vertexCnt, 256)), dim3(256) , 0, 0,
diff_D,
tex1_D,
vboPt1,
rotate_D.Peek(),
make_float3(transVec[0],transVec[1],transVec[2]),
make_float3(centroid[0],centroid[1],centroid[2]),
vertexCnt);
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'ComputeVtxDiffValue1_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
if (!CudaSafeCall(hipGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
if (!CudaSafeCall(rotate_D.Release())) {
return false;
}
return true;
}
/*
* DeformableGPUSurfaceMT::ComputeVtxSignDiffValue
*/
bool DeformableGPUSurfaceMT::ComputeVtxSignDiffValue(
float *signdiff_D,
float *tex0_D,
int3 texDim0,
float3 texOrg0,
float3 texDelta0,
float *tex1_D,
int3 texDim1,
float3 texOrg1,
float3 texDelta1,
GLuint vtxDataVBO0,
GLuint vtxDataVBO1,
size_t vertexCnt) {
using namespace megamol::core::utility::log;
/* Get pointers to vertex data */
cudaGraphicsResource* cudaTokens[2];
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0], vtxDataVBO0,
hipGraphicsMapFlagsNone))) {
return false;
}
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[1], vtxDataVBO1,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt0;
size_t vboSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt0), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt1;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt1), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
// Init CUDA grid for texture #0
if (!initGridParams(texDim0, texOrg0, texDelta0)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
DeformableGPUSurfaceMT::ClassName());
return false;
}
// Call first kernel
#ifdef USE_TIMER
float dt_ms;
hipEvent_t event1, event2;
hipEventCreate(&event1);
hipEventCreate(&event2);
hipEventRecord(event1, 0);
#endif
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_ComputeVtxDiffValue0_D) , dim3(Grid(vertexCnt, 256)), dim3(256) , 0, 0,
signdiff_D,
tex0_D,
vboPt0,
vertexCnt);
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'ComputeVtxSignDiffValue0_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
// Init CUDA grid for texture #1
if (!initGridParams(texDim1, texOrg1, texDelta1)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
DeformableGPUSurfaceMT::ClassName());
return false;
}
// Call second kernel
#ifdef USE_TIMER
hipEventRecord(event1, 0);
#endif
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_ComputeVtxSignDiffValue1_D) , dim3(Grid(vertexCnt, 256)), dim3(256) , 0, 0,
signdiff_D,
tex1_D,
vboPt1,
vertexCnt);
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'ComputeVtxDiffValue1_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
if (!CudaSafeCall(hipGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
return true;
}
/*
* DeformableGPUSurfaceMT::ComputeVtxSignDiffValueFitted
*/
bool DeformableGPUSurfaceMT::ComputeVtxSignDiffValueFitted(
float *signdiff_D,
float centroid[3],
float rotMat[9],
float transVec[3],
float *tex0_D,
int3 texDim0,
float3 texOrg0,
float3 texDelta0,
float *tex1_D,
int3 texDim1,
float3 texOrg1,
float3 texDelta1,
GLuint vtxDataVBO0,
GLuint vtxDataVBO1,
size_t vertexCnt) {
CudaDevArr<float> rotate_D;
// Rotate for best fit
rotate_D.Validate(9);
if (!CudaSafeCall(hipMemcpy((void *)rotate_D.Peek(), &rotMat[0],
9*sizeof(float), hipMemcpyHostToDevice))) {
return false;
}
using namespace megamol::core::utility::log;
/* Get pointers to vertex data */
cudaGraphicsResource* cudaTokens[2];
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0], vtxDataVBO0,
hipGraphicsMapFlagsNone))) {
return false;
}
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[1], vtxDataVBO1,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt0;
size_t vboSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt0), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt1;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt1), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
// Init CUDA grid for texture #0
if (!initGridParams(texDim0, texOrg0, texDelta0)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
DeformableGPUSurfaceMT::ClassName());
return false;
}
// Call first kernel
#ifdef USE_TIMER
float dt_ms;
hipEvent_t event1, event2;
hipEventCreate(&event1);
hipEventCreate(&event2);
hipEventRecord(event1, 0);
#endif
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_ComputeVtxDiffValue0_D) , dim3(Grid(vertexCnt, 256)), dim3(256) , 0, 0,
signdiff_D,
tex0_D,
vboPt0,
vertexCnt);
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'ComputeVtxSignDiffValue0_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
// Init CUDA grid for texture #1
if (!initGridParams(texDim1, texOrg1, texDelta1)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
DeformableGPUSurfaceMT::ClassName());
return false;
}
// Call second kernel
#ifdef USE_TIMER
hipEventRecord(event1, 0);
#endif
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_ComputeVtxSignDiffValue1Fitted_D) , dim3(Grid(vertexCnt, 256)), dim3(256) , 0, 0,
signdiff_D,
tex1_D,
vboPt1,
rotate_D.Peek(),
make_float3(transVec[0],transVec[1],transVec[2]),
make_float3(centroid[0],centroid[1],centroid[2]),
vertexCnt);
#ifdef USE_TIMER
hipEventRecord(event2, 0);
hipEventSynchronize(event1);
hipEventSynchronize(event2);
hipEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'ComputeVtxDiffValue1_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
if (!CudaSafeCall(hipGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
if (!CudaSafeCall(rotate_D.Release())) {
return false;
}
return true;
}
/*
* DeformableGPUSurfaceMT_CalcHausdorffDistance_D
*/
__global__ void DeformableGPUSurfaceMT_CalcHausdorffDistance_D(
float *vtxData1,
float *vtxData2,
float *hausdorffdistVtx_D,
uint vertexCnt1,
uint vertexCnt2) {
const uint posOffs = 0; // TODO Define const device vars
const uint stride = 9;
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt1) {
return;
}
float3 pos1 = make_float3(
vtxData1[stride*idx+posOffs+0],
vtxData1[stride*idx+posOffs+1],
vtxData1[stride*idx+posOffs+2]);
float3 pos2;
float distSqr;
float minDistSqr = 10000000.0;
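// Brute-force scan over all vertices of the second mesh: keep the smallest
// squared distance, i.e. this vertex's contribution to the one-sided
// Hausdorff distance. The square root is deferred to the host-side reduction.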
for (int i = 0; i < vertexCnt2; ++i) {
pos2 = make_float3(
vtxData2[stride*i+posOffs+0],
vtxData2[stride*i+posOffs+1],
vtxData2[stride*i+posOffs+2]);
distSqr = (pos2.x-pos1.x)*(pos2.x-pos1.x) +
(pos2.y-pos1.y)*(pos2.y-pos1.y) +
(pos2.z-pos1.z)*(pos2.z-pos1.z);
minDistSqr = min(minDistSqr,distSqr);
}
hausdorffdistVtx_D[idx] = minDistSqr;
}
/*
* DeformableGPUSurfaceMT::CalcHausdorffDistance
*/
float DeformableGPUSurfaceMT::CalcHausdorffDistance(
DeformableGPUSurfaceMT *surf1,
DeformableGPUSurfaceMT *surf2,
float *hausdorffdistVtx_D,
bool symmetric) {
// TODO Implement symmetric version
/* Get pointers to vertex data */
cudaGraphicsResource* cudaTokens[2];
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0], surf1->GetVtxDataVBO(),
hipGraphicsMapFlagsNone))) {
return 0.0f;
}
// Register memory with CUDA
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[1], surf2->GetVtxDataVBO(),
hipGraphicsMapFlagsNone))) {
return 0.0f;
}
if (!CudaSafeCall(hipGraphicsMapResources(2, cudaTokens, 0))) {
return 0.0f;
}
// Get mapped pointers to the vertex data buffers
float *vboPt0, *vboPt1;
size_t vboSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt0), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return 0.0f;
}
// Get mapped pointers to the vertex data buffers
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt1), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return 0.0f;
}
// Calc kernel
// TODO Implement less lazy and faster version of Hausdorff distance
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_CalcHausdorffDistance_D) , dim3(Grid(surf1->GetVertexCnt(), 256)), dim3(256) , 0, 0,
vboPt0,
vboPt1,
hausdorffdistVtx_D,
surf1->GetVertexCnt(),
surf2->GetVertexCnt());
if (!CudaSafeCall(hipGraphicsUnmapResources(2, cudaTokens, 0))) {
return 0.0f;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return 0.0f;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]))) {
return 0.0f;
}
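// Reduce the per-vertex minimum squared distances to their maximum, which is
// the squared one-sided Hausdorff distance from surf1 to surf2; the square
// root converts it back to a distance.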
float res = 0.0;
res = thrust::reduce(
thrust::device_ptr<float>(hausdorffdistVtx_D),
thrust::device_ptr<float>(hausdorffdistVtx_D + surf1->GetVertexCnt()),
-1.0,
thrust::maximum<float>());
return sqrt(res);
}
__global__ void TrackPathSubdivVertices_D(
float *sourceVolume_D,
float *vertexData_D,
float *vertexFlag_D,
float *vertexExternalForcesScl_D,
float *displLen_D,
float *vtxUncertainty_D,
float4 *gradient_D,
int *accumPath_D,
uint vertexCnt,
float forcesScl,
float isoval,
float minDispl) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
if (vertexFlag_D[idx] == 0.0) {
vertexData_D[9*idx+3] = 0.0;
vertexData_D[9*idx+4] = 1.0;
vertexData_D[9*idx+5] = 0.0;
displLen_D[idx] = 0.0; // Old vertices are by definition converged
return; // This is an old vertex
}
// Check convergence criterion
float lastDisplLen = displLen_D[idx];
if (lastDisplLen <= minDispl) {
displLen_D[idx] = 0.0;
return; // Vertex is converged
}
/* Retrieve stuff from global device memory */
// Get initial position from global device memory
float3 posOld = make_float3(
vertexData_D[9*idx+0],
vertexData_D[9*idx+1],
vertexData_D[9*idx+2]);
// Get initial scale factor for external forces
float externalForcesScl = vertexExternalForcesScl_D[idx];
//float externalForcesSclOld = externalForcesScl;
/* Update position */
// No warp divergence here, since useCubicInterpolation is the same for all
// threads
//const float sampleDens = SampleFieldAtPosTrilin_D<float>(posOld, sourceVolume_D);
const float sampleDens = SampleFieldAtPosTricub_D<float, false>(posOld, sourceVolume_D);
// Switch sign and scale down if necessary
bool negative = externalForcesScl < 0;
bool outside = sampleDens <= isoval;
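// Branchless update: if the sign of the force scale no longer matches the
// side of the iso-surface the vertex is on (i.e. the vertex has stepped
// across it), flip the direction and halve the step size.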
int switchSign = int((negative && outside)||(!negative && !outside));
externalForcesScl = externalForcesScl*(1.0*(1-switchSign) - 1.0*switchSign);
externalForcesScl *= (1.0*(1-switchSign) + 0.5*(switchSign));
if (bool(switchSign) && (accumPath_D[idx] != 0)) {
accumPath_D[idx] = 0;
} else if (bool(switchSign) && (accumPath_D[idx] == 0)) {
accumPath_D[idx] = 1;
}
// Sample gradient
//float4 externalForceTmp = SampleFieldAtPosTrilin_D<float4>(posOld, gradient_D);
float4 externalForceTmp = SampleFieldAtPosTricub_D<float4, false>(posOld, gradient_D);
float3 externalForce;
externalForce.x = externalForceTmp.x;
externalForce.y = externalForceTmp.y;
externalForce.z = externalForceTmp.z;
externalForce = safeNormalize(externalForce);
externalForce *= forcesScl*externalForcesScl;
float3 posNew = posOld + externalForce; // Integrate backwards
/* Write back to global device memory */
// New pos
vertexData_D[9*idx+0] = posNew.x;
vertexData_D[9*idx+1] = posNew.y;
vertexData_D[9*idx+2] = posNew.z;
// Write external forces scale factor back to global device memory
vertexExternalForcesScl_D[idx] = externalForcesScl;
// float3 diff = posNew-posOld;
// float diffLen = length(diff);
float diffLen = abs(forcesScl*externalForcesScl);
// if ((abs(externalForcesScl) == 1.0f)) {
// vtxUncertainty_D[idx] += diffLen;
// }
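// Accumulate the traveled path length into the per-vertex uncertainty value;
// the sign depends on whether the vertex has crossed the iso-surface since
// the last sign flip (tracked via accumPath_D).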
if (accumPath_D[idx] == 0) {
vtxUncertainty_D[idx] += diffLen;
} else if(accumPath_D[idx] != 0) {
vtxUncertainty_D[idx] -= diffLen;
}
// Displ scl for convergence
displLen_D[idx] = diffLen;
//displLen_D[idx] = 0.1;
vertexData_D[9*idx+3] = 1.0;
vertexData_D[9*idx+4] = 0.0;
vertexData_D[9*idx+5] = 1.0;
}
/*
* DeformableGPUSurfaceMT::TrackPathSubdivVertices
*/
bool DeformableGPUSurfaceMT::TrackPathSubdivVertices(
float *sourceVolume_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
float forcesScl,
float minDispl,
float isoval,
uint maxIt) {
using namespace megamol::core::utility::log;
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
/* 1. Reinitialize VBO and copy back uncertainty values */
cudaGraphicsResource* cudaTokens[1];
cudaGraphicsResource* cudaTokens2[2];
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboVtxPath,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsMapResources(1, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *uncertaintyPt;
size_t vboVtxPathSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&uncertaintyPt), // The mapped pointer
&vboVtxPathSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Copy old values to temporary array
if (!CudaSafeCall(this->vertexUncertaintyTmp_D.Validate(vboVtxPathSize/sizeof(float)))) {
return false;
}
if (!CudaSafeCall(hipMemcpy(
this->vertexUncertaintyTmp_D.Peek(),
uncertaintyPt,
vboVtxPathSize,
hipMemcpyDeviceToDevice))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnmapResources(1, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
// Re-initialize the VBO
if (!this->InitVtxPathVBO(this->vertexCnt)) {
return false;
}
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens2[0],
this->vboVtxPath,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens2[1],
this->vboVtxData,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsMapResources(2, cudaTokens2, 0))) {
return false;
}
float *vboVertexPt;
size_t vboVertexSize;
// Get mapped pointers to the vertex data buffers
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&uncertaintyPt), // The mapped pointer
&vboVtxPathSize, // The size of the accessible data
cudaTokens2[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboVertexPt), // The mapped pointer
&vboVertexSize, // The size of the accessible data
cudaTokens2[1]))) { // The mapped resource
return false;
}
if (!CudaSafeCall(hipMemset(uncertaintyPt, 0x00, vboVtxPathSize))) {
return false;
}
if (!CudaSafeCall(hipMemcpy(
uncertaintyPt,
this->vertexUncertaintyTmp_D.Peek(),
sizeof(float)*this->vertexUncertaintyTmp_D.GetCount(),
hipMemcpyDeviceToDevice))) {
return false;
}
/* 2. Write uncertainty values of new vertices */
// Get copy of vertex buffer
if (!CudaSafeCall(this->trackedSubdivVertexData_D.Validate(this->vertexCnt*this->vertexDataStride))) {
return false;
}
if (!CudaSafeCall(hipMemcpy(this->trackedSubdivVertexData_D.Peek(),
vboVertexPt,
vboVertexSize,
hipMemcpyDeviceToDevice))) {
return false;
}
// Check/prepare necessary arrays
if (sourceVolume_D == NULL) {
return false;
}
if (!CudaSafeCall(this->displLen_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->displLen_D.Set(0x00))) {
return false;
}
if (!CudaSafeCall(this->vertexExternalForcesScl_D.Validate(this->vertexCnt))) {
return false;
}
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_InitExternalForceScl_D) , dim3(Grid(this->vertexCnt, 256)), dim3(256) , 0, 0,
(float*)this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
sourceVolume_D,
this->trackedSubdivVertexData_D.Peek(),
minDispl,
this->vertexCnt,
isoval,
this->vertexDataOffsPos,
this->vertexDataStride);
if (!CheckForCudaError()) {
return false;
}
if (this->vertexFlag_D.GetCount() != this->vertexCnt) {
if (!CudaSafeCall(this->vertexFlag_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->vertexFlag_D.Set(0x00))) {
return false;
}
}
if (!CudaSafeCall(this->accumPath_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->accumPath_D.Set(0x00))) {
return false;
}
uint iterationsNeeded = 0;
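// Iteratively advect the flagged (newly subdivided) vertices along the
// gradient field until the average displacement per flagged vertex drops
// below minDispl or the maximum number of iterations is reached.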
for (uint i = 0; i < maxIt; ++i) {
// Update vertex position
hipLaunchKernelGGL(( TrackPathSubdivVertices_D) , dim3(Grid(this->vertexCnt, 256)), dim3(256) , 0, 0,
sourceVolume_D,
this->trackedSubdivVertexData_D.Peek(),
this->vertexFlag_D.Peek(),
this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
uncertaintyPt,
(float4*)(this->externalForces_D.Peek()),
this->accumPath_D.Peek(),
this->vertexCnt,
forcesScl,
isoval,
minDispl);
if (!CheckForCudaError()) {
return false;
}
// Accumulate displacement length of this iteration step
float avgDisplLen = 0.0f;
avgDisplLen = thrust::reduce(
thrust::device_ptr<float>(this->displLen_D.Peek()),
thrust::device_ptr<float>(this->displLen_D.Peek() + this->vertexCnt));
if (!CudaSafeCall(hipGetLastError())) {
return false;
}
// printf("Number of flagged vertices %u, %f\n", this->nFlaggedVertices, avgDisplLen);
avgDisplLen /= static_cast<float>(this->nFlaggedVertices);
// if (i%10 == 0) printf("It %i, avgDispl: %.16f, min %.16f\n", i, avgDisplLen, minDispl);
// printf("It: %i, avgDispl: %.16f, min %.16f\n", i, avgDisplLen, minDispl);
if (avgDisplLen < minDispl) {
iterationsNeeded = i+1;
break;
}
::CheckForCudaErrorSync();
}
if (!CudaSafeCall(hipGraphicsUnmapResources(2, cudaTokens2, 0))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens2[0]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens2[1]))) {
return false;
}
return CheckForCudaError();
}
/*
* DeformableGPUSurfaceMT_ComputeSurfAttribSignDiff0_D
*/
__global__ void DeformableGPUSurfaceMT_ComputeSurfAttribSignDiff0_D (
float *vertexAttrib_D,
float *vertexDataEnd_D,
float *tex0_D,
uint vertexCnt) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
const int vertexDataStride = 9; // TODO
const int vertexDataOffsPos = 0;
float3 pos;
pos.x = vertexDataEnd_D[vertexDataStride*idx + vertexDataOffsPos +0];
pos.y = vertexDataEnd_D[vertexDataStride*idx + vertexDataOffsPos +1];
pos.z = vertexDataEnd_D[vertexDataStride*idx + vertexDataOffsPos +2];
vertexAttrib_D[idx] = ::SampleFieldAtPosTrilin_D<float, true>(pos, tex0_D);
}
/*
* DeformableGPUSurfaceMT_ComputeSurfAttribSignDiff1_D
*/
__global__ void DeformableGPUSurfaceMT_ComputeSurfAttribSignDiff1_D (
float *vertexAttrib_D,
float *vertexDataStart_D,
float *vertexDataTrackedBack_D,
float *vertexFlag_D,
float *tex1_D,
float *rotation_D,
float3 translation,
float3 centroid,
uint vertexCnt) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
const int vertexDataStride = 9; // TODO
const int vertexDataOffsPos = 0;
float3 pos;
if (vertexFlag_D[idx] == 1.0) {
pos.x = vertexDataTrackedBack_D[vertexDataStride*idx + vertexDataOffsPos +0];
pos.y = vertexDataTrackedBack_D[vertexDataStride*idx + vertexDataOffsPos +1];
pos.z = vertexDataTrackedBack_D[vertexDataStride*idx + vertexDataOffsPos +2];
} else {
pos.x = vertexDataStart_D[vertexDataStride*idx + vertexDataOffsPos +0];
pos.y = vertexDataStart_D[vertexDataStride*idx + vertexDataOffsPos +1];
pos.z = vertexDataStart_D[vertexDataStride*idx + vertexDataOffsPos +2];
}
// Revert translation to move to origin
pos.x -= translation.x;
pos.y -= translation.y;
pos.z -= translation.z;
// Revert rotation
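// Since the fitting rotation is orthonormal, applying its transpose (note the
// transposed indexing below) reverts the rotation.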
float3 posRot;
posRot.x = rotation_D[0] * pos.x +
rotation_D[3] * pos.y +
rotation_D[6] * pos.z;
posRot.y = rotation_D[1] * pos.x +
rotation_D[4] * pos.y +
rotation_D[7] * pos.z;
posRot.z = rotation_D[2] * pos.x +
rotation_D[5] * pos.y +
rotation_D[8] * pos.z;
// Move to old centroid
posRot.x += centroid.x;
posRot.y += centroid.y;
posRot.z += centroid.z;
float attribOld = vertexAttrib_D[idx];
float attribNew = ::SampleFieldAtPosTrilin_D<float, true>(posRot, tex1_D);
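// Flag vertices where the sampled attribute changes sign between the two
// textures: 1.0 if the signs differ, 0.0 otherwise.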
vertexAttrib_D[idx] = int(attribOld*attribNew < 0); // 1.0 or 0.0
}
/*
* DeformableGPUSurfaceMT::ComputeSurfAttribSignDiff
*/
bool DeformableGPUSurfaceMT::ComputeSurfAttribSignDiff(
DeformableGPUSurfaceMT &surfStart,
float centroid[3], // In case the start surface has been fitted using RMSD
float rotMat[9],
float transVec[3],
float *tex0_D,
int3 texDim0,
float3 texOrg0,
float3 texDelta0,
float *tex1_D,
int3 texDim1,
float3 texOrg1,
float3 texDelta1) {
using namespace megamol::core::utility::log;
if (!this->InitVtxAttribVBO(this->vertexCnt)) {
return false;
}
// Get pointer to vertex attribute array
cudaGraphicsResource* cudaTokens[3];
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboVtxAttr,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[1],
this->vboVtxData,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[2],
surfStart.GetVtxDataVBO(),
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsMapResources(3, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vertexAttrib_D;
size_t vboVtxAttribSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vertexAttrib_D), // The mapped pointer
&vboVtxAttribSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vertexDataEnd_D;
size_t vboEndSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vertexDataEnd_D), // The mapped pointer
&vboEndSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vertexDataStart_D;
size_t vboStartSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vertexDataStart_D), // The mapped pointer
&vboStartSize, // The size of the accessible data
cudaTokens[2]))) { // The mapped resource
return false;
}
// Init grid params
// Init CUDA grid for texture #0
if (!initGridParams(texDim0, texOrg0, texDelta0)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
DeformableGPUSurfaceMT::ClassName());
return false;
}
// Compute difference for new and old vertices (after subdivision)
// Part one: sample value for new vertices
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_ComputeSurfAttribSignDiff0_D) , dim3(Grid(this->vertexCnt, 256)), dim3(256) , 0, 0,
vertexAttrib_D,
vertexDataEnd_D,
tex0_D,
this->vertexCnt);
CudaDevArr<float> rotate_D;
// Rotate for best fit
rotate_D.Validate(9);
if (!CudaSafeCall(hipMemcpy((void *)rotate_D.Peek(), &rotMat[0],
9*sizeof(float), hipMemcpyHostToDevice))) {
return false;
}
// Init grid params
// Init CUDA grid for texture #1
if (!initGridParams(texDim1, texOrg1, texDelta1)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
DeformableGPUSurfaceMT::ClassName());
return false;
}
if (this->vertexFlag_D.GetCount() == 0) {
if (!CudaSafeCall(this->vertexFlag_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->vertexFlag_D.Set(0x00))) {
return false;
}
}
// Compute difference for new and old vertices (after subdivision)
// Part two: sample value for old/tracked back vertices
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_ComputeSurfAttribSignDiff1_D) , dim3(Grid(this->vertexCnt, 256)), dim3(256) , 0, 0,
vertexAttrib_D,
vertexDataStart_D,
this->trackedSubdivVertexData_D.Peek(), // Tracked back vertices, needed for sampling
this->vertexFlag_D.Peek(),
tex1_D,
rotate_D.Peek(),
make_float3(transVec[0],transVec[1],transVec[2]),
make_float3(centroid[0],centroid[1],centroid[2]),
this->vertexCnt);
if (!CheckForCudaError()) {
return false;
}
rotate_D.Release();
if (!CudaSafeCall(hipGraphicsUnmapResources(3, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[2]))) {
return false;
}
return true;
}
/*
* DeformableGPUSurfaceMT_ComputeSurfAttribDiff0_D
*/
__global__ void DeformableGPUSurfaceMT_ComputeSurfAttribDiff0_D (
float *vertexAttrib_D,
float *vertexDataEnd_D,
float *tex0_D,
uint vertexCnt) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
const int vertexDataStride = 9; // TODO
const int vertexDataOffsPos = 0;
float3 pos;
pos.x = vertexDataEnd_D[vertexDataStride*idx + vertexDataOffsPos +0];
pos.y = vertexDataEnd_D[vertexDataStride*idx + vertexDataOffsPos +1];
pos.z = vertexDataEnd_D[vertexDataStride*idx + vertexDataOffsPos +2];
vertexAttrib_D[idx] = ::SampleFieldAtPosTrilin_D<float, true>(pos, tex0_D);
}
/*
* DeformableGPUSurfaceMT_ComputeSurfAttribDiff1_D
*/
__global__ void DeformableGPUSurfaceMT_ComputeSurfAttribDiff1_D (
float *vertexAttrib_D,
float *vertexDataStart_D,
float *vertexDataTrackedBack_D,
float *vertexFlag_D,
float *tex1_D,
float *rotation_D,
float3 translation,
float3 centroid,
uint vertexCnt) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
const int vertexDataStride = 9; // TODO
const int vertexDataOffsPos = 0;
float3 pos;
if (vertexFlag_D[idx] == 1.0) {
pos.x = vertexDataTrackedBack_D[vertexDataStride*idx + vertexDataOffsPos +0];
pos.y = vertexDataTrackedBack_D[vertexDataStride*idx + vertexDataOffsPos +1];
pos.z = vertexDataTrackedBack_D[vertexDataStride*idx + vertexDataOffsPos +2];
} else {
pos.x = vertexDataStart_D[vertexDataStride*idx + vertexDataOffsPos +0];
pos.y = vertexDataStart_D[vertexDataStride*idx + vertexDataOffsPos +1];
pos.z = vertexDataStart_D[vertexDataStride*idx + vertexDataOffsPos +2];
}
// Revert translation to move to origin
pos.x -= translation.x;
pos.y -= translation.y;
pos.z -= translation.z;
// Revert rotation
float3 posRot;
posRot.x = rotation_D[0] * pos.x +
rotation_D[3] * pos.y +
rotation_D[6] * pos.z;
posRot.y = rotation_D[1] * pos.x +
rotation_D[4] * pos.y +
rotation_D[7] * pos.z;
posRot.z = rotation_D[2] * pos.x +
rotation_D[5] * pos.y +
rotation_D[8] * pos.z;
// Move to old centroid
posRot.x += centroid.x;
posRot.y += centroid.y;
posRot.z += centroid.z;
vertexAttrib_D[idx] = abs(vertexAttrib_D[idx] - ::SampleFieldAtPosTrilin_D<float, true>(posRot, tex1_D));
}
/*
* DeformableGPUSurfaceMT::ComputeSurfAttribDiff
*/
bool DeformableGPUSurfaceMT::ComputeSurfAttribDiff(
DeformableGPUSurfaceMT &surfStart,
float centroid[3], // In case the start surface has been fitted using RMSD
float rotMat[9],
float transVec[3],
float *tex0_D,
int3 texDim0,
float3 texOrg0,
float3 texDelta0,
float *tex1_D,
int3 texDim1,
float3 texOrg1,
float3 texDelta1) {
using namespace megamol::core::utility::log;
if (!this->InitVtxAttribVBO(this->vertexCnt)) {
return false;
}
// Get pointer to vertex attribute array
cudaGraphicsResource* cudaTokens[3];
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboVtxAttr,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[1],
this->vboVtxData,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[2],
surfStart.GetVtxDataVBO(),
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsMapResources(3, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vertexAttrib_D;
size_t vboVtxAttribSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vertexAttrib_D), // The mapped pointer
&vboVtxAttribSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vertexDataEnd_D;
size_t vboEndSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vertexDataEnd_D), // The mapped pointer
&vboEndSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vertexDataStart_D;
size_t vboStartSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vertexDataStart_D), // The mapped pointer
&vboStartSize, // The size of the accessible data
cudaTokens[2]))) { // The mapped resource
return false;
}
// Init grid params
// Init CUDA grid for texture #0
if (!initGridParams(texDim0, texOrg0, texDelta0)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
DeformableGPUSurfaceMT::ClassName());
return false;
}
// Compute difference for new and old vertices (after subdivision)
// Part one: sample value for new vertices
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_ComputeSurfAttribDiff0_D) , dim3(Grid(this->vertexCnt, 256)), dim3(256) , 0, 0,
vertexAttrib_D,
vertexDataEnd_D,
tex0_D,
this->vertexCnt);
CudaDevArr<float> rotate_D;
// Rotate for best fit
rotate_D.Validate(9);
if (!CudaSafeCall(hipMemcpy((void *)rotate_D.Peek(), &rotMat[0],
9*sizeof(float), hipMemcpyHostToDevice))) {
return false;
}
// Init grid params
// Init CUDA grid for texture #1
if (!initGridParams(texDim1, texOrg1, texDelta1)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
DeformableGPUSurfaceMT::ClassName());
return false;
}
if (this->vertexFlag_D.GetCount() == 0) {
if (!CudaSafeCall(this->vertexFlag_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->vertexFlag_D.Set(0x00))) {
return false;
}
}
// Compute difference for new and old vertices (after subdivision)
// Part two: sample value for old/tracked back vertices
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_ComputeSurfAttribDiff1_D) , dim3(Grid(this->vertexCnt, 256)), dim3(256) , 0, 0,
vertexAttrib_D,
vertexDataStart_D,
this->trackedSubdivVertexData_D.Peek(), // Tracked back vertices, needed for sampling
this->vertexFlag_D.Peek(),
tex1_D,
rotate_D.Peek(),
make_float3(transVec[0],transVec[1],transVec[2]),
make_float3(centroid[0],centroid[1],centroid[2]),
this->vertexCnt);
if (!CheckForCudaError()) {
return false;
}
rotate_D.Release();
if (!CudaSafeCall(hipGraphicsUnmapResources(3, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[2]))) {
return false;
}
return true;
}
__global__ void DeformableGPUSurfaceMT_ComputeTriangleFaceNormal_D(
float3 *triFaceNormals_D,
float *vertexData_D,
uint *triangleidx_D,
uint triangleCnt) {
const uint idx = ::getThreadIdx();
if (idx >= triangleCnt) {
return;
}
float3 pos0 = make_float3(
vertexData_D[9*triangleidx_D[3*idx+0]+0],
vertexData_D[9*triangleidx_D[3*idx+0]+1],
vertexData_D[9*triangleidx_D[3*idx+0]+2]);
float3 pos1 = make_float3(
vertexData_D[9*triangleidx_D[3*idx+1]+0],
vertexData_D[9*triangleidx_D[3*idx+1]+1],
vertexData_D[9*triangleidx_D[3*idx+1]+2]);
float3 pos2 = make_float3(
vertexData_D[9*triangleidx_D[3*idx+2]+0],
vertexData_D[9*triangleidx_D[3*idx+2]+1],
vertexData_D[9*triangleidx_D[3*idx+2]+2]);
float3 vec0 = (pos1 - pos0);
float3 vec1 = (pos2 - pos0);
float3 norm = normalize(cross(vec0, vec1));
// Write normal
triFaceNormals_D[idx*3+0] = norm;
triFaceNormals_D[idx*3+1] = norm;
triFaceNormals_D[idx*3+2] = norm;
}
__global__ void DeformableGPUSurfaceMT_CheckTriNormals_D(
float3 *triFaceNormals_D,
uint *triangleNeighbors_D,
uint triangleCnt) {
const uint idx = ::getThreadIdx();
if (idx >= triangleCnt) {
return;
}
uint n0 = triangleNeighbors_D[3*idx+0];
uint n1 = triangleNeighbors_D[3*idx+1];
uint n2 = triangleNeighbors_D[3*idx+2];
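// Compare the triangle's normal with the (unnormalized) average of its three
// neighbors' normals; only the sign of the dot product matters here, so the
// 0.3 scale factor is irrelevant. Triangles facing away from their
// neighborhood are zeroed out (flagged as flipped).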
float3 norm = normalize(triFaceNormals_D[idx]);
float3 norm0 = normalize(triFaceNormals_D[n0]);
float3 norm1 = normalize(triFaceNormals_D[n1]);
float3 norm2 = normalize(triFaceNormals_D[n2]);
float3 avgNorm = (norm0+norm1+norm2)*0.3;
__syncthreads();
if ((dot(norm, avgNorm) < 0)) {
triFaceNormals_D[idx] = make_float3(0.0, 0.0, 0.0);
}
}
__global__ void DeformableGPUSurfaceMT_ComputeNormalsSubdiv_D(
float *vertexData_D,
float3 *normals_D,
uint vertexCnt) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
float3 norm = normalize(normals_D[idx]);
// Write normal
vertexData_D[idx*9+3] = norm.x;
vertexData_D[idx*9+4] = norm.y;
vertexData_D[idx*9+5] = norm.z;
}
/*
* see http://blog.csdn.net/newtonbear/article/details/12768377
*/
template <typename Key, typename Value>
int reduce_by_key_with_raw_pointers(Key* d_key, Key* d_key_last, Value* d_value,
Key* d_okey, Value* d_ovalue) {
thrust::device_ptr<Key> d_keyp = thrust::device_pointer_cast(d_key);
thrust::device_ptr<Key> d_key_lastp = thrust::device_pointer_cast(d_key_last);
thrust::device_ptr<Value> d_valuep = thrust::device_pointer_cast(d_value);
thrust::device_ptr<Key> d_okeyp = thrust::device_pointer_cast(d_okey);
thrust::device_ptr<Value> d_ovaluep = thrust::device_pointer_cast(d_ovalue);
thrust::pair<thrust::device_ptr<Key>, thrust::device_ptr<Value> > new_end;
new_end = thrust::reduce_by_key(d_keyp, d_key_lastp, d_valuep, d_okeyp, d_ovaluep);
return new_end.first - d_okeyp;
}
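// Minimal usage sketch (illustrative only; the array names are placeholders):
//
//   // keys_D must be sorted so that equal keys are contiguous, e.g. via a
//   // prior thrust::sort_by_key over the same key/value arrays.
//   int uniqueCnt = reduce_by_key_with_raw_pointers<uint, float3>(
//           keys_D, keys_D + cnt, values_D, keysOut_D, valuesOut_D);
//
// The return value is the number of unique keys, i.e. the number of entries
// written to keysOut_D/valuesOut_D.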
void OutputDevArrayUint(uint* d_array, int count, const char* name) {
// DEBUG Print
HostArr<uint> h_array;
h_array.Validate(count);
if (!CudaSafeCall(hipMemcpy(h_array.Peek(), d_array, sizeof(uint)*count, hipMemcpyDeviceToHost))) {
return;
}
for (int i = 0; i < count; ++i) {
printf("%s %i: %u\n", name, i, h_array.Peek()[i]);
}
h_array.Release();
// END DEBUG
}
void OutputDevArrayFloat3(float3* d_array, int count, const char* name) {
// DEBUG Print
HostArr<float3> h_array;
h_array.Validate(count);
if (!CudaSafeCall(hipMemcpy(h_array.Peek(), d_array, sizeof(float3)*count,
hipMemcpyDeviceToHost))) {
return;
}
for (int i = 0; i < count; ++i) {
printf("%s %i: %f %f %f\n", name, i,
h_array.Peek()[i].x,
h_array.Peek()[i].y,
h_array.Peek()[i].z);
}
h_array.Release();
// END DEBUG
}
/*
* DeformableGPUSurfaceMT::ComputeNormalsSubdiv
*/
bool DeformableGPUSurfaceMT::ComputeNormalsSubdiv() {
// Get pointer to vertex attribute array
cudaGraphicsResource* cudaTokens[2];
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboVtxData,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[1],
this->vboTriangleIdx,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vertexBuffer_D;
size_t vboVertexBufferSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vertexBuffer_D), // The mapped pointer
&vboVertexBufferSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
unsigned int *triIdx_D;
size_t vboTriIdxSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&triIdx_D), // The mapped pointer
&vboTriIdxSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
// 1. Compute triangle face normals
if (!CudaSafeCall(this->triangleFaceNormals_D.Validate(this->triangleCnt*3))) {
return false;
}
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_ComputeTriangleFaceNormal_D) , dim3(Grid(this->triangleCnt, 256)), dim3(256) , 0, 0,
this->triangleFaceNormals_D.Peek(),
vertexBuffer_D,
triIdx_D,
this->triangleCnt);
if (!CheckForCudaError()) {
return false;
}
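// Per-vertex normals are then obtained in three steps: the per-face normal is
// replicated once per triangle corner (above), the copies are sorted by their
// vertex index, and a segmented reduction sums all face normals incident to
// the same vertex before the final kernel normalizes and writes them back.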
// // DEBUG CHECK FACE NORMALS
// DeformableGPUSurfaceMT_CheckTriNormals_D <<< Grid(this->triangleCnt, 256), 256 >>> (
// this->triangleFaceNormals_D.Peek(),
// this->triangleNeighbors_D.Peek(),
// this->triangleCnt);
// 2. Sort triangle normals by key
// Copy triangle indices
if (!CudaSafeCall(this->triangleIdxTmp_D.Validate(this->triangleCnt*3))) {
return false;
}
if (!CudaSafeCall(hipMemcpy(this->triangleIdxTmp_D.Peek(), triIdx_D,
sizeof(uint)*this->triangleCnt*3, hipMemcpyDeviceToDevice))) {
return false;
}
thrust::sort_by_key(
thrust::device_ptr<uint>(this->triangleIdxTmp_D.Peek()),
thrust::device_ptr<uint>(this->triangleIdxTmp_D.Peek() + this->triangleCnt*3),
thrust::device_ptr<float3>(this->triangleFaceNormals_D.Peek()));
if (!CheckForCudaError()) {
return false;
}
// OutputDevArrayUint(this->triangleIdxTmp_D.Peek(), this->triangleCnt*3, "TRI IDX");
// 3. Reduce vertex normals by key
// if (!CudaSafeCall(this->vertexNormalsIndxOffs_D.Validate(this->triangleCnt*3))) {
// return false;
// }
// if (!CudaSafeCall(this->reducedVertexKeysTmp_D.Validate(this->vertexCnt))) {
// return false;
// }
// thrust::device_ptr<uint> D = thrust::device_ptr<uint>(this->vertexNormalsIndxOffs_D.Peek());
// thrust::fill(D, D + this->vertexCnt, 1);
// thrust::device_ptr<uint> dev_ptr(this->vertexNormalsIndxOffs_D.Peek());
// thrust::fill(dev_ptr, dev_ptr + this->triangleCnt*3, 1);
// int n = reduce_by_key_with_raw_pointers<uint, uint>(
// this->triangleIdxTmp_D.Peek(),
// this->triangleIdxTmp_D.Peek() + this->triangleCnt*3,
// this->vertexNormalsIndxOffs_D.Peek(),
// this->triangleIdxTmp_D.Peek(),
// this->reducedVertexKeysTmp_D.Peek());
//OutputDevArrayUint(this->reducedVertexKeysTmp_D.Peek(), this->vertexCnt, "NORMAL CNT");
if (!CudaSafeCall(this->reducedNormalsTmp_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->outputArrayTmp_D.Validate(this->vertexCnt))) {
return false;
}
int n = reduce_by_key_with_raw_pointers<uint, float3>(
this->triangleIdxTmp_D.Peek(),
this->triangleIdxTmp_D.Peek() + this->triangleCnt*3,
this->triangleFaceNormals_D.Peek(),
this->outputArrayTmp_D.Peek(),
this->reducedNormalsTmp_D.Peek());
// OutputDevArrayFloat3(this->reducedNormalsTmp_D.Peek(), this->vertexCnt, "NORMAL ");
// printf("N %u, vertexCnt %u\n", n, this->vertexCnt);
// Compute actual normals
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_ComputeNormalsSubdiv_D) , dim3(Grid(this->vertexCnt, 256)), dim3(256) , 0, 0,
vertexBuffer_D,
this->reducedNormalsTmp_D.Peek(),
this->vertexCnt);
if (!CheckForCudaError()) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
return ::CheckForCudaError();
}
/*
* DeformableGPUSurfaceMT::PrintVertexBuffer
*/
void DeformableGPUSurfaceMT::PrintVertexBuffer(size_t cnt) {
// Get pointer to vertex attribute array
cudaGraphicsResource* cudaTokens[1];
CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboVtxData,
hipGraphicsMapFlagsNone));
CudaSafeCall(hipGraphicsMapResources(1, cudaTokens, 0));
// Get mapped pointers to the vertex data buffers
float *vertexBuffer_D;
size_t vboVertexBufferSize;
CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vertexBuffer_D), // The mapped pointer
&vboVertexBufferSize, // The size of the accessible data
cudaTokens[0]));
HostArr<float> vertexBuffer;
vertexBuffer.Validate(cnt*this->vertexDataStride);
CudaSafeCall(hipMemcpy(vertexBuffer.Peek(), vertexBuffer_D,
sizeof(float)*cnt*this->vertexDataStride,
hipMemcpyDeviceToHost));
for (int i = 0; i < cnt; ++i) {
printf("VERTEX BUFFER %f %f %f, %f %f %f, %f %f %f\n",
vertexBuffer.Peek()[i*this->vertexDataStride+0],
vertexBuffer.Peek()[i*this->vertexDataStride+1],
vertexBuffer.Peek()[i*this->vertexDataStride+2],
vertexBuffer.Peek()[i*this->vertexDataStride+3],
vertexBuffer.Peek()[i*this->vertexDataStride+4],
vertexBuffer.Peek()[i*this->vertexDataStride+5],
vertexBuffer.Peek()[i*this->vertexDataStride+6],
vertexBuffer.Peek()[i*this->vertexDataStride+7],
vertexBuffer.Peek()[i*this->vertexDataStride+8]);
}
vertexBuffer.Release();
CudaSafeCall(hipGraphicsUnmapResources(1, cudaTokens, 0));
CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]));
}
void DeformableGPUSurfaceMT::PrintExternalForces(size_t cnt) {
HostArr<float> externalForces;
externalForces.Validate(cnt*4);
CudaSafeCall(hipMemcpy(externalForces.Peek(), this->externalForces_D.Peek(),
sizeof(float)*cnt*4,
hipMemcpyDeviceToHost));
for (int i = 0; i < cnt; ++i) {
printf("EXT FORCES %f %f %f\n",
externalForces.Peek()[4*i+0],
externalForces.Peek()[4*i+1],
externalForces.Peek()[4*i+2]);
}
externalForces.Release();
}
void DeformableGPUSurfaceMT::PrintCubeStates(size_t cnt) {
HostArr<unsigned int> cubeStates;
cubeStates.Validate(cnt);
CudaSafeCall(hipMemcpy(cubeStates.Peek(), this->cubeStates_D.Peek(),
sizeof(unsigned int)*cnt,
hipMemcpyDeviceToHost));
for (int i = 0; i < cnt; ++i) {
printf("CUBESTATES %u\n", cubeStates.Peek()[i]);
}
cubeStates.Release();
}
/*
* DeformableGPUSurfaceMT::ComputeMeshLaplacian
*/
bool DeformableGPUSurfaceMT::ComputeMeshLaplacian() {
typedef vislib::math::Vector<float, 3> Vec3f;
// Get pointer to vertex attribute array
cudaGraphicsResource* cudaTokens[2];
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboVtxData,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[1],
this->vboTriangleIdx,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vertexBuffer_D;
size_t vboVertexBufferSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vertexBuffer_D), // The mapped pointer
&vboVertexBufferSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
unsigned int *triIdx_D;
size_t vboTriIdxSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&triIdx_D), // The mapped pointer
&vboTriIdxSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
// Copy vertex data and triangle indices to CPU
HostArr<float> vertexData;
HostArr<unsigned int> triIdx;
vertexData.Validate(this->vertexCnt*9);
if (!CudaSafeCall(hipMemcpy(vertexData.Peek(), vertexBuffer_D,
sizeof(float)*this->vertexCnt*9, hipMemcpyDeviceToHost))) {
return false;
}
triIdx.Validate(this->triangleCnt*3);
if (!CudaSafeCall(hipMemcpy(triIdx.Peek(), triIdx_D,
sizeof(uint)*this->triangleCnt*3, hipMemcpyDeviceToHost))) {
return false;
}
// Build vertex neighbor list
vislib::Array<vislib::Array<uint> > vtxNeighbors;
vtxNeighbors.SetCount(this->vertexCnt);
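// For every triangle, each of its vertices gets the other two vertices
// appended to its adjacency list (duplicates are skipped). The linear Find()
// makes this quadratic in the vertex valence, which is small in practice.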
// Loop through all triangles
for (size_t tri = 0; tri < this->triangleCnt; ++tri) {
std::size_t idx0 = triIdx.Peek()[3*tri+0];
std::size_t idx1 = triIdx.Peek()[3*tri+1];
std::size_t idx2 = triIdx.Peek()[3*tri+2];
if (vtxNeighbors[idx0].Find(idx1) == NULL) {
vtxNeighbors[idx0].Append(idx1);
}
if (vtxNeighbors[idx0].Find(idx2) == NULL) {
vtxNeighbors[idx0].Append(idx2);
}
if (vtxNeighbors[idx1].Find(idx0) == NULL) {
vtxNeighbors[idx1].Append(idx0);
}
if (vtxNeighbors[idx1].Find(idx2) == NULL) {
vtxNeighbors[idx1].Append(idx2);
}
if (vtxNeighbors[idx2].Find(idx0) == NULL) {
vtxNeighbors[idx2].Append(idx0);
}
if (vtxNeighbors[idx2].Find(idx1) == NULL) {
vtxNeighbors[idx2].Append(idx1);
}
}
// // DEBUG printf vertex neighbor list
// printf("Computing vertex neighbor list...\n");
// for (size_t v = 0; v < this->vertexCnt; ++v) {
// printf("%u: ", v);
// for (size_t n = 0; n < vtxNeighbors[v].Count(); ++n) {
// printf("%u ", vtxNeighbors[v][n]);
// }
// printf("\n");
// }
// // End DEBUG
printf("Computing mesh Laplacian ...\n");
HostArr<float> vtxLaplacian;
vtxLaplacian.Validate(this->vertexCnt*3);
// Loop through all vertices
for (size_t v = 0; v < this->vertexCnt; ++v) {
float normSum = 0.0f;
vtxLaplacian.Peek()[3*v+0] = 0.0f;
vtxLaplacian.Peek()[3*v+1] = 0.0f;
vtxLaplacian.Peek()[3*v+2] = 0.0f;
Vec3f pos(vertexData.Peek()[9*v+0],
vertexData.Peek()[9*v+1],
vertexData.Peek()[9*v+2]);
//float minAngle = 1000.0f;
//float maxAngle = 0.0f;
Vec3f currNPos;
Vec3f nextNPos;
for (size_t n = 0; n < vtxNeighbors[v].Count(); ++n) {
// Get position of neighbor
uint nIdxCurr = vtxNeighbors[v][n];
uint nIdxNext;
if (n == vtxNeighbors[v].Count()-1)
nIdxNext = vtxNeighbors[v][0];
else
nIdxNext = vtxNeighbors[v][n+1];
currNPos.Set(vertexData.Peek()[9*nIdxCurr+0],
vertexData.Peek()[9*nIdxCurr+1],
vertexData.Peek()[9*nIdxCurr+2]);
nextNPos.Set(vertexData.Peek()[9*nIdxNext+0],
vertexData.Peek()[9*nIdxNext+1],
vertexData.Peek()[9*nIdxNext+2]);
// normSum += (pos-posN).Length();
// Vec3f dist = pos-posN;
// dist.Normalise();
// vtxLaplacian.Peek()[3*v+0] += dist.X();
// vtxLaplacian.Peek()[3*v+1] += dist.Y();
// vtxLaplacian.Peek()[3*v+2] += dist.Z();
}
// Normalize (guard against an empty neighborhood to avoid division by zero)
if (normSum > 0.0f) {
vtxLaplacian.Peek()[3*v+0] /= normSum;
vtxLaplacian.Peek()[3*v+1] /= normSum;
vtxLaplacian.Peek()[3*v+2] /= normSum;
}
}
// // DEBUG Print mesh Laplacian norm
// for (size_t v = 0; v < this->vertexCnt; ++v) {
// printf("Laplacian %u: %f\n", v, vtxLaplacian.Peek()[v]);
// }
// // End DEBUG
// Write to vertex attribute array
if (!CudaSafeCall(this->geometricLaplacian_D.Validate(this->vertexCnt*3))) {
return false;
}
if (!CudaSafeCall(hipMemcpy(this->geometricLaplacian_D.Peek(), vtxLaplacian.Peek(),
sizeof(float)*this->vertexCnt*3, hipMemcpyHostToDevice))) {
return false;
}
// Cleanup
vertexData.Release();
triIdx.Release();
vtxLaplacian.Release();
vtxNeighbors.Clear();
if (!CudaSafeCall(hipGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
return ::CheckForCudaError();
}
/*
* DeformableGPUSurfaceMT_ComputeAttribDiff_D
*/
__global__ void DeformableGPUSurfaceMT_ComputeAttribDiff_D (
float *vertexAttrib_D,
float *meshLaplacian_D,
float *meshLaplacianOther_D,
uint vertexCnt) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
float3 otherAttrib = make_float3(
meshLaplacianOther_D[3*idx+0],
meshLaplacianOther_D[3*idx+1],
meshLaplacianOther_D[3*idx+2]);
float3 thisAttrib = make_float3(
meshLaplacian_D[3*idx+0],
meshLaplacian_D[3*idx+1],
meshLaplacian_D[3*idx+2]);
//vertexAttrib_D[idx] = abs(thisAttrib-otherAttrib);
vertexAttrib_D[idx] = length(thisAttrib-otherAttrib);
}
/*
* DeformableGPUSurfaceMT::ComputeMeshLaplacianDiff
*/
bool DeformableGPUSurfaceMT::ComputeMeshLaplacianDiff(
DeformableGPUSurfaceMT &surfStart) {
if (this->nFlaggedVertices != 0) {
printf("No subdivision allowed in this case!\n");
return false;
}
typedef vislib::math::Vector<float, 3> Vec3f;
if (!this->InitVtxAttribVBO(this->vertexCnt)) {
return false;
}
if (!surfStart.ComputeMeshLaplacian()) {
return false;
}
if (!this->ComputeMeshLaplacian()) {
return false;
}
// Get pointer to vertex attribute array
cudaGraphicsResource* cudaTokens[1];
if (!CudaSafeCall(hipGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboVtxAttr,
hipGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(hipGraphicsMapResources(1, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vertexAttrib_D;
size_t vertexAttribSize;
if (!CudaSafeCall(hipGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vertexAttrib_D), // The mapped pointer
&vertexAttribSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Compute difference
hipLaunchKernelGGL(( DeformableGPUSurfaceMT_ComputeAttribDiff_D) , dim3(Grid(this->vertexCnt, 256)), dim3(256) , 0, 0,
vertexAttrib_D,
this->PeekGeomLaplacian(),
surfStart.PeekGeomLaplacian(),
this->vertexCnt);
if (!CheckForCudaError()) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnmapResources(1, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(hipGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
return ::CheckForCudaError();
}
//
// DeformableGPUSurfaceMT.cpp
//
// Copyright (C) 2013 by University of Stuttgart (VISUS).
// All rights reserved.
//
// Created on : Sep 17, 2013
// Author : scharnkn
//
#include "vislib_gl/graphics/gl/IncludeAllGL.h"
#include "DeformableGPUSurfaceMT.h"
//#ifndef CUDA_NO_SM_11_ATOMIC_INTRINSICS
// printf("WARNING! Not using atomics!\n");
//#endif
#include "ogl_error_check.h"
#include "cuda_error_check.h"
#include "HostArr.h"
#include "DiffusionSolver.h"
#include "CUDAGrid.cuh"
#include <algorithm>
#include <cuda_runtime.h>
#define WGL_NV_gpu_affinity
#include <cuda_gl_interop.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/device_ptr.h>
#include "vislib/Array.h"
#include "vislib/math/Vector.h"
//#define USE_TIMER
using namespace megamol;
using namespace megamol::protein_cuda;
/**
* Samples the field at a given position using linear interpolation.
*
* @param pos The position
* @return The sampled value of the field
*/
float4 SampleFieldAtPosTrilin(
float pos[3],
float4 *field,
float gridOrg[3],
float gridDelta[3],
int gridSize[3]) {
int cell[3];
float x[3];
// Get id of the cell containing the given position and interpolation
// coefficients
x[0] = (pos[0]-gridOrg[0])/gridDelta[0];
x[1] = (pos[1]-gridOrg[1])/gridDelta[1];
x[2] = (pos[2]-gridOrg[2])/gridDelta[2];
cell[0] = (int)(x[0]);
cell[1] = (int)(x[1]);
cell[2] = (int)(x[2]);
x[0] = x[0]-(float)cell[0]; // alpha
x[1] = x[1]-(float)cell[1]; // beta
x[2] = x[2]-(float)cell[2]; // gamma
float alpha = x[0];
float beta = x[1];
float gamma = x[2];
cell[0] = std::min(std::max(cell[0], int(0)), gridSize[0]-2);
cell[1] = std::min(std::max(cell[1], int(0)), gridSize[1]-2);
cell[2] = std::min(std::max(cell[2], int(0)), gridSize[2]-2);
// Get values at corners of current cell
float4 n0, n1, n2, n3, n4, n5, n6, n7;
// printf("dim %i %i %i\n", gridSize[0], gridSize[1], gridSize[2]);
// printf("cell %i %i %i\n", cell[0], cell[1], cell[2]);
size_t fieldSize = gridSize[0]*gridSize[1]*gridSize[2];
if (gridSize[0]*(gridSize[1]*(cell[2]+0) + (cell[1]+0))+cell[0]+0 >= fieldSize) {
printf("Overflow %i\n", gridSize[0]*(gridSize[1]*(cell[2]+0) + (cell[1]+0))+cell[0]+0);
}
n0 = field[gridSize[0]*(gridSize[1]*(cell[2]+0) + (cell[1]+0))+cell[0]+0];
n1 = field[gridSize[0]*(gridSize[1]*(cell[2]+0) + (cell[1]+0))+cell[0]+1];
n2 = field[gridSize[0]*(gridSize[1]*(cell[2]+0) + (cell[1]+1))+cell[0]+0];
n3 = field[gridSize[0]*(gridSize[1]*(cell[2]+0) + (cell[1]+1))+cell[0]+1];
n4 = field[gridSize[0]*(gridSize[1]*(cell[2]+1) + (cell[1]+0))+cell[0]+0];
n5 = field[gridSize[0]*(gridSize[1]*(cell[2]+1) + (cell[1]+0))+cell[0]+1];
n6 = field[gridSize[0]*(gridSize[1]*(cell[2]+1) + (cell[1]+1))+cell[0]+0];
n7 = field[gridSize[0]*(gridSize[1]*(cell[2]+1) + (cell[1]+1))+cell[0]+1];
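// Coefficients of the trilinear expansion
// f(alpha, beta, gamma) = a + b*alpha + c*beta + d*alpha*beta + e*gamma
//                         + f*alpha*gamma + g*beta*gamma + h*alpha*beta*gamma
// derived from the eight cell-corner samples n0..n7.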
float4 a, b, c, d, e, f, g, h;
a = n0;
b = n1 - n0;
c = n2 - n0;
d = n3 - n1 - n2 + n0;
e = n4 - n0;
f = n5 - n1 - n4 + n0;
g = n6 - n2 - n4 + n0;
h = n7 - n3 - n5 - n6 + n1 + n2 + n4 - n0;
return a + b*alpha + c*beta + d*alpha*beta + e*gamma + f*alpha*gamma
+ g*beta*gamma + h*alpha*beta*gamma;
}
float SampleFieldAtPosTrilin(
float pos[3],
float *field,
float gridOrg[3],
float gridDelta[3],
int gridSize[3]) {
int cell[3];
float x[3];
// Get id of the cell containing the given position and interpolation
// coefficients
x[0] = (pos[0]-gridOrg[0])/gridDelta[0];
x[1] = (pos[1]-gridOrg[1])/gridDelta[1];
x[2] = (pos[2]-gridOrg[2])/gridDelta[2];
cell[0] = (int)(x[0]);
cell[1] = (int)(x[1]);
cell[2] = (int)(x[2]);
x[0] = x[0]-(float)cell[0]; // alpha
x[1] = x[1]-(float)cell[1]; // beta
x[2] = x[2]-(float)cell[2]; // gamma
float alpha = x[0];
float beta = x[1];
float gamma = x[2];
cell[0] = std::min(std::max(cell[0], int(0)), gridSize[0]-2);
cell[1] = std::min(std::max(cell[1], int(0)), gridSize[1]-2);
cell[2] = std::min(std::max(cell[2], int(0)), gridSize[2]-2);
// Get values at corners of current cell
float n0, n1, n2, n3, n4, n5, n6, n7;
n0 = field[gridSize[0]*(gridSize[1]*(cell[2]+0) + (cell[1]+0))+cell[0]+0];
n1 = field[gridSize[0]*(gridSize[1]*(cell[2]+0) + (cell[1]+0))+cell[0]+1];
n2 = field[gridSize[0]*(gridSize[1]*(cell[2]+0) + (cell[1]+1))+cell[0]+0];
n3 = field[gridSize[0]*(gridSize[1]*(cell[2]+0) + (cell[1]+1))+cell[0]+1];
n4 = field[gridSize[0]*(gridSize[1]*(cell[2]+1) + (cell[1]+0))+cell[0]+0];
n5 = field[gridSize[0]*(gridSize[1]*(cell[2]+1) + (cell[1]+0))+cell[0]+1];
n6 = field[gridSize[0]*(gridSize[1]*(cell[2]+1) + (cell[1]+1))+cell[0]+0];
n7 = field[gridSize[0]*(gridSize[1]*(cell[2]+1) + (cell[1]+1))+cell[0]+1];
float a, b, c, d, e, f, g, h;
a = n0;
b = n1 - n0;
c = n2 - n0;
d = n3 - n1 - n2 + n0;
e = n4 - n0;
f = n5 - n1 - n4 + n0;
g = n6 - n2 - n4 + n0;
h = n7 - n3 - n5 - n6 + n1 + n2 + n4 - n0;
return a + b*alpha + c*beta + d*alpha*beta + e*gamma + f*alpha*gamma
+ g*beta*gamma + h*alpha*beta*gamma;
}
/**
* 'Safe' inverse sqrt, that prevents dividing by zero
*
* @param x The input value
* @return The inverse sqrt if x>0, 0.0 otherwise
*/
inline __host__ __device__ float safeRsqrtf(float x) {
if (x > 0.0) {
return 1.0f/sqrtf(x);
} else {
return 0.0f;
}
}
/**
* 'Safe' normalize function for float3 that uses safe rsqrt
*
* @param v The input vector to be normalized
* @return The normalized vector v
*/
inline __device__ float safeInvLength(float3 v) {
return safeRsqrtf(dot(v, v));
}
/**
* 'Safe' normalize function for float2 that uses safe rsqrt
*
* @param v The input vector to be normalized
* @return The normalized vector v
*/
inline __device__ float2 safeNormalize(float2 v) {
float invLen = safeRsqrtf(dot(v, v));
return v * invLen;
}
/**
* 'Safe' normalize function for float3 that uses safe rsqrt
*
* @param v The input vector to be normalized
* @return The normalized vector v
*/
inline __host__ __device__ float3 safeNormalize(float3 v) {
float invLen = safeRsqrtf(dot(v, v));
return v * invLen;
}
////////////////////////////////////////////////////////////////////////////////
// Inline device functions //
////////////////////////////////////////////////////////////////////////////////
/**
* @return Returns the thread index based on the current CUDA grid dimensions
*/
inline __device__ uint getThreadIdx() {
return __umul24(__umul24(blockIdx.y, gridDim.x) + blockIdx.x, blockDim.x) +
threadIdx.x;
}
////////////////////////////////////////////////////////////////////////////////
// Global device functions //
////////////////////////////////////////////////////////////////////////////////
/**
* Computes the gradient of a given scalar field using central differences.
* Border areas are omitted.
*
* @param[out] grad_D The gradient field
* @param[in] field_D The scalar field
*/
__global__ void DeformableGPUSurfaceMT_CalcVolGradient_D(float4 *grad_D, float *field_D) {
const uint idx = ::getThreadIdx();
// Get grid coordinates
uint3 gridCoord = make_uint3(
idx % gridSize_D.x,
(idx / gridSize_D.x) % gridSize_D.y,
(idx / gridSize_D.x) / gridSize_D.y);
// Omit border cells (gradient remains zero)
if (gridCoord.x == 0) return;
if (gridCoord.y == 0) return;
if (gridCoord.z == 0) return;
if (gridCoord.x >= gridSize_D.x - 1) return;
if (gridCoord.y >= gridSize_D.y - 1) return;
if (gridCoord.z >= gridSize_D.z - 1) return;
float3 grad;
grad.x =
field_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x+1, gridCoord.y+0, gridCoord.z+0))]-
field_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x-1, gridCoord.y+0, gridCoord.z+0))];
grad.y =
field_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x, gridCoord.y+1, gridCoord.z+0))]-
field_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x, gridCoord.y-1, gridCoord.z+0))];
grad.z =
field_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x, gridCoord.y+0, gridCoord.z+1))]-
field_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x, gridCoord.y+0, gridCoord.z-1))];
grad = safeNormalize(grad);
grad_D[idx].x = grad.x;
grad_D[idx].y = grad.y;
grad_D[idx].z = grad.z;
}
/**
* Computes the gradient of a given scalar field using forward differences.
* Border areas are omitted.
*
* @param[out] grad_D The gradient field
* @param[in] field_D The scalar field
* @param[in] field_D The distance field
*/
__global__ void DeformableGPUSurfaceMT_CalcVolGradientWithDistField_D(float4 *grad_D, float *field_D,
float *distField_D, float minDist, float isovalue) {
const uint idx = ::getThreadIdx();
// Get grid coordinates
uint3 gridCoord = ::GetGridCoordsByPosIdx(idx);
// Omit border cells (gradient remains zero)
if (gridCoord.x == 0) return;
if (gridCoord.y == 0) return;
if (gridCoord.z == 0) return;
if (gridCoord.x >= gridSize_D.x - 1) return;
if (gridCoord.y >= gridSize_D.y - 1) return;
if (gridCoord.z >= gridSize_D.z - 1) return;
float distSample = ::SampleFieldAt_D<float, false>(gridCoord, distField_D);
float volSample = ::SampleFieldAt_D<float, false>(gridCoord, field_D);
float3 grad = make_float3(0.0, 0.0, 0.0);
if (distSample > minDist) {
grad.x =
distField_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x+1, gridCoord.y+0, gridCoord.z+0))]-
distField_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x+0, gridCoord.y+0, gridCoord.z+0))];
grad.y =
distField_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x, gridCoord.y+1, gridCoord.z+0))]-
distField_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x, gridCoord.y+0, gridCoord.z+0))];
grad.z =
distField_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x, gridCoord.y+0, gridCoord.z+1))]-
distField_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x, gridCoord.y+0, gridCoord.z+0))];
if (volSample < isovalue) {
grad.x *= -1.0;
grad.y *= -1.0;
grad.z *= -1.0;
}
} else {
grad.x =
field_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x+1, gridCoord.y+0, gridCoord.z+0))]-
field_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x+0, gridCoord.y+0, gridCoord.z+0))];
grad.y =
field_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x, gridCoord.y+1, gridCoord.z+0))]-
field_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x, gridCoord.y+0, gridCoord.z+0))];
grad.z =
field_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x, gridCoord.y+0, gridCoord.z+1))]-
field_D[GetPosIdxByGridCoords(make_uint3(gridCoord.x, gridCoord.y+0, gridCoord.z+0))];
}
grad = safeNormalize(grad);
grad_D[idx].x = grad.x;
grad_D[idx].y = grad.y;
grad_D[idx].z = grad.z;
}
/**
* Computes a distance field based on the vertex positions.
*
* @param[in] vertexPos_D The vertex data buffer (device memory)
* @param[out] distField_D The distance field buffer (device memory)
* @param[in] vertexCnt The number of vertices
* @param[in] dataArrOffs The vertex position offset for the vertex data buffer
* @param[in] dataArrSize The stride of the vertex data buffer
*/
__global__ void DeformableGPUSurfaceMT_ComputeDistField_D(
float *vertexPos_D,
float *distField_D,
uint vertexCnt,
uint dataArrOffs,
uint dataArrSize) {
// TODO This is very slow since it basically bruteforces all vertex
// positions and stores the distance to the nearest one.
const uint idx = getThreadIdx();
if (idx >= gridSize_D.x*gridSize_D.y*gridSize_D.z) {
return;
}
// Get world space position of gridPoint
uint3 gridCoords = GetGridCoordsByPosIdx(idx);
float3 latticePos = TransformToWorldSpace(make_float3(
gridCoords.x,
gridCoords.y,
gridCoords.z));
// Loop through all vertices to find minimal distance
float3 pos = make_float3(vertexPos_D[0], vertexPos_D[1], vertexPos_D[2]);
float len;
len = (latticePos.x-pos.x)*(latticePos.x-pos.x)+
(latticePos.y-pos.y)*(latticePos.y-pos.y)+
(latticePos.z-pos.z)*(latticePos.z-pos.z);
float dist2 = len;
for (uint i = 0; i < vertexCnt; ++i) {
pos = make_float3(
vertexPos_D[dataArrSize*i+dataArrOffs+0],
vertexPos_D[dataArrSize*i+dataArrOffs+1],
vertexPos_D[dataArrSize*i+dataArrOffs+2]);
len = (latticePos.x-pos.x)*(latticePos.x-pos.x)+
(latticePos.y-pos.y)*(latticePos.y-pos.y)+
(latticePos.z-pos.z)*(latticePos.z-pos.z);
dist2 = min(dist2, len);
}
distField_D[idx] = sqrt(dist2);
}
/**
* Flags corrupt triangles and marks every vertex that is adjacent to a
* corrupt triangle.
*
* @param[out] vertexFlag_D         Per-vertex corruption flags
* @param[out] corruptTriangles_D   Per-triangle corruption flags
* @param[in]  vertexData_D         The buffer with the vertex data
* @param[in]  vertexDataStride     The stride for the vertex data buffer
* @param[in]  vertexDataOffsPos    The position offset in the vertex data buffer
* @param[in]  vertexDataOffsNormal The normal offset in the vertex data buffer
* @param[in]  triangleVtxIdx_D     Array with triangle vertex indices
* @param[in]  targetVol_D          The target volume defining the iso-surface
* @param[in]  targetActiveCells_D  Active cells of the target volume
* @param[in]  externalForces_D     Array with the external forces (gradient)
* @param[in]  triangleCnt          The number of triangles
* @param[in]  isoval               The iso-value defining the iso-surface
*/
__global__ void DeformableGPUSurfaceMT_FlagCorruptTriangles_D(
float *vertexFlag_D,
float *corruptTriangles_D,
float *vertexData_D,
uint vertexDataStride,
uint vertexDataOffsPos,
uint vertexDataOffsNormal,
uint *triangleVtxIdx_D,
float *targetVol_D,
const unsigned int *targetActiveCells_D,
float4 *externalForces_D,
uint triangleCnt,
float isoval) {
const uint idx = ::getThreadIdx();
if (idx >= triangleCnt) {
return;
}
/* Alternative 1: Sample volume at triangle midpoint */
// const uint baseIdx0 = vertexDataStride*triangleVtxIdx_D[3*idx+0];
// const uint baseIdx1 = vertexDataStride*triangleVtxIdx_D[3*idx+1];
// const uint baseIdx2 = vertexDataStride*triangleVtxIdx_D[3*idx+2];
// const float3 p0 = make_float3(vertexData_D[baseIdx0+vertexDataOffsPos+0],
// vertexData_D[baseIdx0+vertexDataOffsPos+1],
// vertexData_D[baseIdx0+vertexDataOffsPos+2]);
// const float3 p1 = make_float3(vertexData_D[baseIdx1+vertexDataOffsPos+0],
// vertexData_D[baseIdx1+vertexDataOffsPos+1],
// vertexData_D[baseIdx1+vertexDataOffsPos+2]);
// const float3 p2 = make_float3(vertexData_D[baseIdx2+vertexDataOffsPos+0],
// vertexData_D[baseIdx2+vertexDataOffsPos+1],
// vertexData_D[baseIdx2+vertexDataOffsPos+2]);
// // Sample volume at midpoint
// const float3 midPoint = (p0+p1+p2)/3.0;
// const float volSampleMidPoint = ::SampleFieldAtPosTricub_D<float>(midPoint, targetVol_D);
// float flag = float(::fabs(volSampleMidPoint-isoval) > 0.3);
// vertexFlag_D[triangleVtxIdx_D[3*idx+0]] = flag;
// vertexFlag_D[triangleVtxIdx_D[3*idx+1]] = flag;
// vertexFlag_D[triangleVtxIdx_D[3*idx+2]] = flag;
/* Alternative 2: use area and angles */
// const uint baseIdx0 = vertexDataStride*triangleVtxIdx_D[3*idx+0];
// const uint baseIdx1 = vertexDataStride*triangleVtxIdx_D[3*idx+1];
// const uint baseIdx2 = vertexDataStride*triangleVtxIdx_D[3*idx+2];
// const float3 p0 = make_float3(
// vertexData_D[baseIdx0+vertexDataOffsPos+0],
// vertexData_D[baseIdx0+vertexDataOffsPos+1],
// vertexData_D[baseIdx0+vertexDataOffsPos+2]);
// const float3 p1 = make_float3(
// vertexData_D[baseIdx1+vertexDataOffsPos+0],
// vertexData_D[baseIdx1+vertexDataOffsPos+1],
// vertexData_D[baseIdx1+vertexDataOffsPos+2]);
// const float3 p2 = make_float3(
// vertexData_D[baseIdx2+vertexDataOffsPos+0],
// vertexData_D[baseIdx2+vertexDataOffsPos+1],
// vertexData_D[baseIdx2+vertexDataOffsPos+2]);
//
// float3 v01 = (p0-p1);
// float3 v02 = (p0-p2);
// float3 v10 = (p1-p0);
// float3 v12 = (p1-p2);
// float3 v21 = (p2-p1);
// float3 v20 = (p2-p0);
//
// // Compute minimum angle
// float dot0 = acos(dot(normalize(v01), normalize(v02)));
// float dot1 = acos(dot(normalize(v10), normalize(v12)));
// float dot2 = acos(dot(normalize(v21), normalize(v20)));
// float minDot = min(dot0, min(dot1, dot2));
//
// // Compute area of the triangle
// float3 midPnt = (p0+p1)*0.5;
// float3 hVec = p2 - midPnt;
// float area = length(p0-p1)*length(hVec)*0.5;
// area = gridDelta_D.x*gridDelta_D.y-1;
//
// float maxCellFaceArea = gridDelta_D.x*gridDelta_D.y; // Find max grid delta
//
// //float flag = float((minDot < 0.1)||(area > maxCellFaceArea));
// float flag = float(minDot < 0.2);
//
// // TODO Is there no atomic write?
//// vertexFlag_D[triangleVtxIdx_D[3*idx+0]] = float(bool(currFlag0) || bool(flag));
//// vertexFlag_D[triangleVtxIdx_D[3*idx+1]] = float(bool(currFlag1) || bool(flag));
//// vertexFlag_D[triangleVtxIdx_D[3*idx+2]] = float(bool(currFlag2) || bool(flag));
//
// // DEBUG
// if (flag == 1.0) {
// vertexFlag_D[triangleVtxIdx_D[3*idx+0]] = 1.0;
// vertexFlag_D[triangleVtxIdx_D[3*idx+1]] = 1.0;
// vertexFlag_D[triangleVtxIdx_D[3*idx+2]] = 1.0;
// }
// // END DEBUG
// corruptTriangles_D[idx] = flag;
// /* Alternative 3 Check whether the vertex lies in an active cell of the
// target volume */
//
// const uint baseIdx0 = vertexDataStride*triangleVtxIdx_D[3*idx+0];
// const uint baseIdx1 = vertexDataStride*triangleVtxIdx_D[3*idx+1];
// const uint baseIdx2 = vertexDataStride*triangleVtxIdx_D[3*idx+2];
// const float3 p0 = make_float3(
// vertexData_D[baseIdx0+vertexDataOffsPos+0],
// vertexData_D[baseIdx0+vertexDataOffsPos+1],
// vertexData_D[baseIdx0+vertexDataOffsPos+2]);
// const float3 p1 = make_float3(
// vertexData_D[baseIdx1+vertexDataOffsPos+0],
// vertexData_D[baseIdx1+vertexDataOffsPos+1],
// vertexData_D[baseIdx1+vertexDataOffsPos+2]);
// const float3 p2 = make_float3(
// vertexData_D[baseIdx2+vertexDataOffsPos+0],
// vertexData_D[baseIdx2+vertexDataOffsPos+1],
// vertexData_D[baseIdx2+vertexDataOffsPos+2]);
//
// // Sample volume at midpoint
// const float3 midpoint = (p0+p1+p2)/3.0;
//
// // Get integer cell index
// int3 coords;
// coords.x = int((midpoint.x-gridOrg_D.x)/gridDelta_D.x);
// coords.y = int((midpoint.y-gridOrg_D.y)/gridDelta_D.y);
// coords.z = int((midpoint.z-gridOrg_D.z)/gridDelta_D.z);
//
// int cellIDx = ::GetCellIdxByGridCoords(coords);
// uint cellState = targetActiveCells_D[cellIDx];
//
// float currFlag0 = vertexFlag_D[triangleVtxIdx_D[3*idx+0]];
// float currFlag1 = vertexFlag_D[triangleVtxIdx_D[3*idx+1]];
// float currFlag2 = vertexFlag_D[triangleVtxIdx_D[3*idx+2]];
//// __syncthreads();
//// vertexFlag_D[triangleVtxIdx_D[3*idx+0]] = float(bool(currFlag0) || bool(1-cellState));
//// vertexFlag_D[triangleVtxIdx_D[3*idx+1]] = float(bool(currFlag1) || bool(1-cellState));
//// vertexFlag_D[triangleVtxIdx_D[3*idx+2]] = float(bool(currFlag2) || bool(1-cellState));
//// vertexFlag_D[triangleVtxIdx_D[3*idx+0]] = 1.0;
//// vertexFlag_D[triangleVtxIdx_D[3*idx+1]] = 1.0;
//// vertexFlag_D[triangleVtxIdx_D[3*idx+2]] = 1.0;
//
//
// corruptTriangles_D[idx] = float(1-cellState);
/* Alternative 4 Check whether all the vertices lies in an active cell of the
target volume */
const uint baseIdx0 = vertexDataStride*triangleVtxIdx_D[3*idx+0];
const uint baseIdx1 = vertexDataStride*triangleVtxIdx_D[3*idx+1];
const uint baseIdx2 = vertexDataStride*triangleVtxIdx_D[3*idx+2];
const float3 p0 = make_float3(
vertexData_D[baseIdx0+vertexDataOffsPos+0],
vertexData_D[baseIdx0+vertexDataOffsPos+1],
vertexData_D[baseIdx0+vertexDataOffsPos+2]);
const float3 p1 = make_float3(
vertexData_D[baseIdx1+vertexDataOffsPos+0],
vertexData_D[baseIdx1+vertexDataOffsPos+1],
vertexData_D[baseIdx1+vertexDataOffsPos+2]);
const float3 p2 = make_float3(
vertexData_D[baseIdx2+vertexDataOffsPos+0],
vertexData_D[baseIdx2+vertexDataOffsPos+1],
vertexData_D[baseIdx2+vertexDataOffsPos+2]);
float3 vec0 = (p1 - p0);
float3 vec1 = (p2 - p0);
float3 norm = normalize(cross(vec0, vec1));
// Sample volume at midpoint
const float3 midpoint = (p0+p1+p2)/3.0;
// Sample gradient from external forces
float4 externalForces = SampleFieldAtPosTrilin_D<float4, false>(midpoint, externalForces_D);
float3 normField = make_float3(externalForces.x, externalForces.y, externalForces.z);
float dotNormsAbs = dot(norm, normField);
// Get integer cell index
int3 coords;
coords.x = int((midpoint.x-gridOrg_D.x)/gridDelta_D.x);
coords.y = int((midpoint.y-gridOrg_D.y)/gridDelta_D.y);
coords.z = int((midpoint.z-gridOrg_D.z)/gridDelta_D.z);
int3 coords0;
coords0.x = int((p0.x-gridOrg_D.x)/gridDelta_D.x);
coords0.y = int((p0.y-gridOrg_D.y)/gridDelta_D.y);
coords0.z = int((p0.z-gridOrg_D.z)/gridDelta_D.z);
int3 coords1;
coords1.x = int((p1.x-gridOrg_D.x)/gridDelta_D.x);
coords1.y = int((p1.y-gridOrg_D.y)/gridDelta_D.y);
coords1.z = int((p1.z-gridOrg_D.z)/gridDelta_D.z);
int3 coords2;
coords2.x = int((p2.x-gridOrg_D.x)/gridDelta_D.x);
coords2.y = int((p2.y-gridOrg_D.y)/gridDelta_D.y);
coords2.z = int((p2.z-gridOrg_D.z)/gridDelta_D.z);
int cellIDx = ::GetCellIdxByGridCoords(coords);
int cellIDx0 = ::GetCellIdxByGridCoords(coords0);
int cellIDx1 = ::GetCellIdxByGridCoords(coords1);
int cellIDx2 = ::GetCellIdxByGridCoords(coords2);
uint cellState = targetActiveCells_D[cellIDx];
uint cellState0 = targetActiveCells_D[cellIDx0];
uint cellState1 = targetActiveCells_D[cellIDx1];
uint cellState2 = targetActiveCells_D[cellIDx2];
// float currFlag0 = vertexFlag_D[triangleVtxIdx_D[3*idx+0]];
// float currFlag1 = vertexFlag_D[triangleVtxIdx_D[3*idx+1]];
// float currFlag2 = vertexFlag_D[triangleVtxIdx_D[3*idx+2]];
// __syncthreads();
// vertexFlag_D[triangleVtxIdx_D[3*idx+0]] = float(bool(currFlag0) || bool(1-cellState));
// vertexFlag_D[triangleVtxIdx_D[3*idx+1]] = float(bool(currFlag1) || bool(1-cellState));
// vertexFlag_D[triangleVtxIdx_D[3*idx+2]] = float(bool(currFlag2) || bool(1-cellState));
// vertexFlag_D[triangleVtxIdx_D[3*idx+0]] = 1.0;
// vertexFlag_D[triangleVtxIdx_D[3*idx+1]] = 1.0;
// vertexFlag_D[triangleVtxIdx_D[3*idx+2]] = 1.0;
// Criteria for good triangles
bool flag = bool(cellState) &&
bool(cellState0) &&
bool(cellState1) &&
bool(cellState2);
// (dotNormsAbs >= 0);
//&& (dotNormsAbs <= 0.5);
corruptTriangles_D[idx] = float(!flag);
}
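// Criterion actually in effect above ("Alternative 4"): a triangle is valid
// iff the grid cells containing its midpoint and all three of its vertices
// are active in the target volume; corruptTriangles_D[idx] stores 1.0 for
// corrupt and 0.0 for valid triangles. The gradient/normal dot product
// (dotNormsAbs) is computed but currently not part of the criterion.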
/**
* Propagates a single vertex along the external force field towards the
* target isosurface.
*
* @return The final position (xyz) and the signed path length accumulated
*         during the propagation (w)
*/
__device__ float4 UpdateVtxPosSingle_D (
float3 posStart, // Starting position
float4 *gradient_D, // External forces
float *targetVol_D, // The target volume
float minDisplScl, // Minimum displacement for convergence
float forcesScl, // General scaling factor for forces
float isovalue) { // Isovalue
float3 pos = posStart;
float sample = SampleFieldAtPosTrilin_D<float, false>(pos, targetVol_D);
bool outside = sample <= isovalue;
float extForcesScl;
if (outside) extForcesScl = 1.0;
else extForcesScl = -1.0;
float len = 0.0f;
bool converged = false;
int steps = 0;
const int maxSteps = 3;
do {
// Get volume sample
float sample = SampleFieldAtPosTrilin_D<float, false>(pos, targetVol_D);
// Switch sign and scale down if necessary
bool negative = extForcesScl < 0;
bool outside = sample <= isovalue;
int switchSign = int((negative && outside)||(!negative && !outside));
extForcesScl = extForcesScl*(1.0*(1-switchSign) - 1.0*switchSign);
extForcesScl *= (1.0*(1-switchSign) + 0.5*(switchSign));
// Get external forces sample and scale
float4 extForceTmp = SampleFieldAtPosTrilin_D<float4, false>(pos, gradient_D);
float3 extForce = make_float3(extForceTmp.x, extForceTmp.y, extForceTmp.z);
extForce = safeNormalize(extForce);
// Accumulate path
len += extForcesScl*forcesScl;
extForce *= extForcesScl*forcesScl;
// Propagate vertex and increase path length
pos += extForce;
if (length(extForce) <= minDisplScl) {
converged = true;
}
steps++;
} while (!converged || steps < maxSteps);
return make_float4(pos.x, pos.y, pos.z, len);
}
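// A short sketch of the branchless sign/scale update used above (and in the
// UpdateVtxPos_* kernels below). With s = externalForcesScl:
//
//   switchSign == 1  iff the vertex has crossed the isosurface since the
//                    last step (the sign of s and the inside/outside state
//                    disagree)
//   s <- s * (+1),  s <- s * 1.0   if switchSign == 0
//   s <- s * (-1),  s <- s * 0.5   if switchSign == 1
//
// i.e. on every crossing the step direction is flipped and the step size is
// halved, so the vertex oscillates onto the isosurface (a bisection-like
// refinement) without any divergent branches.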
/**
* Recursively integrates the per-vertex path length (uncertainty) over a
* corrupt triangle: the triangle is propagated towards the target isosurface
* and, if it still lies in an inactive cell, subdivided and processed
* recursively up to a maximum depth.
*
* @return The approximate integral; the accumulated triangle area is
*         returned via 'triArea'.
*/
__device__ float DeformableGPUSurfaceMT_IntUncertaintyOverCorruptAreaRec_D(
float3 pos1, float3 pos2, float3 pos3, // Vertex positions of the triangle
float len1, float len2, float len3, // Vertex path lengths of the triangle
float4 *gradient_D, // External forces
float *targetVol_D, // The target volume
unsigned int *targetActiveCells_D, // Active cells of the target volume
float minDisplScl, // Minimum displacement for convergence
float forcesScl, // General scaling factor for forces
float isovalue, // Isovalue
float &triArea,
uint depth
) {
const uint maxDepth = 2;
// 1. Propagate vertices until they converge to a fixed position
float4 newPosLen1, newPosLen2, newPosLen3;
newPosLen1 = UpdateVtxPosSingle_D (pos1, gradient_D, targetVol_D,
minDisplScl, forcesScl, isovalue);
newPosLen2 = UpdateVtxPosSingle_D (pos2, gradient_D, targetVol_D,
minDisplScl, forcesScl, isovalue);
newPosLen3 = UpdateVtxPosSingle_D (pos3, gradient_D, targetVol_D,
minDisplScl, forcesScl, isovalue);
float3 newPos1, newPos2, newPos3;
newPos1 = make_float3(newPosLen1.x, newPosLen1.y, newPosLen1.z);
newPos2 = make_float3(newPosLen2.x, newPosLen2.y, newPosLen2.z);
newPos3 = make_float3(newPosLen3.x, newPosLen3.y, newPosLen3.z);
// 2. Check whether the resulting triangle is valid
float3 midpoint = (newPos1+newPos2+newPos3)/3.0;
int3 coords;
coords.x = int((midpoint.x-gridOrg_D.x)/gridDelta_D.x);
coords.y = int((midpoint.y-gridOrg_D.y)/gridDelta_D.y);
coords.z = int((midpoint.z-gridOrg_D.z)/gridDelta_D.z);
int cellIDx = ::GetCellIdxByGridCoords(coords);
uint cellState = targetActiveCells_D[cellIDx];
if ((cellState == 1)||(depth >= maxDepth)) {
// printf("%.16f;%.16f;%.16f;%.16f;%.16f;%.16f;%.16f;%.16f;%.16f\n",
// newPos1.x, newPos1.y, newPos1.z,
// newPos2.x, newPos2.y, newPos2.z,
// newPos3.x, newPos3.y, newPos3.z);
// if (depth >= 2) printf("Thread %u, depth %u\n",::getThreadIdx(), depth);
// 3a. Cell is active, therefore triangle is valid
// --> Compute integrated uncertainty value
// Get triangle area
float a = length(newPos1 - newPos2);
float b = length(newPos1 - newPos3);
float c = length(newPos2 - newPos3);
// Compute area (Heron's formula)
float rad = (a + b - c)*(c + a - b)*(a + b + c)*(b + c - a);
// Make sure radicand is not negative
rad = rad > 0.0f ? rad : 0.0f;
float area = 0.25f*sqrt(rad);
triArea = area;
// Get average value
float avgValue = (len1+newPosLen1.w+len2+newPosLen2.w+len3+newPosLen3.w)/3.0f;
// Approximate integration
return triArea*avgValue;
} else {
float triArea1, triArea2, triArea3, triArea4;
// 3b. Cell is not active, therefore, triangle is not valid
// --> Subdivide and call recursively
float3 p12 = (newPos1+newPos2)/2.0;
float3 p13 = (newPos1+newPos3)/2.0;
float3 p32 = (newPos3+newPos2)/2.0;
float l12 = (len1+newPosLen1.w+len2+newPosLen2.w)/2.0;
float l13 = (len1+newPosLen1.w+len3+newPosLen3.w)/2.0;
float l32 = (len3+newPosLen3.w+len2+newPosLen2.w)/2.0;
float intUncertainty1 =
DeformableGPUSurfaceMT_IntUncertaintyOverCorruptAreaRec_D(
newPos1, p12, p13,
len1+newPosLen1.w, l12, l13,
gradient_D, targetVol_D, targetActiveCells_D,
minDisplScl, forcesScl, isovalue, triArea1,
depth+1);
float intUncertainty2 =
DeformableGPUSurfaceMT_IntUncertaintyOverCorruptAreaRec_D(
p13, p32, newPos3,
l13, l32, len3+newPosLen3.w,
gradient_D, targetVol_D, targetActiveCells_D,
minDisplScl, forcesScl, isovalue, triArea2,
depth+1);
float intUncertainty3 =
DeformableGPUSurfaceMT_IntUncertaintyOverCorruptAreaRec_D(
p12, p13, p32,
l12, l13, l32,
gradient_D, targetVol_D, targetActiveCells_D,
minDisplScl, forcesScl, isovalue, triArea3,
depth+1);
float intUncertainty4 =
DeformableGPUSurfaceMT_IntUncertaintyOverCorruptAreaRec_D(
p12, p32, newPos2,
l12, l32, len2+newPosLen2.w,
gradient_D, targetVol_D, targetActiveCells_D,
minDisplScl, forcesScl, isovalue, triArea4,
depth+1);
triArea = triArea1 + triArea2 + triArea3 + triArea4;
return intUncertainty1 + intUncertainty2 + intUncertainty3 + intUncertainty4;
}
}
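// Subdivision scheme used above: if the propagated triangle still lies in an
// inactive cell, it is split at its edge midpoints into four sub-triangles
// (a 1-to-4 split), the accumulated path lengths are averaged onto the
// midpoints, and the integration recurses with depth+1 until either an active
// cell is reached or maxDepth is exceeded. The returned value approximates
// the surface integral of the path length as
//   sum_i area(T_i) * avgPathLen(T_i).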
/**
* Integrates the uncertainty (vertex path length) over all corrupt triangles,
* one thread per triangle. Writes the per-triangle integral and the
* associated triangle area to the output buffers.
*/
__global__ void DeformableGPUSurfaceMT_IntUncertaintyOverCorruptArea_D(
float *corruptTriangles_D,
float *vertexData_D,
float *vertexPathLen_D,
uint vertexDataStride,
uint vertexDataOffsPos,
uint vertexDataOffsNormal,
uint *triangleVtxIdx_D,
float *targetVol_D,
float4 *gradient_D,
unsigned int *targetActiveCells_D,
uint triangleCnt,
float isovalue,
float minDisplScl,
float forcesScl,
float *corruptTrianglesIntUncertainty_D,
float *trianglesArea_D) {
const uint idx = ::getThreadIdx();
if (idx >= triangleCnt) {
return;
}
// Triangle is not corrupt
if (corruptTriangles_D[idx] == 0) {
return;
}
// Get initial positions from main memory
uint baseIdx0 = vertexDataStride*triangleVtxIdx_D[3*idx+0];
uint baseIdx1 = vertexDataStride*triangleVtxIdx_D[3*idx+1];
uint baseIdx2 = vertexDataStride*triangleVtxIdx_D[3*idx+2];
float3 pos1 = make_float3(
vertexData_D[baseIdx0+vertexDataOffsPos+0],
vertexData_D[baseIdx0+vertexDataOffsPos+1],
vertexData_D[baseIdx0+vertexDataOffsPos+2]);
float3 pos2 = make_float3(
vertexData_D[baseIdx1+vertexDataOffsPos+0],
vertexData_D[baseIdx1+vertexDataOffsPos+1],
vertexData_D[baseIdx1+vertexDataOffsPos+2]);
float3 pos3 = make_float3(
vertexData_D[baseIdx2+vertexDataOffsPos+0],
vertexData_D[baseIdx2+vertexDataOffsPos+1],
vertexData_D[baseIdx2+vertexDataOffsPos+2]);
// Get initial path lengths from previous morphing
float len1 = vertexPathLen_D[triangleVtxIdx_D[3*idx+0]];
float len2 = vertexPathLen_D[triangleVtxIdx_D[3*idx+1]];
float len3 = vertexPathLen_D[triangleVtxIdx_D[3*idx+2]];
float triArea = 0.0;
// Integrate path lengths
float intUncertainty = DeformableGPUSurfaceMT_IntUncertaintyOverCorruptAreaRec_D(
pos1, pos2, pos3, // Vertex positions of the triangle
len1, len2, len3, // Vertex path lengths of the triangle
gradient_D, // External forces
targetVol_D, // The target volume
targetActiveCells_D, // Active cells of the target volume
minDisplScl, // Minimum displacement for convergence
forcesScl, // General scaling factor for forces
isovalue, // Isovalue
triArea, // Area associated with this triangle
0 // Initial recursion depth
);
corruptTrianglesIntUncertainty_D[idx] = intUncertainty;
trianglesArea_D[idx] = triArea;
}
/**
* Initializes the scale factor for the external forces with either -1.0 (if
* the starting position of the vertex is inside the isosurface) or 1.0
* (otherwise). The last displacement length is initialized to a value above
* 'minDispl' so that no vertex is flagged as converged in the first iteration.
*
* @param[out] arr_D       The external force scale factors
* @param[out] displLen_D  The last displacement length per vertex
* @param[in]  volume_D    The volume the isosurface is extracted from
* @param[in]  vertexPos_D The vertex data buffer
* @param[in]  minDispl    The minimum displacement for convergence
* @param[in]  nElements   The number of vertices
* @param[in]  isoval      The isovalue that defines the isosurface
* @param[in]  dataArrOffs The offset for vertex positions in the vertex
*                         data buffer
* @param[in]  dataArrSize The stride of the vertex data buffer
*/
__global__ void DeformableGPUSurfaceMT_InitExternalForceScl_D (
float *arr_D,
float *displLen_D,
float *volume_D,
float *vertexPos_D,
float minDispl,
uint nElements,
float isoval,
uint dataArrOffs,
uint dataArrSize) {
const uint idx = getThreadIdx();
if (idx >= nElements) {
return;
}
float3 pos = make_float3(
vertexPos_D[dataArrSize*idx+dataArrOffs+0],
vertexPos_D[dataArrSize*idx+dataArrOffs+1],
vertexPos_D[dataArrSize*idx+dataArrOffs+2]);
// If the sampled value is smaller than isoval, we are outside the
// isosurface TODO Make this smarter
if (SampleFieldAtPosTrilin_D<float, false>(pos, volume_D) <= isoval) {
arr_D[idx] = 1.0;
} else {
arr_D[idx] = -1.0;
}
// Init last displ scl with something bigger than minDispl
displLen_D[idx] = minDispl + 0.1;
}
__global__ void DeformableGPUSurfaceMT_MeshLaplacian_D(
float *in_D,
uint inOffs,
uint inStride,
int *vertexNeighbours_D,
uint maxNeighbours,
uint vertexCnt,
float *out_D,
uint outOffs,
uint outStride) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
// Get initial position from global device memory
float3 inOwn = make_float3(
in_D[inStride*idx+inOffs+0],
in_D[inStride*idx+inOffs+1],
in_D[inStride*idx+inOffs+2]);
uint activeNeighbourCnt = 0;
float3 out = make_float3(0.0, 0.0, 0.0);
for(int i = 0; i < maxNeighbours; ++i) {
int isIdxValid = int(vertexNeighbours_D[maxNeighbours*idx+i] >= 0); // Check if idx != -1
float3 in;
int tmpIdx = isIdxValid*vertexNeighbours_D[maxNeighbours*idx+i]; // Map negative indices to 0
in.x = in_D[inStride*tmpIdx+inOffs+0];
in.y = in_D[inStride*tmpIdx+inOffs+1];
in.z = in_D[inStride*tmpIdx+inOffs+2];
out += (in - inOwn)*isIdxValid;
activeNeighbourCnt += 1.0f*isIdxValid;
}
out /= activeNeighbourCnt;
out_D[outStride*idx+outOffs+0] = out.x;
out_D[outStride*idx+outOffs+1] = out.y;
out_D[outStride*idx+outOffs+2] = out.z;
}
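// The kernel above evaluates the uniform ("umbrella") Laplacian
//   L(v_i) = (1/|N(i)|) * sum_{j in N(i)} (v_j - v_i),
// where N(i) are the valid entries of vertexNeighbours_D (indices >= 0).
// Invalid neighbour slots are mapped to index 0 and masked out via
// isIdxValid, so no divergent branches are needed inside the loop.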
/**
* Updates the positions of all vertices based on external and internal forces.
* The external force is computed on the fly based on the given volume.
* Samples are acquired using tricubic interpolation.
*
* @param[in] targetVolume_D The volume the isosurface is extracted
* from
* @param[in,out] vertexPosMapped_D The vertex data buffer
* @param[in] vertexExternalForces_D The external force and scale factor
* (in 'w') for all vertices
* @param[in] vertexNeighbours_D The neighbour indices of all vertices
* @param[in] gradient_D Array with the gradient
* @param[in] vtxNormals_D The current normals of all vertices
* @param[in] vertexCount The number of vertices
* @param[in] externalWeight Weighting factor for the external
* forces. The factor for internal forces
* is implicitely defined by
* 1.0-'externalWeight'
* @param[in] forcesScl General scale factor for the final
* combined force
* @param[in] stiffness The stiffness of the springs defining
* the internal forces
* @param[in] isoval The isovalue defining the isosurface
* @param[in] minDispl The minimum displacement for the
* vertices to be updated
* @param[in] dataArrOffs The vertex position offset in the
* vertex data buffer
* @param[in] dataArrSize The stride of the vertex data buffer TODO
*/
__global__ void DeformableGPUSurfaceMT_UpdateVtxPos_D(
float *targetVolume_D,
float *vertexPosMapped_D,
float *vertexExternalForcesScl_D,
float *displLen_D,
float *vtxUncertainty_D,
float4 *gradient_D,
float3 *laplacian_D,
float3 *laplacian2_D,
uint vertexCnt,
float externalWeight,
float forcesScl,
float stiffness,
float isoval,
float minDispl,
bool useCubicInterpolation,
bool trackPath,
uint dataArrOffsPos,
uint dataArrOffsNormal,
uint dataArrSize) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
// Check convergence criterion
float lastDisplLen = displLen_D[idx];
if (lastDisplLen <= minDispl) return; // Vertex is converged
const uint posBaseIdx = dataArrSize*idx+dataArrOffsPos;
/* Retrieve stuff from global device memory */
// Get initial position from global device memory
float3 posOld = make_float3(
vertexPosMapped_D[posBaseIdx+0],
vertexPosMapped_D[posBaseIdx+1],
vertexPosMapped_D[posBaseIdx+2]);
// Get initial scale factor for external forces
float externalForcesScl = vertexExternalForcesScl_D[idx];
// Get partial derivatives
float3 laplacian = laplacian_D[idx];
float3 laplacian2 = laplacian2_D[idx];
/* Update position */
// No warp divergence here, since useCubicInterpolation is the same for all
// threads
const float sampleDens = useCubicInterpolation
? SampleFieldAtPosTricub_D<float, false>(posOld, targetVolume_D)
: SampleFieldAtPosTrilin_D<float, false>(posOld, targetVolume_D);
// Switch sign and scale down if necessary
bool negative = externalForcesScl < 0;
bool outside = sampleDens <= isoval;
int switchSign = int((negative && outside)||(!negative && !outside));
externalForcesScl = externalForcesScl*(1.0*(1-switchSign) - 1.0*switchSign);
externalForcesScl *= (1.0*(1-switchSign) + 0.5*(switchSign));
//externalForcesScl *= (1.0*(1-switchSign) + (switchSign));
// Sample gradient by cubic interpolation
float4 externalForceTmp = useCubicInterpolation
? SampleFieldAtPosTricub_D<float4, false>(posOld, gradient_D)
: SampleFieldAtPosTrilin_D<float4, false>(posOld, gradient_D);
float3 externalForce;
externalForce.x = externalForceTmp.x;
externalForce.y = externalForceTmp.y;
externalForce.z = externalForceTmp.z;
// externalForce = safeNormalize(externalForce);
externalForce *= forcesScl*externalForcesScl*externalWeight;
float3 internalForce = (1.0-externalWeight)*forcesScl*((1.0 - stiffness)*laplacian - stiffness*laplacian2);
// Umbrella internal force
float3 force = externalForce + internalForce;
float3 posNew = posOld + force;
/* Write back to global device memory */
// New pos
vertexPosMapped_D[posBaseIdx+0] = posNew.x;
vertexPosMapped_D[posBaseIdx+1] = posNew.y;
vertexPosMapped_D[posBaseIdx+2] = posNew.z;
// Write external forces scale factor back to global device memory
vertexExternalForcesScl_D[idx] = externalForcesScl;
// No branching occurs here, since the parameter is set globally
float3 diff = posNew-posOld;
float diffLen = length(diff);
//float diffLenInternal = length(forcesScl*((1.0 - stiffness)*laplacian - stiffness*laplacian2));
if ((trackPath)&&(abs(externalForcesScl) == 1.0f)) {
//vtxUncertainty_D[idx] += length(externalForce);
vtxUncertainty_D[idx] += diffLen;
}
// Displ scl for convergence
displLen_D[idx] = diffLen;
}
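// Force model used above (per vertex, per iteration):
//   f = w * s_ext * forcesScl * g(p)
//     + (1 - w) * forcesScl * ((1 - k) * L(p) - k * L2(p))
// with w = externalWeight, s_ext the per-vertex sign/scale factor, g the
// sampled gradient, L the umbrella Laplacian, L2 the bi-Laplacian
// (thin-plate term) and k = stiffness. The vertex is moved by p <- p + f,
// and the step length |f| doubles as the convergence measure in displLen_D.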
/**
* Updates the positions of all vertices based on external and internal forces.
* The external force is computed on the fly based on the given volume.
* Samples are acquired using tricubic interpolation.
*
* @param[in] targetVolume_D The volume the isosurface is extracted
* from
* @param[in,out] vertexPosMapped_D The vertex data buffer
* @param[in] vertexExternalForces_D The external force and scale factor
* (in 'w') for all vertices
* @param[in] vertexNeighbours_D The neighbour indices of all vertices
* @param[in] gradient_D Array with the gradient
* @param[in] vtxNormals_D The current normals of all vertices
* @param[in] vertexCount The number of vertices
* @param[in] externalWeight Weighting factor for the external
* forces. The factor for internal forces
* is implicitly defined by
* 1.0-'externalWeight'
* @param[in] forcesScl General scale factor for the final
* combined force
* @param[in] stiffness The stiffness of the springs defining
* the internal forces
* @param[in] isoval The isovalue defining the isosurface
* @param[in] minDispl The minimum displacement for the
* vertices to be updated
* @param[in] dataArrOffs The vertex position offset in the
* vertex data buffer
* @param[in] dataArrSize The stride of the vertex data buffer TODO
*/
__global__ void DeformableGPUSurfaceMT_UpdateVtxPosNoThinPlate_D(
float *targetVolume_D,
float *vertexPosMapped_D,
float *vertexExternalForcesScl_D,
float *displLen_D,
float *vtxUncertainty_D,
float4 *gradient_D,
float3 *laplacian_D,
uint vertexCnt,
float externalWeight,
float forcesScl,
float isoval,
float minDispl,
bool useCubicInterpolation,
bool trackPath,
uint dataArrOffsPos,
uint dataArrOffsNormal,
uint dataArrSize) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
// Check convergence criterion
float lastDisplLen = displLen_D[idx];
if (lastDisplLen <= minDispl) {
displLen_D[idx] = 0.0;
return; // Vertex is converged
}
const uint posBaseIdx = dataArrSize*idx+dataArrOffsPos;
/* Retrieve stuff from global device memory */
// Get initial position from global device memory
float3 posOld = make_float3(
vertexPosMapped_D[posBaseIdx+0],
vertexPosMapped_D[posBaseIdx+1],
vertexPosMapped_D[posBaseIdx+2]);
// Get initial scale factor for external forces
float externalForcesScl = vertexExternalForcesScl_D[idx];
// Get partial derivatives
float3 laplacian = laplacian_D[idx];
/* Update position */
// No warp divergence here, since useCubicInterpolation is the same for all
// threads
const float sampleDens = useCubicInterpolation
? SampleFieldAtPosTricub_D<float, false>(posOld, targetVolume_D)
: SampleFieldAtPosTrilin_D<float, false>(posOld, targetVolume_D);
// Switch sign and scale down if necessary
bool negative = externalForcesScl < 0;
bool outside = sampleDens <= isoval;
int switchSign = int((negative && outside)||(!negative && !outside));
externalForcesScl = externalForcesScl*(1.0*(1-switchSign) - 1.0*switchSign);
externalForcesScl *= (1.0*(1-switchSign) + 0.5*(switchSign));
//externalForcesScl *= (1.0*(1-switchSign) + (switchSign));
// Sample gradient by cubic interpolation
float4 externalForceTmp = useCubicInterpolation
? SampleFieldAtPosTricub_D<float4, false>(posOld, gradient_D)
: SampleFieldAtPosTrilin_D<float4, false>(posOld, gradient_D);
float3 externalForce;
externalForce.x = externalForceTmp.x;
externalForce.y = externalForceTmp.y;
externalForce.z = externalForceTmp.z;
// externalForce = safeNormalize(externalForce);
externalForce *= forcesScl*externalForcesScl*externalWeight;
float3 internalForce = (1.0-externalWeight)*forcesScl*laplacian;
// Umbrella internal force
float3 force = externalForce + internalForce;
float3 posNew = posOld + force;
/* Write back to global device memory */
// New pos
vertexPosMapped_D[posBaseIdx+0] = posNew.x;
vertexPosMapped_D[posBaseIdx+1] = posNew.y;
vertexPosMapped_D[posBaseIdx+2] = posNew.z;
// Write external forces scale factor back to global device memory
vertexExternalForcesScl_D[idx] = externalForcesScl;
// No branching occurs here, since the parameter is set globally
float3 diff = posNew-posOld;
float diffLen = length(diff);
//float diffLenInternal = length(forcesScl*((1.0 - stiffness)*laplacian - stiffness*laplacian2));
if ((trackPath)&&(abs(externalForcesScl) == 1.0f)) {
//vtxUncertainty_D[idx] += length(externalForce);
vtxUncertainty_D[idx] += diffLen;
}
// Displ scl for convergence
displLen_D[idx] = diffLen;
}
/**
* Updates the positions of all vertices based on external and internal forces.
* The external force is computed on the fly based on the given volume.
* Samples are acquired using tricubic interpolation.
*
* @param[in] targetVolume_D The volume the isosurface is extracted
* from
* @param[in,out] vertexPosMapped_D The vertex data buffer
* @param[in] vertexExternalForces_D The external force and scale factor
* (in 'w') for all vertices
* @param[in] vertexCount The number of vertices
* @param[in] forcesScl General scale factor for the final
* combined force
* @param[in] isoval The isovalue defining the isosurface
* @param[in] minDispl The minimum displacement for the
* vertices to be updated
* @param[in] dataArrOffs The vertex position offset in the
* vertex data buffer
* @param[in] dataArrSize The stride of the vertex data buffer TODO
*/
__global__ void DeformableGPUSurfaceMT_UpdateVtxPosExternalOnly_D(
float *targetVolume_D,
float *vertexPosMapped_D,
float *vertexExternalForcesScl_D,
float *displLen_D,
float *vtxUncertainty_D,
float4 *gradient_D,
int *accumPath_D,
uint vertexCnt,
float forcesScl,
float isoval,
float minDispl,
bool useCubicInterpolation,
bool trackPath,
uint dataArrOffsPos,
uint dataArrOffsNormal,
uint dataArrSize) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
// Check convergence criterion
float lastDisplLen = displLen_D[idx];
if (lastDisplLen <= minDispl) {
displLen_D[idx] = 0.0;
return; // Vertex is converged
}
const uint posBaseIdx = dataArrSize*idx+dataArrOffsPos;
/* Retrieve stuff from global device memory */
// Get initial position from global device memory
float3 posOld = make_float3(
vertexPosMapped_D[posBaseIdx+0],
vertexPosMapped_D[posBaseIdx+1],
vertexPosMapped_D[posBaseIdx+2]);
// Get initial scale factor for external forces
float externalForcesScl = vertexExternalForcesScl_D[idx];
// Check whether the difflen is to be subtracted or added
int accumFactorOld = accumPath_D[idx];
/* Update position */
// No warp divergence here, since useCubicInterpolation is the same for all
// threads
const float sampleDens = useCubicInterpolation
? SampleFieldAtPosTricub_D<float, false>(posOld, targetVolume_D)
: SampleFieldAtPosTrilin_D<float, false>(posOld, targetVolume_D);
// Switch sign and scale down if necessary
bool negative = externalForcesScl < 0;
bool outside = sampleDens <= isoval;
int switchSign = int((negative && outside)||(!negative && !outside));
externalForcesScl = externalForcesScl*(1.0*(1-switchSign) - 1.0*switchSign);
externalForcesScl *= (1.0*(1-switchSign) + 0.5*(switchSign));
// if (bool(switchSign) && (accumPath_D[idx] != 0)) {
// accumPath_D[idx] = 0;
// } else if (bool(switchSign) && (accumPath_D[idx] == 0)) {
// accumPath_D[idx] = 1;
// }
// Flip the accumulation factor (0 -> 1, 1 -> 0); applied only if the sign switched
int accumFactorNew = (1-accumFactorOld);
int accumFactor = switchSign*accumFactorNew + (1-switchSign)*accumFactorOld;
// Sample gradient by cubic interpolation
float4 externalForceTmp = useCubicInterpolation
? SampleFieldAtPosTricub_D<float4, false>(posOld, gradient_D)
: SampleFieldAtPosTrilin_D<float4, false>(posOld, gradient_D);
float3 externalForce;
externalForce.x = externalForceTmp.x;
externalForce.y = externalForceTmp.y;
externalForce.z = externalForceTmp.z;
//externalForce = safeNormalize(externalForce);
externalForce = normalize(externalForce);
externalForce *= forcesScl*externalForcesScl;
// Update position using the external force only
float3 posNew = posOld + externalForce;
/* Write back to global device memory */
// New pos
vertexPosMapped_D[posBaseIdx+0] = posNew.x;
vertexPosMapped_D[posBaseIdx+1] = posNew.y;
vertexPosMapped_D[posBaseIdx+2] = posNew.z;
// Write external forces scale factor back to global device memory
vertexExternalForcesScl_D[idx] = externalForcesScl;
//float3 diff = posNew-posOld;
//float diffLen = length(diff);
float diffLen = abs(forcesScl*externalForcesScl);
accumPath_D[idx] = accumFactor;
// No branching since trackpath is equal for all threads
if (trackPath) {
// if (accumPath_D[idx] == 0) {
// vtxUncertainty_D[idx] += diffLen;
// } else if(accumPath_D[idx] != 0) {
// vtxUncertainty_D[idx] -= diffLen;
// }
vtxUncertainty_D[idx] += (1-accumFactor)*diffLen - accumFactor*diffLen;
}
// Displ scl for convergence
displLen_D[idx] = diffLen;
}
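// Path-length bookkeeping used above: accumPath_D[idx] toggles between 0 and
// 1 every time the vertex crosses the isosurface, so the per-step displacement
//   diffLen = |forcesScl * externalForcesScl|
// is added to vtxUncertainty_D while accumFactor == 0 and subtracted while
// accumFactor == 1. Overshoots across the surface therefore largely cancel
// out instead of inflating the accumulated path length.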
/**
* Updates the positions of all vertices based on external and internal forces.
* The external force is computed on the fly based on the given volume.
* Samples are acquired using tricubic interpolation.
*
* @param[in] targetVolume_D The volume the isosurface is extracted
* from
* @param[in,out] vertexPosMapped_D The vertex data buffer
* @param[in] vertexExternalForces_D The external force and scale factor
* (in 'w') for all vertices
* @param[in] vertexCount The number of vertices
* @param[in] forcesScl General scale factor for the final
* combined force
* @param[in] isoval The isovalue defining the isosurface
* @param[in] minDispl The minimum displacement for the
* vertices to be updated
* @param[in] dataArrOffs The vertex position offset in the
* vertex data buffer
* @param[in] dataArrSize The stride of the vertex data buffer TODO
*/
__global__ void DeformableGPUSurfaceMT_UpdateVtxPosExternalOnlySubdiv_D(
float *targetVolume_D,
float *vertexPosMapped_D,
float *vertexExternalForcesScl_D,
float *displLen_D,
float *vtxUncertainty_D,
float4 *gradient_D,
int *accumPath_D,
float *vertexFlag_D,
uint vertexCnt,
float forcesScl,
float isoval,
float minDispl,
bool useCubicInterpolation,
bool trackPath,
uint dataArrOffsPos,
uint dataArrOffsNormal,
uint dataArrSize) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
// Check convergence criterion
float lastDisplLen = displLen_D[idx];
// if ((lastDisplLen <= minDispl)||(vertexFlag_D[idx] == 0.0)) {
// displLen_D[idx] = 0.0;
// return; // Vertex is converged
// }
if (lastDisplLen <= minDispl) {
displLen_D[idx] = 0.0;
return; // Vertex is converged
}
const uint posBaseIdx = dataArrSize*idx+dataArrOffsPos;
/* Retrieve stuff from global device memory */
// Get initial position from global device memory
float3 posOld = make_float3(
vertexPosMapped_D[posBaseIdx+0],
vertexPosMapped_D[posBaseIdx+1],
vertexPosMapped_D[posBaseIdx+2]);
// Get initial scale factor for external forces
float externalForcesScl = vertexExternalForcesScl_D[idx];
// Check whether the difflen is to be subtracted or added
int accumFactorOld = accumPath_D[idx];
/* Update position */
// No warp divergence here, since useCubicInterpolation is the same for all
// threads
const float sampleDens = useCubicInterpolation
? SampleFieldAtPosTricub_D<float, false>(posOld, targetVolume_D)
: SampleFieldAtPosTrilin_D<float, false>(posOld, targetVolume_D);
// Switch sign and scale down if necessary
bool negative = externalForcesScl < 0;
bool outside = sampleDens <= isoval;
int switchSign = int((negative && outside)||(!negative && !outside));
externalForcesScl = externalForcesScl*(1.0*(1-switchSign) - 1.0*switchSign);
externalForcesScl *= (1.0*(1-switchSign) + 0.5*(switchSign));
//externalForcesScl *= (1.0*(1-switchSign) + (switchSign));
// if (bool(switchSign) && (accumPath_D[idx] != 0)) {
// accumPath_D[idx] = 0;
// } else if (bool(switchSign) && (accumPath_D[idx] == 0)) {
// accumPath_D[idx] = 1;
// }
// Flip the accumulation factor (0 -> 1, 1 -> 0); applied only if the sign switched
int accumFactorNew = (1-accumFactorOld);
int accumFactor = switchSign*accumFactorNew + (1-switchSign)*accumFactorOld;
// Sample gradient by cubic interpolation
float4 externalForceTmp = useCubicInterpolation
? SampleFieldAtPosTricub_D<float4, false>(posOld, gradient_D)
: SampleFieldAtPosTrilin_D<float4, false>(posOld, gradient_D);
float3 externalForce;
externalForce.x = externalForceTmp.x;
externalForce.y = externalForceTmp.y;
externalForce.z = externalForceTmp.z;
externalForce = safeNormalize(externalForce);
externalForce *= forcesScl*externalForcesScl;
// Update position using the external force only
float3 posNew = posOld + externalForce;
/* Write back to global device memory */
// New pos
vertexPosMapped_D[posBaseIdx+0] = posNew.x;
vertexPosMapped_D[posBaseIdx+1] = posNew.y;
vertexPosMapped_D[posBaseIdx+2] = posNew.z;
// Write external forces scale factor back to global device memory
vertexExternalForcesScl_D[idx] = externalForcesScl;
//float3 diff = posNew-posOld;
//float diffLen = length(diff);
float diffLen = abs(forcesScl*externalForcesScl);
accumPath_D[idx] = accumFactor;
// No branching since trackpath is equal for all threads
if (trackPath) {
// if (accumPath_D[idx] == 0) {
// vtxUncertainty_D[idx] += diffLen;
// } else if(accumPath_D[idx] != 0) {
// vtxUncertainty_D[idx] -= diffLen;
// }
vtxUncertainty_D[idx] += (1-accumFactor)*diffLen - accumFactor*diffLen;
}
// Displ scl for convergence
displLen_D[idx] = diffLen;
}
/*
* DeformableGPUSurfaceMT::DeformableGPUSurfaceMT
*/
DeformableGPUSurfaceMT::DeformableGPUSurfaceMT() : GPUSurfaceMT(),
vboCorruptTriangleVertexFlag(0), vboVtxPath(0), vboVtxAttr(0),
nFlaggedVertices(0) {
}
/*
* DeformableGPUSurfaceMT::DeformableGPUSurfaceMT
*/
DeformableGPUSurfaceMT::DeformableGPUSurfaceMT(const DeformableGPUSurfaceMT& other) :
GPUSurfaceMT(other) {
CudaSafeCall(this->vertexExternalForcesScl_D.Validate(other.vertexExternalForcesScl_D.GetCount()));
CudaSafeCall(cudaMemcpy(
this->vertexExternalForcesScl_D.Peek(),
other.vertexExternalForcesScl_D.PeekConst(),
this->vertexExternalForcesScl_D.GetCount()*sizeof(float2),
cudaMemcpyDeviceToDevice));
CudaSafeCall(this->externalForces_D.Validate(other.externalForces_D.GetCount()));
CudaSafeCall(cudaMemcpy(
this->externalForces_D.Peek(),
other.externalForces_D.PeekConst(),
this->externalForces_D.GetCount()*sizeof(float),
cudaMemcpyDeviceToDevice));
CudaSafeCall(this->laplacian_D.Validate(other.laplacian_D.GetCount()));
CudaSafeCall(cudaMemcpy(
this->laplacian_D.Peek(),
other.laplacian_D.PeekConst(),
this->laplacian_D.GetCount()*sizeof(float3),
cudaMemcpyDeviceToDevice));
CudaSafeCall(this->laplacian2_D.Validate(other.laplacian2_D.GetCount()));
CudaSafeCall(cudaMemcpy(
this->laplacian2_D.Peek(),
other.laplacian2_D.PeekConst(),
this->laplacian2_D.GetCount()*sizeof(float3),
cudaMemcpyDeviceToDevice));
CudaSafeCall(this->displLen_D.Validate(other.displLen_D.GetCount()));
CudaSafeCall(cudaMemcpy(
this->displLen_D.Peek(),
other.displLen_D.PeekConst(),
this->displLen_D.GetCount()*sizeof(float),
cudaMemcpyDeviceToDevice));
CudaSafeCall(this->accTriangleData_D.Validate(other.accTriangleData_D.GetCount()));
CudaSafeCall(cudaMemcpy(
this->accTriangleData_D.Peek(),
other.accTriangleData_D.PeekConst(),
this->accTriangleData_D.GetCount()*sizeof(float),
cudaMemcpyDeviceToDevice));
/* Make deep copy of corrupt triangle flag buffer */
if (other.vboCorruptTriangleVertexFlag) {
// Destroy if necessary
if (this->vboCorruptTriangleVertexFlag) {
glBindBufferARB(GL_ARRAY_BUFFER, this->vboCorruptTriangleVertexFlag);
glDeleteBuffersARB(1, &this->vboCorruptTriangleVertexFlag);
glBindBufferARB(GL_ARRAY_BUFFER, 0);
this->vboCorruptTriangleVertexFlag = 0;
}
// Create vertex buffer object for the corrupt triangle vertex flags
glGenBuffersARB(1, &this->vboCorruptTriangleVertexFlag);
CheckForGLError();
// Map as copy buffer
glBindBufferARB(GL_COPY_READ_BUFFER, other.vboCorruptTriangleVertexFlag);
glBindBufferARB(GL_COPY_WRITE_BUFFER, this->vboCorruptTriangleVertexFlag);
glBufferDataARB(GL_COPY_WRITE_BUFFER,
sizeof(float)*this->vertexCnt, 0, GL_DYNAMIC_DRAW);
// Copy data
glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, 0, 0,
sizeof(float)*this->vertexCnt);
glBindBufferARB(GL_COPY_WRITE_BUFFER, 0);
glBindBufferARB(GL_COPY_READ_BUFFER, 0);
CheckForGLError();
}
/* Make deep copy of uncertainty vbo */
if (other.vboVtxPath) {
// Destroy if necessary
if (this->vboVtxPath) {
glBindBufferARB(GL_ARRAY_BUFFER, this->vboVtxPath);
glDeleteBuffersARB(1, &this->vboVtxPath);
glBindBufferARB(GL_ARRAY_BUFFER, 0);
this->vboVtxPath = 0;
}
// Create vertex buffer object for the per-vertex path lengths
glGenBuffersARB(1, &this->vboVtxPath);
CheckForGLError();
// Map as copy buffer
glBindBufferARB(GL_COPY_READ_BUFFER, other.vboVtxPath);
glBindBufferARB(GL_COPY_WRITE_BUFFER, this->vboVtxPath);
glBufferDataARB(GL_COPY_WRITE_BUFFER,
sizeof(float)*this->vertexCnt, 0, GL_DYNAMIC_DRAW);
// Copy data
glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, 0, 0,
sizeof(float)*this->vertexCnt);
glBindBufferARB(GL_COPY_WRITE_BUFFER, 0);
glBindBufferARB(GL_COPY_READ_BUFFER, 0);
CheckForGLError();
}
}
/*
* DeformableGPUSurfaceMT::~DeformableGPUSurfaceMT
*/
DeformableGPUSurfaceMT::~DeformableGPUSurfaceMT() {
}
/*
* DeformableGPUSurfaceMT_ComputeTriangleAreas_D
*/
__global__ void DeformableGPUSurfaceMT_ComputeTriangleAreas_D(
float *trianglesArea_D,
float *vertexData_D,
uint *triangleIdx_D,
uint triangleCnt) {
const int vertexDataOffsPos = 0;
//const int vertexDataOffsNormal = 3;
//const int vertexDataOffsTexCoord = 6;
const int vertexDataStride = 9;
const uint idx = ::getThreadIdx();
if (idx >= triangleCnt) {
return;
}
float3 pos0, pos1, pos2;
pos0.x = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+0]+vertexDataOffsPos+0];
pos0.y = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+0]+vertexDataOffsPos+1];
pos0.z = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+0]+vertexDataOffsPos+2];
pos1.x = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+1]+vertexDataOffsPos+0];
pos1.y = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+1]+vertexDataOffsPos+1];
pos1.z = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+1]+vertexDataOffsPos+2];
pos2.x = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+2]+vertexDataOffsPos+0];
pos2.y = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+2]+vertexDataOffsPos+1];
pos2.z = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+2]+vertexDataOffsPos+2];
// compute edge lengths
float a = length(pos0 - pos1);
float b = length(pos0 - pos2);
float c = length(pos1 - pos2);
// Compute area (Heron's formula)
float rad = (a + b - c)*(c + a - b)*(a + b + c)*(b + c - a);
// Make sure radicand is not negative
rad = rad > 0.0f ? rad : 0.0f;
float area = 0.25f*sqrt(rad);
trianglesArea_D[idx] = area;
}
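// Heron's formula as used above, in the symmetric form
//   area = 0.25 * sqrt((a+b-c)(c+a-b)(a+b+c)(b+c-a)).
// Quick sanity check: for an equilateral triangle with edge length 2 the
// radicand is 2*2*6*2 = 48 and area = 0.25*sqrt(48) ~= 1.732 = sqrt(3), as
// expected. Degenerate triangles can make the radicand slightly negative due
// to rounding, hence the clamp to 0 before the square root.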
/*
* DeformableGPUSurfaceMT::GetTotalSurfArea
*/
float DeformableGPUSurfaceMT::GetTotalSurfArea() {
// Compute triangle areas of all triangles
if (!CudaSafeCall(this->accTriangleArea_D.Validate(this->triangleCnt))) {
return false;
}
if (!CudaSafeCall(this->accTriangleArea_D.Set(0x00))) {
return false;
}
cudaGraphicsResource* cudaTokens[2];
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboVtxData,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[1],
this->vboTriangleIdx,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt;
size_t vboSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
uint *triangleIdxPt;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&triangleIdxPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
#ifdef USE_TIMER
float dt_ms;
cudaEvent_t event1, event2;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
cudaEventRecord(event1, 0);
#endif
// Call kernel
DeformableGPUSurfaceMT_ComputeTriangleAreas_D <<< Grid(this->triangleCnt, 256), 256 >>> (
this->accTriangleArea_D.Peek(),
vboPt,
triangleIdxPt,
this->triangleCnt);
::CheckForCudaErrorSync();
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'ComputeTriangleArea_D %.10f sec\n",
dt_ms/1000.0);
#endif
// Compute sum of all triangle areas
float totalArea = thrust::reduce(
thrust::device_ptr<float>(this->accTriangleArea_D.Peek()),
thrust::device_ptr<float>(this->accTriangleArea_D.Peek() + this->triangleCnt));
::CheckForCudaErrorSync();
if (!CudaSafeCall(cudaGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
// // DEBUG Copy back and accumulate
// HostArr<float> accTriangleArea;
// accTriangleArea.Validate(this->accTriangleArea_D.GetCount());
// this->accTriangleArea_D.CopyToHost(accTriangleArea.Peek());
// float sum = 0.0f;
// for (int i = 0; i < this->accTriangleArea_D.GetCount(); ++i) {
// sum = sum + accTriangleArea.Peek()[i];
// }
// printf("sum: %f, triangles %i\n", sum, this->triangleCnt);
// return sum;
// // END DEBUG
return totalArea;
}
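// Note: the error paths above 'return false' from a function returning float,
// so a failed CUDA/GL mapping yields 0.0f and is indistinguishable from a
// genuinely empty surface at the call site. A minimal usage sketch (the
// surrounding setup is hypothetical):
//
//   DeformableGPUSurfaceMT surf;
//   // ... build and deform the surface ...
//   float area = surf.GetTotalSurfArea();   // 0.0f may also signal an error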
/*
* DeformableGPUSurfaceMT_ComputeValidTriangleAreas_D
*/
__global__ void DeformableGPUSurfaceMT_ComputeValidTriangleAreas_D(
float *trianglesArea_D,
float *vertexData_D,
uint *triangleIdx_D,
float *corruptTriFlag_D,
uint triangleCnt) {
const int vertexDataOffsPos = 0;
//const int vertexDataOffsNormal = 3;
//const int vertexDataOffsTexCoord = 6;
const int vertexDataStride = 9;
const uint idx = ::getThreadIdx();
if (idx >= triangleCnt) {
return;
}
float3 pos0, pos1, pos2;
pos0.x = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+0]+vertexDataOffsPos+0];
pos0.y = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+0]+vertexDataOffsPos+1];
pos0.z = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+0]+vertexDataOffsPos+2];
pos1.x = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+1]+vertexDataOffsPos+0];
pos1.y = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+1]+vertexDataOffsPos+1];
pos1.z = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+1]+vertexDataOffsPos+2];
pos2.x = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+2]+vertexDataOffsPos+0];
pos2.y = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+2]+vertexDataOffsPos+1];
pos2.z = vertexData_D[vertexDataStride*triangleIdx_D[3*idx+2]+vertexDataOffsPos+2];
// compute edge lengths
float a = length(pos0 - pos1);
float b = length(pos0 - pos2);
float c = length(pos1 - pos2);
// Compute area (Heron's formula)
float rad = (a + b - c)*(c + a - b)*(a + b + c)*(b + c - a);
// Make sure radicand is not negative
rad = rad > 0.0f ? rad : 0.0f;
float area = 0.25f*sqrt(rad);
trianglesArea_D[idx] = area*(1.0-corruptTriFlag_D[idx]);
}
/*
* DeformableGPUSurfaceMT::GetTotalValidSurfArea
*/
float DeformableGPUSurfaceMT::GetTotalValidSurfArea() {
// Compute triangle areas of all (non-corrupt) triangles
if (!CudaSafeCall(this->accTriangleArea_D.Validate(this->triangleCnt))) {
return false;
}
if (!CudaSafeCall(this->accTriangleArea_D.Set(0x00))) {
return false;
}
cudaGraphicsResource* cudaTokens[2];
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboVtxData,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[1],
this->vboTriangleIdx,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt;
size_t vboSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
uint *triangleIdxPt;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&triangleIdxPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
#ifdef USE_TIMER
float dt_ms;
cudaEvent_t event1, event2;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
cudaEventRecord(event1, 0);
#endif
// Call kernel
DeformableGPUSurfaceMT_ComputeValidTriangleAreas_D <<< Grid(this->triangleCnt, 256), 256 >>> (
this->accTriangleArea_D.Peek(),
vboPt,
triangleIdxPt,
this->corruptTriangles_D.Peek(),
this->triangleCnt);
::CheckForCudaErrorSync();
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'ComputeTriangleArea_D %.10f sec\n",
dt_ms/1000.0);
#endif
// Compute sum of all (non-corrupt) triangle areas
float totalArea = thrust::reduce(
thrust::device_ptr<float>(this->accTriangleArea_D.Peek()),
thrust::device_ptr<float>(this->accTriangleArea_D.Peek() + this->triangleCnt));
::CheckForCudaErrorSync();
if (!CudaSafeCall(cudaGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
// // DEBUG Copy back and accumulate
// HostArr<float> accTriangleArea;
// accTriangleArea.Validate(this->accTriangleArea_D.GetCount());
// this->accTriangleArea_D.CopyToHost(accTriangleArea.Peek());
// float sum = 0.0f;
// for (int i = 0; i < this->accTriangleArea_D.GetCount(); ++i) {
// sum = sum + accTriangleArea.Peek()[i];
// }
// printf("sum: %f, triangles %i\n", sum, this->triangleCnt);
// return sum;
// // END DEBUG
return totalArea;
}
/*
* DeformableGPUSurfaceMT::FlagCorruptTriangles
*/
bool DeformableGPUSurfaceMT::FlagCorruptTriangles(
float *volume_D,
const uint *targetActiveCells,
int3 volDim,
float3 volOrg,
float3 volDelta,
float isovalue) {
using namespace megamol::core::utility::log;
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
if (!this->InitCorruptFlagVBO(this->vertexCnt)) {
return false;
}
// Allocate memory for corrupt triangles
if (!CudaSafeCall(this->corruptTriangles_D.Validate(this->triangleCnt))) {
return false;
}
// Init with zero
if (!CudaSafeCall(this->corruptTriangles_D.Set(0x00))) {
return false;
}
// ::CheckForCudaErrorSync();
cudaGraphicsResource* cudaTokens[3];
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboVtxData,
cudaGraphicsMapFlagsNone))) {
return false;
}
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[1],
this->vboTriangleIdx,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[2],
this->vboCorruptTriangleVertexFlag,
cudaGraphicsMapFlagsNone))) {
return false;
}
// ::CheckForCudaErrorSync();
// Map CUDA resource handles
if (!CudaSafeCall(cudaGraphicsMapResources(3, cudaTokens, 0))) {
return false;
}
// ::CheckForCudaErrorSync();
/* Get mapped pointers to the vertex data buffer */
float *vboPt;
size_t vboSize;
float* vboFlagPt;
unsigned int *vboTriangleIdxPt;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt),
&vboSize,
cudaTokens[0]))) {
return false;
}
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboTriangleIdxPt),
&vboSize,
cudaTokens[1]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboFlagPt),
&vboSize,
cudaTokens[2]))) {
return false;
}
::CheckForCudaErrorSync();
if (!CudaSafeCall(cudaMemset(vboFlagPt, 0x00, this->vertexCnt*sizeof(float)))) {
return false;
}
// Call kernel
DeformableGPUSurfaceMT_FlagCorruptTriangles_D <<< Grid(this->triangleCnt, 256), 256 >>> (
vboFlagPt,
this->corruptTriangles_D.Peek(),
vboPt,
AbstractGPUSurface::vertexDataStride,
AbstractGPUSurface::vertexDataOffsPos,
AbstractGPUSurface::vertexDataOffsNormal,
vboTriangleIdxPt,
volume_D,
targetActiveCells,
(float4*)(this->externalForces_D.Peek()),
this->triangleCnt,
isovalue);
::CheckForCudaErrorSync();
// Set vertex flags according to triangle flags
HostArr<float> triFlags, vtxFlags;
HostArr<uint> triIdx;
triFlags.Validate(this->triangleCnt);
triIdx.Validate(this->triangleCnt*3);
vtxFlags.Validate(this->vertexCnt);
cudaMemcpy(vtxFlags.Peek(), vboFlagPt,
sizeof(float)*this->vertexCnt, cudaMemcpyDeviceToHost);
cudaMemcpy(triIdx.Peek(), vboTriangleIdxPt,
sizeof(uint)*this->triangleCnt*3, cudaMemcpyDeviceToHost);
cudaMemcpy(triFlags.Peek(), this->corruptTriangles_D.Peek(),
sizeof(float)*this->triangleCnt, cudaMemcpyDeviceToHost);
vtxFlags.Set(0x00);
for (int i = 0; i < this->triangleCnt; ++i) {
float triFlag = triFlags.Peek()[i];
if (triFlag == 1.0) {
vtxFlags.Peek()[triIdx.Peek()[3*i+0]] = 1.0;
vtxFlags.Peek()[triIdx.Peek()[3*i+1]] = 1.0;
vtxFlags.Peek()[triIdx.Peek()[3*i+2]] = 1.0;
}
}
// DEBUG Check validity of vertex flags
HostArr<bool> vtxFlagValid;
vtxFlagValid.Validate(this->vertexCnt);
vtxFlagValid.Set(0x00);
for (int i = 0; i < this->triangleCnt; ++i) {
float triFlag = triFlags.Peek()[i];
float vtxFlag0 = vtxFlags.Peek()[triIdx.Peek()[3*i+0]];
float vtxFlag1 = vtxFlags.Peek()[triIdx.Peek()[3*i+1]];
float vtxFlag2 = vtxFlags.Peek()[triIdx.Peek()[3*i+2]];
if (triFlag == 1.0) {
if (vtxFlag0 == 1.0) {
vtxFlagValid.Peek()[triIdx.Peek()[3*i+0]] = true;
} else {
printf("INVALIV zero VERTEX FLAG %i (0)\n", triIdx.Peek()[3*i+0]);
}
if (vtxFlag1 == 1.0) {
vtxFlagValid.Peek()[triIdx.Peek()[3*i+1]] = true;
} else {
printf("INVALIV zero VERTEX FLAG %i (1)\n", triIdx.Peek()[3*i+1]);
}
if (vtxFlag2 == 1.0) {
vtxFlagValid.Peek()[triIdx.Peek()[3*i+2]] = true;
} else {
printf("INVALIV zero VERTEX FLAG %i (2)\n", triIdx.Peek()[3*i+2]);
}
}
}
for (int i = 0; i < this->vertexCnt; ++i) {
if (vtxFlags.Peek()[i] == 1.0) {
if (vtxFlagValid.Peek()[i] == false) {
printf("INVALIV one VERTEX FLAG %i\n", i);
}
}
}
vtxFlagValid.Release();
// END DEBUG
cudaMemcpy(vboFlagPt, vtxFlags.Peek(),
sizeof(float)*this->vertexCnt, cudaMemcpyHostToDevice);
if (!CudaSafeCall(cudaGetLastError())) {
return false;
}
triIdx.Release();
vtxFlags.Release();
triFlags.Release();
if (!CudaSafeCall(cudaGetLastError())) {
return false;
}
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(cudaGraphicsUnmapResources(3, cudaTokens, 0))) {
return false;
}
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[2]))) {
return false;
}
return true;
}
/*
* DeformableGPUSurfaceMT::InitCorruptFlagVBO
*/
bool DeformableGPUSurfaceMT::InitCorruptFlagVBO(size_t vertexCnt) {
// Destroy if necessary
if (this->vboCorruptTriangleVertexFlag) {
glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER, this->vboCorruptTriangleVertexFlag);
glDeleteBuffersARB(1, &this->vboCorruptTriangleVertexFlag);
this->vboCorruptTriangleVertexFlag = 0;
}
// Create vertex buffer object for corrupt vertex flag
glGenBuffersARB(1, &this->vboCorruptTriangleVertexFlag);
glBindBufferARB(GL_ARRAY_BUFFER, this->vboCorruptTriangleVertexFlag);
glBufferDataARB(GL_ARRAY_BUFFER, sizeof(float)*vertexCnt, 0, GL_DYNAMIC_DRAW);
glBindBufferARB(GL_ARRAY_BUFFER, 0);
return CheckForGLError();
}
/*
* DeformableGPUSurfaceMT::InitVtxPathVBO
*/
bool DeformableGPUSurfaceMT::InitVtxPathVBO(size_t vertexCnt) {
// Destroy if necessary
if (this->vboVtxPath) {
glBindBufferARB(GL_ARRAY_BUFFER, this->vboVtxPath);
glDeleteBuffersARB(1, &this->vboVtxPath);
this->vboVtxPath = 0;
glBindBufferARB(GL_ARRAY_BUFFER, 0);
}
// Create vertex buffer object for the per-vertex path lengths
glGenBuffersARB(1, &this->vboVtxPath);
glBindBufferARB(GL_ARRAY_BUFFER, this->vboVtxPath);
glBufferDataARB(GL_ARRAY_BUFFER, sizeof(float)*vertexCnt, 0, GL_DYNAMIC_DRAW);
glBindBufferARB(GL_ARRAY_BUFFER, 0);
// printf("InitVtxPathVBO: %u bytes\n", sizeof(float)*vertexCnt);
return CheckForGLError();
}
/*
* DeformableGPUSurfaceMT::InitVtxAttribVBO
*/
bool DeformableGPUSurfaceMT::InitVtxAttribVBO(size_t vertexCnt) {
// Destroy if necessary
if (this->vboVtxAttr) {
glBindBufferARB(GL_ARRAY_BUFFER, this->vboVtxAttr);
glDeleteBuffersARB(1, &this->vboVtxAttr);
this->vboVtxAttr = 0;
glBindBufferARB(GL_ARRAY_BUFFER, 0);
}
// Create vertex buffer object for the per-vertex attribute
glGenBuffersARB(1, &this->vboVtxAttr);
glBindBufferARB(GL_ARRAY_BUFFER, this->vboVtxAttr);
glBufferDataARB(GL_ARRAY_BUFFER, sizeof(float)*vertexCnt, 0, GL_DYNAMIC_DRAW);
glBindBufferARB(GL_ARRAY_BUFFER, 0);
// printf("InitVtxPathVBO: %u bytes\n", sizeof(float)*vertexCnt);
return CheckForGLError();
}
/*
* DeformableGPUSurfaceMT::initExtForcesGradient
*/
bool DeformableGPUSurfaceMT::initExtForcesGradient(float *volTarget_D,
int3 volDim, float3 volOrg, float3 volDelta) {
using namespace megamol::core::utility::log;
int volSize = volDim.x*volDim.y*volDim.z;
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
// Allocate memory
if (!CudaSafeCall(this->externalForces_D.Validate(volSize*4))) {
Log::DefaultLog.WriteError(
"%s: could not allocate memory",
this->ClassName());
return false;
}
// Init with zero
if (!CudaSafeCall(this->externalForces_D.Set(0))) {
Log::DefaultLog.WriteError(
"%s: could not init memory",
this->ClassName());
return false;
}
#ifdef USE_CUDA_TIMER
float dt_ms;
cudaEvent_t event1, event2;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
cudaEventRecord(event1, 0);
#endif
// Calculate gradient using finite differences
DeformableGPUSurfaceMT_CalcVolGradient_D <<< Grid(volSize, 256), 256 >>> (
(float4*)this->externalForces_D.Peek(), volTarget_D);
#ifdef USE_CUDA_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'CalcVolGradient_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
return true;
}
/*
* DeformableGPUSurfaceMT::initExtForcesDistfield
*/
bool DeformableGPUSurfaceMT::initExtForcesDistfield(
float *volume_D,
float *vertexBuffer_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
float distfieldDist,
float isovalue) {
using namespace megamol::core::utility::log;
int volSize = volDim.x*volDim.y*volDim.z;
/* Init grid parameters */
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
// Compute distance field
if (!CudaSafeCall(this->distField_D.Validate(volSize))) {
return false;
}
#ifdef USE_TIMER
float dt_ms;
cudaEvent_t event1, event2;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
cudaEventRecord(event1, 0);
#endif
DeformableGPUSurfaceMT_ComputeDistField_D <<< Grid(volSize, 256), 256 >>> (
vertexBuffer_D,
this->distField_D.Peek(),
this->vertexCnt,
this->vertexDataOffsPos,
this->vertexDataStride);
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'ComputeDistField_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
// Compute gradient
if (!CudaSafeCall(this->externalForces_D.Validate(volSize*4))) {
return false;
}
if (!CudaSafeCall(this->externalForces_D.Set(0))) {
return false;
}
#ifdef USE_TIMER
cudaEventRecord(event1, 0);
#endif
// Calculate gradient using finite differences
DeformableGPUSurfaceMT_CalcVolGradientWithDistField_D <<< Grid(volSize, 256), 256 >>> (
(float4*)this->externalForces_D.Peek(),
volume_D,
this->distField_D.Peek(), distfieldDist, isovalue);
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'CalcVolGradientWithDistField_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
return CudaSafeCall(cudaGetLastError());
}
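/*
 * DeformableGPUSurfaceMT::initExtForcesGVF
 */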
bool DeformableGPUSurfaceMT::initExtForcesGVF(
float *volumeTarget_D,
const unsigned int *cellStatesTarget_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
float isovalue,
float gvfScl,
unsigned int gvfIt) {
using namespace megamol::core::utility::log;
/* Init grid parameters */
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
int volSize = volDim.x*volDim.y*volDim.z;
// Compute external forces
if (!CudaSafeCall(this->externalForces_D.Validate(volSize*4))) {
return false;
}
if (!CudaSafeCall(this->externalForces_D.Set(0))) {
return false;
}
if (!CudaSafeCall(this->gvfTmp_D.Validate(volSize*4))) {
return false;
}
if (!CudaSafeCall(this->gvfTmp_D.Set(0))) {
return false;
}
if (!CudaSafeCall(this->gvfConstData_D.Validate(volSize*4))) {
return false;
}
if (!CudaSafeCall(this->gvfConstData_D.Set(0))) {
return false;
}
// Use GVF
if (!DiffusionSolver::CalcGVF(
volumeTarget_D,
this->gvfConstData_D.Peek(),
cellStatesTarget_D,
volDim,
volDelta,
volOrg,
this->externalForces_D.Peek(),
this->gvfTmp_D.Peek(),
gvfIt,
gvfScl)) {
return false;
}
return true;
}
/*
* DeformableGPUSurfaceMT::initExtForcesTwoWayGVF
*/
bool DeformableGPUSurfaceMT::initExtForcesTwoWayGVF(
float *volumeSource_D,
float *volumeTarget_D,
const unsigned int *cellStatesSource_D,
const unsigned int *cellStatesTarget_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
float isovalue,
float gvfScl,
unsigned int gvfIt) {
using namespace megamol::core::utility::log;
/* Init grid parameters */
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
//#ifdef USE_TIMER
float dt_ms;
cudaEvent_t event1, event2;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
cudaEventRecord(event1, 0);
//#endif
int volSize = volDim.x*volDim.y*volDim.z;
// Compute external forces
if (!CudaSafeCall(this->externalForces_D.Validate(volSize*4))) {
return false;
}
if (!CudaSafeCall(this->externalForces_D.Set(0))) {
return false;
}
if (!CudaSafeCall(this->gvfTmp_D.Validate(volSize*4))) {
return false;
}
if (!CudaSafeCall(this->gvfTmp_D.Set(0))) {
return false;
}
if (!CudaSafeCall(this->gvfConstData_D.Validate(volSize*4))) {
return false;
}
if (!CudaSafeCall(this->gvfConstData_D.Set(0))) {
return false;
}
// Calculate two way gvf by using isotropic diffusion
if (!DiffusionSolver::CalcTwoWayGVF(
volumeSource_D,
volumeTarget_D,
cellStatesSource_D,
cellStatesTarget_D,
volDim,
volOrg,
volDelta,
this->gvfConstData_D.Peek(),
this->externalForces_D.Peek(),
this->gvfTmp_D.Peek(),
gvfIt,
gvfScl)) {
return false;
}
//#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
// Log::DefaultLog.WriteInfo(
// "%s: Time for bi-directional diffusion %f\n",
// "DeformableGPUSurfaceMT", dt_ms/1000.0f);
//#endif
// printf("GVF : %.10f\n",
// dt_ms/1000.0f);
return true;
}
/*
* DeformableGPUSurfaceMT::InitGridParams
*/
bool DeformableGPUSurfaceMT::InitGridParams(uint3 gridSize, float3 org, float3 delta) {
cudaMemcpyToSymbol(gridSize_D, &gridSize, sizeof(uint3));
cudaMemcpyToSymbol(gridOrg_D, &org, sizeof(float3));
cudaMemcpyToSymbol(gridDelta_D, &delta, sizeof(float3));
// printf("Init grid with org %f %f %f, delta %f %f %f, dim %u %u %u\n", org.x,
// org.y, org.z, delta.x, delta.y, delta.z, gridSize.x, gridSize.y,
// gridSize.z);
return CudaSafeCall(cudaGetLastError());
}
/*
* DeformableGPUSurfaceMT_IntOverTriangles_D
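 *
 * Computes, for each triangle, its area multiplied by the average of the
 * three per-vertex scalar values.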
*/
__global__ void DeformableGPUSurfaceMT_IntOverTriangles_D(
float *trianglesAreaWeightedVertexVals_D,
float *trianglesArea_D,
uint *triangleIdx_D,
float *scalarValue_D,
uint triangleCnt) {
const uint idx = ::getThreadIdx();
if (idx >= triangleCnt) {
return;
}
// Compute average
float avgVal = (scalarValue_D[triangleIdx_D[idx*3+0]] +
scalarValue_D[triangleIdx_D[idx*3+1]] +
scalarValue_D[triangleIdx_D[idx*3+2]])/3.0;
trianglesAreaWeightedVertexVals_D[idx] = avgVal*trianglesArea_D[idx];
}
/*
* DeformableGPUSurfaceMT_IntOverValidTriangles_D
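 *
 * Same as DeformableGPUSurfaceMT_IntOverTriangles_D, but triangles flagged
 * as corrupt contribute zero.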
*/
__global__ void DeformableGPUSurfaceMT_IntOverValidTriangles_D(
float *trianglesAreaWeightedVertexVals_D,
float *trianglesArea_D,
uint *triangleIdx_D,
float *scalarValue_D,
float *corruptTriFlag_D,
uint triangleCnt) {
const uint idx = ::getThreadIdx();
if (idx >= triangleCnt) {
return;
}
// Compute average
float avgVal = (scalarValue_D[triangleIdx_D[idx*3+0]] +
scalarValue_D[triangleIdx_D[idx*3+1]] +
scalarValue_D[triangleIdx_D[idx*3+2]])/3.0;
trianglesAreaWeightedVertexVals_D[idx] = avgVal*trianglesArea_D[idx]*(1.0-corruptTriFlag_D[idx]);
}
/*
* DeformableGPUSurfaceMT::IntOverSurfArea
*/
float DeformableGPUSurfaceMT::IntOverSurfArea(float *value_D) {
// Allocate buffer for the area-weighted per-triangle values
if (!CudaSafeCall(this->accTriangleData_D.Validate(this->triangleCnt))) {
return false;
}
cudaGraphicsResource* cudaTokens[1];
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboTriangleIdx,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsMapResources(1, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
uint *triangleIdxPt;
size_t vboSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&triangleIdxPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
#ifdef USE_TIMER
float dt_ms;
cudaEvent_t event1, event2;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
cudaEventRecord(event1, 0);
#endif
// Call kernel
DeformableGPUSurfaceMT_IntOverTriangles_D <<< Grid(this->triangleCnt, 256), 256 >>> (
this->accTriangleData_D.Peek(),
this->accTriangleArea_D.Peek(),
triangleIdxPt,
value_D,
this->triangleCnt);
::CheckForCudaErrorSync();
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'intOverTriangles_D %.10f sec\n",
dt_ms/1000.0);
#endif
// Sum the area-weighted values over all triangles
float integralVal = thrust::reduce(
thrust::device_ptr<float>(this->accTriangleData_D.Peek()),
thrust::device_ptr<float>(this->accTriangleData_D.Peek() + this->triangleCnt));
if (!CudaSafeCall(cudaGraphicsUnmapResources(1, cudaTokens, 0))) {
return false;
}
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
return integralVal;
}
/**
 * Integrates the per-vertex path length (stored in the vertex path VBO)
 * over the surface area.
 *
 * @return The integral value
 */
float DeformableGPUSurfaceMT::IntVtxPathOverSurfArea() {
// TODO Assumes triangle area to be computed
// Device array for accumulated data
if (!CudaSafeCall(this->accTriangleData_D.Validate(this->triangleCnt))) {
return false;
}
if (!CudaSafeCall(this->accTriangleData_D.Set(0x00))) {
return false;
}
cudaGraphicsResource* cudaTokens[2];
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboTriangleIdx,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[1],
this->vboVtxPath,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
uint *triangleIdxPt;
size_t vboSizeTri;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&triangleIdxPt), // The mapped pointer
&vboSizeTri, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *uncertaintyPt;
size_t vboSizeUncertainty;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&uncertaintyPt), // The mapped pointer
&vboSizeUncertainty, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
#ifdef USE_TIMER
float dt_ms;
cudaEvent_t event1, event2;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
cudaEventRecord(event1, 0);
#endif
// Call kernel
DeformableGPUSurfaceMT_IntOverTriangles_D <<< Grid(this->triangleCnt, 256), 256 >>> (
this->accTriangleData_D.Peek(),
this->accTriangleArea_D.Peek(),
triangleIdxPt,
uncertaintyPt,
this->triangleCnt);
::CheckForCudaErrorSync();
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'intOverTriangles_D %.10f sec\n",
dt_ms/1000.0);
#endif
// Sum the area-weighted values over all triangles
float integralVal = thrust::reduce(
thrust::device_ptr<float>(this->accTriangleData_D.Peek()),
thrust::device_ptr<float>(this->accTriangleData_D.Peek() + this->triangleCnt));
if (!CudaSafeCall(cudaGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
return integralVal;
}
/**
 * Integrates the per-vertex path length (stored in the vertex path VBO)
 * over the valid (non-corrupt) part of the surface area.
 *
 * @return The integral value
 */
float DeformableGPUSurfaceMT::IntVtxPathOverValidSurfArea() {
// TODO Assumes triangle area to be computed
// Device array for accumulated data
if (!CudaSafeCall(this->accTriangleData_D.Validate(this->triangleCnt))) {
return false;
}
if (!CudaSafeCall(this->accTriangleData_D.Set(0x00))) {
return false;
}
cudaGraphicsResource* cudaTokens[2];
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboTriangleIdx,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[1],
this->vboVtxPath,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
uint *triangleIdxPt;
size_t vboSizeTri;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&triangleIdxPt), // The mapped pointer
&vboSizeTri, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *uncertaintyPt;
size_t vboSizeUncertainty;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&uncertaintyPt), // The mapped pointer
&vboSizeUncertainty, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
#ifdef USE_TIMER
float dt_ms;
cudaEvent_t event1, event2;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
cudaEventRecord(event1, 0);
#endif
// Call kernel
DeformableGPUSurfaceMT_IntOverValidTriangles_D <<< Grid(this->triangleCnt, 256), 256 >>> (
this->accTriangleData_D.Peek(),
this->accTriangleArea_D.Peek(),
triangleIdxPt,
uncertaintyPt,
this->corruptTriangles_D.Peek(),
this->triangleCnt);
::CheckForCudaErrorSync();
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'intOverTriangles_D %.10f sec\n",
dt_ms/1000.0);
#endif
// Sum the area-weighted values over all non-corrupt triangles
float integralVal = thrust::reduce(
thrust::device_ptr<float>(this->accTriangleData_D.Peek()),
thrust::device_ptr<float>(this->accTriangleData_D.Peek() + this->triangleCnt));
if (!CudaSafeCall(cudaGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
return integralVal;
}
/**
 * Integrates the per-vertex attribute (stored in the vertex attribute VBO)
 * over the surface area.
 *
 * @return The integral value
 */
float DeformableGPUSurfaceMT::IntVtxAttribOverSurfArea() {
// TODO Assumes triangle area to be computed
// Device array for accumulated data
if (!CudaSafeCall(this->accTriangleData_D.Validate(this->triangleCnt))) {
return false;
}
if (!CudaSafeCall(this->accTriangleData_D.Set(0x00))) {
return false;
}
cudaGraphicsResource* cudaTokens[2];
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboTriangleIdx,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[1],
this->vboVtxAttr,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
uint *triangleIdxPt;
size_t vboSizeTri;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&triangleIdxPt), // The mapped pointer
&vboSizeTri, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vertexAttrPt;
size_t vboVertexAttrSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vertexAttrPt), // The mapped pointer
&vboVertexAttrSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
#ifdef USE_TIMER
float dt_ms;
cudaEvent_t event1, event2;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
cudaEventRecord(event1, 0);
#endif
// Call kernel
DeformableGPUSurfaceMT_IntOverTriangles_D <<< Grid(this->triangleCnt, 256), 256 >>> (
this->accTriangleData_D.Peek(),
this->accTriangleArea_D.Peek(),
triangleIdxPt,
vertexAttrPt,
this->triangleCnt);
::CheckForCudaErrorSync();
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'intOverTriangles_D %.10f sec\n",
dt_ms/1000.0);
#endif
// Sum the area-weighted values over all triangles
float integralVal = thrust::reduce(
thrust::device_ptr<float>(this->accTriangleData_D.Peek()),
thrust::device_ptr<float>(this->accTriangleData_D.Peek() + this->triangleCnt));
if (!CudaSafeCall(cudaGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
return integralVal;
}
/**
 * Integrates the per-vertex attribute (stored in the vertex attribute VBO)
 * over the valid (non-corrupt) part of the surface area.
 *
 * @return The integral value
 */
float DeformableGPUSurfaceMT::IntVtxAttribOverValidSurfArea() {
// TODO Assumes triangle area to be computed
// Device array for accumulated data
if (!CudaSafeCall(this->accTriangleData_D.Validate(this->triangleCnt))) {
return false;
}
if (!CudaSafeCall(this->accTriangleData_D.Set(0x00))) {
return false;
}
cudaGraphicsResource* cudaTokens[2];
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboTriangleIdx,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[1],
this->vboVtxAttr,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
uint *triangleIdxPt;
size_t vboSizeTri;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&triangleIdxPt), // The mapped pointer
&vboSizeTri, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vertexAttrPt;
size_t vboVertexAttrSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vertexAttrPt), // The mapped pointer
&vboVertexAttrSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
#ifdef USE_TIMER
float dt_ms;
cudaEvent_t event1, event2;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
cudaEventRecord(event1, 0);
#endif
// Call kernel
DeformableGPUSurfaceMT_IntOverValidTriangles_D <<< Grid(this->triangleCnt, 256), 256 >>> (
this->accTriangleData_D.Peek(),
this->accTriangleArea_D.Peek(),
triangleIdxPt,
vertexAttrPt,
this->corruptTriangles_D.Peek(),
this->triangleCnt);
::CheckForCudaErrorSync();
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'intOverTriangles_D %.10f sec\n",
dt_ms/1000.0);
#endif
// Sum the area-weighted values over all non-corrupt triangles
float integralVal = thrust::reduce(
thrust::device_ptr<float>(this->accTriangleData_D.Peek()),
thrust::device_ptr<float>(this->accTriangleData_D.Peek() + this->triangleCnt));
if (!CudaSafeCall(cudaGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
return integralVal;
}
/*
* DeformableGPUSurfaceMT::IntOverCorruptSurfArea
*/
float DeformableGPUSurfaceMT::IntOverCorruptSurfArea() {
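// Stub: integration over the corrupt surface area is not implemented here
// and always yields zero.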
return 0.0f;
}
/**
 * Propagates a single vertex position along the external forces until the
 * displacement falls below minDisplScl or maxSteps is reached.
 *
 * @return The final position (x, y, z) and the accumulated path length (w)
 */
float4 UpdateVtxPosSingle (
float3 posStart, // Starting position
float4 *gradient, // External forces
float *targetVol, // The target volume
float minDisplScl, // Minimum displacement for convergence
float forcesScl, // General scaling factor for forces
float isovalue,      // Isovalue
float org[3], float delta[3], int dim[3],
int maxSteps,
int maxLevel,
float initStepSize) { // Initial step size
float3 pos = posStart;
float sample = SampleFieldAtPosTrilin((float*)(&pos), targetVol, org, delta, dim);
bool outside = sample <= isovalue;
float extForcesScl;
if (outside) extForcesScl = 1.0;
else extForcesScl = -1.0;
float len = 0.0f;
bool converged = false;
int steps = 0;
do {
// printf("current pos: %f %f %f\n", pos.x, pos.y, pos.z);
// Get volume sample
float sample = SampleFieldAtPosTrilin((float*)(&pos), targetVol, org, delta, dim);
// Switch sign and scale down if necessary
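// (branchless: the force direction is flipped whenever the sample crosses
// the isovalue, and the step is halved to damp oscillation around the
// isosurface)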
bool negative = extForcesScl < 0;
bool outside = sample <= isovalue;
int switchSign = int((negative && outside)||(!negative && !outside));
extForcesScl = extForcesScl*(1.0*(1-switchSign) - 1.0*switchSign);
extForcesScl *= (1.0*(1-switchSign) + 0.5*(switchSign));
// Get external forces sample and scale
float4 extForceTmp = SampleFieldAtPosTrilin((float*)(&pos), gradient, org, delta, dim);
float3 extForce = make_float3(extForceTmp.x, extForceTmp.y, extForceTmp.z);
extForce = safeNormalize(extForce);
// Accumulate path
len += extForcesScl*forcesScl;
extForce *= extForcesScl*forcesScl;
// Propagate vertex and increase path length
pos += extForce;
if (length(extForce) <= minDisplScl) {
converged = true;
}
steps++;
} while (!converged && steps < maxSteps);
return make_float4(pos.x, pos.y, pos.z, len);
}
/**
 * Recursively subdivides a corrupt triangle and integrates the accumulated
 * per-vertex path length (uncertainty) over its area. Subdivision stops when
 * the triangle midpoint falls into an active cell of the target volume or the
 * maximum recursion depth is reached.
 *
 * @return The integrated uncertainty; the covered area is returned in triArea
 */
float DeformableGPUSurfaceMT::IntUncertaintyOverCorruptAreaRec(
float3 pos1, float3 pos2, float3 pos3, // Vertex positions of the triangle
float len1, float len2, float len3, // Vertex path lengths of the triangle
float4 *gradient, // External forces
float *targetVol, // The target volume
unsigned int *targetActiveCells, // Active cells of the target volume
float minDisplScl, // Minimum displacement for convergence
float forcesScl, // General scaling factor for forces
float isovalue, // Isovalue
float &triArea,
uint depth,
float org[3], float delta[3], int dim[3],
vislib::Array<float> &triArr,
int maxSteps,
int maxLevel,
float initStepSize) {
// printf("depth: %i\n", depth);
// 1. Propagate vertices until they converge to a fixed position
float4 newPosLen1, newPosLen2, newPosLen3;
newPosLen1 = UpdateVtxPosSingle(pos1, gradient, targetVol,
minDisplScl, forcesScl, isovalue, org, delta, dim,
maxSteps,
maxLevel,
initStepSize);
newPosLen2 = UpdateVtxPosSingle(pos2, gradient, targetVol,
minDisplScl, forcesScl, isovalue, org, delta, dim,
maxSteps,
maxLevel,
initStepSize);
newPosLen3 = UpdateVtxPosSingle(pos3, gradient, targetVol,
minDisplScl, forcesScl, isovalue, org, delta, dim,
maxSteps,
maxLevel,
initStepSize);
float3 newPos1, newPos2, newPos3;
newPos1 = make_float3(newPosLen1.x, newPosLen1.y, newPosLen1.z);
newPos2 = make_float3(newPosLen2.x, newPosLen2.y, newPosLen2.z);
newPos3 = make_float3(newPosLen3.x, newPosLen3.y, newPosLen3.z);
// 2. Check whether the resulting triangle is valid
float3 midpoint = (newPos1+newPos2+newPos3)/3.0;
int3 coords;
coords.x = int((midpoint.x-org[0])/delta[0]);
coords.y = int((midpoint.y-org[1])/delta[1]);
coords.z = int((midpoint.z-org[2])/delta[2]);
//int cellIDx = ::GetCellIdxByGridCoords(coords);
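// Flattened index of the cell that contains the triangle midpoint
// ((dim-1) cells per axis)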
int cellIdx = (dim[0]-1)*((dim[1]-1)*coords.z + coords.y) + coords.x;
uint cellState = targetActiveCells[cellIdx];
if ((cellState == 1) || (depth >= (int)maxLevel)) {
triArr.Add(newPos1.x);
triArr.Add(newPos1.y);
triArr.Add(newPos1.z);
triArr.Add(newPos2.x);
triArr.Add(newPos2.y);
triArr.Add(newPos2.z);
triArr.Add(newPos3.x);
triArr.Add(newPos3.y);
triArr.Add(newPos3.z);
// printf("%.16f;%.16f;%.16f;%.16f;%.16f;%.16f;%.16f;%.16f;%.16f\n",
// newPos1.x, newPos1.y, newPos1.z,
// newPos2.x, newPos2.y, newPos2.z,
// newPos3.x, newPos3.y, newPos3.z);
// 3a. Cell is active, therefore triangle is valid
// --> Compute integrated uncertainty value
// Get triangle area
float a = length(newPos1 - newPos2);
float b = length(newPos1 - newPos3);
float c = length(newPos2 - newPos3);
// Compute area (Heron's formula)
float rad = (a + b - c)*(c + a - b)*(a + b + c)*(b + c - a);
// Make sure radicand is not negative
rad = rad > 0.0f ? rad : 0.0f;
float area = 0.25f*sqrt(rad);
triArea = area;
// Get average value
float avgValue = (len1+newPosLen1.w+len2+newPosLen2.w+len3+newPosLen3.w)/3.0f;
// Approximate integration
return triArea*avgValue;
} else {
float triArea1, triArea2, triArea3, triArea4;
// 3b. Cell is not active, therefore, triangle is not valid
// --> Subdivide and call recursively
float3 p12 = (newPos1+newPos2)/2.0;
float3 p13 = (newPos1+newPos3)/2.0;
float3 p32 = (newPos3+newPos2)/2.0;
float l12 = (len1+newPosLen1.w+len2+newPosLen2.w)/2.0;
float l13 = (len1+newPosLen1.w+len3+newPosLen3.w)/2.0;
float l32 = (len3+newPosLen3.w+len2+newPosLen2.w)/2.0;
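// Recurse into the four sub-triangles (three corner triangles plus the
// center triangle)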
float intUncertainty1 =
DeformableGPUSurfaceMT::IntUncertaintyOverCorruptAreaRec(
newPos1, p12, p13,
len1+newPosLen1.w, l12, l13,
gradient, targetVol, targetActiveCells,
minDisplScl, forcesScl, isovalue, triArea1,
depth+1, org, delta, dim, triArr,
maxSteps,
maxLevel,
initStepSize);
float intUncertainty2 =
DeformableGPUSurfaceMT::IntUncertaintyOverCorruptAreaRec(
p13, p32, newPos3,
l13, l32, len3+newPosLen3.w,
gradient, targetVol, targetActiveCells,
minDisplScl, forcesScl, isovalue, triArea2,
depth+1, org, delta, dim, triArr,
maxSteps,
maxLevel,
initStepSize);
float intUncertainty3 =
DeformableGPUSurfaceMT::IntUncertaintyOverCorruptAreaRec(
p12, p13, p32,
l12, l13, l32,
gradient, targetVol, targetActiveCells,
minDisplScl, forcesScl, isovalue, triArea3,
depth+1, org, delta, dim, triArr,
maxSteps,
maxLevel,
initStepSize);
float intUncertainty4 =
DeformableGPUSurfaceMT::IntUncertaintyOverCorruptAreaRec(
p12, p32, newPos2,
l12, l32, len2+newPosLen2.w,
gradient, targetVol, targetActiveCells,
minDisplScl, forcesScl, isovalue, triArea4,
depth+1, org, delta, dim, triArr,
maxSteps,
maxLevel,
initStepSize);
triArea = triArea1 + triArea2 + triArea3 + triArea4;
return intUncertainty1 + intUncertainty2 + intUncertainty3 + intUncertainty4;
}
}
/*
* DeformableGPUSurfaceMT::IntUncertaintyOverCorruptSurfArea
*/
float DeformableGPUSurfaceMT::IntUncertaintyOverCorruptSurfArea(
float &corruptArea,
float minDisplScl,
float isovalue,
float forcesScl,
unsigned int *targetActiveCells_D,
float *targetVol_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
vislib::Array<float> &triArr,
int maxDepth,
int maxLevel,
float initStepSize) {
using namespace megamol::core::utility::log;
size_t fieldSize = volDim.x*volDim.y*volDim.z;
size_t cellCnt = (volDim.x-1)*(volDim.y-1)*(volDim.z-1);
// // Allocate memory for corrupt triangles
// if (!CudaSafeCall(this->intUncertaintyCorrupt_D.Validate(this->triangleCnt))) {
// return false;
// }
// // Init with zero
// if (!CudaSafeCall(this->intUncertaintyCorrupt_D.Set(0x00))) {
// return false;
// }
//
// if (!CudaSafeCall(this->accTriangleArea_D.Validate(this->triangleCnt))) {
// return false;
// }
// if (!CudaSafeCall(this->accTriangleArea_D.Set(0x00))){
// return false;
// }
//
// // Init constant device params
// if (!initGridParams(volDim, volOrg, volDelta)) {
// Log::DefaultLog.WriteError(
// "%s: could not init constant device params",
// this->ClassName());
// return false;
// }
//
cudaGraphicsResource* cudaTokens[3];
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboVtxData,
cudaGraphicsMapFlagsNone))) {
return false;
}
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[1],
this->vboTriangleIdx,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[2],
this->vboVtxPath,
cudaGraphicsMapFlagsNone))) {
return false;
}
// ::CheckForCudaErrorSync();
// Map cuda ressource handles
if (!CudaSafeCall(cudaGraphicsMapResources(3, cudaTokens, 0))) {
return false;
}
// ::CheckForCudaErrorSync();
/* Get mapped pointers to the vertex data buffer */
float *vboPt;
size_t vboSize;
float* vboVtxPathPt;
unsigned int *vboTriangleIdxPt;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt),
&vboSize,
cudaTokens[0]))) {
return false;
}
::CheckForCudaErrorSync();
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboTriangleIdxPt),
&vboSize,
cudaTokens[1]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboVtxPathPt),
&vboSize,
cudaTokens[2]))) {
return false;
}
//
//#ifdef USE_TIMER
// float dt_ms;
// cudaEvent_t event1, event2;
// cudaEventCreate(&event1);
// cudaEventCreate(&event2);
// cudaEventRecord(event1, 0);
//#endif
//
// ::CheckForCudaErrorSync();
//
// // Call kernel
// DeformableGPUSurfaceMT_IntUncertaintyOverCorruptArea_D <<< Grid(this->triangleCnt, 256), 256 >>> (
// this->corruptTriangles_D.Peek(),
// vboPt,
// vboVtxPathPt,
// this->vertexDataStride,
// this->vertexDataOffsPos,
// this->vertexDataOffsNormal,
// vboTriangleIdxPt,
// targetVol_D,
// (float4*)this->externalForces_D.Peek(),
// targetActiveCells_D,
// this->triangleCnt,
// isovalue,
// minDisplScl,
// forcesScl,
// this->intUncertaintyCorrupt_D.Peek(),
// this->accTriangleArea_D.Peek());
//
// ::CheckForCudaErrorSync();
//
//#ifdef USE_TIMER
// cudaEventRecord(event2, 0);
// cudaEventSynchronize(event1);
// cudaEventSynchronize(event2);
// cudaEventElapsedTime(&dt_ms, event1, event2);
// printf("CUDA time for 'intOverTriangles_D %.10f sec\n",
// dt_ms/1000.0);
//#endif
//
// // Compute sum of all (non-corrupt) triangle areas
// float integralVal = thrust::reduce(
// thrust::device_ptr<float>(this->intUncertaintyCorrupt_D.Peek()),
// thrust::device_ptr<float>(this->intUncertaintyCorrupt_D.Peek() + this->triangleCnt));
//
// corruptArea = thrust::reduce(
// thrust::device_ptr<float>(this->accTriangleArea_D.Peek()),
// thrust::device_ptr<float>(this->accTriangleArea_D.Peek() + this->triangleCnt));
//
// ::CheckForCudaErrorSync();
//
// if (!CudaSafeCall(cudaGetLastError())) {
// return false;
// }
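// The integration is performed on the CPU: copy the required buffers to
// host memory and process every corrupt triangle recursively.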
float integralVal = 0.0f;
corruptArea = 0.0f;
// Get necessary data from GPU
HostArr<float> corruptTriangles;
HostArr<float> vertexBuffer;
HostArr<unsigned int> triangleIdx;
HostArr<float> uncertainty;
HostArr<float> gradient;
HostArr<float> targetVol;
HostArr<unsigned int> targetActiveCells;
corruptTriangles.Validate(this->corruptTriangles_D.GetCount());
vertexBuffer.Validate(this->vertexDataStride*this->vertexCnt);
triangleIdx.Validate(this->triangleCnt*3);
uncertainty.Validate(this->vertexCnt);
gradient.Validate(fieldSize*4);
targetVol.Validate(fieldSize);
targetActiveCells.Validate(cellCnt);
if (!CudaSafeCall(cudaMemcpy(corruptTriangles.Peek(), this->corruptTriangles_D.Peek(),
corruptTriangles.GetCount()*sizeof(float), cudaMemcpyDeviceToHost))) {
return false;
}
if (!CudaSafeCall(cudaMemcpy(vertexBuffer.Peek(), vboPt,
vertexBuffer.GetCount()*sizeof(float), cudaMemcpyDeviceToHost))) {
return false;
}
if (!CudaSafeCall(cudaMemcpy(triangleIdx.Peek(), vboTriangleIdxPt,
triangleIdx.GetCount()*sizeof(unsigned int), cudaMemcpyDeviceToHost))) {
return false;
}
if (!CudaSafeCall(cudaMemcpy(uncertainty.Peek(), vboVtxPathPt,
uncertainty.GetCount()*sizeof(float), cudaMemcpyDeviceToHost))) {
return false;
}
if (!CudaSafeCall(cudaMemcpy(gradient.Peek(), this->externalForces_D.Peek(),
gradient.GetCount()*sizeof(float), cudaMemcpyDeviceToHost))) {
return false;
}
if (!CudaSafeCall(cudaMemcpy(targetVol.Peek(), targetVol_D,
targetVol.GetCount()*sizeof(float), cudaMemcpyDeviceToHost))) {
return false;
}
if (!CudaSafeCall(cudaMemcpy(targetActiveCells.Peek(), targetActiveCells_D,
targetActiveCells.GetCount()*sizeof(unsigned int), cudaMemcpyDeviceToHost))) {
return false;
}
// Loop over all corrupt triangles
for (int idx = 0; idx < this->triangleCnt; ++idx) {
// Check whether the triangle is corrupt
if (corruptTriangles.Peek()[idx] == 1.0f) {
// Get initial positions from main memory
uint baseIdx0 = vertexDataStride*triangleIdx.Peek()[3*idx+0];
uint baseIdx1 = vertexDataStride*triangleIdx.Peek()[3*idx+1];
uint baseIdx2 = vertexDataStride*triangleIdx.Peek()[3*idx+2];
float3 pos1 = make_float3(
vertexBuffer.Peek()[baseIdx0+vertexDataOffsPos+0],
vertexBuffer.Peek()[baseIdx0+vertexDataOffsPos+1],
vertexBuffer.Peek()[baseIdx0+vertexDataOffsPos+2]);
float3 pos2 = make_float3(
vertexBuffer.Peek()[baseIdx1+vertexDataOffsPos+0],
vertexBuffer.Peek()[baseIdx1+vertexDataOffsPos+1],
vertexBuffer.Peek()[baseIdx1+vertexDataOffsPos+2]);
float3 pos3 = make_float3(
vertexBuffer.Peek()[baseIdx2+vertexDataOffsPos+0],
vertexBuffer.Peek()[baseIdx2+vertexDataOffsPos+1],
vertexBuffer.Peek()[baseIdx2+vertexDataOffsPos+2]);
// Get initial path lengths from previous morphing
float len1 = uncertainty.Peek()[triangleIdx.Peek()[3*idx+0]];
float len2 = uncertainty.Peek()[triangleIdx.Peek()[3*idx+1]];
float len3 = uncertainty.Peek()[triangleIdx.Peek()[3*idx+2]];
integralVal += this->IntUncertaintyOverCorruptAreaRec(
pos1, pos2, pos3, // Vertex positions of the triangle
len1, len2, len3, // Vertex path lengths of the triangle
(float4*)(gradient.Peek()), // External forces
targetVol.Peek(), // The target volume
targetActiveCells.Peek(), // Active cells of the target volume
minDisplScl, // Minimum displacement for convergence
forcesScl, // General scaling factor for forces
isovalue, // Isovalue
corruptArea,
0,
(float*)&volOrg,
(float*)&volDelta,
(int*)&volDim,
triArr,
maxDepth,
maxLevel,
initStepSize);
}
}
// Cleanup
vertexBuffer.Release();
corruptTriangles.Release();
triangleIdx.Release();
uncertainty.Release();
gradient.Release();
targetVol.Release();
targetActiveCells.Release();
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(cudaGraphicsUnmapResources(3, cudaTokens, 0))) {
return false;
}
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
// ::CheckForCudaErrorSync();
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[2]))) {
return false;
}
return integralVal;
}
/*
* DeformableGPUSurfaceMT::MorphToVolumeGradient
*/
bool DeformableGPUSurfaceMT::MorphToVolumeGradient(
float *volume_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
float isovalue,
InterpolationMode interpMode,
size_t maxIt,
float surfMappedMinDisplScl,
float springStiffness,
float forceScl,
float externalForcesWeight) {
using megamol::core::utility::log::Log;
/* Init grid parameters */
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
cudaGraphicsResource* cudaTokens[2];
if ((!this->triangleIdxReady)||(!this->neighboursReady)) {
return false;
}
if (volume_D == NULL) {
return false;
}
if (!initExtForcesGradient(volume_D,
volDim, volOrg, volDelta)) {
return false;
}
// Init vbo with uncertainty information
if (!this->InitVtxPathVBO(this->vertexCnt)) {
return false;
}
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0], this->vboVtxData,
cudaGraphicsMapFlagsNone))) {
return false;
}
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[1], this->vboVtxPath,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt;
size_t vboSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboVtxPathPt;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboVtxPathPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
// Init forces scale factor with -1 or 1, depending on whether they start
// outside or inside the isosurface
if (!CudaSafeCall(this->vertexExternalForcesScl_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->displLen_D.Validate(this->vertexCnt))) {
return false;
}
#ifdef USE_TIMER
float dt_ms;
cudaEvent_t event1, event2;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
cudaEventRecord(event1, 0);
#endif
DeformableGPUSurfaceMT_InitExternalForceScl_D <<< Grid(this->vertexCnt, 256), 256 >>> (
(float*)this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
volume_D,
vboPt,
surfMappedMinDisplScl,
this->vertexCnt,
isovalue,
this->vertexDataOffsPos,
this->vertexDataStride);
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'InitExternalForceScl_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
// Iterations for new position
if (!this->updateVtxPos(
volume_D,
vboPt,
vboVtxPathPt,
volDim,
volOrg,
volDelta,
isovalue,
(interpMode == INTERP_CUBIC),
maxIt,
surfMappedMinDisplScl,
springStiffness,
forceScl,
externalForcesWeight,
false,
false)) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
return true;
}
/*
* DeformableGPUSurfaceMT::MorphToVolumeDistfield
*/
bool DeformableGPUSurfaceMT::MorphToVolumeDistfield(
float *volume_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
float isovalue,
InterpolationMode interpMode,
size_t maxIt,
float surfMappedMinDisplScl,
float springStiffness,
float forceScl,
float externalForcesWeight,
float distfieldDist) {
using megamol::core::utility::log::Log;
/* Init grid parameters */
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
cudaGraphicsResource* cudaTokens[2];
if ((!this->triangleIdxReady)||(!this->neighboursReady)) {
return false;
}
if (volume_D == NULL) {
return false;
}
// Init vbo with uncertainty information
if (!this->InitVtxPathVBO(this->vertexCnt)) {
return false;
}
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0], this->vboVtxData,
cudaGraphicsMapFlagsNone))) {
return false;
}
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[1], this->vboVtxPath,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt;
size_t vboSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboVtxPathPt;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboVtxPathPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
if (!this->initExtForcesDistfield(
volume_D,
vboPt,
volDim,
volOrg,
volDelta,
distfieldDist,
isovalue)) {
return false;
}
// Init forces scale factor with -1 or 1, depending on whether they start
// outside or inside the isosurface
if (!CudaSafeCall(this->vertexExternalForcesScl_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->displLen_D.Validate(this->vertexCnt))) {
return false;
}
#ifdef USE_TIMER
float dt_ms;
cudaEvent_t event1, event2;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
cudaEventRecord(event1, 0);
#endif
DeformableGPUSurfaceMT_InitExternalForceScl_D <<< Grid(this->vertexCnt, 256), 256 >>> (
(float*)this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
volume_D,
vboPt,
surfMappedMinDisplScl,
this->vertexCnt,
isovalue,
this->vertexDataOffsPos,
this->vertexDataStride);
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'InitExternalForceScl_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
// Iterations for new position
if (!this->updateVtxPos(
volume_D,
vboPt,
vboVtxPathPt,
volDim,
volOrg,
volDelta,
isovalue,
(interpMode == INTERP_CUBIC),
maxIt,
surfMappedMinDisplScl,
springStiffness,
forceScl,
externalForcesWeight,
true)) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
return true;
}
/*
* DeformableGPUSurfaceMT::MorphToVolumeGVF
*/
bool DeformableGPUSurfaceMT::MorphToVolumeGVF(float *volumeSource_D,
float *volumeTarget_D,
const unsigned int *targetCubeStates_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
float isovalue,
InterpolationMode interpMode,
size_t maxIt,
float surfMappedMinDisplScl,
float springStiffness,
float forceScl,
float externalForcesWeight,
float gvfScl,
unsigned int gvfIt) {
using namespace megamol::core::utility::log;
/* Init grid parameters */
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
using megamol::core::utility::log::Log;
cudaGraphicsResource* cudaTokens[2];
if ((!this->triangleIdxReady)||(!this->neighboursReady)) {
return false;
}
if (volumeTarget_D == NULL) {
return false;
}
if (!this->initExtForcesGVF(
volumeTarget_D,
targetCubeStates_D,
volDim,
volOrg,
volDelta,
isovalue,
gvfScl,
gvfIt)) {
return false;
}
// Init vbo with uncertainty information
if (!this->InitVtxPathVBO(this->vertexCnt)) {
return false;
}
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0], this->vboVtxData,
cudaGraphicsMapFlagsNone))) {
return false;
}
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[1], this->vboVtxPath,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt;
size_t vboSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboVtxPathPt;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboVtxPathPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
// Init forces scale factor with -1 or 1, depending on whether they start
// outside or inside the isosurface
if (!CudaSafeCall(this->vertexExternalForcesScl_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->displLen_D.Validate(this->vertexCnt))) {
return false;
}
#ifdef USE_TIMER
float dt_ms;
cudaEvent_t event1, event2;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
cudaEventRecord(event1, 0);
#endif
DeformableGPUSurfaceMT_InitExternalForceScl_D <<< Grid(this->vertexCnt, 256), 256 >>> (
(float*)this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
volumeTarget_D,
vboPt,
surfMappedMinDisplScl,
this->vertexCnt,
isovalue,
this->vertexDataOffsPos,
this->vertexDataStride);
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'InitExternalForceScl_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
// Iterations for new position
if (!this->updateVtxPos(
volumeTarget_D,
vboPt,
vboVtxPathPt,
volDim,
volOrg,
volDelta,
isovalue,
(interpMode == INTERP_CUBIC),
maxIt,
surfMappedMinDisplScl,
springStiffness,
forceScl,
externalForcesWeight,
true)) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
return true;
}
/*
* DeformableGPUSurfaceMT::MorphToVolumeTwoWayGVFBM
*/
bool DeformableGPUSurfaceMT::MorphToVolumeTwoWayGVFBM(
float *volumeSource_D,
float *volumeTarget_D,
const unsigned int *cellStatesSource_D,
const unsigned int *cellStatesTarget_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
float isovalue,
InterpolationMode interpMode,
size_t maxIt,
float surfMappedMinDisplScl,
float springStiffness,
float forceScl,
float externalForcesWeight,
float gvfScl,
unsigned int gvfIt,
bool trackPath,
bool recomputeGVF,
float &t_gvf,
float &t_map) {
using megamol::core::utility::log::Log;
// printf("MORPH\n");
/* Init grid parameters */
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
cudaGraphicsResource* cudaTokens[2];
if ((!this->triangleIdxReady)||(!this->neighboursReady)) {
return false;
}
if ((volumeTarget_D == NULL)||(volumeSource_D == NULL)) {
return false;
}
float dt_ms;
cudaEvent_t event1, event2;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
cudaEventRecord(event1, 0);
if (recomputeGVF) {
if (!this->initExtForcesTwoWayGVF(
volumeSource_D,
volumeTarget_D,
cellStatesSource_D,
cellStatesTarget_D,
volDim, volOrg, volDelta,
isovalue, gvfScl, gvfIt)) {
return false;
}
}
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
t_gvf = dt_ms;
// printf("GVF %f ms\n", t_gvf);
if (trackPath) {
// Init vbo with uncertainty information
if (!this->InitVtxPathVBO(this->vertexCnt)) {
return false;
}
}
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0], this->vboVtxData,
cudaGraphicsMapFlagsNone))) {
return false;
}
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[1], this->vboVtxPath,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt;
size_t vboSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboVtxPathPt;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboVtxPathPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
// Init forces scale factor with -1 or 1, depending on whether they start
// outside or inside the isosurface
if (!CudaSafeCall(this->vertexExternalForcesScl_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->displLen_D.Validate(this->vertexCnt))) {
return false;
}
cudaEventRecord(event1, 0);
DeformableGPUSurfaceMT_InitExternalForceScl_D <<< Grid(this->vertexCnt, 256), 256 >>> (
(float*)this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
volumeTarget_D,
vboPt,
surfMappedMinDisplScl,
this->vertexCnt,
isovalue,
this->vertexDataOffsPos,
this->vertexDataStride);
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'InitExternalForceScl_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
// Iterations for new position
if (!this->updateVtxPos(
volumeTarget_D,
vboPt,
vboVtxPathPt,
volDim,
volOrg,
volDelta,
isovalue,
(interpMode == INTERP_CUBIC),
maxIt,
surfMappedMinDisplScl,
springStiffness,
forceScl,
externalForcesWeight,
trackPath, // Track path
true)) { // Use external forces only
return false;
}
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
t_map = dt_ms;
if (!CudaSafeCall(cudaGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
return true;
}
/*
* DeformableGPUSurfaceMT::MorphToVolumeTwoWayGVF
*/
bool DeformableGPUSurfaceMT::MorphToVolumeTwoWayGVF(
float *volumeSource_D,
float *volumeTarget_D,
const unsigned int *cellStatesSource_D,
const unsigned int *cellStatesTarget_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
float isovalue,
InterpolationMode interpMode,
size_t maxIt,
float surfMappedMinDisplScl,
float springStiffness,
float forceScl,
float externalForcesWeight,
float gvfScl,
unsigned int gvfIt,
bool trackPath,
bool recomputeGVF) {
using megamol::core::utility::log::Log;
// printf("MORPH\n");
/* Init grid parameters */
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
cudaGraphicsResource* cudaTokens[2];
if ((!this->triangleIdxReady)||(!this->neighboursReady)) {
return false;
}
if ((volumeTarget_D == NULL)||(volumeSource_D == NULL)) {
return false;
}
//#define USE_TIMER
#ifdef USE_TIMER
float dt_ms;
cudaEvent_t event1, event2;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
cudaEventRecord(event1, 0);
#endif
if (recomputeGVF) {
if (!this->initExtForcesTwoWayGVF(
volumeSource_D,
volumeTarget_D,
cellStatesSource_D,
cellStatesTarget_D,
volDim, volOrg, volDelta,
isovalue, gvfScl, gvfIt)) {
return false;
}
}
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for GVF: %.10f sec\n",
dt_ms/1000.0f);
#endif
if (trackPath) {
// Init vbo with uncertainty information
if (!this->InitVtxPathVBO(this->vertexCnt)) {
return false;
}
}
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0], this->vboVtxData,
cudaGraphicsMapFlagsNone))) {
return false;
}
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[1], this->vboVtxPath,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt;
size_t vboSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboVtxPathPt;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboVtxPathPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
// Init forces scale factor with -1 or 1, depending on whether they start
// outside or inside the isosurface
if (!CudaSafeCall(this->vertexExternalForcesScl_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->displLen_D.Validate(this->vertexCnt))) {
return false;
}
DeformableGPUSurfaceMT_InitExternalForceScl_D <<< Grid(this->vertexCnt, 256), 256 >>> (
(float*)this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
volumeTarget_D,
vboPt,
surfMappedMinDisplScl,
this->vertexCnt,
isovalue,
this->vertexDataOffsPos,
this->vertexDataStride);
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'InitExternalForceScl_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
// Iterations for new position
if (!this->updateVtxPos(
volumeTarget_D,
vboPt,
vboVtxPathPt,
volDim,
volOrg,
volDelta,
isovalue,
(interpMode == INTERP_CUBIC),
maxIt,
surfMappedMinDisplScl,
springStiffness,
forceScl,
externalForcesWeight,
trackPath, // Track path
true)) { // Use external forces only
return false;
}
if (!CudaSafeCall(cudaGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
#undef USE_TIMER
return true;
}
/*
 * DeformableGPUSurfaceMT::MorphToVolumeTwoWayGVFSubdiv
*/
bool DeformableGPUSurfaceMT::MorphToVolumeTwoWayGVFSubdiv(
float *volumeSource_D,
float *volumeTarget_D,
const unsigned int *cellStatesSource_D,
const unsigned int *cellStatesTarget_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
float isovalue,
InterpolationMode interpMode,
size_t maxIt,
float surfMappedMinDisplScl,
float springStiffness,
float forceScl,
float externalForcesWeight,
float gvfScl,
unsigned int gvfIt,
bool trackPath,
bool recomputeGVF) {
using megamol::core::utility::log::Log;
// printf("MORPH\n");
/* Init grid parameters */
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
cudaGraphicsResource* cudaTokens[2];
if ((!this->triangleIdxReady)||(!this->neighboursReady)) {
return false;
}
if ((volumeTarget_D == NULL)||(volumeSource_D == NULL)) {
return false;
}
if (recomputeGVF) {
if (!this->initExtForcesTwoWayGVF(
volumeSource_D,
volumeTarget_D,
cellStatesSource_D,
cellStatesTarget_D,
volDim, volOrg, volDelta,
isovalue, gvfScl, gvfIt)) {
return false;
}
}
if (trackPath) {
// Init vbo with uncertainty information
if (!this->InitVtxPathVBO(this->vertexCnt)) {
return false;
}
}
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0], this->vboVtxData,
cudaGraphicsMapFlagsNone))) {
return false;
}
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[1], this->vboVtxPath,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt;
size_t vboSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboVtxPathPt;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboVtxPathPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
// Init forces scale factor with -1 or 1, depending on whether they start
// outside or inside the isosurface
if (!CudaSafeCall(this->vertexExternalForcesScl_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->displLen_D.Validate(this->vertexCnt))) {
return false;
}
#ifdef USE_TIMER
float dt_ms;
cudaEvent_t event1, event2;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
cudaEventRecord(event1, 0);
#endif
DeformableGPUSurfaceMT_InitExternalForceScl_D <<< Grid(this->vertexCnt, 256), 256 >>> (
(float*)this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
volumeTarget_D,
vboPt,
surfMappedMinDisplScl,
this->vertexCnt,
isovalue,
this->vertexDataOffsPos,
this->vertexDataStride);
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'InitExternalForceScl_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
// Iterations for new position
if (!this->updateVtxPosSubdiv(
volumeTarget_D,
vboPt,
vboVtxPathPt,
volDim,
volOrg,
volDelta,
isovalue,
(interpMode == INTERP_CUBIC),
maxIt,
surfMappedMinDisplScl,
springStiffness,
forceScl,
externalForcesWeight,
trackPath, // Track path
true)) { // Use external forces only
return false;
}
if (!CudaSafeCall(cudaGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
return true;
}
/*
* DeformableGPUSurfaceMT::operator=
*/
DeformableGPUSurfaceMT& DeformableGPUSurfaceMT::operator=(const DeformableGPUSurfaceMT &rhs) {
GPUSurfaceMT::operator =(rhs);
CudaSafeCall(this->vertexExternalForcesScl_D.Validate(rhs.vertexExternalForcesScl_D.GetCount()));
CudaSafeCall(cudaMemcpy(
this->vertexExternalForcesScl_D.Peek(),
rhs.vertexExternalForcesScl_D.PeekConst(),
this->vertexExternalForcesScl_D.GetCount()*sizeof(float),
cudaMemcpyDeviceToDevice));
CudaSafeCall(this->displLen_D.Validate(rhs.displLen_D.GetCount()));
CudaSafeCall(cudaMemcpy(
this->displLen_D.Peek(),
rhs.displLen_D.PeekConst(),
this->displLen_D.GetCount()*sizeof(float),
cudaMemcpyDeviceToDevice));
CudaSafeCall(this->externalForces_D.Validate(rhs.externalForces_D.GetCount()));
CudaSafeCall(cudaMemcpy(
this->externalForces_D.Peek(),
rhs.externalForces_D.PeekConst(),
this->externalForces_D.GetCount()*sizeof(float),
cudaMemcpyDeviceToDevice));
CudaSafeCall(this->laplacian_D.Validate(rhs.laplacian_D.GetCount()));
CudaSafeCall(cudaMemcpy(
this->laplacian_D.Peek(),
rhs.laplacian_D.PeekConst(),
this->laplacian_D.GetCount()*sizeof(float3),
cudaMemcpyDeviceToDevice));
CudaSafeCall(this->laplacian2_D.Validate(rhs.laplacian2_D.GetCount()));
CudaSafeCall(cudaMemcpy(
this->laplacian2_D.Peek(),
rhs.laplacian2_D.PeekConst(),
this->laplacian2_D.GetCount()*sizeof(float3),
cudaMemcpyDeviceToDevice));
CudaSafeCall(this->gvfTmp_D.Validate(rhs.gvfTmp_D.GetCount()));
CudaSafeCall(cudaMemcpy(
this->gvfTmp_D.Peek(),
rhs.gvfTmp_D.PeekConst(),
this->gvfTmp_D.GetCount()*sizeof(float),
cudaMemcpyDeviceToDevice));
CudaSafeCall(this->gvfConstData_D.Validate(rhs.gvfConstData_D.GetCount()));
CudaSafeCall(cudaMemcpy(
this->gvfConstData_D.Peek(),
rhs.gvfConstData_D.PeekConst(),
this->gvfConstData_D.GetCount()*sizeof(float),
cudaMemcpyDeviceToDevice));
CudaSafeCall(this->distField_D.Validate(rhs.distField_D.GetCount()));
CudaSafeCall(cudaMemcpy(
this->distField_D.Peek(),
rhs.distField_D.PeekConst(),
this->distField_D.GetCount()*sizeof(float),
cudaMemcpyDeviceToDevice));
CudaSafeCall(this->accTriangleData_D.Validate(rhs.accTriangleData_D.GetCount()));
CudaSafeCall(cudaMemcpy(
this->accTriangleData_D.Peek(),
rhs.accTriangleData_D.PeekConst(),
this->accTriangleData_D.GetCount()*sizeof(float),
cudaMemcpyDeviceToDevice));
CudaSafeCall(this->corruptTriangles_D.Validate(rhs.corruptTriangles_D.GetCount()));
CudaSafeCall(cudaMemcpy(
this->corruptTriangles_D.Peek(),
rhs.corruptTriangles_D.PeekConst(),
this->corruptTriangles_D.GetCount()*sizeof(float),
cudaMemcpyDeviceToDevice));
CudaSafeCall(this->accTriangleArea_D.Validate(rhs.accTriangleArea_D.GetCount()));
CudaSafeCall(cudaMemcpy(
this->accTriangleArea_D.Peek(),
rhs.accTriangleArea_D.PeekConst(),
this->accTriangleArea_D.GetCount()*sizeof(float),
cudaMemcpyDeviceToDevice));
/* Make deep copy of corrupt triangle flag buffer */
if (rhs.vboCorruptTriangleVertexFlag) {
// Destroy if necessary
if (this->vboCorruptTriangleVertexFlag) {
glBindBufferARB(GL_ARRAY_BUFFER, this->vboCorruptTriangleVertexFlag);
glDeleteBuffersARB(1, &this->vboCorruptTriangleVertexFlag);
glBindBufferARB(GL_ARRAY_BUFFER, 0);
this->vboCorruptTriangleVertexFlag = 0;
}
        // Create vertex buffer object for the corrupt triangle vertex flags
glGenBuffersARB(1, &this->vboCorruptTriangleVertexFlag);
CheckForGLError();
// Map as copy buffer
glBindBufferARB(GL_COPY_READ_BUFFER, rhs.vboCorruptTriangleVertexFlag);
glBindBufferARB(GL_COPY_WRITE_BUFFER, this->vboCorruptTriangleVertexFlag);
glBufferDataARB(GL_COPY_WRITE_BUFFER,
sizeof(float)*this->vertexCnt, 0, GL_DYNAMIC_DRAW);
// Copy data
glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, 0, 0,
sizeof(float)*this->vertexCnt);
glBindBufferARB(GL_COPY_WRITE_BUFFER, 0);
glBindBufferARB(GL_COPY_READ_BUFFER, 0);
CheckForGLError();
}
/* Make deep copy of uncertainty vbo */
if (rhs.vboVtxPath) {
// Destroy if necessary
if (this->vboVtxPath) {
glBindBufferARB(GL_ARRAY_BUFFER, this->vboVtxPath);
glDeleteBuffersARB(1, &this->vboVtxPath);
glBindBufferARB(GL_ARRAY_BUFFER, 0);
this->vboVtxPath = 0;
}
        // Create vertex buffer object for the vertex path (uncertainty) data
glGenBuffersARB(1, &this->vboVtxPath);
CheckForGLError();
// Map as copy buffer
glBindBufferARB(GL_COPY_READ_BUFFER, rhs.vboVtxPath);
glBindBufferARB(GL_COPY_WRITE_BUFFER, this->vboVtxPath);
glBufferDataARB(GL_COPY_WRITE_BUFFER,
sizeof(float)*this->vertexCnt, 0, GL_DYNAMIC_DRAW);
// Copy data
glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, 0, 0,
sizeof(float)*this->vertexCnt);
glBindBufferARB(GL_COPY_WRITE_BUFFER, 0);
glBindBufferARB(GL_COPY_READ_BUFFER, 0);
CheckForGLError();
}
return *this;
}
/*
* DeformableGPUSurfaceMT_GetTriangleEdgeCnt_D
*/
__global__ void DeformableGPUSurfaceMT_GetTriangleEdgeCnt_D (
int *triangleEdgeOffs_D,
uint *triangleNeighbors_D,
uint triangleCnt) {
const uint triIdx = ::getThreadIdx();
if (triIdx >= triangleCnt) return;
uint cnt = 0;
uint n0 = triangleNeighbors_D[3*triIdx+0];
cnt = cnt + int(n0 > triIdx);
uint n1 = triangleNeighbors_D[3*triIdx+1];
cnt = cnt + int(n1 > triIdx);
uint n2 = triangleNeighbors_D[3*triIdx+2];
cnt = cnt + int(n2 > triIdx);
triangleEdgeOffs_D[triIdx] = cnt;
}
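/*
 * getAdjEdge_D
 *
 * Returns the two vertex indices of the edge shared by the triangles
 * (v0, v1, v2) and (w0, w1, w2).
 */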
__device__ uint2 getAdjEdge_D (uint v0, uint v1, uint v2,
uint w0, uint w1, uint w2) {
int idx0=-1, idx1=-1;
int v[3], w[3];
v[0] = v0; v[1] = v1; v[2] = v2;
w[0] = w0; w[1] = w1; w[2] = w2;
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
if (v[i] == w[j]) {
if (idx0 < 0) {
idx0 = v[i];
} else {
if (v[i] != idx0) {
idx1 = v[i];
}
}
}
}
}
return make_uint2(idx0, idx1);
}
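/*
 * hasAdjEdge_D
 *
 * Returns true if the triangles (v0, v1, v2) and (w0, w1, w2) share at
 * least two vertices, i.e. a common edge.
 */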
__device__ bool hasAdjEdge_D (uint v0, uint v1, uint v2,
uint w0, uint w1, uint w2) {
int cnt = 0;
int idx0 = -1;
int v[3], w[3];
v[0] = v0; v[1] = v1; v[2] = v2;
w[0] = w0; w[1] = w1; w[2] = w2;
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
if (v[i] == w[j]) {
if (idx0 < 0) {
idx0 = v[i];
cnt++;
} else {
if (v[i] != idx0) {
cnt++;
}
}
}
}
}
if (cnt >=2) return true;
else return false;
}
/*
* DeformableGPUSurfaceMT_BuildEdgeList_D
*/
__global__ void DeformableGPUSurfaceMT_BuildEdgeList_D (
uint *edgeList_D,
int *triangleEdgeOffs_D,
uint *triangleNeighbors_D,
uint *triangleIdx_D,
uint triangleCnt) {
const uint triIdx = ::getThreadIdx();
if (triIdx >= triangleCnt) return;
uint3 idx = make_uint3(triangleIdx_D[3*triIdx+0],
triangleIdx_D[3*triIdx+1],
triangleIdx_D[3*triIdx+2]);
uint cnt = 0;
uint n0 = triangleNeighbors_D[3*triIdx+0];
uint offs = triangleEdgeOffs_D[triIdx];
    // TODO Is comparing all three vertex indices necessary, or would two suffice?
if (n0 > triIdx) {
uint3 nIdx = make_uint3(triangleIdx_D[3*n0+0],
triangleIdx_D[3*n0+1],
triangleIdx_D[3*n0+2]);
uint2 e = getAdjEdge_D(idx.x, idx.y, idx.z, nIdx.x, nIdx.y, nIdx.z);
// printf("%u %u: %u %u %u, %u %u %u\n", e.x, e.y, idx.x, idx.y, idx.z, nIdx.x, nIdx.y, nIdx.z);
edgeList_D[2*offs+0] = e.x;
edgeList_D[2*offs+1] = e.y;
// printf("edge %u %u\n", e.x, e.y);
cnt++;
}
uint n1 = triangleNeighbors_D[3*triIdx+1];
if (n1 > triIdx) {
uint3 nIdx = make_uint3(triangleIdx_D[3*n1+0],
triangleIdx_D[3*n1+1],
triangleIdx_D[3*n1+2]);
uint2 e = getAdjEdge_D(idx.x, idx.y, idx.z, nIdx.x, nIdx.y, nIdx.z);
edgeList_D[2*(offs+cnt)+0] = e.x;
edgeList_D[2*(offs+cnt)+1] = e.y;
cnt++;
}
uint n2 = triangleNeighbors_D[3*triIdx+2];
if (n2 > triIdx) {
uint3 nIdx = make_uint3(triangleIdx_D[3*n2+0],
triangleIdx_D[3*n2+1],
triangleIdx_D[3*n2+2]);
uint2 e = getAdjEdge_D(idx.x, idx.y, idx.z, nIdx.x, nIdx.y, nIdx.z);
edgeList_D[2*(offs+cnt)+0] = e.x;
edgeList_D[2*(offs+cnt)+1] = e.y;
}
}
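/*
 * getLocalEdgeOffsInTriangle_D
 *
 * Returns the local offset of the edge (i0, i1) among the edges owned by
 * triangle 'triIdx', i.e. the edges whose neighboring triangle has a
 * larger index.
 */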
__device__ uint getLocalEdgeOffsInTriangle_D(
uint i0,
uint i1,
uint *triangleNeighbors_D,
uint *triangleIdx_D,
uint triIdx) {
uint cnt = 0;
uint v[3];
v[0] = triangleIdx_D[3*triIdx+0];
v[1] = triangleIdx_D[3*triIdx+1];
v[2] = triangleIdx_D[3*triIdx+2];
uint n[3];
n[0] = triangleNeighbors_D[3*triIdx+0];
n[1] = triangleNeighbors_D[3*triIdx+1];
n[2] = triangleNeighbors_D[3*triIdx+2];
for (int i = 0; i < 3; ++i) {
if (n[i] < triIdx) continue; // This edge is not associated with this triangle
        if (((v[i] == i0)&&(v[(i+1)%3] == i1))||
            ((v[i] == i1)&&(v[(i+1)%3] == i0))) {
cnt++;
break;
} else {
cnt++;
}
}
return cnt-1;
}
/*
* DeformableGPUSurfaceMT_ComputeTriEdgeList_D
*/
__global__ void DeformableGPUSurfaceMT_ComputeTriEdgeList_D (
uint *triEdgeList_D,
int *triangleEdgeOffs_D,
uint *triangleNeighbors_D,
uint *triangleIdx_D,
uint triangleCnt) {
const uint triIdx = ::getThreadIdx();
if (triIdx >= triangleCnt) return;
uint3 idx = make_uint3(triangleIdx_D[3*triIdx+0],
triangleIdx_D[3*triIdx+1],
triangleIdx_D[3*triIdx+2]);
//uint offs = triangleEdgeOffs_D[triIdx];
// Get first edge
uint n0 = triangleNeighbors_D[3*triIdx+0];
uint nGlobalOffs;
uint nLocalOffs;
if (n0 < triIdx) { // Edge is associated with neighbor
nGlobalOffs = triangleEdgeOffs_D[n0];
nLocalOffs = getLocalEdgeOffsInTriangle_D(idx.x, idx.y,
triangleNeighbors_D, triangleIdx_D, n0);
    } else { // Edge is associated with self
nGlobalOffs = triangleEdgeOffs_D[triIdx];
nLocalOffs = getLocalEdgeOffsInTriangle_D(idx.x, idx.y,
triangleNeighbors_D, triangleIdx_D, triIdx);
}
triEdgeList_D[3*triIdx+0] = nGlobalOffs + nLocalOffs;
// Get second edge
uint n1 = triangleNeighbors_D[3*triIdx+1];
if (n1 < triIdx) { // Edge is associated with neighbor
nGlobalOffs = triangleEdgeOffs_D[n1];
nLocalOffs = getLocalEdgeOffsInTriangle_D(idx.y, idx.z,
triangleNeighbors_D, triangleIdx_D, n1);
    } else { // Edge is associated with self
nGlobalOffs = triangleEdgeOffs_D[triIdx];
nLocalOffs = getLocalEdgeOffsInTriangle_D(idx.y, idx.z,
triangleNeighbors_D, triangleIdx_D, triIdx);
}
triEdgeList_D[3*triIdx+1] = nGlobalOffs + nLocalOffs;
// Get third edge
uint n2 = triangleNeighbors_D[3*triIdx+2];
if (n2 < triIdx) { // Edge is associated with neighbor
nGlobalOffs = triangleEdgeOffs_D[n2];
nLocalOffs = getLocalEdgeOffsInTriangle_D(idx.z, idx.x,
triangleNeighbors_D, triangleIdx_D, n2);
    } else { // Edge is associated with self
nGlobalOffs = triangleEdgeOffs_D[triIdx];
nLocalOffs = getLocalEdgeOffsInTriangle_D(idx.z, idx.x,
triangleNeighbors_D, triangleIdx_D, triIdx);
}
triEdgeList_D[3*triIdx+2] = nGlobalOffs + nLocalOffs;
}
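/*
 * FlagLongEdges_D
 *
 * Sets the flag of every edge whose squared length exceeds 'maxLenSqrt'
 * (the squared maximum edge length) to 1, all others to 0.
 */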
__global__ void FlagLongEdges_D(
uint *edgeFlag_D,
uint *edges_D,
float *vertexData_D,
float maxLenSqrt,
uint edgeCnt) {
const uint idx = ::getThreadIdx();
if (idx >= edgeCnt) return;
float3 pos0 = make_float3(vertexData_D[9*edges_D[2*idx+0]+0],
vertexData_D[9*edges_D[2*idx+0]+1],
vertexData_D[9*edges_D[2*idx+0]+2]);
float3 pos1 = make_float3(vertexData_D[9*edges_D[2*idx+1]+0],
vertexData_D[9*edges_D[2*idx+1]+1],
vertexData_D[9*edges_D[2*idx+1]+2]);
float lenSqrt = (pos0.x - pos1.x)*(pos0.x - pos1.x) +
(pos0.y - pos1.y)*(pos0.y - pos1.y) +
(pos0.z - pos1.z)*(pos0.z - pos1.z);
edgeFlag_D[idx] = uint(lenSqrt > maxLenSqrt);
}
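/*
 * ComputeNewVertices
 *
 * Places a new vertex at the midpoint of every flagged edge and marks it
 * as new in the vertex flag buffer.
 */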
__global__ void ComputeNewVertices(
float *newVertices_D,
float *vertexFlag_D,
uint *subDivEdgeIdxOffs_D,
uint *edgeFlag_D,
uint *edges_D,
float *vertexData_D,
uint oldVertexCnt,
uint edgeCnt) {
const uint idx = ::getThreadIdx();
if (idx >= edgeCnt) return;
if (edgeFlag_D[idx] == 0) return;
float3 pos0 = make_float3(vertexData_D[9*edges_D[2*idx+0]+0],
vertexData_D[9*edges_D[2*idx+0]+1],
vertexData_D[9*edges_D[2*idx+0]+2]);
float3 pos1 = make_float3(vertexData_D[9*edges_D[2*idx+1]+0],
vertexData_D[9*edges_D[2*idx+1]+1],
vertexData_D[9*edges_D[2*idx+1]+2]);
float3 posNew = (pos1+pos0)*0.5;
uint edgeIdxOffs = subDivEdgeIdxOffs_D[idx];
newVertices_D[3*edgeIdxOffs+0] = posNew.x;
newVertices_D[3*edgeIdxOffs+1] = posNew.y;
newVertices_D[3*edgeIdxOffs+2] = posNew.z;
vertexFlag_D[oldVertexCnt+edgeIdxOffs] = 1.0; // mark this vertex as new
// printf("Vertex %f %f %f\n", posNew.x, posNew.y, posNew.z);
}
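/*
 * ComputeSubdivCnt_D
 *
 * Determines for every triangle how many triangles it will be subdivided
 * into (4, 3, 2, or 0 if it is kept unchanged), based on the flags of its
 * three edges.
 */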
__global__ void ComputeSubdivCnt_D(
uint *subdivCnt_D,
uint *triangleEdgeList_D,
uint *edgeFlag_D,
uint *edges_D,
uint *oldTrianglesIdxOffset,
uint triangleCnt) {
const uint idx = ::getThreadIdx();
if (idx >= triangleCnt) return;
uint edgeIdx0 = triangleEdgeList_D[3*idx+0];
uint edgeIdx1 = triangleEdgeList_D[3*idx+1];
uint edgeIdx2 = triangleEdgeList_D[3*idx+2];
bool flag0 = bool(edgeFlag_D[edgeIdx0]);
bool flag1 = bool(edgeFlag_D[edgeIdx1]);
bool flag2 = bool(edgeFlag_D[edgeIdx2]);
if (flag0 && flag1 && flag2) {
subdivCnt_D[idx] = 4;
oldTrianglesIdxOffset[idx] = 0;
} else if ((flag0 && flag1)||(flag1 && flag2)||(flag2 && flag0)) {
subdivCnt_D[idx] = 3;
oldTrianglesIdxOffset[idx] = 0;
} else if (flag0 || flag1 || flag2) {
subdivCnt_D[idx] = 2;
oldTrianglesIdxOffset[idx] = 0;
} else {
subdivCnt_D[idx] = 0;
oldTrianglesIdxOffset[idx] = 1;
}
}
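/*
 * ComputeSubdiv_D
 *
 * Creates the index triplets of the triangles spawned by the edge
 * subdivision and updates the per-triangle subdivision levels.
 */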
// TODO The orientation of new triangles should match that of neighboring triangles
__global__ void ComputeSubdiv_D(
uint *newTriangles,
uint *newTriangleIdxOffsets,
uint *triangleEdgeList_D,
uint *triangleIdx_D,
uint *edgeFlag_D,
uint *edges_D,
uint *subDivEdgeIdxOffs_D,
uint *oldSubDivLevels_D,
uint *subDivLevels_D,
uint *oldTrianglesIdxOffsets_D,
uint vertexCntOld,
uint keptTrianglesCnt,
uint triangleCnt) {
const uint idx = ::getThreadIdx();
if (idx >= triangleCnt) return;
uint edgeIdx0 = triangleEdgeList_D[3*idx+0];
uint edgeIdx1 = triangleEdgeList_D[3*idx+1];
uint edgeIdx2 = triangleEdgeList_D[3*idx+2];
bool flag0 = bool(edgeFlag_D[edgeIdx0]);
bool flag1 = bool(edgeFlag_D[edgeIdx1]);
bool flag2 = bool(edgeFlag_D[edgeIdx2]);
uint v0 = triangleIdx_D[3*idx+0];
uint v1 = triangleIdx_D[3*idx+1];
uint v2 = triangleIdx_D[3*idx+2];
uint e0 = triangleEdgeList_D[3*idx+0];
uint e1 = triangleEdgeList_D[3*idx+1];
uint e2 = triangleEdgeList_D[3*idx+2];
uint triIdxOffs = newTriangleIdxOffsets[idx];
if (flag0 && flag1 && flag2) { // Spawn 4 new triangles
uint vNew0 = vertexCntOld + subDivEdgeIdxOffs_D[e0];
uint vNew1 = vertexCntOld + subDivEdgeIdxOffs_D[e1];
uint vNew2 = vertexCntOld + subDivEdgeIdxOffs_D[e2];
// #0
newTriangles[3*triIdxOffs+0] = v0;
newTriangles[3*triIdxOffs+1] = vNew0;
newTriangles[3*triIdxOffs+2] = vNew2;
// #1
newTriangles[3*triIdxOffs+3] = v1;
newTriangles[3*triIdxOffs+4] = vNew1;
newTriangles[3*triIdxOffs+5] = vNew0;
// #2
newTriangles[3*triIdxOffs+6] = v2;
newTriangles[3*triIdxOffs+7] = vNew2;
newTriangles[3*triIdxOffs+8] = vNew1;
// #3
newTriangles[3*triIdxOffs+9] = vNew0;
newTriangles[3*triIdxOffs+10] = vNew1;
newTriangles[3*triIdxOffs+11] = vNew2;
// Write subdiv levels
uint parentSubdiv = oldSubDivLevels_D[idx];
subDivLevels_D[keptTrianglesCnt+triIdxOffs+0] = parentSubdiv + 1;
subDivLevels_D[keptTrianglesCnt+triIdxOffs+1] = parentSubdiv + 1;
subDivLevels_D[keptTrianglesCnt+triIdxOffs+2] = parentSubdiv + 1;
subDivLevels_D[keptTrianglesCnt+triIdxOffs+3] = parentSubdiv + 1;
} else if (flag0 && flag1) { // Spawn 3 new triangles
uint vNew0 = vertexCntOld + subDivEdgeIdxOffs_D[e0];
uint vNew1 = vertexCntOld + subDivEdgeIdxOffs_D[e1];
// #0
newTriangles[3*triIdxOffs+0] = v1;
newTriangles[3*triIdxOffs+1] = vNew1;
newTriangles[3*triIdxOffs+2] = vNew0;
// #1
newTriangles[3*triIdxOffs+3] = v0;
newTriangles[3*triIdxOffs+4] = vNew0;
newTriangles[3*triIdxOffs+5] = vNew1;
// #2
newTriangles[3*triIdxOffs+6] = v2;
newTriangles[3*triIdxOffs+7] = v0;
newTriangles[3*triIdxOffs+8] = vNew1;
// Write subdiv levels
uint parentSubdiv = oldSubDivLevels_D[idx];
subDivLevels_D[keptTrianglesCnt+triIdxOffs+0] = parentSubdiv + 1;
subDivLevels_D[keptTrianglesCnt+triIdxOffs+1] = parentSubdiv + 1;
subDivLevels_D[keptTrianglesCnt+triIdxOffs+2] = parentSubdiv + 1;
} else if (flag1 && flag2) { // Spawn 3 new triangles
uint vNew1 = vertexCntOld + subDivEdgeIdxOffs_D[e1];
uint vNew2 = vertexCntOld + subDivEdgeIdxOffs_D[e2];
// #0
newTriangles[3*triIdxOffs+0] = v2;
newTriangles[3*triIdxOffs+1] = vNew2;
newTriangles[3*triIdxOffs+2] = vNew1;
// #1
newTriangles[3*triIdxOffs+3] = v0;
newTriangles[3*triIdxOffs+4] = vNew1;
newTriangles[3*triIdxOffs+5] = vNew2;
// #2
newTriangles[3*triIdxOffs+6] = v0;
newTriangles[3*triIdxOffs+7] = v1;
newTriangles[3*triIdxOffs+8] = vNew1;
// Write subdiv levels
uint parentSubdiv = oldSubDivLevels_D[idx];
subDivLevels_D[keptTrianglesCnt+triIdxOffs+0] = parentSubdiv + 1;
subDivLevels_D[keptTrianglesCnt+triIdxOffs+1] = parentSubdiv + 1;
subDivLevels_D[keptTrianglesCnt+triIdxOffs+2] = parentSubdiv + 1;
} else if (flag2 && flag0) { // Spawn 3 new triangles
uint vNew2 = vertexCntOld + subDivEdgeIdxOffs_D[e2];
uint vNew0 = vertexCntOld + subDivEdgeIdxOffs_D[e0];
// #0
newTriangles[3*triIdxOffs+0] = v0;
newTriangles[3*triIdxOffs+1] = vNew0;
newTriangles[3*triIdxOffs+2] = vNew2;
// #1
newTriangles[3*triIdxOffs+3] = v2;
newTriangles[3*triIdxOffs+4] = vNew2;
newTriangles[3*triIdxOffs+5] = vNew0;
// #2
newTriangles[3*triIdxOffs+6] = v1;
newTriangles[3*triIdxOffs+7] = v2;
newTriangles[3*triIdxOffs+8] = vNew0;
// Write subdiv levels
uint parentSubdiv = oldSubDivLevels_D[idx];
subDivLevels_D[keptTrianglesCnt+triIdxOffs+0] = parentSubdiv + 1;
subDivLevels_D[keptTrianglesCnt+triIdxOffs+1] = parentSubdiv + 1;
subDivLevels_D[keptTrianglesCnt+triIdxOffs+2] = parentSubdiv + 1;
} else if (flag0) { // Spawn 2 new triangles
uint vNew0 = vertexCntOld + subDivEdgeIdxOffs_D[e0];
// #0
newTriangles[3*triIdxOffs+0] = v0;
newTriangles[3*triIdxOffs+1] = vNew0;
newTriangles[3*triIdxOffs+2] = v2;
// #1
newTriangles[3*triIdxOffs+3] = v1;
newTriangles[3*triIdxOffs+4] = v2;
newTriangles[3*triIdxOffs+5] = vNew0;
// Write subdiv levels
uint parentSubdiv = oldSubDivLevels_D[idx];
subDivLevels_D[keptTrianglesCnt+triIdxOffs+0] = parentSubdiv + 1;
subDivLevels_D[keptTrianglesCnt+triIdxOffs+1] = parentSubdiv + 1;
} else if (flag1) { // Spawn 2 new triangles
uint vNew1 = vertexCntOld + subDivEdgeIdxOffs_D[e1];
// #0
newTriangles[3*triIdxOffs+0] = v0;
newTriangles[3*triIdxOffs+1] = v1;
newTriangles[3*triIdxOffs+2] = vNew1;
// #1
newTriangles[3*triIdxOffs+3] = v0;
newTriangles[3*triIdxOffs+4] = vNew1;
newTriangles[3*triIdxOffs+5] = v2;
// Write subdiv levels
uint parentSubdiv = oldSubDivLevels_D[idx];
subDivLevels_D[keptTrianglesCnt+triIdxOffs+0] = parentSubdiv + 1;
subDivLevels_D[keptTrianglesCnt+triIdxOffs+1] = parentSubdiv + 1;
} else if (flag2) { // Spawn 2 new triangles
uint vNew2 = vertexCntOld + subDivEdgeIdxOffs_D[e2];
// #0
newTriangles[3*triIdxOffs+0] = v0;
newTriangles[3*triIdxOffs+1] = v1;
newTriangles[3*triIdxOffs+2] = vNew2;
// #1
newTriangles[3*triIdxOffs+3] = v1;
newTriangles[3*triIdxOffs+4] = v2;
newTriangles[3*triIdxOffs+5] = vNew2;
// Write subdiv levels
uint parentSubdiv = oldSubDivLevels_D[idx];
subDivLevels_D[keptTrianglesCnt+triIdxOffs+0] = parentSubdiv + 1;
subDivLevels_D[keptTrianglesCnt+triIdxOffs+1] = parentSubdiv + 1;
} else {
// Write back subdiv level
subDivLevels_D[oldTrianglesIdxOffsets_D[idx]] = oldSubDivLevels_D[idx];
}
}
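/*
 * ComputeSubdivTriNeighbors_D
 *
 * Computes the triangle neighbor lists of the kept and the newly spawned
 * triangles after subdivision.
 */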
// TODO: !!! This method assumes a certain ordering in the three neighbors of
// !!! a triangle. Is this actually true?
__global__ void ComputeSubdivTriNeighbors_D (
uint *newTriangleNeighbors_D,
uint *oldTriangleNeighbors_D,
uint *newTriangleIdxOffsets,
uint *triangleEdgeList_D,
uint *triangleIdx_D,
uint *edgeFlag_D,
uint *edges_D,
uint *subDivEdgeIdxOffs_D,
uint *subdivCnt_D,
uint *oldTriangleIdxOffset,
uint *newTriangles_D,
uint vertexCntOld,
uint numberOfKeptTriangles,
uint oldTriangleCnt) {
const uint idx = ::getThreadIdx();
if (idx >= oldTriangleCnt) return;
uint edgeIdx0 = triangleEdgeList_D[3*idx+0];
uint edgeIdx1 = triangleEdgeList_D[3*idx+1];
uint edgeIdx2 = triangleEdgeList_D[3*idx+2];
bool flag0 = bool(edgeFlag_D[edgeIdx0]);
bool flag1 = bool(edgeFlag_D[edgeIdx1]);
bool flag2 = bool(edgeFlag_D[edgeIdx2]);
uint v0 = triangleIdx_D[3*idx+0];
uint v1 = triangleIdx_D[3*idx+1];
uint v2 = triangleIdx_D[3*idx+2];
uint e0 = triangleEdgeList_D[3*idx+0];
uint e1 = triangleEdgeList_D[3*idx+1];
uint e2 = triangleEdgeList_D[3*idx+2];
uint triIdxOffs = newTriangleIdxOffsets[idx];
if (!(flag0 || flag1 || flag2)) { // No subdivision
uint newIdx = oldTriangleIdxOffset[idx];
uint oldN0 = oldTriangleNeighbors_D[3*idx+0];
uint oldN1 = oldTriangleNeighbors_D[3*idx+1];
uint oldN2 = oldTriangleNeighbors_D[3*idx+2];
uint subDivCntN0 = subdivCnt_D[oldN0];
if (subDivCntN0 > 0) {
for (int i = 0; i < subDivCntN0; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+2];
if (hasAdjEdge_D (v0, v1, v2, u0, u1, u2)) {
newTriangleNeighbors_D[3*newIdx+0] =
numberOfKeptTriangles + newTriangleIdxOffsets[oldN0]+i;
}
}
} else {
newTriangleNeighbors_D[3*newIdx+0] = oldTriangleIdxOffset[oldN0];
}
uint subDivCntN1 = subdivCnt_D[oldN1];
if (subDivCntN1 > 0) {
for (int i = 0; i < subDivCntN1; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+2];
if (hasAdjEdge_D (v0, v1, v2, u0, u1, u2)) {
newTriangleNeighbors_D[3*newIdx+1]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN1]+i;
}
}
} else {
newTriangleNeighbors_D[3*newIdx+1] = oldTriangleIdxOffset[oldN1];
}
uint subDivCntN2 = subdivCnt_D[oldN2];
if (subDivCntN2 > 0) {
for (int i = 0; i < subDivCntN2; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+2];
if (hasAdjEdge_D (v0, v1, v2, u0, u1, u2)) {
newTriangleNeighbors_D[3*newIdx+2]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN2]+i;
}
}
} else {
newTriangleNeighbors_D[3*newIdx+2] = oldTriangleIdxOffset[oldN2];
}
} else if (flag0 && !flag1 && !flag2) { // 2 new triangles have been spawned
uint vNew0 = vertexCntOld + subDivEdgeIdxOffs_D[e0];
// Get index of neighbors of old triangle
uint oldN0 = oldTriangleNeighbors_D[3*idx+0];
uint oldN1 = oldTriangleNeighbors_D[3*idx+1];
uint oldN2 = oldTriangleNeighbors_D[3*idx+2];
/* Get neighbors of triangle #0 */
// Get respective vertex indices of this triangle
uint w0 = v0;
uint w1 = vNew0;
uint w2 = v2;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN0 = subdivCnt_D[oldN0];
if (subDivCntN0 > 0) {
for (int i = 0; i < subDivCntN0; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN0]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0]=
oldTriangleIdxOffset[oldN0];
}
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+1] =
numberOfKeptTriangles+triIdxOffs+1;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN2 = subdivCnt_D[oldN2];
if (subDivCntN2 > 0) {
for (int i = 0; i < subDivCntN2; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+2]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN2]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+2]=
oldTriangleIdxOffset[oldN2];
}
/* Get neighbors of triangle #1 */
// Get respective vertex indices of this triangle
w0 = v1;
w1 = v2;
w2 = vNew0;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN1 = subdivCnt_D[oldN1];
if (subDivCntN1 > 0) {
for (int i = 0; i < subDivCntN1; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+3]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN1]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+3]=
oldTriangleIdxOffset[oldN1];
}
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+4] =
numberOfKeptTriangles+triIdxOffs;
// This neighbor has to be determined by comparing vertex indices
subDivCntN0 = subdivCnt_D[oldN0];
if (subDivCntN0 > 0) {
for (int i = 0; i < subDivCntN0; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+5]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN0]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+5]=
oldTriangleIdxOffset[oldN0];
}
} else if (!flag0 && flag1 && !flag2) { // 2 new triangles have been spawned
uint vNew1 = vertexCntOld + subDivEdgeIdxOffs_D[e1];
// Get index of neighbors of old triangle
uint oldN0 = oldTriangleNeighbors_D[3*idx+0];
uint oldN1 = oldTriangleNeighbors_D[3*idx+1];
uint oldN2 = oldTriangleNeighbors_D[3*idx+2];
// #0
uint w0 = v0;
uint w1 = v1;
uint w2 = vNew1;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN0 = subdivCnt_D[oldN0];
if (subDivCntN0 > 0) {
for (int i = 0; i < subDivCntN0; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN0]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0]=
oldTriangleIdxOffset[oldN0];
}
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN1 = subdivCnt_D[oldN1];
if (subDivCntN1 > 0) {
for (int i = 0; i < subDivCntN1; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+1]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN1]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+1]=
oldTriangleIdxOffset[oldN1];
}
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+2] =
numberOfKeptTriangles+triIdxOffs+1;
// #1
w0 = v0;
w1 = vNew1;
w2 = v2;
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+3] =
numberOfKeptTriangles+triIdxOffs;
// This neighbor has to be determined by comparing vertex indices
subDivCntN1 = subdivCnt_D[oldN1];
if (subDivCntN1 > 0) {
for (int i = 0; i < subDivCntN1; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+4]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN1]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+4]=
oldTriangleIdxOffset[oldN1];
}
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN2 = subdivCnt_D[oldN2];
if (subDivCntN2 > 0) {
for (int i = 0; i < subDivCntN2; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+5]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN2]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+5]=
oldTriangleIdxOffset[oldN2];
}
} else if (!flag0 && !flag1 && flag2) { // 2 new triangles have been spawned
uint vNew2 = vertexCntOld + subDivEdgeIdxOffs_D[e2];
// Get index of neighbors of old triangle
uint oldN0 = oldTriangleNeighbors_D[3*idx+0];
uint oldN1 = oldTriangleNeighbors_D[3*idx+1];
uint oldN2 = oldTriangleNeighbors_D[3*idx+2];
// #0
uint w0 = v0;
uint w1 = v1;
uint w2 = vNew2;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN0 = subdivCnt_D[oldN0];
if (subDivCntN0 > 0) {
for (int i = 0; i < subDivCntN0; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN0]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0]=
oldTriangleIdxOffset[oldN0];
}
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+1] =
numberOfKeptTriangles+triIdxOffs + 1;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN2 = subdivCnt_D[oldN2];
if (subDivCntN2 > 0) {
for (int i = 0; i < subDivCntN2; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+2]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN2]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+2]=
oldTriangleIdxOffset[oldN2];
}
// #1
w0 = v1;
w1 = v2;
w2 = vNew2;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN1 = subdivCnt_D[oldN1];
if (subDivCntN1 > 0) {
for (int i = 0; i < subDivCntN1; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+3]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN1]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+3]=
oldTriangleIdxOffset[oldN1];
}
// This neighbor has to be determined by comparing vertex indices
subDivCntN2 = subdivCnt_D[oldN2];
if (subDivCntN2 > 0) {
for (int i = 0; i < subDivCntN2; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+4]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN2]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+4]=
oldTriangleIdxOffset[oldN2];
}
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+5] =
numberOfKeptTriangles+triIdxOffs;
} else if (flag0 && flag1 && !flag2) { // 3 new triangles have been spawned
uint vNew0 = vertexCntOld + subDivEdgeIdxOffs_D[e0];
uint vNew1 = vertexCntOld + subDivEdgeIdxOffs_D[e1];
// Get index of neighbors of old triangle
uint oldN0 = oldTriangleNeighbors_D[3*idx+0];
uint oldN1 = oldTriangleNeighbors_D[3*idx+1];
uint oldN2 = oldTriangleNeighbors_D[3*idx+2];
// #0
uint w0 = v1;
uint w1 = vNew1;
uint w2 = vNew0;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN1 = subdivCnt_D[oldN1];
if (subDivCntN1 > 0) {
for (int i = 0; i < subDivCntN1; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN1]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0] =
oldTriangleIdxOffset[oldN1];
}
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+1] =
numberOfKeptTriangles+triIdxOffs + 1;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN0 = subdivCnt_D[oldN0];
if (subDivCntN0 > 0) {
for (int i = 0; i < subDivCntN0; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+2]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN0]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+2]=
oldTriangleIdxOffset[oldN0];
}
// #1
w0 = v0;
w1 = vNew0;
w2 = vNew1;
// This neighbor has to be determined by comparing vertex indices
subDivCntN0 = subdivCnt_D[oldN0];
if (subDivCntN0 > 0) {
for (int i = 0; i < subDivCntN0; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+3]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN0]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+3] =
oldTriangleIdxOffset[oldN0];
}
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+4] =
numberOfKeptTriangles+triIdxOffs;
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+5] =
numberOfKeptTriangles+triIdxOffs + 2;
// #2
w0 = v2;
w1 = v0;
w2 = vNew1;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN2 = subdivCnt_D[oldN2];
if (subDivCntN2 > 0) {
for (int i = 0; i < subDivCntN2; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+6]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN2]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+6] =
oldTriangleIdxOffset[oldN2];
}
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs) + 7] =
numberOfKeptTriangles+triIdxOffs + 1;
// This neighbor has to be determined by comparing vertex indices
subDivCntN1 = subdivCnt_D[oldN1];
if (subDivCntN1 > 0) {
for (int i = 0; i < subDivCntN1; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+8]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN1]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+8] =
oldTriangleIdxOffset[oldN1];
}
} else if (!flag0 && flag1 && flag2) { // 3 new triangles have been spawned
uint vNew1 = vertexCntOld + subDivEdgeIdxOffs_D[e1];
uint vNew2 = vertexCntOld + subDivEdgeIdxOffs_D[e2];
// Get index of neighbors of old triangle
uint oldN0 = oldTriangleNeighbors_D[3*idx+0];
uint oldN1 = oldTriangleNeighbors_D[3*idx+1];
uint oldN2 = oldTriangleNeighbors_D[3*idx+2];
// #0
uint w0 = v2;
uint w1 = vNew2;
uint w2 = vNew1;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN2 = subdivCnt_D[oldN2];
if (subDivCntN2 > 0) {
for (int i = 0; i < subDivCntN2; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN2]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0] =
oldTriangleIdxOffset[oldN2];
}
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs) + 1] =
numberOfKeptTriangles+triIdxOffs + 1;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN1 = subdivCnt_D[oldN1];
if (subDivCntN1 > 0) {
for (int i = 0; i < subDivCntN1; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+2]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN1]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+2] =
oldTriangleIdxOffset[oldN1];
}
// #1
w0 = v0;
w1 = vNew1;
w2 = vNew2;
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs) + 3] =
numberOfKeptTriangles+triIdxOffs + 2;
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs) + 4] =
numberOfKeptTriangles+triIdxOffs;
// This neighbor has to be determined by comparing vertex indices
subDivCntN2 = subdivCnt_D[oldN2];
if (subDivCntN2 > 0) {
for (int i = 0; i < subDivCntN2; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+5]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN2]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+5] =
oldTriangleIdxOffset[oldN2];
}
// #2
w0 = v0;
w1 = v1;
w2 = vNew1;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN0 = subdivCnt_D[oldN0];
if (subDivCntN0 > 0) {
for (int i = 0; i < subDivCntN0; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+6]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN0]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+6] =
oldTriangleIdxOffset[oldN0];
}
// This neighbor has to be determined by comparing vertex indices
subDivCntN1 = subdivCnt_D[oldN1];
if (subDivCntN1 > 0) {
for (int i = 0; i < subDivCntN1; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+7]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN1]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+7] =
oldTriangleIdxOffset[oldN1];
}
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs) + 8] =
numberOfKeptTriangles+triIdxOffs + 1;
} else if (flag0 && !flag1 && flag2) { // 3 new triangles have been spawned
// Get index of neighbors of old triangle
uint oldN0 = oldTriangleNeighbors_D[3*idx+0];
uint oldN1 = oldTriangleNeighbors_D[3*idx+1];
uint oldN2 = oldTriangleNeighbors_D[3*idx+2];
uint vNew2 = vertexCntOld + subDivEdgeIdxOffs_D[e2];
uint vNew0 = vertexCntOld + subDivEdgeIdxOffs_D[e0];
// #0
uint w0 = v0;
uint w1 = vNew0;
uint w2 = vNew2;
// This neighbor has to be determined by comparing vertex indices
// TODO DEBUG!!
uint subDivCntN0 = subdivCnt_D[oldN0];
if (subDivCntN0 > 0) {
for (int i = 0; i < subDivCntN0; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN0]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0] =
oldTriangleIdxOffset[oldN0];
}
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs) + 1] =
numberOfKeptTriangles+triIdxOffs + 1;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN2 = subdivCnt_D[oldN2];
if (subDivCntN2 > 0) {
for (int i = 0; i < subDivCntN2; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+2]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN2]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+2] =
oldTriangleIdxOffset[oldN2];
}
// #1
w0 = v2;
w1 = vNew2;
w2 = vNew0;
// This neighbor has to be determined by comparing vertex indices
subDivCntN2 = subdivCnt_D[oldN2];
if (subDivCntN2 > 0) {
for (int i = 0; i < subDivCntN2; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+3]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN2]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+3] =
oldTriangleIdxOffset[oldN2];
}
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs) + 4] =
numberOfKeptTriangles+triIdxOffs;
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs) + 5] =
numberOfKeptTriangles+triIdxOffs+2;
// #2
w0 = v1;
w1 = v2;
w2 = vNew0;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN1 = subdivCnt_D[oldN1];
if (subDivCntN1 > 0) {
for (int i = 0; i < subDivCntN1; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+6]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN1]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+6] =
oldTriangleIdxOffset[oldN1];
}
// This neighbor is the other subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs) + 7] =
numberOfKeptTriangles+triIdxOffs + 1;
subDivCntN0 = subdivCnt_D[oldN0];
if (subDivCntN0 > 0) {
for (int i = 0; i < subDivCntN0; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+8]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN0]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+8] =
oldTriangleIdxOffset[oldN0];
}
} else if (flag0 && flag1 && flag2) { // 4 new triangles have been spawned
uint vNew0 = vertexCntOld + subDivEdgeIdxOffs_D[e0];
uint vNew1 = vertexCntOld + subDivEdgeIdxOffs_D[e1];
uint vNew2 = vertexCntOld + subDivEdgeIdxOffs_D[e2];
// Get index of neighbors of old triangle
uint oldN0 = oldTriangleNeighbors_D[3*idx+0];
uint oldN1 = oldTriangleNeighbors_D[3*idx+1];
uint oldN2 = oldTriangleNeighbors_D[3*idx+2];
// #0
uint w0 = v0;
uint w1 = vNew0;
uint w2 = vNew2;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN0 = subdivCnt_D[oldN0];
if (subDivCntN0 > 0) {
for (int i = 0; i < subDivCntN0; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN0]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+0]=
oldTriangleIdxOffset[oldN0];
}
// This neighbor is the middle subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+1] =
numberOfKeptTriangles+triIdxOffs + 3;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN2 = subdivCnt_D[oldN2];
if (subDivCntN2 > 0) {
for (int i = 0; i < subDivCntN2; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+2]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN2]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+2]=
oldTriangleIdxOffset[oldN2];
}
// #1
w0 = v1;
w1 = vNew1;
w2 = vNew0;
// This neighbor has to be determined by comparing vertex indices
uint subDivCntN1 = subdivCnt_D[oldN1];
if (subDivCntN1 > 0) {
for (int i = 0; i < subDivCntN1; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+3]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN1]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+3]=
oldTriangleIdxOffset[oldN1];
}
// This neighbor is the middle subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+4] =
numberOfKeptTriangles+triIdxOffs + 3;
// This neighbor has to be determined by comparing vertex indices
subDivCntN0 = subdivCnt_D[oldN0];
if (subDivCntN0 > 0) {
for (int i = 0; i < subDivCntN0; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN0]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+5]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN0]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+5]=
oldTriangleIdxOffset[oldN0];
}
// #2
w0 = v2;
w1 = vNew2;
w2 = vNew1;
// This neighbor has to be determined by comparing vertex indices
subDivCntN2 = subdivCnt_D[oldN2];
if (subDivCntN2 > 0) {
for (int i = 0; i < subDivCntN2; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN2]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+6]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN2]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+6]=
oldTriangleIdxOffset[oldN2];
}
// This neighbor is the middle subdivision
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+7] =
numberOfKeptTriangles+triIdxOffs + 3;
// This neighbor has to be determined by comparing vertex indices
subDivCntN1 = subdivCnt_D[oldN1];
if (subDivCntN1 > 0) {
for (int i = 0; i < subDivCntN1; ++i) {
uint u0, u1, u2;
u0 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+0];
u1 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+1];
u2 = newTriangles_D[3*(newTriangleIdxOffsets[oldN1]+i)+2];
if (hasAdjEdge_D (w0, w1, w2, u0, u1, u2)) {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+8]=
numberOfKeptTriangles + newTriangleIdxOffsets[oldN1]+i;
}
}
} else {
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+8]=
oldTriangleIdxOffset[oldN1];
}
// #3 This is the middle triangle
w0 = vNew0;
w1 = vNew1;
w2 = vNew2;
        // This neighbor is one of the corner subdivisions
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+9] =
numberOfKeptTriangles+triIdxOffs + 1;
        // This neighbor is one of the corner subdivisions
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+10] =
numberOfKeptTriangles+triIdxOffs + 2;
        // This neighbor is one of the corner subdivisions
newTriangleNeighbors_D[3*(numberOfKeptTriangles+triIdxOffs)+11] =
numberOfKeptTriangles+triIdxOffs + 0;
}
}
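/*
 * CopyNewDataToVertexBuffer_D
 *
 * Appends the newly created vertex positions to the vertex data buffer
 * (stride 9) and initializes their normal and texture coordinate slots.
 */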
__global__ void CopyNewDataToVertexBuffer_D(
float *newVertices_D,
float *newBuffer_D,
uint oldVertexCnt,
uint newVertexCnt) {
const uint vertexDataStride = 9;
const uint idx = ::getThreadIdx();
if (idx >= newVertexCnt) return;
newBuffer_D[vertexDataStride*(oldVertexCnt+idx)+0] = newVertices_D[3*idx+0];
newBuffer_D[vertexDataStride*(oldVertexCnt+idx)+1] = newVertices_D[3*idx+1];
newBuffer_D[vertexDataStride*(oldVertexCnt+idx)+2] = newVertices_D[3*idx+2];
newBuffer_D[vertexDataStride*(oldVertexCnt+idx)+3] = 1.0; // Normal
newBuffer_D[vertexDataStride*(oldVertexCnt+idx)+4] = 0.0;
newBuffer_D[vertexDataStride*(oldVertexCnt+idx)+5] = 0.0;
newBuffer_D[vertexDataStride*(oldVertexCnt+idx)+6] = 0.0; // TC
newBuffer_D[vertexDataStride*(oldVertexCnt+idx)+7] = 0.0;
newBuffer_D[vertexDataStride*(oldVertexCnt+idx)+8] = 0.0;
}
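/*
 * CopyOldDataToTriangleBuffer_D
 *
 * Compacts the index triplets of all triangles that were not subdivided
 * into the new triangle index buffer.
 */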
__global__ void CopyOldDataToTriangleBuffer_D(
uint *oldTriangleIdx_D,
uint *oldTriangleIdxOffs_D,
uint *newTriangleIdx_D,
uint *subdivCnt_D,
uint oldTriangleCnt) {
const uint idx = ::getThreadIdx();
if (idx >= oldTriangleCnt) return;
if (subdivCnt_D[idx] > 0) return; // Subdivided triangles are dismissed
uint newIdx = oldTriangleIdxOffs_D[idx];
newTriangleIdx_D[3*newIdx+0] = oldTriangleIdx_D[3*idx+0];
newTriangleIdx_D[3*newIdx+1] = oldTriangleIdx_D[3*idx+1];
newTriangleIdx_D[3*newIdx+2] = oldTriangleIdx_D[3*idx+2];
}
/*
* DeformableGPUSurfaceMT::RefineMesh
*/
int DeformableGPUSurfaceMT::RefineMesh(
uint maxSubdivLevel,
float *volume_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
float isovalue,
float maxEdgeLen) {
using megamol::core::utility::log::Log;
// Init grid parameters
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return -1;
}
cudaGraphicsResource* cudaTokens[2];
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0], this->vboVtxData,
cudaGraphicsMapFlagsNone))) {
Log::DefaultLog.WriteError(
"%s: could register buffer",
this->ClassName());
return -1;
}
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[1], this->vboTriangleIdx,
cudaGraphicsMapFlagsNone))) {
Log::DefaultLog.WriteError(
"%s: could not register buffer",
this->ClassName());
return -1;
}
if (!CudaSafeCall(cudaGraphicsMapResources(2, cudaTokens))) {
Log::DefaultLog.WriteError(
"%s: could not map recources",
this->ClassName());
return -1;
}
// Get mapped pointers to the vertex data buffers
float *vboPt;
size_t vboSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
Log::DefaultLog.WriteError(
"%s: could not obtain device pointer",
this->ClassName());
return -1;
}
// Get mapped pointers to the vertex data buffers
unsigned int *vboTriIdxPt;
size_t vboTriSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboTriIdxPt), // The mapped pointer
&vboTriSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
Log::DefaultLog.WriteError(
"%s: could not obtain device pointer",
this->ClassName());
return -1;
}
/* 1. Compute edge list */
//#define USE_TIMER
#ifdef USE_TIMER
float dt_ms;
cudaEvent_t event1, event2, eventStart, eventEnd;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
cudaEventCreate(&eventStart);
cudaEventCreate(&eventEnd);
cudaEventRecord(event1, 0);
cudaEventRecord(eventStart, 0);
#endif
const uint edgeCnt = (this->triangleCnt*3)/2;
// printf("EDGE COUNT %u\n", edgeCnt);
// Get the number of edges associated with each triangle
if (!CudaSafeCall(this->triangleEdgeOffs_D.Validate(this->triangleCnt))) {
return -1;
}
if (!CudaSafeCall(this->triangleEdgeOffs_D.Set(0x00))) {
return -1;
}
// Check whether triangle neighbors have been computed
if (this->triangleNeighbors_D.GetCount() != this->triangleCnt*3) {
Log::DefaultLog.WriteError(
"%s: need triangle neighbors",
this->ClassName());
return -1;
}
DeformableGPUSurfaceMT_GetTriangleEdgeCnt_D <<< Grid(this->triangleCnt, 256), 256 >>>(
this->triangleEdgeOffs_D.Peek(),
this->triangleNeighbors_D.Peek(),
this->triangleCnt);
if (!CheckForCudaError()) {
return -1;
}
// Compute prefix sum
thrust::exclusive_scan(
thrust::device_ptr<int>(this->triangleEdgeOffs_D.Peek()),
thrust::device_ptr<int>(this->triangleEdgeOffs_D.Peek() + this->triangleCnt),
thrust::device_ptr<int>(this->triangleEdgeOffs_D.Peek()));
if (!CheckForCudaError()) {
return -1;
}
// Build up edge list based on the offsets
if (!CudaSafeCall(this->edges_D.Validate(edgeCnt*2))) {
return -1;
}
if (!CudaSafeCall(this->edges_D.Set(0x00))) {
return -1;
}
DeformableGPUSurfaceMT_BuildEdgeList_D <<< Grid(this->triangleCnt, 256), 256 >>>(
this->edges_D.Peek(),
this->triangleEdgeOffs_D.Peek(),
this->triangleNeighbors_D.Peek(),
vboTriIdxPt,
this->triangleCnt);
if (!CheckForCudaError()) {
return -1;
}
// // DEBUG Print edges
// this->edges.Validate(this->edges_D.GetCount());
// if (!CudaSafeCall(this->edges_D.CopyToHost(this->edges.Peek()))){
// return false;
// }
// for (int e = 0; e < edgeCnt; ++e) {
// printf("EDGE %i: %u %u\n", e,
// this->edges.Peek()[2*e+0],
// this->edges.Peek()[2*e+1]);
// }
// // END DEBUG
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for computing edge list: %.10f sec\n",
dt_ms/1000.0f);
cudaEventRecord(event1, 0);
#endif
/* 2. Flag long edges and determine number of newly created vertices */
// Build up edge list based on the offsets
if (!CudaSafeCall(this->subDivEdgeFlag_D.Validate(edgeCnt))) {
return -1;
}
if (!CudaSafeCall(this->subDivEdgeFlag_D.Set(0x00))) { // Set to 'false'
return -1;
}
FlagLongEdges_D <<< Grid(edgeCnt, 256), 256 >>> (
this->subDivEdgeFlag_D.Peek(),
this->edges_D.Peek(),
vboPt,
maxEdgeLen*maxEdgeLen,
this->edges_D.GetCount()/2);
if (!CheckForCudaError()) {
return -1;
}
// Compute prefix sum
if (!CudaSafeCall(this->subDivEdgeIdxOffs_D.Validate(edgeCnt))) {
return -1;
}
    if (!CudaSafeCall(this->subDivEdgeIdxOffs_D.Set(0x00))) { // Set to zero
return -1;
}
thrust::exclusive_scan(
thrust::device_ptr<uint>(this->subDivEdgeFlag_D.Peek()),
thrust::device_ptr<uint>(this->subDivEdgeFlag_D.Peek() + edgeCnt),
thrust::device_ptr<uint>(this->subDivEdgeIdxOffs_D.Peek()));
uint accTmp;
if (!CudaSafeCall(cudaMemcpy(&accTmp, this->subDivEdgeFlag_D.Peek()+(edgeCnt-1), sizeof(uint),
cudaMemcpyDeviceToHost))) {
return -1;
}
this->newVertexCnt = accTmp;
if (!CudaSafeCall(cudaMemcpy(&accTmp, this->subDivEdgeIdxOffs_D.Peek()+(edgeCnt-1), sizeof(uint),
cudaMemcpyDeviceToHost))) {
return -1;
}
this->newVertexCnt += accTmp;
this->nFlaggedVertices += this->newVertexCnt;
if (this->newVertexCnt == 0) {
// !! Unmap/unregister vbos because they will be reinitialized
CudaSafeCall(cudaGraphicsUnmapResources(2, cudaTokens, 0));
CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]));
CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]));
return 0;
}
// printf("Need %i new vertices (old triangle count %u)\n", newVertexCnt, this->triangleCnt);
// // DEBUG print edge flag
// HostArr<uint> edgeFlag;
// edgeFlag.Validate(this->subDivEdgeFlag_D.GetCount());
// this->subDivEdgeFlag_D.CopyToHost(edgeFlag.Peek());
// for (int i = 0; i < edgeCnt; ++i) {
// printf("EDGEFLAG %i %u\n", i, edgeFlag.Peek()[i]);
// }
// edgeFlag.Release();
// // END DEBUG
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for flagging edges and thrust reduce: %.10f sec\n",
dt_ms/1000.0f);
cudaEventRecord(event1, 0);
#endif
/* 3. Interpolate new vertex positions associated with the flagged edges */
if (!CudaSafeCall(this->newVertices_D.Validate(this->newVertexCnt*3))) {
return -1;
}
if (this->vertexFlag_D.GetCount() != this->vertexCnt) { // First subdivision round
if (!CudaSafeCall(this->vertexFlag_D.Validate(this->newVertexCnt + this->vertexCnt))) {
return -1;
}
if (!CudaSafeCall(this->vertexFlag_D.Set(0x00))) {
return -1;
}
} else { // Need to save old flags
if (!CudaSafeCall(this->vertexFlagTmp_D.Validate(this->vertexFlag_D.GetCount()))) {
return -1;
}
if (!CudaSafeCall(cudaMemcpy(
this->vertexFlagTmp_D.Peek(),
this->vertexFlag_D.Peek(),
sizeof(float)*this->vertexFlag_D.GetCount(),
cudaMemcpyDeviceToDevice))) {
return -1;
}
if (!CudaSafeCall(this->vertexFlag_D.Validate(this->newVertexCnt + this->vertexCnt))) {
return -1;
}
if (!CudaSafeCall(this->vertexFlag_D.Set(0x00))) {
return -1;
}
if (!CudaSafeCall(cudaMemcpy(
this->vertexFlag_D.Peek(),
this->vertexFlagTmp_D.Peek(),
sizeof(float)*this->vertexFlagTmp_D.GetCount(),
cudaMemcpyDeviceToDevice))) {
return -1;
}
}
ComputeNewVertices <<< Grid(edgeCnt, 256), 256 >>> (
this->newVertices_D.Peek(),
this->vertexFlag_D.Peek(),
this->subDivEdgeIdxOffs_D.Peek(),
this->subDivEdgeFlag_D.Peek(),
this->edges_D.Peek(),
vboPt,
this->vertexCnt,
edgeCnt);
if (!CheckForCudaError()) {
return -1;
}
// Compute number of flagged vertices
this->nFlaggedVertices = thrust::reduce(
thrust::device_ptr<float>(this->vertexFlag_D.Peek()),
thrust::device_ptr<float>(this->vertexFlag_D.Peek() + this->vertexCnt));
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for interpolating new vertices: %.10f sec\n",
dt_ms/1000.0f);
cudaEventRecord(event1, 0);
#endif
/* 4. Build triangle-edge-list */
if (this->triangleNeighbors_D.GetCount() != this->triangleCnt*3) {
Log::DefaultLog.WriteError(
"%s: need triangle neighbors",
this->ClassName());
// !! Unmap/unregister vbos because they will be reinitialized
CudaSafeCall(cudaGraphicsUnmapResources(2, cudaTokens, 0));
CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]));
CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]));
return -1;
}
if (!CudaSafeCall(this->triangleEdgeList_D.Validate(this->triangleCnt*3))) {
return -1;
}
DeformableGPUSurfaceMT_ComputeTriEdgeList_D <<< Grid(this->triangleCnt, 256), 256 >>> (
this->triangleEdgeList_D.Peek(),
this->triangleEdgeOffs_D.Peek(),
this->triangleNeighbors_D.Peek(),
vboTriIdxPt,
this->triangleCnt);
if (!CheckForCudaErrorSync()) {
return -1;
}
// // DEBUG Triangle edge list
// HostArr<unsigned int> triangleEdgeList;
// triangleEdgeList.Validate(this->triangleEdgeList_D.GetCount());
// if (!CudaSafeCall(this->triangleEdgeList_D.CopyToHost(triangleEdgeList.Peek()))){
// return false;
// }
// for (int e = 0; e < this->triangleCnt; ++e) {
// printf("Tri %i, edges: %u %u %u\n", e,
// triangleEdgeList.Peek()[3*e+0],
// triangleEdgeList.Peek()[3*e+1],
// triangleEdgeList.Peek()[3*e+2]);
// }
// triangleEdgeList.Release();
// // END DEBUG
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for triangle edge list: %.10f sec\n",
dt_ms/1000.0f);
cudaEventRecord(event1, 0);
#endif
/* 5. Determine number of newly created triangles */
if (!CudaSafeCall(this->subDivCnt_D.Validate(this->triangleCnt))) {
return -1;
}
if (!CudaSafeCall(this->subDivCnt_D.Set(0x00))) {
return -1;
}
if (!CudaSafeCall(this->oldTrianglesIdxOffs_D.Validate(this->triangleCnt))) {
return -1;
}
ComputeSubdivCnt_D <<< Grid(this->triangleCnt, 256), 256 >>> (
this->subDivCnt_D.Peek(),
this->triangleEdgeList_D.Peek(),
this->subDivEdgeFlag_D.Peek(),
this->edges_D.Peek(),
this->oldTrianglesIdxOffs_D.Peek(),
this->triangleCnt);
if (!CheckForCudaErrorSync()) {
return -1;
}
if (!CudaSafeCall(this->newTrianglesIdxOffs_D.Validate(this->triangleCnt))) {
return -1;
}
// Compute prefix sum
thrust::exclusive_scan(
thrust::device_ptr<uint>(this->subDivCnt_D.Peek()),
thrust::device_ptr<uint>(this->subDivCnt_D.Peek() + this->triangleCnt),
thrust::device_ptr<uint>(this->newTrianglesIdxOffs_D.Peek()));
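// As above: total number of new triangles = last per-triangle count + last
// exclusive-scan value.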
uint newTrianglesCnt;
if (!CudaSafeCall(cudaMemcpy(&accTmp, this->subDivCnt_D.Peek()+(this->triangleCnt-1), sizeof(uint),
cudaMemcpyDeviceToHost))) {
return -1;
}
newTrianglesCnt = accTmp;
if (!CudaSafeCall(cudaMemcpy(&accTmp, this->newTrianglesIdxOffs_D.Peek()+(this->triangleCnt-1), sizeof(uint),
cudaMemcpyDeviceToHost))) {
return -1;
}
newTrianglesCnt += accTmp;
// printf("Need %i new triangles\n", newTrianglesCnt);
uint nOldTriangles = thrust::reduce(
thrust::device_ptr<uint>(this->oldTrianglesIdxOffs_D.Peek()),
thrust::device_ptr<uint>(this->oldTrianglesIdxOffs_D.Peek() + this->triangleCnt));
thrust::exclusive_scan(
thrust::device_ptr<uint>(this->oldTrianglesIdxOffs_D.Peek()),
thrust::device_ptr<uint>(this->oldTrianglesIdxOffs_D.Peek() + this->triangleCnt),
thrust::device_ptr<uint>(this->oldTrianglesIdxOffs_D.Peek()));
// printf("Keep %i old triangles\n", nOldTriangles);
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for computing number of new triangles: %.10f sec\n",
dt_ms/1000.0f);
cudaEventRecord(event1, 0);
#endif
/* 6. Create new triangles with respective vertex indices */
if (this->subDivLevels_D.GetCount() != this->triangleCnt) {
// This is the first subdivision
if (!CudaSafeCall(this->oldSubDivLevels_D.Validate(this->triangleCnt))) {
return -1;
}
if (!CudaSafeCall(this->oldSubDivLevels_D.Set(0x00))) {
return -1;
}
} else { // Store old subdivision levels
if (!CudaSafeCall(this->oldSubDivLevels_D.Validate(this->triangleCnt))) {
return -1;
}
if (!CudaSafeCall(cudaMemcpy(this->oldSubDivLevels_D.Peek(),
this->subDivLevels_D.Peek(), sizeof(unsigned int)*this->triangleCnt,
cudaMemcpyDeviceToDevice))){
return -1;
}
}
// Allocate memory for new subdivision levels (old and new triangles)
if (!CudaSafeCall(this->subDivLevels_D.Validate(nOldTriangles+newTrianglesCnt))) {
return -1;
}
if (!CudaSafeCall(this->newTriangles_D.Validate(newTrianglesCnt*3))) {
return -1;
}
ComputeSubdiv_D <<< Grid(this->triangleCnt, 256), 256 >>> (
this->newTriangles_D.Peek(),
this->newTrianglesIdxOffs_D.Peek(),
this->triangleEdgeList_D.Peek(),
vboTriIdxPt,
this->subDivEdgeFlag_D.Peek(),
this->edges_D.Peek(),
this->subDivEdgeIdxOffs_D.Peek(),
this->oldSubDivLevels_D.Peek(),
this->subDivLevels_D.Peek(),
this->oldTrianglesIdxOffs_D.Peek(),
this->vertexCnt,
nOldTriangles,
this->triangleCnt);
if (!CheckForCudaErrorSync()) {
return -1;
}
// // DEBUG Print new triangles
// HostArr<uint> newTriangles;
// newTriangles.Validate(this->newTriangles_D.GetCount());
// this->newTriangles_D.CopyToHost(newTriangles.Peek());
// for (int i = 0; i < this->newTriangles_D.GetCount()/3; ++i) {
// printf("NEW TRI %i: %u %u %u\n", i,
// newTriangles.Peek()[3*i+0],
// newTriangles.Peek()[3*i+1],
// newTriangles.Peek()[3*i+2]);
// }
// newTriangles.Release();
// // END DEBUG
// // DEBUG Print subdivision levels
// HostArr<uint> subDivisionLevels;
// subDivisionLevels.Validate(this->subDivLevels_D.GetCount());
// this->subDivLevels_D.CopyToHost(subDivisionLevels.Peek());
// for (int i = 0; i < this->subDivLevels_D.GetCount(); ++i) {
// printf("SUBDIV LVL %i: %u \n", i,
// subDivisionLevels.Peek()[i]);
// }
// subDivisionLevels.Release();
// // END DEBUG
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for computing new triangles: %.10f sec\n",
dt_ms/1000.0f);
cudaEventRecord(event1, 0);
#endif
/* 7. (Re-)compute triangle neighbors */
if (!CudaSafeCall(this->newTriangleNeighbors_D.Validate((nOldTriangles+newTrianglesCnt)*3))) {
return -1;
}
if (!CudaSafeCall(this->newTriangleNeighbors_D.Set(0x00))) {
return -1;
}
ComputeSubdivTriNeighbors_D <<< Grid(this->triangleCnt, 256), 256 >>> (
this->newTriangleNeighbors_D.Peek(),
this->triangleNeighbors_D.Peek(),
this->newTrianglesIdxOffs_D.Peek(),
this->triangleEdgeList_D.Peek(),
vboTriIdxPt,
this->subDivEdgeFlag_D.Peek(),
this->edges_D.Peek(),
this->subDivEdgeIdxOffs_D.Peek(),
this->subDivCnt_D.Peek(),
this->oldTrianglesIdxOffs_D.Peek(),
this->newTriangles_D.Peek(),
this->vertexCnt,
nOldTriangles,
this->triangleCnt);
// Reallocate old array TODO Simply swap pointers?
if (!CudaSafeCall(this->triangleNeighbors_D.Validate(this->newTriangleNeighbors_D.GetCount()))) {
return -1;
}
if (!CudaSafeCall(cudaMemcpy(
this->triangleNeighbors_D.Peek(),
this->newTriangleNeighbors_D.Peek(),
this->newTriangleNeighbors_D.GetCount()*sizeof(unsigned int),
cudaMemcpyDeviceToDevice))) {
return -1;
}
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for updating triangle neighbors: %.10f sec\n",
dt_ms/1000.0f);
cudaEventRecord(event1, 0);
#endif
/* 8. Update VBOs for vertex data and triangle indices */
// // DEBUG Print oldTriangles index offset and subdivision count
// HostArr<unsigned int> oldTrianglesIdxOffs;
// oldTrianglesIdxOffs.Validate(this->oldTrianglesIdxOffs_D.GetCount());
// if (!CudaSafeCall(this->oldTrianglesIdxOffs_D.CopyToHost(oldTrianglesIdxOffs.Peek()))) {
// return -1;
// }
// HostArr<unsigned int> subDivCnt;
// subDivCnt.Validate(this->subDivCnt_D.GetCount());
// if (!CudaSafeCall(this->subDivCnt_D.CopyToHost(subDivCnt.Peek()))) {
// return -1;
// }
// for (int i = 0; i < this->triangleCnt; ++i) {
// printf("%i: offs: %u, subdiv %u\n", i, oldTrianglesIdxOffs.Peek()[i],
// subDivCnt.Peek()[i]);
// }
// subDivCnt.Release();
// oldTrianglesIdxOffs.Release();
// // END DEBUG
// // DEBUG print old vertex buffer
// HostArr<float> vertexBuffer;
// vertexBuffer.Validate(this->vertexDataStride*this->vertexCnt);
// cudaMemcpy(vertexBuffer.Peek(), vboPt, vertexBuffer.GetCount()*sizeof(float), cudaMemcpyDeviceToHost);
// for (int i = 0; i < this->vertexCnt; ++i) {
// printf("Old Vertex Buffer %i: %f %f %f, %f %f %f, %f %f %f\n", i,
// vertexBuffer.Peek()[9*i+0],
// vertexBuffer.Peek()[9*i+1],
// vertexBuffer.Peek()[9*i+2],
// vertexBuffer.Peek()[9*i+3],
// vertexBuffer.Peek()[9*i+4],
// vertexBuffer.Peek()[9*i+5],
// vertexBuffer.Peek()[9*i+6],
// vertexBuffer.Peek()[9*i+7],
// vertexBuffer.Peek()[9*i+8]);
// }
// vertexBuffer.Release();
// // END DEBUG
// // DEBUG print old triangle index buffer
// HostArr<uint> triangleBuffer;
// triangleBuffer.Validate(3*this->triangleCnt);
// cudaMemcpy(triangleBuffer.Peek(), vboTriIdxPt,
// triangleBuffer.GetCount()*sizeof(uint), cudaMemcpyDeviceToHost);
// for (int i = 0; i < this->triangleCnt; ++i) {
// printf("Old Triangle Buffer %i: %u %u %u\n",i,
// triangleBuffer.Peek()[3*i+0],
// triangleBuffer.Peek()[3*i+1],
// triangleBuffer.Peek()[3*i+2]);
// }
// triangleBuffer.Release();
// // END DEBUG
// Make copy of old data
if (!CudaSafeCall(this->oldTriangles_D.Validate(this->triangleCnt*3))) {
return -1;
}
if (!CudaSafeCall(this->trackedSubdivVertexData_D.Validate(this->vertexCnt*this->vertexDataStride))) {
return -1;
}
if (!CudaSafeCall(cudaMemcpy(this->oldTriangles_D.Peek(), vboTriIdxPt,
sizeof(unsigned int)*this->oldTriangles_D.GetCount(),
cudaMemcpyDeviceToDevice))) {
return -1;
}
if (!CudaSafeCall(cudaMemcpy(this->trackedSubdivVertexData_D.Peek(), vboPt,
sizeof(float)*this->trackedSubdivVertexData_D.GetCount(),
cudaMemcpyDeviceToDevice))) {
return -1;
}
// !! Unmap/unregister vbos because they will be reinitialized
if (!CudaSafeCall(cudaGraphicsUnmapResources(2, cudaTokens, 0))) {
return -1;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return -1;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]))) {
return -1;
}
// Re-initialize VBOS
uint oldVertexCnt = this->vertexCnt;
this->vertexCnt += newVertexCnt;
uint oldTriangleCount = this->triangleCnt;
this->triangleCnt = nOldTriangles + newTrianglesCnt;
this->InitTriangleIdxVBO(this->triangleCnt);
this->InitVertexDataVBO(this->vertexCnt);
// Register and get pointers
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0], this->vboVtxData,
cudaGraphicsMapFlagsNone))) {
return -1;
}
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[1], this->vboTriangleIdx,
cudaGraphicsMapFlagsNone))) {
return -1;
}
if (!CudaSafeCall(cudaGraphicsMapResources(2, cudaTokens, 0))) {
return -1;
}
// Get mapped pointers to the vertex data buffers
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return -1;
}
// Get mapped pointers to the vertex data buffers
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboTriIdxPt), // The mapped pointer
&vboTriSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return -1;
}
// Copy old vertex data to new buffer
if (!CudaSafeCall(cudaMemcpy(vboPt, this->trackedSubdivVertexData_D.Peek(),
sizeof(float)*this->vertexDataStride*oldVertexCnt,
cudaMemcpyDeviceToDevice))) {
return -1;
}
// Copy new vertex data to new buffer
CopyNewDataToVertexBuffer_D <<< Grid(this->vertexCnt, 256), 256 >>> (
this->newVertices_D.Peek(),
vboPt,
oldVertexCnt,
newVertexCnt);
if (!CheckForCudaError()) {
return -1;
}
// // DEBUG print old vertex buffer
// vertexBuffer.Validate(this->vertexDataStride*this->vertexCnt);
// cudaMemcpy(vertexBuffer.Peek(), vboPt, vertexBuffer.GetCount()*sizeof(float), cudaMemcpyDeviceToHost);
// for (int i = 0; i < this->vertexCnt; ++i) {
// printf("New Vertex Buffer %i: %f %f %f, %f %f %f, %f %f %f\n", i,
// vertexBuffer.Peek()[9*i+0],
// vertexBuffer.Peek()[9*i+1],
// vertexBuffer.Peek()[9*i+2],
// vertexBuffer.Peek()[9*i+3],
// vertexBuffer.Peek()[9*i+4],
// vertexBuffer.Peek()[9*i+5],
// vertexBuffer.Peek()[9*i+6],
// vertexBuffer.Peek()[9*i+7],
// vertexBuffer.Peek()[9*i+8]);
// }
// vertexBuffer.Release();
// // END DEBUG
// Copy old triangle indices to VBO
CopyOldDataToTriangleBuffer_D <<< Grid(oldTriangleCount, 256), 256 >>> (
this->oldTriangles_D.Peek(),
this->oldTrianglesIdxOffs_D.Peek(),
vboTriIdxPt,
this->subDivCnt_D.Peek(),
oldTriangleCount);
// Copy new data to triangle VBO
if (!CudaSafeCall(cudaMemcpy(
vboTriIdxPt + 3*nOldTriangles, // New data starts after old data
this->newTriangles_D.Peek(),
sizeof(uint)*this->newTriangles_D.GetCount(),
cudaMemcpyDeviceToDevice))) {
return -1;
}
// // DEBUG Print new triangle neighbors
// HostArr<uint> triNeighbors;
// triNeighbors.Validate(this->triangleNeighbors_D.GetCount());
// HostArr<uint> triangleBuffer;
// triangleBuffer.Validate(3*this->triangleCnt);
// cudaMemcpy(triangleBuffer.Peek(), vboTriIdxPt,
// triangleBuffer.GetCount()*sizeof(uint), cudaMemcpyDeviceToHost);
// if (!CudaSafeCall(this->triangleNeighbors_D.CopyToHost(triNeighbors.Peek()))) {
// return -1;
// }
// for (int i = 0; i < this->triangleNeighbors_D.GetCount()/3; ++i) {
//
//// printf("TRI NEIGHBORS %i: %u %u %u\n", i,
//// triNeighbors.Peek()[3*i+0],
//// triNeighbors.Peek()[3*i+1],
//// triNeighbors.Peek()[3*i+2]);
//
// // Check neighbor consistency
// uint v0 = triangleBuffer.Peek()[3*i+0];
// uint v1 = triangleBuffer.Peek()[3*i+1];
// uint v2 = triangleBuffer.Peek()[3*i+2];
//
// uint n00 = triangleBuffer.Peek()[3*triNeighbors.Peek()[3*i+0]+0];
// uint n01 = triangleBuffer.Peek()[3*triNeighbors.Peek()[3*i+0]+1];
// uint n02 = triangleBuffer.Peek()[3*triNeighbors.Peek()[3*i+0]+2];
//
// uint n10 = triangleBuffer.Peek()[3*triNeighbors.Peek()[3*i+1]+0];
// uint n11 = triangleBuffer.Peek()[3*triNeighbors.Peek()[3*i+1]+1];
// uint n12 = triangleBuffer.Peek()[3*triNeighbors.Peek()[3*i+1]+2];
//
// uint n20 = triangleBuffer.Peek()[3*triNeighbors.Peek()[3*i+2]+0];
// uint n21 = triangleBuffer.Peek()[3*triNeighbors.Peek()[3*i+2]+1];
// uint n22 = triangleBuffer.Peek()[3*triNeighbors.Peek()[3*i+2]+2];
//
//// printf("n0 %u %u %u, n1 %u %u %u, n2 %u %u %u\n",
//// n00, n01, n02, n10, n11, n12, n20, n21, n22);
//
// uint cnt = 0;
// bool flag0=false, flag1=false, flag2=false;
// if (v0 == n00) cnt++; if (v0 == n01) cnt++; if (v0 == n02) cnt++;
// if (v1 == n00) cnt++; if (v1 == n01) cnt++; if (v1 == n02) cnt++;
// if (v2 == n00) cnt++; if (v2 == n01) cnt++; if (v2 == n02) cnt++;
// if (cnt < 2) {
// flag0 = true;
//
// }
//
// cnt = 0;
// if (v0 == n10) cnt++; if (v0 == n11) cnt++; if (v0 == n12) cnt++;
// if (v1 == n10) cnt++; if (v1 == n11) cnt++; if (v1 == n12) cnt++;
// if (v2 == n10) cnt++; if (v2 == n11) cnt++; if (v2 == n12) cnt++;
// if (cnt < 2) {
// flag1 = true;
// }
//
// cnt = 0;
// if (v0 == n20) cnt++; if (v0 == n21) cnt++; if (v0 == n22) cnt++;
// if (v1 == n20) cnt++; if (v1 == n21) cnt++; if (v1 == n22) cnt++;
// if (v2 == n20) cnt++; if (v2 == n21) cnt++; if (v2 == n22) cnt++;
// if (cnt < 2) {
// flag2 = true;
// }
//
// if (flag0||flag1||flag2) {
// printf("TRI NEIGHBORS %i: %u %u %u\n", i,
// triNeighbors.Peek()[3*i+0],
// triNeighbors.Peek()[3*i+1],
// triNeighbors.Peek()[3*i+2]);
// }
// if (flag0) printf("----> %u inconsistent\n", triNeighbors.Peek()[3*i+0]);
// if (flag1) printf("----> %u inconsistent\n", triNeighbors.Peek()[3*i+1]);
// if (flag2) printf("----> %u inconsistent\n", triNeighbors.Peek()[3*i+2]);
//
// }
// triangleBuffer.Release();
// triNeighbors.Release();
// // END DEBUG
//
//// // DEBUG print new triangle index buffer
////// HostArr<uint> triangleBuffer;
//// triangleBuffer.Validate(3*this->triangleCnt);
//// cudaMemcpy(triangleBuffer.Peek(), vboTriIdxPt,
//// triangleBuffer.GetCount()*sizeof(uint), cudaMemcpyDeviceToHost);
//// for (int i = 0; i < this->triangleCnt; ++i) {
//// if ((i > 8200)&&(i < 8300)) {
//// printf("New Triangle Buffer %i: %u %u %u (vertex count %u)\n", i,
//// triangleBuffer.Peek()[3*i+0],
//// triangleBuffer.Peek()[3*i+1],
//// triangleBuffer.Peek()[3*i+2],
//// this->vertexCnt);
//// }
//// }
//// triangleBuffer.Release();
//// // END DEBUG
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for updating VBOs: %.10f sec\n",
dt_ms/1000.0f);
cudaEventRecord(event1, 0);
cudaEventRecord(eventEnd, 0);
cudaEventSynchronize(eventStart);
cudaEventSynchronize(eventEnd);
cudaEventElapsedTime(&dt_ms, eventStart, eventEnd);
printf("==> Total CUDA time for mesh refinement: %.10f sec\n",
dt_ms/1000.0f);
#endif
// Cleanup
if (!CudaSafeCall(cudaGraphicsUnmapResources(2, cudaTokens, 0))) {
return -1;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return -1;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]))) {
return -1;
}
return newTrianglesCnt;
#undef USE_TIMER
}
/*
* DeformableGPUSurfaceMT::Release
*/
void DeformableGPUSurfaceMT::Release() {
GPUSurfaceMT::Release();
CudaSafeCall(this->vertexExternalForcesScl_D.Release());
CudaSafeCall(this->gvfTmp_D.Release());
CudaSafeCall(this->gvfConstData_D.Release());
CudaSafeCall(this->laplacian_D.Release());
CudaSafeCall(this->laplacian2_D.Release());
CudaSafeCall(this->displLen_D.Release());
CudaSafeCall(this->distField_D.Release());
CudaSafeCall(this->externalForces_D.Release());
CudaSafeCall(this->accTriangleData_D.Release());
CudaSafeCall(this->accTriangleArea_D.Release());
CudaSafeCall(this->corruptTriangles_D.Release());
CudaSafeCall(this->intUncertaintyCorrupt_D.Release());
CudaSafeCall(this->accumPath_D.Release());
CudaSafeCall(triangleEdgeOffs_D.Release());
CudaSafeCall(triangleEdgeList_D.Release());
CudaSafeCall(subDivEdgeFlag_D.Release());
CudaSafeCall(subDivEdgeIdxOffs_D.Release());
CudaSafeCall(newVertices_D.Release());
CudaSafeCall(newTriangles_D.Release());
CudaSafeCall(oldTriangles_D.Release());
CudaSafeCall(trackedSubdivVertexData_D.Release());
CudaSafeCall(subDivCnt_D.Release());
CudaSafeCall(newTrianglesIdxOffs_D.Release());
CudaSafeCall(oldTrianglesIdxOffs_D.Release());
CudaSafeCall(newTriangleNeighbors_D.Release());
CudaSafeCall(subDivLevels_D.Release());
CudaSafeCall(oldSubDivLevels_D.Release());
CudaSafeCall(vertexFlag_D.Release());
CudaSafeCall(vertexFlagTmp_D.Release());
CudaSafeCall(vertexUncertaintyTmp_D.Release());
CudaSafeCall(triangleFaceNormals_D.Release());
CudaSafeCall(triangleIdxTmp_D.Release());
CudaSafeCall(outputArrayTmp_D.Release());
CudaSafeCall(reducedVertexKeysTmp_D.Release());
CudaSafeCall(reducedNormalsTmp_D.Release());
CudaSafeCall(vertexNormalsIndxOffs_D.Release());
CudaSafeCall(this->geometricLaplacian_D.Release());
if (this->vboCorruptTriangleVertexFlag) {
glBindBufferARB(GL_ARRAY_BUFFER, this->vboCorruptTriangleVertexFlag);
glDeleteBuffersARB(1, &this->vboCorruptTriangleVertexFlag);
glBindBufferARB(GL_ARRAY_BUFFER, 0);
this->vboCorruptTriangleVertexFlag = 0;
glBindBufferARB(GL_ARRAY_BUFFER, 0);
CheckForGLError();
}
if (this->vboVtxPath) {
glBindBufferARB(GL_ARRAY_BUFFER, this->vboVtxPath);
glDeleteBuffersARB(1, &this->vboVtxPath);
glBindBufferARB(GL_ARRAY_BUFFER, 0);
this->vboVtxPath = 0;
glBindBufferARB(GL_ARRAY_BUFFER, 0);
CheckForGLError();
}
if (this->vboVtxAttr) {
glBindBufferARB(GL_ARRAY_BUFFER, this->vboVtxAttr);
glDeleteBuffersARB(1, &this->vboVtxAttr);
glBindBufferARB(GL_ARRAY_BUFFER, 0);
this->vboVtxAttr = 0;
glBindBufferARB(GL_ARRAY_BUFFER, 0);
CheckForGLError();
}
::CheckForGLError();
}
/*
* DeformableGPUSurfaceMT::updateVtxPos
*/
bool DeformableGPUSurfaceMT::updateVtxPos(
float* volTarget_D,
float* vertexBuffer_D,
float* vtxUncertainty_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
float isovalue,
bool useCubicInterpolation,
size_t maxIt,
float surfMappedMinDisplScl,
float springStiffness,
float forceScl,
float externalForcesWeight,
bool trackPath,
bool externalForcesOnly,
bool useThinPlate) {
using namespace megamol::core::utility::log;
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
if (!CudaSafeCall(this->laplacian_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->laplacian_D.Set(0))) {
return false;
}
if (!CudaSafeCall(this->laplacian2_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->laplacian2_D.Set(0))) {
return false;
}
if (!CudaSafeCall(this->accumPath_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->accumPath_D.Set(0x00))) {
return false;
}
// Init uncertainty buffer with zero
if (trackPath) {
if (!CudaSafeCall(cudaMemset(vtxUncertainty_D, 0x00, this->vertexCnt*sizeof(float)))) {
return false;
}
}
//#define USE_TIMER
#ifdef USE_TIMER
float dt_ms;
cudaEvent_t event1, event2;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
cudaEventRecord(event1, 0);
#endif
#ifdef USE_TIMER
cudaEvent_t eventStart, eventEnd;
cudaEventCreate(&eventStart);
cudaEventCreate(&eventEnd);
#endif
int iterationsNeeded = maxIt;
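// Two relaxation variants: the regularized update (mesh Laplacian, optionally
// with a thin-plate/Laplacian^2 term) or an update driven by external forces
// only. Both iterate until the average per-vertex displacement drops below
// surfMappedMinDisplScl or maxIt is reached.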
if (!externalForcesOnly) {
// TODO Timer
for (uint i = 0; i < maxIt; ++i) {
// Calc laplacian
DeformableGPUSurfaceMT_MeshLaplacian_D <<< Grid(this->vertexCnt, 256), 256 >>> (
vertexBuffer_D,
this->vertexDataOffsPos,
this->vertexDataStride,
this->vertexNeighbours_D.Peek(),
18,
this->vertexCnt,
(float*)this->laplacian_D.Peek(),
0,
3);
::CheckForCudaErrorSync();
if (useThinPlate) {
// Calc laplacian^2
DeformableGPUSurfaceMT_MeshLaplacian_D <<< Grid(this->vertexCnt, 256), 256 >>> (
(float*)this->laplacian_D.Peek(),
0,
3,
this->vertexNeighbours_D.Peek(),
18,
this->vertexCnt,
(float*)this->laplacian2_D.Peek(),
0,
3);
::CheckForCudaErrorSync();
// Update vertex position
DeformableGPUSurfaceMT_UpdateVtxPos_D <<< Grid(this->vertexCnt, 256), 256 >>> (
volTarget_D,
vertexBuffer_D,
(float*)this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
vtxUncertainty_D,
(float4*)this->externalForces_D.Peek(),
this->laplacian_D.Peek(),
this->laplacian2_D.Peek(),
this->vertexCnt,
externalForcesWeight,
forceScl,
springStiffness,
isovalue,
surfMappedMinDisplScl,
useCubicInterpolation,
trackPath, // Track path of vertices
this->vertexDataOffsPos,
this->vertexDataOffsNormal,
this->vertexDataStride);
} else { // No thin plate aspect
// Update vertex position
DeformableGPUSurfaceMT_UpdateVtxPosNoThinPlate_D <<< Grid(this->vertexCnt, 256), 256 >>> (
volTarget_D,
vertexBuffer_D,
(float*)this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
vtxUncertainty_D,
(float4*)this->externalForces_D.Peek(),
this->laplacian_D.Peek(),
this->vertexCnt,
externalForcesWeight,
forceScl,
isovalue,
surfMappedMinDisplScl,
useCubicInterpolation,
trackPath, // Track path of vertices
this->vertexDataOffsPos,
this->vertexDataOffsNormal,
this->vertexDataStride);
}
// Accumulate displacement length of this iteration step
float avgDisplLen = 0.0f;
avgDisplLen = thrust::reduce(
thrust::device_ptr<float>(this->displLen_D.Peek()),
thrust::device_ptr<float>(this->displLen_D.Peek() + this->vertexCnt));
if (!CudaSafeCall(cudaGetLastError())) {
return false;
}
avgDisplLen /= static_cast<float>(this->vertexCnt);
// if (i%5 == 0) printf("It: %i, avgDispl: %.16f, min %.16f\n", i, avgDisplLen, surfMappedMinDisplScl);
// printf("It Reg: %i, avgDispl: %.16f, min %.16f\n", i, avgDisplLen, surfMappedMinDisplScl);
if (avgDisplLen < surfMappedMinDisplScl) {
iterationsNeeded = i+1;
break;
}
::CheckForCudaErrorSync();
}
} else {
for (uint i = 0; i < maxIt; ++i) {
// this->PrintVertexBuffer(1);
// // DEBUG print parameters
// printf("PARAMS:\n");
// printf("vertex count %u\n", this->vertexCnt);
// printf("forcesScl %f\n", forceScl);
// printf("isovalue %f\n", isovalue);
// printf("surfMappedMinDisplScl %f\n", surfMappedMinDisplScl);
// if (useCubicInterpolation) printf("useCubicInterpolation TRUE\n");
// else printf("useCubicInterpolation FALSE\n");
// if (trackPath) printf("trackPath TRUE\n");
// else printf("trackPath FALSE\n");
// // END DEBUG
// // DEBUG Print voltarget_D
// if (i == 0) {
// HostArr<float> volTarget;
// size_t gridSize = volDim.x*volDim.y*volDim.z;
// volTarget.Validate(gridSize);
// CudaSafeCall(cudaMemcpy(volTarget.Peek(), volTarget_D,
// sizeof(float)*gridSize,
// cudaMemcpyDeviceToHost));
//
// for (int i = 0; i < gridSize; ++i) {
// printf("VOL %.16f\n", volTarget.Peek()[i]);
// }
//
// volTarget.Release();
// }
// // END DEBUG
// cudaEventRecord(eventStart, 0);
// Update vertex position
DeformableGPUSurfaceMT_UpdateVtxPosExternalOnly_D <<< Grid(this->vertexCnt, 256), 256 >>> (
volTarget_D,
vertexBuffer_D,
(float*)this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
vtxUncertainty_D,
(float4*)this->externalForces_D.Peek(),
this->accumPath_D.Peek(),
this->vertexCnt,
forceScl,
isovalue,
surfMappedMinDisplScl,
useCubicInterpolation,
trackPath, // Track path of vertices
this->vertexDataOffsPos,
this->vertexDataOffsNormal,
this->vertexDataStride);
// cudaEventRecord(eventEnd, 0);
// cudaEventSynchronize(eventEnd);
// cudaEventSynchronize(eventStart);
// cudaEventElapsedTime(&dt_ms, eventStart, eventEnd);
//// Log::DefaultLog.WriteInfo(
//// "%s: Time for iteration (%u vertices): %f sec\n",
//// "DeformableGPUSurfaceMT",
//// this->vertexCnt,
//// dt_ms/1000.0f);
// cudaEventRecord(eventStart, 0);
// Accumulate displacement length of this iteration step
float avgDisplLen = 0.0f;
avgDisplLen = thrust::reduce(
thrust::device_ptr<float>(this->displLen_D.Peek()),
thrust::device_ptr<float>(this->displLen_D.Peek() + this->vertexCnt));
if (!CudaSafeCall(cudaGetLastError())) {
return false;
}
// cudaEventRecord(eventEnd, 0);
// cudaEventSynchronize(eventEnd);
// cudaEventSynchronize(eventStart);
// cudaEventElapsedTime(&dt_ms, eventStart, eventEnd);
// Log::DefaultLog.WriteInfo(
// "%s: Time for thrust::reduce (%u vertices): %f sec\n",
// "DeformableGPUSurfaceMT",
// this->vertexCnt,
// dt_ms/1000.0f);
avgDisplLen /= static_cast<float>(this->vertexCnt);
// if (i%5 == 0) printf("It %i, avgDispl: %.16f, min %.16f\n", i, avgDisplLen, surfMappedMinDisplScl);
// printf("It: %i, avgDispl: %.16f, min %.16f\n", i, avgDisplLen, surfMappedMinDisplScl);
if (avgDisplLen < surfMappedMinDisplScl) {
iterationsNeeded = i+1;
break;
}
::CheckForCudaErrorSync();
}
}
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
Log::DefaultLog.WriteInfo(
"%s: Time for mapping (%u iterations, %u vertices): %f sec\n",
"DeformableGPUSurfaceMT",
iterationsNeeded, this->vertexCnt, dt_ms/1000.0f);
//printf("Mapping : %.10f\n",
// dt_ms/1000.0f);
#endif
#undef USE_TIMER
return CudaSafeCall(cudaGetLastError());
}
/*
* DeformableGPUSurfaceMT::updateVtxPosSubdiv
*/
bool DeformableGPUSurfaceMT::updateVtxPosSubdiv(
float* volTarget_D,
float* vertexBuffer_D,
float* vtxUncertainty_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
float isovalue,
bool useCubicInterpolation,
size_t maxIt,
float surfMappedMinDisplScl,
float springStiffness,
float forceScl,
float externalForcesWeight,
bool trackPath,
bool externalForcesOnly,
bool useThinPlate) {
using namespace megamol::core::utility::log;
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
if (!CudaSafeCall(this->laplacian_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->laplacian_D.Set(0))) {
return false;
}
if (!CudaSafeCall(this->laplacian2_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->laplacian2_D.Set(0))) {
return false;
}
if (!CudaSafeCall(this->accumPath_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->accumPath_D.Set(0x00))) {
return false;
}
// Init uncertainty buffer with zero
if (trackPath) {
if (!CudaSafeCall(cudaMemset(vtxUncertainty_D, 0x00, this->vertexCnt*sizeof(float)))) {
return false;
}
}
#ifdef USE_TIMER
float dt_ms;
cudaEvent_t event1, event2;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
cudaEventRecord(event1, 0);
#endif
int iterationsNeeded = maxIt;
if (!externalForcesOnly) {
// TODO Timer
for (uint i = 0; i < maxIt; ++i) {
// Calc laplacian
DeformableGPUSurfaceMT_MeshLaplacian_D <<< Grid(this->vertexCnt, 256), 256 >>> (
vertexBuffer_D,
this->vertexDataOffsPos,
this->vertexDataStride,
this->vertexNeighbours_D.Peek(),
18,
this->vertexCnt,
(float*)this->laplacian_D.Peek(),
0,
3);
::CheckForCudaErrorSync();
if (useThinPlate) {
// Calc laplacian^2
DeformableGPUSurfaceMT_MeshLaplacian_D <<< Grid(this->vertexCnt, 256), 256 >>> (
(float*)this->laplacian_D.Peek(),
0,
3,
this->vertexNeighbours_D.Peek(),
18,
this->vertexCnt,
(float*)this->laplacian2_D.Peek(),
0,
3);
::CheckForCudaErrorSync();
// Update vertex position
DeformableGPUSurfaceMT_UpdateVtxPos_D <<< Grid(this->vertexCnt, 256), 256 >>> (
volTarget_D,
vertexBuffer_D,
(float*)this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
vtxUncertainty_D,
(float4*)this->externalForces_D.Peek(),
this->laplacian_D.Peek(),
this->laplacian2_D.Peek(),
this->vertexCnt,
externalForcesWeight,
forceScl,
springStiffness,
isovalue,
surfMappedMinDisplScl,
useCubicInterpolation,
trackPath, // Track path of vertices
this->vertexDataOffsPos,
this->vertexDataOffsNormal,
this->vertexDataStride);
} else { // No thin plate aspect
// Update vertex position
DeformableGPUSurfaceMT_UpdateVtxPosNoThinPlate_D <<< Grid(this->vertexCnt, 256), 256 >>> (
volTarget_D,
vertexBuffer_D,
(float*)this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
vtxUncertainty_D,
(float4*)this->externalForces_D.Peek(),
this->laplacian_D.Peek(),
this->vertexCnt,
externalForcesWeight,
forceScl,
isovalue,
surfMappedMinDisplScl,
useCubicInterpolation,
trackPath, // Track path of vertices
this->vertexDataOffsPos,
this->vertexDataOffsNormal,
this->vertexDataStride);
}
// Accumulate displacement length of this iteration step
float avgDisplLen = 0.0f;
avgDisplLen = thrust::reduce(
thrust::device_ptr<float>(this->displLen_D.Peek()),
thrust::device_ptr<float>(this->displLen_D.Peek() + this->vertexCnt));
if (!CudaSafeCall(cudaGetLastError())) {
return false;
}
avgDisplLen /= static_cast<float>(this->vertexCnt);
// if (i%5 == 0) printf("It: %i, avgDispl: %.16f, min %.16f\n", i, avgDisplLen, surfMappedMinDisplScl);
// printf("It: %i, avgDispl: %.16f, min %.16f\n", i, avgDisplLen, surfMappedMinDisplScl);
if (avgDisplLen < surfMappedMinDisplScl) {
iterationsNeeded = i+1;
break;
}
::CheckForCudaErrorSync();
}
} else {
// TODO Timer
for (uint i = 0; i < maxIt; ++i) {
// Update vertex position
DeformableGPUSurfaceMT_UpdateVtxPosExternalOnlySubdiv_D <<< Grid(this->vertexCnt, 256), 256 >>> (
volTarget_D,
vertexBuffer_D,
(float*)this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
vtxUncertainty_D,
(float4*)this->externalForces_D.Peek(),
this->accumPath_D.Peek(),
this->vertexFlag_D.Peek(),
this->vertexCnt,
forceScl,
isovalue,
surfMappedMinDisplScl,
useCubicInterpolation,
trackPath, // Track path of vertices
this->vertexDataOffsPos,
this->vertexDataOffsNormal,
this->vertexDataStride);
// Accumulate displacement length of this iteration step
float avgDisplLen = 0.0f;
avgDisplLen = thrust::reduce(
thrust::device_ptr<float>(this->displLen_D.Peek()),
thrust::device_ptr<float>(this->displLen_D.Peek() + this->vertexCnt));
if (!CudaSafeCall(cudaGetLastError())) {
return false;
}
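// Normalize by the number of flagged (newly created) vertices, since only
// these contribute to the displacement in the subdivision kernel above.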
avgDisplLen /= static_cast<float>(this->nFlaggedVertices);
// printf("New vertex count %u\n", this->nFlaggedVertices);
// if (i%5 == 0) printf("It %i, avgDispl: %.16f, min %.16f\n", i, avgDisplLen, surfMappedMinDisplScl);
// printf("It: %i, avgDispl: %.16f, min %.1f\n", i, avgDisplLen, surfMappedMinDisplScl);
if (avgDisplLen < surfMappedMinDisplScl) {
iterationsNeeded = i+1;
break;
}
::CheckForCudaErrorSync();
}
}
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
Log::DefaultLog.WriteInfo(
"%s: Time for mapping (%u iterations, %u vertices): %f sec\n",
"DeformableGPUSurfaceMT",
iterationsNeeded, this->vertexCnt, dt_ms/1000.0f);
//printf("Mapping : %.10f\n",
// dt_ms/1000.0f);
#endif
return CudaSafeCall(cudaGetLastError());
}
/*
* ComputeVtxDiffValue0_D
*/
__global__ void DeformableGPUSurfaceMT_ComputeVtxDiffValue0_D(
float *diff_D,
float *tex0_D,
float *vtxData0_D,
size_t vertexCnt) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
const int vertexDataStride = 9; // TODO
const int vertexDataOffsPos = 0;
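// First pass: sample field #0 at the vertex position and store the value;
// the second pass (ComputeVtxDiffValue1_D) overwrites it with the absolute
// difference to the value sampled from field #1.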
float3 pos;
pos.x = vtxData0_D[vertexDataStride*idx + vertexDataOffsPos +0];
pos.y = vtxData0_D[vertexDataStride*idx + vertexDataOffsPos +1];
pos.z = vtxData0_D[vertexDataStride*idx + vertexDataOffsPos +2];
diff_D[idx] = ::SampleFieldAtPosTrilin_D<float, true>(pos, tex0_D);
}
/*
* ComputeVtxDiffValue1_D
*/
__global__ void DeformableGPUSurfaceMT_ComputeVtxDiffValue1_D(
float *diff_D,
float *tex1_D,
float *vtxData1_D,
size_t vertexCnt) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
const int vertexDataStride = 9; // TODO
const int vertexDataOffsPos = 0;
float valFirst = diff_D[idx];
float3 pos;
pos.x = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +0];
pos.y = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +1];
pos.z = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +2];
float valSec = ::SampleFieldAtPosTrilin_D<float, true>(pos, tex1_D);
valSec = abs(valSec-valFirst);
diff_D[idx] = valSec;
}
/*
* DeformableGPUSurfaceMT_ComputeVtxDiffValue1Fitted_D
*/
__global__ void DeformableGPUSurfaceMT_ComputeVtxDiffValue1Fitted_D(
float *diff_D,
float *tex1_D,
float *vtxData1_D,
float *rotation_D,
float3 translation,
float3 centroid,
size_t vertexCnt) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
const int vertexDataStride = 9; // TODO
const int vertexDataOffsPos = 0;
//float valFirst = diff_D[idx];
float3 pos;
pos.x = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +0];
pos.y = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +1];
pos.z = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +2];
// Revert translation to move to origin
pos.x -= translation.x;
pos.y -= translation.y;
pos.z -= translation.z;
// Revert rotation
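// Multiplying with the transposed (column-wise) matrix undoes the rotation,
// assuming rotation_D holds an orthonormal row-major 3x3 rotation matrix.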
float3 posRot;
posRot.x = rotation_D[0] * pos.x +
rotation_D[3] * pos.y +
rotation_D[6] * pos.z;
posRot.y = rotation_D[1] * pos.x +
rotation_D[4] * pos.y +
rotation_D[7] * pos.z;
posRot.z = rotation_D[2] * pos.x +
rotation_D[5] * pos.y +
rotation_D[8] * pos.z;
// Move to old centroid
posRot.x += centroid.x;
posRot.y += centroid.y;
posRot.z += centroid.z;
float valSec = ::SampleFieldAtPosTrilin_D<float, true>(posRot, tex1_D);
//valSec = abs(valSec-valFirst);
diff_D[idx] = valSec;
//printf("%f\n", valSec); // DEBUG
}
/*
* ComputeVtxSignDiffValue1_D
*/
__global__ void DeformableGPUSurfaceMT_ComputeVtxSignDiffValue1_D(
float *signdiff_D,
float *tex1_D,
float *vtxData1_D,
size_t vertexCnt) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
const int vertexDataStride = 9; // TODO
const int vertexDataOffsPos = 0;
float valFirst = signdiff_D[idx];
float3 pos;
pos.x = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +0];
pos.y = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +1];
pos.z = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +2];
float valSec = ::SampleFieldAtPosTrilin_D<float, true>(pos, tex1_D);
valSec = float(valSec*valFirst < 0); // TODO Use binary operator
signdiff_D[idx] = valSec;
}
/*
* ComputeVtxSignDiffValue1Fitted_D
*/
__global__ void DeformableGPUSurfaceMT_ComputeVtxSignDiffValue1Fitted_D(
float *signdiff_D,
float *tex1_D,
float *vtxData1_D,
float *rotation_D,
float3 translation,
float3 centroid,
size_t vertexCnt) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
const int vertexDataStride = 9; // TODO
const int vertexDataOffsPos = 0;
float valFirst = signdiff_D[idx];
// float3 pos;
// pos.x = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +0];
// pos.y = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +1];
// pos.z = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +2];
float3 pos;
pos.x = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +0];
pos.y = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +1];
pos.z = vtxData1_D[vertexDataStride*idx + vertexDataOffsPos +2];
// Revert translation to move to origin
pos.x -= translation.x;
pos.y -= translation.y;
pos.z -= translation.z;
// Revert rotation
float3 posRot;
posRot.x = rotation_D[0] * pos.x +
rotation_D[3] * pos.y +
rotation_D[6] * pos.z;
posRot.y = rotation_D[1] * pos.x +
rotation_D[4] * pos.y +
rotation_D[7] * pos.z;
posRot.z = rotation_D[2] * pos.x +
rotation_D[5] * pos.y +
rotation_D[8] * pos.z;
// Move to old centroid
posRot.x += centroid.x;
posRot.y += centroid.y;
posRot.z += centroid.z;
float valSec = ::SampleFieldAtPosTrilin_D<float, true>(posRot, tex1_D);
valSec = float(valSec*valFirst < 0); // TODO Use binary operator
signdiff_D[idx] = valSec;
}
/*
* DeformableGPUSurfaceMT::ComputeVtxDiffValue
*/
bool DeformableGPUSurfaceMT::ComputeVtxDiffValue(
float *diff_D,
float *tex0_D,
int3 texDim0,
float3 texOrg0,
float3 texDelta0,
float *tex1_D,
int3 texDim1,
float3 texOrg1,
float3 texDelta1,
GLuint vtxDataVBO0,
GLuint vtxDataVBO1,
size_t vertexCnt) {
using namespace megamol::core::utility::log;
/* Get pointers to vertex data */
cudaGraphicsResource* cudaTokens[2];
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0], vtxDataVBO0,
cudaGraphicsMapFlagsNone))) {
return false;
}
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[1], vtxDataVBO1,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt0, *vboPt1;
size_t vboSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt0), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt1), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
// Init CUDA grid for texture #0
if (!initGridParams(texDim0, texOrg0, texDelta0)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
DeformableGPUSurfaceMT::ClassName());
return false;
}
// Call first kernel
#ifdef USE_TIMER
float dt_ms;
cudaEvent_t event1, event2;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
cudaEventRecord(event1, 0);
#endif
DeformableGPUSurfaceMT_ComputeVtxDiffValue0_D <<< Grid(vertexCnt, 256), 256 >>> (
diff_D,
tex0_D,
vboPt0,
vertexCnt);
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'ComputeVtxDiffValue0_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
// Init CUDA grid for texture #1
if (!initGridParams(texDim1, texOrg1, texDelta1)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
DeformableGPUSurfaceMT::ClassName());
return false;
}
// Call second kernel
#ifdef USE_TIMER
cudaEventRecord(event1, 0);
#endif
DeformableGPUSurfaceMT_ComputeVtxDiffValue1_D <<< Grid(vertexCnt, 256), 256 >>> (
diff_D,
tex1_D,
vboPt1,
vertexCnt);
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'ComputeVtxDiffValue1_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
if (!CudaSafeCall(cudaGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
return true;
}
/*
* DeformableGPUSurfaceMT::ComputeVtxDiffValueFitted
*/
bool DeformableGPUSurfaceMT::ComputeVtxDiffValueFitted(
float *diff_D,
float centroid[3],
float rotMat[9],
float transVec[3],
float *tex0_D,
int3 texDim0,
float3 texOrg0,
float3 texDelta0,
float *tex1_D,
int3 texDim1,
float3 texOrg1,
float3 texDelta1,
GLuint vtxDataVBO0,
GLuint vtxDataVBO1,
size_t vertexCnt) {
CudaDevArr<float> rotate_D;
// Rotate for best fit
rotate_D.Validate(9);
if (!CudaSafeCall(cudaMemcpy((void *)rotate_D.Peek(), &rotMat[0],
9*sizeof(float), cudaMemcpyHostToDevice))) {
return false;
}
using namespace megamol::core::utility::log;
/* Get pointers to vertex data */
cudaGraphicsResource* cudaTokens[2];
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0], vtxDataVBO0,
cudaGraphicsMapFlagsNone))) {
return false;
}
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[1], vtxDataVBO1,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt0, *vboPt1;
size_t vboSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt0), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt1), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
// Init CUDA grid for texture #0
if (!initGridParams(texDim0, texOrg0, texDelta0)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
DeformableGPUSurfaceMT::ClassName());
return false;
}
// Call first kernel
#ifdef USE_TIMER
float dt_ms;
cudaEvent_t event1, event2;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
cudaEventRecord(event1, 0);
#endif
DeformableGPUSurfaceMT_ComputeVtxDiffValue0_D <<< Grid(vertexCnt, 256), 256 >>> (
diff_D,
tex0_D,
vboPt0,
vertexCnt);
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'ComputeVtxDiffValue0_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
// Init CUDA grid for texture #1
if (!initGridParams(texDim1, texOrg1, texDelta1)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
DeformableGPUSurfaceMT::ClassName());
return false;
}
// Call second kernel
#ifdef USE_TIMER
cudaEventRecord(event1, 0);
#endif
DeformableGPUSurfaceMT_ComputeVtxDiffValue1Fitted_D <<< Grid(vertexCnt, 256), 256 >>> (
diff_D,
tex1_D,
vboPt1,
rotate_D.Peek(),
make_float3(transVec[0],transVec[1],transVec[2]),
make_float3(centroid[0],centroid[1],centroid[2]),
vertexCnt);
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'ComputeVtxDiffValue1_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
if (!CudaSafeCall(cudaGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
if (!CudaSafeCall(rotate_D.Release())) {
return false;
}
return true;
}
/*
* DeformableGPUSurfaceMT::ComputeVtxSignDiffValue
*/
bool DeformableGPUSurfaceMT::ComputeVtxSignDiffValue(
float *signdiff_D,
float *tex0_D,
int3 texDim0,
float3 texOrg0,
float3 texDelta0,
float *tex1_D,
int3 texDim1,
float3 texOrg1,
float3 texDelta1,
GLuint vtxDataVBO0,
GLuint vtxDataVBO1,
size_t vertexCnt) {
using namespace megamol::core::utility::log;
/* Get pointers to vertex data */
cudaGraphicsResource* cudaTokens[2];
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0], vtxDataVBO0,
cudaGraphicsMapFlagsNone))) {
return false;
}
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[1], vtxDataVBO1,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt0;
size_t vboSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt0), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt1;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt1), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
// Init CUDA grid for texture #0
if (!initGridParams(texDim0, texOrg0, texDelta0)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
DeformableGPUSurfaceMT::ClassName());
return false;
}
// Call first kernel
#ifdef USE_TIMER
float dt_ms;
cudaEvent_t event1, event2;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
cudaEventRecord(event1, 0);
#endif
DeformableGPUSurfaceMT_ComputeVtxDiffValue0_D <<< Grid(vertexCnt, 256), 256 >>> (
signdiff_D,
tex0_D,
vboPt0,
vertexCnt);
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'ComputeVtxSignDiffValue0_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
// Init CUDA grid for texture #1
if (!initGridParams(texDim1, texOrg1, texDelta1)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
DeformableGPUSurfaceMT::ClassName());
return false;
}
// Call second kernel
#ifdef USE_TIMER
cudaEventRecord(event1, 0);
#endif
DeformableGPUSurfaceMT_ComputeVtxSignDiffValue1_D <<< Grid(vertexCnt, 256), 256 >>> (
signdiff_D,
tex1_D,
vboPt1,
vertexCnt);
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'ComputeVtxDiffValue1_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
if (!CudaSafeCall(cudaGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
return true;
}
/*
* DeformableGPUSurfaceMT::ComputeVtxSignDiffValueFitted
*/
bool DeformableGPUSurfaceMT::ComputeVtxSignDiffValueFitted(
float *signdiff_D,
float centroid[3],
float rotMat[9],
float transVec[3],
float *tex0_D,
int3 texDim0,
float3 texOrg0,
float3 texDelta0,
float *tex1_D,
int3 texDim1,
float3 texOrg1,
float3 texDelta1,
GLuint vtxDataVBO0,
GLuint vtxDataVBO1,
size_t vertexCnt) {
CudaDevArr<float> rotate_D;
// Rotate for best fit
rotate_D.Validate(9);
if (!CudaSafeCall(cudaMemcpy((void *)rotate_D.Peek(), &rotMat[0],
9*sizeof(float), cudaMemcpyHostToDevice))) {
return false;
}
using namespace megamol::core::utility::log;
/* Get pointers to vertex data */
cudaGraphicsResource* cudaTokens[2];
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0], vtxDataVBO0,
cudaGraphicsMapFlagsNone))) {
return false;
}
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[1], vtxDataVBO1,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt0;
size_t vboSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt0), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vboPt1;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt1), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
// Init CUDA grid for texture #0
if (!initGridParams(texDim0, texOrg0, texDelta0)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
DeformableGPUSurfaceMT::ClassName());
return false;
}
// Call first kernel
#ifdef USE_TIMER
float dt_ms;
cudaEvent_t event1, event2;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
cudaEventRecord(event1, 0);
#endif
DeformableGPUSurfaceMT_ComputeVtxDiffValue0_D <<< Grid(vertexCnt, 256), 256 >>> (
signdiff_D,
tex0_D,
vboPt0,
vertexCnt);
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'ComputeVtxSignDiffValue0_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
// Init CUDA grid for texture #1
if (!initGridParams(texDim1, texOrg1, texDelta1)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
DeformableGPUSurfaceMT::ClassName());
return false;
}
// Call second kernel
#ifdef USE_TIMER
cudaEventRecord(event1, 0);
#endif
DeformableGPUSurfaceMT_ComputeVtxSignDiffValue1Fitted_D <<< Grid(vertexCnt, 256), 256 >>> (
signdiff_D,
tex1_D,
vboPt1,
rotate_D.Peek(),
make_float3(transVec[0],transVec[1],transVec[2]),
make_float3(centroid[0],centroid[1],centroid[2]),
vertexCnt);
#ifdef USE_TIMER
cudaEventRecord(event2, 0);
cudaEventSynchronize(event1);
cudaEventSynchronize(event2);
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("CUDA time for 'ComputeVtxDiffValue1_D': %.10f sec\n",
dt_ms/1000.0f);
#endif
if (!CudaSafeCall(cudaGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
if (!CudaSafeCall(rotate_D.Release())) {
return false;
}
return true;
}
/*
* DeformableGPUSurfaceMT_CalcHausdorffDistance_D
*/
__global__ void DeformableGPUSurfaceMT_CalcHausdorffDistance_D(
float *vtxData1,
float *vtxData2,
float *hausdorffdistVtx_D,
uint vertexCnt1,
uint vertexCnt2) {
const uint posOffs = 0; // TODO Define const device vars
const uint stride = 9;
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt1) {
return;
}
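// Brute force: for this vertex of surface 1, find the squared distance to the
// closest vertex of surface 2; the host side then reduces the maximum.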
float3 pos1 = make_float3(
vtxData1[stride*idx+posOffs+0],
vtxData1[stride*idx+posOffs+1],
vtxData1[stride*idx+posOffs+2]);
float3 pos2;
float distSqr;
float minDistSqr = 10000000.0;
for (int i = 0; i < vertexCnt2; ++i) {
pos2 = make_float3(
vtxData2[stride*i+posOffs+0],
vtxData2[stride*i+posOffs+1],
vtxData2[stride*i+posOffs+2]);
distSqr = (pos2.x-pos1.x)*(pos2.x-pos1.x) +
(pos2.y-pos1.y)*(pos2.y-pos1.y) +
(pos2.z-pos1.z)*(pos2.z-pos1.z);
minDistSqr = min(minDistSqr,distSqr);
}
hausdorffdistVtx_D[idx] = minDistSqr;
}
/*
* DeformableGPUSurfaceMT::CalcHausdorffDistance
*/
float DeformableGPUSurfaceMT::CalcHausdorffDistance(
DeformableGPUSurfaceMT *surf1,
DeformableGPUSurfaceMT *surf2,
float *hausdorffdistVtx_D,
bool symmetric) {
// TODO Implement symmetric version
/* Get pointers to vertex data */
cudaGraphicsResource* cudaTokens[2];
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0], surf1->GetVtxDataVBO(),
cudaGraphicsMapFlagsNone))) {
return 0.0f;
}
// Register memory with CUDA
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[1], surf2->GetVtxDataVBO(),
cudaGraphicsMapFlagsNone))) {
return 0.0f;
}
if (!CudaSafeCall(cudaGraphicsMapResources(2, cudaTokens, 0))) {
return 0.0f;
}
// Get mapped pointers to the vertex data buffers
float *vboPt0, *vboPt1;
size_t vboSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt0), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return 0.0f;
}
// Get mapped pointers to the vertex data buffers
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboPt1), // The mapped pointer
&vboSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return 0.0f;
}
// Calc kernel
// TODO Implement less lazy and faster version of Hausdorff distance
DeformableGPUSurfaceMT_CalcHausdorffDistance_D <<< Grid(surf1->GetVertexCnt(), 256), 256 >>> (
vboPt0,
vboPt1,
hausdorffdistVtx_D,
surf1->GetVertexCnt(),
surf2->GetVertexCnt());
if (!CudaSafeCall(cudaGraphicsUnmapResources(2, cudaTokens, 0))) {
return 0.0f;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return 0.0f;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]))) {
return 0.0f;
}
float res = 0.0;
res = thrust::reduce(
thrust::device_ptr<float>(hausdorffdistVtx_D),
thrust::device_ptr<float>(hausdorffdistVtx_D + surf1->GetVertexCnt()),
-1.0,
thrust::maximum<float>());
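// The per-vertex values are squared minimum distances, so the maximum plus a
// final sqrt gives the (one-sided) Hausdorff distance from surf1 to surf2.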
return sqrt(res);
}
__global__ void TrackPathSubdivVertices_D(
float *sourceVolume_D,
float *vertexData_D,
float *vertexFlag_D,
float *vertexExternalForcesScl_D,
float *displLen_D,
float *vtxUncertainty_D,
float4 *gradient_D,
int *accumPath_D,
uint vertexCnt,
float forcesScl,
float isoval,
float minDispl) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
if (vertexFlag_D[idx] == 0.0) {
vertexData_D[9*idx+3] = 0.0;
vertexData_D[9*idx+4] = 1.0;
vertexData_D[9*idx+5] = 0.0;
displLen_D[idx] = 0.0; // Old vertices are by definition converged
return; // This is an old vertex
}
// Check convergence criterion
float lastDisplLen = displLen_D[idx];
if (lastDisplLen <= minDispl) {
displLen_D[idx] = 0.0;
return; // Vertex is converged
}
/* Retrieve stuff from global device memory */
// Get initial position from global device memory
float3 posOld = make_float3(
vertexData_D[9*idx+0],
vertexData_D[9*idx+1],
vertexData_D[9*idx+2]);
// Get initial scale factor for external forces
float externalForcesScl = vertexExternalForcesScl_D[idx];
//float externalForcesSclOld = externalForcesScl;
/* Update position */
// No warp divergence here, since useCubicInterpolation is the same for all
// threads
//const float sampleDens = SampleFieldAtPosTrilin_D<float>(posOld, sourceVolume_D);
const float sampleDens = SampleFieldAtPosTricub_D<float, false>(posOld, sourceVolume_D);
// Switch sign and scale down if necessary
bool negative = externalForcesScl < 0;
bool outside = sampleDens <= isoval;
int switchSign = int((negative && outside)||(!negative && !outside));
externalForcesScl = externalForcesScl*(1.0*(1-switchSign) - 1.0*switchSign);
externalForcesScl *= (1.0*(1-switchSign) + 0.5*(switchSign));
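// When switchSign is 1, the two lines above negate the step scale and halve
// its magnitude (a bisection-style refinement towards the isovalue); when
// switchSign is 0 the scale is left unchanged.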
if (bool(switchSign) && (accumPath_D[idx] != 0)) {
accumPath_D[idx] = 0;
} else if (bool(switchSign) && (accumPath_D[idx] == 0)) {
accumPath_D[idx] = 1;
}
// Sample gradient
//float4 externalForceTmp = SampleFieldAtPosTrilin_D<float4>(posOld, gradient_D);
float4 externalForceTmp = SampleFieldAtPosTricub_D<float4, false>(posOld, gradient_D);
float3 externalForce;
externalForce.x = externalForceTmp.x;
externalForce.y = externalForceTmp.y;
externalForce.z = externalForceTmp.z;
externalForce = safeNormalize(externalForce);
externalForce *= forcesScl*externalForcesScl;
float3 posNew = posOld + externalForce; // Integrate backwards
/* Write back to global device memory */
// New pos
vertexData_D[9*idx+0] = posNew.x;
vertexData_D[9*idx+1] = posNew.y;
vertexData_D[9*idx+2] = posNew.z;
// Write external forces scale factor back to global device memory
vertexExternalForcesScl_D[idx] = externalForcesScl;
// float3 diff = posNew-posOld;
// float diffLen = length(diff);
float diffLen = abs(forcesScl*externalForcesScl);
// if ((abs(externalForcesScl) == 1.0f)) {
// vtxUncertainty_D[idx] += diffLen;
// }
if (accumPath_D[idx] == 0) {
vtxUncertainty_D[idx] += diffLen;
} else if(accumPath_D[idx] != 0) {
vtxUncertainty_D[idx] -= diffLen;
}
// Displ scl for convergence
displLen_D[idx] = diffLen;
//displLen_D[idx] = 0.1;
vertexData_D[9*idx+3] = 1.0;
vertexData_D[9*idx+4] = 0.0;
vertexData_D[9*idx+5] = 1.0;
}
/*
 * DeformableGPUSurfaceMT::TrackPathSubdivVertices
*/
bool DeformableGPUSurfaceMT::TrackPathSubdivVertices(
float *sourceVolume_D,
int3 volDim,
float3 volOrg,
float3 volDelta,
float forcesScl,
float minDispl,
float isoval,
uint maxIt) {
using namespace megamol::core::utility::log;
// Init constant device params
if (!initGridParams(volDim, volOrg, volDelta)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
this->ClassName());
return false;
}
/* 1. Reinitialize VBO and copy back uncertainty values */
cudaGraphicsResource* cudaTokens[1];
cudaGraphicsResource* cudaTokens2[2];
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboVtxPath,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsMapResources(1, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *uncertaintyPt;
size_t vboVtxPathSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&uncertaintyPt), // The mapped pointer
&vboVtxPathSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Copy old values to temporary array
if (!CudaSafeCall(this->vertexUncertaintyTmp_D.Validate(vboVtxPathSize/sizeof(float)))) {
return false;
}
if (!CudaSafeCall(cudaMemcpy(
this->vertexUncertaintyTmp_D.Peek(),
uncertaintyPt,
vboVtxPathSize,
cudaMemcpyDeviceToDevice))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnmapResources(1, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
// Re-initialize VBO
if (!this->InitVtxPathVBO(this->vertexCnt)) {
return false;
}
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens2[0],
this->vboVtxPath,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens2[1],
this->vboVtxData,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsMapResources(2, cudaTokens2, 0))) {
return false;
}
float *vboVertexPt;
size_t vboVertexSize;
// Get mapped pointers to the vertex data buffers
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&uncertaintyPt), // The mapped pointer
&vboVtxPathSize, // The size of the accessible data
cudaTokens2[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vboVertexPt), // The mapped pointer
&vboVertexSize, // The size of the accessible data
cudaTokens2[1]))) { // The mapped resource
return false;
}
if (!CudaSafeCall(cudaMemset(uncertaintyPt, 0x00, vboVtxPathSize))) {
return false;
}
if (!CudaSafeCall(cudaMemcpy(
uncertaintyPt,
this->vertexUncertaintyTmp_D.Peek(),
sizeof(float)*this->vertexUncertaintyTmp_D.GetCount(),
cudaMemcpyDeviceToDevice))) {
return false;
}
/* 2. Write uncertainty values of new vertices */
// Get copy of vertex buffer
if (!CudaSafeCall(this->trackedSubdivVertexData_D.Validate(this->vertexCnt*this->vertexDataStride))) {
return false;
}
if (!CudaSafeCall(cudaMemcpy(this->trackedSubdivVertexData_D.Peek(),
vboVertexPt,
vboVertexSize,
cudaMemcpyDeviceToDevice))) {
return false;
}
// Check/prepare necessary arrays
if (sourceVolume_D == NULL) {
return false;
}
if (!CudaSafeCall(this->displLen_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->displLen_D.Set(0x00))) {
return false;
}
if (!CudaSafeCall(this->vertexExternalForcesScl_D.Validate(this->vertexCnt))) {
return false;
}
DeformableGPUSurfaceMT_InitExternalForceScl_D <<< Grid(this->vertexCnt, 256), 256 >>> (
(float*)this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
sourceVolume_D,
this->trackedSubdivVertexData_D.Peek(),
minDispl,
this->vertexCnt,
isoval,
this->vertexDataOffsPos,
this->vertexDataStride);
if (!CheckForCudaError()) {
return false;
}
if (this->vertexFlag_D.GetCount() != this->vertexCnt) {
if (!CudaSafeCall(this->vertexFlag_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->vertexFlag_D.Set(0x00))) {
return false;
}
}
if (!CudaSafeCall(this->accumPath_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->accumPath_D.Set(0x00))) {
return false;
}
uint iterationsNeeded = 0;
for (uint i = 0; i < maxIt; ++i) {
// Update vertex position
TrackPathSubdivVertices_D <<< Grid(this->vertexCnt, 256), 256 >>> (
sourceVolume_D,
this->trackedSubdivVertexData_D.Peek(),
this->vertexFlag_D.Peek(),
this->vertexExternalForcesScl_D.Peek(),
this->displLen_D.Peek(),
uncertaintyPt,
(float4*)(this->externalForces_D.Peek()),
this->accumPath_D.Peek(),
this->vertexCnt,
forcesScl,
isoval,
minDispl);
if (!CheckForCudaError()) {
return false;
}
// Accumulate displacement length of this iteration step
float avgDisplLen = 0.0f;
avgDisplLen = thrust::reduce(
thrust::device_ptr<float>(this->displLen_D.Peek()),
thrust::device_ptr<float>(this->displLen_D.Peek() + this->vertexCnt));
if (!CudaSafeCall(cudaGetLastError())) {
return false;
}
// printf("Number of flagged vertices %u, %f\n", this->nFlaggedVertices, avgDisplLen);
avgDisplLen /= static_cast<float>(this->nFlaggedVertices);
// if (i%10 == 0) printf("It %i, avgDispl: %.16f, min %.16f\n", i, avgDisplLen, minDispl);
// printf("It: %i, avgDispl: %.16f, min %.16f\n", i, avgDisplLen, minDispl);
if (avgDisplLen < minDispl) {
iterationsNeeded = i+1;
break;
}
::CheckForCudaErrorSync();
}
if (!CudaSafeCall(cudaGraphicsUnmapResources(2, cudaTokens2, 0))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens2[0]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens2[1]))) {
return false;
}
return CheckForCudaError();
}
/*
 * DeformableGPUSurfaceMT_ComputeSurfAttribSignDiff0_D
*/
__global__ void DeformableGPUSurfaceMT_ComputeSurfAttribSignDiff0_D (
float *vertexAttrib_D,
float *vertexDataEnd_D,
float *tex0_D,
uint vertexCnt) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
const int vertexDataStride = 9; // TODO
const int vertexDataOffsPos = 0;
float3 pos;
pos.x = vertexDataEnd_D[vertexDataStride*idx + vertexDataOffsPos +0];
pos.y = vertexDataEnd_D[vertexDataStride*idx + vertexDataOffsPos +1];
pos.z = vertexDataEnd_D[vertexDataStride*idx + vertexDataOffsPos +2];
vertexAttrib_D[idx] = ::SampleFieldAtPosTrilin_D<float, true>(pos, tex0_D);
}
/*
 * DeformableGPUSurfaceMT_ComputeSurfAttribSignDiff1_D
*/
__global__ void DeformableGPUSurfaceMT_ComputeSurfAttribSignDiff1_D (
float *vertexAttrib_D,
float *vertexDataStart_D,
float *vertexDataTrackedBack_D,
float *vertexFlag_D,
float *tex1_D,
float *rotation_D,
float3 translation,
float3 centroid,
uint vertexCnt) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
const int vertexDataStride = 9; // TODO
const int vertexDataOffsPos = 0;
float3 pos;
if (vertexFlag_D[idx] == 1.0) {
pos.x = vertexDataTrackedBack_D[vertexDataStride*idx + vertexDataOffsPos +0];
pos.y = vertexDataTrackedBack_D[vertexDataStride*idx + vertexDataOffsPos +1];
pos.z = vertexDataTrackedBack_D[vertexDataStride*idx + vertexDataOffsPos +2];
} else {
pos.x = vertexDataStart_D[vertexDataStride*idx + vertexDataOffsPos +0];
pos.y = vertexDataStart_D[vertexDataStride*idx + vertexDataOffsPos +1];
pos.z = vertexDataStart_D[vertexDataStride*idx + vertexDataOffsPos +2];
}
// Revert translation to move to origin
pos.x -= translation.x;
pos.y -= translation.y;
pos.z -= translation.z;
// Revert rotation
float3 posRot;
posRot.x = rotation_D[0] * pos.x +
rotation_D[3] * pos.y +
rotation_D[6] * pos.z;
posRot.y = rotation_D[1] * pos.x +
rotation_D[4] * pos.y +
rotation_D[7] * pos.z;
posRot.z = rotation_D[2] * pos.x +
rotation_D[5] * pos.y +
rotation_D[8] * pos.z;
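// rotation_D holds a row-major 3x3 rotation matrix; indexing it column-wise
// as above multiplies by its transpose, i.e. applies the inverse rotation.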
// Move to old centroid
posRot.x += centroid.x;
posRot.y += centroid.y;
posRot.z += centroid.z;
float attribOld = vertexAttrib_D[idx];
float attribNew = ::SampleFieldAtPosTrilin_D<float, true>(posRot, tex1_D);
vertexAttrib_D[idx] = int(attribOld*attribNew < 0); // 1.0 or 0.0
}
/*
* DeformableGPUSurfaceMT::ComputeSurfAttribSignDiff
*/
bool DeformableGPUSurfaceMT::ComputeSurfAttribSignDiff(
DeformableGPUSurfaceMT &surfStart,
float centroid[3], // In case the start surface has been fitted using RMSD
float rotMat[9],
float transVec[3],
float *tex0_D,
int3 texDim0,
float3 texOrg0,
float3 texDelta0,
float *tex1_D,
int3 texDim1,
float3 texOrg1,
float3 texDelta1) {
using namespace megamol::core::utility::log;
if (!this->InitVtxAttribVBO(this->vertexCnt)) {
return false;
}
// Get pointer to vertex attribute array
cudaGraphicsResource* cudaTokens[3];
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboVtxAttr,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[1],
this->vboVtxData,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[2],
surfStart.GetVtxDataVBO(),
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsMapResources(3, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vertexAttrib_D;
size_t vboVtxAttribSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vertexAttrib_D), // The mapped pointer
&vboVtxAttribSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vertexDataEnd_D;
size_t vboEndSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vertexDataEnd_D), // The mapped pointer
&vboEndSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vertexDataStart_D;
size_t vboStartSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vertexDataStart_D), // The mapped pointer
&vboStartSize, // The size of the accessible data
cudaTokens[2]))) { // The mapped resource
return false;
}
// Init grid params
// Init CUDA grid for texture #0
if (!initGridParams(texDim0, texOrg0, texDelta0)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
DeformableGPUSurfaceMT::ClassName());
return false;
}
// Compute difference for new and old vertices (after subdivision)
// Part one: sample value for new vertices
DeformableGPUSurfaceMT_ComputeSurfAttribSignDiff0_D <<< Grid(this->vertexCnt, 256), 256 >>> (
vertexAttrib_D,
vertexDataEnd_D,
tex0_D,
this->vertexCnt);
CudaDevArr<float> rotate_D;
// Rotate for best fit
rotate_D.Validate(9);
if (!CudaSafeCall(cudaMemcpy((void *)rotate_D.Peek(), &rotMat[0],
9*sizeof(float), cudaMemcpyHostToDevice))) {
return false;
}
// Init grid params
// Init CUDA grid for texture #1
if (!initGridParams(texDim1, texOrg1, texDelta1)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
DeformableGPUSurfaceMT::ClassName());
return false;
}
if (this->vertexFlag_D.GetCount() == 0) {
if (!CudaSafeCall(this->vertexFlag_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->vertexFlag_D.Set(0x00))) {
return false;
}
}
// Compute difference for new and old vertices (after subdivision)
// Part two: sample value for old/tracked back vertices
DeformableGPUSurfaceMT_ComputeSurfAttribSignDiff1_D <<< Grid(this->vertexCnt, 256), 256 >>> (
vertexAttrib_D,
vertexDataStart_D,
this->trackedSubdivVertexData_D.Peek(), // Tracked back vertices, needed for sampling
this->vertexFlag_D.Peek(),
tex1_D,
rotate_D.Peek(),
make_float3(transVec[0],transVec[1],transVec[2]),
make_float3(centroid[0],centroid[1],centroid[2]),
this->vertexCnt);
if (!CheckForCudaError()) {
return false;
}
rotate_D.Release();
if (!CudaSafeCall(cudaGraphicsUnmapResources(3, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[2]))) {
return false;
}
return true;
}
/*
* DeformableGPUSurfaceMT_ComputeSurfAttribDiff0_D
*/
__global__ void DeformableGPUSurfaceMT_ComputeSurfAttribDiff0_D (
float *vertexAttrib_D,
float *vertexDataEnd_D,
float *tex0_D,
uint vertexCnt) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
const int vertexDataStride = 9; // TODO
const int vertexDataOffsPos = 0;
float3 pos;
pos.x = vertexDataEnd_D[vertexDataStride*idx + vertexDataOffsPos +0];
pos.y = vertexDataEnd_D[vertexDataStride*idx + vertexDataOffsPos +1];
pos.z = vertexDataEnd_D[vertexDataStride*idx + vertexDataOffsPos +2];
vertexAttrib_D[idx] = ::SampleFieldAtPosTrilin_D<float, true>(pos, tex0_D);
}
/*
* DeformableGPUSurfaceMT_ComputeSurfAttribDiff1_D
*/
__global__ void DeformableGPUSurfaceMT_ComputeSurfAttribDiff1_D (
float *vertexAttrib_D,
float *vertexDataStart_D,
float *vertexDataTrackedBack_D,
float *vertexFlag_D,
float *tex1_D,
float *rotation_D,
float3 translation,
float3 centroid,
uint vertexCnt) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
const int vertexDataStride = 9; // TODO
const int vertexDataOffsPos = 0;
float3 pos;
if (vertexFlag_D[idx] == 1.0) {
pos.x = vertexDataTrackedBack_D[vertexDataStride*idx + vertexDataOffsPos +0];
pos.y = vertexDataTrackedBack_D[vertexDataStride*idx + vertexDataOffsPos +1];
pos.z = vertexDataTrackedBack_D[vertexDataStride*idx + vertexDataOffsPos +2];
} else {
pos.x = vertexDataStart_D[vertexDataStride*idx + vertexDataOffsPos +0];
pos.y = vertexDataStart_D[vertexDataStride*idx + vertexDataOffsPos +1];
pos.z = vertexDataStart_D[vertexDataStride*idx + vertexDataOffsPos +2];
}
// Revert translation to move to origin
pos.x -= translation.x;
pos.y -= translation.y;
pos.z -= translation.z;
// Revert rotation
float3 posRot;
posRot.x = rotation_D[0] * pos.x +
rotation_D[3] * pos.y +
rotation_D[6] * pos.z;
posRot.y = rotation_D[1] * pos.x +
rotation_D[4] * pos.y +
rotation_D[7] * pos.z;
posRot.z = rotation_D[2] * pos.x +
rotation_D[5] * pos.y +
rotation_D[8] * pos.z;
// Move to old centroid
posRot.x += centroid.x;
posRot.y += centroid.y;
posRot.z += centroid.z;
vertexAttrib_D[idx] = abs(vertexAttrib_D[idx] - ::SampleFieldAtPosTrilin_D<float, true>(posRot, tex1_D));
}
/*
* DeformableGPUSurfaceMT::ComputeSurfAttribDiff
*/
bool DeformableGPUSurfaceMT::ComputeSurfAttribDiff(
DeformableGPUSurfaceMT &surfStart,
float centroid[3], // In case the start surface has been fitted using RMSD
float rotMat[9],
float transVec[3],
float *tex0_D,
int3 texDim0,
float3 texOrg0,
float3 texDelta0,
float *tex1_D,
int3 texDim1,
float3 texOrg1,
float3 texDelta1) {
using namespace megamol::core::utility::log;
if (!this->InitVtxAttribVBO(this->vertexCnt)) {
return false;
}
// Get pointer to vertex attribute array
cudaGraphicsResource* cudaTokens[3];
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboVtxAttr,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[1],
this->vboVtxData,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[2],
surfStart.GetVtxDataVBO(),
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsMapResources(3, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vertexAttrib_D;
size_t vboVtxAttribSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vertexAttrib_D), // The mapped pointer
&vboVtxAttribSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vertexDataEnd_D;
size_t vboEndSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vertexDataEnd_D), // The mapped pointer
&vboEndSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
float *vertexDataStart_D;
size_t vboStartSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vertexDataStart_D), // The mapped pointer
&vboStartSize, // The size of the accessible data
cudaTokens[2]))) { // The mapped resource
return false;
}
// Init grid params
// Init CUDA grid for texture #0
if (!initGridParams(texDim0, texOrg0, texDelta0)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
DeformableGPUSurfaceMT::ClassName());
return false;
}
// Compute difference for new and old vertices (after subdivision)
// Part one: sample value for new vertices
DeformableGPUSurfaceMT_ComputeSurfAttribDiff0_D <<< Grid(this->vertexCnt, 256), 256 >>> (
vertexAttrib_D,
vertexDataEnd_D,
tex0_D,
this->vertexCnt);
CudaDevArr<float> rotate_D;
// Rotate for best fit
rotate_D.Validate(9);
if (!CudaSafeCall(cudaMemcpy((void *)rotate_D.Peek(), &rotMat[0],
9*sizeof(float), cudaMemcpyHostToDevice))) {
return false;
}
// Init grid params
// Init CUDA grid for texture #1
if (!initGridParams(texDim1, texOrg1, texDelta1)) {
Log::DefaultLog.WriteError(
"%s: could not init constant device params",
DeformableGPUSurfaceMT::ClassName());
return false;
}
if (this->vertexFlag_D.GetCount() == 0) {
if (!CudaSafeCall(this->vertexFlag_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->vertexFlag_D.Set(0x00))) {
return false;
}
}
// Compute difference for new and old vertices (after subdivision)
// Part two: sample value for old/tracked back vertices
DeformableGPUSurfaceMT_ComputeSurfAttribDiff1_D <<< Grid(this->vertexCnt, 256), 256 >>> (
vertexAttrib_D,
vertexDataStart_D,
this->trackedSubdivVertexData_D.Peek(), // Tracked back vertices, needed for sampling
this->vertexFlag_D.Peek(),
tex1_D,
rotate_D.Peek(),
make_float3(transVec[0],transVec[1],transVec[2]),
make_float3(centroid[0],centroid[1],centroid[2]),
this->vertexCnt);
if (!CheckForCudaError()) {
return false;
}
rotate_D.Release();
if (!CudaSafeCall(cudaGraphicsUnmapResources(3, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[2]))) {
return false;
}
return true;
}
__global__ void DeformableGPUSurfaceMT_ComputeTriangleFaceNormal_D(
float3 *triFaceNormals_D,
float *vertexData_D,
uint *triangleidx_D,
uint triangleCnt) {
const uint idx = ::getThreadIdx();
if (idx >= triangleCnt) {
return;
}
float3 pos0 = make_float3(
vertexData_D[9*triangleidx_D[3*idx+0]+0],
vertexData_D[9*triangleidx_D[3*idx+0]+1],
vertexData_D[9*triangleidx_D[3*idx+0]+2]);
float3 pos1 = make_float3(
vertexData_D[9*triangleidx_D[3*idx+1]+0],
vertexData_D[9*triangleidx_D[3*idx+1]+1],
vertexData_D[9*triangleidx_D[3*idx+1]+2]);
float3 pos2 = make_float3(
vertexData_D[9*triangleidx_D[3*idx+2]+0],
vertexData_D[9*triangleidx_D[3*idx+2]+1],
vertexData_D[9*triangleidx_D[3*idx+2]+2]);
float3 vec0 = (pos1 - pos0);
float3 vec1 = (pos2 - pos0);
float3 norm = normalize(cross(vec0, vec1));
// Write normal
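// The same face normal is stored once per triangle corner so that
// ComputeNormalsSubdiv() can later sort these copies by vertex index and
// sum them per vertex.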
triFaceNormals_D[idx*3+0] = norm;
triFaceNormals_D[idx*3+1] = norm;
triFaceNormals_D[idx*3+2] = norm;
}
__global__ void DeformableGPUSurfaceMT_CheckTriNormals_D(
float3 *triFaceNormals_D,
uint *triangleNeighbors_D,
uint triangleCnt) {
const uint idx = ::getThreadIdx();
if (idx >= triangleCnt) {
return;
}
uint n0 = triangleNeighbors_D[3*idx+0];
uint n1 = triangleNeighbors_D[3*idx+1];
uint n2 = triangleNeighbors_D[3*idx+2];
float3 norm = normalize(triFaceNormals_D[idx]);
float3 norm0 = normalize(triFaceNormals_D[n0]);
float3 norm1 = normalize(triFaceNormals_D[n1]);
float3 norm2 = normalize(triFaceNormals_D[n2]);
float3 avgNorm = (norm0+norm1+norm2)*(1.0f/3.0f);
__syncthreads();
if ((dot(norm, avgNorm) < 0)) {
triFaceNormals_D[idx] = make_float3(0.0, 0.0, 0.0);
}
}
__global__ void DeformableGPUSurfaceMT_ComputeNormalsSubdiv_D(
float *vertexData_D,
float3 *normals_D,
uint vertexCnt) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
float3 norm = normalize(normals_D[idx]);
// Write normal
vertexData_D[idx*9+3] = norm.x;
vertexData_D[idx*9+4] = norm.y;
vertexData_D[idx*9+5] = norm.z;
}
/*
* see http://blog.csdn.net/newtonbear/article/details/12768377
*/
template <typename Key, typename Value>
int reduce_by_key_with_raw_pointers(Key* d_key, Key* d_key_last, Value* d_value,
Key* d_okey, Value* d_ovalue) {
thrust::device_ptr<Key> d_keyp = thrust::device_pointer_cast(d_key);
thrust::device_ptr<Key> d_key_lastp = thrust::device_pointer_cast(d_key_last);
thrust::device_ptr<Value> d_valuep = thrust::device_pointer_cast(d_value);
thrust::device_ptr<Key> d_okeyp = thrust::device_pointer_cast(d_okey);
thrust::device_ptr<Value> d_ovaluep = thrust::device_pointer_cast(d_ovalue);
thrust::pair<thrust::device_ptr<Key>, thrust::device_ptr<Value> > new_end;
new_end = thrust::reduce_by_key(d_keyp, d_key_lastp, d_valuep, d_okeyp, d_ovaluep);
return new_end.first - d_okeyp;
}
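/*
 * Illustrative usage (a sketch, not part of the original code): this is how the
 * helper is employed in ComputeNormalsSubdiv() below to sum all face normals
 * incident to each vertex. The keys must already be sorted (thrust::sort_by_key)
 * and the default reduction operator is plus, so a float3 operator+ has to be
 * available (provided by the vector math helpers used in this file).
 *
 * int distinctVertices = reduce_by_key_with_raw_pointers<uint, float3>(
 *     sortedVtxIdx_D,                    // keys: one vertex index per corner, sorted
 *     sortedVtxIdx_D + triangleCnt*3,    // end of the key range
 *     faceNormals_D,                     // values: face normal, replicated per corner
 *     outKeys_D,                         // compacted list of distinct vertex indices
 *     summedNormals_D);                  // per-vertex sum of incident face normals
 */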
void OutputDevArrayUint(uint* d_array, int count, const char* name) {
// DEBUG Print
HostArr<uint> h_array;
h_array.Validate(count);
if (!CudaSafeCall(cudaMemcpy(h_array.Peek(), d_array, sizeof(uint)*count, cudaMemcpyDeviceToHost))) {
return;
}
for (int i = 0; i < count; ++i) {
printf("%s %i: %u\n", name, i, h_array.Peek()[i]);
}
h_array.Release();
// END DEBUG
}
void OutputDevArrayFloat3(float3* d_array, int count, const char* name) {
// DEBUG Print
HostArr<float3> h_array;
h_array.Validate(count);
if (!CudaSafeCall(cudaMemcpy(h_array.Peek(), d_array, sizeof(float3)*count,
cudaMemcpyDeviceToHost))) {
return;
}
for (int i = 0; i < count; ++i) {
printf("%s %i: %f %f %f\n", name, i,
h_array.Peek()[i].x,
h_array.Peek()[i].y,
h_array.Peek()[i].z);
}
h_array.Release();
// END DEBUG
}
/*
* DeformableGPUSurfaceMT::ComputeNormalsSubdiv
*/
bool DeformableGPUSurfaceMT::ComputeNormalsSubdiv() {
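// Overview: vertex normals are gathered in three passes. (1) Compute one
// normal per triangle and write it once per corner. (2) Sort those copies by
// vertex index. (3) Sum them per vertex with reduce_by_key; a final kernel
// normalizes the sums and writes them back into the vertex buffer.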
// Get pointer to vertex attribute array
cudaGraphicsResource* cudaTokens[2];
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboVtxData,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[1],
this->vboTriangleIdx,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vertexBuffer_D;
size_t vboVertexBufferSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vertexBuffer_D), // The mapped pointer
&vboVertexBufferSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
unsigned int *triIdx_D;
size_t vboTriIdxSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&triIdx_D), // The mapped pointer
&vboTriIdxSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
// 1. Compute triangle face normals
if (!CudaSafeCall(this->triangleFaceNormals_D.Validate(this->triangleCnt*3))) {
return false;
}
DeformableGPUSurfaceMT_ComputeTriangleFaceNormal_D <<< Grid(this->triangleCnt, 256), 256 >>> (
this->triangleFaceNormals_D.Peek(),
vertexBuffer_D,
triIdx_D,
this->triangleCnt);
if (!CheckForCudaError()) {
return false;
}
// // DEBUG CHECK FACE NORMALS
// DeformableGPUSurfaceMT_CheckTriNormals_D <<< Grid(this->triangleCnt, 256), 256 >>> (
// this->triangleFaceNormals_D.Peek(),
// this->triangleNeighbors_D.Peek(),
// this->triangleCnt);
// 2. Sort triangle normals by key
// Copy triangle indices
if (!CudaSafeCall(this->triangleIdxTmp_D.Validate(this->triangleCnt*3))) {
return false;
}
if (!CudaSafeCall(cudaMemcpy(this->triangleIdxTmp_D.Peek(), triIdx_D,
sizeof(uint)*this->triangleCnt*3, cudaMemcpyDeviceToDevice))) {
return false;
}
thrust::sort_by_key(
thrust::device_ptr<uint>(this->triangleIdxTmp_D.Peek()),
thrust::device_ptr<uint>(this->triangleIdxTmp_D.Peek() + this->triangleCnt*3),
thrust::device_ptr<float3>(this->triangleFaceNormals_D.Peek()));
if (!CheckForCudaError()) {
return false;
}
// OutputDevArrayUint(this->triangleIdxTmp_D.Peek(), this->triangleCnt*3, "TRI IDX");
// 3. Reduce vertex normals by key
// if (!CudaSafeCall(this->vertexNormalsIndxOffs_D.Validate(this->triangleCnt*3))) {
// return false;
// }
// if (!CudaSafeCall(this->reducedVertexKeysTmp_D.Validate(this->vertexCnt))) {
// return false;
// }
// thrust::device_ptr<uint> D = thrust::device_ptr<uint>(this->vertexNormalsIndxOffs_D.Peek());
// thrust::fill(D, D + this->vertexCnt, 1);
// thrust::device_ptr<uint> dev_ptr(this->vertexNormalsIndxOffs_D.Peek());
// thrust::fill(dev_ptr, dev_ptr + this->triangleCnt*3, 1);
// int n = reduce_by_key_with_raw_pointers<uint, uint>(
// this->triangleIdxTmp_D.Peek(),
// this->triangleIdxTmp_D.Peek() + this->triangleCnt*3,
// this->vertexNormalsIndxOffs_D.Peek(),
// this->triangleIdxTmp_D.Peek(),
// this->reducedVertexKeysTmp_D.Peek());
//OutputDevArrayUint(this->reducedVertexKeysTmp_D.Peek(), this->vertexCnt, "NORMAL CNT");
if (!CudaSafeCall(this->reducedNormalsTmp_D.Validate(this->vertexCnt))) {
return false;
}
if (!CudaSafeCall(this->outputArrayTmp_D.Validate(this->vertexCnt))) {
return false;
}
int n = reduce_by_key_with_raw_pointers<uint, float3>(
this->triangleIdxTmp_D.Peek(),
this->triangleIdxTmp_D.Peek() + this->triangleCnt*3,
this->triangleFaceNormals_D.Peek(),
this->outputArrayTmp_D.Peek(),
this->reducedNormalsTmp_D.Peek());
// OutputDevArrayFloat3(this->reducedNormalsTmp_D.Peek(), this->vertexCnt, "NORMAL ");
// printf("N %u, vertexCnt %u\n", n, this->vertexCnt);
// Compute actual normals
DeformableGPUSurfaceMT_ComputeNormalsSubdiv_D <<< Grid(this->vertexCnt, 256), 256 >>> (
vertexBuffer_D,
this->reducedNormalsTmp_D.Peek(),
this->vertexCnt);
if (!CheckForCudaError()) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
return ::CheckForCudaError();
}
/*
* DeformableGPUSurfaceMT::PrintVertexBuffer
*/
void DeformableGPUSurfaceMT::PrintVertexBuffer(size_t cnt) {
// Get pointer to vertex attribute array
cudaGraphicsResource* cudaTokens[1];
CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboVtxData,
cudaGraphicsMapFlagsNone));
CudaSafeCall(cudaGraphicsMapResources(1, cudaTokens, 0));
// Get mapped pointers to the vertex data buffers
float *vertexBuffer_D;
size_t vboVertexBufferSize;
CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vertexBuffer_D), // The mapped pointer
&vboVertexBufferSize, // The size of the accessible data
cudaTokens[0]));
HostArr<float> vertexBuffer;
vertexBuffer.Validate(cnt*this->vertexDataStride);
CudaSafeCall(cudaMemcpy(vertexBuffer.Peek(), vertexBuffer_D,
sizeof(float)*cnt*this->vertexDataStride,
cudaMemcpyDeviceToHost));
for (int i = 0; i < cnt; ++i) {
printf("VERTEX BUFFER %f %f %f, %f %f %f, %f %f %f\n",
vertexBuffer.Peek()[i*this->vertexDataStride+0],
vertexBuffer.Peek()[i*this->vertexDataStride+1],
vertexBuffer.Peek()[i*this->vertexDataStride+2],
vertexBuffer.Peek()[i*this->vertexDataStride+3],
vertexBuffer.Peek()[i*this->vertexDataStride+4],
vertexBuffer.Peek()[i*this->vertexDataStride+5],
vertexBuffer.Peek()[i*this->vertexDataStride+6],
vertexBuffer.Peek()[i*this->vertexDataStride+7],
vertexBuffer.Peek()[i*this->vertexDataStride+8]);
}
vertexBuffer.Release();
CudaSafeCall(cudaGraphicsUnmapResources(1, cudaTokens, 0));
CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]));
}
void DeformableGPUSurfaceMT::PrintExternalForces(size_t cnt) {
HostArr<float> externalForces;
externalForces.Validate(cnt*4);
CudaSafeCall(cudaMemcpy(externalForces.Peek(), this->externalForces_D.Peek(),
sizeof(float)*cnt*4,
cudaMemcpyDeviceToHost));
for (int i = 0; i < cnt; ++i) {
printf("EXT FORCES %f %f %f\n",
externalForces.Peek()[4*i+0],
externalForces.Peek()[4*i+1],
externalForces.Peek()[4*i+2]);
}
externalForces.Release();
}
void DeformableGPUSurfaceMT::PrintCubeStates(size_t cnt) {
HostArr<unsigned int> cubeStates;
cubeStates.Validate(cnt);
CudaSafeCall(cudaMemcpy(cubeStates.Peek(), this->cubeStates_D.Peek(),
sizeof(unsigned int)*cnt,
cudaMemcpyDeviceToHost));
for (int i = 0; i < cnt; ++i) {
printf("CUBESTATES %u\n", cubeStates.Peek()[i]);
}
cubeStates.Release();
}
/*
* DeformableGPUSurfaceMT::ComputeMeshLaplacian
*/
bool DeformableGPUSurfaceMT::ComputeMeshLaplacian() {
typedef vislib::math::Vector<float, 3> Vec3f;
// Get pointer to vertex attribute array
cudaGraphicsResource* cudaTokens[2];
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboVtxData,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[1],
this->vboTriangleIdx,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsMapResources(2, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vertexBuffer_D;
size_t vboVertexBufferSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vertexBuffer_D), // The mapped pointer
&vboVertexBufferSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Get mapped pointers to the vertex data buffers
unsigned int *triIdx_D;
size_t vboTriIdxSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&triIdx_D), // The mapped pointer
&vboTriIdxSize, // The size of the accessible data
cudaTokens[1]))) { // The mapped resource
return false;
}
// Copy vertex data and triangle indices to CPU
HostArr<float> vertexData;
HostArr<unsigned int> triIdx;
vertexData.Validate(this->vertexCnt*9);
if (!CudaSafeCall(cudaMemcpy(vertexData.Peek(), vertexBuffer_D,
sizeof(float)*this->vertexCnt*9, cudaMemcpyDeviceToHost))) {
return false;
}
triIdx.Validate(this->triangleCnt*3);
if (!CudaSafeCall(cudaMemcpy(triIdx.Peek(), triIdx_D,
sizeof(uint)*this->triangleCnt*3, cudaMemcpyDeviceToHost))) {
return false;
}
// Build vertex neighbor list
vislib::Array<vislib::Array<uint> > vtxNeighbors;
vtxNeighbors.SetCount(this->vertexCnt);
// Loop through all triangles
for (size_t tri = 0; tri < this->triangleCnt; ++tri) {
std::size_t idx0 = triIdx.Peek()[3*tri+0];
std::size_t idx1 = triIdx.Peek()[3*tri+1];
std::size_t idx2 = triIdx.Peek()[3*tri+2];
if (vtxNeighbors[idx0].Find(idx1) == NULL) {
vtxNeighbors[idx0].Append(idx1);
}
if (vtxNeighbors[idx0].Find(idx2) == NULL) {
vtxNeighbors[idx0].Append(idx2);
}
if (vtxNeighbors[idx1].Find(idx0) == NULL) {
vtxNeighbors[idx1].Append(idx0);
}
if (vtxNeighbors[idx1].Find(idx2) == NULL) {
vtxNeighbors[idx1].Append(idx2);
}
if (vtxNeighbors[idx2].Find(idx0) == NULL) {
vtxNeighbors[idx2].Append(idx0);
}
if (vtxNeighbors[idx2].Find(idx1) == NULL) {
vtxNeighbors[idx2].Append(idx1);
}
}
// // DEBUG printf vertex neighbor list
// printf("Computing vertex neighbor list...\n");
// for (size_t v = 0; v < this->vertexCnt; ++v) {
// printf("%u: ", v);
// for (size_t n = 0; n < vtxNeighbors[v].Count(); ++n) {
// printf("%u ", vtxNeighbors[v][n]);
// }
// printf("\n");
// }
// // End DEBUG
printf("Computing mesh Laplacian ...\n");
HostArr<float> vtxLaplacian;
vtxLaplacian.Validate(this->vertexCnt*3);
// Loop through all vertices
for (size_t v = 0; v < this->vertexCnt; ++v) {
float normSum = 0.0f;
vtxLaplacian.Peek()[3*v+0] = 0.0f;
vtxLaplacian.Peek()[3*v+1] = 0.0f;
vtxLaplacian.Peek()[3*v+2] = 0.0f;
Vec3f pos(vertexData.Peek()[9*v+0],
vertexData.Peek()[9*v+1],
vertexData.Peek()[9*v+2]);
//float minAngle = 1000.0f;
//float maxAngle = 0.0f;
Vec3f currNPos;
Vec3f nextNPos;
for (size_t n = 0; n < vtxNeighbors[v].Count(); ++n) {
// Get indices of the current neighbor and of the next one (cyclic)
uint nIdxCurr = vtxNeighbors[v][n];
uint nIdxNext;
if (n == vtxNeighbors[v].Count()-1)
nIdxNext = vtxNeighbors[v][0];
else
nIdxNext = vtxNeighbors[v][n+1];
currNPos.Set(vertexData.Peek()[9*nIdxCurr+0],
vertexData.Peek()[9*nIdxCurr+1],
vertexData.Peek()[9*nIdxCurr+2]);
nextNPos.Set(vertexData.Peek()[9*nIdxNext+0],
vertexData.Peek()[9*nIdxNext+1],
vertexData.Peek()[9*nIdxNext+2]);
// normSum += (pos-posN).Length();
// Vec3f dist = pos-posN;
// dist.Normalise();
// vtxLaplacian.Peek()[3*v+0] += dist.X();
// vtxLaplacian.Peek()[3*v+1] += dist.Y();
// vtxLaplacian.Peek()[3*v+2] += dist.Z();
}
// Normalize (guard against division by zero if nothing was accumulated)
if (normSum > 0.0f) {
vtxLaplacian.Peek()[3*v+0] /= normSum;
vtxLaplacian.Peek()[3*v+1] /= normSum;
vtxLaplacian.Peek()[3*v+2] /= normSum;
}
}
// // DEBUG Print mesh Laplacian norm
// for (size_t v = 0; v < this->vertexCnt; ++v) {
// printf("Laplacian %u: %f\n", v, vtxLaplacian.Peek()[v]);
// }
// // End DEBUG
// Write to vertex attribute array
if (!CudaSafeCall(this->geometricLaplacian_D.Validate(this->vertexCnt*3))) {
return false;
}
if (!CudaSafeCall(cudaMemcpy(this->geometricLaplacian_D.Peek(), vtxLaplacian.Peek(),
sizeof(float)*this->vertexCnt*3, cudaMemcpyHostToDevice))) {
return false;
}
// Cleanup
vertexData.Release();
triIdx.Release();
vtxLaplacian.Release();
vtxNeighbors.Clear();
if (!CudaSafeCall(cudaGraphicsUnmapResources(2, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[1]))) {
return false;
}
return ::CheckForCudaError();
}
/*
 * DeformableGPUSurfaceMT_ComputeAttribDiff_D
*/
__global__ void DeformableGPUSurfaceMT_ComputeAttribDiff_D (
float *vertexAttrib_D,
float *meshLaplacian_D,
float *meshLaplacianOther_D,
uint vertexCnt) {
const uint idx = ::getThreadIdx();
if (idx >= vertexCnt) {
return;
}
float3 otherAttrib = make_float3(
meshLaplacianOther_D[3*idx+0],
meshLaplacianOther_D[3*idx+1],
meshLaplacianOther_D[3*idx+2]);
float3 thisAttrib = make_float3(
meshLaplacian_D[3*idx+0],
meshLaplacian_D[3*idx+1],
meshLaplacian_D[3*idx+2]);
//vertexAttrib_D[idx] = abs(thisAttrib-otherAttrib);
vertexAttrib_D[idx] = length(thisAttrib-otherAttrib);
}
/*
* DeformableGPUSurfaceMT::ComputeMeshLaplacianDiff
*/
bool DeformableGPUSurfaceMT::ComputeMeshLaplacianDiff(
DeformableGPUSurfaceMT &surfStart) {
if (this->nFlaggedVertices != 0) {
printf("No subdivision allowed in this case!\n");
return false;
}
typedef vislib::math::Vector<float, 3> Vec3f;
if (!this->InitVtxAttribVBO(this->vertexCnt)) {
return false;
}
if (!surfStart.ComputeMeshLaplacian()) {
return false;
}
if (!this->ComputeMeshLaplacian()) {
return false;
}
// Get pointer to vertex attribute array
cudaGraphicsResource* cudaTokens[1];
if (!CudaSafeCall(cudaGraphicsGLRegisterBuffer(
&cudaTokens[0],
this->vboVtxAttr,
cudaGraphicsMapFlagsNone))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsMapResources(1, cudaTokens, 0))) {
return false;
}
// Get mapped pointers to the vertex data buffers
float *vertexAttrib_D;
size_t vertexAttribSize;
if (!CudaSafeCall(cudaGraphicsResourceGetMappedPointer(
reinterpret_cast<void**>(&vertexAttrib_D), // The mapped pointer
&vertexAttribSize, // The size of the accessible data
cudaTokens[0]))) { // The mapped resource
return false;
}
// Compute difference
DeformableGPUSurfaceMT_ComputeAttribDiff_D <<< Grid(this->vertexCnt, 256), 256 >>> (
vertexAttrib_D,
this->PeekGeomLaplacian(),
surfStart.PeekGeomLaplacian(),
this->vertexCnt);
if (!CheckForCudaError()) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnmapResources(1, cudaTokens, 0))) {
return false;
}
if (!CudaSafeCall(cudaGraphicsUnregisterResource(cudaTokens[0]))) {
return false;
}
return ::CheckForCudaError();
}
|
0fcdefa4357faa637b7b11feafd20eaa1d47da76.hip | // !!! This is a file automatically generated by hipify!!!
#include "rocblas.h"
/**************/
/* CUBLASTDOT */
/**************/
hipblasStatus_t cublasTdot(hipblasHandle_t handle, int n, const float *x, int incx, const float *y, int incy, float *result) {
return hipblasSdot(handle, n, x, incx, y, incy, result);
}
hipblasStatus_t cublasTdot(hipblasHandle_t handle, int n, const double *x, int incx, const double *y, int incy, double *result) {
return hipblasDdot(handle, n, x, incx, y, incy, result);
}
/***************/
/* CUBLASTAXPY */
/***************/
hipblasStatus_t cublasTaxpy(hipblasHandle_t handle, int n, const float *alpha, const float *x, int incx, float *y, int incy) {
return hipblasSaxpy(handle, n, alpha, x, incx, y, incy);
}
hipblasStatus_t cublasTaxpy(hipblasHandle_t handle, int n, const double *alpha, const double *x, int incx, double *y, int incy) {
return hipblasDaxpy(handle, n, alpha, x, incx, y, incy);
}
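/* Illustrative usage (a sketch, not part of the original file): the overloads
 * above let templated host code stay precision-agnostic, e.g.
 *
 * template <typename T>
 * T deviceDot(hipblasHandle_t handle, int n, const T *x, const T *y) {
 *     T result;
 *     cublasTdot(handle, n, x, 1, y, 1, &result);
 *     return result;
 * }
 */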
| 0fcdefa4357faa637b7b11feafd20eaa1d47da76.cu | #include "cublas_v2.h"
/**************/
/* CUBLASTDOT */
/**************/
cublasStatus_t cublasTdot(cublasHandle_t handle, int n, const float *x, int incx, const float *y, int incy, float *result) {
return cublasSdot(handle, n, x, incx, y, incy, result);
}
cublasStatus_t cublasTdot(cublasHandle_t handle, int n, const double *x, int incx, const double *y, int incy, double *result) {
return cublasDdot(handle, n, x, incx, y, incy, result);
}
/***************/
/* CUBLASTAXPY */
/***************/
cublasStatus_t cublasTaxpy(cublasHandle_t handle, int n, const float *alpha, const float *x, int incx, float *y, int incy) {
return cublasSaxpy(handle, n, alpha, x, incx, y, incy);
}
cublasStatus_t cublasTaxpy(cublasHandle_t handle, int n, const double *alpha, const double *x, int incx, double *y, int incy) {
return cublasDaxpy(handle, n, alpha, x, incx, y, incy);
}
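/* Illustrative usage (a sketch, not part of the original file): the overloads
 * above let templated host code stay precision-agnostic, e.g.
 *
 * template <typename T>
 * T deviceDot(cublasHandle_t handle, int n, const T *x, const T *y) {
 *     T result;
 *     cublasTdot(handle, n, x, 1, y, 1, &result);
 *     return result;
 * }
 */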
|
cfbbb474a13a0cf79a6e4a087531e406c80e5899.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**********************************************************************************
* Numerical Solution for the Cubic-Quintic Nonlinear Schrodinger Equation *
* using second order split step Fourier method. *
* Coded by: Omar Ashour, Texas A&M University at Qatar, February 2015. *
* ********************************************************************************/
#include <stddef.h>
#include "../lib/cu_helpers.h"
#include <hipfft.h>
// Grid Parameters
#define XN 32 // Number of x-spatial nodes
#define YN 32 // Number of y-spatial nodes
#define ZN 32 // Number of z-spatial nodes
#define TN 1000 // Number of temporal nodes
#define LX 50.0f // x-spatial domain [-LX,LX)
#define LY 50.0f // y-spatial domain [-LY,LY)
#define LZ 50.0f // z-spatial domain [-LZ,LZ)
#define TT 10.0f // Max time
#define DX (2*LX / XN) // x-spatial step size
#define DY (2*LY / YN) // y-spatial step size
#define DZ (2*LZ / ZN) // z-spatial step size
#define DT (TT / TN) // temporal step size
// Gaussian Parameters
#define A_S (3.0f/sqrt(8.0f))
#define R_S (sqrt(32.0f/9.0f))
#define A 0.6f
#define R (1.0f/(A*sqrt(1.0f-A*A)))
// Index flattening macro
// Flat[x + WIDTH * (y + DEPTH * z)] = Original[x, y, z]
#define ind(i,j,k) ((((i * ZN) * YN) + (j * YN)) + k)
// ____WIDTH____
// |_|_|_|_|_|_|_|H
// |_|_|_|_|_|_|_|E
// Z|_|_|_|_|_|_|_|I
// N|_|_|_|_|_|_|_|G
// |_|_|_|_|_|_|_|H
// |_|_|_|_|_|_|_|T
// \_\_\_\_\_\_\_\D
// \_\_\_\_\_\_\_\E
// Y\_\_\_\_\_\_\_\P
// N\_\_\_\_\_\_\_\T
// \_\_\_\_\_\_\_\H
// XN
// Timing parameters
#define IRVL 10 // Timing interval. Take a reading every N iterations.
// Output files
#define VTK_0 "gpu_ffts_0.vtk"
#define VTK_1 "gpu_ffts_1.vtk"
#define TIME_F "gpu_ffts_time.m"
// Function prototypes
__global__ void nonlin(hipfftComplex *psi, float dt, int xn, int yn, int zn);
__global__ void lin(hipfftComplex *psi, float *k2, float dt, int xn, int yn, int zn);
__global__ void normalize(hipfftComplex *psi, int size, int xn, int yn, int zn);
int main(void)
{
// Timing info
hipEvent_t begin_event, end_event;
hipEventCreate(&begin_event);
hipEventCreate(&end_event);
// Print basic info about simulation
printf("XN: %d. DX: %f, DT: %f, dt/dx^2: %f\n", XN, DX, DT, DT/(DX*DX));
// Allocate host arrays
float *h_x = (float*)malloc(sizeof(float) * XN);
float *h_y = (float*)malloc(sizeof(float) * YN);
float *h_z = (float*)malloc(sizeof(float) * ZN);
float *h_k2 = (float*)malloc(sizeof(float) * XN * YN * ZN);
float *h_kx = (float*)malloc(XN * sizeof(float));
float *h_ky = (float*)malloc(YN * sizeof(float));
float *h_kz = (float*)malloc(ZN * sizeof(float));
float *h_max = (float*)calloc(TN+1, sizeof(float));
hipfftComplex *h_psi = (hipfftComplex*)malloc(
sizeof(hipfftComplex) * XN * YN * ZN);
hipfftComplex *h_psi_0 = (hipfftComplex*)malloc(
sizeof(hipfftComplex) * XN * YN * ZN);
// Create transform plans
hipfftHandle plan;
CUFFT_SAFE_CALL(hipfftPlan3d(&plan, XN, YN, ZN, HIPFFT_C2C));
// Create wavenumbers
float dkx = 2*M_PI/XN/DX;
for(int i = XN/2; i >= 0; i--)
h_kx[XN/2 - i]=(XN/2 - i) * dkx;
for(int i = XN/2+1; i < XN; i++)
h_kx[i]=(i - XN) * dkx;
float dky = 2*M_PI/YN/DY;
for(int i = YN/2; i >= 0; i--)
h_ky[YN/2 - i]=(YN/2 - i) * dky;
for(int i = YN/2+1; i < YN; i++)
h_ky[i]=(i - YN) * dky;
float dkz = 2*M_PI/ZN/DZ;
for(int i = ZN/2; i >= 0; i--)
h_kz[ZN/2 - i]=(ZN/2 - i) * dkz;
for(int i = ZN/2+1; i < ZN; i++)
h_kz[i]=(i - ZN) * dkz;
// Initialize x, y and z
for(int i = 0; i < XN ; i++)
h_x[i] = (i-XN/2)*DX;
for(int i = 0; i < YN ; i++)
h_y[i] = (i-YN/2)*DY;
for(int i = 0; i < ZN ; i++)
h_z[i] = (i-ZN/2)*DZ;
// Initial conditions on host
for(int i = 0; i < XN; i++)
for(int j = 0; j < YN; j++)
for(int k = 0; k < ZN; k++)
{
h_psi[ind(i,j,k)].x = A_S*A*
exp(-(h_x[i]*h_x[i]+h_y[j]*h_y[j]+h_z[k]*h_z[k])
/(2*R*R*R_S*R_S));
h_psi[ind(i,j,k)].y = 0;
h_psi_0[ind(i,j,k)].x = h_psi[ind(i,j,k)].x;
h_psi_0[ind(i,j,k)].y = h_psi[ind(i,j,k)].y;
h_k2[ind(i,j,k)] = h_kx[i]*h_kx[i] + h_ky[j]*h_ky[j] + h_kz[k]*h_kz[k];
}
// Allocate and copy device memory
hipfftComplex *d_psi; float *d_k2;
CUDAR_SAFE_CALL(hipMalloc((void **)&d_psi, sizeof(hipfftComplex)*XN*YN*ZN));
CUDAR_SAFE_CALL(hipMalloc((void **)&d_k2, sizeof(float)*XN*YN*ZN));
CUDAR_SAFE_CALL(hipMemcpy(d_psi, h_psi, sizeof(hipfftComplex)*XN*YN*ZN,
hipMemcpyHostToDevice));
CUDAR_SAFE_CALL(hipMemcpy(d_k2, h_k2, sizeof(float)*XN*YN*ZN,
hipMemcpyHostToDevice));
// Initialize the grid
dim3 threadsPerBlock(8,8,8);
dim3 blocksPerGrid((XN + 7)/8,(YN+7)/8,(ZN+7)/8);
// Find max(|psi|) for initial pulse.
//cmax_psi(psi, h_max, 0, XN*YN*ZN);
// Print timing info to file
float time_value;
FILE *fp = fopen(TIME_F, "w");
fprintf(fp, "steps = [0:%d:%d];\n", IRVL, TN);
fprintf(fp, "time = [0, ");
// Forward transform
CUFFT_SAFE_CALL(hipfftExecC2C(plan, d_psi, d_psi, HIPFFT_FORWARD));
// Timing starts here
hipEventRecord(begin_event, 0);
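// Each loop iteration is one second-order (Strang) split step: half a linear
// dispersion step exp(-i*k^2*dt/2) applied in Fourier space, an inverse FFT
// (followed by an explicit normalization, since the transform is unscaled), a
// full nonlinear phase step exp(i*(|psi|^2 - |psi|^4)*dt) in real space, a
// forward FFT, and another half dispersion step. The field is left in k-space
// at the end of each iteration, so no extra FFT pair is needed between steps.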
for (int i = 1; i <= TN; i++)
{
// Solve linear part
hipLaunchKernelGGL(( lin), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_psi, d_k2, DT/2, XN, YN, ZN);
CUDAR_SAFE_CALL(hipPeekAtLastError());
// Backward transform
CUFFT_SAFE_CALL(hipfftExecC2C(plan, d_psi, d_psi, HIPFFT_BACKWARD));
// Normalize the transform
hipLaunchKernelGGL(( normalize), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_psi, XN*YN*ZN, XN, YN, ZN);
CUDAR_SAFE_CALL(hipPeekAtLastError());
// Solve nonlinear part
hipLaunchKernelGGL(( nonlin), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_psi, DT, XN, YN, ZN);
CUDAR_SAFE_CALL(hipPeekAtLastError());
// Forward transform
CUFFT_SAFE_CALL(hipfftExecC2C(plan, d_psi, d_psi, HIPFFT_FORWARD));
// Linear calculation
hipLaunchKernelGGL(( lin), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_psi, d_k2, DT/2, XN, YN, ZN);
CUDAR_SAFE_CALL(hipPeekAtLastError());
// Save max |psi| for printing
//cmax_psi(psi, h_max, i, XN*YN*ZN);
// Print time at specific intervals
if(i % IRVL == 0)
{
hipEventRecord(end_event, 0);
hipEventSynchronize(end_event);
hipEventElapsedTime(&time_value, begin_event, end_event);
fprintf(fp, "%f, ", time_value);
}
}
// Wrap up timing file
fprintf(fp, "];\n");
fprintf(fp, "plot(steps, time/1000, '-*r');\n");
fclose(fp);
// Backward transform to retrieve data
CUFFT_SAFE_CALL(hipfftExecC2C(plan, d_psi, d_psi, HIPFFT_BACKWARD));
// Normalize the transform
hipLaunchKernelGGL(( normalize), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_psi, XN*YN*ZN, XN, YN, ZN);
CUDAR_SAFE_CALL(hipPeekAtLastError());
// Copy results back to host
CUDAR_SAFE_CALL(hipMemcpy(h_psi, d_psi, sizeof(hipfftComplex)*XN*YN*ZN,
hipMemcpyDeviceToHost));
// Plot results
vtk_3dcf(h_x, h_y, h_z, h_psi, XN, YN, ZN, VTK_1);
vtk_3dcf(h_x, h_y, h_z, h_psi_0, XN, YN, ZN, VTK_0);
// Clean up
CUFFT_SAFE_CALL(hipfftDestroy(plan));
free(h_x);
free(h_y);
free(h_z);
free(h_k2);
free(h_kx);
free(h_ky);
free(h_kz);
free(h_psi);
free(h_psi_0);
free(h_max);
CUDAR_SAFE_CALL(hipFree(d_psi));
CUDAR_SAFE_CALL(hipFree(d_k2));
return 0;
}
__global__ void nonlin(hipfftComplex *psi, float dt, int xn, int yn, int zn)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int k = threadIdx.z + blockIdx.z * blockDim.z;
// Avoid first and last point (boundary conditions) (needs fixing)
// if (i >= xn - 1 || j >= yn-1 || || k >= zn-1 || i == 0 || j == 0 || k == 0) return;
if (i >= xn || j >= yn || k >= zn) return;
float psi2 = cuCabsf(psi[ind(i,j,k)])*cuCabsf(psi[ind(i,j,k)]);
float non = psi2 - psi2*psi2;
psi[ind(i,j,k)] = cuCmulf(psi[ind(i,j,k)],
make_cuComplex(cos(non*dt), sin(non*dt)));
}
__global__ void lin(hipfftComplex *psi, float *k2, float dt, int xn, int yn, int zn)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int k = threadIdx.z + blockIdx.z * blockDim.z;
// Avoid first and last point (boundary conditions) (needs fixing)
// if (i >= xn - 1 || j >= yn-1 || || k >= zn-1 || i == 0 || j == 0 || k == 0) return;
if (i >= xn || j >= yn || k >= zn) return;
psi[ind(i,j,k)] = cuCmulf(psi[ind(i,j,k)],
make_cuComplex(cos(k2[ind(i,j,k)]*dt), -sin(k2[ind(i,j,k)]*dt)));
}
__global__ void normalize(hipfftComplex *psi, int size, int xn, int yn, int zn)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int k = threadIdx.z + blockIdx.z * blockDim.z;
// Stay within range since the grid might be larger
if (i >= xn || j >= yn || k >= zn) return;
psi[ind(i,j,k)].x = psi[ind(i,j,k)].x/size;
psi[ind(i,j,k)].y = psi[ind(i,j,k)].y/size;
}
| cfbbb474a13a0cf79a6e4a087531e406c80e5899.cu | /**********************************************************************************
* Numerical Solution for the Cubic-Quintic Nonlinear Schrodinger Equation *
* using second order split step Fourier method. *
* Coded by: Omar Ashour, Texas A&M University at Qatar, February 2015. *
* ********************************************************************************/
#include <stddef.h>
#include "../lib/cu_helpers.h"
#include <cufft.h>
// Grid Parameters
#define XN 32 // Number of x-spatial nodes
#define YN 32 // Number of y-spatial nodes
#define ZN 32 // Number of z-spatial nodes
#define TN 1000 // Number of temporal nodes
#define LX 50.0f // x-spatial domain [-LX,LX)
#define LY 50.0f // y-spatial domain [-LY,LY)
#define LZ 50.0f // z-spatial domain [-LZ,LZ)
#define TT 10.0f // Max time
#define DX (2*LX / XN) // x-spatial step size
#define DY (2*LY / YN) // y-spatial step size
#define DZ (2*LZ / ZN) // z-spatial step size
#define DT (TT / TN) // temporal step size
// Gaussian Parameters
#define A_S (3.0f/sqrt(8.0f))
#define R_S (sqrt(32.0f/9.0f))
#define A 0.6f
#define R (1.0f/(A*sqrt(1.0f-A*A)))
// Index flattening macro
// Flat[x + WIDTH * (y + DEPTH * z)] = Original[x, y, z]
#define ind(i,j,k) ((((i * ZN) * YN) + (j * YN)) + k)
// ____WIDTH____
// |_|_|_|_|_|_|_|H
// |_|_|_|_|_|_|_|E
// Z|_|_|_|_|_|_|_|I
// N|_|_|_|_|_|_|_|G
// |_|_|_|_|_|_|_|H
// |_|_|_|_|_|_|_|T
// \_\_\_\_\_\_\_\D
// \_\_\_\_\_\_\_\E
// Y\_\_\_\_\_\_\_\P
// N\_\_\_\_\_\_\_\T
// \_\_\_\_\_\_\_\H
// XN
// Timing parameters
#define IRVL 10 // Timing interval. Take a reading every N iterations.
// Output files
#define VTK_0 "gpu_ffts_0.vtk"
#define VTK_1 "gpu_ffts_1.vtk"
#define TIME_F "gpu_ffts_time.m"
// Function prototypes
__global__ void nonlin(cufftComplex *psi, float dt, int xn, int yn, int zn);
__global__ void lin(cufftComplex *psi, float *k2, float dt, int xn, int yn, int zn);
__global__ void normalize(cufftComplex *psi, int size, int xn, int yn, int zn);
int main(void)
{
// Timing info
cudaEvent_t begin_event, end_event;
cudaEventCreate(&begin_event);
cudaEventCreate(&end_event);
// Print basic info about simulation
printf("XN: %d. DX: %f, DT: %f, dt/dx^2: %f\n", XN, DX, DT, DT/(DX*DX));
// Allocate host arrays
float *h_x = (float*)malloc(sizeof(float) * XN);
float *h_y = (float*)malloc(sizeof(float) * YN);
float *h_z = (float*)malloc(sizeof(float) * ZN);
float *h_k2 = (float*)malloc(sizeof(float) * XN * YN * ZN);
float *h_kx = (float*)malloc(XN * sizeof(float));
float *h_ky = (float*)malloc(YN * sizeof(float));
float *h_kz = (float*)malloc(ZN * sizeof(float));
float *h_max = (float*)calloc(TN+1, sizeof(float));
cufftComplex *h_psi = (cufftComplex*)malloc(
sizeof(cufftComplex) * XN * YN * ZN);
cufftComplex *h_psi_0 = (cufftComplex*)malloc(
sizeof(cufftComplex) * XN * YN * ZN);
// Create transform plans
cufftHandle plan;
CUFFT_SAFE_CALL(cufftPlan3d(&plan, XN, YN, ZN, CUFFT_C2C));
// Create wavenumbers
float dkx = 2*M_PI/XN/DX;
for(int i = XN/2; i >= 0; i--)
h_kx[XN/2 - i]=(XN/2 - i) * dkx;
for(int i = XN/2+1; i < XN; i++)
h_kx[i]=(i - XN) * dkx;
float dky = 2*M_PI/YN/DY;
for(int i = YN/2; i >= 0; i--)
h_ky[YN/2 - i]=(YN/2 - i) * dky;
for(int i = YN/2+1; i < YN; i++)
h_ky[i]=(i - YN) * dky;
float dkz = 2*M_PI/ZN/DZ;
for(int i = ZN/2; i >= 0; i--)
h_kz[ZN/2 - i]=(ZN/2 - i) * dkz;
for(int i = ZN/2+1; i < ZN; i++)
h_kz[i]=(i - ZN) * dkz;
// Initialize x, y and z
for(int i = 0; i < XN ; i++)
h_x[i] = (i-XN/2)*DX;
for(int i = 0; i < YN ; i++)
h_y[i] = (i-YN/2)*DY;
for(int i = 0; i < ZN ; i++)
h_z[i] = (i-ZN/2)*DZ;
// Initial conditions on host
for(int i = 0; i < XN; i++)
for(int j = 0; j < YN; j++)
for(int k = 0; k < ZN; k++)
{
h_psi[ind(i,j,k)].x = A_S*A*
exp(-(h_x[i]*h_x[i]+h_y[j]*h_y[j]+h_z[k]*h_z[k])
/(2*R*R*R_S*R_S));
h_psi[ind(i,j,k)].y = 0;
h_psi_0[ind(i,j,k)].x = h_psi[ind(i,j,k)].x;
h_psi_0[ind(i,j,k)].y = h_psi[ind(i,j,k)].y;
h_k2[ind(i,j,k)] = h_kx[i]*h_kx[i] + h_ky[j]*h_ky[j] + h_kz[k]*h_kz[k];
}
// Allocate and copy device memory
cufftComplex *d_psi; float *d_k2;
CUDAR_SAFE_CALL(cudaMalloc((void **)&d_psi, sizeof(cufftComplex)*XN*YN*ZN));
CUDAR_SAFE_CALL(cudaMalloc((void **)&d_k2, sizeof(float)*XN*YN*ZN));
CUDAR_SAFE_CALL(cudaMemcpy(d_psi, h_psi, sizeof(cufftComplex)*XN*YN*ZN,
cudaMemcpyHostToDevice));
CUDAR_SAFE_CALL(cudaMemcpy(d_k2, h_k2, sizeof(float)*XN*YN*ZN,
cudaMemcpyHostToDevice));
// Initialize the grid
dim3 threadsPerBlock(8,8,8);
dim3 blocksPerGrid((XN + 7)/8,(YN+7)/8,(ZN+7)/8);
// Find max(|psi|) for initial pulse.
//cmax_psi(psi, h_max, 0, XN*YN*ZN);
// Print timing info to file
float time_value;
FILE *fp = fopen(TIME_F, "w");
fprintf(fp, "steps = [0:%d:%d];\n", IRVL, TN);
fprintf(fp, "time = [0, ");
// Forward transform
CUFFT_SAFE_CALL(cufftExecC2C(plan, d_psi, d_psi, CUFFT_FORWARD));
// Timing starts here
cudaEventRecord(begin_event, 0);
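// Each loop iteration is one second-order (Strang) split step: half a linear
// dispersion step exp(-i*k^2*dt/2) applied in Fourier space, an inverse FFT
// (followed by an explicit normalization, since the transform is unscaled), a
// full nonlinear phase step exp(i*(|psi|^2 - |psi|^4)*dt) in real space, a
// forward FFT, and another half dispersion step. The field is left in k-space
// at the end of each iteration, so no extra FFT pair is needed between steps.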
for (int i = 1; i <= TN; i++)
{
// Solve linear part
lin<<<blocksPerGrid, threadsPerBlock>>>(d_psi, d_k2, DT/2, XN, YN, ZN);
CUDAR_SAFE_CALL(cudaPeekAtLastError());
// Backward transform
CUFFT_SAFE_CALL(cufftExecC2C(plan, d_psi, d_psi, CUFFT_INVERSE));
// Normalize the transform
normalize<<<blocksPerGrid, threadsPerBlock>>>(d_psi, XN*YN*ZN, XN, YN, ZN);
CUDAR_SAFE_CALL(cudaPeekAtLastError());
// Solve nonlinear part
nonlin<<<blocksPerGrid, threadsPerBlock>>>(d_psi, DT, XN, YN, ZN);
CUDAR_SAFE_CALL(cudaPeekAtLastError());
// Forward transform
CUFFT_SAFE_CALL(cufftExecC2C(plan, d_psi, d_psi, CUFFT_FORWARD));
// Linear calculation
lin<<<blocksPerGrid, threadsPerBlock>>>(d_psi, d_k2, DT/2, XN, YN, ZN);
CUDAR_SAFE_CALL(cudaPeekAtLastError());
// Save max |psi| for printing
//cmax_psi(psi, h_max, i, XN*YN*ZN);
// Print time at specific intervals
if(i % IRVL == 0)
{
cudaEventRecord(end_event, 0);
cudaEventSynchronize(end_event);
cudaEventElapsedTime(&time_value, begin_event, end_event);
fprintf(fp, "%f, ", time_value);
}
}
// Wrap up timing file
fprintf(fp, "];\n");
fprintf(fp, "plot(steps, time/1000, '-*r');\n");
fclose(fp);
// Backward transform to retrieve data
CUFFT_SAFE_CALL(cufftExecC2C(plan, d_psi, d_psi, CUFFT_INVERSE));
// Normalize the transform
normalize<<<blocksPerGrid, threadsPerBlock>>>(d_psi, XN*YN*ZN, XN, YN, ZN);
CUDAR_SAFE_CALL(cudaPeekAtLastError());
// Copy results back to host
CUDAR_SAFE_CALL(cudaMemcpy(h_psi, d_psi, sizeof(cufftComplex)*XN*YN*ZN,
cudaMemcpyDeviceToHost));
// Plot results
vtk_3dcf(h_x, h_y, h_z, h_psi, XN, YN, ZN, VTK_1);
vtk_3dcf(h_x, h_y, h_z, h_psi_0, XN, YN, ZN, VTK_0);
// Clean up
CUFFT_SAFE_CALL(cufftDestroy(plan));
free(h_x);
free(h_y);
free(h_z);
free(h_k2);
free(h_kx);
free(h_ky);
free(h_kz);
free(h_psi);
free(h_psi_0);
free(h_max);
CUDAR_SAFE_CALL(cudaFree(d_psi));
CUDAR_SAFE_CALL(cudaFree(d_k2));
return 0;
}
__global__ void nonlin(cufftComplex *psi, float dt, int xn, int yn, int zn)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int k = threadIdx.z + blockIdx.z * blockDim.z;
// Avoid first and last point (boundary conditions) (needs fixing)
// if (i >= xn - 1 || j >= yn-1 || || k >= zn-1 || i == 0 || j == 0 || k == 0) return;
if (i >= xn || j >= yn || k >= zn) return;
float psi2 = cuCabsf(psi[ind(i,j,k)])*cuCabsf(psi[ind(i,j,k)]);
float non = psi2 - psi2*psi2;
psi[ind(i,j,k)] = cuCmulf(psi[ind(i,j,k)],
make_cuComplex(cos(non*dt), sin(non*dt)));
}
__global__ void lin(cufftComplex *psi, float *k2, float dt, int xn, int yn, int zn)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int k = threadIdx.z + blockIdx.z * blockDim.z;
// Avoid first and last point (boundary conditions) (needs fixing)
// if (i >= xn - 1 || j >= yn-1 || || k >= zn-1 || i == 0 || j == 0 || k == 0) return;
if (i >= xn || j >= yn || k >= zn) return;
psi[ind(i,j,k)] = cuCmulf(psi[ind(i,j,k)],
make_cuComplex(cos(k2[ind(i,j,k)]*dt), -sin(k2[ind(i,j,k)]*dt)));
}
__global__ void normalize(cufftComplex *psi, int size, int xn, int yn, int zn)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int k = threadIdx.z + blockIdx.z * blockDim.z;
// Stay within range since the grid might be larger
if (i >= xn || j >= yn || k >= zn) return;
psi[ind(i,j,k)].x = psi[ind(i,j,k)].x/size;
psi[ind(i,j,k)].y = psi[ind(i,j,k)].y/size;
}
|
8a7f818e45812258be158cfd19e6884cdfd3967c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/border_interpolate.hpp"
#include "opencv2/gpu/device/static_check.hpp"
namespace cv { namespace gpu { namespace device
{
namespace column_filter
{
#define MAX_KERNEL_SIZE 32
__constant__ float c_kernel[MAX_KERNEL_SIZE];
void loadKernel(const float kernel[], int ksize)
{
cudaSafeCall( hipMemcpyToSymbol(c_kernel, kernel, ksize * sizeof(float)) );
}
template <int KSIZE, typename T, typename D, typename B>
__global__ void linearColumnFilter(const PtrStepSz<T> src, PtrStep<D> dst, const int anchor, const B brd)
{
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 200)
const int BLOCK_DIM_X = 16;
const int BLOCK_DIM_Y = 16;
const int PATCH_PER_BLOCK = 4;
const int HALO_SIZE = KSIZE <= 16 ? 1 : 2;
#else
const int BLOCK_DIM_X = 16;
const int BLOCK_DIM_Y = 8;
const int PATCH_PER_BLOCK = 2;
const int HALO_SIZE = 2;
#endif
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type sum_t;
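            // Shared-memory tile: PATCH_PER_BLOCK * BLOCK_DIM_Y rows of source data per block,
            // plus HALO_SIZE * BLOCK_DIM_Y halo rows above and below, BLOCK_DIM_X columns wide.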
__shared__ sum_t smem[(PATCH_PER_BLOCK + 2 * HALO_SIZE) * BLOCK_DIM_Y][BLOCK_DIM_X];
const int x = blockIdx.x * BLOCK_DIM_X + threadIdx.x;
if (x >= src.cols)
return;
const T* src_col = src.ptr() + x;
const int yStart = blockIdx.y * (BLOCK_DIM_Y * PATCH_PER_BLOCK) + threadIdx.y;
if (blockIdx.y > 0)
{
//Upper halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y + j * BLOCK_DIM_Y][threadIdx.x] = saturate_cast<sum_t>(src(yStart - (HALO_SIZE - j) * BLOCK_DIM_Y, x));
}
else
{
//Upper halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y + j * BLOCK_DIM_Y][threadIdx.x] = saturate_cast<sum_t>(brd.at_low(yStart - (HALO_SIZE - j) * BLOCK_DIM_Y, src_col, src.step));
}
if (blockIdx.y + 2 < gridDim.y)
{
//Main data
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
smem[threadIdx.y + HALO_SIZE * BLOCK_DIM_Y + j * BLOCK_DIM_Y][threadIdx.x] = saturate_cast<sum_t>(src(yStart + j * BLOCK_DIM_Y, x));
//Lower halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y + (PATCH_PER_BLOCK + HALO_SIZE) * BLOCK_DIM_Y + j * BLOCK_DIM_Y][threadIdx.x] = saturate_cast<sum_t>(src(yStart + (PATCH_PER_BLOCK + j) * BLOCK_DIM_Y, x));
}
else
{
//Main data
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
smem[threadIdx.y + HALO_SIZE * BLOCK_DIM_Y + j * BLOCK_DIM_Y][threadIdx.x] = saturate_cast<sum_t>(brd.at_high(yStart + j * BLOCK_DIM_Y, src_col, src.step));
//Lower halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y + (PATCH_PER_BLOCK + HALO_SIZE) * BLOCK_DIM_Y + j * BLOCK_DIM_Y][threadIdx.x] = saturate_cast<sum_t>(brd.at_high(yStart + (PATCH_PER_BLOCK + j) * BLOCK_DIM_Y, src_col, src.step));
}
__syncthreads();
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
{
const int y = yStart + j * BLOCK_DIM_Y;
if (y < src.rows)
{
sum_t sum = VecTraits<sum_t>::all(0);
#pragma unroll
for (int k = 0; k < KSIZE; ++k)
sum = sum + smem[threadIdx.y + HALO_SIZE * BLOCK_DIM_Y + j * BLOCK_DIM_Y - anchor + k][threadIdx.x] * c_kernel[k];
dst(y, x) = saturate_cast<D>(sum);
}
}
}
template <int KSIZE, typename T, typename D, template<typename> class B>
void linearColumnFilter_caller(PtrStepSz<T> src, PtrStepSz<D> dst, int anchor, int cc, hipStream_t stream)
{
int BLOCK_DIM_X;
int BLOCK_DIM_Y;
int PATCH_PER_BLOCK;
if (cc >= 20)
{
BLOCK_DIM_X = 16;
BLOCK_DIM_Y = 16;
PATCH_PER_BLOCK = 4;
}
else
{
BLOCK_DIM_X = 16;
BLOCK_DIM_Y = 8;
PATCH_PER_BLOCK = 2;
}
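            // Each block filters a BLOCK_DIM_X x (BLOCK_DIM_Y * PATCH_PER_BLOCK) tile of the image.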
const dim3 block(BLOCK_DIM_X, BLOCK_DIM_Y);
const dim3 grid(divUp(src.cols, BLOCK_DIM_X), divUp(src.rows, BLOCK_DIM_Y * PATCH_PER_BLOCK));
B<T> brd(src.rows);
hipLaunchKernelGGL(( linearColumnFilter<KSIZE, T, D>), dim3(grid), dim3(block), 0, stream, src, dst, anchor, brd);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <typename T, typename D>
void linearColumnFilter_gpu(PtrStepSzb src, PtrStepSzb dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, hipStream_t stream)
{
typedef void (*caller_t)(PtrStepSz<T> src, PtrStepSz<D> dst, int anchor, int cc, hipStream_t stream);
static const caller_t callers[5][33] =
{
{
0,
linearColumnFilter_caller< 1, T, D, BrdColReflect101>,
linearColumnFilter_caller< 2, T, D, BrdColReflect101>,
linearColumnFilter_caller< 3, T, D, BrdColReflect101>,
linearColumnFilter_caller< 4, T, D, BrdColReflect101>,
linearColumnFilter_caller< 5, T, D, BrdColReflect101>,
linearColumnFilter_caller< 6, T, D, BrdColReflect101>,
linearColumnFilter_caller< 7, T, D, BrdColReflect101>,
linearColumnFilter_caller< 8, T, D, BrdColReflect101>,
linearColumnFilter_caller< 9, T, D, BrdColReflect101>,
linearColumnFilter_caller<10, T, D, BrdColReflect101>,
linearColumnFilter_caller<11, T, D, BrdColReflect101>,
linearColumnFilter_caller<12, T, D, BrdColReflect101>,
linearColumnFilter_caller<13, T, D, BrdColReflect101>,
linearColumnFilter_caller<14, T, D, BrdColReflect101>,
linearColumnFilter_caller<15, T, D, BrdColReflect101>,
linearColumnFilter_caller<16, T, D, BrdColReflect101>,
linearColumnFilter_caller<17, T, D, BrdColReflect101>,
linearColumnFilter_caller<18, T, D, BrdColReflect101>,
linearColumnFilter_caller<19, T, D, BrdColReflect101>,
linearColumnFilter_caller<20, T, D, BrdColReflect101>,
linearColumnFilter_caller<21, T, D, BrdColReflect101>,
linearColumnFilter_caller<22, T, D, BrdColReflect101>,
linearColumnFilter_caller<23, T, D, BrdColReflect101>,
linearColumnFilter_caller<24, T, D, BrdColReflect101>,
linearColumnFilter_caller<25, T, D, BrdColReflect101>,
linearColumnFilter_caller<26, T, D, BrdColReflect101>,
linearColumnFilter_caller<27, T, D, BrdColReflect101>,
linearColumnFilter_caller<28, T, D, BrdColReflect101>,
linearColumnFilter_caller<29, T, D, BrdColReflect101>,
linearColumnFilter_caller<30, T, D, BrdColReflect101>,
linearColumnFilter_caller<31, T, D, BrdColReflect101>,
linearColumnFilter_caller<32, T, D, BrdColReflect101>
},
{
0,
linearColumnFilter_caller< 1, T, D, BrdColReplicate>,
linearColumnFilter_caller< 2, T, D, BrdColReplicate>,
linearColumnFilter_caller< 3, T, D, BrdColReplicate>,
linearColumnFilter_caller< 4, T, D, BrdColReplicate>,
linearColumnFilter_caller< 5, T, D, BrdColReplicate>,
linearColumnFilter_caller< 6, T, D, BrdColReplicate>,
linearColumnFilter_caller< 7, T, D, BrdColReplicate>,
linearColumnFilter_caller< 8, T, D, BrdColReplicate>,
linearColumnFilter_caller< 9, T, D, BrdColReplicate>,
linearColumnFilter_caller<10, T, D, BrdColReplicate>,
linearColumnFilter_caller<11, T, D, BrdColReplicate>,
linearColumnFilter_caller<12, T, D, BrdColReplicate>,
linearColumnFilter_caller<13, T, D, BrdColReplicate>,
linearColumnFilter_caller<14, T, D, BrdColReplicate>,
linearColumnFilter_caller<15, T, D, BrdColReplicate>,
linearColumnFilter_caller<16, T, D, BrdColReplicate>,
linearColumnFilter_caller<17, T, D, BrdColReplicate>,
linearColumnFilter_caller<18, T, D, BrdColReplicate>,
linearColumnFilter_caller<19, T, D, BrdColReplicate>,
linearColumnFilter_caller<20, T, D, BrdColReplicate>,
linearColumnFilter_caller<21, T, D, BrdColReplicate>,
linearColumnFilter_caller<22, T, D, BrdColReplicate>,
linearColumnFilter_caller<23, T, D, BrdColReplicate>,
linearColumnFilter_caller<24, T, D, BrdColReplicate>,
linearColumnFilter_caller<25, T, D, BrdColReplicate>,
linearColumnFilter_caller<26, T, D, BrdColReplicate>,
linearColumnFilter_caller<27, T, D, BrdColReplicate>,
linearColumnFilter_caller<28, T, D, BrdColReplicate>,
linearColumnFilter_caller<29, T, D, BrdColReplicate>,
linearColumnFilter_caller<30, T, D, BrdColReplicate>,
linearColumnFilter_caller<31, T, D, BrdColReplicate>,
linearColumnFilter_caller<32, T, D, BrdColReplicate>
},
{
0,
linearColumnFilter_caller< 1, T, D, BrdColConstant>,
linearColumnFilter_caller< 2, T, D, BrdColConstant>,
linearColumnFilter_caller< 3, T, D, BrdColConstant>,
linearColumnFilter_caller< 4, T, D, BrdColConstant>,
linearColumnFilter_caller< 5, T, D, BrdColConstant>,
linearColumnFilter_caller< 6, T, D, BrdColConstant>,
linearColumnFilter_caller< 7, T, D, BrdColConstant>,
linearColumnFilter_caller< 8, T, D, BrdColConstant>,
linearColumnFilter_caller< 9, T, D, BrdColConstant>,
linearColumnFilter_caller<10, T, D, BrdColConstant>,
linearColumnFilter_caller<11, T, D, BrdColConstant>,
linearColumnFilter_caller<12, T, D, BrdColConstant>,
linearColumnFilter_caller<13, T, D, BrdColConstant>,
linearColumnFilter_caller<14, T, D, BrdColConstant>,
linearColumnFilter_caller<15, T, D, BrdColConstant>,
linearColumnFilter_caller<16, T, D, BrdColConstant>,
linearColumnFilter_caller<17, T, D, BrdColConstant>,
linearColumnFilter_caller<18, T, D, BrdColConstant>,
linearColumnFilter_caller<19, T, D, BrdColConstant>,
linearColumnFilter_caller<20, T, D, BrdColConstant>,
linearColumnFilter_caller<21, T, D, BrdColConstant>,
linearColumnFilter_caller<22, T, D, BrdColConstant>,
linearColumnFilter_caller<23, T, D, BrdColConstant>,
linearColumnFilter_caller<24, T, D, BrdColConstant>,
linearColumnFilter_caller<25, T, D, BrdColConstant>,
linearColumnFilter_caller<26, T, D, BrdColConstant>,
linearColumnFilter_caller<27, T, D, BrdColConstant>,
linearColumnFilter_caller<28, T, D, BrdColConstant>,
linearColumnFilter_caller<29, T, D, BrdColConstant>,
linearColumnFilter_caller<30, T, D, BrdColConstant>,
linearColumnFilter_caller<31, T, D, BrdColConstant>,
linearColumnFilter_caller<32, T, D, BrdColConstant>
},
{
0,
linearColumnFilter_caller< 1, T, D, BrdColReflect>,
linearColumnFilter_caller< 2, T, D, BrdColReflect>,
linearColumnFilter_caller< 3, T, D, BrdColReflect>,
linearColumnFilter_caller< 4, T, D, BrdColReflect>,
linearColumnFilter_caller< 5, T, D, BrdColReflect>,
linearColumnFilter_caller< 6, T, D, BrdColReflect>,
linearColumnFilter_caller< 7, T, D, BrdColReflect>,
linearColumnFilter_caller< 8, T, D, BrdColReflect>,
linearColumnFilter_caller< 9, T, D, BrdColReflect>,
linearColumnFilter_caller<10, T, D, BrdColReflect>,
linearColumnFilter_caller<11, T, D, BrdColReflect>,
linearColumnFilter_caller<12, T, D, BrdColReflect>,
linearColumnFilter_caller<13, T, D, BrdColReflect>,
linearColumnFilter_caller<14, T, D, BrdColReflect>,
linearColumnFilter_caller<15, T, D, BrdColReflect>,
linearColumnFilter_caller<16, T, D, BrdColReflect>,
linearColumnFilter_caller<17, T, D, BrdColReflect>,
linearColumnFilter_caller<18, T, D, BrdColReflect>,
linearColumnFilter_caller<19, T, D, BrdColReflect>,
linearColumnFilter_caller<20, T, D, BrdColReflect>,
linearColumnFilter_caller<21, T, D, BrdColReflect>,
linearColumnFilter_caller<22, T, D, BrdColReflect>,
linearColumnFilter_caller<23, T, D, BrdColReflect>,
linearColumnFilter_caller<24, T, D, BrdColReflect>,
linearColumnFilter_caller<25, T, D, BrdColReflect>,
linearColumnFilter_caller<26, T, D, BrdColReflect>,
linearColumnFilter_caller<27, T, D, BrdColReflect>,
linearColumnFilter_caller<28, T, D, BrdColReflect>,
linearColumnFilter_caller<29, T, D, BrdColReflect>,
linearColumnFilter_caller<30, T, D, BrdColReflect>,
linearColumnFilter_caller<31, T, D, BrdColReflect>,
linearColumnFilter_caller<32, T, D, BrdColReflect>
},
{
0,
linearColumnFilter_caller< 1, T, D, BrdColWrap>,
linearColumnFilter_caller< 2, T, D, BrdColWrap>,
linearColumnFilter_caller< 3, T, D, BrdColWrap>,
linearColumnFilter_caller< 4, T, D, BrdColWrap>,
linearColumnFilter_caller< 5, T, D, BrdColWrap>,
linearColumnFilter_caller< 6, T, D, BrdColWrap>,
linearColumnFilter_caller< 7, T, D, BrdColWrap>,
linearColumnFilter_caller< 8, T, D, BrdColWrap>,
linearColumnFilter_caller< 9, T, D, BrdColWrap>,
linearColumnFilter_caller<10, T, D, BrdColWrap>,
linearColumnFilter_caller<11, T, D, BrdColWrap>,
linearColumnFilter_caller<12, T, D, BrdColWrap>,
linearColumnFilter_caller<13, T, D, BrdColWrap>,
linearColumnFilter_caller<14, T, D, BrdColWrap>,
linearColumnFilter_caller<15, T, D, BrdColWrap>,
linearColumnFilter_caller<16, T, D, BrdColWrap>,
linearColumnFilter_caller<17, T, D, BrdColWrap>,
linearColumnFilter_caller<18, T, D, BrdColWrap>,
linearColumnFilter_caller<19, T, D, BrdColWrap>,
linearColumnFilter_caller<20, T, D, BrdColWrap>,
linearColumnFilter_caller<21, T, D, BrdColWrap>,
linearColumnFilter_caller<22, T, D, BrdColWrap>,
linearColumnFilter_caller<23, T, D, BrdColWrap>,
linearColumnFilter_caller<24, T, D, BrdColWrap>,
linearColumnFilter_caller<25, T, D, BrdColWrap>,
linearColumnFilter_caller<26, T, D, BrdColWrap>,
linearColumnFilter_caller<27, T, D, BrdColWrap>,
linearColumnFilter_caller<28, T, D, BrdColWrap>,
linearColumnFilter_caller<29, T, D, BrdColWrap>,
linearColumnFilter_caller<30, T, D, BrdColWrap>,
linearColumnFilter_caller<31, T, D, BrdColWrap>,
linearColumnFilter_caller<32, T, D, BrdColWrap>
}
};
loadKernel(kernel, ksize);
callers[brd_type][ksize]((PtrStepSz<T>)src, (PtrStepSz<D>)dst, anchor, cc, stream);
}
template void linearColumnFilter_gpu<float , uchar >(PtrStepSzb src, PtrStepSzb dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
template void linearColumnFilter_gpu<float4, uchar4>(PtrStepSzb src, PtrStepSzb dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
template void linearColumnFilter_gpu<float3, short3>(PtrStepSzb src, PtrStepSzb dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
template void linearColumnFilter_gpu<float , int >(PtrStepSzb src, PtrStepSzb dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
template void linearColumnFilter_gpu<float , float >(PtrStepSzb src, PtrStepSzb dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
} // namespace column_filter
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */ | 8a7f818e45812258be158cfd19e6884cdfd3967c.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/border_interpolate.hpp"
#include "opencv2/gpu/device/static_check.hpp"
namespace cv { namespace gpu { namespace device
{
namespace column_filter
{
#define MAX_KERNEL_SIZE 32
__constant__ float c_kernel[MAX_KERNEL_SIZE];
void loadKernel(const float kernel[], int ksize)
{
cudaSafeCall( cudaMemcpyToSymbol(c_kernel, kernel, ksize * sizeof(float)) );
}
template <int KSIZE, typename T, typename D, typename B>
__global__ void linearColumnFilter(const PtrStepSz<T> src, PtrStep<D> dst, const int anchor, const B brd)
{
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 200)
const int BLOCK_DIM_X = 16;
const int BLOCK_DIM_Y = 16;
const int PATCH_PER_BLOCK = 4;
const int HALO_SIZE = KSIZE <= 16 ? 1 : 2;
#else
const int BLOCK_DIM_X = 16;
const int BLOCK_DIM_Y = 8;
const int PATCH_PER_BLOCK = 2;
const int HALO_SIZE = 2;
#endif
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type sum_t;
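            // Shared-memory tile: PATCH_PER_BLOCK * BLOCK_DIM_Y rows of source data per block,
            // plus HALO_SIZE * BLOCK_DIM_Y halo rows above and below, BLOCK_DIM_X columns wide.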
__shared__ sum_t smem[(PATCH_PER_BLOCK + 2 * HALO_SIZE) * BLOCK_DIM_Y][BLOCK_DIM_X];
const int x = blockIdx.x * BLOCK_DIM_X + threadIdx.x;
if (x >= src.cols)
return;
const T* src_col = src.ptr() + x;
const int yStart = blockIdx.y * (BLOCK_DIM_Y * PATCH_PER_BLOCK) + threadIdx.y;
if (blockIdx.y > 0)
{
//Upper halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y + j * BLOCK_DIM_Y][threadIdx.x] = saturate_cast<sum_t>(src(yStart - (HALO_SIZE - j) * BLOCK_DIM_Y, x));
}
else
{
//Upper halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y + j * BLOCK_DIM_Y][threadIdx.x] = saturate_cast<sum_t>(brd.at_low(yStart - (HALO_SIZE - j) * BLOCK_DIM_Y, src_col, src.step));
}
if (blockIdx.y + 2 < gridDim.y)
{
//Main data
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
smem[threadIdx.y + HALO_SIZE * BLOCK_DIM_Y + j * BLOCK_DIM_Y][threadIdx.x] = saturate_cast<sum_t>(src(yStart + j * BLOCK_DIM_Y, x));
//Lower halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y + (PATCH_PER_BLOCK + HALO_SIZE) * BLOCK_DIM_Y + j * BLOCK_DIM_Y][threadIdx.x] = saturate_cast<sum_t>(src(yStart + (PATCH_PER_BLOCK + j) * BLOCK_DIM_Y, x));
}
else
{
//Main data
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
smem[threadIdx.y + HALO_SIZE * BLOCK_DIM_Y + j * BLOCK_DIM_Y][threadIdx.x] = saturate_cast<sum_t>(brd.at_high(yStart + j * BLOCK_DIM_Y, src_col, src.step));
//Lower halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y + (PATCH_PER_BLOCK + HALO_SIZE) * BLOCK_DIM_Y + j * BLOCK_DIM_Y][threadIdx.x] = saturate_cast<sum_t>(brd.at_high(yStart + (PATCH_PER_BLOCK + j) * BLOCK_DIM_Y, src_col, src.step));
}
__syncthreads();
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
{
const int y = yStart + j * BLOCK_DIM_Y;
if (y < src.rows)
{
sum_t sum = VecTraits<sum_t>::all(0);
#pragma unroll
for (int k = 0; k < KSIZE; ++k)
sum = sum + smem[threadIdx.y + HALO_SIZE * BLOCK_DIM_Y + j * BLOCK_DIM_Y - anchor + k][threadIdx.x] * c_kernel[k];
dst(y, x) = saturate_cast<D>(sum);
}
}
}
template <int KSIZE, typename T, typename D, template<typename> class B>
void linearColumnFilter_caller(PtrStepSz<T> src, PtrStepSz<D> dst, int anchor, int cc, cudaStream_t stream)
{
int BLOCK_DIM_X;
int BLOCK_DIM_Y;
int PATCH_PER_BLOCK;
if (cc >= 20)
{
BLOCK_DIM_X = 16;
BLOCK_DIM_Y = 16;
PATCH_PER_BLOCK = 4;
}
else
{
BLOCK_DIM_X = 16;
BLOCK_DIM_Y = 8;
PATCH_PER_BLOCK = 2;
}
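            // Each block filters a BLOCK_DIM_X x (BLOCK_DIM_Y * PATCH_PER_BLOCK) tile of the image.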
const dim3 block(BLOCK_DIM_X, BLOCK_DIM_Y);
const dim3 grid(divUp(src.cols, BLOCK_DIM_X), divUp(src.rows, BLOCK_DIM_Y * PATCH_PER_BLOCK));
B<T> brd(src.rows);
linearColumnFilter<KSIZE, T, D><<<grid, block, 0, stream>>>(src, dst, anchor, brd);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <typename T, typename D>
void linearColumnFilter_gpu(PtrStepSzb src, PtrStepSzb dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, cudaStream_t stream)
{
typedef void (*caller_t)(PtrStepSz<T> src, PtrStepSz<D> dst, int anchor, int cc, cudaStream_t stream);
static const caller_t callers[5][33] =
{
{
0,
linearColumnFilter_caller< 1, T, D, BrdColReflect101>,
linearColumnFilter_caller< 2, T, D, BrdColReflect101>,
linearColumnFilter_caller< 3, T, D, BrdColReflect101>,
linearColumnFilter_caller< 4, T, D, BrdColReflect101>,
linearColumnFilter_caller< 5, T, D, BrdColReflect101>,
linearColumnFilter_caller< 6, T, D, BrdColReflect101>,
linearColumnFilter_caller< 7, T, D, BrdColReflect101>,
linearColumnFilter_caller< 8, T, D, BrdColReflect101>,
linearColumnFilter_caller< 9, T, D, BrdColReflect101>,
linearColumnFilter_caller<10, T, D, BrdColReflect101>,
linearColumnFilter_caller<11, T, D, BrdColReflect101>,
linearColumnFilter_caller<12, T, D, BrdColReflect101>,
linearColumnFilter_caller<13, T, D, BrdColReflect101>,
linearColumnFilter_caller<14, T, D, BrdColReflect101>,
linearColumnFilter_caller<15, T, D, BrdColReflect101>,
linearColumnFilter_caller<16, T, D, BrdColReflect101>,
linearColumnFilter_caller<17, T, D, BrdColReflect101>,
linearColumnFilter_caller<18, T, D, BrdColReflect101>,
linearColumnFilter_caller<19, T, D, BrdColReflect101>,
linearColumnFilter_caller<20, T, D, BrdColReflect101>,
linearColumnFilter_caller<21, T, D, BrdColReflect101>,
linearColumnFilter_caller<22, T, D, BrdColReflect101>,
linearColumnFilter_caller<23, T, D, BrdColReflect101>,
linearColumnFilter_caller<24, T, D, BrdColReflect101>,
linearColumnFilter_caller<25, T, D, BrdColReflect101>,
linearColumnFilter_caller<26, T, D, BrdColReflect101>,
linearColumnFilter_caller<27, T, D, BrdColReflect101>,
linearColumnFilter_caller<28, T, D, BrdColReflect101>,
linearColumnFilter_caller<29, T, D, BrdColReflect101>,
linearColumnFilter_caller<30, T, D, BrdColReflect101>,
linearColumnFilter_caller<31, T, D, BrdColReflect101>,
linearColumnFilter_caller<32, T, D, BrdColReflect101>
},
{
0,
linearColumnFilter_caller< 1, T, D, BrdColReplicate>,
linearColumnFilter_caller< 2, T, D, BrdColReplicate>,
linearColumnFilter_caller< 3, T, D, BrdColReplicate>,
linearColumnFilter_caller< 4, T, D, BrdColReplicate>,
linearColumnFilter_caller< 5, T, D, BrdColReplicate>,
linearColumnFilter_caller< 6, T, D, BrdColReplicate>,
linearColumnFilter_caller< 7, T, D, BrdColReplicate>,
linearColumnFilter_caller< 8, T, D, BrdColReplicate>,
linearColumnFilter_caller< 9, T, D, BrdColReplicate>,
linearColumnFilter_caller<10, T, D, BrdColReplicate>,
linearColumnFilter_caller<11, T, D, BrdColReplicate>,
linearColumnFilter_caller<12, T, D, BrdColReplicate>,
linearColumnFilter_caller<13, T, D, BrdColReplicate>,
linearColumnFilter_caller<14, T, D, BrdColReplicate>,
linearColumnFilter_caller<15, T, D, BrdColReplicate>,
linearColumnFilter_caller<16, T, D, BrdColReplicate>,
linearColumnFilter_caller<17, T, D, BrdColReplicate>,
linearColumnFilter_caller<18, T, D, BrdColReplicate>,
linearColumnFilter_caller<19, T, D, BrdColReplicate>,
linearColumnFilter_caller<20, T, D, BrdColReplicate>,
linearColumnFilter_caller<21, T, D, BrdColReplicate>,
linearColumnFilter_caller<22, T, D, BrdColReplicate>,
linearColumnFilter_caller<23, T, D, BrdColReplicate>,
linearColumnFilter_caller<24, T, D, BrdColReplicate>,
linearColumnFilter_caller<25, T, D, BrdColReplicate>,
linearColumnFilter_caller<26, T, D, BrdColReplicate>,
linearColumnFilter_caller<27, T, D, BrdColReplicate>,
linearColumnFilter_caller<28, T, D, BrdColReplicate>,
linearColumnFilter_caller<29, T, D, BrdColReplicate>,
linearColumnFilter_caller<30, T, D, BrdColReplicate>,
linearColumnFilter_caller<31, T, D, BrdColReplicate>,
linearColumnFilter_caller<32, T, D, BrdColReplicate>
},
{
0,
linearColumnFilter_caller< 1, T, D, BrdColConstant>,
linearColumnFilter_caller< 2, T, D, BrdColConstant>,
linearColumnFilter_caller< 3, T, D, BrdColConstant>,
linearColumnFilter_caller< 4, T, D, BrdColConstant>,
linearColumnFilter_caller< 5, T, D, BrdColConstant>,
linearColumnFilter_caller< 6, T, D, BrdColConstant>,
linearColumnFilter_caller< 7, T, D, BrdColConstant>,
linearColumnFilter_caller< 8, T, D, BrdColConstant>,
linearColumnFilter_caller< 9, T, D, BrdColConstant>,
linearColumnFilter_caller<10, T, D, BrdColConstant>,
linearColumnFilter_caller<11, T, D, BrdColConstant>,
linearColumnFilter_caller<12, T, D, BrdColConstant>,
linearColumnFilter_caller<13, T, D, BrdColConstant>,
linearColumnFilter_caller<14, T, D, BrdColConstant>,
linearColumnFilter_caller<15, T, D, BrdColConstant>,
linearColumnFilter_caller<16, T, D, BrdColConstant>,
linearColumnFilter_caller<17, T, D, BrdColConstant>,
linearColumnFilter_caller<18, T, D, BrdColConstant>,
linearColumnFilter_caller<19, T, D, BrdColConstant>,
linearColumnFilter_caller<20, T, D, BrdColConstant>,
linearColumnFilter_caller<21, T, D, BrdColConstant>,
linearColumnFilter_caller<22, T, D, BrdColConstant>,
linearColumnFilter_caller<23, T, D, BrdColConstant>,
linearColumnFilter_caller<24, T, D, BrdColConstant>,
linearColumnFilter_caller<25, T, D, BrdColConstant>,
linearColumnFilter_caller<26, T, D, BrdColConstant>,
linearColumnFilter_caller<27, T, D, BrdColConstant>,
linearColumnFilter_caller<28, T, D, BrdColConstant>,
linearColumnFilter_caller<29, T, D, BrdColConstant>,
linearColumnFilter_caller<30, T, D, BrdColConstant>,
linearColumnFilter_caller<31, T, D, BrdColConstant>,
linearColumnFilter_caller<32, T, D, BrdColConstant>
},
{
0,
linearColumnFilter_caller< 1, T, D, BrdColReflect>,
linearColumnFilter_caller< 2, T, D, BrdColReflect>,
linearColumnFilter_caller< 3, T, D, BrdColReflect>,
linearColumnFilter_caller< 4, T, D, BrdColReflect>,
linearColumnFilter_caller< 5, T, D, BrdColReflect>,
linearColumnFilter_caller< 6, T, D, BrdColReflect>,
linearColumnFilter_caller< 7, T, D, BrdColReflect>,
linearColumnFilter_caller< 8, T, D, BrdColReflect>,
linearColumnFilter_caller< 9, T, D, BrdColReflect>,
linearColumnFilter_caller<10, T, D, BrdColReflect>,
linearColumnFilter_caller<11, T, D, BrdColReflect>,
linearColumnFilter_caller<12, T, D, BrdColReflect>,
linearColumnFilter_caller<13, T, D, BrdColReflect>,
linearColumnFilter_caller<14, T, D, BrdColReflect>,
linearColumnFilter_caller<15, T, D, BrdColReflect>,
linearColumnFilter_caller<16, T, D, BrdColReflect>,
linearColumnFilter_caller<17, T, D, BrdColReflect>,
linearColumnFilter_caller<18, T, D, BrdColReflect>,
linearColumnFilter_caller<19, T, D, BrdColReflect>,
linearColumnFilter_caller<20, T, D, BrdColReflect>,
linearColumnFilter_caller<21, T, D, BrdColReflect>,
linearColumnFilter_caller<22, T, D, BrdColReflect>,
linearColumnFilter_caller<23, T, D, BrdColReflect>,
linearColumnFilter_caller<24, T, D, BrdColReflect>,
linearColumnFilter_caller<25, T, D, BrdColReflect>,
linearColumnFilter_caller<26, T, D, BrdColReflect>,
linearColumnFilter_caller<27, T, D, BrdColReflect>,
linearColumnFilter_caller<28, T, D, BrdColReflect>,
linearColumnFilter_caller<29, T, D, BrdColReflect>,
linearColumnFilter_caller<30, T, D, BrdColReflect>,
linearColumnFilter_caller<31, T, D, BrdColReflect>,
linearColumnFilter_caller<32, T, D, BrdColReflect>
},
{
0,
linearColumnFilter_caller< 1, T, D, BrdColWrap>,
linearColumnFilter_caller< 2, T, D, BrdColWrap>,
linearColumnFilter_caller< 3, T, D, BrdColWrap>,
linearColumnFilter_caller< 4, T, D, BrdColWrap>,
linearColumnFilter_caller< 5, T, D, BrdColWrap>,
linearColumnFilter_caller< 6, T, D, BrdColWrap>,
linearColumnFilter_caller< 7, T, D, BrdColWrap>,
linearColumnFilter_caller< 8, T, D, BrdColWrap>,
linearColumnFilter_caller< 9, T, D, BrdColWrap>,
linearColumnFilter_caller<10, T, D, BrdColWrap>,
linearColumnFilter_caller<11, T, D, BrdColWrap>,
linearColumnFilter_caller<12, T, D, BrdColWrap>,
linearColumnFilter_caller<13, T, D, BrdColWrap>,
linearColumnFilter_caller<14, T, D, BrdColWrap>,
linearColumnFilter_caller<15, T, D, BrdColWrap>,
linearColumnFilter_caller<16, T, D, BrdColWrap>,
linearColumnFilter_caller<17, T, D, BrdColWrap>,
linearColumnFilter_caller<18, T, D, BrdColWrap>,
linearColumnFilter_caller<19, T, D, BrdColWrap>,
linearColumnFilter_caller<20, T, D, BrdColWrap>,
linearColumnFilter_caller<21, T, D, BrdColWrap>,
linearColumnFilter_caller<22, T, D, BrdColWrap>,
linearColumnFilter_caller<23, T, D, BrdColWrap>,
linearColumnFilter_caller<24, T, D, BrdColWrap>,
linearColumnFilter_caller<25, T, D, BrdColWrap>,
linearColumnFilter_caller<26, T, D, BrdColWrap>,
linearColumnFilter_caller<27, T, D, BrdColWrap>,
linearColumnFilter_caller<28, T, D, BrdColWrap>,
linearColumnFilter_caller<29, T, D, BrdColWrap>,
linearColumnFilter_caller<30, T, D, BrdColWrap>,
linearColumnFilter_caller<31, T, D, BrdColWrap>,
linearColumnFilter_caller<32, T, D, BrdColWrap>
}
};
loadKernel(kernel, ksize);
callers[brd_type][ksize]((PtrStepSz<T>)src, (PtrStepSz<D>)dst, anchor, cc, stream);
}
template void linearColumnFilter_gpu<float , uchar >(PtrStepSzb src, PtrStepSzb dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
template void linearColumnFilter_gpu<float4, uchar4>(PtrStepSzb src, PtrStepSzb dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
template void linearColumnFilter_gpu<float3, short3>(PtrStepSzb src, PtrStepSzb dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
template void linearColumnFilter_gpu<float , int >(PtrStepSzb src, PtrStepSzb dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
template void linearColumnFilter_gpu<float , float >(PtrStepSzb src, PtrStepSzb dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
} // namespace column_filter
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */ |